code
stringlengths 13
6.09M
| order_type
stringclasses 2
values | original_example
dict | step_ids
listlengths 1
5
|
|---|---|---|---|
def DFS(x):
# 전위순회
if x > 7:
return
else:
DFS((x * 2))
print(x)
DFS((x*2)+1)
if __name__ == "__main__":
DFS(1)
|
normal
|
{
"blob_id": "1cc8695aa694359314b6d478fe6abed29fdc6c91",
"index": 3309,
"step-1": "<mask token>\n",
"step-2": "def DFS(x):\n if x > 7:\n return\n else:\n DFS(x * 2)\n print(x)\n DFS(x * 2 + 1)\n\n\n<mask token>\n",
"step-3": "def DFS(x):\n if x > 7:\n return\n else:\n DFS(x * 2)\n print(x)\n DFS(x * 2 + 1)\n\n\nif __name__ == '__main__':\n DFS(1)\n",
"step-4": "\ndef DFS(x):\n # 전위순회\n if x > 7:\n return\n else:\n \n DFS((x * 2))\n print(x)\n DFS((x*2)+1)\n\n \nif __name__ == \"__main__\":\n DFS(1)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class ArrayHardwareMetrics:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ArrayHardwareMetrics:
def __init__(self, fa):
self.fa = fa
self.chassis_health = None
self.controller_health = None
self.component_health = None
self.temperature = None
self.temperature = None
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ArrayHardwareMetrics:
def __init__(self, fa):
self.fa = fa
self.chassis_health = None
self.controller_health = None
self.component_health = None
self.temperature = None
self.temperature = None
<|reserved_special_token_0|>
def get_metrics(self):
self._array_hardware_status()
yield self.chassis_health
yield self.controller_health
yield self.component_health
yield self.temperature
yield self.power
<|reserved_special_token_1|>
import re
from prometheus_client.core import GaugeMetricFamily
class ArrayHardwareMetrics:
def __init__(self, fa):
self.fa = fa
self.chassis_health = None
self.controller_health = None
self.component_health = None
self.temperature = None
self.temperature = None
def _array_hardware_status(self):
"""Collect information about all system sensors."""
data = self.fa.get_hardware_status()
self.chassis_health = GaugeMetricFamily(
'purefa_hardware_chassis_health',
'FlashArray hardware chassis health status')
self.controller_health = GaugeMetricFamily(
'purefa_hardware_controller_health',
'FlashArray hardware controller health status', labels=[
'controller'])
self.component_health = GaugeMetricFamily(
'purefa_hardware_component_health',
'FlashArray hardware component health status', labels=[
'chassis', 'controller', 'component', 'index'])
self.temperature = GaugeMetricFamily(
'purefa_hardware_temperature_celsius',
'FlashArray hardware temperature sensors', labels=['chassis',
'controller', 'sensor'])
self.power = GaugeMetricFamily('purefa_hardware_power_volts',
'FlashArray hardware power supply voltage', labels=['chassis',
'power_supply'])
re_chassis = re.compile('^CH(\\d+)$')
re_controller = re.compile('^CT(\\d+)$')
re_component = re.compile('^(CH|CT)(\\d+)\\.([A-Z]+)([0-9]+)$')
for comp in data:
if comp['status'] == 'not_installed':
continue
component_name = comp['name']
component_state = 1 if comp['status'] == 'ok' else 0
if re.match('^CH\\d+$', component_name):
detail = re_chassis.match(component_name)
c_index = detail.group(1)
self.chassis_health.add_metric([c_index], component_state)
continue
elif re.match('^CT\\d+$', component_name):
detail = re_controller.match(component_name)
c_index = detail.group(1)
self.controller_health.add_metric([c_index], component_state)
continue
elif re.match('^C(H|T)\\d+\\.[A-Z]+[0-9]+$', component_name):
detail = re_component.match(component_name)
c_base = detail.group(1)
c_base_index = detail.group(2)
c_type = detail.group(3)
c_index = detail.group(4)
if c_base == 'CH':
labelset = [c_base_index, '', c_type, c_index]
else:
labelset = ['', c_base_index, c_type, c_index]
self.component_health.add_metric(labels=labelset, value=
component_state)
if c_type.lower() == 'tmp':
if c_base == 'CH':
self.temperature.add_metric([c_base_index, '',
c_index], float(comp['temperature']))
else:
self.temperature.add_metric(['', c_base_index,
c_index], float(comp['temperature']))
elif c_type.lower() == 'pwr':
if comp['voltage'] is not None:
self.power.add_metric([c_base_index, c_index],
float(comp['voltage']))
def get_metrics(self):
self._array_hardware_status()
yield self.chassis_health
yield self.controller_health
yield self.component_health
yield self.temperature
yield self.power
<|reserved_special_token_1|>
import re
from prometheus_client.core import GaugeMetricFamily
class ArrayHardwareMetrics:
def __init__(self, fa):
self.fa = fa
self.chassis_health = None
self.controller_health = None
self.component_health = None
self.temperature = None
self.temperature = None
def _array_hardware_status(self):
"""Collect information about all system sensors."""
data = self.fa.get_hardware_status()
self.chassis_health = GaugeMetricFamily(
'purefa_hardware_chassis_health',
'FlashArray hardware chassis health status')
self.controller_health = GaugeMetricFamily(
'purefa_hardware_controller_health',
'FlashArray hardware controller health status',
labels=['controller'])
self.component_health = GaugeMetricFamily(
'purefa_hardware_component_health',
'FlashArray hardware component health status',
labels=['chassis', 'controller', 'component',
'index'])
self.temperature = GaugeMetricFamily(
'purefa_hardware_temperature_celsius',
'FlashArray hardware temperature sensors',
labels=['chassis', 'controller',
'sensor'])
self.power = GaugeMetricFamily(
'purefa_hardware_power_volts',
'FlashArray hardware power supply voltage',
labels=['chassis', 'power_supply'])
re_chassis = re.compile(r"^CH(\d+)$")
re_controller = re.compile(r"^CT(\d+)$")
re_component = re.compile(r"^(CH|CT)(\d+)\.([A-Z]+)([0-9]+)$")
for comp in data:
if (comp['status'] == 'not_installed'):
continue
component_name = comp['name']
component_state = 1 if (comp['status'] == 'ok') else 0
# Chassis
if re.match(r"^CH\d+$", component_name):
detail = re_chassis.match(component_name)
c_index = detail.group(1)
self.chassis_health.add_metric([c_index], component_state)
continue
# Controller
elif re.match(r"^CT\d+$", component_name):
detail = re_controller.match(component_name)
c_index = detail.group(1)
self.controller_health.add_metric([c_index], component_state)
continue
# Components
elif re.match(r"^C(H|T)\d+\.[A-Z]+[0-9]+$", component_name):
detail = re_component.match(component_name)
c_base = detail.group(1)
c_base_index = detail.group(2)
c_type = detail.group(3)
c_index = detail.group(4)
if c_base == 'CH':
# Chassis-based
labelset = [c_base_index, '', c_type, c_index]
else:
# Controller-based
labelset = ['', c_base_index, c_type, c_index]
# Component health status
self.component_health.add_metric(
labels=labelset, value=component_state)
if c_type.lower() == 'tmp':
# Additional metric for temperature
if c_base == 'CH':
self.temperature.add_metric(
[c_base_index, '', c_index], float(comp['temperature']))
else:
self.temperature.add_metric(
['', c_base_index, c_index], float(comp['temperature']))
elif c_type.lower() == 'pwr':
# Additional metric for voltage level
if comp['voltage'] is not None:
self.power.add_metric([c_base_index, c_index],
float(comp['voltage']))
def get_metrics(self):
self._array_hardware_status()
yield self.chassis_health
yield self.controller_health
yield self.component_health
yield self.temperature
yield self.power
|
flexible
|
{
"blob_id": "527d514cbad0916fecfe0da68de04d3b130d94c7",
"index": 5156,
"step-1": "<mask token>\n\n\nclass ArrayHardwareMetrics:\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass ArrayHardwareMetrics:\n\n def __init__(self, fa):\n self.fa = fa\n self.chassis_health = None\n self.controller_health = None\n self.component_health = None\n self.temperature = None\n self.temperature = None\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass ArrayHardwareMetrics:\n\n def __init__(self, fa):\n self.fa = fa\n self.chassis_health = None\n self.controller_health = None\n self.component_health = None\n self.temperature = None\n self.temperature = None\n <mask token>\n\n def get_metrics(self):\n self._array_hardware_status()\n yield self.chassis_health\n yield self.controller_health\n yield self.component_health\n yield self.temperature\n yield self.power\n",
"step-4": "import re\nfrom prometheus_client.core import GaugeMetricFamily\n\n\nclass ArrayHardwareMetrics:\n\n def __init__(self, fa):\n self.fa = fa\n self.chassis_health = None\n self.controller_health = None\n self.component_health = None\n self.temperature = None\n self.temperature = None\n\n def _array_hardware_status(self):\n \"\"\"Collect information about all system sensors.\"\"\"\n data = self.fa.get_hardware_status()\n self.chassis_health = GaugeMetricFamily(\n 'purefa_hardware_chassis_health',\n 'FlashArray hardware chassis health status')\n self.controller_health = GaugeMetricFamily(\n 'purefa_hardware_controller_health',\n 'FlashArray hardware controller health status', labels=[\n 'controller'])\n self.component_health = GaugeMetricFamily(\n 'purefa_hardware_component_health',\n 'FlashArray hardware component health status', labels=[\n 'chassis', 'controller', 'component', 'index'])\n self.temperature = GaugeMetricFamily(\n 'purefa_hardware_temperature_celsius',\n 'FlashArray hardware temperature sensors', labels=['chassis',\n 'controller', 'sensor'])\n self.power = GaugeMetricFamily('purefa_hardware_power_volts',\n 'FlashArray hardware power supply voltage', labels=['chassis',\n 'power_supply'])\n re_chassis = re.compile('^CH(\\\\d+)$')\n re_controller = re.compile('^CT(\\\\d+)$')\n re_component = re.compile('^(CH|CT)(\\\\d+)\\\\.([A-Z]+)([0-9]+)$')\n for comp in data:\n if comp['status'] == 'not_installed':\n continue\n component_name = comp['name']\n component_state = 1 if comp['status'] == 'ok' else 0\n if re.match('^CH\\\\d+$', component_name):\n detail = re_chassis.match(component_name)\n c_index = detail.group(1)\n self.chassis_health.add_metric([c_index], component_state)\n continue\n elif re.match('^CT\\\\d+$', component_name):\n detail = re_controller.match(component_name)\n c_index = detail.group(1)\n self.controller_health.add_metric([c_index], component_state)\n continue\n elif re.match('^C(H|T)\\\\d+\\\\.[A-Z]+[0-9]+$', 
component_name):\n detail = re_component.match(component_name)\n c_base = detail.group(1)\n c_base_index = detail.group(2)\n c_type = detail.group(3)\n c_index = detail.group(4)\n if c_base == 'CH':\n labelset = [c_base_index, '', c_type, c_index]\n else:\n labelset = ['', c_base_index, c_type, c_index]\n self.component_health.add_metric(labels=labelset, value=\n component_state)\n if c_type.lower() == 'tmp':\n if c_base == 'CH':\n self.temperature.add_metric([c_base_index, '',\n c_index], float(comp['temperature']))\n else:\n self.temperature.add_metric(['', c_base_index,\n c_index], float(comp['temperature']))\n elif c_type.lower() == 'pwr':\n if comp['voltage'] is not None:\n self.power.add_metric([c_base_index, c_index],\n float(comp['voltage']))\n\n def get_metrics(self):\n self._array_hardware_status()\n yield self.chassis_health\n yield self.controller_health\n yield self.component_health\n yield self.temperature\n yield self.power\n",
"step-5": "import re\nfrom prometheus_client.core import GaugeMetricFamily\n\n\nclass ArrayHardwareMetrics:\n\n def __init__(self, fa):\n self.fa = fa\n self.chassis_health = None\n self.controller_health = None\n self.component_health = None\n self.temperature = None\n self.temperature = None\n\n def _array_hardware_status(self):\n \"\"\"Collect information about all system sensors.\"\"\"\n data = self.fa.get_hardware_status()\n\n self.chassis_health = GaugeMetricFamily(\n 'purefa_hardware_chassis_health',\n 'FlashArray hardware chassis health status')\n self.controller_health = GaugeMetricFamily(\n 'purefa_hardware_controller_health',\n 'FlashArray hardware controller health status',\n labels=['controller'])\n self.component_health = GaugeMetricFamily(\n 'purefa_hardware_component_health',\n 'FlashArray hardware component health status',\n labels=['chassis', 'controller', 'component',\n 'index'])\n self.temperature = GaugeMetricFamily(\n 'purefa_hardware_temperature_celsius',\n 'FlashArray hardware temperature sensors',\n labels=['chassis', 'controller',\n 'sensor'])\n self.power = GaugeMetricFamily(\n 'purefa_hardware_power_volts',\n 'FlashArray hardware power supply voltage',\n labels=['chassis', 'power_supply'])\n\n re_chassis = re.compile(r\"^CH(\\d+)$\")\n re_controller = re.compile(r\"^CT(\\d+)$\")\n re_component = re.compile(r\"^(CH|CT)(\\d+)\\.([A-Z]+)([0-9]+)$\")\n\n for comp in data:\n if (comp['status'] == 'not_installed'):\n continue\n component_name = comp['name']\n component_state = 1 if (comp['status'] == 'ok') else 0\n\n # Chassis\n if re.match(r\"^CH\\d+$\", component_name):\n detail = re_chassis.match(component_name)\n c_index = detail.group(1)\n self.chassis_health.add_metric([c_index], component_state)\n continue\n # Controller\n elif re.match(r\"^CT\\d+$\", component_name):\n detail = re_controller.match(component_name)\n c_index = detail.group(1)\n self.controller_health.add_metric([c_index], component_state)\n continue\n # Components\n elif 
re.match(r\"^C(H|T)\\d+\\.[A-Z]+[0-9]+$\", component_name):\n detail = re_component.match(component_name)\n c_base = detail.group(1)\n c_base_index = detail.group(2)\n c_type = detail.group(3)\n c_index = detail.group(4)\n\n if c_base == 'CH':\n # Chassis-based\n labelset = [c_base_index, '', c_type, c_index]\n else:\n # Controller-based\n labelset = ['', c_base_index, c_type, c_index]\n\n # Component health status\n self.component_health.add_metric(\n labels=labelset, value=component_state)\n\n if c_type.lower() == 'tmp':\n # Additional metric for temperature\n if c_base == 'CH':\n self.temperature.add_metric(\n [c_base_index, '', c_index], float(comp['temperature']))\n else:\n self.temperature.add_metric(\n ['', c_base_index, c_index], float(comp['temperature']))\n elif c_type.lower() == 'pwr':\n # Additional metric for voltage level\n if comp['voltage'] is not None:\n self.power.add_metric([c_base_index, c_index],\n float(comp['voltage']))\n\n def get_metrics(self):\n self._array_hardware_status()\n yield self.chassis_health\n yield self.controller_health\n yield self.component_health\n yield self.temperature\n yield self.power\n",
"step-ids": [
1,
2,
3,
5,
6
]
}
|
[
1,
2,
3,
5,
6
] |
from skimage import data, filters, measure, exposure
from skimage.filters import threshold_mean
from skimage.transform import resize
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import pyfits as pf
import time
import numpy as np
import healpy as hp
from healpy.projector import CartesianProj
from healpy.projector import MollweideProj
# benjamin.racine@astro.uio.no
start = time.time()
path = '/Users/trygveleithesvalheim/datafiles/'
# MAX SLIDES ON N: 134 (ID:135) and 136 (ID:137)
# MAX SLIDES ON S: 6 (ID:7)
# Data: 360 degrees longitude, 50-90 latitude
# Dimensions: (480,4320)
# Full map: (2160,4320) need to add 1680
NN = 338 # Identified clouds in N. hemisphere
NS = 438 # Identified clouds in S. hemisphere
nside = 512
npix = 12*nside**2
z = np.zeros((1680, 4320)) # Extra array for full sky array
"""
full
"""
c1 = 420
c2 = 475
hdulist = pf.open(path+'data/cubesCombinedN.fits')
Nfull = hdulist[0].data
hdulist = pf.open(path+'data/cubesCombinedS.fits')
Sfull = hdulist[0].data
fullLOSN = Nfull[c1:c2].sum(axis=(0))
fullLOSS = Sfull[c1:c2].sum(axis=(0))
# Add empty array for converting to full sky
fullLOSS = np.concatenate((fullLOSS, z), axis=0)
fullLOSN = np.concatenate((z,fullLOSN), axis=0)
full = fullLOSN + fullLOSS
"""
Add full first
"""
hdulist = pf.open(path+'data/LOScloudsN.fits')
LOScloudsN = hdulist[0].data
hdulist = pf.open(path+'data/LOScloudsS.fits')
LOScloudsS = hdulist[0].data
# LOS of all clouds
LOScloudsN = LOScloudsN.sum(axis=(0))
LOScloudsS = LOScloudsS.sum(axis=(0))
# Add empty array for converting to full sky
LOScloudsS = np.concatenate((LOScloudsS, z), axis=0)
LOScloudsN = np.concatenate((z,LOScloudsN), axis=0)
# Add N and S hemisphere
image_array = LOScloudsN+LOScloudsS
"""
GENERAL
"""
# Find theta and phi coordinates of image
theta = np.linspace(0, np.pi, num=image_array.shape[0])[:, None]
phi = np.linspace(-np.pi, np.pi, num=image_array.shape[1])
# Get pixel positions of full picture
pix = hp.ang2pix(nside, theta, phi)
"""
GENERAL END
"""
# Make healpix map array
healpix_map = np.zeros(hp.nside2npix(nside), dtype=np.double)
# put image in healpy map array
healpix_map[pix] = image_array # Magic
#healpix_map[np.where(healpix_map == 0)] = hp.UNSEEN # Set empty pixels to UNSEEN
"""
For full
"""
full_map = np.zeros(hp.nside2npix(nside), dtype=np.double)
full_map[pix] = full # Magic
#full_map[np.where(healpix_map == 0)] = hp.UNSEEN # Set empty pixels to UNSEEN
"""
Full end
"""
le = full_map - healpix_map
ma = le[::-1]
ma[np.where(ma == 0)] = hp.UNSEEN
full_map[np.where(full_map == 0)] = hp.UNSEEN
fu = full_map[::-1]
healpix_map[np.where(healpix_map == 0)] = hp.UNSEEN
se = healpix_map[::-1]
"""
hp.write_map(path+'data/fullHI50.fits',fu, partial=True)
hp.write_map(path+'data/segmentedHI50.fits',se, partial=True)
hp.write_map(path+'data/cloudsFITS/subtractedHI50fits',ma, partial=True)
"""
#min = 4.
#max = 350.
hp.mollview(fu,title="Full map +50 GLAT",sub=311)
hp.mollview(se,title="Above threshold (4.0) +50 GLAT", sub = 312)
hp.mollview(ma,title="Diff +50 GLAT",sub=313)
plt.savefig('figs/diff4a.pdf', bbox_inches='tight',pad_inches=0.106)
plt.show()
"""
NX = 4320
NY = 2160
#image_array = resize(LOScloudsN,(NY,NX)) # Resizing image
"""
|
normal
|
{
"blob_id": "d86fd2e6ef5dab4444772192471538842112b3fd",
"index": 2675,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nhp.mollview(fu, title='Full map +50 GLAT', sub=311)\nhp.mollview(se, title='Above threshold (4.0) +50 GLAT', sub=312)\nhp.mollview(ma, title='Diff +50 GLAT', sub=313)\nplt.savefig('figs/diff4a.pdf', bbox_inches='tight', pad_inches=0.106)\nplt.show()\n<mask token>\n",
"step-3": "<mask token>\nstart = time.time()\npath = '/Users/trygveleithesvalheim/datafiles/'\nNN = 338\nNS = 438\nnside = 512\nnpix = 12 * nside ** 2\nz = np.zeros((1680, 4320))\n<mask token>\nc1 = 420\nc2 = 475\nhdulist = pf.open(path + 'data/cubesCombinedN.fits')\nNfull = hdulist[0].data\nhdulist = pf.open(path + 'data/cubesCombinedS.fits')\nSfull = hdulist[0].data\nfullLOSN = Nfull[c1:c2].sum(axis=0)\nfullLOSS = Sfull[c1:c2].sum(axis=0)\nfullLOSS = np.concatenate((fullLOSS, z), axis=0)\nfullLOSN = np.concatenate((z, fullLOSN), axis=0)\nfull = fullLOSN + fullLOSS\n<mask token>\nhdulist = pf.open(path + 'data/LOScloudsN.fits')\nLOScloudsN = hdulist[0].data\nhdulist = pf.open(path + 'data/LOScloudsS.fits')\nLOScloudsS = hdulist[0].data\nLOScloudsN = LOScloudsN.sum(axis=0)\nLOScloudsS = LOScloudsS.sum(axis=0)\nLOScloudsS = np.concatenate((LOScloudsS, z), axis=0)\nLOScloudsN = np.concatenate((z, LOScloudsN), axis=0)\nimage_array = LOScloudsN + LOScloudsS\n<mask token>\ntheta = np.linspace(0, np.pi, num=image_array.shape[0])[:, None]\nphi = np.linspace(-np.pi, np.pi, num=image_array.shape[1])\npix = hp.ang2pix(nside, theta, phi)\n<mask token>\nhealpix_map = np.zeros(hp.nside2npix(nside), dtype=np.double)\nhealpix_map[pix] = image_array\n<mask token>\nfull_map = np.zeros(hp.nside2npix(nside), dtype=np.double)\nfull_map[pix] = full\n<mask token>\nle = full_map - healpix_map\nma = le[::-1]\nma[np.where(ma == 0)] = hp.UNSEEN\nfull_map[np.where(full_map == 0)] = hp.UNSEEN\nfu = full_map[::-1]\nhealpix_map[np.where(healpix_map == 0)] = hp.UNSEEN\nse = healpix_map[::-1]\n<mask token>\nhp.mollview(fu, title='Full map +50 GLAT', sub=311)\nhp.mollview(se, title='Above threshold (4.0) +50 GLAT', sub=312)\nhp.mollview(ma, title='Diff +50 GLAT', sub=313)\nplt.savefig('figs/diff4a.pdf', bbox_inches='tight', pad_inches=0.106)\nplt.show()\n<mask token>\n",
"step-4": "from skimage import data, filters, measure, exposure\nfrom skimage.filters import threshold_mean\nfrom skimage.transform import resize\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nimport pyfits as pf\nimport time\nimport numpy as np\nimport healpy as hp\nfrom healpy.projector import CartesianProj\nfrom healpy.projector import MollweideProj\nstart = time.time()\npath = '/Users/trygveleithesvalheim/datafiles/'\nNN = 338\nNS = 438\nnside = 512\nnpix = 12 * nside ** 2\nz = np.zeros((1680, 4320))\n<mask token>\nc1 = 420\nc2 = 475\nhdulist = pf.open(path + 'data/cubesCombinedN.fits')\nNfull = hdulist[0].data\nhdulist = pf.open(path + 'data/cubesCombinedS.fits')\nSfull = hdulist[0].data\nfullLOSN = Nfull[c1:c2].sum(axis=0)\nfullLOSS = Sfull[c1:c2].sum(axis=0)\nfullLOSS = np.concatenate((fullLOSS, z), axis=0)\nfullLOSN = np.concatenate((z, fullLOSN), axis=0)\nfull = fullLOSN + fullLOSS\n<mask token>\nhdulist = pf.open(path + 'data/LOScloudsN.fits')\nLOScloudsN = hdulist[0].data\nhdulist = pf.open(path + 'data/LOScloudsS.fits')\nLOScloudsS = hdulist[0].data\nLOScloudsN = LOScloudsN.sum(axis=0)\nLOScloudsS = LOScloudsS.sum(axis=0)\nLOScloudsS = np.concatenate((LOScloudsS, z), axis=0)\nLOScloudsN = np.concatenate((z, LOScloudsN), axis=0)\nimage_array = LOScloudsN + LOScloudsS\n<mask token>\ntheta = np.linspace(0, np.pi, num=image_array.shape[0])[:, None]\nphi = np.linspace(-np.pi, np.pi, num=image_array.shape[1])\npix = hp.ang2pix(nside, theta, phi)\n<mask token>\nhealpix_map = np.zeros(hp.nside2npix(nside), dtype=np.double)\nhealpix_map[pix] = image_array\n<mask token>\nfull_map = np.zeros(hp.nside2npix(nside), dtype=np.double)\nfull_map[pix] = full\n<mask token>\nle = full_map - healpix_map\nma = le[::-1]\nma[np.where(ma == 0)] = hp.UNSEEN\nfull_map[np.where(full_map == 0)] = hp.UNSEEN\nfu = full_map[::-1]\nhealpix_map[np.where(healpix_map == 0)] = hp.UNSEEN\nse = healpix_map[::-1]\n<mask token>\nhp.mollview(fu, title='Full map +50 
GLAT', sub=311)\nhp.mollview(se, title='Above threshold (4.0) +50 GLAT', sub=312)\nhp.mollview(ma, title='Diff +50 GLAT', sub=313)\nplt.savefig('figs/diff4a.pdf', bbox_inches='tight', pad_inches=0.106)\nplt.show()\n<mask token>\n",
"step-5": "from skimage import data, filters, measure, exposure\nfrom skimage.filters import threshold_mean\nfrom skimage.transform import resize\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nimport pyfits as pf\nimport time\nimport numpy as np\nimport healpy as hp\nfrom healpy.projector import CartesianProj\nfrom healpy.projector import MollweideProj\n# benjamin.racine@astro.uio.no\nstart = time.time()\npath = '/Users/trygveleithesvalheim/datafiles/'\n\n\n# MAX SLIDES ON N: 134 (ID:135) and 136 (ID:137)\n# MAX SLIDES ON S: 6 (ID:7)\n# Data: 360 degrees longitude, 50-90 latitude\n# Dimensions: (480,4320)\n# Full map: (2160,4320) need to add 1680\n\nNN = 338 # Identified clouds in N. hemisphere\nNS = 438 # Identified clouds in S. hemisphere\nnside = 512\nnpix = 12*nside**2\nz = np.zeros((1680, 4320)) # Extra array for full sky array\n\n\"\"\"\nfull\n\"\"\"\nc1 = 420\nc2 = 475\nhdulist = pf.open(path+'data/cubesCombinedN.fits')\nNfull = hdulist[0].data\nhdulist = pf.open(path+'data/cubesCombinedS.fits')\nSfull = hdulist[0].data\n\nfullLOSN = Nfull[c1:c2].sum(axis=(0))\nfullLOSS = Sfull[c1:c2].sum(axis=(0))\n\n# Add empty array for converting to full sky\nfullLOSS = np.concatenate((fullLOSS, z), axis=0)\nfullLOSN = np.concatenate((z,fullLOSN), axis=0)\n\nfull = fullLOSN + fullLOSS\n\"\"\"\nAdd full first\n\"\"\"\nhdulist = pf.open(path+'data/LOScloudsN.fits')\nLOScloudsN = hdulist[0].data\nhdulist = pf.open(path+'data/LOScloudsS.fits')\nLOScloudsS = hdulist[0].data\n\n# LOS of all clouds\nLOScloudsN = LOScloudsN.sum(axis=(0))\nLOScloudsS = LOScloudsS.sum(axis=(0))\n\n# Add empty array for converting to full sky\nLOScloudsS = np.concatenate((LOScloudsS, z), axis=0)\nLOScloudsN = np.concatenate((z,LOScloudsN), axis=0)\n\n# Add N and S hemisphere\nimage_array = LOScloudsN+LOScloudsS\n\n\"\"\"\nGENERAL\n\"\"\"\n# Find theta and phi coordinates of image\ntheta = np.linspace(0, np.pi, num=image_array.shape[0])[:, None]\nphi = np.linspace(-np.pi, 
np.pi, num=image_array.shape[1])\n\n# Get pixel positions of full picture\npix = hp.ang2pix(nside, theta, phi)\n\n\"\"\"\nGENERAL END\n\"\"\"\n# Make healpix map array\nhealpix_map = np.zeros(hp.nside2npix(nside), dtype=np.double)\n\n# put image in healpy map array\nhealpix_map[pix] = image_array # Magic\n#healpix_map[np.where(healpix_map == 0)] = hp.UNSEEN # Set empty pixels to UNSEEN\n\n\"\"\"\nFor full\n\"\"\"\nfull_map = np.zeros(hp.nside2npix(nside), dtype=np.double)\n\nfull_map[pix] = full # Magic\n#full_map[np.where(healpix_map == 0)] = hp.UNSEEN # Set empty pixels to UNSEEN\n\n\"\"\"\nFull end\n\"\"\"\nle = full_map - healpix_map\nma = le[::-1]\nma[np.where(ma == 0)] = hp.UNSEEN\n\nfull_map[np.where(full_map == 0)] = hp.UNSEEN\nfu = full_map[::-1]\nhealpix_map[np.where(healpix_map == 0)] = hp.UNSEEN\nse = healpix_map[::-1]\n\n\"\"\"\nhp.write_map(path+'data/fullHI50.fits',fu, partial=True)\nhp.write_map(path+'data/segmentedHI50.fits',se, partial=True)\nhp.write_map(path+'data/cloudsFITS/subtractedHI50fits',ma, partial=True)\n\"\"\"\n#min = 4.\n#max = 350.\nhp.mollview(fu,title=\"Full map +50 GLAT\",sub=311)\nhp.mollview(se,title=\"Above threshold (4.0) +50 GLAT\", sub = 312)\nhp.mollview(ma,title=\"Diff +50 GLAT\",sub=313)\nplt.savefig('figs/diff4a.pdf', bbox_inches='tight',pad_inches=0.106)\nplt.show()\n\n\"\"\"\nNX = 4320\nNY = 2160\n#image_array = resize(LOScloudsN,(NY,NX)) # Resizing image\n\"\"\"\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for nth in moves:
for i in range(len(board)):
selected = board[i][nth - 1]
if selected == 0:
continue
else:
resultList.append(selected)
lenR = len(resultList)
if lenR > 1:
if resultList[lenR - 2] == resultList[lenR - 1]:
del resultList[lenR - 2:]
count += 2
board[i][nth - 1] = 0
break
print(count)
<|reserved_special_token_1|>
board = [[0, 0, 0, 0, 0], [0, 0, 1, 0, 3], [0, 2, 5, 0, 1], [4, 2, 4, 4, 2],
[3, 5, 1, 3, 1]]
moves = [1, 5, 3, 5, 1, 2, 1, 4]
resultList = []
count = 0
for nth in moves:
for i in range(len(board)):
selected = board[i][nth - 1]
if selected == 0:
continue
else:
resultList.append(selected)
lenR = len(resultList)
if lenR > 1:
if resultList[lenR - 2] == resultList[lenR - 1]:
del resultList[lenR - 2:]
count += 2
board[i][nth - 1] = 0
break
print(count)
<|reserved_special_token_1|>
# 예시 입력값
board = [[0,0,0,0,0],[0,0,1,0,3],[0,2,5,0,1],[4,2,4,4,2],[3,5,1,3,1]]
moves = [1,5,3,5,1,2,1,4]
# 로직
resultList = []
count = 0
for nth in moves:
for i in range(len(board)):
selected = board[i][nth - 1]
if selected == 0:
continue
else:
# 인형을 resultList에 넣고
resultList.append(selected)
# resultList를 탐색하여 같은 인형이 있는지 보기
lenR = len(resultList)
if lenR > 1:
if resultList[lenR - 2] == resultList[lenR - 1]:
del resultList[lenR - 2:]
count += 2
# 뽑힌 인형은 board에서 사라짐
board[i][nth - 1] = 0
break
# print(resultList)
print(count)
|
flexible
|
{
"blob_id": "18e032b7ff7ae9d3f5fecc86f63d12f4da7b8067",
"index": 6180,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor nth in moves:\n for i in range(len(board)):\n selected = board[i][nth - 1]\n if selected == 0:\n continue\n else:\n resultList.append(selected)\n lenR = len(resultList)\n if lenR > 1:\n if resultList[lenR - 2] == resultList[lenR - 1]:\n del resultList[lenR - 2:]\n count += 2\n board[i][nth - 1] = 0\n break\nprint(count)\n",
"step-3": "board = [[0, 0, 0, 0, 0], [0, 0, 1, 0, 3], [0, 2, 5, 0, 1], [4, 2, 4, 4, 2],\n [3, 5, 1, 3, 1]]\nmoves = [1, 5, 3, 5, 1, 2, 1, 4]\nresultList = []\ncount = 0\nfor nth in moves:\n for i in range(len(board)):\n selected = board[i][nth - 1]\n if selected == 0:\n continue\n else:\n resultList.append(selected)\n lenR = len(resultList)\n if lenR > 1:\n if resultList[lenR - 2] == resultList[lenR - 1]:\n del resultList[lenR - 2:]\n count += 2\n board[i][nth - 1] = 0\n break\nprint(count)\n",
"step-4": "# 예시 입력값\nboard = [[0,0,0,0,0],[0,0,1,0,3],[0,2,5,0,1],[4,2,4,4,2],[3,5,1,3,1]]\nmoves = [1,5,3,5,1,2,1,4]\n\n# 로직\nresultList = []\ncount = 0\n\nfor nth in moves:\n for i in range(len(board)):\n selected = board[i][nth - 1]\n if selected == 0:\n continue\n else:\n # 인형을 resultList에 넣고\n resultList.append(selected)\n # resultList를 탐색하여 같은 인형이 있는지 보기\n lenR = len(resultList)\n if lenR > 1:\n if resultList[lenR - 2] == resultList[lenR - 1]:\n del resultList[lenR - 2:]\n count += 2\n\n # 뽑힌 인형은 board에서 사라짐\n board[i][nth - 1] = 0\n break\n\n# print(resultList)\nprint(count)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from envs import DATASET_FOLDER
from os.path import join
import json
import collections
from tqdm import tqdm
def add_space(context_list):
space_context = []
for idx, context in enumerate(context_list):
space_sent_list = []
sent_list = context[1]
if idx == 0:
for sent_idx, sent in enumerate(sent_list):
sent = sent.replace(' .', '.')
sent = sent.replace(' ,', ',')
sent = sent.strip()
if sent_idx == 0:
space_sent_list.append(sent.strip())
else:
space_sent_list.append(' ' + sent)
else:
for sent_idx, sent in enumerate(sent_list):
sent = sent.replace(' .', '.')
sent = sent.replace(' ,', ',')
sent = sent.strip()
space_sent_list.append(' ' + sent)
space_context.append([context[0], space_sent_list])
return space_context
def find_answer(answer, sents):
for s_idx, sent in enumerate(sents):
if answer in sent:
return s_idx
return -1
def find_in_answer_context(answer, context):
founds = []
for ctx_idx, ctx in enumerate(context):
ans_idx = find_answer(answer=answer, sents=ctx[1])
if ans_idx >= 0:
founds.append(1)
# if ctx_idx == 0:
# print('{} : {}: {}'.format(ctx_idx, ans_idx, len(ctx[1])))
else:
founds.append(0)
ans_found_idx = -1
assert sum(founds) <= 2
if sum(founds) > 0:
if founds[0] == 1:
ans_found_idx = 0
else:
ans_found_idx = 1
return ans_found_idx
def fintuner_in_answer_context(answer, context, supporting_facts):
ans_idx = find_answer(answer=answer, sents=context[0][1])
support_facts = set([(x[0], x[1]) for x in supporting_facts])
if ans_idx > 0 and len(support_facts) > 1:
# if (context[0][0], ans_idx) not in support_facts:
# print(ans_idx, len(context[0][1]))
# print(supporting_facts)
return True
return False
def docred_refiner():
DOCRED_OUTPUT_PROCESSED_para_file = join(DATASET_FOLDER, 'data_processed/docred/docred_multihop_para.json')
DOCRED_OUTPUT_PROCESSED_raw_file = join(DATASET_FOLDER,
'data_raw/converted_docred_total.json') # converted_docred_total.json
REFINEd_DOCRED_OUTPUT_PROCESSED = join(DATASET_FOLDER, 'data_raw/refined_converted_docred_total.json')
with open(DOCRED_OUTPUT_PROCESSED_raw_file, 'r', encoding='utf-8') as reader:
raw_data = json.load(reader)
with open(DOCRED_OUTPUT_PROCESSED_para_file, 'r', encoding='utf-8') as reader:
para_data = json.load(reader)
print('loading {} data from {}'.format(len(raw_data), DOCRED_OUTPUT_PROCESSED_raw_file))
examples = []
answer_position = []
answer_not_found = []
no_answer_found = 0
first_one_sent = 0
title_dict = {}
tunable_count = 0
for case in tqdm(raw_data):
# print(case)
key = case['_id']
answer = case['answer']
context = case['context']
support_facts = case['supporting_facts']
title = context[0][0][:-2].strip()
if title not in title_dict:
title_dict[title] = 1
else:
title_dict[title] = title_dict[title] + 1
fine_tune_flag = fintuner_in_answer_context(answer=answer, supporting_facts=support_facts, context=context)
if fine_tune_flag:
tunable_count = tunable_count + 1
ans_find_idx = find_in_answer_context(answer=answer, context=context)
if ans_find_idx >= 0:
answer_position.append(ans_find_idx)
else:
no_answer_found = no_answer_found + 1
if ans_find_idx == 0 and len(context[0][1]) > 1:
first_one_sent = first_one_sent + 1
# for ctx_idx, ctx in enumerate(context):
# is_answer_found = find_answer(answer=answer, sents=ctx[1])
# if is_answer_found:
# answer_position.append(ctx_idx)
# break
# else:
# continue
# for key_name, key_value in case.items():
# if key_name != 'context':
# print('{}: {}'.format(key_name, key_value))
# else:
# for ctx_idx, ctx in enumerate(key_value):
# print('{}: {}'.format(ctx_idx + 1, ctx))
# context = case['context']
# space_context = add_space(context_list=context)
# case['context'] = space_context
# examples.append(case)
# print(context)
# print('-' * 50)
# print(add_space(context_list=context))
# print('*' * 100)
print(len(raw_data))
print(len(answer_position))
print(sum(answer_position))
print('no answer found = {}'.format(no_answer_found))
print('first one sent = {}'.format(first_one_sent))
print('tunable count = {}'.format(tunable_count))
print('title number = {}'.format(len(title_dict)))
# sorted_title_dict = sorted(title_dict.items(), key=lambda kv: kv[1])
# for key, value in sorted_title_dict:
# print('{}: {}'.format(key, value))
def docred_checker():
    """Print every field of each converted DocRED example for manual inspection.

    Loads the raw converted DocRED examples and the matching multi-hop
    paragraph file from DATASET_FOLDER, then dumps each example's fields to
    stdout. Context paragraphs are printed one per line with a 1-based index,
    and examples are separated by a line of asterisks. Debugging utility
    only; returns nothing.
    """
    DOCRED_OUTPUT_PROCESSED_para_file = join(DATASET_FOLDER, 'data_processed/docred/docred_multihop_para.json')
    DOCRED_OUTPUT_PROCESSED_raw_file = join(DATASET_FOLDER, 'data_raw/converted_docred_total.json')
    with open(DOCRED_OUTPUT_PROCESSED_raw_file, 'r', encoding='utf-8') as reader:
        raw_data = json.load(reader)
    # Loaded only so a missing/corrupt paragraph file fails fast; the parsed
    # content is not used below.
    with open(DOCRED_OUTPUT_PROCESSED_para_file, 'r', encoding='utf-8') as reader:
        para_data = json.load(reader)
    print('loading {} data from {}'.format(len(raw_data), DOCRED_OUTPUT_PROCESSED_raw_file))
    for case in tqdm(raw_data):
        # Kept so an example missing '_id' raises immediately (KeyError).
        key = case['_id']
        for key_name, key_value in case.items():
            if key_name != 'context':
                print('{}: {}'.format(key_name, key_value))
            else:
                for ctx_idx, ctx in enumerate(key_value):
                    print('{}: {}'.format(ctx_idx + 1, ctx))
        print('*' * 100)
|
normal
|
{
"blob_id": "a179d3d2f04a101eaa60b5964c2b1cd77071633f",
"index": 5344,
"step-1": "<mask token>\n\n\ndef find_answer(answer, sents):\n for s_idx, sent in enumerate(sents):\n if answer in sent:\n return s_idx\n return -1\n\n\n<mask token>\n\n\ndef docred_refiner():\n DOCRED_OUTPUT_PROCESSED_para_file = join(DATASET_FOLDER,\n 'data_processed/docred/docred_multihop_para.json')\n DOCRED_OUTPUT_PROCESSED_raw_file = join(DATASET_FOLDER,\n 'data_raw/converted_docred_total.json')\n REFINEd_DOCRED_OUTPUT_PROCESSED = join(DATASET_FOLDER,\n 'data_raw/refined_converted_docred_total.json')\n with open(DOCRED_OUTPUT_PROCESSED_raw_file, 'r', encoding='utf-8'\n ) as reader:\n raw_data = json.load(reader)\n with open(DOCRED_OUTPUT_PROCESSED_para_file, 'r', encoding='utf-8'\n ) as reader:\n para_data = json.load(reader)\n print('loading {} data from {}'.format(len(raw_data),\n DOCRED_OUTPUT_PROCESSED_raw_file))\n examples = []\n answer_position = []\n answer_not_found = []\n no_answer_found = 0\n first_one_sent = 0\n title_dict = {}\n tunable_count = 0\n for case in tqdm(raw_data):\n key = case['_id']\n answer = case['answer']\n context = case['context']\n support_facts = case['supporting_facts']\n title = context[0][0][:-2].strip()\n if title not in title_dict:\n title_dict[title] = 1\n else:\n title_dict[title] = title_dict[title] + 1\n fine_tune_flag = fintuner_in_answer_context(answer=answer,\n supporting_facts=support_facts, context=context)\n if fine_tune_flag:\n tunable_count = tunable_count + 1\n ans_find_idx = find_in_answer_context(answer=answer, context=context)\n if ans_find_idx >= 0:\n answer_position.append(ans_find_idx)\n else:\n no_answer_found = no_answer_found + 1\n if ans_find_idx == 0 and len(context[0][1]) > 1:\n first_one_sent = first_one_sent + 1\n print(len(raw_data))\n print(len(answer_position))\n print(sum(answer_position))\n print('no answer found = {}'.format(no_answer_found))\n print('first one sent = {}'.format(first_one_sent))\n print('tunable count = {}'.format(tunable_count))\n print('title number = 
{}'.format(len(title_dict)))\n\n\ndef docred_checker():\n DOCRED_OUTPUT_PROCESSED_para_file = join(DATASET_FOLDER,\n 'data_processed/docred/docred_multihop_para.json')\n DOCRED_OUTPUT_PROCESSED_raw_file = join(DATASET_FOLDER,\n 'data_raw/converted_docred_total.json')\n with open(DOCRED_OUTPUT_PROCESSED_raw_file, 'r', encoding='utf-8'\n ) as reader:\n raw_data = json.load(reader)\n with open(DOCRED_OUTPUT_PROCESSED_para_file, 'r', encoding='utf-8'\n ) as reader:\n para_data = json.load(reader)\n print('loading {} data from {}'.format(len(raw_data),\n DOCRED_OUTPUT_PROCESSED_raw_file))\n examples = []\n for case in tqdm(raw_data):\n key = case['_id']\n for key_name, key_value in case.items():\n if key_name != 'context':\n print('{}: {}'.format(key_name, key_value))\n else:\n for ctx_idx, ctx in enumerate(key_value):\n print('{}: {}'.format(ctx_idx + 1, ctx))\n print('*' * 100)\n",
"step-2": "<mask token>\n\n\ndef add_space(context_list):\n space_context = []\n for idx, context in enumerate(context_list):\n space_sent_list = []\n sent_list = context[1]\n if idx == 0:\n for sent_idx, sent in enumerate(sent_list):\n sent = sent.replace(' .', '.')\n sent = sent.replace(' ,', ',')\n sent = sent.strip()\n if sent_idx == 0:\n space_sent_list.append(sent.strip())\n else:\n space_sent_list.append(' ' + sent)\n else:\n for sent_idx, sent in enumerate(sent_list):\n sent = sent.replace(' .', '.')\n sent = sent.replace(' ,', ',')\n sent = sent.strip()\n space_sent_list.append(' ' + sent)\n space_context.append([context[0], space_sent_list])\n return space_context\n\n\ndef find_answer(answer, sents):\n for s_idx, sent in enumerate(sents):\n if answer in sent:\n return s_idx\n return -1\n\n\n<mask token>\n\n\ndef fintuner_in_answer_context(answer, context, supporting_facts):\n ans_idx = find_answer(answer=answer, sents=context[0][1])\n support_facts = set([(x[0], x[1]) for x in supporting_facts])\n if ans_idx > 0 and len(support_facts) > 1:\n return True\n return False\n\n\ndef docred_refiner():\n DOCRED_OUTPUT_PROCESSED_para_file = join(DATASET_FOLDER,\n 'data_processed/docred/docred_multihop_para.json')\n DOCRED_OUTPUT_PROCESSED_raw_file = join(DATASET_FOLDER,\n 'data_raw/converted_docred_total.json')\n REFINEd_DOCRED_OUTPUT_PROCESSED = join(DATASET_FOLDER,\n 'data_raw/refined_converted_docred_total.json')\n with open(DOCRED_OUTPUT_PROCESSED_raw_file, 'r', encoding='utf-8'\n ) as reader:\n raw_data = json.load(reader)\n with open(DOCRED_OUTPUT_PROCESSED_para_file, 'r', encoding='utf-8'\n ) as reader:\n para_data = json.load(reader)\n print('loading {} data from {}'.format(len(raw_data),\n DOCRED_OUTPUT_PROCESSED_raw_file))\n examples = []\n answer_position = []\n answer_not_found = []\n no_answer_found = 0\n first_one_sent = 0\n title_dict = {}\n tunable_count = 0\n for case in tqdm(raw_data):\n key = case['_id']\n answer = case['answer']\n context = 
case['context']\n support_facts = case['supporting_facts']\n title = context[0][0][:-2].strip()\n if title not in title_dict:\n title_dict[title] = 1\n else:\n title_dict[title] = title_dict[title] + 1\n fine_tune_flag = fintuner_in_answer_context(answer=answer,\n supporting_facts=support_facts, context=context)\n if fine_tune_flag:\n tunable_count = tunable_count + 1\n ans_find_idx = find_in_answer_context(answer=answer, context=context)\n if ans_find_idx >= 0:\n answer_position.append(ans_find_idx)\n else:\n no_answer_found = no_answer_found + 1\n if ans_find_idx == 0 and len(context[0][1]) > 1:\n first_one_sent = first_one_sent + 1\n print(len(raw_data))\n print(len(answer_position))\n print(sum(answer_position))\n print('no answer found = {}'.format(no_answer_found))\n print('first one sent = {}'.format(first_one_sent))\n print('tunable count = {}'.format(tunable_count))\n print('title number = {}'.format(len(title_dict)))\n\n\ndef docred_checker():\n DOCRED_OUTPUT_PROCESSED_para_file = join(DATASET_FOLDER,\n 'data_processed/docred/docred_multihop_para.json')\n DOCRED_OUTPUT_PROCESSED_raw_file = join(DATASET_FOLDER,\n 'data_raw/converted_docred_total.json')\n with open(DOCRED_OUTPUT_PROCESSED_raw_file, 'r', encoding='utf-8'\n ) as reader:\n raw_data = json.load(reader)\n with open(DOCRED_OUTPUT_PROCESSED_para_file, 'r', encoding='utf-8'\n ) as reader:\n para_data = json.load(reader)\n print('loading {} data from {}'.format(len(raw_data),\n DOCRED_OUTPUT_PROCESSED_raw_file))\n examples = []\n for case in tqdm(raw_data):\n key = case['_id']\n for key_name, key_value in case.items():\n if key_name != 'context':\n print('{}: {}'.format(key_name, key_value))\n else:\n for ctx_idx, ctx in enumerate(key_value):\n print('{}: {}'.format(ctx_idx + 1, ctx))\n print('*' * 100)\n",
"step-3": "<mask token>\n\n\ndef add_space(context_list):\n space_context = []\n for idx, context in enumerate(context_list):\n space_sent_list = []\n sent_list = context[1]\n if idx == 0:\n for sent_idx, sent in enumerate(sent_list):\n sent = sent.replace(' .', '.')\n sent = sent.replace(' ,', ',')\n sent = sent.strip()\n if sent_idx == 0:\n space_sent_list.append(sent.strip())\n else:\n space_sent_list.append(' ' + sent)\n else:\n for sent_idx, sent in enumerate(sent_list):\n sent = sent.replace(' .', '.')\n sent = sent.replace(' ,', ',')\n sent = sent.strip()\n space_sent_list.append(' ' + sent)\n space_context.append([context[0], space_sent_list])\n return space_context\n\n\ndef find_answer(answer, sents):\n for s_idx, sent in enumerate(sents):\n if answer in sent:\n return s_idx\n return -1\n\n\ndef find_in_answer_context(answer, context):\n founds = []\n for ctx_idx, ctx in enumerate(context):\n ans_idx = find_answer(answer=answer, sents=ctx[1])\n if ans_idx >= 0:\n founds.append(1)\n else:\n founds.append(0)\n ans_found_idx = -1\n assert sum(founds) <= 2\n if sum(founds) > 0:\n if founds[0] == 1:\n ans_found_idx = 0\n else:\n ans_found_idx = 1\n return ans_found_idx\n\n\ndef fintuner_in_answer_context(answer, context, supporting_facts):\n ans_idx = find_answer(answer=answer, sents=context[0][1])\n support_facts = set([(x[0], x[1]) for x in supporting_facts])\n if ans_idx > 0 and len(support_facts) > 1:\n return True\n return False\n\n\ndef docred_refiner():\n DOCRED_OUTPUT_PROCESSED_para_file = join(DATASET_FOLDER,\n 'data_processed/docred/docred_multihop_para.json')\n DOCRED_OUTPUT_PROCESSED_raw_file = join(DATASET_FOLDER,\n 'data_raw/converted_docred_total.json')\n REFINEd_DOCRED_OUTPUT_PROCESSED = join(DATASET_FOLDER,\n 'data_raw/refined_converted_docred_total.json')\n with open(DOCRED_OUTPUT_PROCESSED_raw_file, 'r', encoding='utf-8'\n ) as reader:\n raw_data = json.load(reader)\n with open(DOCRED_OUTPUT_PROCESSED_para_file, 'r', encoding='utf-8'\n ) as 
reader:\n para_data = json.load(reader)\n print('loading {} data from {}'.format(len(raw_data),\n DOCRED_OUTPUT_PROCESSED_raw_file))\n examples = []\n answer_position = []\n answer_not_found = []\n no_answer_found = 0\n first_one_sent = 0\n title_dict = {}\n tunable_count = 0\n for case in tqdm(raw_data):\n key = case['_id']\n answer = case['answer']\n context = case['context']\n support_facts = case['supporting_facts']\n title = context[0][0][:-2].strip()\n if title not in title_dict:\n title_dict[title] = 1\n else:\n title_dict[title] = title_dict[title] + 1\n fine_tune_flag = fintuner_in_answer_context(answer=answer,\n supporting_facts=support_facts, context=context)\n if fine_tune_flag:\n tunable_count = tunable_count + 1\n ans_find_idx = find_in_answer_context(answer=answer, context=context)\n if ans_find_idx >= 0:\n answer_position.append(ans_find_idx)\n else:\n no_answer_found = no_answer_found + 1\n if ans_find_idx == 0 and len(context[0][1]) > 1:\n first_one_sent = first_one_sent + 1\n print(len(raw_data))\n print(len(answer_position))\n print(sum(answer_position))\n print('no answer found = {}'.format(no_answer_found))\n print('first one sent = {}'.format(first_one_sent))\n print('tunable count = {}'.format(tunable_count))\n print('title number = {}'.format(len(title_dict)))\n\n\ndef docred_checker():\n DOCRED_OUTPUT_PROCESSED_para_file = join(DATASET_FOLDER,\n 'data_processed/docred/docred_multihop_para.json')\n DOCRED_OUTPUT_PROCESSED_raw_file = join(DATASET_FOLDER,\n 'data_raw/converted_docred_total.json')\n with open(DOCRED_OUTPUT_PROCESSED_raw_file, 'r', encoding='utf-8'\n ) as reader:\n raw_data = json.load(reader)\n with open(DOCRED_OUTPUT_PROCESSED_para_file, 'r', encoding='utf-8'\n ) as reader:\n para_data = json.load(reader)\n print('loading {} data from {}'.format(len(raw_data),\n DOCRED_OUTPUT_PROCESSED_raw_file))\n examples = []\n for case in tqdm(raw_data):\n key = case['_id']\n for key_name, key_value in case.items():\n if key_name != 
'context':\n print('{}: {}'.format(key_name, key_value))\n else:\n for ctx_idx, ctx in enumerate(key_value):\n print('{}: {}'.format(ctx_idx + 1, ctx))\n print('*' * 100)\n",
"step-4": "from envs import DATASET_FOLDER\nfrom os.path import join\nimport json\nimport collections\nfrom tqdm import tqdm\n\n\ndef add_space(context_list):\n space_context = []\n for idx, context in enumerate(context_list):\n space_sent_list = []\n sent_list = context[1]\n if idx == 0:\n for sent_idx, sent in enumerate(sent_list):\n sent = sent.replace(' .', '.')\n sent = sent.replace(' ,', ',')\n sent = sent.strip()\n if sent_idx == 0:\n space_sent_list.append(sent.strip())\n else:\n space_sent_list.append(' ' + sent)\n else:\n for sent_idx, sent in enumerate(sent_list):\n sent = sent.replace(' .', '.')\n sent = sent.replace(' ,', ',')\n sent = sent.strip()\n space_sent_list.append(' ' + sent)\n space_context.append([context[0], space_sent_list])\n return space_context\n\n\ndef find_answer(answer, sents):\n for s_idx, sent in enumerate(sents):\n if answer in sent:\n return s_idx\n return -1\n\n\ndef find_in_answer_context(answer, context):\n founds = []\n for ctx_idx, ctx in enumerate(context):\n ans_idx = find_answer(answer=answer, sents=ctx[1])\n if ans_idx >= 0:\n founds.append(1)\n else:\n founds.append(0)\n ans_found_idx = -1\n assert sum(founds) <= 2\n if sum(founds) > 0:\n if founds[0] == 1:\n ans_found_idx = 0\n else:\n ans_found_idx = 1\n return ans_found_idx\n\n\ndef fintuner_in_answer_context(answer, context, supporting_facts):\n ans_idx = find_answer(answer=answer, sents=context[0][1])\n support_facts = set([(x[0], x[1]) for x in supporting_facts])\n if ans_idx > 0 and len(support_facts) > 1:\n return True\n return False\n\n\ndef docred_refiner():\n DOCRED_OUTPUT_PROCESSED_para_file = join(DATASET_FOLDER,\n 'data_processed/docred/docred_multihop_para.json')\n DOCRED_OUTPUT_PROCESSED_raw_file = join(DATASET_FOLDER,\n 'data_raw/converted_docred_total.json')\n REFINEd_DOCRED_OUTPUT_PROCESSED = join(DATASET_FOLDER,\n 'data_raw/refined_converted_docred_total.json')\n with open(DOCRED_OUTPUT_PROCESSED_raw_file, 'r', encoding='utf-8'\n ) as reader:\n 
raw_data = json.load(reader)\n with open(DOCRED_OUTPUT_PROCESSED_para_file, 'r', encoding='utf-8'\n ) as reader:\n para_data = json.load(reader)\n print('loading {} data from {}'.format(len(raw_data),\n DOCRED_OUTPUT_PROCESSED_raw_file))\n examples = []\n answer_position = []\n answer_not_found = []\n no_answer_found = 0\n first_one_sent = 0\n title_dict = {}\n tunable_count = 0\n for case in tqdm(raw_data):\n key = case['_id']\n answer = case['answer']\n context = case['context']\n support_facts = case['supporting_facts']\n title = context[0][0][:-2].strip()\n if title not in title_dict:\n title_dict[title] = 1\n else:\n title_dict[title] = title_dict[title] + 1\n fine_tune_flag = fintuner_in_answer_context(answer=answer,\n supporting_facts=support_facts, context=context)\n if fine_tune_flag:\n tunable_count = tunable_count + 1\n ans_find_idx = find_in_answer_context(answer=answer, context=context)\n if ans_find_idx >= 0:\n answer_position.append(ans_find_idx)\n else:\n no_answer_found = no_answer_found + 1\n if ans_find_idx == 0 and len(context[0][1]) > 1:\n first_one_sent = first_one_sent + 1\n print(len(raw_data))\n print(len(answer_position))\n print(sum(answer_position))\n print('no answer found = {}'.format(no_answer_found))\n print('first one sent = {}'.format(first_one_sent))\n print('tunable count = {}'.format(tunable_count))\n print('title number = {}'.format(len(title_dict)))\n\n\ndef docred_checker():\n DOCRED_OUTPUT_PROCESSED_para_file = join(DATASET_FOLDER,\n 'data_processed/docred/docred_multihop_para.json')\n DOCRED_OUTPUT_PROCESSED_raw_file = join(DATASET_FOLDER,\n 'data_raw/converted_docred_total.json')\n with open(DOCRED_OUTPUT_PROCESSED_raw_file, 'r', encoding='utf-8'\n ) as reader:\n raw_data = json.load(reader)\n with open(DOCRED_OUTPUT_PROCESSED_para_file, 'r', encoding='utf-8'\n ) as reader:\n para_data = json.load(reader)\n print('loading {} data from {}'.format(len(raw_data),\n DOCRED_OUTPUT_PROCESSED_raw_file))\n examples = []\n for case 
in tqdm(raw_data):\n key = case['_id']\n for key_name, key_value in case.items():\n if key_name != 'context':\n print('{}: {}'.format(key_name, key_value))\n else:\n for ctx_idx, ctx in enumerate(key_value):\n print('{}: {}'.format(ctx_idx + 1, ctx))\n print('*' * 100)\n",
"step-5": "from envs import DATASET_FOLDER\nfrom os.path import join\nimport json\nimport collections\nfrom tqdm import tqdm\n\ndef add_space(context_list):\n space_context = []\n for idx, context in enumerate(context_list):\n space_sent_list = []\n sent_list = context[1]\n if idx == 0:\n for sent_idx, sent in enumerate(sent_list):\n sent = sent.replace(' .', '.')\n sent = sent.replace(' ,', ',')\n sent = sent.strip()\n if sent_idx == 0:\n space_sent_list.append(sent.strip())\n else:\n space_sent_list.append(' ' + sent)\n else:\n for sent_idx, sent in enumerate(sent_list):\n sent = sent.replace(' .', '.')\n sent = sent.replace(' ,', ',')\n sent = sent.strip()\n space_sent_list.append(' ' + sent)\n space_context.append([context[0], space_sent_list])\n return space_context\n\ndef find_answer(answer, sents):\n for s_idx, sent in enumerate(sents):\n if answer in sent:\n return s_idx\n return -1\n\ndef find_in_answer_context(answer, context):\n founds = []\n for ctx_idx, ctx in enumerate(context):\n ans_idx = find_answer(answer=answer, sents=ctx[1])\n if ans_idx >= 0:\n founds.append(1)\n # if ctx_idx == 0:\n # print('{} : {}: {}'.format(ctx_idx, ans_idx, len(ctx[1])))\n else:\n founds.append(0)\n ans_found_idx = -1\n assert sum(founds) <= 2\n if sum(founds) > 0:\n if founds[0] == 1:\n ans_found_idx = 0\n else:\n ans_found_idx = 1\n return ans_found_idx\n\ndef fintuner_in_answer_context(answer, context, supporting_facts):\n ans_idx = find_answer(answer=answer, sents=context[0][1])\n support_facts = set([(x[0], x[1]) for x in supporting_facts])\n if ans_idx > 0 and len(support_facts) > 1:\n # if (context[0][0], ans_idx) not in support_facts:\n # print(ans_idx, len(context[0][1]))\n # print(supporting_facts)\n return True\n return False\n\ndef docred_refiner():\n DOCRED_OUTPUT_PROCESSED_para_file = join(DATASET_FOLDER, 'data_processed/docred/docred_multihop_para.json')\n DOCRED_OUTPUT_PROCESSED_raw_file = join(DATASET_FOLDER,\n 'data_raw/converted_docred_total.json') # 
converted_docred_total.json\n REFINEd_DOCRED_OUTPUT_PROCESSED = join(DATASET_FOLDER, 'data_raw/refined_converted_docred_total.json')\n with open(DOCRED_OUTPUT_PROCESSED_raw_file, 'r', encoding='utf-8') as reader:\n raw_data = json.load(reader)\n with open(DOCRED_OUTPUT_PROCESSED_para_file, 'r', encoding='utf-8') as reader:\n para_data = json.load(reader)\n print('loading {} data from {}'.format(len(raw_data), DOCRED_OUTPUT_PROCESSED_raw_file))\n examples = []\n answer_position = []\n answer_not_found = []\n no_answer_found = 0\n first_one_sent = 0\n title_dict = {}\n tunable_count = 0\n for case in tqdm(raw_data):\n # print(case)\n key = case['_id']\n answer = case['answer']\n context = case['context']\n support_facts = case['supporting_facts']\n title = context[0][0][:-2].strip()\n if title not in title_dict:\n title_dict[title] = 1\n else:\n title_dict[title] = title_dict[title] + 1\n\n fine_tune_flag = fintuner_in_answer_context(answer=answer, supporting_facts=support_facts, context=context)\n if fine_tune_flag:\n tunable_count = tunable_count + 1\n ans_find_idx = find_in_answer_context(answer=answer, context=context)\n if ans_find_idx >= 0:\n answer_position.append(ans_find_idx)\n else:\n no_answer_found = no_answer_found + 1\n\n if ans_find_idx == 0 and len(context[0][1]) > 1:\n first_one_sent = first_one_sent + 1\n # for ctx_idx, ctx in enumerate(context):\n # is_answer_found = find_answer(answer=answer, sents=ctx[1])\n # if is_answer_found:\n # answer_position.append(ctx_idx)\n # break\n # else:\n # continue\n # for key_name, key_value in case.items():\n # if key_name != 'context':\n # print('{}: {}'.format(key_name, key_value))\n # else:\n # for ctx_idx, ctx in enumerate(key_value):\n # print('{}: {}'.format(ctx_idx + 1, ctx))\n # context = case['context']\n # space_context = add_space(context_list=context)\n # case['context'] = space_context\n # examples.append(case)\n # print(context)\n # print('-' * 50)\n # print(add_space(context_list=context))\n # 
print('*' * 100)\n print(len(raw_data))\n print(len(answer_position))\n print(sum(answer_position))\n print('no answer found = {}'.format(no_answer_found))\n print('first one sent = {}'.format(first_one_sent))\n print('tunable count = {}'.format(tunable_count))\n print('title number = {}'.format(len(title_dict)))\n # sorted_title_dict = sorted(title_dict.items(), key=lambda kv: kv[1])\n # for key, value in sorted_title_dict:\n # print('{}: {}'.format(key, value))\n\n\ndef docred_checker():\n DOCRED_OUTPUT_PROCESSED_para_file = join(DATASET_FOLDER, 'data_processed/docred/docred_multihop_para.json')\n DOCRED_OUTPUT_PROCESSED_raw_file = join(DATASET_FOLDER, 'data_raw/converted_docred_total.json') #converted_docred_total.json\n # Saved_raw_DOCRED_OUTPUT_PROCESSED = join(DATASET_FOLDER, 'data_raw/space_converted_docred_total.json')\n with open(DOCRED_OUTPUT_PROCESSED_raw_file, 'r', encoding='utf-8') as reader:\n raw_data = json.load(reader)\n with open(DOCRED_OUTPUT_PROCESSED_para_file, 'r', encoding='utf-8') as reader:\n para_data = json.load(reader)\n print('loading {} data from {}'.format(len(raw_data), DOCRED_OUTPUT_PROCESSED_raw_file))\n examples = []\n for case in tqdm(raw_data):\n # print(case)\n key = case['_id']\n for key_name, key_value in case.items():\n if key_name != 'context':\n print('{}: {}'.format(key_name, key_value))\n else:\n for ctx_idx, ctx in enumerate(key_value):\n print('{}: {}'.format(ctx_idx + 1, ctx))\n # context = case['context']\n # space_context = add_space(context_list=context)\n # case['context'] = space_context\n # examples.append(case)\n # print(context)\n # print('-' * 50)\n # print(add_space(context_list=context))\n print('*' * 100)\n # print('key {}'.format(key))\n # print(para_data[key])\n\n # json.dump(examples, open(Saved_raw_DOCRED_OUTPUT_PROCESSED, 'w'))\n",
"step-ids": [
3,
5,
6,
7,
8
]
}
|
[
3,
5,
6,
7,
8
] |
<|reserved_special_token_0|>
def gen_arbitrary_network(num_eps, ep_label=None, ep_capacity=12500,
    num_channels=1, racks_dict=None, topology_type=None):
    """Build a network of num_eps isolated, labelled endpoint nodes.

    No edges are created; the graph exists purely so that endpoint names can
    be indexed (e.g. by the Demand class) without crafting a full networkx
    topology that mimics the real network.

    Args:
        num_eps (int): Number of endpoints in network.
        ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints
            will have ep_label appended to the start of their label (e.g.
            'server_0', 'server_1', ...). If None, nodes are named '0', '1', ...
        ep_capacity (int, float): Byte capacity per end point channel.
        num_channels (int, float): Number of channels on each link in network.
        racks_dict (dict): Mapping of rack id -> list of end points in that
            rack. None means no clustering/rack structure.
        topology_type (str): Name recorded for this topology; auto-generated
            from the parameters when None.

    Returns:
        networkx graph: network object
    """
    network = nx.Graph()
    network.add_nodes_from(range(num_eps))
    if ep_label is None:
        server_names = [str(idx) for idx in range(num_eps)]
    else:
        server_names = [ep_label + '_' + str(idx) for idx in range(num_eps)]
    network = nx.relabel_nodes(network,
                               dict(zip(range(num_eps), server_names)))
    # Every node whose label contains ep_label is an endpoint; when the
    # membership test is impossible (ep_label is None), treat the node as an
    # endpoint regardless.
    eps = []
    for node in list(network.nodes):
        try:
            is_endpoint = ep_label in node
        except TypeError:
            is_endpoint = True
        if is_endpoint:
            eps.append(node)
    network.graph['endpoints'] = eps
    max_nw_capacity = num_eps * ep_capacity * num_channels / 2
    if topology_type is None:
        topology_type = 'arbitrary_endpoints_{}_chancap_{}_channels_{}'.format(
            num_eps, ep_capacity, num_channels)
    init_global_network_attrs(network, max_nw_capacity, num_channels,
        ep_link_capacity=ep_capacity * num_channels,
        endpoint_label=ep_label, node_labels=[ep_label],
        racks_dict=racks_dict, topology_type=topology_type)
    return network
def gen_nsfnet_network(ep_label='server', rack_label='rack', N=0,
    num_channels=2, server_to_rack_channel_capacity=1,
    rack_to_rack_channel_capacity=10, show_fig=False):
    """Generates the standard 14-node NSFNET topology (a U.S. core network).

    Args:
        ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have
            ep_label appended to the start of their label (e.g. 'server_0', 'server_1', ...).
        rack_label (str,int,float): Label for rack (ToR) nodes when N > 0.
        N (int): Number of servers per rack. If 0, assume all nodes in nsfnet
            are endpoints
        num_channels (int,float): Number of channels on each link in network.
        server_to_rack_channel_capacity (int,float): Byte capacity per channel
            between servers and ToR switch.
        rack_to_rack_channel_capacity (int,float): Byte capacity per channel between racks.
        show_fig (bool): Whether or not to plot and show fig. If True, will
            display fig.

    Returns:
        networkx graph: network object
    """
    network = nx.Graph()
    # 14-node NSFNET adjacency, by node index. The original list contained a
    # duplicate (4, 5) entry; it was removed (nx.Graph ignores parallel edges,
    # so behaviour is unchanged).
    node_pair_list = [[0, 1], [0, 3], [0, 2], [1, 2], [1, 7], [3, 8], [3, 4],
        [3, 6], [4, 5], [5, 2], [5, 13], [5, 12], [6, 7], [7, 10],
        [8, 11], [8, 9], [9, 10], [9, 12], [10, 11], [10, 13], [11, 12]]
    # With no servers (N == 0) the 14 NSFNET nodes are themselves endpoints;
    # otherwise they are racks with N servers attached to each.
    if N == 0:
        label = ep_label
    else:
        label = rack_label
    for idx in range(len(node_pair_list)):
        node_pair_list[idx][0] = label + '_' + str(node_pair_list[idx][0])
        node_pair_list[idx][1] = label + '_' + str(node_pair_list[idx][1])
    for edge in node_pair_list:
        network.add_edge(*tuple(edge))
    if N == 0:
        racks_dict = None
    else:
        i = 0
        racks_dict = {rack: [] for rack in range(14)}
        for rack in range(14):
            for server in range(N):
                racks_dict[rack].append(ep_label + '_' + str(i))
                network.add_edge(ep_label + '_' + str(i), rack_label + '_' +
                    str(rack))
                i += 1
    # Computed once (the original computed this twice with identical results).
    channel_names = gen_channel_names(num_channels)
    edges = [edge for edge in network.edges]
    # NOTE(review): every edge (including server<->rack links when N > 0) is
    # given the rack-to-rack capacity here, mirroring the original behaviour;
    # server_to_rack_channel_capacity only feeds the per-endpoint link attr.
    add_edges_capacity_attrs(network, edges, channel_names,
        rack_to_rack_channel_capacity)
    network.graph['endpoints'] = get_endpoints(network, ep_label)
    max_nw_capacity = len(network.edges
        ) * num_channels * rack_to_rack_channel_capacity / 2
    init_global_network_attrs(network, max_nw_capacity, num_channels,
        ep_link_capacity=server_to_rack_channel_capacity * num_channels,
        endpoint_label=ep_label, node_labels=[ep_label, rack_label],
        topology_type='14_node_nsfnet', racks_dict=racks_dict)
    if show_fig:
        plot_network(network, show_fig=True)
    return network
def gen_simple_network(ep_label='server', num_channels=2,
    server_to_rack_channel_capacity=500, show_fig=False):
    """Generates very simple 5-node topology.

    Args:
        ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have
            ep_label appended to the start of their label (e.g. 'server_0', 'server_1', ...).
        num_channels (int,float): Number of channels on each link in network.
        server_to_rack_channel_capacity (int,float): Byte capacity per channel.
        show_fig (bool): Whether or not to plot and show fig. If True, will
            display fig.

    Returns:
        networkx graph: network object
    """
    network = nx.Graph()
    network.add_nodes_from([node for node in range(5)])
    # Fixed 5-node edge set; all links carry the same unit weight.
    network.add_edges_from([(0, 1), (0, 2), (1, 2), (2, 4), (4, 3), (3, 1)],
        weight=1)
    servers = [ep_label + '_' + str(i) for i in range(5)]
    relabel_mapping = {node: label for node, label in zip(range(5), servers)}
    network = nx.relabel_nodes(network, relabel_mapping)
    channel_names = gen_channel_names(num_channels)
    edges = [edge for edge in network.edges]
    add_edges_capacity_attrs(network, edges, channel_names,
        server_to_rack_channel_capacity)
    network.graph['endpoints'] = get_endpoints(network, ep_label)
    # The /2 presumably avoids double-counting each link's two directions —
    # TODO(review): confirm against init_global_network_attrs.
    max_nw_capacity = len(network.edges
        ) * num_channels * server_to_rack_channel_capacity / 2
    init_global_network_attrs(network, max_nw_capacity, num_channels,
        ep_link_capacity=server_to_rack_channel_capacity * num_channels,
        endpoint_label=ep_label, node_labels=[ep_label], topology_type=
        '5_node_simple_network')
    if show_fig:
        plot_network(network, show_fig=True)
    return network
def get_endpoints(network, ep_label):
    """Collect every node of network whose label contains ep_label.

    Args:
        network (networkx graph): Networkx object.
        ep_label (str,int,float): Endpoint label (e.g. 'server'); any node
            whose label contains this substring is treated as an endpoint.

    Returns:
        eps (list): List of endpoints, in node-iteration order.
    """
    return [node for node in list(network.nodes) if ep_label in node]
def gen_fat_tree(k=4, L=2, n=4, ep_label='server', rack_label='rack',
    edge_label='edge', aggregate_label='agg', core_label='core',
    num_channels=2, server_to_rack_channel_capacity=500,
    rack_to_edge_channel_capacity=1000, edge_to_agg_channel_capacity=1000,
    agg_to_core_channel_capacity=2000, rack_to_core_channel_capacity=2000,
    show_fig=False):
    """Generates a perfect fat tree (i.e. all layers have switches with same radix/number of ports).

    Top layer is always core (spine) switch layer, bottom layer is always
    ToR (leaf) layer.

    L must be either 2 (core, ToR) or 4 (core, agg, edge, ToR).

    N.B. L=2 is commonly referred to as '2-layer Clos' or 'Clos' or
    'spine-leaf' topology.

    Resource for building (scroll down to summary table with equations):
    https://packetpushers.net/demystifying-dcn-topologies-clos-fat-trees-part2/

    Another good resource for data centre topologies etc. in general:
    https://www.oreilly.com/library/view/bgp-in-the/9781491983416/ch01.html#:~:text=The%20most%20common%20routing%20protocol,single%20data%20center%2C%20as%20well.

    Parameters of network:

    - number of core (spine) switches = (k/2)^(L/2) (top layer)
    - number of edge switches (if L=4) = (k^2)/2
    - number of agg switches (if L=4) = (k^2)/2
    - number of pods (if L=4) (pod is a group of agg and/or edge switches) = 2*(k/2)^(L-2)
    - number of ToR (leaf) switches (racks) = 2*(k/2)^(L-1) (bottom layer)
    - number of server-facing ToR 'host' ports = 2*(k/2)^2 (can have multiple servers connected to same host port, & can oversubscribe)
    - number of servers = number ToR switches * n

    Args:
        k (int): Number of ports (links) on each switch (both up and down).
        L (int): Number of layers in the fat tree.
        n (int): Number of server per rack.
        ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have
            ep_label appended to the start of their label (e.g. 'server_0', 'server_1', ...).
        edge_label (str,int): Label to assign to edge switch nodes.
        aggregate_label (str,int): Label to assign to aggregate switch nodes.
        core_label (str,int): Label to assign to core switch nodes.
        num_channels (int, float): Number of channels on each link in network.
        server_to_rack_channel_capacity (int,float): Byte capacity per channel.
        edge_to_agg_channel_capacity (int,float): (if L==4) Byte capacity per channel.
        agg_to_core_channel_capacity (int,float): (if L==4) Byte capacity per channel.
        rack_to_core_channel_capacity (int,float): (if L==2) Byte capacity per channel.

    Returns:
        networkx graph: network object
    """
    # Validate the two structural constraints of a perfect fat tree.
    if L != 2 and L != 4:
        raise Exception(
            'L must be 2 (ToR layer, core layer) or 4 (ToR layer, edge layer, agg layer, core layer), but is {}.'
            .format(L))
    if k % 2 != 0:
        raise Exception(
            'k must be even since, in perfect fat tree, have equal number of up and down ports on each switch, but is {}.'
            .format(k))
    channel_names = gen_channel_names(num_channels)
    if L == 2:
        node_labels = [ep_label, rack_label, core_label]
    else:
        node_labels = [ep_label, rack_label, edge_label, aggregate_label,
            core_label]
    # Standard fat-tree sizing formulas (see docstring for the derivation).
    num_cores = int((k / 2) ** (L / 2))
    num_aggs = int(k ** 2 / 2)
    num_edges = int(k ** 2 / 2)
    num_pods = int(2 * (k / 2) ** (L - 2))
    num_racks = int(2 * (k / 2) ** (L - 1))
    num_servers = int(num_racks * n)
    # Generate the per-layer node names.
    cores = [(core_label + '_' + str(i)) for i in range(num_cores)]
    aggs = [(aggregate_label + '_' + str(i)) for i in range(num_aggs)]
    edges = [(edge_label + '_' + str(i)) for i in range(num_edges)]
    racks = [(rack_label + '_' + str(i)) for i in range(num_racks)]
    servers = [(ep_label + '_' + str(i)) for i in range(num_servers)]
    # Core and ToR layers exist in both the L=2 and L=4 variants.
    core_layer = nx.Graph()
    rack_layer = nx.Graph()
    core_layer.add_nodes_from(cores)
    rack_layer.add_nodes_from(racks)
    fat_tree_network = nx.compose(core_layer, rack_layer)
    if L == 2:
        # Spine-leaf: each rack connects straight up to k/2 core switches.
        rack_iterator = iter(racks)
        for rack in racks:
            core_iterator = iter(cores)
            for up_port in range(int(k / 2)):
                core = next(core_iterator)
                fat_tree_network.add_edge(rack, core)
                add_edge_capacity_attrs(fat_tree_network, (rack, core),
                    channel_names, rack_to_core_channel_capacity)
    else:
        # 4-layer: partition the edge & agg switches into k pods of k/2 each.
        num_pods = int(k)
        pods = [[] for i in range(num_pods)]
        prev_iter = 0
        for pod_iter in range(len(pods)):
            curr_iter = int(prev_iter + k / 2)
            pods[pod_iter].append(edges[prev_iter:curr_iter])
            pods[pod_iter].append(aggs[prev_iter:curr_iter])
            prev_iter = curr_iter
        # One sub-graph per pod; dict keys are 1-tuples of the pod label
        # (note the trailing comma when building `key` below).
        pod_labels = [('pod_' + str(i)) for i in range(num_pods)]
        pods_dict = {tuple([pod]): nx.Graph() for pod in pod_labels}
        for pod_iter in range(num_pods):
            key = 'pod_' + str(pod_iter),
            pod_edges = pods[pod_iter][0]
            pod_aggs = pods[pod_iter][1]
            pods_dict[key].add_nodes_from(pod_edges)
            pods_dict[key].add_nodes_from(pod_aggs)
            # Fully mesh the edge <-> agg switches inside each pod.
            for pod_edge in pod_edges:
                for pod_agg in pod_aggs:
                    pods_dict[key].add_edge(pod_agg, pod_edge)
                    add_edge_capacity_attrs(pods_dict[key], (pod_agg,
                        pod_edge), channel_names, edge_to_agg_channel_capacity)
        # Merge the pod sub-graphs into the main network.
        pod_networks = list(pods_dict.values())
        for pod_iter in range(num_pods):
            fat_tree_network = nx.compose(fat_tree_network, pod_networks[
                pod_iter])
        # Wire agg switches up to cores until every agg has degree k.
        for pod_iter in range(num_pods):
            pod_aggs = pods[pod_iter][1]
            core_iterator = iter(cores)
            for pod_agg in pod_aggs:
                while fat_tree_network.degree[pod_agg] < k:
                    core = next(core_iterator)
                    fat_tree_network.add_edge(core, pod_agg)
                    add_edge_capacity_attrs(fat_tree_network, (core,
                        pod_agg), channel_names, agg_to_core_channel_capacity)
        # Wire edge switches down to racks until every edge has degree k.
        rack_iterator = iter(racks)
        for pod_iter in range(num_pods):
            pod_edges = pods[pod_iter][0]
            for pod_edge in pod_edges:
                while fat_tree_network.degree[pod_edge] < k:
                    rack = next(rack_iterator)
                    fat_tree_network.add_edge(pod_edge, rack)
                    add_edge_capacity_attrs(fat_tree_network, (pod_edge,
                        rack), channel_names, rack_to_edge_channel_capacity)
    # Attach n servers to each rack (common to both variants).
    racks_dict = {rack: [] for rack in racks}
    server_iterator = iter(servers)
    for rack in racks:
        for _ in range(n):
            server = next(server_iterator)
            fat_tree_network.add_edge(rack, server)
            add_edge_capacity_attrs(fat_tree_network, (rack, server),
                channel_names, server_to_rack_channel_capacity)
            racks_dict[rack].append(server)
    # NOTE(review): capacity formula divides by 2 — presumably one count per
    # link direction; confirm against the rest of the project.
    max_nw_capacity = (num_servers * num_channels *
        server_to_rack_channel_capacity / 2)
    fat_tree_network.graph['endpoints'] = servers
    init_global_network_attrs(fat_tree_network, max_nw_capacity,
        num_channels, ep_link_capacity=server_to_rack_channel_capacity *
        num_channels, endpoint_label=ep_label, node_labels=node_labels,
        topology_type='fat_tree', racks_dict=racks_dict)
    if show_fig:
        plot_network(fat_tree_network, show_fig=True)
    return fat_tree_network
def init_global_network_attrs(network, max_nw_capacity, num_channels,
    ep_link_capacity, endpoint_label='server', topology_type='unknown',
    node_labels=['server'], racks_dict=None):
    """Initialises the standard global network attributes of a given network.

    Args:
        network (obj): NetworkX object.
        max_nw_capacity (int/float): Maximum rate at which info can be reliably
            transmitted over the network (sum of all link capacities).
        num_channels (int): Number of channels on each link in network.
        ep_link_capacity (int/float): Byte capacity of each endpoint link.
        endpoint_label (str): Label prefix of endpoint nodes.
        topology_type (str): Label of network topology (e.g. 'fat_tree').
        node_labels (list): Label classes assigned to network nodes
            (e.g. ['server', 'rack', 'edge']).
        racks_dict (dict): Which servers/endpoints are in which rack. If None,
            assume there is no rack system with multiple servers per rack.
    """
    network.graph.update({
        'endpoint_label': endpoint_label,
        'num_channels_per_link': num_channels,
        'ep_link_capacity': ep_link_capacity,
        'ep_link_port_capacity': ep_link_capacity / 2,
        'max_nw_capacity': max_nw_capacity,
        'curr_nw_capacity_used': 0,
        'num_active_connections': 0,
        'total_connections_blocked': 0,
        'node_labels': node_labels,
        'topology_type': topology_type,
        'channel_names': gen_channel_names(num_channels),
    })
    if racks_dict is None:
        network.graph['rack_to_ep_dict'] = None
        network.graph['ep_to_rack_dict'] = None
    else:
        # Stringify all rack / endpoint ids so lookups are key-type agnostic.
        rack_to_ep = {str(rack): [str(ep) for ep in eps]
            for rack, eps in racks_dict.items()}
        # Build the reverse mapping; first rack seen for an endpoint wins.
        ep_to_rack = {}
        for rack, eps in rack_to_ep.items():
            for ep in eps:
                ep_to_rack.setdefault(ep, rack)
        network.graph['rack_to_ep_dict'] = rack_to_ep
        network.graph['ep_to_rack_dict'] = ep_to_rack
<|reserved_special_token_0|>
def add_edge_capacity_attrs(network, edge, channel_names, channel_capacity,
    bidirectional_links=True):
    """Adds channels and corresponding max channel bytes to single edge in network.

    Args:
        network (networkx graph): Network containing the edge to which attrs
            will be added.
        edge (tuple): Node-node edge pair.
        channel_names (list): List of channel names to add to edge.
        channel_capacity (int,float): Capacity to allocate to each channel.
        bidirectional_links (bool): If True, each link has capacity split
            equally between its src and dst ports, which are treated
            separately for incoming and outgoing traffic.
    """
    if bidirectional_links:
        src, dst = edge[0], edge[1]
        # Each direction of travel gets half of the link's capacity.
        port_capacity = channel_capacity / 2
        attrs = {edge: {
            '{}_to_{}_port'.format(src, dst): {
                'channels': {c: port_capacity for c in channel_names},
                'max_channel_capacity': port_capacity},
            '{}_to_{}_port'.format(dst, src): {
                'channels': {c: port_capacity for c in channel_names},
                'max_channel_capacity': port_capacity}}}
    else:
        attrs = {edge: {
            'channels': {c: channel_capacity for c in channel_names},
            'max_channel_capacity': channel_capacity}}
    nx.set_edge_attributes(network, attrs)
def add_edges_capacity_attrs(network, edges, channel_names,
    channel_capacity, bidirectional_links=True):
    """Adds channels & max channel capacities to each edge in ``edges``.

    To access e.g. the edge going from node 0 to node 1 (edge (0, 1)), index
    the network with network[0][1]. To access e.g. the channel_1 attribute of
    that edge, use network[0][1]['channels']['channel_1'], or, if
    bidirectional_links, network[0][1]['0_to_1_port']['channels']['channel_1']
    (and similarly '1_to_0_port' for the opposite direction).

    Args:
        network (networkx graph): Network containing edges to which attrs will
            be added.
        edges (list): List of node pairs in tuples.
        channel_names (list of str): List of channel names to add to edge.
        channel_capacity (int, float): Capacity to allocate to each channel.
        bidirectional_links (bool): If True, each link has capacity split
            equally between its src and dst ports, which are treated
            separately for incoming and outgoing traffic.
    """
    attrs = {}
    if bidirectional_links:
        # Each direction of travel gets half of the link's capacity.
        port_capacity = channel_capacity / 2
        for edge in edges:
            src, dst = edge[0], edge[1]
            attrs[edge] = {
                '{}_to_{}_port'.format(src, dst): {
                    'channels': {c: port_capacity for c in channel_names},
                    'max_channel_capacity': port_capacity},
                '{}_to_{}_port'.format(dst, src): {
                    'channels': {c: port_capacity for c in channel_names},
                    'max_channel_capacity': port_capacity}}
    else:
        for edge in edges:
            attrs[edge] = {
                'channels': {c: channel_capacity for c in channel_names},
                'max_channel_capacity': channel_capacity}
    nx.set_edge_attributes(network, attrs)
def get_node_type_dict(network, node_types=[]):
    """Gets dict where keys are node types, values are list of nodes for each node type in graph.

    Note: ``node_types`` is never mutated, so the mutable default is safe.
    """
    node_type_dict = {node_type: [] for node_type in node_types}
    for node in network.nodes:
        # A node belongs to every type whose label occurs in its name.
        for node_type in node_types:
            if node_type in node:
                node_type_dict[node_type].append(node)
    return node_type_dict
def get_fat_tree_positions(net, width_scale=500, height_scale=10):
    """Gets networkx positions of nodes in fat tree network for plotting.

    Each node-type layer is drawn at its own height; nodes within a layer are
    spread evenly across the width.
    """
    node_type_dict = get_node_type_dict(net, net.graph['node_labels'])
    pos = {}
    # Successive layers are stacked at heights 1..5 (max 5 layers supported).
    layer_height = iter([1, 2, 3, 4, 5])
    for node_type, nodes in node_type_dict.items():
        height = next(layer_height)
        spacing = 1 / (len(nodes) + 1)
        for idx, node in enumerate(nodes):
            pos[node] = ((idx + 1) * spacing * width_scale,
                height * height_scale)
    return pos
<|reserved_special_token_0|>
def plot_network(network, draw_node_labels=True, ep_label='server',
    network_node_size=2000, font_size=30, linewidths=1, fig_scale=2,
    path_to_save=None, show_fig=False):
    """Plots networkx graph.

    Recognises special fat tree network and applies appropriate node
    positioning, labelling, colouring etc.

    Args:
        network (networkx graph): Network object to be plotted.
        draw_node_labels (bool): Whether or not to draw node labels on plot.
        ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have
            ep_label appended to the start of their label (e.g. 'server_0', 'server_1', ...).
        network_node_size (int,float): Size of plotted nodes.
        font_size (int,float): Size of font of plotted labels etc.
        linewidths (int,float): Width of edges in network.
        fig_scale (int,float): Scaling factor to apply to plotted network.
        path_to_save (str): Path to directory (with file name included) in which
            to save generated plot. E.g. path_to_save='data/my_plot'
        show_fig (bool): Whether or not to plot and show fig. If True, will
            return and display fig.

    Returns:
        matplotlib.figure.Figure: node distribution plotted as a 2d matrix.
    """
    # Deep copy so position initialisation cannot mutate the caller's network.
    net_node_positions = init_network_node_positions(copy.deepcopy(network))
    fig = plt.figure(figsize=[15 * fig_scale, 15 * fig_scale])
    pos = {}
    # NOTE(review): network_nodes is never used below; dead local.
    network_nodes = []
    network_nodes_dict = get_node_type_dict(network, network.graph[
        'node_labels'])
    for nodes in list(network_nodes_dict.values()):
        for network_node in nodes:
            pos[network_node] = net_node_positions[network_node]
    # One fixed colour per node-type layer (supports up to 5 layers).
    node_colours = iter(['#25c44d', '#36a0c7', '#e8b017', '#6115a3', '#160e63']
        )
    for node_type in network.graph['node_labels']:
        nx.draw_networkx_nodes(network, pos, nodelist=network_nodes_dict[
            node_type], node_size=network_node_size, node_color=next(
            node_colours), linewidths=linewidths, label=node_type)
    if draw_node_labels:
        nx.draw_networkx_labels(network, pos, font_size=font_size,
            font_color='k', font_family='sans-serif', font_weight='normal',
            alpha=1.0)
    fibre_links = list(network.edges)
    nx.draw_networkx_edges(network, pos, edgelist=fibre_links, edge_color=
        'k', width=3, label='Fibre link')
    if path_to_save is not None:
        # Figure is pickled (rather than savefig'd) so it can be re-loaded
        # and edited later.
        tools.pickle_data(path_to_save, fig)
    if show_fig:
        plt.show()
    return fig
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def gen_arbitrary_network(num_eps, ep_label=None, ep_capacity=12500,
    num_channels=1, racks_dict=None, topology_type=None):
    """Generates an arbitrary network with num_eps nodes labelled as ep_label.

    Note that no edges are formed in this network; it is purely for ep name
    indexing purposes when using Demand class. This is useful where you want
    to use the demand class but not necessarily with a carefully crafted
    networkx graph that accurately mimics the network you will use for the
    demands.

    Args:
        num_eps (int): Number of endpoints in network.
        ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints
            will have ep_label prepended to their name (e.g. 'server_0', ...).
        ep_capacity (int, float): Byte capacity per end point channel.
        num_channels (int, float): Number of channels on each link in network.
        racks_dict (dict): Mapping of rack ids to lists of end points. If None,
            assume there is no clustering/rack system in the network.
        topology_type (str): Label of network topology.

    Returns:
        networkx graph: network object
    """
    # Name the endpoints; bare indices are used when no label was supplied.
    if ep_label is None:
        node_names = [str(i) for i in range(num_eps)]
    else:
        node_names = [ep_label + '_' + str(i) for i in range(num_eps)]
    network = nx.Graph()
    network.add_nodes_from(range(num_eps))
    network = nx.relabel_nodes(network, dict(zip(range(num_eps), node_names)))
    # Collect the endpoint nodes. `ep_label in node` raises TypeError when
    # ep_label is None, which is treated as "every node is an endpoint".
    eps = []
    for node in list(network.nodes):
        try:
            matches = ep_label in node
        except TypeError:
            matches = True
        if matches:
            eps.append(node)
    network.graph['endpoints'] = eps
    # NOTE(review): capacity formula divides by 2 — presumably one count per
    # link direction; confirm against the rest of the project.
    max_nw_capacity = num_eps * ep_capacity * num_channels / 2
    if topology_type is None:
        topology_type = 'arbitrary_endpoints_{}_chancap_{}_channels_{}'.format(
            num_eps, ep_capacity, num_channels)
    init_global_network_attrs(network, max_nw_capacity, num_channels,
        ep_link_capacity=ep_capacity * num_channels,
        endpoint_label=ep_label, node_labels=[ep_label],
        racks_dict=racks_dict, topology_type=topology_type)
    return network
def gen_nsfnet_network(ep_label='server', rack_label='rack', N=0,
    num_channels=2, server_to_rack_channel_capacity=1,
    rack_to_rack_channel_capacity=10, show_fig=False):
    """Generates the standard 14-node NSFNET topology (a U.S. core network).

    Args:
        ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have
            ep_label appended to the start of their label (e.g. 'server_0', 'server_1', ...).
        N (int): Number of servers per rack. If 0, assume all nodes in nsfnet
            are endpoints.
        num_channels (int,float): Number of channels on each link in network.
        server_to_rack_channel_capacity (int,float): Byte capacity per channel
            between servers and ToR switch.
        rack_to_rack_channel_capacity (int,float): Byte capacity per channel between racks.
        show_fig (bool): Whether or not to plot and show fig. If True, will
            display fig.

    Returns:
        networkx graph: network object
    """
    channel_names = gen_channel_names(num_channels)
    network = nx.Graph()
    # Hard-coded NSFNET adjacency given as integer node-id pairs.
    # NOTE(review): (4, 5) appears twice; networkx silently ignores the
    # duplicate edge.
    node_pair_list = [[0, 1], [0, 3], [0, 2], [1, 2], [1, 7], [3, 8], [3, 4
        ], [3, 6], [4, 5], [4, 5], [5, 2], [5, 13], [5, 12], [6, 7], [7, 10
        ], [8, 11], [8, 9], [9, 10], [9, 12], [10, 11], [10, 13], [11, 12]]
    # With N == 0 the 14 nodes are the endpoints themselves; otherwise they
    # are racks and N servers are attached to each below.
    if N == 0:
        label = ep_label
    else:
        label = rack_label
    # Rename the integer ids to labelled node names (e.g. 'rack_3').
    for idx in range(len(node_pair_list)):
        node_pair_list[idx][0] = label + '_' + str(node_pair_list[idx][0])
        node_pair_list[idx][1] = label + '_' + str(node_pair_list[idx][1])
    for edge in node_pair_list:
        network.add_edge(*tuple(edge))
    if N == 0:
        racks_dict = None
    else:
        # Attach N servers to each of the 14 racks; `i` is a global server
        # counter so server names are unique across racks.
        i = 0
        racks_dict = {rack: [] for rack in range(14)}
        for rack in range(14):
            for server in range(N):
                racks_dict[rack].append(ep_label + '_' + str(i))
                network.add_edge(ep_label + '_' + str(i), rack_label + '_' +
                    str(rack))
                i += 1
    channel_names = gen_channel_names(num_channels)
    edges = [edge for edge in network.edges]
    # NOTE(review): all edges (incl. server-to-rack, if any) get the
    # rack-to-rack capacity here — confirm this is intended.
    add_edges_capacity_attrs(network, edges, channel_names,
        rack_to_rack_channel_capacity)
    network.graph['endpoints'] = get_endpoints(network, ep_label)
    max_nw_capacity = len(network.edges
        ) * num_channels * rack_to_rack_channel_capacity / 2
    init_global_network_attrs(network, max_nw_capacity, num_channels,
        ep_link_capacity=server_to_rack_channel_capacity * num_channels,
        endpoint_label=ep_label, node_labels=[ep_label, rack_label],
        topology_type='14_node_nsfnet', racks_dict=racks_dict)
    if show_fig:
        plot_network(network, show_fig=True)
    return network
# NOTE(review): byte-for-byte duplicate of gen_simple_network() defined
# earlier in this file; Python binds the later definition, so this copy is
# the one in effect. Consider removing one copy.
def gen_simple_network(ep_label='server', num_channels=2,
    server_to_rack_channel_capacity=500, show_fig=False):
    """Generates very simple 5-node topology.

    Args:
        ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have
            ep_label appended to the start of their label (e.g. 'server_0', 'server_1', ...).
        num_channels (int,float): Number of channels on each link in network.
        channel_capacity (int,float): Byte capacity per channel.
        show_fig (bool): Whether or not to plot and show fig. If True, will
            display fig.

    Returns:
        networkx graph: network object
    """
    network = nx.Graph()
    network.add_nodes_from([node for node in range(5)])
    network.add_edges_from([(0, 1), (0, 2), (1, 2), (2, 4), (4, 3), (3, 1)],
        weight=1)
    servers = [(ep_label + '_' + str(i)) for i in range(5)]
    relabel_mapping = {node: label for node, label in zip(range(5), servers)}
    network = nx.relabel_nodes(network, relabel_mapping)
    channel_names = gen_channel_names(num_channels)
    edges = [edge for edge in network.edges]
    add_edges_capacity_attrs(network, edges, channel_names,
        server_to_rack_channel_capacity)
    network.graph['endpoints'] = get_endpoints(network, ep_label)
    max_nw_capacity = len(network.edges
        ) * num_channels * server_to_rack_channel_capacity / 2
    init_global_network_attrs(network, max_nw_capacity, num_channels,
        ep_link_capacity=server_to_rack_channel_capacity * num_channels,
        endpoint_label=ep_label, node_labels=[ep_label], topology_type=
        '5_node_simple_network')
    if show_fig:
        plot_network(network, show_fig=True)
    return network
# NOTE(review): byte-for-byte duplicate of get_endpoints() defined earlier
# in this file; Python binds the later definition. Consider deduplicating.
def get_endpoints(network, ep_label):
    """Gets list of endpoints of network.

    Args:
        network (networkx graph): Networkx object.
        ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have
            ep_label appended to the start of their label (e.g. 'server_0', 'server_1', ...).

    Returns:
        eps (list): List of endpoints.
    """
    eps = []
    for node in list(network.nodes):
        # A node is an endpoint when ep_label occurs in its name.
        if ep_label in node:
            eps.append(node)
    return eps
# NOTE(review): byte-for-byte duplicate of gen_fat_tree() defined earlier in
# this file; Python binds the later definition. Consider deduplicating.
def gen_fat_tree(k=4, L=2, n=4, ep_label='server', rack_label='rack',
    edge_label='edge', aggregate_label='agg', core_label='core',
    num_channels=2, server_to_rack_channel_capacity=500,
    rack_to_edge_channel_capacity=1000, edge_to_agg_channel_capacity=1000,
    agg_to_core_channel_capacity=2000, rack_to_core_channel_capacity=2000,
    show_fig=False):
    """Generates a perfect fat tree (i.e. all layers have switches with same radix/number of ports).

    Top layer is always core (spine) switch layer, bottom layer is always
    ToR (leaf) layer.

    L must be either 2 (core, ToR) or 4 (core, agg, edge, ToR).

    N.B. L=2 is commonly referred to as '2-layer Clos' or 'Clos' or
    'spine-leaf' topology.

    Resource for building (scroll down to summary table with equations):
    https://packetpushers.net/demystifying-dcn-topologies-clos-fat-trees-part2/

    Another good resource for data centre topologies etc. in general:
    https://www.oreilly.com/library/view/bgp-in-the/9781491983416/ch01.html#:~:text=The%20most%20common%20routing%20protocol,single%20data%20center%2C%20as%20well.

    Parameters of network:

    - number of core (spine) switches = (k/2)^(L/2) (top layer)
    - number of edge switches (if L=4) = (k^2)/2
    - number of agg switches (if L=4) = (k^2)/2
    - number of pods (if L=4) (pod is a group of agg and/or edge switches) = 2*(k/2)^(L-2)
    - number of ToR (leaf) switches (racks) = 2*(k/2)^(L-1) (bottom layer)
    - number of server-facing ToR 'host' ports = 2*(k/2)^2 (can have multiple servers connected to same host port, & can oversubscribe)
    - number of servers = number ToR switches * n

    Args:
        k (int): Number of ports (links) on each switch (both up and down).
        L (int): Number of layers in the fat tree.
        n (int): Number of server per rack.
        ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have
            ep_label appended to the start of their label (e.g. 'server_0', 'server_1', ...).
        edge_label (str,int): Label to assign to edge switch nodes.
        aggregate_label (str,int): Label to assign to aggregate switch nodes.
        core_label (str,int): Label to assign to core switch nodes.
        num_channels (int, float): Number of channels on each link in network.
        server_to_rack_channel_capacity (int,float): Byte capacity per channel.
        edge_to_agg_channel_capacity (int,float): (if L==4) Byte capacity per channel.
        agg_to_core_channel_capacity (int,float): (if L==4) Byte capacity per channel.
        rack_to_core_channel_capacity (int,float): (if L==2) Byte capacity per channel.

    Returns:
        networkx graph: network object
    """
    # Validate the two structural constraints of a perfect fat tree.
    if L != 2 and L != 4:
        raise Exception(
            'L must be 2 (ToR layer, core layer) or 4 (ToR layer, edge layer, agg layer, core layer), but is {}.'
            .format(L))
    if k % 2 != 0:
        raise Exception(
            'k must be even since, in perfect fat tree, have equal number of up and down ports on each switch, but is {}.'
            .format(k))
    channel_names = gen_channel_names(num_channels)
    if L == 2:
        node_labels = [ep_label, rack_label, core_label]
    else:
        node_labels = [ep_label, rack_label, edge_label, aggregate_label,
            core_label]
    # Standard fat-tree sizing formulas (see docstring for the derivation).
    num_cores = int((k / 2) ** (L / 2))
    num_aggs = int(k ** 2 / 2)
    num_edges = int(k ** 2 / 2)
    num_pods = int(2 * (k / 2) ** (L - 2))
    num_racks = int(2 * (k / 2) ** (L - 1))
    num_servers = int(num_racks * n)
    # Generate the per-layer node names.
    cores = [(core_label + '_' + str(i)) for i in range(num_cores)]
    aggs = [(aggregate_label + '_' + str(i)) for i in range(num_aggs)]
    edges = [(edge_label + '_' + str(i)) for i in range(num_edges)]
    racks = [(rack_label + '_' + str(i)) for i in range(num_racks)]
    servers = [(ep_label + '_' + str(i)) for i in range(num_servers)]
    # Core and ToR layers exist in both the L=2 and L=4 variants.
    core_layer = nx.Graph()
    rack_layer = nx.Graph()
    core_layer.add_nodes_from(cores)
    rack_layer.add_nodes_from(racks)
    fat_tree_network = nx.compose(core_layer, rack_layer)
    if L == 2:
        # Spine-leaf: each rack connects straight up to k/2 core switches.
        rack_iterator = iter(racks)
        for rack in racks:
            core_iterator = iter(cores)
            for up_port in range(int(k / 2)):
                core = next(core_iterator)
                fat_tree_network.add_edge(rack, core)
                add_edge_capacity_attrs(fat_tree_network, (rack, core),
                    channel_names, rack_to_core_channel_capacity)
    else:
        # 4-layer: partition the edge & agg switches into k pods of k/2 each.
        num_pods = int(k)
        pods = [[] for i in range(num_pods)]
        prev_iter = 0
        for pod_iter in range(len(pods)):
            curr_iter = int(prev_iter + k / 2)
            pods[pod_iter].append(edges[prev_iter:curr_iter])
            pods[pod_iter].append(aggs[prev_iter:curr_iter])
            prev_iter = curr_iter
        # One sub-graph per pod; dict keys are 1-tuples of the pod label
        # (note the trailing comma when building `key` below).
        pod_labels = [('pod_' + str(i)) for i in range(num_pods)]
        pods_dict = {tuple([pod]): nx.Graph() for pod in pod_labels}
        for pod_iter in range(num_pods):
            key = 'pod_' + str(pod_iter),
            pod_edges = pods[pod_iter][0]
            pod_aggs = pods[pod_iter][1]
            pods_dict[key].add_nodes_from(pod_edges)
            pods_dict[key].add_nodes_from(pod_aggs)
            # Fully mesh the edge <-> agg switches inside each pod.
            for pod_edge in pod_edges:
                for pod_agg in pod_aggs:
                    pods_dict[key].add_edge(pod_agg, pod_edge)
                    add_edge_capacity_attrs(pods_dict[key], (pod_agg,
                        pod_edge), channel_names, edge_to_agg_channel_capacity)
        # Merge the pod sub-graphs into the main network.
        pod_networks = list(pods_dict.values())
        for pod_iter in range(num_pods):
            fat_tree_network = nx.compose(fat_tree_network, pod_networks[
                pod_iter])
        # Wire agg switches up to cores until every agg has degree k.
        for pod_iter in range(num_pods):
            pod_aggs = pods[pod_iter][1]
            core_iterator = iter(cores)
            for pod_agg in pod_aggs:
                while fat_tree_network.degree[pod_agg] < k:
                    core = next(core_iterator)
                    fat_tree_network.add_edge(core, pod_agg)
                    add_edge_capacity_attrs(fat_tree_network, (core,
                        pod_agg), channel_names, agg_to_core_channel_capacity)
        # Wire edge switches down to racks until every edge has degree k.
        rack_iterator = iter(racks)
        for pod_iter in range(num_pods):
            pod_edges = pods[pod_iter][0]
            for pod_edge in pod_edges:
                while fat_tree_network.degree[pod_edge] < k:
                    rack = next(rack_iterator)
                    fat_tree_network.add_edge(pod_edge, rack)
                    add_edge_capacity_attrs(fat_tree_network, (pod_edge,
                        rack), channel_names, rack_to_edge_channel_capacity)
    # Attach n servers to each rack (common to both variants).
    racks_dict = {rack: [] for rack in racks}
    server_iterator = iter(servers)
    for rack in racks:
        for _ in range(n):
            server = next(server_iterator)
            fat_tree_network.add_edge(rack, server)
            add_edge_capacity_attrs(fat_tree_network, (rack, server),
                channel_names, server_to_rack_channel_capacity)
            racks_dict[rack].append(server)
    # NOTE(review): capacity formula divides by 2 — presumably one count per
    # link direction; confirm against the rest of the project.
    max_nw_capacity = (num_servers * num_channels *
        server_to_rack_channel_capacity / 2)
    fat_tree_network.graph['endpoints'] = servers
    init_global_network_attrs(fat_tree_network, max_nw_capacity,
        num_channels, ep_link_capacity=server_to_rack_channel_capacity *
        num_channels, endpoint_label=ep_label, node_labels=node_labels,
        topology_type='fat_tree', racks_dict=racks_dict)
    if show_fig:
        plot_network(fat_tree_network, show_fig=True)
    return fat_tree_network
# NOTE(review): byte-for-byte duplicate of init_global_network_attrs()
# defined earlier in this file; Python binds the later definition.
# Consider deduplicating.
def init_global_network_attrs(network, max_nw_capacity, num_channels,
    ep_link_capacity, endpoint_label='server', topology_type='unknown',
    node_labels=['server'], racks_dict=None):
    """Initialises the standard global network attributes of a given network.

    Args:
        network (obj): NetworkX object.
        max_nw_capacity (int/float): Maximum rate at which info can be reliably
            transmitted over the network (sum of all link capacities).
        num_channels (int): Number of channels on each link in network.
        topology_type (str): Label of network topology (e.g. 'fat_tree').
        node_labels (list): Label classes assigned to network nodes
            (e.g. ['server', 'rack', 'edge']).
        racks_dict (dict): Which servers/endpoints are in which rack. If None,
            assume do not have rack system where have multiple servers in one
            rack.
    """
    network.graph['endpoint_label'] = endpoint_label
    network.graph['num_channels_per_link'] = num_channels
    network.graph['ep_link_capacity'] = ep_link_capacity
    # Each endpoint link is split into two ports (src/dst), hence / 2.
    network.graph['ep_link_port_capacity'] = ep_link_capacity / 2
    network.graph['max_nw_capacity'] = max_nw_capacity
    network.graph['curr_nw_capacity_used'] = 0
    network.graph['num_active_connections'] = 0
    network.graph['total_connections_blocked'] = 0
    network.graph['node_labels'] = node_labels
    network.graph['topology_type'] = topology_type
    network.graph['channel_names'] = gen_channel_names(num_channels)
    # Stringify rack/endpoint ids so lookups are key-type agnostic.
    if racks_dict is not None:
        _racks_dict = {}
        for key, val in racks_dict.items():
            _racks_dict[str(key)] = []
            for v in val:
                _racks_dict[str(key)].append(str(v))
        network.graph['rack_to_ep_dict'] = _racks_dict
    else:
        network.graph['rack_to_ep_dict'] = None
    # Build the reverse endpoint -> rack mapping (first rack seen wins).
    if racks_dict is not None:
        ep_to_rack_dict = {}
        for key, val in _racks_dict.items():
            for v in val:
                if v not in ep_to_rack_dict.keys():
                    ep_to_rack_dict[v] = key
        network.graph['ep_to_rack_dict'] = ep_to_rack_dict
    else:
        network.graph['ep_to_rack_dict'] = None
<|reserved_special_token_0|>
# NOTE(review): byte-for-byte duplicate of add_edge_capacity_attrs() defined
# earlier in this file; Python binds the later definition. Consider
# deduplicating.
def add_edge_capacity_attrs(network, edge, channel_names, channel_capacity,
    bidirectional_links=True):
    """Adds channels and corresponding max channel bytes to single edge in network.

    Args:
        network (networkx graph): Network containing edges to which attrs will
            be added.
        edge (tuple): Node-node edge pair.
        channel_names (list): List of channel names to add to edge.
        channel_capacity (int,float): Capacity to allocate to each channel.
        bidirectional_links (bool): If True, each link has capacity split equally
            between src and dst port. I.e. all links have a src and dst port
            which are treated separately to incoming and outgoing traffic to and
            from given node (switch or server).
    """
    if bidirectional_links:
        # Each direction of travel gets half of the link's capacity.
        attrs = {edge: {'{}_to_{}_port'.format(edge[0], edge[1]): {
            'channels': {channel: (channel_capacity / 2) for channel in
            channel_names}, 'max_channel_capacity': channel_capacity / 2},
            '{}_to_{}_port'.format(edge[1], edge[0]): {'channels': {channel:
            (channel_capacity / 2) for channel in channel_names},
            'max_channel_capacity': channel_capacity / 2}}}
    else:
        attrs = {edge: {'channels': {channel: channel_capacity for channel in
            channel_names}, 'max_channel_capacity': channel_capacity}}
    nx.set_edge_attributes(network, attrs)
# NOTE(review): byte-for-byte duplicate of add_edges_capacity_attrs() defined
# earlier in this file; Python binds the later definition. Consider
# deduplicating.
def add_edges_capacity_attrs(network, edges, channel_names,
    channel_capacity, bidirectional_links=True):
    """Adds channels & max channel capacities to each given edge in network.

    To access e.g. the edge going from node 0 to node 1 (edge (0, 1)), you
    would index the network with network[0][1].

    To access e.g. the channel_1 attribute of this particular (0, 1) edge, you
    would do network[0][1]['channels']['channel_1']
    OR
    if bidirectional_links, you do network[0][1]['0_to_1_port']['channels']['channel_1']
    or network[0][1]['1_to_0_port']['channels']['channel_1'] depending on which
    direction of the link you want to access.

    Args:
        network (networkx graph): Network containing edges to which attrs will
            be added.
        edges (list): List of node pairs in tuples.
        channel_names (list of str): List of channel names to add to edge.
        channel_capacity (int, float): Capacity to allocate to each channel.
        bidirectional_links (bool): If True, each link has capacity split equally
            between src and dst port. I.e. all links have a src and dst port
            which are treated separately to incoming and outgoing traffic to and
            from given node (switch or server).
    """
    if bidirectional_links:
        # Each direction of travel gets half of the link's capacity.
        attrs = {edge: {'{}_to_{}_port'.format(edge[0], edge[1]): {
            'channels': {channel: (channel_capacity / 2) for channel in
            channel_names}, 'max_channel_capacity': channel_capacity / 2},
            '{}_to_{}_port'.format(edge[1], edge[0]): {'channels': {channel:
            (channel_capacity / 2) for channel in channel_names},
            'max_channel_capacity': channel_capacity / 2}} for edge in edges}
    else:
        attrs = {edge: {'channels': {channel: channel_capacity for channel in
            channel_names}, 'max_channel_capacity': channel_capacity} for
            edge in edges}
    nx.set_edge_attributes(network, attrs)
def get_node_type_dict(network, node_types=None):
    """Groups a network's nodes by node type.

    A node is assigned to every type whose label occurs as a substring of the
    node's name (e.g. 'server' matches 'server_3').

    Args:
        network (networkx graph): Network whose nodes should be grouped.
        node_types (list): Node type labels (e.g. ['server', 'rack']) to group
            by. Defaults to no types (an empty dict is returned).

    Returns:
        dict: Maps each node type to the list of nodes of that type.
    """
    # Default is None rather than [] to avoid the mutable-default-argument
    # pitfall; behaviour is identical to the previous [] default.
    if node_types is None:
        node_types = []
    network_nodes_dict = {node_type: [] for node_type in node_types}
    for node in network.nodes:
        for node_type in node_types:
            if node_type in node:
                network_nodes_dict[node_type].append(node)
    return network_nodes_dict
def get_fat_tree_positions(net, width_scale=500, height_scale=10):
    """Gets networkx positions of nodes in fat tree network for plotting."""
    positions = {}
    type_to_nodes = get_node_type_dict(net, net.graph['node_labels'])
    # Each node type occupies its own horizontal layer; layers are numbered
    # bottom-up starting at height 1 (supports up to 5 node-type layers).
    layer_heights = iter([1, 2, 3, 4, 5])
    for node_type, nodes in type_to_nodes.items():
        height = next(layer_heights)
        # Spread this layer's nodes evenly across the available width.
        spacing = 1 / (len(nodes) + 1)
        for idx, node in enumerate(nodes):
            positions[node] = ((idx + 1) * spacing * width_scale,
                               height * height_scale)
    return positions
def init_network_node_positions(net):
    """Initialises network node positions for plotting."""
    # Fat trees get the custom layered layout; everything else falls back to
    # graphviz's force-directed 'neato' layout.
    if net.graph['topology_type'] == 'fat_tree':
        return get_fat_tree_positions(net)
    return nx.nx_agraph.graphviz_layout(net, prog='neato')
def plot_network(network, draw_node_labels=True, ep_label='server',
    network_node_size=2000, font_size=30, linewidths=1, fig_scale=2,
    path_to_save=None, show_fig=False):
    """Plots networkx graph.

    Recognises the special fat tree network and applies appropriate node
    positioning, labelling, colouring etc.

    Args:
        network (networkx graph): Network object to be plotted.
        draw_node_labels (bool): Whether or not to draw node labels on plot.
        ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints
            will have ep_label prepended to their label (e.g. 'server_0', ...).
        network_node_size (int,float): Size of plotted nodes.
        font_size (int,float): Font size of plotted labels etc.
        linewidths (int,float): Width of edges in network.
        fig_scale (int,float): Scaling factor to apply to plotted network.
        path_to_save (str): Path (with file name included) at which to pickle
            the generated figure. E.g. path_to_save='data/my_plot'.
        show_fig (bool): Whether or not to plot and show fig. If True, will
            return and display fig.

    Returns:
        matplotlib.figure.Figure: node distribution plotted as a 2d matrix.
    """
    # Deep-copy so layout computation cannot mutate the caller's network.
    node_positions = init_network_node_positions(copy.deepcopy(network))
    fig = plt.figure(figsize=[15 * fig_scale, 15 * fig_scale])
    # Restrict the position dict to the nodes we actually draw.
    pos = {}
    nodes_by_type = get_node_type_dict(network, network.graph['node_labels'])
    for type_nodes in nodes_by_type.values():
        for node in type_nodes:
            pos[node] = node_positions[node]
    # One colour per node-type layer, consumed in node_labels order.
    colours = iter(['#25c44d', '#36a0c7', '#e8b017', '#6115a3', '#160e63'])
    for node_type in network.graph['node_labels']:
        nx.draw_networkx_nodes(network, pos,
            nodelist=nodes_by_type[node_type],
            node_size=network_node_size,
            node_color=next(colours),
            linewidths=linewidths,
            label=node_type)
    if draw_node_labels:
        nx.draw_networkx_labels(network, pos, font_size=font_size,
            font_color='k', font_family='sans-serif', font_weight='normal',
            alpha=1.0)
    fibre_links = list(network.edges)
    nx.draw_networkx_edges(network, pos, edgelist=fibre_links,
        edge_color='k', width=3, label='Fibre link')
    if path_to_save is not None:
        # Pickle the figure object itself so it can be re-opened later.
        tools.pickle_data(path_to_save, fig)
    if show_fig:
        plt.show()
    return fig
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def gen_arbitrary_network(num_eps, ep_label=None, ep_capacity=12500,
    num_channels=1, racks_dict=None, topology_type=None):
    """Generates an arbitrary network with num_eps nodes labelled as ep_label.

    Note that no edges are formed in this network; it is purely for ep name
    indexing purposes when using the Demand class. This is useful where you
    want to use the Demand class but not necessarily with a carefully crafted
    networkx graph that accurately mimics the network you will use for the
    demands.

    Args:
        num_eps (int): Number of endpoints in network.
        ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints
            will have ep_label prepended to their label (e.g. 'server_0', ...).
        ep_capacity (int, float): Byte capacity per end point channel.
        num_channels (int, float): Number of channels on each link in network.
        racks_dict (dict): Mapping of rack ids to lists of end points. If None,
            assume there is no clustering/rack system in the network.
        topology_type (str): Topology label; auto-generated if None.

    Returns:
        networkx graph: network object
    """
    network = nx.Graph()
    network.add_nodes_from(range(num_eps))
    # With no ep_label the endpoints keep bare stringified-integer names.
    if ep_label is None:
        servers = [str(i) for i in range(num_eps)]
    else:
        servers = [(ep_label + '_' + str(i)) for i in range(num_eps)]
    network = nx.relabel_nodes(network, dict(zip(range(num_eps), servers)))
    eps = []
    for node in network.nodes:
        try:
            if ep_label in node:
                eps.append(node)
        except TypeError:
            # ep_label is None -> every node counts as an endpoint.
            eps.append(node)
    network.graph['endpoints'] = eps
    # Halve the summed endpoint capacity to avoid double-counting link ends.
    max_nw_capacity = num_eps * ep_capacity * num_channels / 2
    if topology_type is None:
        topology_type = 'arbitrary_endpoints_{}_chancap_{}_channels_{}'.format(
            num_eps, ep_capacity, num_channels)
    init_global_network_attrs(network, max_nw_capacity, num_channels,
        ep_link_capacity=ep_capacity * num_channels,
        endpoint_label=ep_label,
        node_labels=[ep_label],
        racks_dict=racks_dict,
        topology_type=topology_type)
    return network
def gen_nsfnet_network(ep_label='server', rack_label='rack', N=0,
    num_channels=2, server_to_rack_channel_capacity=1,
    rack_to_rack_channel_capacity=10, show_fig=False):
    """Generates the standard 14-node NSFNET topology (a U.S. core network).

    Args:
        ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints
            will have ep_label prepended to their label (e.g. 'server_0', ...).
        rack_label (str,int): Label given to the 14 core nodes when N > 0.
        N (int): Number of servers per rack. If 0, assume all nodes in nsfnet
            are endpoints.
        num_channels (int,float): Number of channels on each link in network.
        server_to_rack_channel_capacity (int,float): Byte capacity per channel
            between servers and ToR switch.
        rack_to_rack_channel_capacity (int,float): Byte capacity per channel
            between racks.
        show_fig (bool): Whether or not to plot and show fig. If True, will
            display fig.

    Returns:
        networkx graph: network object
    """
    channel_names = gen_channel_names(num_channels)
    network = nx.Graph()
    # NSFNET adjacency as node-index pairs.
    # NOTE(review): [4, 5] appears twice; nx.Graph de-duplicates it, but
    # confirm against the intended NSFNET edge list.
    node_pair_list = [[0, 1], [0, 3], [0, 2], [1, 2], [1, 7], [3, 8], [3, 4
        ], [3, 6], [4, 5], [4, 5], [5, 2], [5, 13], [5, 12], [6, 7], [7, 10
        ], [8, 11], [8, 9], [9, 10], [9, 12], [10, 11], [10, 13], [11, 12]]
    # With no servers per node (N == 0) the 14 core nodes are themselves the
    # endpoints; otherwise they become racks with N servers hung off each.
    label = ep_label if N == 0 else rack_label
    for pair in node_pair_list:
        pair[0] = label + '_' + str(pair[0])
        pair[1] = label + '_' + str(pair[1])
    for pair in node_pair_list:
        network.add_edge(*tuple(pair))
    if N == 0:
        racks_dict = None
    else:
        racks_dict = {rack: [] for rack in range(14)}
        server_id = 0
        for rack in range(14):
            for _ in range(N):
                server_name = ep_label + '_' + str(server_id)
                racks_dict[rack].append(server_name)
                network.add_edge(server_name, rack_label + '_' + str(rack))
                server_id += 1
    channel_names = gen_channel_names(num_channels)
    add_edges_capacity_attrs(network, list(network.edges), channel_names,
        rack_to_rack_channel_capacity)
    network.graph['endpoints'] = get_endpoints(network, ep_label)
    # Halve the summed link capacity to avoid double-counting link ends.
    max_nw_capacity = (len(network.edges) * num_channels *
        rack_to_rack_channel_capacity / 2)
    init_global_network_attrs(network, max_nw_capacity, num_channels,
        ep_link_capacity=server_to_rack_channel_capacity * num_channels,
        endpoint_label=ep_label,
        node_labels=[ep_label, rack_label],
        topology_type='14_node_nsfnet',
        racks_dict=racks_dict)
    if show_fig:
        plot_network(network, show_fig=True)
    return network
def gen_simple_network(ep_label='server', num_channels=2,
    server_to_rack_channel_capacity=500, show_fig=False):
    """Generates very simple 5-node topology.

    Args:
        ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints
            will have ep_label prepended to their label (e.g. 'server_0', ...).
        num_channels (int,float): Number of channels on each link in network.
        server_to_rack_channel_capacity (int,float): Byte capacity per channel.
        show_fig (bool): Whether or not to plot and show fig. If True, will
            display fig.

    Returns:
        networkx graph: network object
    """
    network = nx.Graph()
    network.add_nodes_from(range(5))
    # Fixed hand-crafted edge set; every link carries unit weight.
    network.add_edges_from([(0, 1), (0, 2), (1, 2), (2, 4), (4, 3), (3, 1)],
        weight=1)
    servers = [(ep_label + '_' + str(i)) for i in range(5)]
    network = nx.relabel_nodes(network, dict(zip(range(5), servers)))
    channel_names = gen_channel_names(num_channels)
    add_edges_capacity_attrs(network, list(network.edges), channel_names,
        server_to_rack_channel_capacity)
    network.graph['endpoints'] = get_endpoints(network, ep_label)
    # Halve the summed link capacity to avoid double-counting link ends.
    max_nw_capacity = (len(network.edges) * num_channels *
        server_to_rack_channel_capacity / 2)
    init_global_network_attrs(network, max_nw_capacity, num_channels,
        ep_link_capacity=server_to_rack_channel_capacity * num_channels,
        endpoint_label=ep_label,
        node_labels=[ep_label],
        topology_type='5_node_simple_network')
    if show_fig:
        plot_network(network, show_fig=True)
    return network
def get_endpoints(network, ep_label):
    """Gets list of endpoints of network.

    Args:
        network (networkx graph): Networkx object.
        ep_label (str,int,float): Endpoint label (e.g. 'server'); any node
            whose name contains this label is treated as an endpoint.

    Returns:
        eps (list): List of endpoints.
    """
    return [node for node in network.nodes if ep_label in node]
def gen_fat_tree(k=4, L=2, n=4, ep_label='server', rack_label='rack',
    edge_label='edge', aggregate_label='agg', core_label='core',
    num_channels=2, server_to_rack_channel_capacity=500,
    rack_to_edge_channel_capacity=1000, edge_to_agg_channel_capacity=1000,
    agg_to_core_channel_capacity=2000, rack_to_core_channel_capacity=2000,
    show_fig=False):
    """Generates a perfect fat tree (i.e. all layers have switches with same radix/number of ports).

    Top layer is always core (spine) switch layer, bottom layer is always
    ToR (leaf) layer.
    L must be either 2 (core, ToR) or 4 (core, agg, edge, ToR)
    N.B. L=2 is commonly referred to as '2-layer Clos' or 'Clos' or 'spine-leaf' topology
    Resource for building (scroll down to summary table with equations):
    https://packetpushers.net/demystifying-dcn-topologies-clos-fat-trees-part2/
    Another good resource for data centre topologies etc. in general:
    https://www.oreilly.com/library/view/bgp-in-the/9781491983416/ch01.html#:~:text=The%20most%20common%20routing%20protocol,single%20data%20center%2C%20as%20well.

    Parameters of network:
    - number of core (spine) switches = (k/2)^(L/2) (top layer)
    - number of edge switches (if L=4) = (k^2)/2
    - number of agg switches (if L=4) = (k^2)/2
    - number of pods (if L=4) (pod is a group of agg and/or edge switches) = 2*(k/2)^(L-2)
    - number of ToR (leaf) switches (racks) = 2*(k/2)^(L-1) (bottom layer)
    - number of server-facing ToR 'host' ports = 2*(k/2)^2 (can have multiple servers connected to same host port, & can oversubscribe)
    - number of servers = number ToR switches * n

    Args:
        k (int): Number of ports (links) on each switch (both up and down).
        L (int): Number of layers in the fat tree.
        n (int): Number of server per rack.
        ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have
            ep_label appended to the start of their label (e.g. 'server_0', 'server_1', ...).
        rack_label (str,int): Label to assign to rack (ToR switch) nodes.
        edge_label (str,int): Label to assign to edge switch nodes
        aggregate_label (str,int): Label to assign to edge switch nodes
        core_label (str,int): Label to assign to core switch nodes
        num_channels (int, float): Number of channels on each link in network
        server_to_rack_channel_capacity (int,float): Byte capacity per channel
        rack_to_edge_channel_capacity (int,float): (if L==4) Byte capacity per channel
        edge_to_agg_channel_capacity (int,float): (if L==4) Byte capacity per channel
        agg_to_core_channel_capacity (int,float): (if L==4) Byte capacity per channel
        rack_to_core_channel_capacity (int,float): (if L==2) Byte capacity per channel
        show_fig (bool): If True, plot and display the generated network.

    Returns:
        networkx graph: network object
    """
    if L != 2 and L != 4:
        raise Exception(
            'L must be 2 (ToR layer, core layer) or 4 (ToR layer, edge layer, agg layer, core layer), but is {}.'
            .format(L))
    if k % 2 != 0:
        raise Exception(
            'k must be even since, in perfect fat tree, have equal number of up and down ports on each switch, but is {}.'
            .format(k))
    channel_names = gen_channel_names(num_channels)
    # Which node label classes exist depends on 2- vs 4-layer topology.
    if L == 2:
        node_labels = [ep_label, rack_label, core_label]
    else:
        node_labels = [ep_label, rack_label, edge_label, aggregate_label,
            core_label]
    # Layer sizes for a perfect fat tree (see summary table in docstring).
    num_cores = int((k / 2) ** (L / 2))
    num_aggs = int(k ** 2 / 2)
    num_edges = int(k ** 2 / 2)
    num_pods = int(2 * (k / 2) ** (L - 2))
    num_racks = int(2 * (k / 2) ** (L - 1))
    num_servers = int(num_racks * n)
    # Generate the node names for every layer up front.
    cores = [(core_label + '_' + str(i)) for i in range(num_cores)]
    aggs = [(aggregate_label + '_' + str(i)) for i in range(num_aggs)]
    edges = [(edge_label + '_' + str(i)) for i in range(num_edges)]
    racks = [(rack_label + '_' + str(i)) for i in range(num_racks)]
    servers = [(ep_label + '_' + str(i)) for i in range(num_servers)]
    core_layer = nx.Graph()
    rack_layer = nx.Graph()
    core_layer.add_nodes_from(cores)
    rack_layer.add_nodes_from(racks)
    fat_tree_network = nx.compose(core_layer, rack_layer)
    if L == 2:
        # Spine-leaf: each rack's k/2 up-ports connect straight to the cores.
        rack_iterator = iter(racks)
        for rack in racks:
            core_iterator = iter(cores)
            for up_port in range(int(k / 2)):
                core = next(core_iterator)
                fat_tree_network.add_edge(rack, core)
                add_edge_capacity_attrs(fat_tree_network, (rack, core),
                    channel_names, rack_to_core_channel_capacity)
    else:
        # 4-layer: partition the edge & agg switches into k pods of k/2 each.
        num_pods = int(k)
        pods = [[] for i in range(num_pods)]
        prev_iter = 0
        for pod_iter in range(len(pods)):
            curr_iter = int(prev_iter + k / 2)
            # pods[i][0] -> this pod's edge switches, pods[i][1] -> its aggs.
            pods[pod_iter].append(edges[prev_iter:curr_iter])
            pods[pod_iter].append(aggs[prev_iter:curr_iter])
            prev_iter = curr_iter
        pod_labels = [('pod_' + str(i)) for i in range(num_pods)]
        pods_dict = {tuple([pod]): nx.Graph() for pod in pod_labels}
        # Fully mesh each pod's agg switches with its edge switches.
        for pod_iter in range(num_pods):
            key = 'pod_' + str(pod_iter),
            pod_edges = pods[pod_iter][0]
            pod_aggs = pods[pod_iter][1]
            pods_dict[key].add_nodes_from(pod_edges)
            pods_dict[key].add_nodes_from(pod_aggs)
            for pod_edge in pod_edges:
                for pod_agg in pod_aggs:
                    pods_dict[key].add_edge(pod_agg, pod_edge)
                    add_edge_capacity_attrs(pods_dict[key], (pod_agg,
                        pod_edge), channel_names, edge_to_agg_channel_capacity)
        # Merge the pod subgraphs into the overall network.
        pod_networks = list(pods_dict.values())
        for pod_iter in range(num_pods):
            fat_tree_network = nx.compose(fat_tree_network, pod_networks[
                pod_iter])
        # Wire aggs up to cores until every agg has its full k links;
        # the shared core_iterator distributes core ports round the pods.
        for pod_iter in range(num_pods):
            pod_aggs = pods[pod_iter][1]
            core_iterator = iter(cores)
            for pod_agg in pod_aggs:
                while fat_tree_network.degree[pod_agg] < k:
                    core = next(core_iterator)
                    fat_tree_network.add_edge(core, pod_agg)
                    add_edge_capacity_attrs(fat_tree_network, (core,
                        pod_agg), channel_names, agg_to_core_channel_capacity)
        # Wire edge switches down to racks until every edge has k links.
        rack_iterator = iter(racks)
        for pod_iter in range(num_pods):
            pod_edges = pods[pod_iter][0]
            for pod_edge in pod_edges:
                while fat_tree_network.degree[pod_edge] < k:
                    rack = next(rack_iterator)
                    fat_tree_network.add_edge(pod_edge, rack)
                    add_edge_capacity_attrs(fat_tree_network, (pod_edge,
                        rack), channel_names, rack_to_edge_channel_capacity)
    # Attach n servers to every rack and record each rack's membership.
    racks_dict = {rack: [] for rack in racks}
    server_iterator = iter(servers)
    for rack in racks:
        for _ in range(n):
            server = next(server_iterator)
            fat_tree_network.add_edge(rack, server)
            add_edge_capacity_attrs(fat_tree_network, (rack, server),
                channel_names, server_to_rack_channel_capacity)
            racks_dict[rack].append(server)
    # Total server-link capacity, halved to avoid double-counting link ends.
    max_nw_capacity = (num_servers * num_channels *
        server_to_rack_channel_capacity / 2)
    fat_tree_network.graph['endpoints'] = servers
    init_global_network_attrs(fat_tree_network, max_nw_capacity,
        num_channels, ep_link_capacity=server_to_rack_channel_capacity *
        num_channels, endpoint_label=ep_label, node_labels=node_labels,
        topology_type='fat_tree', racks_dict=racks_dict)
    if show_fig:
        plot_network(fat_tree_network, show_fig=True)
    return fat_tree_network
def init_global_network_attrs(network, max_nw_capacity, num_channels,
    ep_link_capacity, endpoint_label='server', topology_type='unknown',
    node_labels=['server'], racks_dict=None):
    """Initialises the standard global network attributes of a given network.

    Args:
        network (obj): NetworkX object.
        max_nw_capacity (int/float): Maximum rate at which info can be reliably
            transmitted over the network (sum of all link capacities).
        num_channels (int): Number of channels on each link in network.
        ep_link_capacity (int/float): Byte capacity of each endpoint link.
        endpoint_label (str): Label prepended to endpoint node names.
        topology_type (str): Label of network topology (e.g. 'fat_tree').
        node_labels (list): Label classes assigned to network nodes
            (e.g. ['server', 'rack', 'edge']).
        racks_dict (dict): Which servers/endpoints are in which rack. If None,
            assume do not have rack system where have multiple servers in one
            rack.
    """
    graph = network.graph
    graph['endpoint_label'] = endpoint_label
    graph['num_channels_per_link'] = num_channels
    graph['ep_link_capacity'] = ep_link_capacity
    # Each endpoint link is split into a src and dst port of equal capacity.
    graph['ep_link_port_capacity'] = ep_link_capacity / 2
    graph['max_nw_capacity'] = max_nw_capacity
    graph['curr_nw_capacity_used'] = 0
    graph['num_active_connections'] = 0
    graph['total_connections_blocked'] = 0
    graph['node_labels'] = node_labels
    graph['topology_type'] = topology_type
    graph['channel_names'] = gen_channel_names(num_channels)
    if racks_dict is None:
        graph['rack_to_ep_dict'] = None
        graph['ep_to_rack_dict'] = None
    else:
        # Stringify keys/values so the mappings are e.g. JSON-serialisable.
        rack_to_eps = {}
        for rack, eps in racks_dict.items():
            rack_to_eps[str(rack)] = [str(ep) for ep in eps]
        graph['rack_to_ep_dict'] = rack_to_eps
        # Build the inverse mapping (endpoint -> rack); first rack wins if an
        # endpoint somehow appears under more than one rack.
        ep_to_rack = {}
        for rack, eps in rack_to_eps.items():
            for ep in eps:
                if ep not in ep_to_rack:
                    ep_to_rack[ep] = rack
        graph['ep_to_rack_dict'] = ep_to_rack
def gen_channel_names(num_channels):
    """Generates channel names for channels on each link in network."""
    # Channels are 1-indexed: 'channel_1', 'channel_2', ...
    return ['channel_{}'.format(i) for i in range(1, num_channels + 1)]
def add_edge_capacity_attrs(network, edge, channel_names, channel_capacity,
    bidirectional_links=True):
    """Adds channels and corresponding max channel bytes to single edge in network.

    Args:
        network (networkx graph): Network containing the edge to annotate.
        edge (tuple): Node-node edge pair.
        channel_names (list): List of channel names to add to edge.
        channel_capacity (int,float): Capacity to allocate to each channel.
        bidirectional_links (bool): If True, the link's capacity is split
            equally between a src and a dst port, which are treated separately
            for incoming and outgoing traffic at each node (switch or server).
    """
    if bidirectional_links:
        # Each direction gets its own port dict holding half the capacity.
        half = channel_capacity / 2
        src_port = '{}_to_{}_port'.format(edge[0], edge[1])
        dst_port = '{}_to_{}_port'.format(edge[1], edge[0])
        attrs = {edge: {
            src_port: {'channels': {name: half for name in channel_names},
                       'max_channel_capacity': half},
            dst_port: {'channels': {name: half for name in channel_names},
                       'max_channel_capacity': half}}}
    else:
        attrs = {edge: {
            'channels': {name: channel_capacity for name in channel_names},
            'max_channel_capacity': channel_capacity}}
    nx.set_edge_attributes(network, attrs)
def add_edges_capacity_attrs(network, edges, channel_names,
    channel_capacity, bidirectional_links=True):
    """Adds channels & max channel capacities to multiple edges in a network.

    To access e.g. the edge going from node 0 to node 1 (edge (0, 1)), index
    the network with network[0][1]. To access e.g. the channel_1 attribute of
    that edge, use network[0][1]['channels']['channel_1'], OR, when
    bidirectional_links is True, network[0][1]['0_to_1_port']['channels']['channel_1']
    (and similarly '1_to_0_port') depending on which direction of the link you
    want to access.

    Args:
        network (networkx graph): Network containing the edges to annotate.
        edges (list): List of node pairs in tuples.
        channel_names (list of str): Channel names to add to each edge.
        channel_capacity (int, float): Capacity to allocate to each channel.
        bidirectional_links (bool): If True, each link's capacity is split
            equally between a src and a dst port, which are treated separately
            for incoming and outgoing traffic at each node (switch or server).
    """
    if bidirectional_links:
        # Each direction gets its own port dict holding half the capacity.
        half = channel_capacity / 2
        attrs = {}
        for src, dst in edges:
            attrs[(src, dst)] = {
                '{}_to_{}_port'.format(src, dst): {
                    'channels': {name: half for name in channel_names},
                    'max_channel_capacity': half},
                '{}_to_{}_port'.format(dst, src): {
                    'channels': {name: half for name in channel_names},
                    'max_channel_capacity': half}}
    else:
        attrs = {}
        for edge in edges:
            attrs[edge] = {
                'channels': {name: channel_capacity for name in channel_names},
                'max_channel_capacity': channel_capacity}
    nx.set_edge_attributes(network, attrs)
def get_node_type_dict(network, node_types=None):
    """Groups a network's nodes by node type.

    A node is assigned to every type whose label occurs as a substring of the
    node's name (e.g. 'server' matches 'server_3').

    Args:
        network (networkx graph): Network whose nodes should be grouped.
        node_types (list): Node type labels (e.g. ['server', 'rack']) to group
            by. Defaults to no types (an empty dict is returned).

    Returns:
        dict: Maps each node type to the list of nodes of that type.
    """
    # Default is None rather than [] to avoid the mutable-default-argument
    # pitfall; behaviour is identical to the previous [] default.
    if node_types is None:
        node_types = []
    network_nodes_dict = {node_type: [] for node_type in node_types}
    for node in network.nodes:
        for node_type in node_types:
            if node_type in node:
                network_nodes_dict[node_type].append(node)
    return network_nodes_dict
def get_fat_tree_positions(net, width_scale=500, height_scale=10):
    """Gets networkx positions of nodes in fat tree network for plotting."""
    positions = {}
    type_to_nodes = get_node_type_dict(net, net.graph['node_labels'])
    # Each node type occupies its own horizontal layer; layers are numbered
    # bottom-up starting at height 1 (supports up to 5 node-type layers).
    layer_heights = iter([1, 2, 3, 4, 5])
    for node_type, nodes in type_to_nodes.items():
        height = next(layer_heights)
        # Spread this layer's nodes evenly across the available width.
        spacing = 1 / (len(nodes) + 1)
        for idx, node in enumerate(nodes):
            positions[node] = ((idx + 1) * spacing * width_scale,
                               height * height_scale)
    return positions
def init_network_node_positions(net):
    """Initialises network node positions for plotting."""
    # Fat trees get the custom layered layout; everything else falls back to
    # graphviz's force-directed 'neato' layout.
    if net.graph['topology_type'] == 'fat_tree':
        return get_fat_tree_positions(net)
    return nx.nx_agraph.graphviz_layout(net, prog='neato')
def plot_network(network, draw_node_labels=True, ep_label='server',
    network_node_size=2000, font_size=30, linewidths=1, fig_scale=2,
    path_to_save=None, show_fig=False):
    """Plots networkx graph.

    Recognises the special fat tree network and applies appropriate node
    positioning, labelling, colouring etc.

    Args:
        network (networkx graph): Network object to be plotted.
        draw_node_labels (bool): Whether or not to draw node labels on plot.
        ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints
            will have ep_label prepended to their label (e.g. 'server_0', ...).
        network_node_size (int,float): Size of plotted nodes.
        font_size (int,float): Font size of plotted labels etc.
        linewidths (int,float): Width of edges in network.
        fig_scale (int,float): Scaling factor to apply to plotted network.
        path_to_save (str): Path (with file name included) at which to pickle
            the generated figure. E.g. path_to_save='data/my_plot'.
        show_fig (bool): Whether or not to plot and show fig. If True, will
            return and display fig.

    Returns:
        matplotlib.figure.Figure: node distribution plotted as a 2d matrix.
    """
    # Deep-copy so layout computation cannot mutate the caller's network.
    node_positions = init_network_node_positions(copy.deepcopy(network))
    fig = plt.figure(figsize=[15 * fig_scale, 15 * fig_scale])
    # Restrict the position dict to the nodes we actually draw.
    pos = {}
    nodes_by_type = get_node_type_dict(network, network.graph['node_labels'])
    for type_nodes in nodes_by_type.values():
        for node in type_nodes:
            pos[node] = node_positions[node]
    # One colour per node-type layer, consumed in node_labels order.
    colours = iter(['#25c44d', '#36a0c7', '#e8b017', '#6115a3', '#160e63'])
    for node_type in network.graph['node_labels']:
        nx.draw_networkx_nodes(network, pos,
            nodelist=nodes_by_type[node_type],
            node_size=network_node_size,
            node_color=next(colours),
            linewidths=linewidths,
            label=node_type)
    if draw_node_labels:
        nx.draw_networkx_labels(network, pos, font_size=font_size,
            font_color='k', font_family='sans-serif', font_weight='normal',
            alpha=1.0)
    fibre_links = list(network.edges)
    nx.draw_networkx_edges(network, pos, edgelist=fibre_links,
        edge_color='k', width=3, label='Fibre link')
    if path_to_save is not None:
        # Pickle the figure object itself so it can be re-opened later.
        tools.pickle_data(path_to_save, fig)
    if show_fig:
        plt.show()
    return fig
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from trafpy.generator.src import tools
import copy
import networkx as nx
import matplotlib.pyplot as plt
import json
def gen_arbitrary_network(num_eps, ep_label=None, ep_capacity=12500,
    num_channels=1, racks_dict=None, topology_type=None):
    """Generates an arbitrary network with num_eps nodes labelled as ep_label.

    Note that no edges are formed in this network; it is purely for ep name
    indexing purposes when using the Demand class. This is useful where you
    want to use the Demand class but not necessarily with a carefully crafted
    networkx graph that accurately mimics the network you will use for the
    demands.

    Args:
        num_eps (int): Number of endpoints in network.
        ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints
            will have ep_label prepended to their label (e.g. 'server_0', ...).
        ep_capacity (int, float): Byte capacity per end point channel.
        num_channels (int, float): Number of channels on each link in network.
        racks_dict (dict): Mapping of rack ids to lists of end points. If None,
            assume there is no clustering/rack system in the network.
        topology_type (str): Topology label; auto-generated if None.

    Returns:
        networkx graph: network object
    """
    network = nx.Graph()
    network.add_nodes_from(range(num_eps))
    # With no ep_label the endpoints keep bare stringified-integer names.
    if ep_label is None:
        servers = [str(i) for i in range(num_eps)]
    else:
        servers = [(ep_label + '_' + str(i)) for i in range(num_eps)]
    network = nx.relabel_nodes(network, dict(zip(range(num_eps), servers)))
    eps = []
    for node in network.nodes:
        try:
            if ep_label in node:
                eps.append(node)
        except TypeError:
            # ep_label is None -> every node counts as an endpoint.
            eps.append(node)
    network.graph['endpoints'] = eps
    # Halve the summed endpoint capacity to avoid double-counting link ends.
    max_nw_capacity = num_eps * ep_capacity * num_channels / 2
    if topology_type is None:
        topology_type = 'arbitrary_endpoints_{}_chancap_{}_channels_{}'.format(
            num_eps, ep_capacity, num_channels)
    init_global_network_attrs(network, max_nw_capacity, num_channels,
        ep_link_capacity=ep_capacity * num_channels,
        endpoint_label=ep_label,
        node_labels=[ep_label],
        racks_dict=racks_dict,
        topology_type=topology_type)
    return network
def gen_nsfnet_network(ep_label='server', rack_label='rack', N=0,
    num_channels=2, server_to_rack_channel_capacity=1,
    rack_to_rack_channel_capacity=10, show_fig=False):
    """Generates the standard 14-node NSFNET topology (a U.S. core network).

    Args:
        ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints
            will have ep_label prepended to their label (e.g. 'server_0', ...).
        rack_label (str,int): Label given to the 14 core nodes when N > 0.
        N (int): Number of servers per rack. If 0, assume all nodes in nsfnet
            are endpoints.
        num_channels (int,float): Number of channels on each link in network.
        server_to_rack_channel_capacity (int,float): Byte capacity per channel
            between servers and ToR switch.
        rack_to_rack_channel_capacity (int,float): Byte capacity per channel
            between racks.
        show_fig (bool): Whether or not to plot and show fig. If True, will
            display fig.

    Returns:
        networkx graph: network object
    """
    channel_names = gen_channel_names(num_channels)
    network = nx.Graph()
    # NSFNET adjacency as node-index pairs.
    # NOTE(review): [4, 5] appears twice; nx.Graph de-duplicates it, but
    # confirm against the intended NSFNET edge list.
    node_pair_list = [[0, 1], [0, 3], [0, 2], [1, 2], [1, 7], [3, 8], [3, 4
        ], [3, 6], [4, 5], [4, 5], [5, 2], [5, 13], [5, 12], [6, 7], [7, 10
        ], [8, 11], [8, 9], [9, 10], [9, 12], [10, 11], [10, 13], [11, 12]]
    # With no servers per node (N == 0) the 14 core nodes are themselves the
    # endpoints; otherwise they become racks with N servers hung off each.
    label = ep_label if N == 0 else rack_label
    for pair in node_pair_list:
        pair[0] = label + '_' + str(pair[0])
        pair[1] = label + '_' + str(pair[1])
    for pair in node_pair_list:
        network.add_edge(*tuple(pair))
    if N == 0:
        racks_dict = None
    else:
        racks_dict = {rack: [] for rack in range(14)}
        server_id = 0
        for rack in range(14):
            for _ in range(N):
                server_name = ep_label + '_' + str(server_id)
                racks_dict[rack].append(server_name)
                network.add_edge(server_name, rack_label + '_' + str(rack))
                server_id += 1
    channel_names = gen_channel_names(num_channels)
    add_edges_capacity_attrs(network, list(network.edges), channel_names,
        rack_to_rack_channel_capacity)
    network.graph['endpoints'] = get_endpoints(network, ep_label)
    # Halve the summed link capacity to avoid double-counting link ends.
    max_nw_capacity = (len(network.edges) * num_channels *
        rack_to_rack_channel_capacity / 2)
    init_global_network_attrs(network, max_nw_capacity, num_channels,
        ep_link_capacity=server_to_rack_channel_capacity * num_channels,
        endpoint_label=ep_label,
        node_labels=[ep_label, rack_label],
        topology_type='14_node_nsfnet',
        racks_dict=racks_dict)
    if show_fig:
        plot_network(network, show_fig=True)
    return network
def gen_simple_network(ep_label='server', num_channels=2,
    server_to_rack_channel_capacity=500, show_fig=False):
    """Generates very simple 5-node topology.

    Args:
        ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints
            will have ep_label prepended to their label (e.g. 'server_0', ...).
        num_channels (int,float): Number of channels on each link in network.
        server_to_rack_channel_capacity (int,float): Byte capacity per channel.
        show_fig (bool): Whether or not to plot and show fig. If True, will
            display fig.

    Returns:
        networkx graph: network object
    """
    network = nx.Graph()
    network.add_nodes_from(range(5))
    # Fixed hand-crafted edge set; every link carries unit weight.
    network.add_edges_from([(0, 1), (0, 2), (1, 2), (2, 4), (4, 3), (3, 1)],
        weight=1)
    servers = [(ep_label + '_' + str(i)) for i in range(5)]
    network = nx.relabel_nodes(network, dict(zip(range(5), servers)))
    channel_names = gen_channel_names(num_channels)
    add_edges_capacity_attrs(network, list(network.edges), channel_names,
        server_to_rack_channel_capacity)
    network.graph['endpoints'] = get_endpoints(network, ep_label)
    # Halve the summed link capacity to avoid double-counting link ends.
    max_nw_capacity = (len(network.edges) * num_channels *
        server_to_rack_channel_capacity / 2)
    init_global_network_attrs(network, max_nw_capacity, num_channels,
        ep_link_capacity=server_to_rack_channel_capacity * num_channels,
        endpoint_label=ep_label,
        node_labels=[ep_label],
        topology_type='5_node_simple_network')
    if show_fig:
        plot_network(network, show_fig=True)
    return network
def get_endpoints(network, ep_label):
    """Gets list of endpoints of network.

    Args:
        network (networkx graph): Networkx object.
        ep_label (str,int,float): Endpoint label (e.g. 'server'); any node
            whose name contains this label is treated as an endpoint.

    Returns:
        eps (list): List of endpoints.
    """
    return [node for node in network.nodes if ep_label in node]
def gen_fat_tree(k=4, L=2, n=4, ep_label='server', rack_label='rack',
    edge_label='edge', aggregate_label='agg', core_label='core',
    num_channels=2, server_to_rack_channel_capacity=500,
    rack_to_edge_channel_capacity=1000, edge_to_agg_channel_capacity=1000,
    agg_to_core_channel_capacity=2000, rack_to_core_channel_capacity=2000,
    show_fig=False):
    """Generates a perfect fat tree (i.e. all layers have switches with same radix/number of ports).

    Top layer is always core (spine) switch layer, bottom layer is always
    ToR (leaf) layer.

    L must be either 2 (core, ToR) or 4 (core, agg, edge, ToR)

    N.B. L=2 is commonly referred to as '2-layer Clos' or 'Clos' or 'spine-leaf' topology

    Resource for building (scroll down to summary table with equations):
    https://packetpushers.net/demystifying-dcn-topologies-clos-fat-trees-part2/

    Another good resource for data centre topologies etc. in general:
    https://www.oreilly.com/library/view/bgp-in-the/9781491983416/ch01.html#:~:text=The%20most%20common%20routing%20protocol,single%20data%20center%2C%20as%20well.

    Parameters of network:
    - number of core (spine) switches = (k/2)^(L/2) (top layer)
    - number of edge switches (if L=4) = (k^2)/2
    - number of agg switches (if L=4) = (k^2)/2
    - number of pods (if L=4) (pod is a group of agg and/or edge switches) = 2*(k/2)^(L-2)
    - number of ToR (leaf) switches (racks) = 2*(k/2)^(L-1) (bottom layer)
    - number of server-facing ToR 'host' ports = 2*(k/2)^2 (can have multiple servers connected to same host port, & can oversubscribe)
    - number of servers = number ToR switches * n

    Args:
        k (int): Number of ports (links) on each switch (both up and down).
        L (int): Number of layers in the fat tree.
        n (int): Number of server per rack.
        ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have
            ep_label appended to the start of their label (e.g. 'server_0', 'server_1', ...).
        edge_label (str,int): Label to assign to edge switch nodes
        aggregate_label (str,int): Label to assign to edge switch nodes
        core_label (str,int): Label to assign to core switch nodes
        num_channels (int, float): Number of channels on each link in network
        server_to_edge_channel_capacity (int,float): Byte capacity per channel
        edge_to_agg_channel_capacity (int,float): (if L==4) Byte capacity per channel
        agg_to_core_channel_capacity (int,float): (if L==4) Byte capacity per channel
        rack_to_core_channel_capacity (int,float): (if L==2) Byte capacity per channel

    Returns:
        networkx graph: network object
    """
    # validate the only two supported layer counts and the even-radix constraint
    if L != 2 and L != 4:
        raise Exception(
            'L must be 2 (ToR layer, core layer) or 4 (ToR layer, edge layer, agg layer, core layer), but is {}.'
            .format(L))
    if k % 2 != 0:
        raise Exception(
            'k must be even since, in perfect fat tree, have equal number of up and down ports on each switch, but is {}.'
            .format(k))
    channel_names = gen_channel_names(num_channels)
    # node label classes present in the finished topology
    if L == 2:
        node_labels = [ep_label, rack_label, core_label]
    else:
        node_labels = [ep_label, rack_label, edge_label, aggregate_label,
            core_label]
    # perfect fat tree sizing (see equations in docstring)
    num_cores = int((k / 2) ** (L / 2))
    num_aggs = int(k ** 2 / 2)
    num_edges = int(k ** 2 / 2)
    num_pods = int(2 * (k / 2) ** (L - 2))
    num_racks = int(2 * (k / 2) ** (L - 1))
    num_servers = int(num_racks * n)
    # generate labelled node names for each layer
    cores = [(core_label + '_' + str(i)) for i in range(num_cores)]
    aggs = [(aggregate_label + '_' + str(i)) for i in range(num_aggs)]
    edges = [(edge_label + '_' + str(i)) for i in range(num_edges)]
    racks = [(rack_label + '_' + str(i)) for i in range(num_racks)]
    servers = [(ep_label + '_' + str(i)) for i in range(num_servers)]
    # create core and rack layer networks, then combine into a single graph
    core_layer = nx.Graph()
    rack_layer = nx.Graph()
    core_layer.add_nodes_from(cores)
    rack_layer.add_nodes_from(racks)
    fat_tree_network = nx.compose(core_layer, rack_layer)
    if L == 2:
        # 2 layers: Core, ToR. Link each rack to k/2 core switches.
        rack_iterator = iter(racks)
        for rack in racks:
            core_iterator = iter(cores)
            # have k/2 up-ports on each switch
            for up_port in range(int(k / 2)):
                core = next(core_iterator)
                fat_tree_network.add_edge(rack, core)
                add_edge_capacity_attrs(fat_tree_network, (rack, core),
                    channel_names, rack_to_core_channel_capacity)
    else:
        # 4 layers: Core, Agg, Edge, ToR. Agg & edge switches grouped into pods.
        num_pods = int(k)
        pods = [[] for i in range(num_pods)]
        prev_iter = 0
        # each pod holds k/2 edge switches and k/2 agg switches
        for pod_iter in range(len(pods)):
            curr_iter = int(prev_iter + k / 2)
            pods[pod_iter].append(edges[prev_iter:curr_iter])
            pods[pod_iter].append(aggs[prev_iter:curr_iter])
            prev_iter = curr_iter
        # create dict of per-pod sub-networks (keys are 1-tuples of pod labels)
        pod_labels = [('pod_' + str(i)) for i in range(num_pods)]
        pods_dict = {tuple([pod]): nx.Graph() for pod in pod_labels}
        for pod_iter in range(num_pods):
            key = 'pod_' + str(pod_iter),
            pod_edges = pods[pod_iter][0]
            pod_aggs = pods[pod_iter][1]
            pods_dict[key].add_nodes_from(pod_edges)
            pods_dict[key].add_nodes_from(pod_aggs)
            # fully connect edge and aggregate switches within the pod
            for pod_edge in pod_edges:
                for pod_agg in pod_aggs:
                    pods_dict[key].add_edge(pod_agg, pod_edge)
                    add_edge_capacity_attrs(pods_dict[key], (pod_agg,
                        pod_edge), channel_names, edge_to_agg_channel_capacity)
        # merge the pod (agg + edge) layers into the fat tree
        pod_networks = list(pods_dict.values())
        for pod_iter in range(num_pods):
            fat_tree_network = nx.compose(fat_tree_network, pod_networks[
                pod_iter])
        # link aggregate switches in pods to core switches
        for pod_iter in range(num_pods):
            pod_aggs = pods[pod_iter][1]
            core_iterator = iter(cores)
            for pod_agg in pod_aggs:
                # keep adding core uplinks until all k ports of the agg are used
                while fat_tree_network.degree[pod_agg] < k:
                    core = next(core_iterator)
                    fat_tree_network.add_edge(core, pod_agg)
                    add_edge_capacity_attrs(fat_tree_network, (core,
                        pod_agg), channel_names, agg_to_core_channel_capacity)
        # link edge switches in pods to racks
        rack_iterator = iter(racks)
        for pod_iter in range(num_pods):
            pod_edges = pods[pod_iter][0]
            for pod_edge in pod_edges:
                # fill the edge switch's remaining ports with rack downlinks
                while fat_tree_network.degree[pod_edge] < k:
                    rack = next(rack_iterator)
                    fat_tree_network.add_edge(pod_edge, rack)
                    add_edge_capacity_attrs(fat_tree_network, (pod_edge,
                        rack), channel_names, rack_to_edge_channel_capacity)
    # attach n servers to each rack, tracking rack membership for racks_dict
    racks_dict = {rack: [] for rack in racks}
    server_iterator = iter(servers)
    for rack in racks:
        for _ in range(n):
            server = next(server_iterator)
            fat_tree_network.add_edge(rack, server)
            add_edge_capacity_attrs(fat_tree_network, (rack, server),
                channel_names, server_to_rack_channel_capacity)
            racks_dict[rack].append(server)
    # /2 to get max theoretical capacity (number of units which network can
    # transfer per unit time)
    max_nw_capacity = (num_servers * num_channels *
        server_to_rack_channel_capacity / 2)
    fat_tree_network.graph['endpoints'] = servers
    init_global_network_attrs(fat_tree_network, max_nw_capacity,
        num_channels, ep_link_capacity=server_to_rack_channel_capacity *
        num_channels, endpoint_label=ep_label, node_labels=node_labels,
        topology_type='fat_tree', racks_dict=racks_dict)
    if show_fig:
        plot_network(fat_tree_network, show_fig=True)
    return fat_tree_network
def init_global_network_attrs(network, max_nw_capacity, num_channels,
    ep_link_capacity, endpoint_label='server', topology_type='unknown',
    node_labels=['server'], racks_dict=None):
    """Initialises the standard global network attributes of a given network.

    Args:
        network (obj): NetworkX object.
        max_nw_capacity (int/float): Maximum rate at which info can be reliably
            transmitted over the network (sum of all link capacities).
        num_channels (int): Number of channels on each link in network.
        ep_link_capacity (int/float): Total capacity of each endpoint link
            (across all of its channels).
        endpoint_label (str): Label class of the endpoint nodes (e.g. 'server').
        topology_type (str): Label of network topology (e.g. 'fat_tree').
        node_labels (list): Label classes assigned to network nodes
            (e.g. ['server', 'rack', 'edge']).
        racks_dict (dict): Which servers/endpoints are in which rack. If None,
            assume do not have rack system where have multiple servers in one
            rack.
    """
    network.graph['endpoint_label'] = endpoint_label
    network.graph['num_channels_per_link'] = num_channels
    network.graph['ep_link_capacity'] = ep_link_capacity
    # every endpoint link is split into a src port and a dst port
    network.graph['ep_link_port_capacity'] = ep_link_capacity / 2
    network.graph['max_nw_capacity'] = max_nw_capacity
    network.graph['curr_nw_capacity_used'] = 0
    network.graph['num_active_connections'] = 0
    network.graph['total_connections_blocked'] = 0
    network.graph['node_labels'] = node_labels
    network.graph['topology_type'] = topology_type
    network.graph['channel_names'] = gen_channel_names(num_channels)
    # BUG FIX: previously two separate `if racks_dict is not None` blocks were
    # used, with the second silently relying on `_racks_dict` leaking out of
    # the first; merged into a single branch.
    if racks_dict is None:
        network.graph['rack_to_ep_dict'] = None
        network.graph['ep_to_rack_dict'] = None
    else:
        # stringify keys/values so the graph attrs stay json serialisable
        rack_to_ep = {str(rack): [str(ep) for ep in eps]
                      for rack, eps in racks_dict.items()}
        network.graph['rack_to_ep_dict'] = rack_to_ep
        # inverted mapping (ep -> rack) for O(1) rack lookups; the first rack
        # listed for an endpoint wins, as in the original loop
        ep_to_rack = {}
        for rack, eps in rack_to_ep.items():
            for ep in eps:
                ep_to_rack.setdefault(ep, rack)
        network.graph['ep_to_rack_dict'] = ep_to_rack
def gen_channel_names(num_channels):
    """Generates channel names for channels on each link in network."""
    # channels are 1-indexed: 'channel_1', ..., 'channel_<num_channels>'
    return ['channel_{}'.format(i) for i in range(1, num_channels + 1)]
def add_edge_capacity_attrs(network, edge, channel_names, channel_capacity,
    bidirectional_links=True):
    """Adds channels and corresponding max channel bytes to single edge in network.

    Args:
        network (networkx graph): Network containing edges to which attrs will
            be added.
        edge (tuple): Node-node edge pair.
        channel_names (list): List of channel names to add to edge.
        channel_capacity (int,float): Capacity to allocate to each channel.
        bidirectional_links (bool): If True, each link has capacity split equally
            between src and dst port. I.e. all links have a src and dst port
            which are treated separately to incoming and outgoing traffic to and
            from given node (switch or server).
    """
    if bidirectional_links:
        # split the link into two directed ports, each with half the capacity
        half_capacity = channel_capacity / 2

        def _port_attrs():
            return {'channels': {name: half_capacity for name in channel_names},
                    'max_channel_capacity': half_capacity}

        attrs = {edge: {
            '{}_to_{}_port'.format(edge[0], edge[1]): _port_attrs(),
            '{}_to_{}_port'.format(edge[1], edge[0]): _port_attrs()}}
    else:
        # single undirected capacity pool shared by both directions
        attrs = {edge: {
            'channels': {name: channel_capacity for name in channel_names},
            'max_channel_capacity': channel_capacity}}
    nx.set_edge_attributes(network, attrs)
def add_edges_capacity_attrs(network, edges, channel_names,
    channel_capacity, bidirectional_links=True):
    """Adds channels & max channel capacities to multiple edges in network.

    To access e.g. the edge going from node 0 to node 1 (edge (0, 1)), you
    would index the network with network[0][1]. To access e.g. the channel_1
    attribute of this particular (0, 1) edge, you would do
    network[0][1]['channels']['channel_1'], OR, if bidirectional_links,
    network[0][1]['0_to_1_port']['channels']['channel_1'] /
    network[0][1]['1_to_0_port']['channels']['channel_1'] depending on which
    direction of the link you want to access.

    Args:
        network (networkx graph): Network containing edges to which attrs will
            be added.
        edges (list): List of node pairs in tuples.
        channel_names (list of str): List of channel names to add to edge.
        channel_capacity (int, float): Capacity to allocate to each channel.
        bidirectional_links (bool): If True, each link has capacity split equally
            between src and dst port. I.e. all links have a src and dst port
            which are treated separately to incoming and outgoing traffic to and
            from given node (switch or server).
    """
    if bidirectional_links:
        # each link becomes two directed ports with half the capacity each
        half_capacity = channel_capacity / 2
        attrs = {}
        for edge in edges:
            forward_port = '{}_to_{}_port'.format(edge[0], edge[1])
            reverse_port = '{}_to_{}_port'.format(edge[1], edge[0])
            attrs[edge] = {
                forward_port: {
                    'channels': {name: half_capacity for name in channel_names},
                    'max_channel_capacity': half_capacity},
                reverse_port: {
                    'channels': {name: half_capacity for name in channel_names},
                    'max_channel_capacity': half_capacity}}
    else:
        attrs = {edge: {
            'channels': {name: channel_capacity for name in channel_names},
            'max_channel_capacity': channel_capacity} for edge in edges}
    nx.set_edge_attributes(network, attrs)
def get_node_type_dict(network, node_types=None):
    """Gets dict where keys are node types, values are list of nodes for each node type in graph.

    Args:
        network (networkx graph): Network whose nodes are to be classified.
        node_types (list): Node type labels to bucket nodes under (e.g.
            ['server', 'rack']). A node is listed under every type whose label
            occurs in its name (substring match). Defaults to an empty list.

    Returns:
        dict: Mapping of node type -> list of matching node names.
    """
    # avoid a mutable default argument; None means 'no types requested'
    if node_types is None:
        node_types = []
    network_nodes_dict = {node_type: [] for node_type in node_types}
    for node in network.nodes:
        for node_type in node_types:
            # substring match: e.g. 'rack' matches 'rack_3'
            if node_type in node:
                network_nodes_dict[node_type].append(node)
    return network_nodes_dict
def get_fat_tree_positions(net, width_scale=500, height_scale=10):
    """Gets networkx positions of nodes in fat tree network for plotting."""
    node_type_dict = get_node_type_dict(net, net.graph['node_labels'])
    pos = {}
    # each node-type class occupies its own horizontal layer, bottom-up
    layer_heights = iter([1, 2, 3, 4, 5])
    for node_type, nodes in node_type_dict.items():
        y = next(layer_heights) * height_scale
        # spread the layer's nodes evenly across the plot width
        spacing = width_scale / (len(nodes) + 1)
        for idx, node in enumerate(nodes, start=1):
            pos[node] = (idx * spacing, y)
    return pos
def init_network_node_positions(net):
    """Initialises network node positions for plotting."""
    # fat trees get a bespoke layered layout; everything else uses graphviz
    if net.graph['topology_type'] == 'fat_tree':
        return get_fat_tree_positions(net)
    return nx.nx_agraph.graphviz_layout(net, prog='neato')
def plot_network(network, draw_node_labels=True, ep_label='server',
    network_node_size=2000, font_size=30, linewidths=1, fig_scale=2,
    path_to_save=None, show_fig=False):
    """Plots networkx graph.

    Recognises special fat tree network and applies appropriate node positioning,
    labelling, colouring etc.

    Args:
        network (networkx graph): Network object to be plotted.
        draw_node_labels (bool): Whether or not to draw node labels on plot.
        ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have
            ep_label appended to the start of their label (e.g. 'server_0', 'server_1', ...).
        network_node_size (int,float): Size of plotted nodes.
        font_size (int,float): Size of of font of plotted labels etc.
        linewidths (int,float): Width of edges in network.
        fig_scale (int,float): Scaling factor to apply to plotted network.
        path_to_save (str): Path to directory (with file name included) in which
            to save generated plot. E.g. path_to_save='data/my_plot'
        show_fig (bool): Whether or not to plot and show fig. If True, will
            return and display fig.

    Returns:
        matplotlib.figure.Figure: node distribution plotted as a 2d matrix.
    """
    # lay out a deep copy so the caller's network object is never mutated
    layout = init_network_node_positions(copy.deepcopy(network))
    fig = plt.figure(figsize=[15 * fig_scale, 15 * fig_scale])
    # group nodes by their label class and collect their positions
    nodes_by_type = get_node_type_dict(network, network.graph['node_labels'])
    pos = {}
    for node_group in nodes_by_type.values():
        for node in node_group:
            pos[node] = layout[node]
    # one fixed colour per node-type layer
    palette = iter(['#25c44d', '#36a0c7', '#e8b017', '#6115a3', '#160e63'])
    for node_type in network.graph['node_labels']:
        nx.draw_networkx_nodes(network, pos,
            nodelist=nodes_by_type[node_type],
            node_size=network_node_size,
            node_color=next(palette),
            linewidths=linewidths,
            label=node_type)
    if draw_node_labels:
        nx.draw_networkx_labels(network, pos, font_size=font_size,
            font_color='k', font_family='sans-serif', font_weight='normal',
            alpha=1.0)
    nx.draw_networkx_edges(network, pos, edgelist=list(network.edges),
        edge_color='k', width=3, label='Fibre link')
    if path_to_save is not None:
        tools.pickle_data(path_to_save, fig)
    if show_fig:
        plt.show()
    return fig
if __name__ == '__main__':
    # demo: build a small fat tree and plot it.
    # BUG FIX: gen_fat_tree raises for odd k ('k must be even ...'), so use
    # k=4 instead of k=3. Also, plot_network has no 'name'/'with_labels'
    # parameters -- pass the save path via path_to_save and toggle labels via
    # draw_node_labels instead.
    network = gen_fat_tree(k=4)
    plot_network(network, draw_node_labels=True,
                 path_to_save='figures/graph/network_graph', show_fig=True)
# NOTE(review): garbled non-Python token removed so the file parses; a
# duplicate copy of this module's definitions follows below.
'''Module for generating and plotting networks.'''
from trafpy.generator.src import tools
import copy
import networkx as nx
import matplotlib.pyplot as plt
import json
def gen_arbitrary_network(num_eps,
                          ep_label=None,
                          ep_capacity=12500,
                          num_channels=1,
                          racks_dict=None,
                          topology_type=None):
    '''Generates an arbitrary network with num_eps nodes labelled as ep_label.

    Note that no edges are formed in this network; it is purely for ep name
    indexing purposes when using Demand class. This is useful where want to
    use the demand class but not necessarily with a carefully crafted networkx
    graph that accurately mimics the network you will use for the demands.

    Args:
        num_eps (int): Number of endpoints in network.
        ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have
            ep_label appended to the start of their label (e.g. 'server_0', 'server_1', ...).
            If None, endpoints are named '0', '1', ... instead.
        ep_capacity (int, float): Byte capacity per end point channel.
        num_channels (int, float): Number of channels on each link in network.
        racks_dict (dict): Mapping of which end points are in which racks. Keys are
            rack ids, values are list of end points. If None, assume there is no
            clustering/rack system in the network.
        topology_type (str): Descriptive label for the topology; auto-generated
            when None.

    Returns:
        networkx graph: network object
    '''
    # endpoint names must be str, otherwise not json serialisable
    if ep_label is None:
        servers = [str(i) for i in range(num_eps)]
    else:
        servers = [ep_label + '_' + str(i) for i in range(num_eps)]
    network = nx.Graph()
    network.add_nodes_from(servers)
    # with no label to filter on, every node counts as an endpoint
    if ep_label is None:
        eps = [node for node in network.nodes]
    else:
        eps = [node for node in network.nodes if ep_label in node]
    network.graph['endpoints'] = eps
    # /2 to get max theoretical capacity (number of units which network can
    # transfer per unit time)
    max_nw_capacity = (num_eps * ep_capacity * num_channels) / 2
    if topology_type is None:
        topology_type = 'arbitrary_endpoints_{}_chancap_{}_channels_{}'.format(
            num_eps, ep_capacity, num_channels)
    init_global_network_attrs(network,
                              max_nw_capacity,
                              num_channels,
                              ep_link_capacity=ep_capacity * num_channels,
                              endpoint_label=ep_label,
                              node_labels=[ep_label],
                              racks_dict=racks_dict,
                              topology_type=topology_type)
    return network
def gen_nsfnet_network(ep_label='server',
                       rack_label='rack',
                       N=0,
                       num_channels=2,
                       server_to_rack_channel_capacity=1,
                       rack_to_rack_channel_capacity=10,
                       show_fig=False):
    '''Generates the standard 14-node NSFNET topology (a U.S. core network).

    Args:
        ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have
            ep_label appended to the start of their label (e.g. 'server_0', 'server_1', ...).
        rack_label (str,int,float): Label given to the 14 NSFNET switch nodes
            when N > 0 (i.e. when they act as ToR/rack switches).
        N (int): Number of servers per rack. If 0, assume all nodes in nsfnet
            are endpoints.
        num_channels (int,float): Number of channels on each link in network.
        server_to_rack_channel_capacity (int,float): Byte capacity per channel
            between servers and ToR switch.
        rack_to_rack_channel_capacity (int,float): Byte capacity per channel between racks.
        show_fig (bool): Whether or not to plot and show fig. If True, will
            display fig.

    Returns:
        networkx graph: network object
    '''
    network = nx.Graph()
    # links of the 14-node NSFNET backbone (node indices 0-13).
    # NOTE(review): [4, 5] appears twice in the original data; nx.Graph
    # de-duplicates it, so only 21 distinct backbone links are created --
    # confirm against the intended NSFNET link list.
    node_pair_list = [[0, 1], [0, 3], [0, 2], [1, 2], [1, 7], [3, 8],
                      [3, 4], [3, 6], [4, 5], [4, 5], [5, 2], [5, 13],
                      [5, 12], [6, 7], [7, 10], [8, 11], [8, 9], [9, 10],
                      [9, 12], [10, 11], [10, 13], [11, 12]]
    # if N == 0, the 14 backbone nodes are themselves the endpoints;
    # otherwise they are ToR switches with N servers attached to each
    label = ep_label if N == 0 else rack_label
    for pair in node_pair_list:
        pair[0] = label + '_' + str(pair[0])
        pair[1] = label + '_' + str(pair[1])
    # add the 14 nodes and the backbone links between them
    for edge in node_pair_list:
        network.add_edge(*tuple(edge))
    # NOTE: channel_names was previously computed twice; compute it once here
    channel_names = gen_channel_names(num_channels)
    if N == 0:
        racks_dict = None
        server_edges = []
    else:
        # attach N servers to each of the 14 ToR switches
        i = 0
        racks_dict = {rack: [] for rack in range(14)}
        server_edges = []
        for rack in range(14):
            for server in range(N):
                server_name = ep_label + '_' + str(i)
                rack_name = rack_label + '_' + str(rack)
                racks_dict[rack].append(server_name)
                network.add_edge(server_name, rack_name)
                server_edges.append((server_name, rack_name))
                i += 1
    # BUG FIX: previously *every* link (including server<->ToR links) was
    # given rack_to_rack_channel_capacity, contradicting the ep_link_capacity
    # attribute set below; assign each link class its own capacity instead.
    backbone_edges = [tuple(pair) for pair in node_pair_list]
    add_edges_capacity_attrs(network, backbone_edges, channel_names,
                             rack_to_rack_channel_capacity)
    if server_edges:
        add_edges_capacity_attrs(network, server_edges, channel_names,
                                 server_to_rack_channel_capacity)
    # set global network attrs
    network.graph['endpoints'] = get_endpoints(network, ep_label)
    # max theoretical capacity = sum of all link capacities / 2 (each unit of
    # traffic occupies capacity at both ends of a link)
    num_backbone_edges = len(network.edges) - len(server_edges)
    max_nw_capacity = (num_backbone_edges * rack_to_rack_channel_capacity
                       + len(server_edges) * server_to_rack_channel_capacity
                       ) * num_channels / 2
    init_global_network_attrs(network,
                              max_nw_capacity,
                              num_channels,
                              ep_link_capacity=server_to_rack_channel_capacity * num_channels,
                              endpoint_label=ep_label,
                              node_labels=[ep_label, rack_label],
                              topology_type='14_node_nsfnet',
                              racks_dict=racks_dict)
    if show_fig:
        plot_network(network, show_fig=True)
    return network
def gen_simple_network(ep_label='server',
                       num_channels=2,
                       server_to_rack_channel_capacity=500,
                       show_fig=False):
    '''Generates very simple 5-node topology.

    Args:
        ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have
            ep_label appended to the start of their label (e.g. 'server_0', 'server_1', ...).
        num_channels (int,float): Number of channels on each link in network.
        server_to_rack_channel_capacity (int,float): Byte capacity per channel.
        show_fig (bool): Whether or not to plot and show fig. If True, will
            display fig.

    Returns:
        networkx graph: network object
    '''
    # hard-coded adjacency of the 5-node demo topology
    edge_list = [(0, 1), (0, 2), (1, 2), (2, 4), (4, 3), (3, 1)]
    network = nx.Graph()
    network.add_nodes_from(range(5))
    network.add_edges_from(edge_list, weight=1)
    # relabel the integer nodes as endpoints ('server_0', ..., 'server_4')
    name_map = {node: ep_label + '_' + str(node) for node in range(5)}
    network = nx.relabel_nodes(network, name_map)
    # give every link the same per-channel capacity
    channel_names = gen_channel_names(num_channels)
    add_edges_capacity_attrs(network, list(network.edges), channel_names,
                             server_to_rack_channel_capacity)
    # set global network attrs
    network.graph['endpoints'] = get_endpoints(network, ep_label)
    # /2 to get max theoretical capacity (number of units which network can
    # transfer per unit time)
    max_nw_capacity = (len(network.edges) * num_channels
                       * server_to_rack_channel_capacity) / 2
    init_global_network_attrs(network,
                              max_nw_capacity,
                              num_channels,
                              ep_link_capacity=server_to_rack_channel_capacity * num_channels,
                              endpoint_label=ep_label,
                              node_labels=[ep_label],
                              topology_type='5_node_simple_network')
    if show_fig:
        plot_network(network, show_fig=True)
    return network
def get_endpoints(network, ep_label):
    '''Gets list of endpoints of network.

    Args:
        network (networkx graph): Networkx object.
        ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have
            ep_label appended to the start of their label (e.g. 'server_0', 'server_1', ...).

    Returns:
        eps (list): List of endpoints.
    '''
    # endpoints are exactly the nodes whose name contains the endpoint label
    return [node for node in network.nodes if ep_label in node]
def gen_fat_tree(k=4,
                 L=2,
                 n=4,
                 ep_label='server',
                 rack_label='rack',
                 edge_label='edge',
                 aggregate_label='agg',
                 core_label='core',
                 num_channels = 2,
                 server_to_rack_channel_capacity=500,
                 rack_to_edge_channel_capacity=1000,
                 edge_to_agg_channel_capacity=1000,
                 agg_to_core_channel_capacity=2000,
                 rack_to_core_channel_capacity=2000,
                 show_fig=False):
    '''Generates a perfect fat tree (i.e. all layers have switches with same radix/number of ports).

    Top layer is always core (spine) switch layer, bottom layer is always
    ToR (leaf) layer.

    L must be either 2 (core, ToR) or 4 (core, agg, edge, ToR)

    N.B. L=2 is commonly referred to as '2-layer Clos' or 'Clos' or 'spine-leaf' topology

    Resource for building (scroll down to summary table with equations):

    https://packetpushers.net/demystifying-dcn-topologies-clos-fat-trees-part2/

    Another good resource for data centre topologies etc. in general:

    https://www.oreilly.com/library/view/bgp-in-the/9781491983416/ch01.html#:~:text=The%20most%20common%20routing%20protocol,single%20data%20center%2C%20as%20well.

    Parameters of network:

    - number of core (spine) switches = (k/2)^(L/2) (top layer)
    - number of edge switches (if L=4) = (k^2)/2
    - number of agg switches (if L=4) = (k^2)/2
    - number of pods (if L=4) (pod is a group of agg and/or edge switches) = 2*(k/2)^(L-2)
    - number of ToR (leaf) switches (racks) = 2*(k/2)^(L-1) (bottom layer)
    - number of server-facing ToR 'host' ports = 2*(k/2)^2 (can have multiple servers connected to same host port, & can oversubscribe)
    - number of servers = number ToR switches * n

    Args:
        k (int): Number of ports (links) on each switch (both up and down).
        L (int): Number of layers in the fat tree.
        n (int): Number of server per rack.
        ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have
            ep_label appended to the start of their label (e.g. 'server_0', 'server_1', ...).
        edge_label (str,int): Label to assign to edge switch nodes
        aggregate_label (str,int): Label to assign to edge switch nodes
        core_label (str,int): Label to assign to core switch nodes
        num_channels (int, float): Number of channels on each link in network
        server_to_edge_channel_capacity (int,float): Byte capacity per channel
        edge_to_agg_channel_capacity (int,float): (if L==4) Byte capacity per channel
        agg_to_core_channel_capacity (int,float): (if L==4) Byte capacity per channel
        rack_to_core_channel_capacity (int,float): (if L==2) Byte capacity per channel

    Returns:
        networkx graph: network object
    '''
    # validate the only two supported layer counts and the even-radix constraint
    if L != 2 and L != 4:
        raise Exception('L must be 2 (ToR layer, core layer) or 4 (ToR layer, edge layer, agg layer, core layer), but is {}.'.format(L))
    if k % 2 != 0:
        raise Exception('k must be even since, in perfect fat tree, have equal number of up and down ports on each switch, but is {}.'.format(k))

    channel_names = gen_channel_names(num_channels)

    # initialise network node label classes
    if L == 2:
        node_labels = [ep_label, rack_label, core_label]
    else:
        node_labels = [ep_label, rack_label, edge_label, aggregate_label, core_label]

    # perfect fat tree sizing (see equations in docstring)
    num_cores = int((k/2)**(L/2))
    num_aggs = int((k**2)/2)
    num_edges = int((k**2)/2)
    num_pods = int(2*(k/2)**(L-2))
    num_racks = int(2*(k/2)**(L-1))
    num_servers = int(num_racks * n)

    # generate labelled node names for each layer
    cores = [core_label+'_'+str(i) for i in range(num_cores)]
    aggs = [aggregate_label+'_'+str(i) for i in range(num_aggs)]
    edges = [edge_label+'_'+str(i) for i in range(num_edges)]
    racks = [rack_label+'_'+str(i) for i in range(num_racks)]
    servers = [ep_label+'_'+str(i) for i in range(num_servers)]

    # create core and rack layer networks
    core_layer = nx.Graph()
    rack_layer = nx.Graph()
    core_layer.add_nodes_from(cores)
    rack_layer.add_nodes_from(racks)

    # combine cores and racks into single network
    fat_tree_network = nx.compose(core_layer, rack_layer)

    if L == 2:
        # 2 layers: Core, ToR
        # link racks to cores, add link attributes
        rack_iterator = iter(racks)
        for rack in racks:
            core_iterator = iter(cores)
            # have k/2 up-ports on each switch
            for up_port in range(int(k/2)):
                core = next(core_iterator)
                fat_tree_network.add_edge(rack, core)
                add_edge_capacity_attrs(fat_tree_network,
                                        (rack, core),
                                        channel_names,
                                        rack_to_core_channel_capacity)

    else:
        # 4 layers: Core, Agg, Edge, ToR. Agg and Edge switches grouped into pods.

        # group edges and aggregates into pods (k/2 of each per pod)
        num_pods = int(k)
        pods = [[] for i in range(num_pods)]
        prev_iter = 0
        for pod_iter in range(len(pods)):
            curr_iter = int(prev_iter + (k/2))
            pods[pod_iter].append(edges[prev_iter:curr_iter])
            pods[pod_iter].append(aggs[prev_iter:curr_iter])
            prev_iter = curr_iter

        # create dict of pod networks (keys are 1-tuples of pod labels)
        pod_labels = ['pod_'+str(i) for i in range(num_pods)]
        pods_dict = {tuple([pod]): nx.Graph() for pod in pod_labels}
        for pod_iter in range(num_pods):
            key = ('pod_'+str(pod_iter),)
            pod_edges = pods[pod_iter][0]
            pod_aggs = pods[pod_iter][1]
            pods_dict[key].add_nodes_from(pod_edges)
            pods_dict[key].add_nodes_from(pod_aggs)
            # fully connect edge and aggregate switches within pod, add link attributes
            for pod_edge in pod_edges:
                for pod_agg in pod_aggs:
                    pods_dict[key].add_edge(pod_agg, pod_edge)
                    add_edge_capacity_attrs(pods_dict[key],
                                            (pod_agg,pod_edge),
                                            channel_names,
                                            edge_to_agg_channel_capacity)

        # add pods (agg + edge) layer to fat-tree
        pod_networks = list(pods_dict.values())
        for pod_iter in range(num_pods):
            fat_tree_network = nx.compose(fat_tree_network, pod_networks[pod_iter])

        # link aggregate switches in pods to core switches, add link attributes
        for pod_iter in range(num_pods):
            pod_aggs = pods[pod_iter][1]
            core_iterator = iter(cores)
            for pod_agg in pod_aggs:
                # keep adding core uplinks until all k ports of the agg are used
                while fat_tree_network.degree[pod_agg] < k:
                    core = next(core_iterator)
                    fat_tree_network.add_edge(core, pod_agg)
                    add_edge_capacity_attrs(fat_tree_network,
                                            (core,pod_agg),
                                            channel_names,
                                            agg_to_core_channel_capacity)

        # link edge switches in pods to racks, add link attributes
        rack_iterator = iter(racks)
        for pod_iter in range(num_pods):
            pod_edges = pods[pod_iter][0]
            for pod_edge in pod_edges:
                # fill the edge switch's remaining ports with rack downlinks
                while fat_tree_network.degree[pod_edge] < k:
                    rack = next(rack_iterator)
                    fat_tree_network.add_edge(pod_edge, rack)
                    add_edge_capacity_attrs(fat_tree_network,
                                            (pod_edge,rack),
                                            channel_names,
                                            rack_to_edge_channel_capacity)

    # link servers to racks, add link attributes
    racks_dict = {rack: [] for rack in racks} # track which endpoints in which rack
    server_iterator = iter(servers)
    for rack in racks:
        for _ in range(n):
            server = next(server_iterator)
            fat_tree_network.add_edge(rack, server)
            add_edge_capacity_attrs(fat_tree_network,
                                    (rack, server),
                                    channel_names,
                                    server_to_rack_channel_capacity)
            racks_dict[rack].append(server)

    # calc total network capacity
    # /= 2 to get max theoretical capacity (number of units which network can transfer per unit time)
    max_nw_capacity = (num_servers * num_channels * server_to_rack_channel_capacity) / 2

    # init global network attrs
    fat_tree_network.graph['endpoints'] = servers
    init_global_network_attrs(fat_tree_network,
                              max_nw_capacity,
                              num_channels,
                              ep_link_capacity=server_to_rack_channel_capacity*num_channels,
                              endpoint_label=ep_label,
                              node_labels=node_labels,
                              topology_type='fat_tree',
                              racks_dict=racks_dict)

    if show_fig:
        plot_network(fat_tree_network, show_fig=True)

    return fat_tree_network
def init_global_network_attrs(network,
                              max_nw_capacity,
                              num_channels,
                              ep_link_capacity,
                              endpoint_label = 'server',
                              topology_type='unknown',
                              node_labels=['server'],
                              racks_dict=None):
    '''Initialises the standard global network attributes of a given network.

    Args:
        network (obj): NetworkX object.
        max_nw_capacity (int/float): Maximum rate at which info can be reliably
            transmitted over the network (sum of all link capacities).
        num_channels (int): Number of channels on each link in network.
        ep_link_capacity (int/float): Total capacity of each endpoint link.
        endpoint_label (str): Label class of the endpoint nodes.
        topology_type (str): Label of network topology (e.g. 'fat_tree').
        node_labels (list): Label classes assigned to network nodes
            (e.g. ['server', 'rack', 'edge']).
        racks_dict (dict): Which servers/endpoints are in which rack. If None,
            assume do not have rack system where have multiple servers in one
            rack.
    '''
    graph_attrs = {
        'endpoint_label': endpoint_label,
        'num_channels_per_link': num_channels,
        'ep_link_capacity': ep_link_capacity,
        # every endpoint link is split into a src port and a dst port
        'ep_link_port_capacity': ep_link_capacity / 2,
        'max_nw_capacity': max_nw_capacity,
        'curr_nw_capacity_used': 0,
        'num_active_connections': 0,
        'total_connections_blocked': 0,
        'node_labels': node_labels,
        'topology_type': topology_type,
        'channel_names': gen_channel_names(num_channels),
    }
    network.graph.update(graph_attrs)

    if racks_dict is not None:
        # stringify the racks dict so the graph attrs remain json serialisable
        str_racks_dict = {str(rack): [str(ep) for ep in eps]
                          for rack, eps in racks_dict.items()}
        network.graph['rack_to_ep_dict'] = str_racks_dict
        # invert the mapping (endpoint -> rack) to make hashing/lookups easy;
        # the first rack listed for an endpoint wins
        ep_to_rack_dict = {}
        for rack, eps in str_racks_dict.items():
            for ep in eps:
                if ep not in ep_to_rack_dict.keys():
                    ep_to_rack_dict[ep] = rack
        network.graph['ep_to_rack_dict'] = ep_to_rack_dict
    else:
        network.graph['rack_to_ep_dict'] = None
        network.graph['ep_to_rack_dict'] = None
def gen_channel_names(num_channels):
    '''Generates channel names for channels on each link in network.'''
    # 1-indexed channel ids -> 'channel_1', ..., 'channel_N'
    return ['channel_%d' % (i + 1) for i in range(num_channels)]
def add_edge_capacity_attrs(network,
                            edge,
                            channel_names,
                            channel_capacity,
                            bidirectional_links=True):
    '''Adds channels and corresponding max channel bytes to single edge in network.

    Args:
        network (networkx graph): Network containing edges to which attrs will
            be added.
        edge (tuple): Node-node edge pair.
        channel_names (list): List of channel names to add to edge.
        channel_capacity (int,float): Capacity to allocate to each channel.
        bidirectional_links (bool): If True, each link has capacity split equally
            between src and dst port. I.e. all links have a src and dst port
            which are treated separately to incoming and outgoing traffic to and
            from given node (switch or server).
    '''
    node_a, node_b = edge[0], edge[1]
    if bidirectional_links:
        # half the link capacity goes to each directed port
        per_port_capacity = channel_capacity / 2
        forward = '{}_to_{}_port'.format(node_a, node_b)
        reverse = '{}_to_{}_port'.format(node_b, node_a)
        attrs = {edge: {
            forward: {'channels': dict.fromkeys(channel_names, per_port_capacity),
                      'max_channel_capacity': per_port_capacity},
            reverse: {'channels': dict.fromkeys(channel_names, per_port_capacity),
                      'max_channel_capacity': per_port_capacity}}}
    else:
        attrs = {edge: {
            'channels': dict.fromkeys(channel_names, channel_capacity),
            'max_channel_capacity': channel_capacity}}
    nx.set_edge_attributes(network, attrs)
def add_edges_capacity_attrs(network,
                             edges,
                             channel_names,
                             channel_capacity,
                             bidirectional_links=True):
    '''Adds channels & max channel capacitys to each edge in network.

    To access e.g. the edge going from node 0 to node 1 (edge (0, 1)), you
    would index the network with network[0][1]

    To access e.g. the channel_1 attribute of this particular (0, 1) edge, you
    would do network[0][1]['channels']['channel_1']
    OR
    if bidirectional_links, you do network[0][1]['0_to_1_port']['channels']['channel_1']
    or network[0][1]['1_to_0_port']['channels']['channel_1'] depending on which direction
    of the link you want to access.

    Args:
        network (networkx graph): Network containing edges to which attrs will
            be added.
        edges (list): List of node pairs in tuples.
        channel_names (list of str): List of channel names to add to edge.
        channel_capacity (int, float): Capacity to allocate to each channel.
        bidirectional_links (bool): If True, each link has capacity split equally
            between src and dst port. I.e. all links have a src and dst port
            which are treated separately to incoming and outgoing traffic to and
            from given node (switch or server).
    '''
    # Delegate the per-edge attribute construction to add_edge_capacity_attrs
    # rather than duplicating its dict-building logic; the resulting edge
    # attributes are identical.
    for edge in edges:
        add_edge_capacity_attrs(network,
                                edge,
                                channel_names,
                                channel_capacity,
                                bidirectional_links=bidirectional_links)
def get_node_type_dict(network, node_types=()):
    '''Gets dict where keys are node types, values are list of nodes for each node type in graph.

    A node belongs to a type if the type string occurs as a substring of the
    node's label (e.g. 'server' matches 'server_0').

    Args:
        network (networkx graph): Network whose nodes should be grouped.
        node_types (iterable of str): Node-type substrings to group by.
            Default changed from a mutable ``[]`` to an immutable ``()`` to
            avoid the shared-mutable-default pitfall; behaviour is unchanged.

    Returns:
        dict: Maps each node type to the list of matching node labels, in
        graph iteration order. A node matching several types appears under
        each of them.
    '''
    node_types = list(node_types)
    network_nodes_dict = {node_type: [] for node_type in node_types}
    # iterate network.nodes directly; the original copied it into a list first,
    # which is unnecessary since we never mutate the graph here
    for node in network.nodes:
        for node_type in node_types:
            if node_type in node:
                network_nodes_dict[node_type].append(node)
    return network_nodes_dict
def get_fat_tree_positions(net, width_scale=500, height_scale=10):
    '''Gets networkx positions of nodes in fat tree network for plotting.

    Layers (server, rack, edge, agg, core — in the order given by
    net.graph['node_labels']) are stacked bottom-to-top, and the nodes of
    each layer are spread evenly across the width.

    Args:
        net (networkx graph): Fat-tree network to lay out.
        width_scale (int, float): Horizontal scaling factor.
        height_scale (int, float): Vertical spacing between layers.

    Returns:
        dict: Maps node label -> (x, y) plotting position.
    '''
    positions = {}
    type_to_nodes = get_node_type_dict(net, net.graph['node_labels'])
    # enumerate from 1 so the first layer sits at height 1*height_scale
    for layer, (node_type, nodes) in enumerate(type_to_nodes.items(), start=1):
        y = layer * height_scale
        # even horizontal spacing with a margin at both ends
        x_step = 1 / (len(nodes) + 1)
        for idx, node in enumerate(nodes, start=1):
            positions[node] = (idx * x_step * width_scale, y)
    return positions
def init_network_node_positions(net):
    '''Initialises network node positions for plotting.

    Fat-tree topologies get the custom layered layout; any other topology
    falls back to graphviz's force-directed 'neato' layout.
    '''
    if net.graph['topology_type'] == 'fat_tree':
        return get_fat_tree_positions(net)
    return nx.nx_agraph.graphviz_layout(net, prog='neato')
def plot_network(network,
                 draw_node_labels=True,
                 ep_label='server',
                 network_node_size=2000,
                 font_size=30,
                 linewidths=1,
                 fig_scale=2,
                 path_to_save=None,
                 show_fig=False):
    '''Plots networkx graph.

    Recognises special fat tree network and applies appropriate node positioning,
    labelling, colouring etc.

    Args:
        network (networkx graph): Network object to be plotted.
        draw_node_labels (bool): Whether or not to draw node labels on plot.
        ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have
            ep_label appended to the start of their label (e.g. 'server_0', 'server_1', ...).
        network_node_size (int,float): Size of plotted nodes.
        font_size (int,float): Size of of font of plotted labels etc.
        linewidths (int,float): Width of edges in network.
        fig_scale (int,float): Scaling factor to apply to plotted network.
        path_to_save (str): Path to directory (with file name included) in which
            to save generated plot. E.g. path_to_save='data/my_plot'
        show_fig (bool): Whether or not to plot and show fig. If True, will
            return and display fig.

    Returns:
        matplotlib.figure.Figure: node distribution plotted as a 2d matrix.
    '''
    # deepcopy so layout initialisation cannot mutate the caller's graph
    net_node_positions = init_network_node_positions(copy.deepcopy(network))

    fig = plt.figure(figsize=[15*fig_scale,15*fig_scale])

    # add nodes and edges
    pos = {}
    network_nodes = []  # NOTE(review): unused local — kept for byte-compatibility
    network_nodes_dict = get_node_type_dict(network, network.graph['node_labels'])
    for nodes in list(network_nodes_dict.values()):
        for network_node in nodes:
            pos[network_node] = net_node_positions[network_node]

    # network nodes: one colour per node type, consumed in node_labels order.
    # NOTE(review): only 5 colours available — a 6th node type would raise
    # StopIteration; confirm node_labels never exceeds 5 entries.
    node_colours = iter(['#25c44d', '#36a0c7', '#e8b017', '#6115a3', '#160e63']) # server, rack, edge, agg, core
    for node_type in network.graph['node_labels']:
        nx.draw_networkx_nodes(network,
                               pos,
                               nodelist=network_nodes_dict[node_type],
                               node_size=network_node_size,
                               node_color=next(node_colours),
                               linewidths=linewidths,
                               label=node_type)
    if draw_node_labels:
        # nodes
        nx.draw_networkx_labels(network,
                                pos,
                                font_size=font_size,
                                font_color='k',
                                font_family='sans-serif',
                                font_weight='normal',
                                alpha=1.0)

    # fibre links: drawn after nodes/labels so edges sit on top in z-order
    fibre_links = list(network.edges)
    nx.draw_networkx_edges(network,
                           pos,
                           edgelist=fibre_links,
                           edge_color='k',
                           width=3,
                           label='Fibre link')

    if path_to_save is not None:
        # NOTE(review): pickles the Figure object to path_to_save rather than
        # rendering an image file — confirm this is the intended save format
        tools.pickle_data(path_to_save, fig)

    if show_fig:
        plt.show()

    return fig
if __name__ == '__main__':
    #network = gen_simple_network()
    #network = gen_nsfnet_network()
    network = gen_fat_tree(k=3)

    # Fixed call: plot_network() has no 'name' or 'with_labels' parameters
    # (the old call raised TypeError), and its second positional argument is
    # the boolean draw_node_labels — not a save path. Use the documented
    # keyword arguments instead.
    plot_network(network,
                 draw_node_labels=True,
                 path_to_save='figures/graph/network_graph.png',
                 show_fig=True)
|
flexible
|
{
"blob_id": "4cf2829282cb0a1673e741f78f17ce27a2817ff2",
"index": 651,
"step-1": "<mask token>\n\n\ndef gen_arbitrary_network(num_eps, ep_label=None, ep_capacity=12500,\n num_channels=1, racks_dict=None, topology_type=None):\n \"\"\"Generates an arbitrary network with num_eps nodes labelled as ep_label.\n\n Note that no edges are formed in this network; it is purely for ep name \n indexing purposes when using Demand class. This is useful where want to\n use the demand class but not necessarily with a carefully crafted networkx\n graph that accurately mimics the network you will use for the demands\n\n Args:\n num_eps (int): Number of endpoints in network.\n ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have\n ep_label appended to the start of their label (e.g. 'server_0', 'server_1', ...).\n ep_capacity (int, float): Byte capacity per end point channel.\n num_channels (int, float): Number of channels on each link in network.\n racks_dict (dict): Mapping of which end points are in which racks. Keys are\n rack ids, values are list of end points. 
If None, assume there is not\n clustering/rack system in the network where have different end points\n in different clusters/racks.\n\n Returns:\n networkx graph: network object\n\n \"\"\"\n network = nx.Graph()\n network.add_nodes_from([node for node in range(num_eps)])\n if ep_label is None:\n servers = [str(i) for i in range(num_eps)]\n else:\n servers = [(ep_label + '_' + str(i)) for i in range(num_eps)]\n relabel_mapping = {node: label for node, label in zip(range(num_eps),\n servers)}\n network = nx.relabel_nodes(network, relabel_mapping)\n eps = []\n for node in list(network.nodes):\n try:\n if ep_label in node:\n eps.append(node)\n except TypeError:\n eps.append(node)\n network.graph['endpoints'] = eps\n max_nw_capacity = num_eps * ep_capacity * num_channels / 2\n if topology_type is None:\n topology_type = 'arbitrary_endpoints_{}_chancap_{}_channels_{}'.format(\n num_eps, ep_capacity, num_channels)\n init_global_network_attrs(network, max_nw_capacity, num_channels,\n ep_link_capacity=ep_capacity * num_channels, endpoint_label=\n ep_label, node_labels=[ep_label], racks_dict=racks_dict,\n topology_type=topology_type)\n return network\n\n\ndef gen_nsfnet_network(ep_label='server', rack_label='rack', N=0,\n num_channels=2, server_to_rack_channel_capacity=1,\n rack_to_rack_channel_capacity=10, show_fig=False):\n \"\"\"Generates the standard 14-node NSFNET topology (a U.S. core network).\n \n Args:\n ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have\n ep_label appended to the start of their label (e.g. 'server_0', 'server_1', ...).\n N (int): Number of servers per rack. 
If 0, assume all nodes in nsfnet\n are endpoints\n num_channels (int,float): Number of channels on each link in network.\n server_to_rack_channel_capacity (int,float): Byte capacity per channel \n between servers and ToR switch.\n rack_to_rack_channel_capacity (int,float): Byte capacity per channel between racks.\n show_fig (bool): Whether or not to plot and show fig. If True, will\n display fig.\n\n Returns:\n networkx graph: network object\n\n \"\"\"\n channel_names = gen_channel_names(num_channels)\n network = nx.Graph()\n node_pair_list = [[0, 1], [0, 3], [0, 2], [1, 2], [1, 7], [3, 8], [3, 4\n ], [3, 6], [4, 5], [4, 5], [5, 2], [5, 13], [5, 12], [6, 7], [7, 10\n ], [8, 11], [8, 9], [9, 10], [9, 12], [10, 11], [10, 13], [11, 12]]\n if N == 0:\n label = ep_label\n else:\n label = rack_label\n for idx in range(len(node_pair_list)):\n node_pair_list[idx][0] = label + '_' + str(node_pair_list[idx][0])\n node_pair_list[idx][1] = label + '_' + str(node_pair_list[idx][1])\n for edge in node_pair_list:\n network.add_edge(*tuple(edge))\n if N == 0:\n racks_dict = None\n else:\n i = 0\n racks_dict = {rack: [] for rack in range(14)}\n for rack in range(14):\n for server in range(N):\n racks_dict[rack].append(ep_label + '_' + str(i))\n network.add_edge(ep_label + '_' + str(i), rack_label + '_' +\n str(rack))\n i += 1\n channel_names = gen_channel_names(num_channels)\n edges = [edge for edge in network.edges]\n add_edges_capacity_attrs(network, edges, channel_names,\n rack_to_rack_channel_capacity)\n network.graph['endpoints'] = get_endpoints(network, ep_label)\n max_nw_capacity = len(network.edges\n ) * num_channels * rack_to_rack_channel_capacity / 2\n init_global_network_attrs(network, max_nw_capacity, num_channels,\n ep_link_capacity=server_to_rack_channel_capacity * num_channels,\n endpoint_label=ep_label, node_labels=[ep_label, rack_label],\n topology_type='14_node_nsfnet', racks_dict=racks_dict)\n if show_fig:\n plot_network(network, show_fig=True)\n return 
network\n\n\ndef gen_simple_network(ep_label='server', num_channels=2,\n server_to_rack_channel_capacity=500, show_fig=False):\n \"\"\"Generates very simple 5-node topology.\n\n Args:\n ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have\n ep_label appended to the start of their label (e.g. 'server_0', 'server_1', ...).\n num_channels (int,float): Number of channels on each link in network.\n channel_capacity (int,float): Byte capacity per channel.\n show_fig (bool): Whether or not to plot and show fig. If True, will\n display fig.\n\n Returns:\n networkx graph: network object\n\n \"\"\"\n network = nx.Graph()\n network.add_nodes_from([node for node in range(5)])\n network.add_edges_from([(0, 1), (0, 2), (1, 2), (2, 4), (4, 3), (3, 1)],\n weight=1)\n servers = [(ep_label + '_' + str(i)) for i in range(5)]\n relabel_mapping = {node: label for node, label in zip(range(5), servers)}\n network = nx.relabel_nodes(network, relabel_mapping)\n channel_names = gen_channel_names(num_channels)\n edges = [edge for edge in network.edges]\n add_edges_capacity_attrs(network, edges, channel_names,\n server_to_rack_channel_capacity)\n network.graph['endpoints'] = get_endpoints(network, ep_label)\n max_nw_capacity = len(network.edges\n ) * num_channels * server_to_rack_channel_capacity / 2\n init_global_network_attrs(network, max_nw_capacity, num_channels,\n ep_link_capacity=server_to_rack_channel_capacity * num_channels,\n endpoint_label=ep_label, node_labels=[ep_label], topology_type=\n '5_node_simple_network')\n if show_fig:\n plot_network(network, show_fig=True)\n return network\n\n\ndef get_endpoints(network, ep_label):\n \"\"\"Gets list of endpoints of network.\n\n Args:\n network (networkx graph): Networkx object.\n ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have\n ep_label appended to the start of their label (e.g. 
'server_0', 'server_1', ...).\n\n Returns:\n eps (list): List of endpoints.\n\n \"\"\"\n eps = []\n for node in list(network.nodes):\n if ep_label in node:\n eps.append(node)\n return eps\n\n\ndef gen_fat_tree(k=4, L=2, n=4, ep_label='server', rack_label='rack',\n edge_label='edge', aggregate_label='agg', core_label='core',\n num_channels=2, server_to_rack_channel_capacity=500,\n rack_to_edge_channel_capacity=1000, edge_to_agg_channel_capacity=1000,\n agg_to_core_channel_capacity=2000, rack_to_core_channel_capacity=2000,\n show_fig=False):\n \"\"\"Generates a perfect fat tree (i.e. all layers have switches with same radix/number of ports).\n\n Top layer is always core (spine) switch layer, bottom layer is always\n ToR (leaf) layer.\n\n L must be either 2 (core, ToR) or 4 (core, agg, edge, ToR)\n\n N.B. L=2 is commonly referred to as '2-layer Clos' or 'Clos' or 'spine-leaf' topology\n\n Resource for building (scroll down to summary table with equations):\n\n https://packetpushers.net/demystifying-dcn-topologies-clos-fat-trees-part2/\n\n Another good resource for data centre topologies etc. 
in general:\n\n https://www.oreilly.com/library/view/bgp-in-the/9781491983416/ch01.html#:~:text=The%20most%20common%20routing%20protocol,single%20data%20center%2C%20as%20well.\n\n Parameters of network:\n\n - number of core (spine) switches = (k/2)^(L/2) (top layer)\n - number of edge switches (if L=4) = (k^2)/2\n - number of agg switches (if L=4) = (k^2)/2\n - number of pods (if L=4) (pod is a group of agg and/or edge switches) = 2*(k/2)^(L-2)\n - number of ToR (leaf) switches (racks) = 2*(k/2)^(L-1) (bottom layer)\n - number of server-facing ToR 'host' ports = 2*(k/2)^2 (can have multiple servers connected to same host port, & can oversubscribe)\n - number of servers = number ToR switches * n\n\n Args:\n k (int): Number of ports (links) on each switch (both up and down).\n L (int): Number of layers in the fat tree.\n n (int): Number of server per rack.\n ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have\n ep_label appended to the start of their label (e.g. 
'server_0', 'server_1', ...).\n edge_label (str,int): Label to assign to edge switch nodes\n aggregate_label (str,int): Label to assign to edge switch nodes\n core_label (str,int): Label to assign to core switch nodes\n num_channels (int, float): Number of channels on each link in network\n server_to_edge_channel_capacity (int,float): Byte capacity per channel\n edge_to_agg_channel_capacity (int,float): (if L==4) Byte capacity per channel\n agg_to_core_channel_capacity (int,float): (if L==4) Byte capacity per channel\n rack_to_core_channel_capacity (int,float): (if L==2) Byte capacity per channel\n\n Returns:\n networkx graph: network object\n\n \"\"\"\n if L != 2 and L != 4:\n raise Exception(\n 'L must be 2 (ToR layer, core layer) or 4 (ToR layer, edge layer, agg layer, core layer), but is {}.'\n .format(L))\n if k % 2 != 0:\n raise Exception(\n 'k must be even since, in perfect fat tree, have equal number of up and down ports on each switch, but is {}.'\n .format(k))\n channel_names = gen_channel_names(num_channels)\n if L == 2:\n node_labels = [ep_label, rack_label, core_label]\n else:\n node_labels = [ep_label, rack_label, edge_label, aggregate_label,\n core_label]\n num_cores = int((k / 2) ** (L / 2))\n num_aggs = int(k ** 2 / 2)\n num_edges = int(k ** 2 / 2)\n num_pods = int(2 * (k / 2) ** (L - 2))\n num_racks = int(2 * (k / 2) ** (L - 1))\n num_servers = int(num_racks * n)\n cores = [(core_label + '_' + str(i)) for i in range(num_cores)]\n aggs = [(aggregate_label + '_' + str(i)) for i in range(num_aggs)]\n edges = [(edge_label + '_' + str(i)) for i in range(num_edges)]\n racks = [(rack_label + '_' + str(i)) for i in range(num_racks)]\n servers = [(ep_label + '_' + str(i)) for i in range(num_servers)]\n core_layer = nx.Graph()\n rack_layer = nx.Graph()\n core_layer.add_nodes_from(cores)\n rack_layer.add_nodes_from(racks)\n fat_tree_network = nx.compose(core_layer, rack_layer)\n if L == 2:\n rack_iterator = iter(racks)\n for rack in racks:\n core_iterator = 
iter(cores)\n for up_port in range(int(k / 2)):\n core = next(core_iterator)\n fat_tree_network.add_edge(rack, core)\n add_edge_capacity_attrs(fat_tree_network, (rack, core),\n channel_names, rack_to_core_channel_capacity)\n else:\n num_pods = int(k)\n pods = [[] for i in range(num_pods)]\n prev_iter = 0\n for pod_iter in range(len(pods)):\n curr_iter = int(prev_iter + k / 2)\n pods[pod_iter].append(edges[prev_iter:curr_iter])\n pods[pod_iter].append(aggs[prev_iter:curr_iter])\n prev_iter = curr_iter\n pod_labels = [('pod_' + str(i)) for i in range(num_pods)]\n pods_dict = {tuple([pod]): nx.Graph() for pod in pod_labels}\n for pod_iter in range(num_pods):\n key = 'pod_' + str(pod_iter),\n pod_edges = pods[pod_iter][0]\n pod_aggs = pods[pod_iter][1]\n pods_dict[key].add_nodes_from(pod_edges)\n pods_dict[key].add_nodes_from(pod_aggs)\n for pod_edge in pod_edges:\n for pod_agg in pod_aggs:\n pods_dict[key].add_edge(pod_agg, pod_edge)\n add_edge_capacity_attrs(pods_dict[key], (pod_agg,\n pod_edge), channel_names, edge_to_agg_channel_capacity)\n pod_networks = list(pods_dict.values())\n for pod_iter in range(num_pods):\n fat_tree_network = nx.compose(fat_tree_network, pod_networks[\n pod_iter])\n for pod_iter in range(num_pods):\n pod_aggs = pods[pod_iter][1]\n core_iterator = iter(cores)\n for pod_agg in pod_aggs:\n while fat_tree_network.degree[pod_agg] < k:\n core = next(core_iterator)\n fat_tree_network.add_edge(core, pod_agg)\n add_edge_capacity_attrs(fat_tree_network, (core,\n pod_agg), channel_names, agg_to_core_channel_capacity)\n rack_iterator = iter(racks)\n for pod_iter in range(num_pods):\n pod_edges = pods[pod_iter][0]\n for pod_edge in pod_edges:\n while fat_tree_network.degree[pod_edge] < k:\n rack = next(rack_iterator)\n fat_tree_network.add_edge(pod_edge, rack)\n add_edge_capacity_attrs(fat_tree_network, (pod_edge,\n rack), channel_names, rack_to_edge_channel_capacity)\n racks_dict = {rack: [] for rack in racks}\n server_iterator = iter(servers)\n for 
rack in racks:\n for _ in range(n):\n server = next(server_iterator)\n fat_tree_network.add_edge(rack, server)\n add_edge_capacity_attrs(fat_tree_network, (rack, server),\n channel_names, server_to_rack_channel_capacity)\n racks_dict[rack].append(server)\n max_nw_capacity = (num_servers * num_channels *\n server_to_rack_channel_capacity / 2)\n fat_tree_network.graph['endpoints'] = servers\n init_global_network_attrs(fat_tree_network, max_nw_capacity,\n num_channels, ep_link_capacity=server_to_rack_channel_capacity *\n num_channels, endpoint_label=ep_label, node_labels=node_labels,\n topology_type='fat_tree', racks_dict=racks_dict)\n if show_fig:\n plot_network(fat_tree_network, show_fig=True)\n return fat_tree_network\n\n\ndef init_global_network_attrs(network, max_nw_capacity, num_channels,\n ep_link_capacity, endpoint_label='server', topology_type='unknown',\n node_labels=['server'], racks_dict=None):\n \"\"\"Initialises the standard global network attributes of a given network.\n\n Args:\n network (obj): NetworkX object.\n max_nw_capacity (int/float): Maximum rate at which info can be reliably \n transmitted over the network (sum of all link capacities).\n num_channels (int): Number of channels on each link in network.\n topology_type (str): Label of network topology (e.g. 'fat_tree').\n node_labels (list): Label classes assigned to network nodes \n (e.g. ['server', 'rack', 'edge']).\n racks_dict (dict): Which servers/endpoints are in which rack. 
If None,\n assume do not have rack system where have multiple servers in one\n rack.\n\n \"\"\"\n network.graph['endpoint_label'] = endpoint_label\n network.graph['num_channels_per_link'] = num_channels\n network.graph['ep_link_capacity'] = ep_link_capacity\n network.graph['ep_link_port_capacity'] = ep_link_capacity / 2\n network.graph['max_nw_capacity'] = max_nw_capacity\n network.graph['curr_nw_capacity_used'] = 0\n network.graph['num_active_connections'] = 0\n network.graph['total_connections_blocked'] = 0\n network.graph['node_labels'] = node_labels\n network.graph['topology_type'] = topology_type\n network.graph['channel_names'] = gen_channel_names(num_channels)\n if racks_dict is not None:\n _racks_dict = {}\n for key, val in racks_dict.items():\n _racks_dict[str(key)] = []\n for v in val:\n _racks_dict[str(key)].append(str(v))\n network.graph['rack_to_ep_dict'] = _racks_dict\n else:\n network.graph['rack_to_ep_dict'] = None\n if racks_dict is not None:\n ep_to_rack_dict = {}\n for key, val in _racks_dict.items():\n for v in val:\n if v not in ep_to_rack_dict.keys():\n ep_to_rack_dict[v] = key\n network.graph['ep_to_rack_dict'] = ep_to_rack_dict\n else:\n network.graph['ep_to_rack_dict'] = None\n\n\n<mask token>\n\n\ndef add_edge_capacity_attrs(network, edge, channel_names, channel_capacity,\n bidirectional_links=True):\n \"\"\"Adds channels and corresponding max channel bytes to single edge in network.\n \n Args:\n network (networkx graph): Network containing edges to whiich attrs will\n be added.\n edge (tuple): Node-node edge pair.\n channel_names (list): List of channel names to add to edge.\n channel_capacity (int,float): Capacity to allocate to each channel.\n bidirectional_links (bool): If True, each link has capacity split equally\n between src and dst port. I.e. 
all links have a src and dst port\n which are treated separately to incoming and outgoing traffic to and\n from given node (switch or server).\n\n \"\"\"\n if bidirectional_links:\n attrs = {edge: {'{}_to_{}_port'.format(edge[0], edge[1]): {\n 'channels': {channel: (channel_capacity / 2) for channel in\n channel_names}, 'max_channel_capacity': channel_capacity / 2},\n '{}_to_{}_port'.format(edge[1], edge[0]): {'channels': {channel:\n (channel_capacity / 2) for channel in channel_names},\n 'max_channel_capacity': channel_capacity / 2}}}\n else:\n attrs = {edge: {'channels': {channel: channel_capacity for channel in\n channel_names}, 'max_channel_capacity': channel_capacity}}\n nx.set_edge_attributes(network, attrs)\n\n\ndef add_edges_capacity_attrs(network, edges, channel_names,\n channel_capacity, bidirectional_links=True):\n \"\"\"Adds channels & max channel capacitys to single edge in network.\n \n To access e.g. the edge going from node 0 to node 1 (edge (0, 1)), you\n would index the network with network[0][1]\n\n To access e.g. the channel_1 attribute of this particular (0, 1) edge, you\n would do network[0][1]['channels']['channel_1']\n OR\n if bidirectional_links, you do network[0][1]['0_to_1_port']['channels']['channel_1']\n or network[0][1]['1_to_0_port']['channels']['channel_1] depending on which direction\n of the link you want to access.\n \n Args:\n network (networkx graph): Network containing edges to which attrs will\n be added.\n edges (list): List of node pairs in tuples.\n channel_names (list of str): List of channel names to add to edge.\n channel_capacity (int, float): Capacity to allocate to each channel.\n bidirectional_links (bool): If True, each link has capacity split equally\n between src and dst port. I.e. 
all links have a src and dst port\n which are treated separately to incoming and outgoing traffic to and\n from given node (switch or server).\n\n \"\"\"\n if bidirectional_links:\n attrs = {edge: {'{}_to_{}_port'.format(edge[0], edge[1]): {\n 'channels': {channel: (channel_capacity / 2) for channel in\n channel_names}, 'max_channel_capacity': channel_capacity / 2},\n '{}_to_{}_port'.format(edge[1], edge[0]): {'channels': {channel:\n (channel_capacity / 2) for channel in channel_names},\n 'max_channel_capacity': channel_capacity / 2}} for edge in edges}\n else:\n attrs = {edge: {'channels': {channel: channel_capacity for channel in\n channel_names}, 'max_channel_capacity': channel_capacity} for\n edge in edges}\n nx.set_edge_attributes(network, attrs)\n\n\ndef get_node_type_dict(network, node_types=[]):\n \"\"\"Gets dict where keys are node types, values are list of nodes for each node type in graph.\"\"\"\n network_nodes = []\n for network_node in network.nodes:\n network_nodes.append(network_node)\n network_nodes_dict = {node_type: [] for node_type in node_types}\n for n in network_nodes:\n for node_type in node_types:\n if node_type in n:\n network_nodes_dict[node_type].append(n)\n else:\n pass\n return network_nodes_dict\n\n\ndef get_fat_tree_positions(net, width_scale=500, height_scale=10):\n \"\"\"Gets networkx positions of nodes in fat tree network for plotting.\"\"\"\n pos = {}\n node_type_dict = get_node_type_dict(net, net.graph['node_labels'])\n node_types = list(node_type_dict.keys())\n heights = {}\n widths = {}\n h = iter([1, 2, 3, 4, 5])\n for node_type in node_types:\n heights[node_type] = next(h)\n widths[node_type] = 1 / (len(node_type_dict[node_type]) + 1)\n idx = 0\n for node in node_type_dict[node_type]:\n pos[node] = (idx + 1) * widths[node_type] * width_scale, heights[\n node_type] * height_scale\n idx += 1\n return pos\n\n\n<mask token>\n\n\ndef plot_network(network, draw_node_labels=True, ep_label='server',\n network_node_size=2000, 
font_size=30, linewidths=1, fig_scale=2,\n path_to_save=None, show_fig=False):\n \"\"\"Plots networkx graph.\n\n Recognises special fat tree network and applies appropriate node positioning,\n labelling, colouring etc.\n\n Args:\n network (networkx graph): Network object to be plotted.\n draw_node_labels (bool): Whether or not to draw node labels on plot. \n ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have\n ep_label appended to the start of their label (e.g. 'server_0', 'server_1', ...).\n network_node_size (int,float): Size of plotted nodes.\n font_size (int,float): Size of of font of plotted labels etc.\n linewidths (int,float): Width of edges in network.\n fig_scale (int,float): Scaling factor to apply to plotted network.\n path_to_save (str): Path to directory (with file name included) in which\n to save generated plot. E.g. path_to_save='data/my_plot'\n show_fig (bool): Whether or not to plot and show fig. If True, will\n return and display fig.\n \n Returns:\n matplotlib.figure.Figure: node distribution plotted as a 2d matrix. 
\n\n \"\"\"\n net_node_positions = init_network_node_positions(copy.deepcopy(network))\n fig = plt.figure(figsize=[15 * fig_scale, 15 * fig_scale])\n pos = {}\n network_nodes = []\n network_nodes_dict = get_node_type_dict(network, network.graph[\n 'node_labels'])\n for nodes in list(network_nodes_dict.values()):\n for network_node in nodes:\n pos[network_node] = net_node_positions[network_node]\n node_colours = iter(['#25c44d', '#36a0c7', '#e8b017', '#6115a3', '#160e63']\n )\n for node_type in network.graph['node_labels']:\n nx.draw_networkx_nodes(network, pos, nodelist=network_nodes_dict[\n node_type], node_size=network_node_size, node_color=next(\n node_colours), linewidths=linewidths, label=node_type)\n if draw_node_labels:\n nx.draw_networkx_labels(network, pos, font_size=font_size,\n font_color='k', font_family='sans-serif', font_weight='normal',\n alpha=1.0)\n fibre_links = list(network.edges)\n nx.draw_networkx_edges(network, pos, edgelist=fibre_links, edge_color=\n 'k', width=3, label='Fibre link')\n if path_to_save is not None:\n tools.pickle_data(path_to_save, fig)\n if show_fig:\n plt.show()\n return fig\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef gen_arbitrary_network(num_eps, ep_label=None, ep_capacity=12500,\n num_channels=1, racks_dict=None, topology_type=None):\n \"\"\"Generates an arbitrary network with num_eps nodes labelled as ep_label.\n\n Note that no edges are formed in this network; it is purely for ep name \n indexing purposes when using Demand class. This is useful where want to\n use the demand class but not necessarily with a carefully crafted networkx\n graph that accurately mimics the network you will use for the demands\n\n Args:\n num_eps (int): Number of endpoints in network.\n ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have\n ep_label appended to the start of their label (e.g. 'server_0', 'server_1', ...).\n ep_capacity (int, float): Byte capacity per end point channel.\n num_channels (int, float): Number of channels on each link in network.\n racks_dict (dict): Mapping of which end points are in which racks. Keys are\n rack ids, values are list of end points. 
If None, assume there is not\n clustering/rack system in the network where have different end points\n in different clusters/racks.\n\n Returns:\n networkx graph: network object\n\n \"\"\"\n network = nx.Graph()\n network.add_nodes_from([node for node in range(num_eps)])\n if ep_label is None:\n servers = [str(i) for i in range(num_eps)]\n else:\n servers = [(ep_label + '_' + str(i)) for i in range(num_eps)]\n relabel_mapping = {node: label for node, label in zip(range(num_eps),\n servers)}\n network = nx.relabel_nodes(network, relabel_mapping)\n eps = []\n for node in list(network.nodes):\n try:\n if ep_label in node:\n eps.append(node)\n except TypeError:\n eps.append(node)\n network.graph['endpoints'] = eps\n max_nw_capacity = num_eps * ep_capacity * num_channels / 2\n if topology_type is None:\n topology_type = 'arbitrary_endpoints_{}_chancap_{}_channels_{}'.format(\n num_eps, ep_capacity, num_channels)\n init_global_network_attrs(network, max_nw_capacity, num_channels,\n ep_link_capacity=ep_capacity * num_channels, endpoint_label=\n ep_label, node_labels=[ep_label], racks_dict=racks_dict,\n topology_type=topology_type)\n return network\n\n\ndef gen_nsfnet_network(ep_label='server', rack_label='rack', N=0,\n num_channels=2, server_to_rack_channel_capacity=1,\n rack_to_rack_channel_capacity=10, show_fig=False):\n \"\"\"Generates the standard 14-node NSFNET topology (a U.S. core network).\n \n Args:\n ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have\n ep_label appended to the start of their label (e.g. 'server_0', 'server_1', ...).\n N (int): Number of servers per rack. 
If 0, assume all nodes in nsfnet\n are endpoints\n num_channels (int,float): Number of channels on each link in network.\n server_to_rack_channel_capacity (int,float): Byte capacity per channel \n between servers and ToR switch.\n rack_to_rack_channel_capacity (int,float): Byte capacity per channel between racks.\n show_fig (bool): Whether or not to plot and show fig. If True, will\n display fig.\n\n Returns:\n networkx graph: network object\n\n \"\"\"\n channel_names = gen_channel_names(num_channels)\n network = nx.Graph()\n node_pair_list = [[0, 1], [0, 3], [0, 2], [1, 2], [1, 7], [3, 8], [3, 4\n ], [3, 6], [4, 5], [4, 5], [5, 2], [5, 13], [5, 12], [6, 7], [7, 10\n ], [8, 11], [8, 9], [9, 10], [9, 12], [10, 11], [10, 13], [11, 12]]\n if N == 0:\n label = ep_label\n else:\n label = rack_label\n for idx in range(len(node_pair_list)):\n node_pair_list[idx][0] = label + '_' + str(node_pair_list[idx][0])\n node_pair_list[idx][1] = label + '_' + str(node_pair_list[idx][1])\n for edge in node_pair_list:\n network.add_edge(*tuple(edge))\n if N == 0:\n racks_dict = None\n else:\n i = 0\n racks_dict = {rack: [] for rack in range(14)}\n for rack in range(14):\n for server in range(N):\n racks_dict[rack].append(ep_label + '_' + str(i))\n network.add_edge(ep_label + '_' + str(i), rack_label + '_' +\n str(rack))\n i += 1\n channel_names = gen_channel_names(num_channels)\n edges = [edge for edge in network.edges]\n add_edges_capacity_attrs(network, edges, channel_names,\n rack_to_rack_channel_capacity)\n network.graph['endpoints'] = get_endpoints(network, ep_label)\n max_nw_capacity = len(network.edges\n ) * num_channels * rack_to_rack_channel_capacity / 2\n init_global_network_attrs(network, max_nw_capacity, num_channels,\n ep_link_capacity=server_to_rack_channel_capacity * num_channels,\n endpoint_label=ep_label, node_labels=[ep_label, rack_label],\n topology_type='14_node_nsfnet', racks_dict=racks_dict)\n if show_fig:\n plot_network(network, show_fig=True)\n return 
network\n\n\ndef gen_simple_network(ep_label='server', num_channels=2,\n server_to_rack_channel_capacity=500, show_fig=False):\n \"\"\"Generates very simple 5-node topology.\n\n Args:\n ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have\n ep_label appended to the start of their label (e.g. 'server_0', 'server_1', ...).\n num_channels (int,float): Number of channels on each link in network.\n channel_capacity (int,float): Byte capacity per channel.\n show_fig (bool): Whether or not to plot and show fig. If True, will\n display fig.\n\n Returns:\n networkx graph: network object\n\n \"\"\"\n network = nx.Graph()\n network.add_nodes_from([node for node in range(5)])\n network.add_edges_from([(0, 1), (0, 2), (1, 2), (2, 4), (4, 3), (3, 1)],\n weight=1)\n servers = [(ep_label + '_' + str(i)) for i in range(5)]\n relabel_mapping = {node: label for node, label in zip(range(5), servers)}\n network = nx.relabel_nodes(network, relabel_mapping)\n channel_names = gen_channel_names(num_channels)\n edges = [edge for edge in network.edges]\n add_edges_capacity_attrs(network, edges, channel_names,\n server_to_rack_channel_capacity)\n network.graph['endpoints'] = get_endpoints(network, ep_label)\n max_nw_capacity = len(network.edges\n ) * num_channels * server_to_rack_channel_capacity / 2\n init_global_network_attrs(network, max_nw_capacity, num_channels,\n ep_link_capacity=server_to_rack_channel_capacity * num_channels,\n endpoint_label=ep_label, node_labels=[ep_label], topology_type=\n '5_node_simple_network')\n if show_fig:\n plot_network(network, show_fig=True)\n return network\n\n\ndef get_endpoints(network, ep_label):\n \"\"\"Gets list of endpoints of network.\n\n Args:\n network (networkx graph): Networkx object.\n ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have\n ep_label appended to the start of their label (e.g. 
'server_0', 'server_1', ...).\n\n Returns:\n eps (list): List of endpoints.\n\n \"\"\"\n eps = []\n for node in list(network.nodes):\n if ep_label in node:\n eps.append(node)\n return eps\n\n\ndef gen_fat_tree(k=4, L=2, n=4, ep_label='server', rack_label='rack',\n edge_label='edge', aggregate_label='agg', core_label='core',\n num_channels=2, server_to_rack_channel_capacity=500,\n rack_to_edge_channel_capacity=1000, edge_to_agg_channel_capacity=1000,\n agg_to_core_channel_capacity=2000, rack_to_core_channel_capacity=2000,\n show_fig=False):\n \"\"\"Generates a perfect fat tree (i.e. all layers have switches with same radix/number of ports).\n\n Top layer is always core (spine) switch layer, bottom layer is always\n ToR (leaf) layer.\n\n L must be either 2 (core, ToR) or 4 (core, agg, edge, ToR)\n\n N.B. L=2 is commonly referred to as '2-layer Clos' or 'Clos' or 'spine-leaf' topology\n\n Resource for building (scroll down to summary table with equations):\n\n https://packetpushers.net/demystifying-dcn-topologies-clos-fat-trees-part2/\n\n Another good resource for data centre topologies etc. 
in general:\n\n https://www.oreilly.com/library/view/bgp-in-the/9781491983416/ch01.html#:~:text=The%20most%20common%20routing%20protocol,single%20data%20center%2C%20as%20well.\n\n Parameters of network:\n\n - number of core (spine) switches = (k/2)^(L/2) (top layer)\n - number of edge switches (if L=4) = (k^2)/2\n - number of agg switches (if L=4) = (k^2)/2\n - number of pods (if L=4) (pod is a group of agg and/or edge switches) = 2*(k/2)^(L-2)\n - number of ToR (leaf) switches (racks) = 2*(k/2)^(L-1) (bottom layer)\n - number of server-facing ToR 'host' ports = 2*(k/2)^2 (can have multiple servers connected to same host port, & can oversubscribe)\n - number of servers = number ToR switches * n\n\n Args:\n k (int): Number of ports (links) on each switch (both up and down).\n L (int): Number of layers in the fat tree.\n n (int): Number of server per rack.\n ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have\n ep_label appended to the start of their label (e.g. 
'server_0', 'server_1', ...).\n edge_label (str,int): Label to assign to edge switch nodes\n aggregate_label (str,int): Label to assign to edge switch nodes\n core_label (str,int): Label to assign to core switch nodes\n num_channels (int, float): Number of channels on each link in network\n server_to_edge_channel_capacity (int,float): Byte capacity per channel\n edge_to_agg_channel_capacity (int,float): (if L==4) Byte capacity per channel\n agg_to_core_channel_capacity (int,float): (if L==4) Byte capacity per channel\n rack_to_core_channel_capacity (int,float): (if L==2) Byte capacity per channel\n\n Returns:\n networkx graph: network object\n\n \"\"\"\n if L != 2 and L != 4:\n raise Exception(\n 'L must be 2 (ToR layer, core layer) or 4 (ToR layer, edge layer, agg layer, core layer), but is {}.'\n .format(L))\n if k % 2 != 0:\n raise Exception(\n 'k must be even since, in perfect fat tree, have equal number of up and down ports on each switch, but is {}.'\n .format(k))\n channel_names = gen_channel_names(num_channels)\n if L == 2:\n node_labels = [ep_label, rack_label, core_label]\n else:\n node_labels = [ep_label, rack_label, edge_label, aggregate_label,\n core_label]\n num_cores = int((k / 2) ** (L / 2))\n num_aggs = int(k ** 2 / 2)\n num_edges = int(k ** 2 / 2)\n num_pods = int(2 * (k / 2) ** (L - 2))\n num_racks = int(2 * (k / 2) ** (L - 1))\n num_servers = int(num_racks * n)\n cores = [(core_label + '_' + str(i)) for i in range(num_cores)]\n aggs = [(aggregate_label + '_' + str(i)) for i in range(num_aggs)]\n edges = [(edge_label + '_' + str(i)) for i in range(num_edges)]\n racks = [(rack_label + '_' + str(i)) for i in range(num_racks)]\n servers = [(ep_label + '_' + str(i)) for i in range(num_servers)]\n core_layer = nx.Graph()\n rack_layer = nx.Graph()\n core_layer.add_nodes_from(cores)\n rack_layer.add_nodes_from(racks)\n fat_tree_network = nx.compose(core_layer, rack_layer)\n if L == 2:\n rack_iterator = iter(racks)\n for rack in racks:\n core_iterator = 
iter(cores)\n for up_port in range(int(k / 2)):\n core = next(core_iterator)\n fat_tree_network.add_edge(rack, core)\n add_edge_capacity_attrs(fat_tree_network, (rack, core),\n channel_names, rack_to_core_channel_capacity)\n else:\n num_pods = int(k)\n pods = [[] for i in range(num_pods)]\n prev_iter = 0\n for pod_iter in range(len(pods)):\n curr_iter = int(prev_iter + k / 2)\n pods[pod_iter].append(edges[prev_iter:curr_iter])\n pods[pod_iter].append(aggs[prev_iter:curr_iter])\n prev_iter = curr_iter\n pod_labels = [('pod_' + str(i)) for i in range(num_pods)]\n pods_dict = {tuple([pod]): nx.Graph() for pod in pod_labels}\n for pod_iter in range(num_pods):\n key = 'pod_' + str(pod_iter),\n pod_edges = pods[pod_iter][0]\n pod_aggs = pods[pod_iter][1]\n pods_dict[key].add_nodes_from(pod_edges)\n pods_dict[key].add_nodes_from(pod_aggs)\n for pod_edge in pod_edges:\n for pod_agg in pod_aggs:\n pods_dict[key].add_edge(pod_agg, pod_edge)\n add_edge_capacity_attrs(pods_dict[key], (pod_agg,\n pod_edge), channel_names, edge_to_agg_channel_capacity)\n pod_networks = list(pods_dict.values())\n for pod_iter in range(num_pods):\n fat_tree_network = nx.compose(fat_tree_network, pod_networks[\n pod_iter])\n for pod_iter in range(num_pods):\n pod_aggs = pods[pod_iter][1]\n core_iterator = iter(cores)\n for pod_agg in pod_aggs:\n while fat_tree_network.degree[pod_agg] < k:\n core = next(core_iterator)\n fat_tree_network.add_edge(core, pod_agg)\n add_edge_capacity_attrs(fat_tree_network, (core,\n pod_agg), channel_names, agg_to_core_channel_capacity)\n rack_iterator = iter(racks)\n for pod_iter in range(num_pods):\n pod_edges = pods[pod_iter][0]\n for pod_edge in pod_edges:\n while fat_tree_network.degree[pod_edge] < k:\n rack = next(rack_iterator)\n fat_tree_network.add_edge(pod_edge, rack)\n add_edge_capacity_attrs(fat_tree_network, (pod_edge,\n rack), channel_names, rack_to_edge_channel_capacity)\n racks_dict = {rack: [] for rack in racks}\n server_iterator = iter(servers)\n for 
rack in racks:\n for _ in range(n):\n server = next(server_iterator)\n fat_tree_network.add_edge(rack, server)\n add_edge_capacity_attrs(fat_tree_network, (rack, server),\n channel_names, server_to_rack_channel_capacity)\n racks_dict[rack].append(server)\n max_nw_capacity = (num_servers * num_channels *\n server_to_rack_channel_capacity / 2)\n fat_tree_network.graph['endpoints'] = servers\n init_global_network_attrs(fat_tree_network, max_nw_capacity,\n num_channels, ep_link_capacity=server_to_rack_channel_capacity *\n num_channels, endpoint_label=ep_label, node_labels=node_labels,\n topology_type='fat_tree', racks_dict=racks_dict)\n if show_fig:\n plot_network(fat_tree_network, show_fig=True)\n return fat_tree_network\n\n\ndef init_global_network_attrs(network, max_nw_capacity, num_channels,\n ep_link_capacity, endpoint_label='server', topology_type='unknown',\n node_labels=['server'], racks_dict=None):\n \"\"\"Initialises the standard global network attributes of a given network.\n\n Args:\n network (obj): NetworkX object.\n max_nw_capacity (int/float): Maximum rate at which info can be reliably \n transmitted over the network (sum of all link capacities).\n num_channels (int): Number of channels on each link in network.\n topology_type (str): Label of network topology (e.g. 'fat_tree').\n node_labels (list): Label classes assigned to network nodes \n (e.g. ['server', 'rack', 'edge']).\n racks_dict (dict): Which servers/endpoints are in which rack. 
If None,\n assume do not have rack system where have multiple servers in one\n rack.\n\n \"\"\"\n network.graph['endpoint_label'] = endpoint_label\n network.graph['num_channels_per_link'] = num_channels\n network.graph['ep_link_capacity'] = ep_link_capacity\n network.graph['ep_link_port_capacity'] = ep_link_capacity / 2\n network.graph['max_nw_capacity'] = max_nw_capacity\n network.graph['curr_nw_capacity_used'] = 0\n network.graph['num_active_connections'] = 0\n network.graph['total_connections_blocked'] = 0\n network.graph['node_labels'] = node_labels\n network.graph['topology_type'] = topology_type\n network.graph['channel_names'] = gen_channel_names(num_channels)\n if racks_dict is not None:\n _racks_dict = {}\n for key, val in racks_dict.items():\n _racks_dict[str(key)] = []\n for v in val:\n _racks_dict[str(key)].append(str(v))\n network.graph['rack_to_ep_dict'] = _racks_dict\n else:\n network.graph['rack_to_ep_dict'] = None\n if racks_dict is not None:\n ep_to_rack_dict = {}\n for key, val in _racks_dict.items():\n for v in val:\n if v not in ep_to_rack_dict.keys():\n ep_to_rack_dict[v] = key\n network.graph['ep_to_rack_dict'] = ep_to_rack_dict\n else:\n network.graph['ep_to_rack_dict'] = None\n\n\n<mask token>\n\n\ndef add_edge_capacity_attrs(network, edge, channel_names, channel_capacity,\n bidirectional_links=True):\n \"\"\"Adds channels and corresponding max channel bytes to single edge in network.\n \n Args:\n network (networkx graph): Network containing edges to whiich attrs will\n be added.\n edge (tuple): Node-node edge pair.\n channel_names (list): List of channel names to add to edge.\n channel_capacity (int,float): Capacity to allocate to each channel.\n bidirectional_links (bool): If True, each link has capacity split equally\n between src and dst port. I.e. 
all links have a src and dst port\n which are treated separately to incoming and outgoing traffic to and\n from given node (switch or server).\n\n \"\"\"\n if bidirectional_links:\n attrs = {edge: {'{}_to_{}_port'.format(edge[0], edge[1]): {\n 'channels': {channel: (channel_capacity / 2) for channel in\n channel_names}, 'max_channel_capacity': channel_capacity / 2},\n '{}_to_{}_port'.format(edge[1], edge[0]): {'channels': {channel:\n (channel_capacity / 2) for channel in channel_names},\n 'max_channel_capacity': channel_capacity / 2}}}\n else:\n attrs = {edge: {'channels': {channel: channel_capacity for channel in\n channel_names}, 'max_channel_capacity': channel_capacity}}\n nx.set_edge_attributes(network, attrs)\n\n\ndef add_edges_capacity_attrs(network, edges, channel_names,\n channel_capacity, bidirectional_links=True):\n \"\"\"Adds channels & max channel capacitys to single edge in network.\n \n To access e.g. the edge going from node 0 to node 1 (edge (0, 1)), you\n would index the network with network[0][1]\n\n To access e.g. the channel_1 attribute of this particular (0, 1) edge, you\n would do network[0][1]['channels']['channel_1']\n OR\n if bidirectional_links, you do network[0][1]['0_to_1_port']['channels']['channel_1']\n or network[0][1]['1_to_0_port']['channels']['channel_1] depending on which direction\n of the link you want to access.\n \n Args:\n network (networkx graph): Network containing edges to which attrs will\n be added.\n edges (list): List of node pairs in tuples.\n channel_names (list of str): List of channel names to add to edge.\n channel_capacity (int, float): Capacity to allocate to each channel.\n bidirectional_links (bool): If True, each link has capacity split equally\n between src and dst port. I.e. 
all links have a src and dst port\n which are treated separately to incoming and outgoing traffic to and\n from given node (switch or server).\n\n \"\"\"\n if bidirectional_links:\n attrs = {edge: {'{}_to_{}_port'.format(edge[0], edge[1]): {\n 'channels': {channel: (channel_capacity / 2) for channel in\n channel_names}, 'max_channel_capacity': channel_capacity / 2},\n '{}_to_{}_port'.format(edge[1], edge[0]): {'channels': {channel:\n (channel_capacity / 2) for channel in channel_names},\n 'max_channel_capacity': channel_capacity / 2}} for edge in edges}\n else:\n attrs = {edge: {'channels': {channel: channel_capacity for channel in\n channel_names}, 'max_channel_capacity': channel_capacity} for\n edge in edges}\n nx.set_edge_attributes(network, attrs)\n\n\ndef get_node_type_dict(network, node_types=[]):\n \"\"\"Gets dict where keys are node types, values are list of nodes for each node type in graph.\"\"\"\n network_nodes = []\n for network_node in network.nodes:\n network_nodes.append(network_node)\n network_nodes_dict = {node_type: [] for node_type in node_types}\n for n in network_nodes:\n for node_type in node_types:\n if node_type in n:\n network_nodes_dict[node_type].append(n)\n else:\n pass\n return network_nodes_dict\n\n\ndef get_fat_tree_positions(net, width_scale=500, height_scale=10):\n \"\"\"Gets networkx positions of nodes in fat tree network for plotting.\"\"\"\n pos = {}\n node_type_dict = get_node_type_dict(net, net.graph['node_labels'])\n node_types = list(node_type_dict.keys())\n heights = {}\n widths = {}\n h = iter([1, 2, 3, 4, 5])\n for node_type in node_types:\n heights[node_type] = next(h)\n widths[node_type] = 1 / (len(node_type_dict[node_type]) + 1)\n idx = 0\n for node in node_type_dict[node_type]:\n pos[node] = (idx + 1) * widths[node_type] * width_scale, heights[\n node_type] * height_scale\n idx += 1\n return pos\n\n\ndef init_network_node_positions(net):\n \"\"\"Initialises network node positions for plotting.\"\"\"\n if 
net.graph['topology_type'] == 'fat_tree':\n pos = get_fat_tree_positions(net)\n else:\n pos = nx.nx_agraph.graphviz_layout(net, prog='neato')\n return pos\n\n\ndef plot_network(network, draw_node_labels=True, ep_label='server',\n network_node_size=2000, font_size=30, linewidths=1, fig_scale=2,\n path_to_save=None, show_fig=False):\n \"\"\"Plots networkx graph.\n\n Recognises special fat tree network and applies appropriate node positioning,\n labelling, colouring etc.\n\n Args:\n network (networkx graph): Network object to be plotted.\n draw_node_labels (bool): Whether or not to draw node labels on plot. \n ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have\n ep_label appended to the start of their label (e.g. 'server_0', 'server_1', ...).\n network_node_size (int,float): Size of plotted nodes.\n font_size (int,float): Size of of font of plotted labels etc.\n linewidths (int,float): Width of edges in network.\n fig_scale (int,float): Scaling factor to apply to plotted network.\n path_to_save (str): Path to directory (with file name included) in which\n to save generated plot. E.g. path_to_save='data/my_plot'\n show_fig (bool): Whether or not to plot and show fig. If True, will\n return and display fig.\n \n Returns:\n matplotlib.figure.Figure: node distribution plotted as a 2d matrix. 
\n\n \"\"\"\n net_node_positions = init_network_node_positions(copy.deepcopy(network))\n fig = plt.figure(figsize=[15 * fig_scale, 15 * fig_scale])\n pos = {}\n network_nodes = []\n network_nodes_dict = get_node_type_dict(network, network.graph[\n 'node_labels'])\n for nodes in list(network_nodes_dict.values()):\n for network_node in nodes:\n pos[network_node] = net_node_positions[network_node]\n node_colours = iter(['#25c44d', '#36a0c7', '#e8b017', '#6115a3', '#160e63']\n )\n for node_type in network.graph['node_labels']:\n nx.draw_networkx_nodes(network, pos, nodelist=network_nodes_dict[\n node_type], node_size=network_node_size, node_color=next(\n node_colours), linewidths=linewidths, label=node_type)\n if draw_node_labels:\n nx.draw_networkx_labels(network, pos, font_size=font_size,\n font_color='k', font_family='sans-serif', font_weight='normal',\n alpha=1.0)\n fibre_links = list(network.edges)\n nx.draw_networkx_edges(network, pos, edgelist=fibre_links, edge_color=\n 'k', width=3, label='Fibre link')\n if path_to_save is not None:\n tools.pickle_data(path_to_save, fig)\n if show_fig:\n plt.show()\n return fig\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef gen_arbitrary_network(num_eps, ep_label=None, ep_capacity=12500,\n num_channels=1, racks_dict=None, topology_type=None):\n \"\"\"Generates an arbitrary network with num_eps nodes labelled as ep_label.\n\n Note that no edges are formed in this network; it is purely for ep name \n indexing purposes when using Demand class. This is useful where want to\n use the demand class but not necessarily with a carefully crafted networkx\n graph that accurately mimics the network you will use for the demands\n\n Args:\n num_eps (int): Number of endpoints in network.\n ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have\n ep_label appended to the start of their label (e.g. 'server_0', 'server_1', ...).\n ep_capacity (int, float): Byte capacity per end point channel.\n num_channels (int, float): Number of channels on each link in network.\n racks_dict (dict): Mapping of which end points are in which racks. Keys are\n rack ids, values are list of end points. 
If None, assume there is not\n clustering/rack system in the network where have different end points\n in different clusters/racks.\n\n Returns:\n networkx graph: network object\n\n \"\"\"\n network = nx.Graph()\n network.add_nodes_from([node for node in range(num_eps)])\n if ep_label is None:\n servers = [str(i) for i in range(num_eps)]\n else:\n servers = [(ep_label + '_' + str(i)) for i in range(num_eps)]\n relabel_mapping = {node: label for node, label in zip(range(num_eps),\n servers)}\n network = nx.relabel_nodes(network, relabel_mapping)\n eps = []\n for node in list(network.nodes):\n try:\n if ep_label in node:\n eps.append(node)\n except TypeError:\n eps.append(node)\n network.graph['endpoints'] = eps\n max_nw_capacity = num_eps * ep_capacity * num_channels / 2\n if topology_type is None:\n topology_type = 'arbitrary_endpoints_{}_chancap_{}_channels_{}'.format(\n num_eps, ep_capacity, num_channels)\n init_global_network_attrs(network, max_nw_capacity, num_channels,\n ep_link_capacity=ep_capacity * num_channels, endpoint_label=\n ep_label, node_labels=[ep_label], racks_dict=racks_dict,\n topology_type=topology_type)\n return network\n\n\ndef gen_nsfnet_network(ep_label='server', rack_label='rack', N=0,\n num_channels=2, server_to_rack_channel_capacity=1,\n rack_to_rack_channel_capacity=10, show_fig=False):\n \"\"\"Generates the standard 14-node NSFNET topology (a U.S. core network).\n \n Args:\n ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have\n ep_label appended to the start of their label (e.g. 'server_0', 'server_1', ...).\n N (int): Number of servers per rack. 
If 0, assume all nodes in nsfnet\n are endpoints\n num_channels (int,float): Number of channels on each link in network.\n server_to_rack_channel_capacity (int,float): Byte capacity per channel \n between servers and ToR switch.\n rack_to_rack_channel_capacity (int,float): Byte capacity per channel between racks.\n show_fig (bool): Whether or not to plot and show fig. If True, will\n display fig.\n\n Returns:\n networkx graph: network object\n\n \"\"\"\n channel_names = gen_channel_names(num_channels)\n network = nx.Graph()\n node_pair_list = [[0, 1], [0, 3], [0, 2], [1, 2], [1, 7], [3, 8], [3, 4\n ], [3, 6], [4, 5], [4, 5], [5, 2], [5, 13], [5, 12], [6, 7], [7, 10\n ], [8, 11], [8, 9], [9, 10], [9, 12], [10, 11], [10, 13], [11, 12]]\n if N == 0:\n label = ep_label\n else:\n label = rack_label\n for idx in range(len(node_pair_list)):\n node_pair_list[idx][0] = label + '_' + str(node_pair_list[idx][0])\n node_pair_list[idx][1] = label + '_' + str(node_pair_list[idx][1])\n for edge in node_pair_list:\n network.add_edge(*tuple(edge))\n if N == 0:\n racks_dict = None\n else:\n i = 0\n racks_dict = {rack: [] for rack in range(14)}\n for rack in range(14):\n for server in range(N):\n racks_dict[rack].append(ep_label + '_' + str(i))\n network.add_edge(ep_label + '_' + str(i), rack_label + '_' +\n str(rack))\n i += 1\n channel_names = gen_channel_names(num_channels)\n edges = [edge for edge in network.edges]\n add_edges_capacity_attrs(network, edges, channel_names,\n rack_to_rack_channel_capacity)\n network.graph['endpoints'] = get_endpoints(network, ep_label)\n max_nw_capacity = len(network.edges\n ) * num_channels * rack_to_rack_channel_capacity / 2\n init_global_network_attrs(network, max_nw_capacity, num_channels,\n ep_link_capacity=server_to_rack_channel_capacity * num_channels,\n endpoint_label=ep_label, node_labels=[ep_label, rack_label],\n topology_type='14_node_nsfnet', racks_dict=racks_dict)\n if show_fig:\n plot_network(network, show_fig=True)\n return 
network\n\n\ndef gen_simple_network(ep_label='server', num_channels=2,\n server_to_rack_channel_capacity=500, show_fig=False):\n \"\"\"Generates very simple 5-node topology.\n\n Args:\n ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have\n ep_label appended to the start of their label (e.g. 'server_0', 'server_1', ...).\n num_channels (int,float): Number of channels on each link in network.\n channel_capacity (int,float): Byte capacity per channel.\n show_fig (bool): Whether or not to plot and show fig. If True, will\n display fig.\n\n Returns:\n networkx graph: network object\n\n \"\"\"\n network = nx.Graph()\n network.add_nodes_from([node for node in range(5)])\n network.add_edges_from([(0, 1), (0, 2), (1, 2), (2, 4), (4, 3), (3, 1)],\n weight=1)\n servers = [(ep_label + '_' + str(i)) for i in range(5)]\n relabel_mapping = {node: label for node, label in zip(range(5), servers)}\n network = nx.relabel_nodes(network, relabel_mapping)\n channel_names = gen_channel_names(num_channels)\n edges = [edge for edge in network.edges]\n add_edges_capacity_attrs(network, edges, channel_names,\n server_to_rack_channel_capacity)\n network.graph['endpoints'] = get_endpoints(network, ep_label)\n max_nw_capacity = len(network.edges\n ) * num_channels * server_to_rack_channel_capacity / 2\n init_global_network_attrs(network, max_nw_capacity, num_channels,\n ep_link_capacity=server_to_rack_channel_capacity * num_channels,\n endpoint_label=ep_label, node_labels=[ep_label], topology_type=\n '5_node_simple_network')\n if show_fig:\n plot_network(network, show_fig=True)\n return network\n\n\ndef get_endpoints(network, ep_label):\n \"\"\"Gets list of endpoints of network.\n\n Args:\n network (networkx graph): Networkx object.\n ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have\n ep_label appended to the start of their label (e.g. 
'server_0', 'server_1', ...).\n\n Returns:\n eps (list): List of endpoints.\n\n \"\"\"\n eps = []\n for node in list(network.nodes):\n if ep_label in node:\n eps.append(node)\n return eps\n\n\ndef gen_fat_tree(k=4, L=2, n=4, ep_label='server', rack_label='rack',\n edge_label='edge', aggregate_label='agg', core_label='core',\n num_channels=2, server_to_rack_channel_capacity=500,\n rack_to_edge_channel_capacity=1000, edge_to_agg_channel_capacity=1000,\n agg_to_core_channel_capacity=2000, rack_to_core_channel_capacity=2000,\n show_fig=False):\n \"\"\"Generates a perfect fat tree (i.e. all layers have switches with same radix/number of ports).\n\n Top layer is always core (spine) switch layer, bottom layer is always\n ToR (leaf) layer.\n\n L must be either 2 (core, ToR) or 4 (core, agg, edge, ToR)\n\n N.B. L=2 is commonly referred to as '2-layer Clos' or 'Clos' or 'spine-leaf' topology\n\n Resource for building (scroll down to summary table with equations):\n\n https://packetpushers.net/demystifying-dcn-topologies-clos-fat-trees-part2/\n\n Another good resource for data centre topologies etc. 
in general:\n\n https://www.oreilly.com/library/view/bgp-in-the/9781491983416/ch01.html#:~:text=The%20most%20common%20routing%20protocol,single%20data%20center%2C%20as%20well.\n\n Parameters of network:\n\n - number of core (spine) switches = (k/2)^(L/2) (top layer)\n - number of edge switches (if L=4) = (k^2)/2\n - number of agg switches (if L=4) = (k^2)/2\n - number of pods (if L=4) (pod is a group of agg and/or edge switches) = 2*(k/2)^(L-2)\n - number of ToR (leaf) switches (racks) = 2*(k/2)^(L-1) (bottom layer)\n - number of server-facing ToR 'host' ports = 2*(k/2)^2 (can have multiple servers connected to same host port, & can oversubscribe)\n - number of servers = number ToR switches * n\n\n Args:\n k (int): Number of ports (links) on each switch (both up and down).\n L (int): Number of layers in the fat tree.\n n (int): Number of server per rack.\n ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have\n ep_label appended to the start of their label (e.g. 
'server_0', 'server_1', ...).\n edge_label (str,int): Label to assign to edge switch nodes\n aggregate_label (str,int): Label to assign to edge switch nodes\n core_label (str,int): Label to assign to core switch nodes\n num_channels (int, float): Number of channels on each link in network\n server_to_edge_channel_capacity (int,float): Byte capacity per channel\n edge_to_agg_channel_capacity (int,float): (if L==4) Byte capacity per channel\n agg_to_core_channel_capacity (int,float): (if L==4) Byte capacity per channel\n rack_to_core_channel_capacity (int,float): (if L==2) Byte capacity per channel\n\n Returns:\n networkx graph: network object\n\n \"\"\"\n if L != 2 and L != 4:\n raise Exception(\n 'L must be 2 (ToR layer, core layer) or 4 (ToR layer, edge layer, agg layer, core layer), but is {}.'\n .format(L))\n if k % 2 != 0:\n raise Exception(\n 'k must be even since, in perfect fat tree, have equal number of up and down ports on each switch, but is {}.'\n .format(k))\n channel_names = gen_channel_names(num_channels)\n if L == 2:\n node_labels = [ep_label, rack_label, core_label]\n else:\n node_labels = [ep_label, rack_label, edge_label, aggregate_label,\n core_label]\n num_cores = int((k / 2) ** (L / 2))\n num_aggs = int(k ** 2 / 2)\n num_edges = int(k ** 2 / 2)\n num_pods = int(2 * (k / 2) ** (L - 2))\n num_racks = int(2 * (k / 2) ** (L - 1))\n num_servers = int(num_racks * n)\n cores = [(core_label + '_' + str(i)) for i in range(num_cores)]\n aggs = [(aggregate_label + '_' + str(i)) for i in range(num_aggs)]\n edges = [(edge_label + '_' + str(i)) for i in range(num_edges)]\n racks = [(rack_label + '_' + str(i)) for i in range(num_racks)]\n servers = [(ep_label + '_' + str(i)) for i in range(num_servers)]\n core_layer = nx.Graph()\n rack_layer = nx.Graph()\n core_layer.add_nodes_from(cores)\n rack_layer.add_nodes_from(racks)\n fat_tree_network = nx.compose(core_layer, rack_layer)\n if L == 2:\n rack_iterator = iter(racks)\n for rack in racks:\n core_iterator = 
iter(cores)\n for up_port in range(int(k / 2)):\n core = next(core_iterator)\n fat_tree_network.add_edge(rack, core)\n add_edge_capacity_attrs(fat_tree_network, (rack, core),\n channel_names, rack_to_core_channel_capacity)\n else:\n num_pods = int(k)\n pods = [[] for i in range(num_pods)]\n prev_iter = 0\n for pod_iter in range(len(pods)):\n curr_iter = int(prev_iter + k / 2)\n pods[pod_iter].append(edges[prev_iter:curr_iter])\n pods[pod_iter].append(aggs[prev_iter:curr_iter])\n prev_iter = curr_iter\n pod_labels = [('pod_' + str(i)) for i in range(num_pods)]\n pods_dict = {tuple([pod]): nx.Graph() for pod in pod_labels}\n for pod_iter in range(num_pods):\n key = 'pod_' + str(pod_iter),\n pod_edges = pods[pod_iter][0]\n pod_aggs = pods[pod_iter][1]\n pods_dict[key].add_nodes_from(pod_edges)\n pods_dict[key].add_nodes_from(pod_aggs)\n for pod_edge in pod_edges:\n for pod_agg in pod_aggs:\n pods_dict[key].add_edge(pod_agg, pod_edge)\n add_edge_capacity_attrs(pods_dict[key], (pod_agg,\n pod_edge), channel_names, edge_to_agg_channel_capacity)\n pod_networks = list(pods_dict.values())\n for pod_iter in range(num_pods):\n fat_tree_network = nx.compose(fat_tree_network, pod_networks[\n pod_iter])\n for pod_iter in range(num_pods):\n pod_aggs = pods[pod_iter][1]\n core_iterator = iter(cores)\n for pod_agg in pod_aggs:\n while fat_tree_network.degree[pod_agg] < k:\n core = next(core_iterator)\n fat_tree_network.add_edge(core, pod_agg)\n add_edge_capacity_attrs(fat_tree_network, (core,\n pod_agg), channel_names, agg_to_core_channel_capacity)\n rack_iterator = iter(racks)\n for pod_iter in range(num_pods):\n pod_edges = pods[pod_iter][0]\n for pod_edge in pod_edges:\n while fat_tree_network.degree[pod_edge] < k:\n rack = next(rack_iterator)\n fat_tree_network.add_edge(pod_edge, rack)\n add_edge_capacity_attrs(fat_tree_network, (pod_edge,\n rack), channel_names, rack_to_edge_channel_capacity)\n racks_dict = {rack: [] for rack in racks}\n server_iterator = iter(servers)\n for 
rack in racks:\n for _ in range(n):\n server = next(server_iterator)\n fat_tree_network.add_edge(rack, server)\n add_edge_capacity_attrs(fat_tree_network, (rack, server),\n channel_names, server_to_rack_channel_capacity)\n racks_dict[rack].append(server)\n max_nw_capacity = (num_servers * num_channels *\n server_to_rack_channel_capacity / 2)\n fat_tree_network.graph['endpoints'] = servers\n init_global_network_attrs(fat_tree_network, max_nw_capacity,\n num_channels, ep_link_capacity=server_to_rack_channel_capacity *\n num_channels, endpoint_label=ep_label, node_labels=node_labels,\n topology_type='fat_tree', racks_dict=racks_dict)\n if show_fig:\n plot_network(fat_tree_network, show_fig=True)\n return fat_tree_network\n\n\ndef init_global_network_attrs(network, max_nw_capacity, num_channels,\n ep_link_capacity, endpoint_label='server', topology_type='unknown',\n node_labels=['server'], racks_dict=None):\n \"\"\"Initialises the standard global network attributes of a given network.\n\n Args:\n network (obj): NetworkX object.\n max_nw_capacity (int/float): Maximum rate at which info can be reliably \n transmitted over the network (sum of all link capacities).\n num_channels (int): Number of channels on each link in network.\n topology_type (str): Label of network topology (e.g. 'fat_tree').\n node_labels (list): Label classes assigned to network nodes \n (e.g. ['server', 'rack', 'edge']).\n racks_dict (dict): Which servers/endpoints are in which rack. 
If None,\n assume do not have rack system where have multiple servers in one\n rack.\n\n \"\"\"\n network.graph['endpoint_label'] = endpoint_label\n network.graph['num_channels_per_link'] = num_channels\n network.graph['ep_link_capacity'] = ep_link_capacity\n network.graph['ep_link_port_capacity'] = ep_link_capacity / 2\n network.graph['max_nw_capacity'] = max_nw_capacity\n network.graph['curr_nw_capacity_used'] = 0\n network.graph['num_active_connections'] = 0\n network.graph['total_connections_blocked'] = 0\n network.graph['node_labels'] = node_labels\n network.graph['topology_type'] = topology_type\n network.graph['channel_names'] = gen_channel_names(num_channels)\n if racks_dict is not None:\n _racks_dict = {}\n for key, val in racks_dict.items():\n _racks_dict[str(key)] = []\n for v in val:\n _racks_dict[str(key)].append(str(v))\n network.graph['rack_to_ep_dict'] = _racks_dict\n else:\n network.graph['rack_to_ep_dict'] = None\n if racks_dict is not None:\n ep_to_rack_dict = {}\n for key, val in _racks_dict.items():\n for v in val:\n if v not in ep_to_rack_dict.keys():\n ep_to_rack_dict[v] = key\n network.graph['ep_to_rack_dict'] = ep_to_rack_dict\n else:\n network.graph['ep_to_rack_dict'] = None\n\n\ndef gen_channel_names(num_channels):\n \"\"\"Generates channel names for channels on each link in network.\"\"\"\n channels = [(channel + 1) for channel in range(num_channels)]\n channel_names = [('channel_' + str(channel)) for channel in channels]\n return channel_names\n\n\ndef add_edge_capacity_attrs(network, edge, channel_names, channel_capacity,\n bidirectional_links=True):\n \"\"\"Adds channels and corresponding max channel bytes to single edge in network.\n \n Args:\n network (networkx graph): Network containing edges to whiich attrs will\n be added.\n edge (tuple): Node-node edge pair.\n channel_names (list): List of channel names to add to edge.\n channel_capacity (int,float): Capacity to allocate to each channel.\n bidirectional_links (bool): If True, 
each link has capacity split equally\n between src and dst port. I.e. all links have a src and dst port\n which are treated separately to incoming and outgoing traffic to and\n from given node (switch or server).\n\n \"\"\"\n if bidirectional_links:\n attrs = {edge: {'{}_to_{}_port'.format(edge[0], edge[1]): {\n 'channels': {channel: (channel_capacity / 2) for channel in\n channel_names}, 'max_channel_capacity': channel_capacity / 2},\n '{}_to_{}_port'.format(edge[1], edge[0]): {'channels': {channel:\n (channel_capacity / 2) for channel in channel_names},\n 'max_channel_capacity': channel_capacity / 2}}}\n else:\n attrs = {edge: {'channels': {channel: channel_capacity for channel in\n channel_names}, 'max_channel_capacity': channel_capacity}}\n nx.set_edge_attributes(network, attrs)\n\n\ndef add_edges_capacity_attrs(network, edges, channel_names,\n channel_capacity, bidirectional_links=True):\n \"\"\"Adds channels & max channel capacitys to single edge in network.\n \n To access e.g. the edge going from node 0 to node 1 (edge (0, 1)), you\n would index the network with network[0][1]\n\n To access e.g. the channel_1 attribute of this particular (0, 1) edge, you\n would do network[0][1]['channels']['channel_1']\n OR\n if bidirectional_links, you do network[0][1]['0_to_1_port']['channels']['channel_1']\n or network[0][1]['1_to_0_port']['channels']['channel_1] depending on which direction\n of the link you want to access.\n \n Args:\n network (networkx graph): Network containing edges to which attrs will\n be added.\n edges (list): List of node pairs in tuples.\n channel_names (list of str): List of channel names to add to edge.\n channel_capacity (int, float): Capacity to allocate to each channel.\n bidirectional_links (bool): If True, each link has capacity split equally\n between src and dst port. I.e. 
all links have a src and dst port\n which are treated separately to incoming and outgoing traffic to and\n from given node (switch or server).\n\n \"\"\"\n if bidirectional_links:\n attrs = {edge: {'{}_to_{}_port'.format(edge[0], edge[1]): {\n 'channels': {channel: (channel_capacity / 2) for channel in\n channel_names}, 'max_channel_capacity': channel_capacity / 2},\n '{}_to_{}_port'.format(edge[1], edge[0]): {'channels': {channel:\n (channel_capacity / 2) for channel in channel_names},\n 'max_channel_capacity': channel_capacity / 2}} for edge in edges}\n else:\n attrs = {edge: {'channels': {channel: channel_capacity for channel in\n channel_names}, 'max_channel_capacity': channel_capacity} for\n edge in edges}\n nx.set_edge_attributes(network, attrs)\n\n\ndef get_node_type_dict(network, node_types=[]):\n \"\"\"Gets dict where keys are node types, values are list of nodes for each node type in graph.\"\"\"\n network_nodes = []\n for network_node in network.nodes:\n network_nodes.append(network_node)\n network_nodes_dict = {node_type: [] for node_type in node_types}\n for n in network_nodes:\n for node_type in node_types:\n if node_type in n:\n network_nodes_dict[node_type].append(n)\n else:\n pass\n return network_nodes_dict\n\n\ndef get_fat_tree_positions(net, width_scale=500, height_scale=10):\n \"\"\"Gets networkx positions of nodes in fat tree network for plotting.\"\"\"\n pos = {}\n node_type_dict = get_node_type_dict(net, net.graph['node_labels'])\n node_types = list(node_type_dict.keys())\n heights = {}\n widths = {}\n h = iter([1, 2, 3, 4, 5])\n for node_type in node_types:\n heights[node_type] = next(h)\n widths[node_type] = 1 / (len(node_type_dict[node_type]) + 1)\n idx = 0\n for node in node_type_dict[node_type]:\n pos[node] = (idx + 1) * widths[node_type] * width_scale, heights[\n node_type] * height_scale\n idx += 1\n return pos\n\n\ndef init_network_node_positions(net):\n \"\"\"Initialises network node positions for plotting.\"\"\"\n if 
net.graph['topology_type'] == 'fat_tree':\n pos = get_fat_tree_positions(net)\n else:\n pos = nx.nx_agraph.graphviz_layout(net, prog='neato')\n return pos\n\n\ndef plot_network(network, draw_node_labels=True, ep_label='server',\n network_node_size=2000, font_size=30, linewidths=1, fig_scale=2,\n path_to_save=None, show_fig=False):\n \"\"\"Plots networkx graph.\n\n Recognises special fat tree network and applies appropriate node positioning,\n labelling, colouring etc.\n\n Args:\n network (networkx graph): Network object to be plotted.\n draw_node_labels (bool): Whether or not to draw node labels on plot. \n ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have\n ep_label appended to the start of their label (e.g. 'server_0', 'server_1', ...).\n network_node_size (int,float): Size of plotted nodes.\n font_size (int,float): Size of of font of plotted labels etc.\n linewidths (int,float): Width of edges in network.\n fig_scale (int,float): Scaling factor to apply to plotted network.\n path_to_save (str): Path to directory (with file name included) in which\n to save generated plot. E.g. path_to_save='data/my_plot'\n show_fig (bool): Whether or not to plot and show fig. If True, will\n return and display fig.\n \n Returns:\n matplotlib.figure.Figure: node distribution plotted as a 2d matrix. 
\n\n \"\"\"\n net_node_positions = init_network_node_positions(copy.deepcopy(network))\n fig = plt.figure(figsize=[15 * fig_scale, 15 * fig_scale])\n pos = {}\n network_nodes = []\n network_nodes_dict = get_node_type_dict(network, network.graph[\n 'node_labels'])\n for nodes in list(network_nodes_dict.values()):\n for network_node in nodes:\n pos[network_node] = net_node_positions[network_node]\n node_colours = iter(['#25c44d', '#36a0c7', '#e8b017', '#6115a3', '#160e63']\n )\n for node_type in network.graph['node_labels']:\n nx.draw_networkx_nodes(network, pos, nodelist=network_nodes_dict[\n node_type], node_size=network_node_size, node_color=next(\n node_colours), linewidths=linewidths, label=node_type)\n if draw_node_labels:\n nx.draw_networkx_labels(network, pos, font_size=font_size,\n font_color='k', font_family='sans-serif', font_weight='normal',\n alpha=1.0)\n fibre_links = list(network.edges)\n nx.draw_networkx_edges(network, pos, edgelist=fibre_links, edge_color=\n 'k', width=3, label='Fibre link')\n if path_to_save is not None:\n tools.pickle_data(path_to_save, fig)\n if show_fig:\n plt.show()\n return fig\n\n\n<mask token>\n",
"step-4": "<mask token>\nfrom trafpy.generator.src import tools\nimport copy\nimport networkx as nx\nimport matplotlib.pyplot as plt\nimport json\n\n\ndef gen_arbitrary_network(num_eps, ep_label=None, ep_capacity=12500,\n num_channels=1, racks_dict=None, topology_type=None):\n \"\"\"Generates an arbitrary network with num_eps nodes labelled as ep_label.\n\n Note that no edges are formed in this network; it is purely for ep name \n indexing purposes when using Demand class. This is useful where want to\n use the demand class but not necessarily with a carefully crafted networkx\n graph that accurately mimics the network you will use for the demands\n\n Args:\n num_eps (int): Number of endpoints in network.\n ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have\n ep_label appended to the start of their label (e.g. 'server_0', 'server_1', ...).\n ep_capacity (int, float): Byte capacity per end point channel.\n num_channels (int, float): Number of channels on each link in network.\n racks_dict (dict): Mapping of which end points are in which racks. Keys are\n rack ids, values are list of end points. 
If None, assume there is not\n clustering/rack system in the network where have different end points\n in different clusters/racks.\n\n Returns:\n networkx graph: network object\n\n \"\"\"\n network = nx.Graph()\n network.add_nodes_from([node for node in range(num_eps)])\n if ep_label is None:\n servers = [str(i) for i in range(num_eps)]\n else:\n servers = [(ep_label + '_' + str(i)) for i in range(num_eps)]\n relabel_mapping = {node: label for node, label in zip(range(num_eps),\n servers)}\n network = nx.relabel_nodes(network, relabel_mapping)\n eps = []\n for node in list(network.nodes):\n try:\n if ep_label in node:\n eps.append(node)\n except TypeError:\n eps.append(node)\n network.graph['endpoints'] = eps\n max_nw_capacity = num_eps * ep_capacity * num_channels / 2\n if topology_type is None:\n topology_type = 'arbitrary_endpoints_{}_chancap_{}_channels_{}'.format(\n num_eps, ep_capacity, num_channels)\n init_global_network_attrs(network, max_nw_capacity, num_channels,\n ep_link_capacity=ep_capacity * num_channels, endpoint_label=\n ep_label, node_labels=[ep_label], racks_dict=racks_dict,\n topology_type=topology_type)\n return network\n\n\ndef gen_nsfnet_network(ep_label='server', rack_label='rack', N=0,\n num_channels=2, server_to_rack_channel_capacity=1,\n rack_to_rack_channel_capacity=10, show_fig=False):\n \"\"\"Generates the standard 14-node NSFNET topology (a U.S. core network).\n \n Args:\n ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have\n ep_label appended to the start of their label (e.g. 'server_0', 'server_1', ...).\n N (int): Number of servers per rack. 
If 0, assume all nodes in nsfnet\n are endpoints\n num_channels (int,float): Number of channels on each link in network.\n server_to_rack_channel_capacity (int,float): Byte capacity per channel \n between servers and ToR switch.\n rack_to_rack_channel_capacity (int,float): Byte capacity per channel between racks.\n show_fig (bool): Whether or not to plot and show fig. If True, will\n display fig.\n\n Returns:\n networkx graph: network object\n\n \"\"\"\n channel_names = gen_channel_names(num_channels)\n network = nx.Graph()\n node_pair_list = [[0, 1], [0, 3], [0, 2], [1, 2], [1, 7], [3, 8], [3, 4\n ], [3, 6], [4, 5], [4, 5], [5, 2], [5, 13], [5, 12], [6, 7], [7, 10\n ], [8, 11], [8, 9], [9, 10], [9, 12], [10, 11], [10, 13], [11, 12]]\n if N == 0:\n label = ep_label\n else:\n label = rack_label\n for idx in range(len(node_pair_list)):\n node_pair_list[idx][0] = label + '_' + str(node_pair_list[idx][0])\n node_pair_list[idx][1] = label + '_' + str(node_pair_list[idx][1])\n for edge in node_pair_list:\n network.add_edge(*tuple(edge))\n if N == 0:\n racks_dict = None\n else:\n i = 0\n racks_dict = {rack: [] for rack in range(14)}\n for rack in range(14):\n for server in range(N):\n racks_dict[rack].append(ep_label + '_' + str(i))\n network.add_edge(ep_label + '_' + str(i), rack_label + '_' +\n str(rack))\n i += 1\n channel_names = gen_channel_names(num_channels)\n edges = [edge for edge in network.edges]\n add_edges_capacity_attrs(network, edges, channel_names,\n rack_to_rack_channel_capacity)\n network.graph['endpoints'] = get_endpoints(network, ep_label)\n max_nw_capacity = len(network.edges\n ) * num_channels * rack_to_rack_channel_capacity / 2\n init_global_network_attrs(network, max_nw_capacity, num_channels,\n ep_link_capacity=server_to_rack_channel_capacity * num_channels,\n endpoint_label=ep_label, node_labels=[ep_label, rack_label],\n topology_type='14_node_nsfnet', racks_dict=racks_dict)\n if show_fig:\n plot_network(network, show_fig=True)\n return 
network\n\n\ndef gen_simple_network(ep_label='server', num_channels=2,\n server_to_rack_channel_capacity=500, show_fig=False):\n \"\"\"Generates very simple 5-node topology.\n\n Args:\n ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have\n ep_label appended to the start of their label (e.g. 'server_0', 'server_1', ...).\n num_channels (int,float): Number of channels on each link in network.\n channel_capacity (int,float): Byte capacity per channel.\n show_fig (bool): Whether or not to plot and show fig. If True, will\n display fig.\n\n Returns:\n networkx graph: network object\n\n \"\"\"\n network = nx.Graph()\n network.add_nodes_from([node for node in range(5)])\n network.add_edges_from([(0, 1), (0, 2), (1, 2), (2, 4), (4, 3), (3, 1)],\n weight=1)\n servers = [(ep_label + '_' + str(i)) for i in range(5)]\n relabel_mapping = {node: label for node, label in zip(range(5), servers)}\n network = nx.relabel_nodes(network, relabel_mapping)\n channel_names = gen_channel_names(num_channels)\n edges = [edge for edge in network.edges]\n add_edges_capacity_attrs(network, edges, channel_names,\n server_to_rack_channel_capacity)\n network.graph['endpoints'] = get_endpoints(network, ep_label)\n max_nw_capacity = len(network.edges\n ) * num_channels * server_to_rack_channel_capacity / 2\n init_global_network_attrs(network, max_nw_capacity, num_channels,\n ep_link_capacity=server_to_rack_channel_capacity * num_channels,\n endpoint_label=ep_label, node_labels=[ep_label], topology_type=\n '5_node_simple_network')\n if show_fig:\n plot_network(network, show_fig=True)\n return network\n\n\ndef get_endpoints(network, ep_label):\n \"\"\"Gets list of endpoints of network.\n\n Args:\n network (networkx graph): Networkx object.\n ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have\n ep_label appended to the start of their label (e.g. 
'server_0', 'server_1', ...).\n\n Returns:\n eps (list): List of endpoints.\n\n \"\"\"\n eps = []\n for node in list(network.nodes):\n if ep_label in node:\n eps.append(node)\n return eps\n\n\ndef gen_fat_tree(k=4, L=2, n=4, ep_label='server', rack_label='rack',\n edge_label='edge', aggregate_label='agg', core_label='core',\n num_channels=2, server_to_rack_channel_capacity=500,\n rack_to_edge_channel_capacity=1000, edge_to_agg_channel_capacity=1000,\n agg_to_core_channel_capacity=2000, rack_to_core_channel_capacity=2000,\n show_fig=False):\n \"\"\"Generates a perfect fat tree (i.e. all layers have switches with same radix/number of ports).\n\n Top layer is always core (spine) switch layer, bottom layer is always\n ToR (leaf) layer.\n\n L must be either 2 (core, ToR) or 4 (core, agg, edge, ToR)\n\n N.B. L=2 is commonly referred to as '2-layer Clos' or 'Clos' or 'spine-leaf' topology\n\n Resource for building (scroll down to summary table with equations):\n\n https://packetpushers.net/demystifying-dcn-topologies-clos-fat-trees-part2/\n\n Another good resource for data centre topologies etc. 
in general:\n\n https://www.oreilly.com/library/view/bgp-in-the/9781491983416/ch01.html#:~:text=The%20most%20common%20routing%20protocol,single%20data%20center%2C%20as%20well.\n\n Parameters of network:\n\n - number of core (spine) switches = (k/2)^(L/2) (top layer)\n - number of edge switches (if L=4) = (k^2)/2\n - number of agg switches (if L=4) = (k^2)/2\n - number of pods (if L=4) (pod is a group of agg and/or edge switches) = 2*(k/2)^(L-2)\n - number of ToR (leaf) switches (racks) = 2*(k/2)^(L-1) (bottom layer)\n - number of server-facing ToR 'host' ports = 2*(k/2)^2 (can have multiple servers connected to same host port, & can oversubscribe)\n - number of servers = number ToR switches * n\n\n Args:\n k (int): Number of ports (links) on each switch (both up and down).\n L (int): Number of layers in the fat tree.\n n (int): Number of server per rack.\n ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have\n ep_label appended to the start of their label (e.g. 
'server_0', 'server_1', ...).\n edge_label (str,int): Label to assign to edge switch nodes\n aggregate_label (str,int): Label to assign to edge switch nodes\n core_label (str,int): Label to assign to core switch nodes\n num_channels (int, float): Number of channels on each link in network\n server_to_edge_channel_capacity (int,float): Byte capacity per channel\n edge_to_agg_channel_capacity (int,float): (if L==4) Byte capacity per channel\n agg_to_core_channel_capacity (int,float): (if L==4) Byte capacity per channel\n rack_to_core_channel_capacity (int,float): (if L==2) Byte capacity per channel\n\n Returns:\n networkx graph: network object\n\n \"\"\"\n if L != 2 and L != 4:\n raise Exception(\n 'L must be 2 (ToR layer, core layer) or 4 (ToR layer, edge layer, agg layer, core layer), but is {}.'\n .format(L))\n if k % 2 != 0:\n raise Exception(\n 'k must be even since, in perfect fat tree, have equal number of up and down ports on each switch, but is {}.'\n .format(k))\n channel_names = gen_channel_names(num_channels)\n if L == 2:\n node_labels = [ep_label, rack_label, core_label]\n else:\n node_labels = [ep_label, rack_label, edge_label, aggregate_label,\n core_label]\n num_cores = int((k / 2) ** (L / 2))\n num_aggs = int(k ** 2 / 2)\n num_edges = int(k ** 2 / 2)\n num_pods = int(2 * (k / 2) ** (L - 2))\n num_racks = int(2 * (k / 2) ** (L - 1))\n num_servers = int(num_racks * n)\n cores = [(core_label + '_' + str(i)) for i in range(num_cores)]\n aggs = [(aggregate_label + '_' + str(i)) for i in range(num_aggs)]\n edges = [(edge_label + '_' + str(i)) for i in range(num_edges)]\n racks = [(rack_label + '_' + str(i)) for i in range(num_racks)]\n servers = [(ep_label + '_' + str(i)) for i in range(num_servers)]\n core_layer = nx.Graph()\n rack_layer = nx.Graph()\n core_layer.add_nodes_from(cores)\n rack_layer.add_nodes_from(racks)\n fat_tree_network = nx.compose(core_layer, rack_layer)\n if L == 2:\n rack_iterator = iter(racks)\n for rack in racks:\n core_iterator = 
iter(cores)\n for up_port in range(int(k / 2)):\n core = next(core_iterator)\n fat_tree_network.add_edge(rack, core)\n add_edge_capacity_attrs(fat_tree_network, (rack, core),\n channel_names, rack_to_core_channel_capacity)\n else:\n num_pods = int(k)\n pods = [[] for i in range(num_pods)]\n prev_iter = 0\n for pod_iter in range(len(pods)):\n curr_iter = int(prev_iter + k / 2)\n pods[pod_iter].append(edges[prev_iter:curr_iter])\n pods[pod_iter].append(aggs[prev_iter:curr_iter])\n prev_iter = curr_iter\n pod_labels = [('pod_' + str(i)) for i in range(num_pods)]\n pods_dict = {tuple([pod]): nx.Graph() for pod in pod_labels}\n for pod_iter in range(num_pods):\n key = 'pod_' + str(pod_iter),\n pod_edges = pods[pod_iter][0]\n pod_aggs = pods[pod_iter][1]\n pods_dict[key].add_nodes_from(pod_edges)\n pods_dict[key].add_nodes_from(pod_aggs)\n for pod_edge in pod_edges:\n for pod_agg in pod_aggs:\n pods_dict[key].add_edge(pod_agg, pod_edge)\n add_edge_capacity_attrs(pods_dict[key], (pod_agg,\n pod_edge), channel_names, edge_to_agg_channel_capacity)\n pod_networks = list(pods_dict.values())\n for pod_iter in range(num_pods):\n fat_tree_network = nx.compose(fat_tree_network, pod_networks[\n pod_iter])\n for pod_iter in range(num_pods):\n pod_aggs = pods[pod_iter][1]\n core_iterator = iter(cores)\n for pod_agg in pod_aggs:\n while fat_tree_network.degree[pod_agg] < k:\n core = next(core_iterator)\n fat_tree_network.add_edge(core, pod_agg)\n add_edge_capacity_attrs(fat_tree_network, (core,\n pod_agg), channel_names, agg_to_core_channel_capacity)\n rack_iterator = iter(racks)\n for pod_iter in range(num_pods):\n pod_edges = pods[pod_iter][0]\n for pod_edge in pod_edges:\n while fat_tree_network.degree[pod_edge] < k:\n rack = next(rack_iterator)\n fat_tree_network.add_edge(pod_edge, rack)\n add_edge_capacity_attrs(fat_tree_network, (pod_edge,\n rack), channel_names, rack_to_edge_channel_capacity)\n racks_dict = {rack: [] for rack in racks}\n server_iterator = iter(servers)\n for 
rack in racks:\n for _ in range(n):\n server = next(server_iterator)\n fat_tree_network.add_edge(rack, server)\n add_edge_capacity_attrs(fat_tree_network, (rack, server),\n channel_names, server_to_rack_channel_capacity)\n racks_dict[rack].append(server)\n max_nw_capacity = (num_servers * num_channels *\n server_to_rack_channel_capacity / 2)\n fat_tree_network.graph['endpoints'] = servers\n init_global_network_attrs(fat_tree_network, max_nw_capacity,\n num_channels, ep_link_capacity=server_to_rack_channel_capacity *\n num_channels, endpoint_label=ep_label, node_labels=node_labels,\n topology_type='fat_tree', racks_dict=racks_dict)\n if show_fig:\n plot_network(fat_tree_network, show_fig=True)\n return fat_tree_network\n\n\ndef init_global_network_attrs(network, max_nw_capacity, num_channels,\n ep_link_capacity, endpoint_label='server', topology_type='unknown',\n node_labels=['server'], racks_dict=None):\n \"\"\"Initialises the standard global network attributes of a given network.\n\n Args:\n network (obj): NetworkX object.\n max_nw_capacity (int/float): Maximum rate at which info can be reliably \n transmitted over the network (sum of all link capacities).\n num_channels (int): Number of channels on each link in network.\n topology_type (str): Label of network topology (e.g. 'fat_tree').\n node_labels (list): Label classes assigned to network nodes \n (e.g. ['server', 'rack', 'edge']).\n racks_dict (dict): Which servers/endpoints are in which rack. 
If None,\n assume do not have rack system where have multiple servers in one\n rack.\n\n \"\"\"\n network.graph['endpoint_label'] = endpoint_label\n network.graph['num_channels_per_link'] = num_channels\n network.graph['ep_link_capacity'] = ep_link_capacity\n network.graph['ep_link_port_capacity'] = ep_link_capacity / 2\n network.graph['max_nw_capacity'] = max_nw_capacity\n network.graph['curr_nw_capacity_used'] = 0\n network.graph['num_active_connections'] = 0\n network.graph['total_connections_blocked'] = 0\n network.graph['node_labels'] = node_labels\n network.graph['topology_type'] = topology_type\n network.graph['channel_names'] = gen_channel_names(num_channels)\n if racks_dict is not None:\n _racks_dict = {}\n for key, val in racks_dict.items():\n _racks_dict[str(key)] = []\n for v in val:\n _racks_dict[str(key)].append(str(v))\n network.graph['rack_to_ep_dict'] = _racks_dict\n else:\n network.graph['rack_to_ep_dict'] = None\n if racks_dict is not None:\n ep_to_rack_dict = {}\n for key, val in _racks_dict.items():\n for v in val:\n if v not in ep_to_rack_dict.keys():\n ep_to_rack_dict[v] = key\n network.graph['ep_to_rack_dict'] = ep_to_rack_dict\n else:\n network.graph['ep_to_rack_dict'] = None\n\n\ndef gen_channel_names(num_channels):\n \"\"\"Generates channel names for channels on each link in network.\"\"\"\n channels = [(channel + 1) for channel in range(num_channels)]\n channel_names = [('channel_' + str(channel)) for channel in channels]\n return channel_names\n\n\ndef add_edge_capacity_attrs(network, edge, channel_names, channel_capacity,\n bidirectional_links=True):\n \"\"\"Adds channels and corresponding max channel bytes to single edge in network.\n \n Args:\n network (networkx graph): Network containing edges to whiich attrs will\n be added.\n edge (tuple): Node-node edge pair.\n channel_names (list): List of channel names to add to edge.\n channel_capacity (int,float): Capacity to allocate to each channel.\n bidirectional_links (bool): If True, 
each link has capacity split equally\n between src and dst port. I.e. all links have a src and dst port\n which are treated separately to incoming and outgoing traffic to and\n from given node (switch or server).\n\n \"\"\"\n if bidirectional_links:\n attrs = {edge: {'{}_to_{}_port'.format(edge[0], edge[1]): {\n 'channels': {channel: (channel_capacity / 2) for channel in\n channel_names}, 'max_channel_capacity': channel_capacity / 2},\n '{}_to_{}_port'.format(edge[1], edge[0]): {'channels': {channel:\n (channel_capacity / 2) for channel in channel_names},\n 'max_channel_capacity': channel_capacity / 2}}}\n else:\n attrs = {edge: {'channels': {channel: channel_capacity for channel in\n channel_names}, 'max_channel_capacity': channel_capacity}}\n nx.set_edge_attributes(network, attrs)\n\n\ndef add_edges_capacity_attrs(network, edges, channel_names,\n channel_capacity, bidirectional_links=True):\n \"\"\"Adds channels & max channel capacitys to single edge in network.\n \n To access e.g. the edge going from node 0 to node 1 (edge (0, 1)), you\n would index the network with network[0][1]\n\n To access e.g. the channel_1 attribute of this particular (0, 1) edge, you\n would do network[0][1]['channels']['channel_1']\n OR\n if bidirectional_links, you do network[0][1]['0_to_1_port']['channels']['channel_1']\n or network[0][1]['1_to_0_port']['channels']['channel_1] depending on which direction\n of the link you want to access.\n \n Args:\n network (networkx graph): Network containing edges to which attrs will\n be added.\n edges (list): List of node pairs in tuples.\n channel_names (list of str): List of channel names to add to edge.\n channel_capacity (int, float): Capacity to allocate to each channel.\n bidirectional_links (bool): If True, each link has capacity split equally\n between src and dst port. I.e. 
all links have a src and dst port\n which are treated separately to incoming and outgoing traffic to and\n from given node (switch or server).\n\n \"\"\"\n if bidirectional_links:\n attrs = {edge: {'{}_to_{}_port'.format(edge[0], edge[1]): {\n 'channels': {channel: (channel_capacity / 2) for channel in\n channel_names}, 'max_channel_capacity': channel_capacity / 2},\n '{}_to_{}_port'.format(edge[1], edge[0]): {'channels': {channel:\n (channel_capacity / 2) for channel in channel_names},\n 'max_channel_capacity': channel_capacity / 2}} for edge in edges}\n else:\n attrs = {edge: {'channels': {channel: channel_capacity for channel in\n channel_names}, 'max_channel_capacity': channel_capacity} for\n edge in edges}\n nx.set_edge_attributes(network, attrs)\n\n\ndef get_node_type_dict(network, node_types=[]):\n \"\"\"Gets dict where keys are node types, values are list of nodes for each node type in graph.\"\"\"\n network_nodes = []\n for network_node in network.nodes:\n network_nodes.append(network_node)\n network_nodes_dict = {node_type: [] for node_type in node_types}\n for n in network_nodes:\n for node_type in node_types:\n if node_type in n:\n network_nodes_dict[node_type].append(n)\n else:\n pass\n return network_nodes_dict\n\n\ndef get_fat_tree_positions(net, width_scale=500, height_scale=10):\n \"\"\"Gets networkx positions of nodes in fat tree network for plotting.\"\"\"\n pos = {}\n node_type_dict = get_node_type_dict(net, net.graph['node_labels'])\n node_types = list(node_type_dict.keys())\n heights = {}\n widths = {}\n h = iter([1, 2, 3, 4, 5])\n for node_type in node_types:\n heights[node_type] = next(h)\n widths[node_type] = 1 / (len(node_type_dict[node_type]) + 1)\n idx = 0\n for node in node_type_dict[node_type]:\n pos[node] = (idx + 1) * widths[node_type] * width_scale, heights[\n node_type] * height_scale\n idx += 1\n return pos\n\n\ndef init_network_node_positions(net):\n \"\"\"Initialises network node positions for plotting.\"\"\"\n if 
net.graph['topology_type'] == 'fat_tree':\n pos = get_fat_tree_positions(net)\n else:\n pos = nx.nx_agraph.graphviz_layout(net, prog='neato')\n return pos\n\n\ndef plot_network(network, draw_node_labels=True, ep_label='server',\n network_node_size=2000, font_size=30, linewidths=1, fig_scale=2,\n path_to_save=None, show_fig=False):\n \"\"\"Plots networkx graph.\n\n Recognises special fat tree network and applies appropriate node positioning,\n labelling, colouring etc.\n\n Args:\n network (networkx graph): Network object to be plotted.\n draw_node_labels (bool): Whether or not to draw node labels on plot. \n ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have\n ep_label appended to the start of their label (e.g. 'server_0', 'server_1', ...).\n network_node_size (int,float): Size of plotted nodes.\n font_size (int,float): Size of of font of plotted labels etc.\n linewidths (int,float): Width of edges in network.\n fig_scale (int,float): Scaling factor to apply to plotted network.\n path_to_save (str): Path to directory (with file name included) in which\n to save generated plot. E.g. path_to_save='data/my_plot'\n show_fig (bool): Whether or not to plot and show fig. If True, will\n return and display fig.\n \n Returns:\n matplotlib.figure.Figure: node distribution plotted as a 2d matrix. 
\n\n \"\"\"\n net_node_positions = init_network_node_positions(copy.deepcopy(network))\n fig = plt.figure(figsize=[15 * fig_scale, 15 * fig_scale])\n pos = {}\n network_nodes = []\n network_nodes_dict = get_node_type_dict(network, network.graph[\n 'node_labels'])\n for nodes in list(network_nodes_dict.values()):\n for network_node in nodes:\n pos[network_node] = net_node_positions[network_node]\n node_colours = iter(['#25c44d', '#36a0c7', '#e8b017', '#6115a3', '#160e63']\n )\n for node_type in network.graph['node_labels']:\n nx.draw_networkx_nodes(network, pos, nodelist=network_nodes_dict[\n node_type], node_size=network_node_size, node_color=next(\n node_colours), linewidths=linewidths, label=node_type)\n if draw_node_labels:\n nx.draw_networkx_labels(network, pos, font_size=font_size,\n font_color='k', font_family='sans-serif', font_weight='normal',\n alpha=1.0)\n fibre_links = list(network.edges)\n nx.draw_networkx_edges(network, pos, edgelist=fibre_links, edge_color=\n 'k', width=3, label='Fibre link')\n if path_to_save is not None:\n tools.pickle_data(path_to_save, fig)\n if show_fig:\n plt.show()\n return fig\n\n\nif __name__ == '__main__':\n network = gen_fat_tree(k=3)\n plot_network(network, 'figures/graph/', name='network_graph.png',\n with_labels=True)\n",
"step-5": "'''Module for generating and plotting networks.'''\n\nfrom trafpy.generator.src import tools\nimport copy\nimport networkx as nx\nimport matplotlib.pyplot as plt\nimport json\n\n\ndef gen_arbitrary_network(num_eps,\n ep_label=None, \n ep_capacity=12500, \n num_channels=1, \n racks_dict=None,\n topology_type=None):\n '''Generates an arbitrary network with num_eps nodes labelled as ep_label.\n\n Note that no edges are formed in this network; it is purely for ep name \n indexing purposes when using Demand class. This is useful where want to\n use the demand class but not necessarily with a carefully crafted networkx\n graph that accurately mimics the network you will use for the demands\n\n Args:\n num_eps (int): Number of endpoints in network.\n ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have\n ep_label appended to the start of their label (e.g. 'server_0', 'server_1', ...).\n ep_capacity (int, float): Byte capacity per end point channel.\n num_channels (int, float): Number of channels on each link in network.\n racks_dict (dict): Mapping of which end points are in which racks. Keys are\n rack ids, values are list of end points. 
If None, assume there is not\n clustering/rack system in the network where have different end points\n in different clusters/racks.\n\n Returns:\n networkx graph: network object\n\n '''\n network = nx.Graph()\n network.add_nodes_from([node for node in range(num_eps)])\n \n if ep_label is None:\n # must be str or not json serialisable\n servers = [str(i) for i in range(num_eps)]\n else:\n servers = [ep_label+'_'+str(i) for i in range(num_eps)]\n relabel_mapping = {node: label for node, label in zip(range(num_eps),servers)}\n network = nx.relabel_nodes(network, relabel_mapping)\n eps = []\n for node in list(network.nodes):\n try:\n if ep_label in node:\n eps.append(node)\n except TypeError:\n # ep_label is None\n eps.append(node)\n network.graph['endpoints'] = eps\n\n # /= 2 to get max theoretical capacity (number of units which network can transfer per unit time)\n max_nw_capacity = (num_eps * ep_capacity * num_channels) / 2\n\n if topology_type is None:\n topology_type = 'arbitrary_endpoints_{}_chancap_{}_channels_{}'.format(num_eps, ep_capacity, num_channels)\n\n init_global_network_attrs(network,\n max_nw_capacity,\n num_channels,\n ep_link_capacity=ep_capacity*num_channels,\n endpoint_label=ep_label,\n node_labels=[ep_label],\n racks_dict=racks_dict,\n topology_type=topology_type)\n \n return network\n\n\n\ndef gen_nsfnet_network(ep_label='server', \n rack_label='rack',\n N=0, \n num_channels=2, \n server_to_rack_channel_capacity=1,\n rack_to_rack_channel_capacity=10,\n show_fig=False):\n '''Generates the standard 14-node NSFNET topology (a U.S. core network).\n \n Args:\n ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have\n ep_label appended to the start of their label (e.g. 'server_0', 'server_1', ...).\n N (int): Number of servers per rack. 
If 0, assume all nodes in nsfnet\n are endpoints\n num_channels (int,float): Number of channels on each link in network.\n server_to_rack_channel_capacity (int,float): Byte capacity per channel \n between servers and ToR switch.\n rack_to_rack_channel_capacity (int,float): Byte capacity per channel between racks.\n show_fig (bool): Whether or not to plot and show fig. If True, will\n display fig.\n\n Returns:\n networkx graph: network object\n\n '''\n channel_names = gen_channel_names(num_channels)\n network = nx.Graph()\n\n node_pair_list = [[0,1],\n [0,3],\n [0,2],\n [1,2],\n [1,7],\n [3,8],\n [3,4],\n [3,6],\n [4,5],\n [4,5],\n [5,2],\n [5,13],\n [5,12],\n [6,7],\n [7,10],\n [8,11],\n [8,9],\n [9,10],\n [9,12],\n [10,11],\n [10,13],\n [11,12]]\n\n if N == 0:\n # above nodes are all end points\n label = ep_label\n else:\n # above nodes are ToR switch nodes\n label = rack_label\n for idx in range(len(node_pair_list)):\n node_pair_list[idx][0] = label + '_' + str(node_pair_list[idx][0])\n node_pair_list[idx][1] = label + '_' + str(node_pair_list[idx][1])\n\n # add 14 nodes\n for edge in node_pair_list:\n network.add_edge(*tuple(edge))\n\n if N == 0:\n # assume all nodes are servers\n racks_dict = None\n else:\n # each of 14 nodes in NSFNET is a ToR switch\n i = 0\n racks_dict = {rack: [] for rack in range(14)}\n for rack in range(14):\n for server in range(N):\n racks_dict[rack].append(ep_label+'_'+str(i))\n network.add_edge(ep_label+'_'+str(i), rack_label+'_'+str(rack))\n i += 1\n \n channel_names = gen_channel_names(num_channels)\n edges = [edge for edge in network.edges]\n add_edges_capacity_attrs(network, edges, channel_names, rack_to_rack_channel_capacity)\n\n # set gloabl network attrs\n network.graph['endpoints'] = get_endpoints(network, ep_label)\n\n # /= 2 to get max theoretical capacity (number of units which network can transfer per unit time)\n max_nw_capacity = (len(network.edges) * num_channels * rack_to_rack_channel_capacity) / 2\n\n 
init_global_network_attrs(network, \n max_nw_capacity, \n num_channels, \n ep_link_capacity=server_to_rack_channel_capacity*num_channels,\n endpoint_label=ep_label,\n node_labels=[ep_label, rack_label],\n topology_type='14_node_nsfnet',\n racks_dict=racks_dict)\n if show_fig:\n plot_network(network, show_fig=True)\n \n return network\n\ndef gen_simple_network(ep_label='server', \n num_channels=2, \n server_to_rack_channel_capacity=500,\n show_fig=False):\n '''Generates very simple 5-node topology.\n\n Args:\n ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have\n ep_label appended to the start of their label (e.g. 'server_0', 'server_1', ...).\n num_channels (int,float): Number of channels on each link in network.\n channel_capacity (int,float): Byte capacity per channel.\n show_fig (bool): Whether or not to plot and show fig. If True, will\n display fig.\n\n Returns:\n networkx graph: network object\n\n '''\n network = nx.Graph()\n network.add_nodes_from([node for node in range(5)])\n network.add_edges_from([(0,1),\n (0,2),\n (1,2),\n (2,4),\n (4,3),\n (3,1)],weight=1)\n servers = [ep_label+'_'+str(i) for i in range(5)]\n relabel_mapping = {node: label for node, label in zip(range(5),servers)}\n network = nx.relabel_nodes(network, relabel_mapping)\n\n channel_names = gen_channel_names(num_channels)\n edges = [edge for edge in network.edges]\n add_edges_capacity_attrs(network, edges, channel_names, server_to_rack_channel_capacity)\n\n # set gloabl network attrs\n network.graph['endpoints'] = get_endpoints(network, ep_label)\n\n # /= 2 to get max theoretical capacity (number of units which network can transfer per unit time)\n max_nw_capacity = (len(network.edges) * num_channels * server_to_rack_channel_capacity) / 2\n\n init_global_network_attrs(network, \n max_nw_capacity, \n num_channels, \n ep_link_capacity=server_to_rack_channel_capacity*num_channels,\n endpoint_label=ep_label,\n node_labels=[ep_label],\n 
topology_type='5_node_simple_network')\n\n if show_fig:\n plot_network(network, show_fig=True)\n\n \n return network\n\ndef get_endpoints(network, ep_label):\n '''Gets list of endpoints of network.\n\n Args:\n network (networkx graph): Networkx object.\n ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have\n ep_label appended to the start of their label (e.g. 'server_0', 'server_1', ...).\n\n Returns:\n eps (list): List of endpoints.\n\n '''\n eps = []\n for node in list(network.nodes):\n if ep_label in node:\n eps.append(node)\n\n return eps\n\ndef gen_fat_tree(k=4,\n L=2,\n n=4,\n ep_label='server',\n rack_label='rack',\n edge_label='edge',\n aggregate_label='agg',\n core_label='core',\n num_channels = 2,\n server_to_rack_channel_capacity=500,\n rack_to_edge_channel_capacity=1000,\n edge_to_agg_channel_capacity=1000,\n agg_to_core_channel_capacity=2000,\n rack_to_core_channel_capacity=2000,\n show_fig=False):\n '''Generates a perfect fat tree (i.e. all layers have switches with same radix/number of ports).\n\n Top layer is always core (spine) switch layer, bottom layer is always\n ToR (leaf) layer.\n\n L must be either 2 (core, ToR) or 4 (core, agg, edge, ToR)\n\n N.B. L=2 is commonly referred to as '2-layer Clos' or 'Clos' or 'spine-leaf' topology\n\n Resource for building (scroll down to summary table with equations):\n\n https://packetpushers.net/demystifying-dcn-topologies-clos-fat-trees-part2/\n\n Another good resource for data centre topologies etc. 
in general:\n\n https://www.oreilly.com/library/view/bgp-in-the/9781491983416/ch01.html#:~:text=The%20most%20common%20routing%20protocol,single%20data%20center%2C%20as%20well.\n\n Parameters of network:\n\n - number of core (spine) switches = (k/2)^(L/2) (top layer)\n - number of edge switches (if L=4) = (k^2)/2\n - number of agg switches (if L=4) = (k^2)/2\n - number of pods (if L=4) (pod is a group of agg and/or edge switches) = 2*(k/2)^(L-2)\n - number of ToR (leaf) switches (racks) = 2*(k/2)^(L-1) (bottom layer)\n - number of server-facing ToR 'host' ports = 2*(k/2)^2 (can have multiple servers connected to same host port, & can oversubscribe)\n - number of servers = number ToR switches * n\n\n Args:\n k (int): Number of ports (links) on each switch (both up and down).\n L (int): Number of layers in the fat tree.\n n (int): Number of server per rack.\n ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have\n ep_label appended to the start of their label (e.g. 
'server_0', 'server_1', ...).\n edge_label (str,int): Label to assign to edge switch nodes\n aggregate_label (str,int): Label to assign to edge switch nodes\n core_label (str,int): Label to assign to core switch nodes\n num_channels (int, float): Number of channels on each link in network\n server_to_edge_channel_capacity (int,float): Byte capacity per channel\n edge_to_agg_channel_capacity (int,float): (if L==4) Byte capacity per channel\n agg_to_core_channel_capacity (int,float): (if L==4) Byte capacity per channel\n rack_to_core_channel_capacity (int,float): (if L==2) Byte capacity per channel\n\n Returns:\n networkx graph: network object\n\n '''\n if L != 2 and L != 4:\n raise Exception('L must be 2 (ToR layer, core layer) or 4 (ToR layer, edge layer, agg layer, core layer), but is {}.'.format(L))\n if k % 2 != 0:\n raise Exception('k must be even since, in perfect fat tree, have equal number of up and down ports on each switch, but is {}.'.format(k))\n\n channel_names = gen_channel_names(num_channels)\n\n # initialise network nodes\n if L == 2:\n node_labels = [ep_label, rack_label, core_label]\n else:\n node_labels = [ep_label, rack_label, edge_label, aggregate_label, core_label]\n\n #num_cores = int((k/2)**(L-1))\n #num_cores = int((k/2)**2)\n num_cores = int((k/2)**(L/2))\n num_aggs = int((k**2)/2)\n num_edges = int((k**2)/2)\n num_pods = int(2*(k/2)**(L-2))\n num_racks = int(2*(k/2)**(L-1))\n num_servers = int(num_racks * n)\n\n cores = [core_label+'_'+str(i) for i in range(num_cores)]\n aggs = [aggregate_label+'_'+str(i) for i in range(num_aggs)]\n edges = [edge_label+'_'+str(i) for i in range(num_edges)]\n racks = [rack_label+'_'+str(i) for i in range(num_racks)]\n servers = [ep_label+'_'+str(i) for i in range(num_servers)]\n\n # create core and rack layer networks\n core_layer = nx.Graph()\n rack_layer = nx.Graph()\n core_layer.add_nodes_from(cores)\n rack_layer.add_nodes_from(racks)\n\n # combine cores and racks into single network\n fat_tree_network = 
nx.compose(core_layer, rack_layer)\n \n if L == 2:\n # 2 layers: Core, ToR\n # link racks to cores, add link attributes\n rack_iterator = iter(racks)\n for rack in racks:\n core_iterator = iter(cores)\n # have k/2 up-ports on each switch\n for up_port in range(int(k/2)):\n core = next(core_iterator)\n fat_tree_network.add_edge(rack, core)\n add_edge_capacity_attrs(fat_tree_network,\n (rack, core),\n channel_names,\n rack_to_core_channel_capacity)\n else:\n # 4 layers: Core, Agg, Edge, ToR. Agg and Edge switches grouped into pods.\n # group edges and aggregates into pods\n num_pods = int(k)\n pods = [[] for i in range(num_pods)]\n prev_iter = 0\n for pod_iter in range(len(pods)):\n curr_iter = int(prev_iter + (k/2))\n pods[pod_iter].append(edges[prev_iter:curr_iter])\n pods[pod_iter].append(aggs[prev_iter:curr_iter])\n prev_iter = curr_iter\n\n # create dict of pod networks\n pod_labels = ['pod_'+str(i) for i in range(num_pods)]\n pods_dict = {tuple([pod]): nx.Graph() for pod in pod_labels}\n for pod_iter in range(num_pods):\n key = ('pod_'+str(pod_iter),)\n pod_edges = pods[pod_iter][0]\n\n pod_aggs = pods[pod_iter][1]\n pods_dict[key].add_nodes_from(pod_edges)\n pods_dict[key].add_nodes_from(pod_aggs)\n\n # connect edge and aggregate switches within pod, add link attributes\n for pod_edge in pod_edges:\n for pod_agg in pod_aggs:\n pods_dict[key].add_edge(pod_agg, pod_edge)\n add_edge_capacity_attrs(pods_dict[key], \n (pod_agg,pod_edge), \n channel_names, \n edge_to_agg_channel_capacity)\n\n # add pods (agg + edge) layer to fat-tree\n pod_networks = list(pods_dict.values())\n for pod_iter in range(num_pods):\n fat_tree_network = nx.compose(fat_tree_network, pod_networks[pod_iter])\n\n # link aggregate switches in pods to core switches, add link attributes\n for pod_iter in range(num_pods):\n pod_aggs = pods[pod_iter][1]\n core_iterator = iter(cores)\n for pod_agg in pod_aggs:\n while fat_tree_network.degree[pod_agg] < k:\n core = next(core_iterator)\n 
fat_tree_network.add_edge(core, pod_agg)\n add_edge_capacity_attrs(fat_tree_network,\n (core,pod_agg),\n channel_names,\n agg_to_core_channel_capacity)\n\n # link edge switches in pods to racks, add link attributes\n rack_iterator = iter(racks)\n for pod_iter in range(num_pods):\n pod_edges = pods[pod_iter][0]\n for pod_edge in pod_edges:\n while fat_tree_network.degree[pod_edge] < k:\n rack = next(rack_iterator)\n fat_tree_network.add_edge(pod_edge, rack)\n add_edge_capacity_attrs(fat_tree_network,\n (pod_edge,rack),\n channel_names,\n rack_to_edge_channel_capacity)\n\n # link servers to racks, add link attributes\n racks_dict = {rack: [] for rack in racks} # track which endpoints in which rack\n server_iterator = iter(servers)\n for rack in racks:\n for _ in range(n):\n server = next(server_iterator)\n fat_tree_network.add_edge(rack, server)\n add_edge_capacity_attrs(fat_tree_network,\n (rack, server),\n channel_names,\n server_to_rack_channel_capacity)\n racks_dict[rack].append(server)\n\n # calc total network capacity\n # /= 2 to get max theoretical capacity (number of units which network can transfer per unit time)\n max_nw_capacity = (num_servers * num_channels * server_to_rack_channel_capacity) / 2\n\n\n # init global network attrs\n fat_tree_network.graph['endpoints'] = servers\n init_global_network_attrs(fat_tree_network, \n max_nw_capacity, \n num_channels, \n ep_link_capacity=server_to_rack_channel_capacity*num_channels,\n endpoint_label=ep_label,\n node_labels=node_labels,\n topology_type='fat_tree',\n racks_dict=racks_dict)\n\n if show_fig:\n plot_network(fat_tree_network, show_fig=True)\n\n return fat_tree_network\n\n \n\n\ndef init_global_network_attrs(network, \n max_nw_capacity, \n num_channels, \n ep_link_capacity,\n endpoint_label = 'server',\n topology_type='unknown', \n node_labels=['server'],\n racks_dict=None):\n '''Initialises the standard global network attributes of a given network.\n\n Args:\n network (obj): NetworkX object.\n 
max_nw_capacity (int/float): Maximum rate at which info can be reliably \n transmitted over the network (sum of all link capacities).\n num_channels (int): Number of channels on each link in network.\n topology_type (str): Label of network topology (e.g. 'fat_tree').\n node_labels (list): Label classes assigned to network nodes \n (e.g. ['server', 'rack', 'edge']).\n racks_dict (dict): Which servers/endpoints are in which rack. If None,\n assume do not have rack system where have multiple servers in one\n rack.\n\n '''\n network.graph['endpoint_label'] = endpoint_label\n network.graph['num_channels_per_link'] = num_channels\n network.graph['ep_link_capacity'] = ep_link_capacity\n network.graph['ep_link_port_capacity'] = ep_link_capacity / 2 # all eps have a src & a dst port\n network.graph['max_nw_capacity'] = max_nw_capacity\n network.graph['curr_nw_capacity_used'] = 0\n network.graph['num_active_connections'] = 0\n network.graph['total_connections_blocked'] = 0\n network.graph['node_labels'] = node_labels\n network.graph['topology_type'] = topology_type\n network.graph['channel_names'] = gen_channel_names(num_channels)\n\n # ensure racks dict is str so json serialisable\n if racks_dict is not None:\n _racks_dict = {}\n for key, val in racks_dict.items():\n _racks_dict[str(key)] = []\n for v in val:\n _racks_dict[str(key)].append(str(v))\n network.graph['rack_to_ep_dict'] = _racks_dict\n else:\n network.graph['rack_to_ep_dict'] = None\n\n if racks_dict is not None:\n # switch racks_dict keys and values to make hashing easier\n ep_to_rack_dict = {}\n for key, val in _racks_dict.items():\n for v in val:\n if v not in ep_to_rack_dict.keys():\n ep_to_rack_dict[v] = key\n network.graph['ep_to_rack_dict'] = ep_to_rack_dict\n else:\n network.graph['ep_to_rack_dict'] = None\n\n\ndef gen_channel_names(num_channels):\n '''Generates channel names for channels on each link in network.'''\n channels = [channel+1 for channel in range(num_channels)]\n channel_names = ['channel_' 
+ str(channel) for channel in channels]\n \n return channel_names\n\ndef add_edge_capacity_attrs(network, \n edge, \n channel_names, \n channel_capacity, \n bidirectional_links=True):\n '''Adds channels and corresponding max channel bytes to single edge in network.\n \n Args:\n network (networkx graph): Network containing edges to whiich attrs will\n be added.\n edge (tuple): Node-node edge pair.\n channel_names (list): List of channel names to add to edge.\n channel_capacity (int,float): Capacity to allocate to each channel.\n bidirectional_links (bool): If True, each link has capacity split equally\n between src and dst port. I.e. all links have a src and dst port\n which are treated separately to incoming and outgoing traffic to and\n from given node (switch or server).\n\n '''\n if bidirectional_links:\n attrs = {edge:\n {'{}_to_{}_port'.format(edge[0], edge[1]):\n {'channels':\n {channel: channel_capacity/2 for channel in channel_names},\n 'max_channel_capacity': channel_capacity/2\n },\n '{}_to_{}_port'.format(edge[1], edge[0]):\n {'channels':\n {channel: channel_capacity/2 for channel in channel_names},\n 'max_channel_capacity': channel_capacity/2\n }\n }\n }\n \n else:\n attrs = {edge:\n {'channels': {channel: channel_capacity for channel in channel_names},\n 'max_channel_capacity': channel_capacity}}\n \n nx.set_edge_attributes(network, attrs)\n\n\n\n\ndef add_edges_capacity_attrs(network, \n edges,\n channel_names,\n channel_capacity,\n bidirectional_links=True):\n '''Adds channels & max channel capacitys to single edge in network.\n \n To access e.g. the edge going from node 0 to node 1 (edge (0, 1)), you\n would index the network with network[0][1]\n\n To access e.g. 
the channel_1 attribute of this particular (0, 1) edge, you\n would do network[0][1]['channels']['channel_1']\n OR\n if bidirectional_links, you do network[0][1]['0_to_1_port']['channels']['channel_1']\n or network[0][1]['1_to_0_port']['channels']['channel_1] depending on which direction\n of the link you want to access.\n \n Args:\n network (networkx graph): Network containing edges to which attrs will\n be added.\n edges (list): List of node pairs in tuples.\n channel_names (list of str): List of channel names to add to edge.\n channel_capacity (int, float): Capacity to allocate to each channel.\n bidirectional_links (bool): If True, each link has capacity split equally\n between src and dst port. I.e. all links have a src and dst port\n which are treated separately to incoming and outgoing traffic to and\n from given node (switch or server).\n\n '''\n if bidirectional_links:\n attrs = {edge:\n {'{}_to_{}_port'.format(edge[0], edge[1]):\n {'channels':\n {channel: channel_capacity/2 for channel in channel_names},\n 'max_channel_capacity': channel_capacity/2\n },\n '{}_to_{}_port'.format(edge[1], edge[0]):\n {'channels':\n {channel: channel_capacity/2 for channel in channel_names},\n 'max_channel_capacity': channel_capacity/2\n }\n }\n for edge in edges}\n else:\n attrs = {edge: \n {'channels': \n {channel: channel_capacity for channel in channel_names},\n 'max_channel_capacity': \n channel_capacity\n } for edge in edges}\n\n nx.set_edge_attributes(network, attrs)\n \n\ndef get_node_type_dict(network, node_types=[]):\n '''Gets dict where keys are node types, values are list of nodes for each node type in graph.'''\n network_nodes = []\n for network_node in network.nodes:\n network_nodes.append(network_node)\n network_nodes_dict = {node_type: [] for node_type in node_types}\n for n in network_nodes:\n for node_type in node_types:\n if node_type in n:\n network_nodes_dict[node_type].append(n)\n else:\n # not this node type\n pass\n \n return 
network_nodes_dict\n\n\ndef get_fat_tree_positions(net, width_scale=500, height_scale=10):\n '''Gets networkx positions of nodes in fat tree network for plotting.'''\n pos = {}\n\n node_type_dict = get_node_type_dict(net, net.graph['node_labels'])\n node_types = list(node_type_dict.keys())\n \n heights = {} # dict for heigh separation between fat tree layers\n widths = {} # dict for width separation between nodes within layers\n h = iter([1, 2, 3, 4, 5]) # server, rack, edge, agg, core heights\n for node_type in node_types: \n heights[node_type] = next(h)\n widths[node_type] = 1/(len(node_type_dict[node_type])+1)\n idx = 0\n for node in node_type_dict[node_type]:\n pos[node] = ((idx+1)*widths[node_type]*width_scale,heights[node_type]*height_scale)\n idx += 1\n\n return pos\n \n\ndef init_network_node_positions(net):\n '''Initialises network node positions for plotting.'''\n if net.graph['topology_type'] == 'fat_tree':\n pos = get_fat_tree_positions(net)\n\n else:\n pos = nx.nx_agraph.graphviz_layout(net, prog='neato')\n \n return pos\n\n\ndef plot_network(network,\n draw_node_labels=True,\n ep_label='server',\n network_node_size=2000,\n font_size=30,\n linewidths=1,\n fig_scale=2,\n path_to_save=None, \n show_fig=False):\n '''Plots networkx graph.\n\n Recognises special fat tree network and applies appropriate node positioning,\n labelling, colouring etc.\n\n Args:\n network (networkx graph): Network object to be plotted.\n draw_node_labels (bool): Whether or not to draw node labels on plot. \n ep_label (str,int,float): Endpoint label (e.g. 'server'). All endpoints will have\n ep_label appended to the start of their label (e.g. 
'server_0', 'server_1', ...).\n network_node_size (int,float): Size of plotted nodes.\n font_size (int,float): Size of of font of plotted labels etc.\n linewidths (int,float): Width of edges in network.\n fig_scale (int,float): Scaling factor to apply to plotted network.\n path_to_save (str): Path to directory (with file name included) in which\n to save generated plot. E.g. path_to_save='data/my_plot'\n show_fig (bool): Whether or not to plot and show fig. If True, will\n return and display fig.\n \n Returns:\n matplotlib.figure.Figure: node distribution plotted as a 2d matrix. \n\n '''\n \n net_node_positions = init_network_node_positions(copy.deepcopy(network))\n\n fig = plt.figure(figsize=[15*fig_scale,15*fig_scale])\n\n # add nodes and edges\n pos = {}\n network_nodes = []\n network_nodes_dict = get_node_type_dict(network, network.graph['node_labels'])\n for nodes in list(network_nodes_dict.values()):\n for network_node in nodes:\n pos[network_node] = net_node_positions[network_node]\n \n # network nodes\n node_colours = iter(['#25c44d', '#36a0c7', '#e8b017', '#6115a3', '#160e63']) # server, rack, edge, agg, core\n for node_type in network.graph['node_labels']:\n nx.draw_networkx_nodes(network, \n pos, \n nodelist=network_nodes_dict[node_type],\n node_size=network_node_size, \n node_color=next(node_colours), \n linewidths=linewidths, \n label=node_type)\n if draw_node_labels:\n # nodes\n nx.draw_networkx_labels(network, \n pos, \n font_size=font_size, \n font_color='k', \n font_family='sans-serif', \n font_weight='normal', \n alpha=1.0)\n \n # fibre links\n fibre_links = list(network.edges)\n nx.draw_networkx_edges(network, \n pos,\n edgelist=fibre_links,\n edge_color='k',\n width=3,\n label='Fibre link')\n\n\n if path_to_save is not None:\n tools.pickle_data(path_to_save, fig)\n\n if show_fig:\n plt.show()\n\n return fig\n\n\nif __name__ == '__main__':\n #network = gen_simple_network()\n #network = gen_nsfnet_network()\n network = gen_fat_tree(k=3)\n \n 
plot_network(network, 'figures/graph/',name='network_graph.png',with_labels=True)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n \n\n\n\n",
"step-ids": [
11,
12,
13,
15,
16
]
}
|
[
11,
12,
13,
15,
16
] |
import os
import sys
import pytest
def run_test(file_name, capture_stdout=True, allure_dir=None):
cmd = [
file_name, "-vvv",
]
if capture_stdout:
cmd.append("-s")
test_name = os.path.splitext(os.path.basename(file_name))[0]
alluredir = os.path.normpath("%s/%s/" % (allure_dir or "allure-results", test_name))
cmd.extend(["--alluredir", alluredir])
print(cmd)
sys.exit(pytest.main(cmd))
|
normal
|
{
"blob_id": "7e7a50cb8e66a71c1df2d61241f8a55c042b7d59",
"index": 2664,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef run_test(file_name, capture_stdout=True, allure_dir=None):\n cmd = [file_name, '-vvv']\n if capture_stdout:\n cmd.append('-s')\n test_name = os.path.splitext(os.path.basename(file_name))[0]\n alluredir = os.path.normpath('%s/%s/' % (allure_dir or 'allure-results',\n test_name))\n cmd.extend(['--alluredir', alluredir])\n print(cmd)\n sys.exit(pytest.main(cmd))\n",
"step-3": "import os\nimport sys\nimport pytest\n\n\ndef run_test(file_name, capture_stdout=True, allure_dir=None):\n cmd = [file_name, '-vvv']\n if capture_stdout:\n cmd.append('-s')\n test_name = os.path.splitext(os.path.basename(file_name))[0]\n alluredir = os.path.normpath('%s/%s/' % (allure_dir or 'allure-results',\n test_name))\n cmd.extend(['--alluredir', alluredir])\n print(cmd)\n sys.exit(pytest.main(cmd))\n",
"step-4": "import os\nimport sys\nimport pytest\n\n\ndef run_test(file_name, capture_stdout=True, allure_dir=None):\n cmd = [\n file_name, \"-vvv\",\n ]\n\n if capture_stdout:\n cmd.append(\"-s\")\n\n test_name = os.path.splitext(os.path.basename(file_name))[0]\n alluredir = os.path.normpath(\"%s/%s/\" % (allure_dir or \"allure-results\", test_name))\n cmd.extend([\"--alluredir\", alluredir])\n print(cmd)\n sys.exit(pytest.main(cmd))\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
style.use('ggplot')
<|reserved_special_token_0|>
clf.fit(X)
<|reserved_special_token_0|>
print(cluster_centers)
<|reserved_special_token_0|>
print('Number of clusters found:', n_clusters)
<|reserved_special_token_0|>
for i in range(len(X)):
ax.scatter(X[i][0], X[i][1], X[i][2], c=colors[labels[i]], marker='o')
ax.scatter(cluster_centers[:, 0], cluster_centers[:, 1], cluster_centers[:,
2], marker='x', s=150, linewidth=5, zorder=10, color='k')
plt.show()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
style.use('ggplot')
centers = [[20, 0, 0], [0, 20, 0], [0, 0, 20], [0, 0, 0]]
X, _ = make_blobs(n_samples=200, centers=centers, cluster_std=2)
clf = MeanShift(bandwidth=10)
clf.fit(X)
labels = clf.labels_
cluster_centers = clf.cluster_centers_
print(cluster_centers)
n_clusters = len(cluster_centers)
print('Number of clusters found:', n_clusters)
colors = 10 * ['r', 'g', 'b', 'y', 'c']
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
for i in range(len(X)):
ax.scatter(X[i][0], X[i][1], X[i][2], c=colors[labels[i]], marker='o')
ax.scatter(cluster_centers[:, 0], cluster_centers[:, 1], cluster_centers[:,
2], marker='x', s=150, linewidth=5, zorder=10, color='k')
plt.show()
<|reserved_special_token_1|>
from sklearn.cluster import MeanShift
from sklearn.datasets.samples_generator import make_blobs
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import style
style.use('ggplot')
centers = [[20, 0, 0], [0, 20, 0], [0, 0, 20], [0, 0, 0]]
X, _ = make_blobs(n_samples=200, centers=centers, cluster_std=2)
clf = MeanShift(bandwidth=10)
clf.fit(X)
labels = clf.labels_
cluster_centers = clf.cluster_centers_
print(cluster_centers)
n_clusters = len(cluster_centers)
print('Number of clusters found:', n_clusters)
colors = 10 * ['r', 'g', 'b', 'y', 'c']
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
for i in range(len(X)):
ax.scatter(X[i][0], X[i][1], X[i][2], c=colors[labels[i]], marker='o')
ax.scatter(cluster_centers[:, 0], cluster_centers[:, 1], cluster_centers[:,
2], marker='x', s=150, linewidth=5, zorder=10, color='k')
plt.show()
<|reserved_special_token_1|>
from sklearn.cluster import MeanShift
from sklearn.datasets.samples_generator import make_blobs
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import style
style.use('ggplot')
# Create random data points whose centers are the following
centers = [[20, 0, 0], [0, 20, 0], [0, 0, 20], [0, 0, 0]]
X, _ = make_blobs(n_samples=200, centers=centers, cluster_std=2)
# Fit the data into MeanShift classifier with search bandwidth = 10
clf = MeanShift(bandwidth=10)
clf.fit(X)
# Get the labels of each data point
# and cluster centers of the number of clusters formed
labels = clf.labels_
cluster_centers = clf.cluster_centers_
print(cluster_centers)
n_clusters = len(cluster_centers)
print('Number of clusters found:', n_clusters)
# Plot the data points with their clusters and centers on a 3d graph
colors = 10*['r', 'g', 'b', 'y', 'c']
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
for i in range(len(X)):
ax.scatter(X[i][0], X[i][1], X[i][2], c=colors[labels[i]], marker='o')
ax.scatter(cluster_centers[:, 0], cluster_centers[:, 1], cluster_centers[:, 2],
marker='x', s=150, linewidth=5, zorder=10, color='k')
plt.show()
|
flexible
|
{
"blob_id": "c0216dbd52be134eb417c20ed80b398b22e5d844",
"index": 6967,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nstyle.use('ggplot')\n<mask token>\nclf.fit(X)\n<mask token>\nprint(cluster_centers)\n<mask token>\nprint('Number of clusters found:', n_clusters)\n<mask token>\nfor i in range(len(X)):\n ax.scatter(X[i][0], X[i][1], X[i][2], c=colors[labels[i]], marker='o')\nax.scatter(cluster_centers[:, 0], cluster_centers[:, 1], cluster_centers[:,\n 2], marker='x', s=150, linewidth=5, zorder=10, color='k')\nplt.show()\n",
"step-3": "<mask token>\nstyle.use('ggplot')\ncenters = [[20, 0, 0], [0, 20, 0], [0, 0, 20], [0, 0, 0]]\nX, _ = make_blobs(n_samples=200, centers=centers, cluster_std=2)\nclf = MeanShift(bandwidth=10)\nclf.fit(X)\nlabels = clf.labels_\ncluster_centers = clf.cluster_centers_\nprint(cluster_centers)\nn_clusters = len(cluster_centers)\nprint('Number of clusters found:', n_clusters)\ncolors = 10 * ['r', 'g', 'b', 'y', 'c']\nfig = plt.figure()\nax = fig.add_subplot(111, projection='3d')\nfor i in range(len(X)):\n ax.scatter(X[i][0], X[i][1], X[i][2], c=colors[labels[i]], marker='o')\nax.scatter(cluster_centers[:, 0], cluster_centers[:, 1], cluster_centers[:,\n 2], marker='x', s=150, linewidth=5, zorder=10, color='k')\nplt.show()\n",
"step-4": "from sklearn.cluster import MeanShift\nfrom sklearn.datasets.samples_generator import make_blobs\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom matplotlib import style\nstyle.use('ggplot')\ncenters = [[20, 0, 0], [0, 20, 0], [0, 0, 20], [0, 0, 0]]\nX, _ = make_blobs(n_samples=200, centers=centers, cluster_std=2)\nclf = MeanShift(bandwidth=10)\nclf.fit(X)\nlabels = clf.labels_\ncluster_centers = clf.cluster_centers_\nprint(cluster_centers)\nn_clusters = len(cluster_centers)\nprint('Number of clusters found:', n_clusters)\ncolors = 10 * ['r', 'g', 'b', 'y', 'c']\nfig = plt.figure()\nax = fig.add_subplot(111, projection='3d')\nfor i in range(len(X)):\n ax.scatter(X[i][0], X[i][1], X[i][2], c=colors[labels[i]], marker='o')\nax.scatter(cluster_centers[:, 0], cluster_centers[:, 1], cluster_centers[:,\n 2], marker='x', s=150, linewidth=5, zorder=10, color='k')\nplt.show()\n",
"step-5": "from sklearn.cluster import MeanShift\nfrom sklearn.datasets.samples_generator import make_blobs\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom matplotlib import style\n\nstyle.use('ggplot')\n\n\n# Create random data points whose centers are the following\ncenters = [[20, 0, 0], [0, 20, 0], [0, 0, 20], [0, 0, 0]]\nX, _ = make_blobs(n_samples=200, centers=centers, cluster_std=2)\n\n# Fit the data into MeanShift classifier with search bandwidth = 10\nclf = MeanShift(bandwidth=10)\nclf.fit(X)\n\n# Get the labels of each data point\n# and cluster centers of the number of clusters formed\nlabels = clf.labels_\ncluster_centers = clf.cluster_centers_\nprint(cluster_centers)\nn_clusters = len(cluster_centers)\nprint('Number of clusters found:', n_clusters)\n\n# Plot the data points with their clusters and centers on a 3d graph\ncolors = 10*['r', 'g', 'b', 'y', 'c']\nfig = plt.figure()\nax = fig.add_subplot(111, projection='3d')\n\nfor i in range(len(X)):\n ax.scatter(X[i][0], X[i][1], X[i][2], c=colors[labels[i]], marker='o')\n\nax.scatter(cluster_centers[:, 0], cluster_centers[:, 1], cluster_centers[:, 2],\n marker='x', s=150, linewidth=5, zorder=10, color='k')\n\nplt.show()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class Song:
def __init__(self, songName, artistName, lyric):
self.songName = songName
self.artistName = artistName
self.lyric = lyric
self.phrasePinyinDict = util.lyricToPinYi(self.lyric)
<|reserved_special_token_0|>
def getArtistName(self):
return self.artistName
def getLyric(self):
return self.lyric
def getName(self):
return util.sanitizeName(self.artistName) + '-' + util.sanitizeName(
self.songName)
<|reserved_special_token_0|>
def write(self):
file = open(self.getSongName(), 'w+')
file.write(self.getLyric())
file.close()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Song:
def __init__(self, songName, artistName, lyric):
self.songName = songName
self.artistName = artistName
self.lyric = lyric
self.phrasePinyinDict = util.lyricToPinYi(self.lyric)
def getSongName(self):
return self.songName
def getArtistName(self):
return self.artistName
def getLyric(self):
return self.lyric
def getName(self):
return util.sanitizeName(self.artistName) + '-' + util.sanitizeName(
self.songName)
<|reserved_special_token_0|>
def write(self):
file = open(self.getSongName(), 'w+')
file.write(self.getLyric())
file.close()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Song:
def __init__(self, songName, artistName, lyric):
self.songName = songName
self.artistName = artistName
self.lyric = lyric
self.phrasePinyinDict = util.lyricToPinYi(self.lyric)
def getSongName(self):
return self.songName
def getArtistName(self):
return self.artistName
def getLyric(self):
return self.lyric
def getName(self):
return util.sanitizeName(self.artistName) + '-' + util.sanitizeName(
self.songName)
def storeToFileSystem(self, filename, append):
file = open(filename, ('w+', 'a+')[append], encoding='utf8')
json.dump(self.__dict__, file, indent=4, ensure_ascii=False)
file.close()
def write(self):
file = open(self.getSongName(), 'w+')
file.write(self.getLyric())
file.close()
<|reserved_special_token_1|>
import json
import jieba
import util
from pypinyin import pinyin, Style
class Song:
def __init__(self, songName, artistName, lyric):
self.songName = songName
self.artistName = artistName
self.lyric = lyric
self.phrasePinyinDict = util.lyricToPinYi(self.lyric)
def getSongName(self):
return self.songName
def getArtistName(self):
return self.artistName
def getLyric(self):
return self.lyric
def getName(self):
return util.sanitizeName(self.artistName) + '-' + util.sanitizeName(
self.songName)
def storeToFileSystem(self, filename, append):
file = open(filename, ('w+', 'a+')[append], encoding='utf8')
json.dump(self.__dict__, file, indent=4, ensure_ascii=False)
file.close()
def write(self):
file = open(self.getSongName(), 'w+')
file.write(self.getLyric())
file.close()
<|reserved_special_token_1|>
import json
import jieba
import util
from pypinyin import pinyin, Style
class Song:
    """In-memory representation of a song: title, artist, lyric text and a
    phrase -> pinyin mapping derived from the lyric."""

    def __init__(self, songName, artistName, lyric):
        self.songName = songName
        self.artistName = artistName
        self.lyric = lyric
        # Pinyin transliteration of the lyric's phrases (project helper).
        self.phrasePinyinDict = util.lyricToPinYi(self.lyric)

    def getSongName(self):
        """Return the song title."""
        return self.songName

    def getArtistName(self):
        """Return the artist name."""
        return self.artistName

    def getLyric(self):
        """Return the raw lyric text."""
        return self.lyric

    def getName(self):
        """Return a sanitized '<artist>-<song>' identifier."""
        return util.sanitizeName(self.artistName)+"-"+ util.sanitizeName(self.songName)

    def storeToFileSystem(self, filename, append):
        """Serialize all attributes as JSON to *filename*.

        *append* selects append mode ('a+') over truncate mode ('w+').
        NOTE(review): the handle leaks if json.dump raises — a context
        manager would be safer.
        """
        file = open(filename, ("w+","a+")[append],encoding="utf8")
        json.dump(self.__dict__, file, indent=4, ensure_ascii=False)
        file.close()

    def write(self):
        """Write the lyric to a file named after the song title.

        NOTE(review): no encoding is given, so the platform default is
        used — likely to fail for CJK lyric text on Windows.
        """
        file = open(self.getSongName(), "w+")
        file.write(self.getLyric())
        file.close()
|
flexible
|
{
"blob_id": "fa3cec0781b9ca5c1d99a7500748104d7cdce631",
"index": 130,
"step-1": "<mask token>\n\n\nclass Song:\n\n def __init__(self, songName, artistName, lyric):\n self.songName = songName\n self.artistName = artistName\n self.lyric = lyric\n self.phrasePinyinDict = util.lyricToPinYi(self.lyric)\n <mask token>\n\n def getArtistName(self):\n return self.artistName\n\n def getLyric(self):\n return self.lyric\n\n def getName(self):\n return util.sanitizeName(self.artistName) + '-' + util.sanitizeName(\n self.songName)\n <mask token>\n\n def write(self):\n file = open(self.getSongName(), 'w+')\n file.write(self.getLyric())\n file.close()\n",
"step-2": "<mask token>\n\n\nclass Song:\n\n def __init__(self, songName, artistName, lyric):\n self.songName = songName\n self.artistName = artistName\n self.lyric = lyric\n self.phrasePinyinDict = util.lyricToPinYi(self.lyric)\n\n def getSongName(self):\n return self.songName\n\n def getArtistName(self):\n return self.artistName\n\n def getLyric(self):\n return self.lyric\n\n def getName(self):\n return util.sanitizeName(self.artistName) + '-' + util.sanitizeName(\n self.songName)\n <mask token>\n\n def write(self):\n file = open(self.getSongName(), 'w+')\n file.write(self.getLyric())\n file.close()\n",
"step-3": "<mask token>\n\n\nclass Song:\n\n def __init__(self, songName, artistName, lyric):\n self.songName = songName\n self.artistName = artistName\n self.lyric = lyric\n self.phrasePinyinDict = util.lyricToPinYi(self.lyric)\n\n def getSongName(self):\n return self.songName\n\n def getArtistName(self):\n return self.artistName\n\n def getLyric(self):\n return self.lyric\n\n def getName(self):\n return util.sanitizeName(self.artistName) + '-' + util.sanitizeName(\n self.songName)\n\n def storeToFileSystem(self, filename, append):\n file = open(filename, ('w+', 'a+')[append], encoding='utf8')\n json.dump(self.__dict__, file, indent=4, ensure_ascii=False)\n file.close()\n\n def write(self):\n file = open(self.getSongName(), 'w+')\n file.write(self.getLyric())\n file.close()\n",
"step-4": "import json\nimport jieba\nimport util\nfrom pypinyin import pinyin, Style\n\n\nclass Song:\n\n def __init__(self, songName, artistName, lyric):\n self.songName = songName\n self.artistName = artistName\n self.lyric = lyric\n self.phrasePinyinDict = util.lyricToPinYi(self.lyric)\n\n def getSongName(self):\n return self.songName\n\n def getArtistName(self):\n return self.artistName\n\n def getLyric(self):\n return self.lyric\n\n def getName(self):\n return util.sanitizeName(self.artistName) + '-' + util.sanitizeName(\n self.songName)\n\n def storeToFileSystem(self, filename, append):\n file = open(filename, ('w+', 'a+')[append], encoding='utf8')\n json.dump(self.__dict__, file, indent=4, ensure_ascii=False)\n file.close()\n\n def write(self):\n file = open(self.getSongName(), 'w+')\n file.write(self.getLyric())\n file.close()\n",
"step-5": "import json\nimport jieba\nimport util\nfrom pypinyin import pinyin, Style\n\nclass Song:\n def __init__(self, songName, artistName, lyric):\n self.songName = songName\n self.artistName = artistName\n self.lyric = lyric\n self.phrasePinyinDict = util.lyricToPinYi(self.lyric)\n\n def getSongName(self):\n return self.songName\n def getArtistName(self):\n return self.artistName\n def getLyric(self):\n return self.lyric\n def getName(self):\n return util.sanitizeName(self.artistName)+\"-\"+ util.sanitizeName(self.songName)\n\n def storeToFileSystem(self, filename, append):\n file = open(filename, (\"w+\",\"a+\")[append],encoding=\"utf8\")\n json.dump(self.__dict__, file, indent=4, ensure_ascii=False)\n file.close()\n\n def write(self):\n file = open(self.getSongName(), \"w+\")\n file.write(self.getLyric())\n file.close()\n",
"step-ids": [
6,
7,
8,
9,
10
]
}
|
[
6,
7,
8,
9,
10
] |
from golem import actions
from projects.golem_gui.pages import common
from projects.golem_gui.pages import api
from projects.golem_gui.pages import test_builder_code
# Golem test metadata: description appears in reports; tags drive selection.
description = 'Verify the user can edit test code and save it'
tags = ['smoke']
def setup(data):
    """Log in to Golem, select the fixture project and create a fresh test.

    The new test's name is stored in ``data.test`` for use by ``test()``.
    """
    common.access_golem(data.env.url, data.env.admin)
    api.project.using_project('test_builder_code')
    data.test = api.test.create_access_test_code(data.project)
def test(data):
    """Edit the test's code, save it, and verify the change persists."""
    test_line = "description = 'desc'"
    test_builder_code.set_value(test_line)
    actions.click(test_builder_code.save_button)
    # Toast confirms the save round-tripped to the backend.
    common.assert_toast_message_is_displayed('Test ' + data.test + ' saved')
    # Reload to prove persistence rather than just client-side state.
    actions.refresh_page()
    test_builder_code.assert_value(test_line)
|
normal
|
{
"blob_id": "d4cdc4f1995eab7f01c970b43cb0a3c5ed4a2711",
"index": 3673,
"step-1": "<mask token>\n\n\ndef setup(data):\n common.access_golem(data.env.url, data.env.admin)\n api.project.using_project('test_builder_code')\n data.test = api.test.create_access_test_code(data.project)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef setup(data):\n common.access_golem(data.env.url, data.env.admin)\n api.project.using_project('test_builder_code')\n data.test = api.test.create_access_test_code(data.project)\n\n\ndef test(data):\n test_line = \"description = 'desc'\"\n test_builder_code.set_value(test_line)\n actions.click(test_builder_code.save_button)\n common.assert_toast_message_is_displayed('Test ' + data.test + ' saved')\n actions.refresh_page()\n test_builder_code.assert_value(test_line)\n",
"step-3": "<mask token>\ndescription = 'Verify the user can edit test code and save it'\ntags = ['smoke']\n\n\ndef setup(data):\n common.access_golem(data.env.url, data.env.admin)\n api.project.using_project('test_builder_code')\n data.test = api.test.create_access_test_code(data.project)\n\n\ndef test(data):\n test_line = \"description = 'desc'\"\n test_builder_code.set_value(test_line)\n actions.click(test_builder_code.save_button)\n common.assert_toast_message_is_displayed('Test ' + data.test + ' saved')\n actions.refresh_page()\n test_builder_code.assert_value(test_line)\n",
"step-4": "from golem import actions\nfrom projects.golem_gui.pages import common\nfrom projects.golem_gui.pages import api\nfrom projects.golem_gui.pages import test_builder_code\ndescription = 'Verify the user can edit test code and save it'\ntags = ['smoke']\n\n\ndef setup(data):\n common.access_golem(data.env.url, data.env.admin)\n api.project.using_project('test_builder_code')\n data.test = api.test.create_access_test_code(data.project)\n\n\ndef test(data):\n test_line = \"description = 'desc'\"\n test_builder_code.set_value(test_line)\n actions.click(test_builder_code.save_button)\n common.assert_toast_message_is_displayed('Test ' + data.test + ' saved')\n actions.refresh_page()\n test_builder_code.assert_value(test_line)\n",
"step-5": null,
"step-ids": [
1,
2,
3,
4
]
}
|
[
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for c in s:
if c == 'R':
t += 1
else:
ans = max(ans, t)
t = 0
<|reserved_special_token_0|>
print(ans)
<|reserved_special_token_1|>
# Print the length of the longest run of consecutive 'R' characters
# in the input string.
s = input()
best = 0
current = 0
for ch in s:
    if ch == 'R':
        current += 1
        # Track the best run as it grows, so no end-of-string
        # fix-up is needed.
        best = max(best, current)
    else:
        current = 0
print(best)
<|reserved_special_token_1|>
# Read a string and print the length of the longest run of consecutive
# 'R' characters.
s = input()
ans = 0
t = 0
for c in s:
    if c == "R":
        t += 1
    else:
        # Run broken: record it and reset the counter.
        ans = max(ans, t)
        t = 0
# A run that reaches the end of the string is only recorded here.
ans = max(ans, t)
print(ans)
|
flexible
|
{
"blob_id": "85c97dfeb766f127fa51067e5155b2da3a88e3be",
"index": 4811,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor c in s:\n if c == 'R':\n t += 1\n else:\n ans = max(ans, t)\n t = 0\n<mask token>\nprint(ans)\n",
"step-3": "s = input()\nans = 0\nt = 0\nfor c in s:\n if c == 'R':\n t += 1\n else:\n ans = max(ans, t)\n t = 0\nans = max(ans, t)\nprint(ans)\n",
"step-4": "s = input()\n\nans = 0\nt = 0\nfor c in s:\n if c == \"R\":\n t += 1\n else:\n ans = max(ans, t)\n t = 0\nans = max(ans, t)\n\n\nprint(ans)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class _PartitionedResults(BaseResults):
<|reserved_special_token_0|>
def mask(self, indices):
self.theta.mask[indices] = True
self.skeletons.mask[indices] = True
self.scores.mask[indices] = True
self.partitions.mask[indices] = True
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class _ResolvedResults(BaseResults):
def __init__(self, partitioned_results: _PartitionedResults):
self._partitioned_results = partitioned_results
theta = _init_unified_series(partitioned_results.theta)
skeletons = _init_unified_series(partitioned_results.skeletons)
scores = _init_unified_series(partitioned_results.scores)
super().__init__(theta=theta, skeletons=skeletons, scores=scores)
def resolve(self, segment, segment_alignment):
self.scores[segment] = self._partitioned_results.scores[segment][:,
segment_alignment]
self.skeletons[segment] = self._partitioned_results.skeletons[segment][
:, segment_alignment]
self.theta[segment] = self._partitioned_results.theta[segment][:,
segment_alignment]
def mask(self, indices):
self.theta.mask[indices] = True
self.skeletons.mask[indices] = True
self.scores.mask[indices] = True
def num_valid(self):
return np.sum(~self.scores.mask)
class _FinalResults(BaseResults):
@classmethod
def from_resolved(cls, resolved_results: _ResolvedResults):
return _FinalResults(theta=resolved_results.theta.filled(np.nan),
skeletons=resolved_results.skeletons.filled(np.nan), scores=
resolved_results.scores.filled(np.nan))
@classmethod
def from_shuffled(cls, shuffled_results: ShuffledResults):
return _FinalResults(theta=np.full_like(shuffled_results.theta[:, 0
], np.nan), skeletons=np.full_like(shuffled_results.scores[:, 0
], np.nan), scores=np.full_like(shuffled_results.skeletons[:, 0
], np.nan))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def _init_partitioned_series(shuffled_series: np.ndarray):
return ma.masked_all_like(shuffled_series)
<|reserved_special_token_0|>
class _PartitionedResults(BaseResults):
def __init__(self, shuffled_results: ShuffledResults):
self.cur_partition = -1
self.partitions = ma.masked_all((len(shuffled_results),), dtype=int)
self._shuffled_results = shuffled_results
theta = _init_partitioned_series(shuffled_results.theta)
skeletons = _init_partitioned_series(shuffled_results.skeletons)
scores = _init_partitioned_series(shuffled_results.scores)
super().__init__(theta=theta, skeletons=skeletons, scores=scores)
def mask(self, indices):
self.theta.mask[indices] = True
self.skeletons.mask[indices] = True
self.scores.mask[indices] = True
self.partitions.mask[indices] = True
def set_partition(self, frame_index: int, partition: int, new_partition:
bool=False):
if new_partition:
self.cur_partition += 1
_set_partition(self.theta, self._shuffled_results.theta,
frame_index, partition)
_set_partition(self.skeletons, self._shuffled_results.skeletons,
frame_index, partition)
_set_partition(self.scores, self._shuffled_results.scores,
frame_index, partition)
self.partitions[frame_index] = self.cur_partition
def _get_partition_indices(self, partition_index: int):
return np.where(self.partitions == partition_index)[0]
def get_segments(self):
all_partitions_indexes = np.unique(self.partitions.filled(-1))
return [self._get_partition_indices(partition_index) for
partition_index in all_partitions_indexes if partition_index >= 0]
class _ResolvedResults(BaseResults):
def __init__(self, partitioned_results: _PartitionedResults):
self._partitioned_results = partitioned_results
theta = _init_unified_series(partitioned_results.theta)
skeletons = _init_unified_series(partitioned_results.skeletons)
scores = _init_unified_series(partitioned_results.scores)
super().__init__(theta=theta, skeletons=skeletons, scores=scores)
def resolve(self, segment, segment_alignment):
self.scores[segment] = self._partitioned_results.scores[segment][:,
segment_alignment]
self.skeletons[segment] = self._partitioned_results.skeletons[segment][
:, segment_alignment]
self.theta[segment] = self._partitioned_results.theta[segment][:,
segment_alignment]
def mask(self, indices):
self.theta.mask[indices] = True
self.skeletons.mask[indices] = True
self.scores.mask[indices] = True
def num_valid(self):
return np.sum(~self.scores.mask)
class _FinalResults(BaseResults):
@classmethod
def from_resolved(cls, resolved_results: _ResolvedResults):
return _FinalResults(theta=resolved_results.theta.filled(np.nan),
skeletons=resolved_results.skeletons.filled(np.nan), scores=
resolved_results.scores.filled(np.nan))
@classmethod
def from_shuffled(cls, shuffled_results: ShuffledResults):
return _FinalResults(theta=np.full_like(shuffled_results.theta[:, 0
], np.nan), skeletons=np.full_like(shuffled_results.scores[:, 0
], np.nan), scores=np.full_like(shuffled_results.skeletons[:, 0
], np.nan))
<|reserved_special_token_0|>
def _calculate_smallest_gap_to_adjacent(segment_index, segments,
segments_alignment):
score = np.nan
segment_offset = np.nan
if segment_index - 1 >= 0 and not segments_alignment.mask[segment_index - 1
]:
gap = segments[segment_index][0] - segments[segment_index - 1][-1]
score = gap
segment_offset = -1
if segment_index + 1 < len(segments_alignment
) and not segments_alignment.mask[segment_index + 1]:
gap = segments[segment_index + 1][0] - segments[segment_index][-1]
if np.isnan(score) or gap < score:
score = gap
segment_offset = 1
return score, segment_offset
def _align_unlabelled_segments_with_adjacents(segments, segments_alignment,
    partitioned_skeletons, frame_rate: float):
    """Resolve unaligned segments by comparing them with adjacent aligned
    segments.

    Segments are processed in order of the smallest frame gap to an
    already-aligned neighbour. Isolated segments whose gap to any trusted
    neighbour exceeds MAXIMUM_GAP_ALLOWED_WITH_ADJACENT_SEGMENT_SEC are
    left unaligned (masked).
    """
    maximum_gap_allowed = max(1, int(frame_rate *
        MAXIMUM_GAP_ALLOWED_WITH_ADJACENT_SEGMENT_SEC))
    if np.all(segments_alignment.mask):
        # No trusted segment to propagate from: give up.
        logger.info(
            'There are no trusted segments with head decision to resolve the whole video, stopping analysis.'
            )
        return segments_alignment
    unaligned = np.where(segments_alignment.mask)[0]
    while len(unaligned) > 0:
        # (gap, neighbour offset) for every still-unaligned segment.
        all_gaps = [_calculate_smallest_gap_to_adjacent(segment_index=x,
            segments=segments, segments_alignment=segments_alignment) for x in
            unaligned]
        # Fix the segment with the smallest gap first ([0] = gap column).
        segment_to_fix_index = np.nanargmin(all_gaps, axis=0)[0]
        gap_to_adjacent_segment, adjacent_segment_offset = all_gaps[
            segment_to_fix_index]
        if gap_to_adjacent_segment > maximum_gap_allowed:
            # Remaining segments are all too isolated to trust.
            break
        cur_segment_index = unaligned[segment_to_fix_index]
        cur_segment_skeleton = partitioned_skeletons[segments[
            cur_segment_index]]
        adjacent_segment_index = cur_segment_index + adjacent_segment_offset
        adjacent_alignment = segments_alignment[adjacent_segment_index]
        adjacent_segment = segments[adjacent_segment_index]
        adjacent_segment_skeleton = partitioned_skeletons[adjacent_segment][
            :, adjacent_alignment]
        # Compare the skeletons closest in time across the gap.
        if adjacent_segment_offset == -1:
            closest_unaligned_skeleton = cur_segment_skeleton[0]
            closest_known_skeleton = adjacent_segment_skeleton[-1]
        elif adjacent_segment_offset == 1:
            closest_unaligned_skeleton = cur_segment_skeleton[-1]
            closest_known_skeleton = adjacent_segment_skeleton[0]
        else:
            raise ValueError()
        # Pick the candidate orientation most similar to the neighbour.
        dists = [skeleton_distance(closest_known_skeleton, skel) for skel in
            closest_unaligned_skeleton]
        segments_alignment[cur_segment_index] = int(np.argmax(dists))
        unaligned = np.where(segments_alignment.mask)[0]
    return segments_alignment
def _init_unified_series(mixed_series):
return ma.masked_all((mixed_series.shape[0],) + mixed_series.shape[2:],
dtype=mixed_series.dtype)
def resolve_head_tail(shuffled_results: ShuffledResults, original_results:
    OriginalResults, frame_rate: float, score_threshold) ->BaseResults:
    """Choose one head/tail orientation per frame from the shuffled results.

    Pipeline: partition frames into continuous segments, align segments
    against the classical-tracking labels, propagate alignment to the
    remaining segments from their neighbours, then drop frames whose score
    stays below *score_threshold*.
    """
    len_series = len(shuffled_results)
    partitioned_results = _make_continuous_partitions(score_threshold=
        score_threshold, frame_rate=frame_rate, shuffled_results=
        shuffled_results)
    segments = partitioned_results.get_segments()
    if len(segments) == 0:
        logger.error(
            f"Couldn't find any continuous segments of predicted data above the threshold {score_threshold}, stopping analysis."
            )
        return _FinalResults.from_shuffled(shuffled_results)
    # First pass: trust the classical-tracking labels where available.
    segments_alignment = _align_segments_with_labels(segments,
        partitioned_results.skeletons, original_results.skeletons)
    # Second pass: propagate alignment to label-less segments.
    segments_alignment = _align_unlabelled_segments_with_adjacents(segments,
        segments_alignment, partitioned_results.skeletons, frame_rate)
    resolved_results = _ResolvedResults(partitioned_results)
    for segment, segment_alignment in zip(segments, segments_alignment):
        if not ma.is_masked(segment_alignment):
            resolved_results.resolve(segment, segment_alignment)
    # Discard frames whose resolved score is still below the threshold.
    low_scores_indices = np.where(ma.masked_less(resolved_results.scores,
        score_threshold).mask)[0]
    resolved_results.mask(low_scores_indices)
    num_success = resolved_results.num_valid()
    original_num_success = np.any(~np.isnan(original_results.skeletons),
        axis=(1, 2)).sum()
    logger.info(
        f'Resolved head/tail, {num_success} out of {len_series} frames analyzed successfully ({float(num_success) / len_series * 100:.1f}%) (original features : {original_num_success} or {float(original_num_success) / len_series * 100:.1f}% of total)'
        )
    if num_success < original_num_success:
        logger.warning(
            f'Original results had {original_num_success - num_success} more successfully analyzed frames!'
            )
    return _FinalResults.from_resolved(resolved_results)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def _init_partitioned_series(shuffled_series: np.ndarray):
return ma.masked_all_like(shuffled_series)
<|reserved_special_token_0|>
class _PartitionedResults(BaseResults):
def __init__(self, shuffled_results: ShuffledResults):
self.cur_partition = -1
self.partitions = ma.masked_all((len(shuffled_results),), dtype=int)
self._shuffled_results = shuffled_results
theta = _init_partitioned_series(shuffled_results.theta)
skeletons = _init_partitioned_series(shuffled_results.skeletons)
scores = _init_partitioned_series(shuffled_results.scores)
super().__init__(theta=theta, skeletons=skeletons, scores=scores)
def mask(self, indices):
self.theta.mask[indices] = True
self.skeletons.mask[indices] = True
self.scores.mask[indices] = True
self.partitions.mask[indices] = True
def set_partition(self, frame_index: int, partition: int, new_partition:
bool=False):
if new_partition:
self.cur_partition += 1
_set_partition(self.theta, self._shuffled_results.theta,
frame_index, partition)
_set_partition(self.skeletons, self._shuffled_results.skeletons,
frame_index, partition)
_set_partition(self.scores, self._shuffled_results.scores,
frame_index, partition)
self.partitions[frame_index] = self.cur_partition
def _get_partition_indices(self, partition_index: int):
return np.where(self.partitions == partition_index)[0]
def get_segments(self):
all_partitions_indexes = np.unique(self.partitions.filled(-1))
return [self._get_partition_indices(partition_index) for
partition_index in all_partitions_indexes if partition_index >= 0]
class _ResolvedResults(BaseResults):
def __init__(self, partitioned_results: _PartitionedResults):
self._partitioned_results = partitioned_results
theta = _init_unified_series(partitioned_results.theta)
skeletons = _init_unified_series(partitioned_results.skeletons)
scores = _init_unified_series(partitioned_results.scores)
super().__init__(theta=theta, skeletons=skeletons, scores=scores)
def resolve(self, segment, segment_alignment):
self.scores[segment] = self._partitioned_results.scores[segment][:,
segment_alignment]
self.skeletons[segment] = self._partitioned_results.skeletons[segment][
:, segment_alignment]
self.theta[segment] = self._partitioned_results.theta[segment][:,
segment_alignment]
def mask(self, indices):
self.theta.mask[indices] = True
self.skeletons.mask[indices] = True
self.scores.mask[indices] = True
def num_valid(self):
return np.sum(~self.scores.mask)
class _FinalResults(BaseResults):
@classmethod
def from_resolved(cls, resolved_results: _ResolvedResults):
return _FinalResults(theta=resolved_results.theta.filled(np.nan),
skeletons=resolved_results.skeletons.filled(np.nan), scores=
resolved_results.scores.filled(np.nan))
@classmethod
def from_shuffled(cls, shuffled_results: ShuffledResults):
return _FinalResults(theta=np.full_like(shuffled_results.theta[:, 0
], np.nan), skeletons=np.full_like(shuffled_results.scores[:, 0
], np.nan), scores=np.full_like(shuffled_results.skeletons[:, 0
], np.nan))
<|reserved_special_token_0|>
def _align_segments_with_labels(segments, partitioned_skeletons,
labelled_skeletons, min_labelled=5):
"""
Match the head/tail alignment with the results of the classical tracking in each of the segments,
if there is enough labelled data in the segment
"""
segments_alignment = ma.masked_all((len(segments),), dtype=np.uint8)
for segment_index, segment in enumerate(segments):
segment_skeletons = labelled_skeletons[segment]
non_nan_labelled = np.any(~np.isnan(segment_skeletons), axis=(1, 2))
labels_count = np.sum(non_nan_labelled)
non_masked = ~np.any(partitioned_skeletons[segment].mask, axis=(1,
2, 3))
to_compare = np.logical_and(non_nan_labelled, non_masked)
similarity_scores = []
for label_skel, partitioned_skeleton in zip(segment_skeletons[
to_compare], partitioned_skeletons[segment][to_compare]):
dists = [skeleton_distance(label_skel, x) for x in
partitioned_skeleton]
similarity_scores.append(dists)
if len(similarity_scores) > 0:
mean_similarity_scores = np.mean(similarity_scores, axis=0)
if mean_similarity_scores[0] * mean_similarity_scores[1
] < 0 and labels_count > min_labelled:
segments_alignment[segment_index] = np.argmax(
mean_similarity_scores)
return segments_alignment
def _calculate_smallest_gap_to_adjacent(segment_index, segments,
segments_alignment):
score = np.nan
segment_offset = np.nan
if segment_index - 1 >= 0 and not segments_alignment.mask[segment_index - 1
]:
gap = segments[segment_index][0] - segments[segment_index - 1][-1]
score = gap
segment_offset = -1
if segment_index + 1 < len(segments_alignment
) and not segments_alignment.mask[segment_index + 1]:
gap = segments[segment_index + 1][0] - segments[segment_index][-1]
if np.isnan(score) or gap < score:
score = gap
segment_offset = 1
return score, segment_offset
def _align_unlabelled_segments_with_adjacents(segments, segments_alignment,
partitioned_skeletons, frame_rate: float):
"""
Resolve the unaligned segments by comparing with adjacent segments,
starting with the segments that have the least frames gap between an adjacent trusted segment
Don't align isolated segments which a big gap between trusted segments
"""
maximum_gap_allowed = max(1, int(frame_rate *
MAXIMUM_GAP_ALLOWED_WITH_ADJACENT_SEGMENT_SEC))
if np.all(segments_alignment.mask):
logger.info(
'There are no trusted segments with head decision to resolve the whole video, stopping analysis.'
)
return segments_alignment
unaligned = np.where(segments_alignment.mask)[0]
while len(unaligned) > 0:
all_gaps = [_calculate_smallest_gap_to_adjacent(segment_index=x,
segments=segments, segments_alignment=segments_alignment) for x in
unaligned]
segment_to_fix_index = np.nanargmin(all_gaps, axis=0)[0]
gap_to_adjacent_segment, adjacent_segment_offset = all_gaps[
segment_to_fix_index]
if gap_to_adjacent_segment > maximum_gap_allowed:
break
cur_segment_index = unaligned[segment_to_fix_index]
cur_segment_skeleton = partitioned_skeletons[segments[
cur_segment_index]]
adjacent_segment_index = cur_segment_index + adjacent_segment_offset
adjacent_alignment = segments_alignment[adjacent_segment_index]
adjacent_segment = segments[adjacent_segment_index]
adjacent_segment_skeleton = partitioned_skeletons[adjacent_segment][
:, adjacent_alignment]
if adjacent_segment_offset == -1:
closest_unaligned_skeleton = cur_segment_skeleton[0]
closest_known_skeleton = adjacent_segment_skeleton[-1]
elif adjacent_segment_offset == 1:
closest_unaligned_skeleton = cur_segment_skeleton[-1]
closest_known_skeleton = adjacent_segment_skeleton[0]
else:
raise ValueError()
dists = [skeleton_distance(closest_known_skeleton, skel) for skel in
closest_unaligned_skeleton]
segments_alignment[cur_segment_index] = int(np.argmax(dists))
unaligned = np.where(segments_alignment.mask)[0]
return segments_alignment
def _init_unified_series(mixed_series):
return ma.masked_all((mixed_series.shape[0],) + mixed_series.shape[2:],
dtype=mixed_series.dtype)
def resolve_head_tail(shuffled_results: ShuffledResults, original_results:
OriginalResults, frame_rate: float, score_threshold) ->BaseResults:
len_series = len(shuffled_results)
partitioned_results = _make_continuous_partitions(score_threshold=
score_threshold, frame_rate=frame_rate, shuffled_results=
shuffled_results)
segments = partitioned_results.get_segments()
if len(segments) == 0:
logger.error(
f"Couldn't find any continuous segments of predicted data above the threshold {score_threshold}, stopping analysis."
)
return _FinalResults.from_shuffled(shuffled_results)
segments_alignment = _align_segments_with_labels(segments,
partitioned_results.skeletons, original_results.skeletons)
segments_alignment = _align_unlabelled_segments_with_adjacents(segments,
segments_alignment, partitioned_results.skeletons, frame_rate)
resolved_results = _ResolvedResults(partitioned_results)
for segment, segment_alignment in zip(segments, segments_alignment):
if not ma.is_masked(segment_alignment):
resolved_results.resolve(segment, segment_alignment)
low_scores_indices = np.where(ma.masked_less(resolved_results.scores,
score_threshold).mask)[0]
resolved_results.mask(low_scores_indices)
num_success = resolved_results.num_valid()
original_num_success = np.any(~np.isnan(original_results.skeletons),
axis=(1, 2)).sum()
logger.info(
f'Resolved head/tail, {num_success} out of {len_series} frames analyzed successfully ({float(num_success) / len_series * 100:.1f}%) (original features : {original_num_success} or {float(original_num_success) / len_series * 100:.1f}% of total)'
)
if num_success < original_num_success:
logger.warning(
f'Original results had {original_num_success - num_success} more successfully analyzed frames!'
)
return _FinalResults.from_resolved(resolved_results)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
# Module-level logger; DEBUG so the head/tail resolution steps are traced.
logging.basicConfig()
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
# Two frames belong to the same continuous segment only if their skeleton
# angles differ by less than this (radians).
CONTINUOUS_ANGLES_DIST_THRESHOLD = np.deg2rad(30)
# Look-back window (seconds) used when extending a continuous segment.
# NOTE(review): "CONTINOUS" is a typo, but renaming would break references.
CONTINOUS_SEGMENT_TIME_WINDOW_SEC = 0.2
# Segments shorter than this (seconds) are discarded as unreliable.
MIN_SEGMENT_SIZE_SEC = 0.2
# Max gap (seconds) allowed when aligning a segment to a trusted neighbour.
MAXIMUM_GAP_ALLOWED_WITH_ADJACENT_SEGMENT_SEC = 1
def _init_partitioned_series(shuffled_series: np.ndarray):
    """Return a fully-masked array shaped like *shuffled_series* (same dtype)."""
    return ma.masked_all_like(shuffled_series)
def _set_partition(partitioned_series, shuffled_series, frame_index: int,
partition: int):
partitioned_series[frame_index][0] = shuffled_series[frame_index, partition
]
partitioned_series[frame_index][1] = shuffled_series[frame_index, 1 -
partition]
class _PartitionedResults(BaseResults):
    """Shuffled results reordered per-frame into continuous partitions.

    For every accepted frame, candidate 0 holds the orientation consistent
    with the frames before it and candidate 1 the flipped alternative.
    ``partitions`` records which continuous segment each frame belongs to.
    """

    def __init__(self, shuffled_results: ShuffledResults):
        # Id of the partition currently being grown; -1 = none started yet.
        self.cur_partition = -1
        # Per-frame partition id; masked = frame not assigned to a segment.
        self.partitions = ma.masked_all((len(shuffled_results),), dtype=int)
        self._shuffled_results = shuffled_results
        theta = _init_partitioned_series(shuffled_results.theta)
        skeletons = _init_partitioned_series(shuffled_results.skeletons)
        scores = _init_partitioned_series(shuffled_results.scores)
        super().__init__(theta=theta, skeletons=skeletons, scores=scores)

    def mask(self, indices):
        """Mask out *indices* in every series and drop them from partitions."""
        self.theta.mask[indices] = True
        self.skeletons.mask[indices] = True
        self.scores.mask[indices] = True
        self.partitions.mask[indices] = True

    def set_partition(self, frame_index: int, partition: int, new_partition:
        bool=False):
        """Assign *frame_index* to the current partition, placing candidate
        *partition* in slot 0.

        When *new_partition* is True a fresh partition id is started first.
        """
        if new_partition:
            self.cur_partition += 1
        _set_partition(self.theta, self._shuffled_results.theta,
            frame_index, partition)
        _set_partition(self.skeletons, self._shuffled_results.skeletons,
            frame_index, partition)
        _set_partition(self.scores, self._shuffled_results.scores,
            frame_index, partition)
        self.partitions[frame_index] = self.cur_partition

    def _get_partition_indices(self, partition_index: int):
        """Return the frame indices belonging to *partition_index*."""
        return np.where(self.partitions == partition_index)[0]

    def get_segments(self):
        """Return one array of frame indices per partition, in id order."""
        # filled(-1) turns masked (unassigned) entries into the sentinel -1,
        # which the >= 0 filter then excludes.
        all_partitions_indexes = np.unique(self.partitions.filled(-1))
        return [self._get_partition_indices(partition_index) for
            partition_index in all_partitions_indexes if partition_index >= 0]
class _ResolvedResults(BaseResults):
    """Single-candidate series built by picking one orientation per segment."""

    def __init__(self, partitioned_results: _PartitionedResults):
        self._partitioned_results = partitioned_results
        # Unified series drop the 2-way candidate axis; start fully masked.
        theta = _init_unified_series(partitioned_results.theta)
        skeletons = _init_unified_series(partitioned_results.skeletons)
        scores = _init_unified_series(partitioned_results.scores)
        super().__init__(theta=theta, skeletons=skeletons, scores=scores)

    def resolve(self, segment, segment_alignment):
        """Copy candidate *segment_alignment* of every frame in *segment*
        into the unified series."""
        self.scores[segment] = self._partitioned_results.scores[segment][:,
            segment_alignment]
        self.skeletons[segment] = self._partitioned_results.skeletons[segment][
            :, segment_alignment]
        self.theta[segment] = self._partitioned_results.theta[segment][:,
            segment_alignment]

    def mask(self, indices):
        """Mask out *indices* in all series (e.g. low-score frames)."""
        self.theta.mask[indices] = True
        self.skeletons.mask[indices] = True
        self.scores.mask[indices] = True

    def num_valid(self):
        """Return the number of frames that were successfully resolved."""
        return np.sum(~self.scores.mask)
class _FinalResults(BaseResults):
    """Final, unmasked results: plain ndarrays with NaN for missing frames."""

    @classmethod
    def from_resolved(cls, resolved_results: _ResolvedResults):
        """Convert resolved (masked) series into NaN-filled plain arrays."""
        return _FinalResults(theta=resolved_results.theta.filled(np.nan),
            skeletons=resolved_results.skeletons.filled(np.nan), scores=
            resolved_results.scores.filled(np.nan))

    @classmethod
    def from_shuffled(cls, shuffled_results: ShuffledResults):
        """Build an all-NaN result set shaped after *shuffled_results*.

        Used when the analysis fails entirely. Axis 1 (the 2-way shuffle
        candidate axis) is dropped from each series.
        """
        # Bug fix: the skeletons/scores templates were swapped, so each
        # placeholder array came out with the other one's shape.
        return _FinalResults(theta=np.full_like(shuffled_results.theta[:,
            0], np.nan), skeletons=np.full_like(shuffled_results.skeletons
            [:, 0], np.nan), scores=np.full_like(shuffled_results.scores[:,
            0], np.nan))
def _make_continuous_partitions(shuffled_results: ShuffledResults,
    score_threshold: float, frame_rate: float) ->_PartitionedResults:
    """Group well-scoring frames into continuous, orientation-consistent
    partitions.

    A frame joins the current partition when one of its two candidate
    orientations stays within CONTINUOUS_ANGLES_DIST_THRESHOLD of the most
    recently accepted frame inside the look-back window; otherwise a new
    partition is started. Partitions shorter than MIN_SEGMENT_SIZE_SEC are
    discarded at the end.
    """
    # Convert the time-based tuning constants to frame counts (>= 1).
    time_window = max(1, int(frame_rate * CONTINOUS_SEGMENT_TIME_WINDOW_SEC))
    min_segment_size = max(1, int(frame_rate * MIN_SEGMENT_SIZE_SEC))
    partitioned_results = _PartitionedResults(shuffled_results)
    # Only consider frames where at least one candidate beats the threshold.
    good_score_frames = np.where(ma.greater_equal(ma.max(shuffled_results.
        scores, axis=1), score_threshold))[0]
    for frame_index in good_score_frames:
        # Candidate-0 angles of the frames accepted in the look-back window.
        prev_theta = partitioned_results.theta[frame_index - min(
            time_window, frame_index):frame_index, 0]
        if np.all(np.any(prev_theta.mask, axis=1)):
            # No usable frame in the window: start a new partition with an
            # arbitrary orientation (candidate 0).
            partitioned_results.set_partition(frame_index=frame_index,
                partition=0, new_partition=True)
        else:
            # Compare both candidates against the last accepted frame and
            # keep the closer one if it is close enough.
            last_valid_index = np.where(~np.any(prev_theta.mask, axis=1))[0][-1
                ]
            dists = [angle_distance(shuffled_results.theta[frame_index, k,
                :], prev_theta[last_valid_index]) for k in range(2)]
            partition = int(np.argmin(dists))
            if dists[partition] < CONTINUOUS_ANGLES_DIST_THRESHOLD:
                partitioned_results.set_partition(frame_index=frame_index,
                    partition=partition)
    # Drop partitions that are too short to be trusted.
    for cur_partition_indices in partitioned_results.get_segments():
        if len(cur_partition_indices) < min_segment_size:
            partitioned_results.mask(cur_partition_indices)
    return partitioned_results
def _align_segments_with_labels(segments, partitioned_skeletons,
    labelled_skeletons, min_labelled=5):
    """Match the head/tail alignment against the classical-tracking results
    in each segment, when the segment has enough labelled frames.

    Returns one alignment index (0 or 1) per segment; entries stay masked
    when the labels are missing, too few, or ambiguous.
    """
    segments_alignment = ma.masked_all((len(segments),), dtype=np.uint8)
    for segment_index, segment in enumerate(segments):
        segment_skeletons = labelled_skeletons[segment]
        # Frames of this segment that have a classical-tracking skeleton.
        non_nan_labelled = np.any(~np.isnan(segment_skeletons), axis=(1, 2))
        labels_count = np.sum(non_nan_labelled)
        # Frames that also have valid partitioned predictions.
        non_masked = ~np.any(partitioned_skeletons[segment].mask, axis=(1,
            2, 3))
        to_compare = np.logical_and(non_nan_labelled, non_masked)
        similarity_scores = []
        for label_skel, partitioned_skeleton in zip(segment_skeletons[
            to_compare], partitioned_skeletons[segment][to_compare]):
            # Similarity of the label against each of the two candidates.
            dists = [skeleton_distance(label_skel, x) for x in
                partitioned_skeleton]
            similarity_scores.append(dists)
        if len(similarity_scores) > 0:
            mean_similarity_scores = np.mean(similarity_scores, axis=0)
            # Decide only when the two candidates disagree in sign (a clear
            # winner) and enough labelled frames back the decision.
            if mean_similarity_scores[0] * mean_similarity_scores[1
                ] < 0 and labels_count > min_labelled:
                segments_alignment[segment_index] = np.argmax(
                    mean_similarity_scores)
    return segments_alignment
def _calculate_smallest_gap_to_adjacent(segment_index, segments,
segments_alignment):
score = np.nan
segment_offset = np.nan
if segment_index - 1 >= 0 and not segments_alignment.mask[segment_index - 1
]:
gap = segments[segment_index][0] - segments[segment_index - 1][-1]
score = gap
segment_offset = -1
if segment_index + 1 < len(segments_alignment
) and not segments_alignment.mask[segment_index + 1]:
gap = segments[segment_index + 1][0] - segments[segment_index][-1]
if np.isnan(score) or gap < score:
score = gap
segment_offset = 1
return score, segment_offset
def _align_unlabelled_segments_with_adjacents(segments, segments_alignment,
        partitioned_skeletons, frame_rate: float):
    """
    Resolve the unaligned segments by comparing with adjacent segments,
    starting with the segments that have the least frames gap between an adjacent trusted segment.
    Don't align isolated segments which have a big gap between trusted segments.

    :param segments: list of frame-index arrays, one per continuous segment
    :param segments_alignment: masked array of per-segment alignments (masked = undecided)
    :param partitioned_skeletons: skeletons carrying both head/tail candidates per frame
    :param frame_rate: used to convert the allowed time gap into frames
    :return: segments_alignment, updated in place (isolated segments stay masked)
    """
    # largest frame gap across which a neighbor's alignment is still trusted
    maximum_gap_allowed = max(1, int(frame_rate *
        MAXIMUM_GAP_ALLOWED_WITH_ADJACENT_SEGMENT_SEC))
    # without any label-aligned segment there is no trusted anchor to propagate from
    if np.all(segments_alignment.mask):
        logger.info(
            'There are no trusted segments with head decision to resolve the whole video, stopping analysis.'
            )
        return segments_alignment
    unaligned = np.where(segments_alignment.mask)[0]
    while len(unaligned) > 0:
        # pick the unaligned segment closest (in frames) to an already aligned neighbor
        all_gaps = [_calculate_smallest_gap_to_adjacent(segment_index=x,
            segments=segments, segments_alignment=segments_alignment) for x in
            unaligned]
        # nanargmin over (gap, offset) pairs; column 0 selects the smallest gap
        segment_to_fix_index = np.nanargmin(all_gaps, axis=0)[0]
        gap_to_adjacent_segment, adjacent_segment_offset = all_gaps[
            segment_to_fix_index]
        # only isolated segments are left: stop and leave them masked
        if gap_to_adjacent_segment > maximum_gap_allowed:
            break
        cur_segment_index = unaligned[segment_to_fix_index]
        cur_segment_skeleton = partitioned_skeletons[segments[
            cur_segment_index]]
        adjacent_segment_index = cur_segment_index + adjacent_segment_offset
        adjacent_alignment = segments_alignment[adjacent_segment_index]
        adjacent_segment = segments[adjacent_segment_index]
        adjacent_segment_skeleton = partitioned_skeletons[adjacent_segment][
            :, adjacent_alignment]
        # compare the two skeleton frames that face each other across the gap
        if adjacent_segment_offset == -1:
            closest_unaligned_skeleton = cur_segment_skeleton[0]  # first frame of cur segment
            closest_known_skeleton = adjacent_segment_skeleton[-1]  # last frame of prev segment
        elif adjacent_segment_offset == 1:
            closest_unaligned_skeleton = cur_segment_skeleton[-1]  # last frame of cur segment
            closest_known_skeleton = adjacent_segment_skeleton[0]  # first frame of next segment
        else:
            raise ValueError()
        # the head/tail candidate most similar to the trusted neighbor wins
        dists = [skeleton_distance(closest_known_skeleton, skel) for skel in
            closest_unaligned_skeleton]
        segments_alignment[cur_segment_index] = int(np.argmax(dists))
        unaligned = np.where(segments_alignment.mask)[0]
    return segments_alignment
def _init_unified_series(mixed_series):
return ma.masked_all((mixed_series.shape[0],) + mixed_series.shape[2:],
dtype=mixed_series.dtype)
def resolve_head_tail(shuffled_results: ShuffledResults, original_results:
    OriginalResults, frame_rate: float, score_threshold) ->BaseResults:
    """
    Decide the head/tail orientation for every frame of the predicted series.

    The shuffled predictions are first grouped into continuous segments, each
    segment is oriented using the classical-tracking labels, remaining segments
    are oriented from their neighbors, and low-score frames are masked out.
    """
    len_series = len(shuffled_results)
    # Step 1: build continuous segments without head/tail jumps.
    partitioned_results = _make_continuous_partitions(
        shuffled_results=shuffled_results,
        frame_rate=frame_rate,
        score_threshold=score_threshold,
    )
    segments = partitioned_results.get_segments()
    if not segments:
        logger.error(
            f"Couldn't find any continuous segments of predicted data above the threshold {score_threshold}, stopping analysis."
        )
        return _FinalResults.from_shuffled(shuffled_results)
    # Step 2: orient segments that overlap with labelled (classical tracking) data.
    segments_alignment = _align_segments_with_labels(
        segments, partitioned_results.skeletons, original_results.skeletons
    )
    # Step 3: propagate orientation to the remaining segments from their neighbors.
    segments_alignment = _align_unlabelled_segments_with_adjacents(
        segments, segments_alignment, partitioned_results.skeletons, frame_rate
    )
    # Step 4: collapse each oriented segment to a single candidate per frame.
    resolved_results = _ResolvedResults(partitioned_results)
    for segment, segment_alignment in zip(segments, segments_alignment):
        if not ma.is_masked(segment_alignment):
            resolved_results.resolve(segment, segment_alignment)
    # Step 5: drop frames whose resolved score is below the threshold.
    low_scores_indices = np.where(ma.masked_less(resolved_results.scores,
        score_threshold).mask)[0]
    resolved_results.mask(low_scores_indices)
    num_success = resolved_results.num_valid()
    original_num_success = np.any(~np.isnan(original_results.skeletons),
        axis=(1, 2)).sum()
    logger.info(
        f'Resolved head/tail, {num_success} out of {len_series} frames analyzed successfully ({float(num_success) / len_series * 100:.1f}%) (original features : {original_num_success} or {float(original_num_success) / len_series * 100:.1f}% of total)'
    )
    if num_success < original_num_success:
        logger.warning(
            f'Original results had {original_num_success - num_success} more successfully analyzed frames!'
        )
    return _FinalResults.from_resolved(resolved_results)
<|reserved_special_token_1|>
"""
This module contains the logic to resolve the head-tail orientation of a predicted video time series.
"""
import logging
import numpy as np
import numpy.ma as ma
from wormpose.pose.distance_metrics import angle_distance, skeleton_distance
from wormpose.pose.results_datatypes import (
BaseResults,
ShuffledResults,
OriginalResults,
)
# Module-level logger; DEBUG level is set explicitly so analysis progress is visible.
logging.basicConfig()
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)

# threshold to compare neighbor frames theta, to be considered continuous and belong to the same segment
CONTINUOUS_ANGLES_DIST_THRESHOLD = np.deg2rad(30)

# we consider frames to be part of the same segment if they are maximum this amount of seconds apart
# (and satisfy the distance threshold)
# NOTE(review): the name misspells "CONTINUOUS"; renaming would touch every use site, so it is kept.
CONTINOUS_SEGMENT_TIME_WINDOW_SEC = 0.2

# discard too small segments less than this amount of seconds
MIN_SEGMENT_SIZE_SEC = 0.2

# don't align isolated segments that are more than this amount of seconds apart from aligned segments
MAXIMUM_GAP_ALLOWED_WITH_ADJACENT_SEGMENT_SEC = 1
def _init_partitioned_series(shuffled_series: np.ndarray):
return ma.masked_all_like(shuffled_series)
def _set_partition(partitioned_series, shuffled_series, frame_index: int, partition: int):
partitioned_series[frame_index][0] = shuffled_series[frame_index, partition]
partitioned_series[frame_index][1] = shuffled_series[frame_index, 1 - partition]
class _PartitionedResults(BaseResults):
    """Shuffled results re-ordered so that, within each continuous segment, index 0
    of the candidate axis follows one consistent head/tail choice; also keeps track
    of which segment (partition) each frame belongs to."""

    def __init__(self, shuffled_results: ShuffledResults):
        # index of the segment currently being grown; -1 means no segment started yet
        self.cur_partition = -1
        # per-frame segment id (masked = frame not assigned to any segment)
        self.partitions = ma.masked_all((len(shuffled_results),), dtype=int)
        self._shuffled_results = shuffled_results
        theta = _init_partitioned_series(shuffled_results.theta)
        skeletons = _init_partitioned_series(shuffled_results.skeletons)
        scores = _init_partitioned_series(shuffled_results.scores)
        super().__init__(theta=theta, skeletons=skeletons, scores=scores)

    def mask(self, indices):
        """Discard the given frames from all series (used to drop short segments)."""
        self.theta.mask[indices] = True
        self.skeletons.mask[indices] = True
        self.scores.mask[indices] = True
        self.partitions.mask[indices] = True

    def set_partition(self, frame_index: int, partition: int, new_partition: bool = False):
        """Assign a frame to the current segment (or start a new one when
        *new_partition* is True), storing the chosen candidate first."""
        if new_partition:
            self.cur_partition += 1
        _set_partition(self.theta, self._shuffled_results.theta, frame_index, partition)
        _set_partition(self.skeletons, self._shuffled_results.skeletons, frame_index, partition)
        _set_partition(self.scores, self._shuffled_results.scores, frame_index, partition)
        self.partitions[frame_index] = self.cur_partition

    def _get_partition_indices(self, partition_index: int):
        # frame indices belonging to one segment
        return np.where(self.partitions == partition_index)[0]

    def get_segments(self):
        """Return a list of frame-index arrays, one per non-empty segment."""
        # masked entries are filled with -1 so they are excluded below
        all_partitions_indexes = np.unique(self.partitions.filled(-1))
        return [
            self._get_partition_indices(partition_index)
            for partition_index in all_partitions_indexes
            if partition_index >= 0
        ]
class _ResolvedResults(BaseResults):
    """Results with the head/tail choice collapsed to a single candidate per frame."""

    def __init__(self, partitioned_results: _PartitionedResults):
        self._partitioned_results = partitioned_results
        super().__init__(
            theta=_init_unified_series(partitioned_results.theta),
            skeletons=_init_unified_series(partitioned_results.skeletons),
            scores=_init_unified_series(partitioned_results.scores),
        )

    def resolve(self, segment, segment_alignment):
        """Copy the chosen candidate of every series for all frames of *segment*."""
        source = self._partitioned_results
        self.theta[segment] = source.theta[segment][:, segment_alignment]
        self.skeletons[segment] = source.skeletons[segment][:, segment_alignment]
        self.scores[segment] = source.scores[segment][:, segment_alignment]

    def mask(self, indices):
        """Discard the given frames from all series."""
        for series in (self.theta, self.skeletons, self.scores):
            series.mask[indices] = True

    def num_valid(self):
        """Number of frames that were successfully resolved."""
        return np.sum(~self.scores.mask)
class _FinalResults(BaseResults):
    """Plain (non-masked) results: unresolved frames are represented with NaN."""

    @classmethod
    def from_resolved(cls, resolved_results: _ResolvedResults):
        """Build final results from resolved ones, filling masked entries with NaN."""
        return _FinalResults(
            theta=resolved_results.theta.filled(np.nan),
            skeletons=resolved_results.skeletons.filled(np.nan),
            scores=resolved_results.scores.filled(np.nan),
        )

    @classmethod
    def from_shuffled(cls, shuffled_results: ShuffledResults):
        """Build all-NaN final results shaped like one candidate of the shuffled
        results (used when the analysis fails entirely).

        BUGFIX: ``skeletons`` was previously built from ``shuffled_results.scores``
        and ``scores`` from ``shuffled_results.skeletons`` (sources swapped),
        producing wrongly shaped arrays; each series now mirrors its own source.
        """
        return _FinalResults(
            theta=np.full_like(shuffled_results.theta[:, 0], np.nan),
            skeletons=np.full_like(shuffled_results.skeletons[:, 0], np.nan),
            scores=np.full_like(shuffled_results.scores[:, 0], np.nan),
        )
def _make_continuous_partitions(
    shuffled_results: ShuffledResults, score_threshold: float, frame_rate: float
) -> _PartitionedResults:
    """
    Group frames into continuous segments where the head/tail choice doesn't flip.

    A frame joins the current segment when one of its two theta candidates is
    close (below CONTINUOUS_ANGLES_DIST_THRESHOLD) to the last valid frame within
    the recent time window; otherwise a new segment is started. Low-score frames
    are skipped up front and too-short segments are discarded at the end.
    """
    time_window = max(1, int(frame_rate * CONTINOUS_SEGMENT_TIME_WINDOW_SEC))
    min_segment_size = max(1, int(frame_rate * MIN_SEGMENT_SIZE_SEC))
    partitioned_results = _PartitionedResults(shuffled_results)
    # discard low score frames early (use the maximum value of both scores for now)
    good_score_frames = np.where(ma.greater_equal(ma.max(shuffled_results.scores, axis=1), score_threshold))[0]
    for frame_index in good_score_frames:
        # candidate-0 theta of the already partitioned frames in the recent time window
        prev_theta = partitioned_results.theta[frame_index - min(time_window, frame_index) : frame_index, 0]
        # if there is a big gap > time_window we start a new partition, with an arbitrary candidate (0)
        if np.all(np.any(prev_theta.mask, axis=1)):
            partitioned_results.set_partition(frame_index=frame_index, partition=0, new_partition=True)
        # otherwise we look in the time_window close past the closest non nan frame see if we can continue the
        # partition as long as the values stay continuous
        else:
            last_valid_index = np.where(~np.any(prev_theta.mask, axis=1))[0][-1]
            dists = [
                angle_distance(
                    shuffled_results.theta[frame_index, k, :],
                    prev_theta[last_valid_index],
                )
                for k in range(2)
            ]
            # keep the candidate closest in angle to the last valid frame, if close enough
            partition = int(np.argmin(dists))
            if dists[partition] < CONTINUOUS_ANGLES_DIST_THRESHOLD:
                partitioned_results.set_partition(frame_index=frame_index, partition=partition)
    # discard short segments
    for cur_partition_indices in partitioned_results.get_segments():
        if len(cur_partition_indices) < min_segment_size:
            partitioned_results.mask(cur_partition_indices)
    return partitioned_results
def _align_segments_with_labels(segments, partitioned_skeletons, labelled_skeletons, min_labelled=5):
    """
    Choose a head/tail orientation per segment by voting with the classical
    tracking results, whenever the segment contains enough labelled frames.
    Segments without a clear, sufficiently supported vote stay masked.
    """
    segments_alignment = ma.masked_all((len(segments),), dtype=np.uint8)
    for seg_idx, seg in enumerate(segments):
        labelled = labelled_skeletons[seg]
        has_label = np.any(~np.isnan(labelled), axis=(1, 2))
        labels_count = np.sum(has_label)
        valid_prediction = ~np.any(partitioned_skeletons[seg].mask, axis=(1, 2, 3))
        comparable = np.logical_and(has_label, valid_prediction)
        # one similarity pair (candidate 0 vs candidate 1) per comparable frame
        similarity_scores = [
            [skeleton_distance(label_skel, candidate) for candidate in candidates]
            for label_skel, candidates in zip(
                labelled[comparable], partitioned_skeletons[seg][comparable]
            )
        ]
        if not similarity_scores:
            continue
        mean_scores = np.mean(similarity_scores, axis=0)
        # opposite-signed mean similarities mean the two candidates clearly disagree
        if mean_scores[0] * mean_scores[1] < 0 and labels_count > min_labelled:
            segments_alignment[seg_idx] = np.argmax(mean_scores)
    return segments_alignment
def _calculate_smallest_gap_to_adjacent(segment_index, segments, segments_alignment):
# evaluate how far away this segment is from known values
score = np.nan
segment_offset = np.nan
if segment_index - 1 >= 0 and not segments_alignment.mask[segment_index - 1]:
gap = segments[segment_index][0] - segments[segment_index - 1][-1]
score = gap
segment_offset = -1
if segment_index + 1 < len(segments_alignment) and not segments_alignment.mask[segment_index + 1]:
gap = segments[segment_index + 1][0] - segments[segment_index][-1]
if np.isnan(score) or gap < score:
score = gap
segment_offset = 1
return score, segment_offset
def _align_unlabelled_segments_with_adjacents(segments, segments_alignment, partitioned_skeletons, frame_rate: float):
    """
    Resolve the unaligned segments by comparing with adjacent segments,
    starting with the segments that have the least frames gap between an adjacent trusted segment.
    Don't align isolated segments which have a big gap between trusted segments.

    :param segments: list of frame-index arrays, one per continuous segment
    :param segments_alignment: masked array of per-segment alignments (masked = undecided)
    :param partitioned_skeletons: skeletons carrying both head/tail candidates per frame
    :param frame_rate: used to convert the allowed time gap into frames
    :return: segments_alignment, updated in place (isolated segments stay masked)
    """
    maximum_gap_allowed = max(1, int(frame_rate * MAXIMUM_GAP_ALLOWED_WITH_ADJACENT_SEGMENT_SEC))
    # NOTE(review): nothing is picked randomly here — without at least one label-aligned
    # segment there is no trusted anchor to propagate from, so the function aborts early
    if np.all(segments_alignment.mask):
        logger.info("There are no trusted segments with head decision to resolve the whole video, stopping analysis.")
        return segments_alignment
    # fix in priority the segments with known adjacent frames with little gap
    # until all segments are aligned except the isolated ones (further than maximum_gap_allowed)
    unaligned = np.where(segments_alignment.mask)[0]
    while len(unaligned) > 0:
        # we first pick the best candidate segment to align (there are known frames nearby before or after or both)
        all_gaps = [
            _calculate_smallest_gap_to_adjacent(
                segment_index=x,
                segments=segments,
                segments_alignment=segments_alignment,
            )
            for x in unaligned
        ]
        # nanargmin over (gap, offset) pairs; column 0 selects the smallest gap
        segment_to_fix_index = np.nanargmin(all_gaps, axis=0)[0]
        gap_to_adjacent_segment, adjacent_segment_offset = all_gaps[segment_to_fix_index]
        # abort if only isolated segments are left
        if gap_to_adjacent_segment > maximum_gap_allowed:
            break
        cur_segment_index = unaligned[segment_to_fix_index]
        cur_segment_skeleton = partitioned_skeletons[segments[cur_segment_index]]
        adjacent_segment_index = cur_segment_index + adjacent_segment_offset
        adjacent_alignment = segments_alignment[adjacent_segment_index]
        adjacent_segment = segments[adjacent_segment_index]
        adjacent_segment_skeleton = partitioned_skeletons[adjacent_segment][:, adjacent_alignment]
        # compare the two skeleton frames that face each other across the gap
        if adjacent_segment_offset == -1:
            closest_unaligned_skeleton = cur_segment_skeleton[0]  # first frame of cur segment
            closest_known_skeleton = adjacent_segment_skeleton[-1]  # last frame of prev segment
        elif adjacent_segment_offset == 1:
            closest_unaligned_skeleton = cur_segment_skeleton[-1]  # last frame of cur segment
            closest_known_skeleton = adjacent_segment_skeleton[0]  # first frame of next segment
        else:
            raise ValueError()
        # the head/tail candidate most similar to the trusted neighbor wins
        dists = [skeleton_distance(closest_known_skeleton, skel) for skel in closest_unaligned_skeleton]
        segments_alignment[cur_segment_index] = int(np.argmax(dists))
        unaligned = np.where(segments_alignment.mask)[0]
    return segments_alignment
def _init_unified_series(mixed_series):
return ma.masked_all((mixed_series.shape[0],) + mixed_series.shape[2:], dtype=mixed_series.dtype)
def resolve_head_tail(
    shuffled_results: ShuffledResults,
    original_results: OriginalResults,
    frame_rate: float,
    score_threshold,
) -> BaseResults:
    """
    Decide the head/tail orientation for every frame of the predicted series.

    The shuffled predictions are first grouped into continuous segments, each
    segment is oriented using the classical-tracking labels, remaining segments
    are oriented from their neighbors, and low-score frames are masked out.
    """
    len_series = len(shuffled_results)
    # Step 1: build continuous segments without head/tail jumps.
    partitioned_results = _make_continuous_partitions(
        shuffled_results=shuffled_results,
        frame_rate=frame_rate,
        score_threshold=score_threshold,
    )
    segments = partitioned_results.get_segments()
    if not segments:
        logger.error(
            f"Couldn't find any continuous segments of predicted data above the threshold {score_threshold},"
            f" stopping analysis."
        )
        return _FinalResults.from_shuffled(shuffled_results)
    # Step 2: orient segments that overlap with labelled (classical tracking) data.
    segments_alignment = _align_segments_with_labels(
        segments, partitioned_results.skeletons, original_results.skeletons
    )
    # Step 3: propagate orientation to the remaining segments from their neighbors.
    segments_alignment = _align_unlabelled_segments_with_adjacents(
        segments, segments_alignment, partitioned_results.skeletons, frame_rate
    )
    # Step 4: collapse each oriented segment to a single candidate per frame.
    resolved_results = _ResolvedResults(partitioned_results)
    for segment, segment_alignment in zip(segments, segments_alignment):
        if not ma.is_masked(segment_alignment):
            resolved_results.resolve(segment, segment_alignment)
    # Step 5: drop frames whose resolved score is below the threshold.
    low_scores_indices = np.where(ma.masked_less(resolved_results.scores, score_threshold).mask)[0]
    resolved_results.mask(low_scores_indices)
    num_success = resolved_results.num_valid()
    original_num_success = np.any(~np.isnan(original_results.skeletons), axis=(1, 2)).sum()
    logger.info(
        f"Resolved head/tail, {num_success} out of {len_series} frames analyzed successfully "
        f"({float(num_success) / len_series * 100:.1f}%) (original features : {original_num_success}"
        f" or {(float(original_num_success) / len_series * 100):.1f}% of total)"
    )
    if num_success < original_num_success:
        logger.warning(f"Original results had {original_num_success - num_success} more successfully analyzed frames!")
    return _FinalResults.from_resolved(resolved_results)
|
flexible
|
{
"blob_id": "b8fcd8e6dce8d210576bc4166dd258e5fd51278d",
"index": 517,
"step-1": "<mask token>\n\n\nclass _PartitionedResults(BaseResults):\n <mask token>\n\n def mask(self, indices):\n self.theta.mask[indices] = True\n self.skeletons.mask[indices] = True\n self.scores.mask[indices] = True\n self.partitions.mask[indices] = True\n <mask token>\n <mask token>\n <mask token>\n\n\nclass _ResolvedResults(BaseResults):\n\n def __init__(self, partitioned_results: _PartitionedResults):\n self._partitioned_results = partitioned_results\n theta = _init_unified_series(partitioned_results.theta)\n skeletons = _init_unified_series(partitioned_results.skeletons)\n scores = _init_unified_series(partitioned_results.scores)\n super().__init__(theta=theta, skeletons=skeletons, scores=scores)\n\n def resolve(self, segment, segment_alignment):\n self.scores[segment] = self._partitioned_results.scores[segment][:,\n segment_alignment]\n self.skeletons[segment] = self._partitioned_results.skeletons[segment][\n :, segment_alignment]\n self.theta[segment] = self._partitioned_results.theta[segment][:,\n segment_alignment]\n\n def mask(self, indices):\n self.theta.mask[indices] = True\n self.skeletons.mask[indices] = True\n self.scores.mask[indices] = True\n\n def num_valid(self):\n return np.sum(~self.scores.mask)\n\n\nclass _FinalResults(BaseResults):\n\n @classmethod\n def from_resolved(cls, resolved_results: _ResolvedResults):\n return _FinalResults(theta=resolved_results.theta.filled(np.nan),\n skeletons=resolved_results.skeletons.filled(np.nan), scores=\n resolved_results.scores.filled(np.nan))\n\n @classmethod\n def from_shuffled(cls, shuffled_results: ShuffledResults):\n return _FinalResults(theta=np.full_like(shuffled_results.theta[:, 0\n ], np.nan), skeletons=np.full_like(shuffled_results.scores[:, 0\n ], np.nan), scores=np.full_like(shuffled_results.skeletons[:, 0\n ], np.nan))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef _init_partitioned_series(shuffled_series: np.ndarray):\n return ma.masked_all_like(shuffled_series)\n\n\n<mask token>\n\n\nclass _PartitionedResults(BaseResults):\n\n def __init__(self, shuffled_results: ShuffledResults):\n self.cur_partition = -1\n self.partitions = ma.masked_all((len(shuffled_results),), dtype=int)\n self._shuffled_results = shuffled_results\n theta = _init_partitioned_series(shuffled_results.theta)\n skeletons = _init_partitioned_series(shuffled_results.skeletons)\n scores = _init_partitioned_series(shuffled_results.scores)\n super().__init__(theta=theta, skeletons=skeletons, scores=scores)\n\n def mask(self, indices):\n self.theta.mask[indices] = True\n self.skeletons.mask[indices] = True\n self.scores.mask[indices] = True\n self.partitions.mask[indices] = True\n\n def set_partition(self, frame_index: int, partition: int, new_partition:\n bool=False):\n if new_partition:\n self.cur_partition += 1\n _set_partition(self.theta, self._shuffled_results.theta,\n frame_index, partition)\n _set_partition(self.skeletons, self._shuffled_results.skeletons,\n frame_index, partition)\n _set_partition(self.scores, self._shuffled_results.scores,\n frame_index, partition)\n self.partitions[frame_index] = self.cur_partition\n\n def _get_partition_indices(self, partition_index: int):\n return np.where(self.partitions == partition_index)[0]\n\n def get_segments(self):\n all_partitions_indexes = np.unique(self.partitions.filled(-1))\n return [self._get_partition_indices(partition_index) for\n partition_index in all_partitions_indexes if partition_index >= 0]\n\n\nclass _ResolvedResults(BaseResults):\n\n def __init__(self, partitioned_results: _PartitionedResults):\n self._partitioned_results = partitioned_results\n theta = _init_unified_series(partitioned_results.theta)\n skeletons = _init_unified_series(partitioned_results.skeletons)\n scores = _init_unified_series(partitioned_results.scores)\n super().__init__(theta=theta, 
skeletons=skeletons, scores=scores)\n\n def resolve(self, segment, segment_alignment):\n self.scores[segment] = self._partitioned_results.scores[segment][:,\n segment_alignment]\n self.skeletons[segment] = self._partitioned_results.skeletons[segment][\n :, segment_alignment]\n self.theta[segment] = self._partitioned_results.theta[segment][:,\n segment_alignment]\n\n def mask(self, indices):\n self.theta.mask[indices] = True\n self.skeletons.mask[indices] = True\n self.scores.mask[indices] = True\n\n def num_valid(self):\n return np.sum(~self.scores.mask)\n\n\nclass _FinalResults(BaseResults):\n\n @classmethod\n def from_resolved(cls, resolved_results: _ResolvedResults):\n return _FinalResults(theta=resolved_results.theta.filled(np.nan),\n skeletons=resolved_results.skeletons.filled(np.nan), scores=\n resolved_results.scores.filled(np.nan))\n\n @classmethod\n def from_shuffled(cls, shuffled_results: ShuffledResults):\n return _FinalResults(theta=np.full_like(shuffled_results.theta[:, 0\n ], np.nan), skeletons=np.full_like(shuffled_results.scores[:, 0\n ], np.nan), scores=np.full_like(shuffled_results.skeletons[:, 0\n ], np.nan))\n\n\n<mask token>\n\n\ndef _calculate_smallest_gap_to_adjacent(segment_index, segments,\n segments_alignment):\n score = np.nan\n segment_offset = np.nan\n if segment_index - 1 >= 0 and not segments_alignment.mask[segment_index - 1\n ]:\n gap = segments[segment_index][0] - segments[segment_index - 1][-1]\n score = gap\n segment_offset = -1\n if segment_index + 1 < len(segments_alignment\n ) and not segments_alignment.mask[segment_index + 1]:\n gap = segments[segment_index + 1][0] - segments[segment_index][-1]\n if np.isnan(score) or gap < score:\n score = gap\n segment_offset = 1\n return score, segment_offset\n\n\ndef _align_unlabelled_segments_with_adjacents(segments, segments_alignment,\n partitioned_skeletons, frame_rate: float):\n \"\"\"\n Resolve the unaligned segments by comparing with adjacent segments,\n starting with the segments 
that have the least frames gap between an adjacent trusted segment\n Don't align isolated segments which a big gap between trusted segments\n \"\"\"\n maximum_gap_allowed = max(1, int(frame_rate *\n MAXIMUM_GAP_ALLOWED_WITH_ADJACENT_SEGMENT_SEC))\n if np.all(segments_alignment.mask):\n logger.info(\n 'There are no trusted segments with head decision to resolve the whole video, stopping analysis.'\n )\n return segments_alignment\n unaligned = np.where(segments_alignment.mask)[0]\n while len(unaligned) > 0:\n all_gaps = [_calculate_smallest_gap_to_adjacent(segment_index=x,\n segments=segments, segments_alignment=segments_alignment) for x in\n unaligned]\n segment_to_fix_index = np.nanargmin(all_gaps, axis=0)[0]\n gap_to_adjacent_segment, adjacent_segment_offset = all_gaps[\n segment_to_fix_index]\n if gap_to_adjacent_segment > maximum_gap_allowed:\n break\n cur_segment_index = unaligned[segment_to_fix_index]\n cur_segment_skeleton = partitioned_skeletons[segments[\n cur_segment_index]]\n adjacent_segment_index = cur_segment_index + adjacent_segment_offset\n adjacent_alignment = segments_alignment[adjacent_segment_index]\n adjacent_segment = segments[adjacent_segment_index]\n adjacent_segment_skeleton = partitioned_skeletons[adjacent_segment][\n :, adjacent_alignment]\n if adjacent_segment_offset == -1:\n closest_unaligned_skeleton = cur_segment_skeleton[0]\n closest_known_skeleton = adjacent_segment_skeleton[-1]\n elif adjacent_segment_offset == 1:\n closest_unaligned_skeleton = cur_segment_skeleton[-1]\n closest_known_skeleton = adjacent_segment_skeleton[0]\n else:\n raise ValueError()\n dists = [skeleton_distance(closest_known_skeleton, skel) for skel in\n closest_unaligned_skeleton]\n segments_alignment[cur_segment_index] = int(np.argmax(dists))\n unaligned = np.where(segments_alignment.mask)[0]\n return segments_alignment\n\n\ndef _init_unified_series(mixed_series):\n return ma.masked_all((mixed_series.shape[0],) + mixed_series.shape[2:],\n 
dtype=mixed_series.dtype)\n\n\ndef resolve_head_tail(shuffled_results: ShuffledResults, original_results:\n OriginalResults, frame_rate: float, score_threshold) ->BaseResults:\n len_series = len(shuffled_results)\n partitioned_results = _make_continuous_partitions(score_threshold=\n score_threshold, frame_rate=frame_rate, shuffled_results=\n shuffled_results)\n segments = partitioned_results.get_segments()\n if len(segments) == 0:\n logger.error(\n f\"Couldn't find any continuous segments of predicted data above the threshold {score_threshold}, stopping analysis.\"\n )\n return _FinalResults.from_shuffled(shuffled_results)\n segments_alignment = _align_segments_with_labels(segments,\n partitioned_results.skeletons, original_results.skeletons)\n segments_alignment = _align_unlabelled_segments_with_adjacents(segments,\n segments_alignment, partitioned_results.skeletons, frame_rate)\n resolved_results = _ResolvedResults(partitioned_results)\n for segment, segment_alignment in zip(segments, segments_alignment):\n if not ma.is_masked(segment_alignment):\n resolved_results.resolve(segment, segment_alignment)\n low_scores_indices = np.where(ma.masked_less(resolved_results.scores,\n score_threshold).mask)[0]\n resolved_results.mask(low_scores_indices)\n num_success = resolved_results.num_valid()\n original_num_success = np.any(~np.isnan(original_results.skeletons),\n axis=(1, 2)).sum()\n logger.info(\n f'Resolved head/tail, {num_success} out of {len_series} frames analyzed successfully ({float(num_success) / len_series * 100:.1f}%) (original features : {original_num_success} or {float(original_num_success) / len_series * 100:.1f}% of total)'\n )\n if num_success < original_num_success:\n logger.warning(\n f'Original results had {original_num_success - num_success} more successfully analyzed frames!'\n )\n return _FinalResults.from_resolved(resolved_results)\n",
"step-3": "<mask token>\n\n\ndef _init_partitioned_series(shuffled_series: np.ndarray):\n return ma.masked_all_like(shuffled_series)\n\n\n<mask token>\n\n\nclass _PartitionedResults(BaseResults):\n\n def __init__(self, shuffled_results: ShuffledResults):\n self.cur_partition = -1\n self.partitions = ma.masked_all((len(shuffled_results),), dtype=int)\n self._shuffled_results = shuffled_results\n theta = _init_partitioned_series(shuffled_results.theta)\n skeletons = _init_partitioned_series(shuffled_results.skeletons)\n scores = _init_partitioned_series(shuffled_results.scores)\n super().__init__(theta=theta, skeletons=skeletons, scores=scores)\n\n def mask(self, indices):\n self.theta.mask[indices] = True\n self.skeletons.mask[indices] = True\n self.scores.mask[indices] = True\n self.partitions.mask[indices] = True\n\n def set_partition(self, frame_index: int, partition: int, new_partition:\n bool=False):\n if new_partition:\n self.cur_partition += 1\n _set_partition(self.theta, self._shuffled_results.theta,\n frame_index, partition)\n _set_partition(self.skeletons, self._shuffled_results.skeletons,\n frame_index, partition)\n _set_partition(self.scores, self._shuffled_results.scores,\n frame_index, partition)\n self.partitions[frame_index] = self.cur_partition\n\n def _get_partition_indices(self, partition_index: int):\n return np.where(self.partitions == partition_index)[0]\n\n def get_segments(self):\n all_partitions_indexes = np.unique(self.partitions.filled(-1))\n return [self._get_partition_indices(partition_index) for\n partition_index in all_partitions_indexes if partition_index >= 0]\n\n\nclass _ResolvedResults(BaseResults):\n\n def __init__(self, partitioned_results: _PartitionedResults):\n self._partitioned_results = partitioned_results\n theta = _init_unified_series(partitioned_results.theta)\n skeletons = _init_unified_series(partitioned_results.skeletons)\n scores = _init_unified_series(partitioned_results.scores)\n super().__init__(theta=theta, 
skeletons=skeletons, scores=scores)\n\n def resolve(self, segment, segment_alignment):\n self.scores[segment] = self._partitioned_results.scores[segment][:,\n segment_alignment]\n self.skeletons[segment] = self._partitioned_results.skeletons[segment][\n :, segment_alignment]\n self.theta[segment] = self._partitioned_results.theta[segment][:,\n segment_alignment]\n\n def mask(self, indices):\n self.theta.mask[indices] = True\n self.skeletons.mask[indices] = True\n self.scores.mask[indices] = True\n\n def num_valid(self):\n return np.sum(~self.scores.mask)\n\n\nclass _FinalResults(BaseResults):\n\n @classmethod\n def from_resolved(cls, resolved_results: _ResolvedResults):\n return _FinalResults(theta=resolved_results.theta.filled(np.nan),\n skeletons=resolved_results.skeletons.filled(np.nan), scores=\n resolved_results.scores.filled(np.nan))\n\n @classmethod\n def from_shuffled(cls, shuffled_results: ShuffledResults):\n return _FinalResults(theta=np.full_like(shuffled_results.theta[:, 0\n ], np.nan), skeletons=np.full_like(shuffled_results.scores[:, 0\n ], np.nan), scores=np.full_like(shuffled_results.skeletons[:, 0\n ], np.nan))\n\n\n<mask token>\n\n\ndef _align_segments_with_labels(segments, partitioned_skeletons,\n labelled_skeletons, min_labelled=5):\n \"\"\"\n Match the head/tail alignment with the results of the classical tracking in each of the segments,\n if there is enough labelled data in the segment\n \"\"\"\n segments_alignment = ma.masked_all((len(segments),), dtype=np.uint8)\n for segment_index, segment in enumerate(segments):\n segment_skeletons = labelled_skeletons[segment]\n non_nan_labelled = np.any(~np.isnan(segment_skeletons), axis=(1, 2))\n labels_count = np.sum(non_nan_labelled)\n non_masked = ~np.any(partitioned_skeletons[segment].mask, axis=(1, \n 2, 3))\n to_compare = np.logical_and(non_nan_labelled, non_masked)\n similarity_scores = []\n for label_skel, partitioned_skeleton in zip(segment_skeletons[\n to_compare], 
partitioned_skeletons[segment][to_compare]):\n dists = [skeleton_distance(label_skel, x) for x in\n partitioned_skeleton]\n similarity_scores.append(dists)\n if len(similarity_scores) > 0:\n mean_similarity_scores = np.mean(similarity_scores, axis=0)\n if mean_similarity_scores[0] * mean_similarity_scores[1\n ] < 0 and labels_count > min_labelled:\n segments_alignment[segment_index] = np.argmax(\n mean_similarity_scores)\n return segments_alignment\n\n\ndef _calculate_smallest_gap_to_adjacent(segment_index, segments,\n segments_alignment):\n score = np.nan\n segment_offset = np.nan\n if segment_index - 1 >= 0 and not segments_alignment.mask[segment_index - 1\n ]:\n gap = segments[segment_index][0] - segments[segment_index - 1][-1]\n score = gap\n segment_offset = -1\n if segment_index + 1 < len(segments_alignment\n ) and not segments_alignment.mask[segment_index + 1]:\n gap = segments[segment_index + 1][0] - segments[segment_index][-1]\n if np.isnan(score) or gap < score:\n score = gap\n segment_offset = 1\n return score, segment_offset\n\n\ndef _align_unlabelled_segments_with_adjacents(segments, segments_alignment,\n partitioned_skeletons, frame_rate: float):\n \"\"\"\n Resolve the unaligned segments by comparing with adjacent segments,\n starting with the segments that have the least frames gap between an adjacent trusted segment\n Don't align isolated segments which a big gap between trusted segments\n \"\"\"\n maximum_gap_allowed = max(1, int(frame_rate *\n MAXIMUM_GAP_ALLOWED_WITH_ADJACENT_SEGMENT_SEC))\n if np.all(segments_alignment.mask):\n logger.info(\n 'There are no trusted segments with head decision to resolve the whole video, stopping analysis.'\n )\n return segments_alignment\n unaligned = np.where(segments_alignment.mask)[0]\n while len(unaligned) > 0:\n all_gaps = [_calculate_smallest_gap_to_adjacent(segment_index=x,\n segments=segments, segments_alignment=segments_alignment) for x in\n unaligned]\n segment_to_fix_index = np.nanargmin(all_gaps, 
axis=0)[0]\n gap_to_adjacent_segment, adjacent_segment_offset = all_gaps[\n segment_to_fix_index]\n if gap_to_adjacent_segment > maximum_gap_allowed:\n break\n cur_segment_index = unaligned[segment_to_fix_index]\n cur_segment_skeleton = partitioned_skeletons[segments[\n cur_segment_index]]\n adjacent_segment_index = cur_segment_index + adjacent_segment_offset\n adjacent_alignment = segments_alignment[adjacent_segment_index]\n adjacent_segment = segments[adjacent_segment_index]\n adjacent_segment_skeleton = partitioned_skeletons[adjacent_segment][\n :, adjacent_alignment]\n if adjacent_segment_offset == -1:\n closest_unaligned_skeleton = cur_segment_skeleton[0]\n closest_known_skeleton = adjacent_segment_skeleton[-1]\n elif adjacent_segment_offset == 1:\n closest_unaligned_skeleton = cur_segment_skeleton[-1]\n closest_known_skeleton = adjacent_segment_skeleton[0]\n else:\n raise ValueError()\n dists = [skeleton_distance(closest_known_skeleton, skel) for skel in\n closest_unaligned_skeleton]\n segments_alignment[cur_segment_index] = int(np.argmax(dists))\n unaligned = np.where(segments_alignment.mask)[0]\n return segments_alignment\n\n\ndef _init_unified_series(mixed_series):\n return ma.masked_all((mixed_series.shape[0],) + mixed_series.shape[2:],\n dtype=mixed_series.dtype)\n\n\ndef resolve_head_tail(shuffled_results: ShuffledResults, original_results:\n OriginalResults, frame_rate: float, score_threshold) ->BaseResults:\n len_series = len(shuffled_results)\n partitioned_results = _make_continuous_partitions(score_threshold=\n score_threshold, frame_rate=frame_rate, shuffled_results=\n shuffled_results)\n segments = partitioned_results.get_segments()\n if len(segments) == 0:\n logger.error(\n f\"Couldn't find any continuous segments of predicted data above the threshold {score_threshold}, stopping analysis.\"\n )\n return _FinalResults.from_shuffled(shuffled_results)\n segments_alignment = _align_segments_with_labels(segments,\n partitioned_results.skeletons, 
original_results.skeletons)\n segments_alignment = _align_unlabelled_segments_with_adjacents(segments,\n segments_alignment, partitioned_results.skeletons, frame_rate)\n resolved_results = _ResolvedResults(partitioned_results)\n for segment, segment_alignment in zip(segments, segments_alignment):\n if not ma.is_masked(segment_alignment):\n resolved_results.resolve(segment, segment_alignment)\n low_scores_indices = np.where(ma.masked_less(resolved_results.scores,\n score_threshold).mask)[0]\n resolved_results.mask(low_scores_indices)\n num_success = resolved_results.num_valid()\n original_num_success = np.any(~np.isnan(original_results.skeletons),\n axis=(1, 2)).sum()\n logger.info(\n f'Resolved head/tail, {num_success} out of {len_series} frames analyzed successfully ({float(num_success) / len_series * 100:.1f}%) (original features : {original_num_success} or {float(original_num_success) / len_series * 100:.1f}% of total)'\n )\n if num_success < original_num_success:\n logger.warning(\n f'Original results had {original_num_success - num_success} more successfully analyzed frames!'\n )\n return _FinalResults.from_resolved(resolved_results)\n",
"step-4": "<mask token>\nlogging.basicConfig()\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.DEBUG)\nCONTINUOUS_ANGLES_DIST_THRESHOLD = np.deg2rad(30)\nCONTINOUS_SEGMENT_TIME_WINDOW_SEC = 0.2\nMIN_SEGMENT_SIZE_SEC = 0.2\nMAXIMUM_GAP_ALLOWED_WITH_ADJACENT_SEGMENT_SEC = 1\n\n\ndef _init_partitioned_series(shuffled_series: np.ndarray):\n return ma.masked_all_like(shuffled_series)\n\n\ndef _set_partition(partitioned_series, shuffled_series, frame_index: int,\n partition: int):\n partitioned_series[frame_index][0] = shuffled_series[frame_index, partition\n ]\n partitioned_series[frame_index][1] = shuffled_series[frame_index, 1 -\n partition]\n\n\nclass _PartitionedResults(BaseResults):\n\n def __init__(self, shuffled_results: ShuffledResults):\n self.cur_partition = -1\n self.partitions = ma.masked_all((len(shuffled_results),), dtype=int)\n self._shuffled_results = shuffled_results\n theta = _init_partitioned_series(shuffled_results.theta)\n skeletons = _init_partitioned_series(shuffled_results.skeletons)\n scores = _init_partitioned_series(shuffled_results.scores)\n super().__init__(theta=theta, skeletons=skeletons, scores=scores)\n\n def mask(self, indices):\n self.theta.mask[indices] = True\n self.skeletons.mask[indices] = True\n self.scores.mask[indices] = True\n self.partitions.mask[indices] = True\n\n def set_partition(self, frame_index: int, partition: int, new_partition:\n bool=False):\n if new_partition:\n self.cur_partition += 1\n _set_partition(self.theta, self._shuffled_results.theta,\n frame_index, partition)\n _set_partition(self.skeletons, self._shuffled_results.skeletons,\n frame_index, partition)\n _set_partition(self.scores, self._shuffled_results.scores,\n frame_index, partition)\n self.partitions[frame_index] = self.cur_partition\n\n def _get_partition_indices(self, partition_index: int):\n return np.where(self.partitions == partition_index)[0]\n\n def get_segments(self):\n all_partitions_indexes = 
np.unique(self.partitions.filled(-1))\n return [self._get_partition_indices(partition_index) for\n partition_index in all_partitions_indexes if partition_index >= 0]\n\n\nclass _ResolvedResults(BaseResults):\n\n def __init__(self, partitioned_results: _PartitionedResults):\n self._partitioned_results = partitioned_results\n theta = _init_unified_series(partitioned_results.theta)\n skeletons = _init_unified_series(partitioned_results.skeletons)\n scores = _init_unified_series(partitioned_results.scores)\n super().__init__(theta=theta, skeletons=skeletons, scores=scores)\n\n def resolve(self, segment, segment_alignment):\n self.scores[segment] = self._partitioned_results.scores[segment][:,\n segment_alignment]\n self.skeletons[segment] = self._partitioned_results.skeletons[segment][\n :, segment_alignment]\n self.theta[segment] = self._partitioned_results.theta[segment][:,\n segment_alignment]\n\n def mask(self, indices):\n self.theta.mask[indices] = True\n self.skeletons.mask[indices] = True\n self.scores.mask[indices] = True\n\n def num_valid(self):\n return np.sum(~self.scores.mask)\n\n\nclass _FinalResults(BaseResults):\n\n @classmethod\n def from_resolved(cls, resolved_results: _ResolvedResults):\n return _FinalResults(theta=resolved_results.theta.filled(np.nan),\n skeletons=resolved_results.skeletons.filled(np.nan), scores=\n resolved_results.scores.filled(np.nan))\n\n @classmethod\n def from_shuffled(cls, shuffled_results: ShuffledResults):\n return _FinalResults(theta=np.full_like(shuffled_results.theta[:, 0\n ], np.nan), skeletons=np.full_like(shuffled_results.scores[:, 0\n ], np.nan), scores=np.full_like(shuffled_results.skeletons[:, 0\n ], np.nan))\n\n\ndef _make_continuous_partitions(shuffled_results: ShuffledResults,\n score_threshold: float, frame_rate: float) ->_PartitionedResults:\n time_window = max(1, int(frame_rate * CONTINOUS_SEGMENT_TIME_WINDOW_SEC))\n min_segment_size = max(1, int(frame_rate * MIN_SEGMENT_SIZE_SEC))\n partitioned_results = 
_PartitionedResults(shuffled_results)\n good_score_frames = np.where(ma.greater_equal(ma.max(shuffled_results.\n scores, axis=1), score_threshold))[0]\n for frame_index in good_score_frames:\n prev_theta = partitioned_results.theta[frame_index - min(\n time_window, frame_index):frame_index, 0]\n if np.all(np.any(prev_theta.mask, axis=1)):\n partitioned_results.set_partition(frame_index=frame_index,\n partition=0, new_partition=True)\n else:\n last_valid_index = np.where(~np.any(prev_theta.mask, axis=1))[0][-1\n ]\n dists = [angle_distance(shuffled_results.theta[frame_index, k,\n :], prev_theta[last_valid_index]) for k in range(2)]\n partition = int(np.argmin(dists))\n if dists[partition] < CONTINUOUS_ANGLES_DIST_THRESHOLD:\n partitioned_results.set_partition(frame_index=frame_index,\n partition=partition)\n for cur_partition_indices in partitioned_results.get_segments():\n if len(cur_partition_indices) < min_segment_size:\n partitioned_results.mask(cur_partition_indices)\n return partitioned_results\n\n\ndef _align_segments_with_labels(segments, partitioned_skeletons,\n labelled_skeletons, min_labelled=5):\n \"\"\"\n Match the head/tail alignment with the results of the classical tracking in each of the segments,\n if there is enough labelled data in the segment\n \"\"\"\n segments_alignment = ma.masked_all((len(segments),), dtype=np.uint8)\n for segment_index, segment in enumerate(segments):\n segment_skeletons = labelled_skeletons[segment]\n non_nan_labelled = np.any(~np.isnan(segment_skeletons), axis=(1, 2))\n labels_count = np.sum(non_nan_labelled)\n non_masked = ~np.any(partitioned_skeletons[segment].mask, axis=(1, \n 2, 3))\n to_compare = np.logical_and(non_nan_labelled, non_masked)\n similarity_scores = []\n for label_skel, partitioned_skeleton in zip(segment_skeletons[\n to_compare], partitioned_skeletons[segment][to_compare]):\n dists = [skeleton_distance(label_skel, x) for x in\n partitioned_skeleton]\n similarity_scores.append(dists)\n if 
len(similarity_scores) > 0:\n mean_similarity_scores = np.mean(similarity_scores, axis=0)\n if mean_similarity_scores[0] * mean_similarity_scores[1\n ] < 0 and labels_count > min_labelled:\n segments_alignment[segment_index] = np.argmax(\n mean_similarity_scores)\n return segments_alignment\n\n\ndef _calculate_smallest_gap_to_adjacent(segment_index, segments,\n segments_alignment):\n score = np.nan\n segment_offset = np.nan\n if segment_index - 1 >= 0 and not segments_alignment.mask[segment_index - 1\n ]:\n gap = segments[segment_index][0] - segments[segment_index - 1][-1]\n score = gap\n segment_offset = -1\n if segment_index + 1 < len(segments_alignment\n ) and not segments_alignment.mask[segment_index + 1]:\n gap = segments[segment_index + 1][0] - segments[segment_index][-1]\n if np.isnan(score) or gap < score:\n score = gap\n segment_offset = 1\n return score, segment_offset\n\n\ndef _align_unlabelled_segments_with_adjacents(segments, segments_alignment,\n partitioned_skeletons, frame_rate: float):\n \"\"\"\n Resolve the unaligned segments by comparing with adjacent segments,\n starting with the segments that have the least frames gap between an adjacent trusted segment\n Don't align isolated segments which a big gap between trusted segments\n \"\"\"\n maximum_gap_allowed = max(1, int(frame_rate *\n MAXIMUM_GAP_ALLOWED_WITH_ADJACENT_SEGMENT_SEC))\n if np.all(segments_alignment.mask):\n logger.info(\n 'There are no trusted segments with head decision to resolve the whole video, stopping analysis.'\n )\n return segments_alignment\n unaligned = np.where(segments_alignment.mask)[0]\n while len(unaligned) > 0:\n all_gaps = [_calculate_smallest_gap_to_adjacent(segment_index=x,\n segments=segments, segments_alignment=segments_alignment) for x in\n unaligned]\n segment_to_fix_index = np.nanargmin(all_gaps, axis=0)[0]\n gap_to_adjacent_segment, adjacent_segment_offset = all_gaps[\n segment_to_fix_index]\n if gap_to_adjacent_segment > maximum_gap_allowed:\n break\n 
cur_segment_index = unaligned[segment_to_fix_index]\n cur_segment_skeleton = partitioned_skeletons[segments[\n cur_segment_index]]\n adjacent_segment_index = cur_segment_index + adjacent_segment_offset\n adjacent_alignment = segments_alignment[adjacent_segment_index]\n adjacent_segment = segments[adjacent_segment_index]\n adjacent_segment_skeleton = partitioned_skeletons[adjacent_segment][\n :, adjacent_alignment]\n if adjacent_segment_offset == -1:\n closest_unaligned_skeleton = cur_segment_skeleton[0]\n closest_known_skeleton = adjacent_segment_skeleton[-1]\n elif adjacent_segment_offset == 1:\n closest_unaligned_skeleton = cur_segment_skeleton[-1]\n closest_known_skeleton = adjacent_segment_skeleton[0]\n else:\n raise ValueError()\n dists = [skeleton_distance(closest_known_skeleton, skel) for skel in\n closest_unaligned_skeleton]\n segments_alignment[cur_segment_index] = int(np.argmax(dists))\n unaligned = np.where(segments_alignment.mask)[0]\n return segments_alignment\n\n\ndef _init_unified_series(mixed_series):\n return ma.masked_all((mixed_series.shape[0],) + mixed_series.shape[2:],\n dtype=mixed_series.dtype)\n\n\ndef resolve_head_tail(shuffled_results: ShuffledResults, original_results:\n OriginalResults, frame_rate: float, score_threshold) ->BaseResults:\n len_series = len(shuffled_results)\n partitioned_results = _make_continuous_partitions(score_threshold=\n score_threshold, frame_rate=frame_rate, shuffled_results=\n shuffled_results)\n segments = partitioned_results.get_segments()\n if len(segments) == 0:\n logger.error(\n f\"Couldn't find any continuous segments of predicted data above the threshold {score_threshold}, stopping analysis.\"\n )\n return _FinalResults.from_shuffled(shuffled_results)\n segments_alignment = _align_segments_with_labels(segments,\n partitioned_results.skeletons, original_results.skeletons)\n segments_alignment = _align_unlabelled_segments_with_adjacents(segments,\n segments_alignment, partitioned_results.skeletons, 
frame_rate)\n resolved_results = _ResolvedResults(partitioned_results)\n for segment, segment_alignment in zip(segments, segments_alignment):\n if not ma.is_masked(segment_alignment):\n resolved_results.resolve(segment, segment_alignment)\n low_scores_indices = np.where(ma.masked_less(resolved_results.scores,\n score_threshold).mask)[0]\n resolved_results.mask(low_scores_indices)\n num_success = resolved_results.num_valid()\n original_num_success = np.any(~np.isnan(original_results.skeletons),\n axis=(1, 2)).sum()\n logger.info(\n f'Resolved head/tail, {num_success} out of {len_series} frames analyzed successfully ({float(num_success) / len_series * 100:.1f}%) (original features : {original_num_success} or {float(original_num_success) / len_series * 100:.1f}% of total)'\n )\n if num_success < original_num_success:\n logger.warning(\n f'Original results had {original_num_success - num_success} more successfully analyzed frames!'\n )\n return _FinalResults.from_resolved(resolved_results)\n",
"step-5": "\"\"\"\nThis module contains the logic to resolve the head-tail orientation of a predicted video time series.\n\"\"\"\n\nimport logging\n\nimport numpy as np\nimport numpy.ma as ma\n\nfrom wormpose.pose.distance_metrics import angle_distance, skeleton_distance\nfrom wormpose.pose.results_datatypes import (\n BaseResults,\n ShuffledResults,\n OriginalResults,\n)\n\nlogging.basicConfig()\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.DEBUG)\n\n# threshold to compare neighbor frames theta, to be considered continuous and belong to the same segment\nCONTINUOUS_ANGLES_DIST_THRESHOLD = np.deg2rad(30)\n\n# we consider frames to be part of the same segment if they are maximum this amount of seconds apart\n# (and satisfy the distance threshold)\nCONTINOUS_SEGMENT_TIME_WINDOW_SEC = 0.2\n\n# discard too small segments less than this amount of seconds\nMIN_SEGMENT_SIZE_SEC = 0.2\n\n# don't align isolated segments that are more than this amount of seconds apart from aligned segments\nMAXIMUM_GAP_ALLOWED_WITH_ADJACENT_SEGMENT_SEC = 1\n\n\ndef _init_partitioned_series(shuffled_series: np.ndarray):\n return ma.masked_all_like(shuffled_series)\n\n\ndef _set_partition(partitioned_series, shuffled_series, frame_index: int, partition: int):\n partitioned_series[frame_index][0] = shuffled_series[frame_index, partition]\n partitioned_series[frame_index][1] = shuffled_series[frame_index, 1 - partition]\n\n\nclass _PartitionedResults(BaseResults):\n def __init__(self, shuffled_results: ShuffledResults):\n\n self.cur_partition = -1\n self.partitions = ma.masked_all((len(shuffled_results),), dtype=int)\n self._shuffled_results = shuffled_results\n\n theta = _init_partitioned_series(shuffled_results.theta)\n skeletons = _init_partitioned_series(shuffled_results.skeletons)\n scores = _init_partitioned_series(shuffled_results.scores)\n super().__init__(theta=theta, skeletons=skeletons, scores=scores)\n\n def mask(self, indices):\n self.theta.mask[indices] = True\n 
self.skeletons.mask[indices] = True\n self.scores.mask[indices] = True\n self.partitions.mask[indices] = True\n\n def set_partition(self, frame_index: int, partition: int, new_partition: bool = False):\n if new_partition:\n self.cur_partition += 1\n\n _set_partition(self.theta, self._shuffled_results.theta, frame_index, partition)\n _set_partition(self.skeletons, self._shuffled_results.skeletons, frame_index, partition)\n _set_partition(self.scores, self._shuffled_results.scores, frame_index, partition)\n self.partitions[frame_index] = self.cur_partition\n\n def _get_partition_indices(self, partition_index: int):\n return np.where(self.partitions == partition_index)[0]\n\n def get_segments(self):\n all_partitions_indexes = np.unique(self.partitions.filled(-1))\n return [\n self._get_partition_indices(partition_index)\n for partition_index in all_partitions_indexes\n if partition_index >= 0\n ]\n\n\nclass _ResolvedResults(BaseResults):\n def __init__(self, partitioned_results: _PartitionedResults):\n self._partitioned_results = partitioned_results\n theta = _init_unified_series(partitioned_results.theta)\n skeletons = _init_unified_series(partitioned_results.skeletons)\n scores = _init_unified_series(partitioned_results.scores)\n super().__init__(theta=theta, skeletons=skeletons, scores=scores)\n\n def resolve(self, segment, segment_alignment):\n self.scores[segment] = self._partitioned_results.scores[segment][:, segment_alignment]\n self.skeletons[segment] = self._partitioned_results.skeletons[segment][:, segment_alignment]\n self.theta[segment] = self._partitioned_results.theta[segment][:, segment_alignment]\n\n def mask(self, indices):\n self.theta.mask[indices] = True\n self.skeletons.mask[indices] = True\n self.scores.mask[indices] = True\n\n def num_valid(self):\n return np.sum(~self.scores.mask)\n\n\nclass _FinalResults(BaseResults):\n @classmethod\n def from_resolved(cls, resolved_results: _ResolvedResults):\n return _FinalResults(\n 
theta=resolved_results.theta.filled(np.nan),\n skeletons=resolved_results.skeletons.filled(np.nan),\n scores=resolved_results.scores.filled(np.nan),\n )\n\n @classmethod\n def from_shuffled(cls, shuffled_results: ShuffledResults):\n return _FinalResults(\n theta=np.full_like(shuffled_results.theta[:, 0], np.nan),\n skeletons=np.full_like(shuffled_results.scores[:, 0], np.nan),\n scores=np.full_like(shuffled_results.skeletons[:, 0], np.nan),\n )\n\n\ndef _make_continuous_partitions(\n shuffled_results: ShuffledResults, score_threshold: float, frame_rate: float\n) -> _PartitionedResults:\n time_window = max(1, int(frame_rate * CONTINOUS_SEGMENT_TIME_WINDOW_SEC))\n min_segment_size = max(1, int(frame_rate * MIN_SEGMENT_SIZE_SEC))\n\n partitioned_results = _PartitionedResults(shuffled_results)\n\n # discard low score frames early (use the maximum value of both scores for now)\n good_score_frames = np.where(ma.greater_equal(ma.max(shuffled_results.scores, axis=1), score_threshold))[0]\n\n for frame_index in good_score_frames:\n\n prev_theta = partitioned_results.theta[frame_index - min(time_window, frame_index) : frame_index, 0]\n\n # if there is a big gap > time_window we start a new partition, with a random value (0)\n if np.all(np.any(prev_theta.mask, axis=1)):\n partitioned_results.set_partition(frame_index=frame_index, partition=0, new_partition=True)\n # otherwise we look in the time_window close past the closest non nan frame see if we can continue the\n # partition as long as the values stay continuous\n else:\n last_valid_index = np.where(~np.any(prev_theta.mask, axis=1))[0][-1]\n dists = [\n angle_distance(\n shuffled_results.theta[frame_index, k, :],\n prev_theta[last_valid_index],\n )\n for k in range(2)\n ]\n partition = int(np.argmin(dists))\n if dists[partition] < CONTINUOUS_ANGLES_DIST_THRESHOLD:\n partitioned_results.set_partition(frame_index=frame_index, partition=partition)\n\n # discard short segments\n for cur_partition_indices in 
partitioned_results.get_segments():\n if len(cur_partition_indices) < min_segment_size:\n partitioned_results.mask(cur_partition_indices)\n\n return partitioned_results\n\n\ndef _align_segments_with_labels(segments, partitioned_skeletons, labelled_skeletons, min_labelled=5):\n \"\"\"\n Match the head/tail alignment with the results of the classical tracking in each of the segments,\n if there is enough labelled data in the segment\n \"\"\"\n segments_alignment = ma.masked_all((len(segments),), dtype=np.uint8)\n for segment_index, segment in enumerate(segments):\n segment_skeletons = labelled_skeletons[segment]\n non_nan_labelled = np.any(~np.isnan(segment_skeletons), axis=(1, 2))\n labels_count = np.sum(non_nan_labelled)\n non_masked = ~np.any(partitioned_skeletons[segment].mask, axis=(1, 2, 3))\n to_compare = np.logical_and(non_nan_labelled, non_masked)\n\n similarity_scores = []\n for label_skel, partitioned_skeleton in zip(\n segment_skeletons[to_compare], partitioned_skeletons[segment][to_compare]\n ):\n dists = [skeleton_distance(label_skel, x) for x in partitioned_skeleton]\n similarity_scores.append(dists)\n\n if len(similarity_scores) > 0:\n mean_similarity_scores = np.mean(similarity_scores, axis=0)\n if mean_similarity_scores[0] * mean_similarity_scores[1] < 0 and labels_count > min_labelled:\n segments_alignment[segment_index] = np.argmax(mean_similarity_scores)\n\n return segments_alignment\n\n\ndef _calculate_smallest_gap_to_adjacent(segment_index, segments, segments_alignment):\n # evaluate how far away this segment is from known values\n score = np.nan\n segment_offset = np.nan\n if segment_index - 1 >= 0 and not segments_alignment.mask[segment_index - 1]:\n gap = segments[segment_index][0] - segments[segment_index - 1][-1]\n score = gap\n segment_offset = -1\n if segment_index + 1 < len(segments_alignment) and not segments_alignment.mask[segment_index + 1]:\n gap = segments[segment_index + 1][0] - segments[segment_index][-1]\n if np.isnan(score) or 
gap < score:\n score = gap\n segment_offset = 1\n\n return score, segment_offset\n\n\ndef _align_unlabelled_segments_with_adjacents(segments, segments_alignment, partitioned_skeletons, frame_rate: float):\n \"\"\"\n Resolve the unaligned segments by comparing with adjacent segments,\n starting with the segments that have the least frames gap between an adjacent trusted segment\n Don't align isolated segments which a big gap between trusted segments\n \"\"\"\n maximum_gap_allowed = max(1, int(frame_rate * MAXIMUM_GAP_ALLOWED_WITH_ADJACENT_SEGMENT_SEC))\n # ensure that if no segments have been aligned at all, pick one solution randomly to start\n if np.all(segments_alignment.mask):\n logger.info(\"There are no trusted segments with head decision to resolve the whole video, stopping analysis.\")\n return segments_alignment\n\n # fix in priority the segments with known adjacent frames with little gap\n # until all segments are aligned except the isolated ones (further than maximum_gap_allowed)\n unaligned = np.where(segments_alignment.mask)[0]\n while len(unaligned) > 0:\n # we first pick the best candidate segment to align (there are known frames nearby before or after or both)\n all_gaps = [\n _calculate_smallest_gap_to_adjacent(\n segment_index=x,\n segments=segments,\n segments_alignment=segments_alignment,\n )\n for x in unaligned\n ]\n segment_to_fix_index = np.nanargmin(all_gaps, axis=0)[0]\n gap_to_adjacent_segment, adjacent_segment_offset = all_gaps[segment_to_fix_index]\n\n # abort if only isolated segments are left\n if gap_to_adjacent_segment > maximum_gap_allowed:\n break\n\n cur_segment_index = unaligned[segment_to_fix_index]\n cur_segment_skeleton = partitioned_skeletons[segments[cur_segment_index]]\n\n adjacent_segment_index = cur_segment_index + adjacent_segment_offset\n adjacent_alignment = segments_alignment[adjacent_segment_index]\n adjacent_segment = segments[adjacent_segment_index]\n adjacent_segment_skeleton = 
partitioned_skeletons[adjacent_segment][:, adjacent_alignment]\n\n if adjacent_segment_offset == -1:\n closest_unaligned_skeleton = cur_segment_skeleton[0] # first frame of cur segment\n closest_known_skeleton = adjacent_segment_skeleton[-1] # last frame of prev segment\n elif adjacent_segment_offset == 1:\n closest_unaligned_skeleton = cur_segment_skeleton[-1] # last frame of cur segment\n closest_known_skeleton = adjacent_segment_skeleton[0] # first frame of next segment\n else:\n raise ValueError()\n\n dists = [skeleton_distance(closest_known_skeleton, skel) for skel in closest_unaligned_skeleton]\n segments_alignment[cur_segment_index] = int(np.argmax(dists))\n\n unaligned = np.where(segments_alignment.mask)[0]\n\n return segments_alignment\n\n\ndef _init_unified_series(mixed_series):\n return ma.masked_all((mixed_series.shape[0],) + mixed_series.shape[2:], dtype=mixed_series.dtype)\n\n\ndef resolve_head_tail(\n shuffled_results: ShuffledResults,\n original_results: OriginalResults,\n frame_rate: float,\n score_threshold,\n) -> BaseResults:\n len_series = len(shuffled_results)\n\n # Create continuous segments without jumps\n partitioned_results = _make_continuous_partitions(\n score_threshold=score_threshold,\n frame_rate=frame_rate,\n shuffled_results=shuffled_results,\n )\n segments = partitioned_results.get_segments()\n\n if len(segments) == 0:\n logger.error(\n f\"Couldn't find any continuous segments of predicted data above the threshold {score_threshold},\"\n f\" stopping analysis.\"\n )\n return _FinalResults.from_shuffled(shuffled_results)\n\n # Choose each segment global alignment by comparing with labelled data\n segments_alignment = _align_segments_with_labels(\n segments, partitioned_results.skeletons, original_results.skeletons\n )\n\n # Fix unaligned segments here by comparing skeletons with neighboring segments iteratively\n segments_alignment = _align_unlabelled_segments_with_adjacents(\n segments, segments_alignment, 
partitioned_results.skeletons, frame_rate\n )\n\n # Compile results\n resolved_results = _ResolvedResults(partitioned_results)\n for segment, segment_alignment in zip(segments, segments_alignment):\n if not ma.is_masked(segment_alignment):\n resolved_results.resolve(segment, segment_alignment)\n\n # Filter the final results again by score threshold\n low_scores_indices = np.where(ma.masked_less(resolved_results.scores, score_threshold).mask)[0]\n resolved_results.mask(low_scores_indices)\n\n num_success = resolved_results.num_valid()\n original_num_success = np.any(~np.isnan(original_results.skeletons), axis=(1, 2)).sum()\n logger.info(\n f\"Resolved head/tail, {num_success} out of {len_series} frames analyzed successfully \"\n f\"({float(num_success) / len_series * 100:.1f}%) (original features : {original_num_success}\"\n f\" or {(float(original_num_success) / len_series * 100):.1f}% of total)\"\n )\n if num_success < original_num_success:\n logger.warning(f\"Original results had {original_num_success - num_success} more successfully analyzed frames!\")\n\n return _FinalResults.from_resolved(resolved_results)\n",
"step-ids": [
10,
19,
20,
24,
26
]
}
|
[
10,
19,
20,
24,
26
] |
# Generated by Django 3.0.8 on 2020-08-11 13:43
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('recipe', '0006_recipe_description'),
]
operations = [
migrations.AddField(
model_name='recipe',
name='portions',
field=models.FloatField(default=1),
),
]
|
normal
|
{
"blob_id": "43dc69c66d94d85337c11eb4cfed48d7fdef2074",
"index": 5770,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('recipe', '0006_recipe_description')]\n operations = [migrations.AddField(model_name='recipe', name='portions',\n field=models.FloatField(default=1))]\n",
"step-4": "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('recipe', '0006_recipe_description')]\n operations = [migrations.AddField(model_name='recipe', name='portions',\n field=models.FloatField(default=1))]\n",
"step-5": "# Generated by Django 3.0.8 on 2020-08-11 13:43\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('recipe', '0006_recipe_description'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='recipe',\n name='portions',\n field=models.FloatField(default=1),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print('%s Заварушку устроили' % alldude)
<|reserved_special_token_1|>
threehome = 25 * 3
twotonnel = 40 * 2
alldude = threehome + twotonnel
print('%s Заварушку устроили' % alldude)
|
flexible
|
{
"blob_id": "e492680efe57bd36b58c00977ecd79196501997a",
"index": 7952,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('%s Заварушку устроили' % alldude)\n",
"step-3": "threehome = 25 * 3\ntwotonnel = 40 * 2\nalldude = threehome + twotonnel\nprint('%s Заварушку устроили' % alldude)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
from django.shortcuts import render
# Create your views here.
from django.shortcuts import redirect
from django.contrib.auth.mixins import LoginRequiredMixin
from django.http import Http404, HttpResponseForbidden
from django.shortcuts import render
from django.urls import reverse
from django.views.generic.edit import FormMixin
from django.contrib.auth.decorators import login_required
from django.views.generic import DetailView, ListView
# from .forms import ComposeForm
# from .models import Thread, ChatMessage
from django.shortcuts import render
import os
import django
os.environ["DJANGO_SETTINGS_MODULE"] = 'arizona.settings'
django.setup()
def index(request):
return render(request, 'canyon/index.html')
def results(request):
    """Render the static results page of the canyon app."""
    template_name = 'canyon/results.html'
    return render(request, template_name)
|
normal
|
{
"blob_id": "c385fe2af9aebc9c4a42d4db5a341fcedeec3898",
"index": 3579,
"step-1": "<mask token>\n\n\ndef index(request):\n return render(request, 'canyon/index.html')\n\n\ndef results(request):\n return render(request, 'canyon/results.html')\n",
"step-2": "<mask token>\ndjango.setup()\n\n\ndef index(request):\n return render(request, 'canyon/index.html')\n\n\ndef results(request):\n return render(request, 'canyon/results.html')\n",
"step-3": "<mask token>\nos.environ['DJANGO_SETTINGS_MODULE'] = 'arizona.settings'\ndjango.setup()\n\n\ndef index(request):\n return render(request, 'canyon/index.html')\n\n\ndef results(request):\n return render(request, 'canyon/results.html')\n",
"step-4": "from django.shortcuts import render\nfrom django.shortcuts import redirect\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.http import Http404, HttpResponseForbidden\nfrom django.shortcuts import render\nfrom django.urls import reverse\nfrom django.views.generic.edit import FormMixin\nfrom django.contrib.auth.decorators import login_required\nfrom django.views.generic import DetailView, ListView\nfrom django.shortcuts import render\nimport os\nimport django\nos.environ['DJANGO_SETTINGS_MODULE'] = 'arizona.settings'\ndjango.setup()\n\n\ndef index(request):\n return render(request, 'canyon/index.html')\n\n\ndef results(request):\n return render(request, 'canyon/results.html')\n",
"step-5": "from django.shortcuts import render\n\n# Create your views here.\nfrom django.shortcuts import redirect\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.http import Http404, HttpResponseForbidden\nfrom django.shortcuts import render\nfrom django.urls import reverse\nfrom django.views.generic.edit import FormMixin\nfrom django.contrib.auth.decorators import login_required\nfrom django.views.generic import DetailView, ListView\n\n# from .forms import ComposeForm\n# from .models import Thread, ChatMessage\n\nfrom django.shortcuts import render\nimport os\nimport django\nos.environ[\"DJANGO_SETTINGS_MODULE\"] = 'arizona.settings'\ndjango.setup()\n\n\ndef index(request):\n return render(request, 'canyon/index.html')\n\n\ndef results(request):\n return render(request, 'canyon/results.html')\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
# ERP5/Zope "Python Script" body: ``context``, ``REQUEST`` and the script
# parameters (``source_reference``, ``consumption_xml``) are bound by the
# publisher, and the bare ``return`` at the bottom is legal in that context —
# this is not an importable Python module.
from zExceptions import Unauthorized
# Refuse direct invocation through the web publisher.
if REQUEST is not None:
    raise Unauthorized

portal = context.getPortalObject()
# Presumably called with a Compute Node document as context — TODO confirm.
compute_node = context

# Deterministic reference per (compute node, source document) pair.
reference = "TIOCONS-%s-%s" % (compute_node.getReference(), source_reference)
# Monotonically increasing version within that reference's id group.
version = "%s" % context.getPortalObject().portal_ids.generateNewId(
    id_group=('slap_tioxml_consumption_reference', reference), default=1)

# Archive the raw TioXML consumption report and link it to the compute node.
document = portal.consumption_document_module.newContent(
    portal_type="Computer Consumption TioXML File",
    source_reference=source_reference,
    title="%s consumption (%s)" % (compute_node.getReference(), source_reference),
    reference=reference,
    version=version,
    data=consumption_xml,
    classification="personal",
    publication_section="other",
    contributor_value=compute_node,
)
document.submit()
return document.getRelativeUrl()
|
normal
|
{
"blob_id": "6c27f70e820202f6cc4348de3c9198e7b20ec7d9",
"index": 4470,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif REQUEST is not None:\n raise Unauthorized\n<mask token>\ndocument.submit()\nreturn document.getRelativeUrl()\n",
"step-3": "<mask token>\nif REQUEST is not None:\n raise Unauthorized\nportal = context.getPortalObject()\ncompute_node = context\nreference = 'TIOCONS-%s-%s' % (compute_node.getReference(), source_reference)\nversion = '%s' % context.getPortalObject().portal_ids.generateNewId(id_group\n =('slap_tioxml_consumption_reference', reference), default=1)\ndocument = portal.consumption_document_module.newContent(portal_type=\n 'Computer Consumption TioXML File', source_reference=source_reference,\n title='%s consumption (%s)' % (compute_node.getReference(),\n source_reference), reference=reference, version=version, data=\n consumption_xml, classification='personal', publication_section='other',\n contributor_value=compute_node)\ndocument.submit()\nreturn document.getRelativeUrl()\n",
"step-4": "from zExceptions import Unauthorized\nif REQUEST is not None:\n raise Unauthorized\nportal = context.getPortalObject()\ncompute_node = context\nreference = 'TIOCONS-%s-%s' % (compute_node.getReference(), source_reference)\nversion = '%s' % context.getPortalObject().portal_ids.generateNewId(id_group\n =('slap_tioxml_consumption_reference', reference), default=1)\ndocument = portal.consumption_document_module.newContent(portal_type=\n 'Computer Consumption TioXML File', source_reference=source_reference,\n title='%s consumption (%s)' % (compute_node.getReference(),\n source_reference), reference=reference, version=version, data=\n consumption_xml, classification='personal', publication_section='other',\n contributor_value=compute_node)\ndocument.submit()\nreturn document.getRelativeUrl()\n",
"step-5": "from zExceptions import Unauthorized\nif REQUEST is not None:\n raise Unauthorized\n\nportal = context.getPortalObject()\ncompute_node = context\n\nreference = \"TIOCONS-%s-%s\" % (compute_node.getReference(), source_reference)\nversion = \"%s\" % context.getPortalObject().portal_ids.generateNewId(\n id_group=('slap_tioxml_consumption_reference', reference), default=1)\n\ndocument = portal.consumption_document_module.newContent(\n portal_type=\"Computer Consumption TioXML File\",\n source_reference=source_reference,\n title=\"%s consumption (%s)\" % (compute_node.getReference(), source_reference),\n reference=reference,\n version=version,\n data=consumption_xml,\n classification=\"personal\",\n publication_section=\"other\",\n contributor_value=compute_node,\n)\ndocument.submit()\nreturn document.getRelativeUrl()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from django.shortcuts import render
import codecs
import os.path
from django.conf import settings
# Half-height (in lines) of the window rendered around the requested line.
OFFSET = 20

def show_raw_data(req):
    """Render a +/-OFFSET-line window of a raw data file around one line.

    Query parameters: ``file`` (path under parser/unzip_data) and ``line``
    (1-based line number to centre the window on).
    """
    filename = req.GET['file']
    lineno = int(req.GET['line'])
    first = max(0, lineno - OFFSET)
    last = lineno + OFFSET
    window = []
    file_path = os.path.join(settings.BASE_DIR, 'parser/unzip_data/%s' % filename)
    with codecs.open(file_path, encoding="windows-1255") as fh:
        for cur, raw in enumerate(fh, start=1):
            if first <= cur <= last:
                window.append({'lineno': cur,
                               'line': raw.strip().encode('utf-8', errors='ignore')})
    ctx = {
        'lines': window,
        'filename': filename,
        'lineno': lineno,
        # Pager links jump one full window (2*OFFSET + 1 lines) back/forward.
        'prev': '/raw-data?file=%s&line=%s' % (filename, lineno - OFFSET * 2 - 1),
        'next': '/raw-data?file=%s&line=%s' % (filename, lineno + OFFSET * 2 + 1),
    }
    return render(req, 'data/raw_data.html', ctx)
def show_results_from_to(req):
    """Render the generic results page configured for the FromTo app."""
    ctx = {'title': 'From To', 'app': 'FromTo'}
    return render(req, 'data/show_results.html', ctx)
def show_trip(req):
    """Render the generic results page configured for the ShowTrip app."""
    ctx = {'title': 'Show Trip', 'app': 'ShowTrip'}
    return render(req, 'data/show_results.html', ctx)
def show_routes(req):
    """Render the generic results page configured for the ShowRoutes app."""
    ctx = {'title': 'Show Routes', 'app': 'ShowRoutes'}
    return render(req, 'data/show_results.html', ctx)
def route_explorer(req):
    """Render the RouteExplorer UI shell."""
    template_name = 'ui/RouteExplorer.html'
    return render(req, template_name)
|
normal
|
{
"blob_id": "576c28bb32b5e0b2b5a82a33cee73e3080dcf3ab",
"index": 1737,
"step-1": "<mask token>\n\n\ndef show_raw_data(req):\n filename = req.GET['file']\n lineno = int(req.GET['line'])\n from_lineno = max(0, lineno - OFFSET)\n to_lineno = lineno + OFFSET\n ctx = dict()\n cur_lineno = 1\n lines = []\n file_path = os.path.join(settings.BASE_DIR, 'parser/unzip_data/%s' %\n filename)\n with codecs.open(file_path, encoding='windows-1255') as fh:\n for line in fh:\n if cur_lineno >= from_lineno and cur_lineno <= to_lineno:\n lines.append({'lineno': cur_lineno, 'line': line.strip().\n encode('utf-8', errors='ignore')})\n cur_lineno += 1\n ctx['lines'] = lines\n ctx['filename'] = filename\n ctx['lineno'] = lineno\n ctx['prev'] = '/raw-data?file=%s&line=%s' % (filename, lineno - OFFSET *\n 2 - 1)\n ctx['next'] = '/raw-data?file=%s&line=%s' % (filename, lineno + OFFSET *\n 2 + 1)\n return render(req, 'data/raw_data.html', ctx)\n\n\ndef show_results_from_to(req):\n return render(req, 'data/show_results.html', {'title': 'From To', 'app':\n 'FromTo'})\n\n\n<mask token>\n\n\ndef show_routes(req):\n return render(req, 'data/show_results.html', {'title': 'Show Routes',\n 'app': 'ShowRoutes'})\n\n\ndef route_explorer(req):\n return render(req, 'ui/RouteExplorer.html')\n",
"step-2": "<mask token>\n\n\ndef show_raw_data(req):\n filename = req.GET['file']\n lineno = int(req.GET['line'])\n from_lineno = max(0, lineno - OFFSET)\n to_lineno = lineno + OFFSET\n ctx = dict()\n cur_lineno = 1\n lines = []\n file_path = os.path.join(settings.BASE_DIR, 'parser/unzip_data/%s' %\n filename)\n with codecs.open(file_path, encoding='windows-1255') as fh:\n for line in fh:\n if cur_lineno >= from_lineno and cur_lineno <= to_lineno:\n lines.append({'lineno': cur_lineno, 'line': line.strip().\n encode('utf-8', errors='ignore')})\n cur_lineno += 1\n ctx['lines'] = lines\n ctx['filename'] = filename\n ctx['lineno'] = lineno\n ctx['prev'] = '/raw-data?file=%s&line=%s' % (filename, lineno - OFFSET *\n 2 - 1)\n ctx['next'] = '/raw-data?file=%s&line=%s' % (filename, lineno + OFFSET *\n 2 + 1)\n return render(req, 'data/raw_data.html', ctx)\n\n\ndef show_results_from_to(req):\n return render(req, 'data/show_results.html', {'title': 'From To', 'app':\n 'FromTo'})\n\n\ndef show_trip(req):\n return render(req, 'data/show_results.html', {'title': 'Show Trip',\n 'app': 'ShowTrip'})\n\n\ndef show_routes(req):\n return render(req, 'data/show_results.html', {'title': 'Show Routes',\n 'app': 'ShowRoutes'})\n\n\ndef route_explorer(req):\n return render(req, 'ui/RouteExplorer.html')\n",
"step-3": "<mask token>\nOFFSET = 20\n\n\ndef show_raw_data(req):\n filename = req.GET['file']\n lineno = int(req.GET['line'])\n from_lineno = max(0, lineno - OFFSET)\n to_lineno = lineno + OFFSET\n ctx = dict()\n cur_lineno = 1\n lines = []\n file_path = os.path.join(settings.BASE_DIR, 'parser/unzip_data/%s' %\n filename)\n with codecs.open(file_path, encoding='windows-1255') as fh:\n for line in fh:\n if cur_lineno >= from_lineno and cur_lineno <= to_lineno:\n lines.append({'lineno': cur_lineno, 'line': line.strip().\n encode('utf-8', errors='ignore')})\n cur_lineno += 1\n ctx['lines'] = lines\n ctx['filename'] = filename\n ctx['lineno'] = lineno\n ctx['prev'] = '/raw-data?file=%s&line=%s' % (filename, lineno - OFFSET *\n 2 - 1)\n ctx['next'] = '/raw-data?file=%s&line=%s' % (filename, lineno + OFFSET *\n 2 + 1)\n return render(req, 'data/raw_data.html', ctx)\n\n\ndef show_results_from_to(req):\n return render(req, 'data/show_results.html', {'title': 'From To', 'app':\n 'FromTo'})\n\n\ndef show_trip(req):\n return render(req, 'data/show_results.html', {'title': 'Show Trip',\n 'app': 'ShowTrip'})\n\n\ndef show_routes(req):\n return render(req, 'data/show_results.html', {'title': 'Show Routes',\n 'app': 'ShowRoutes'})\n\n\ndef route_explorer(req):\n return render(req, 'ui/RouteExplorer.html')\n",
"step-4": "from django.shortcuts import render\nimport codecs\nimport os.path\nfrom django.conf import settings\nOFFSET = 20\n\n\ndef show_raw_data(req):\n filename = req.GET['file']\n lineno = int(req.GET['line'])\n from_lineno = max(0, lineno - OFFSET)\n to_lineno = lineno + OFFSET\n ctx = dict()\n cur_lineno = 1\n lines = []\n file_path = os.path.join(settings.BASE_DIR, 'parser/unzip_data/%s' %\n filename)\n with codecs.open(file_path, encoding='windows-1255') as fh:\n for line in fh:\n if cur_lineno >= from_lineno and cur_lineno <= to_lineno:\n lines.append({'lineno': cur_lineno, 'line': line.strip().\n encode('utf-8', errors='ignore')})\n cur_lineno += 1\n ctx['lines'] = lines\n ctx['filename'] = filename\n ctx['lineno'] = lineno\n ctx['prev'] = '/raw-data?file=%s&line=%s' % (filename, lineno - OFFSET *\n 2 - 1)\n ctx['next'] = '/raw-data?file=%s&line=%s' % (filename, lineno + OFFSET *\n 2 + 1)\n return render(req, 'data/raw_data.html', ctx)\n\n\ndef show_results_from_to(req):\n return render(req, 'data/show_results.html', {'title': 'From To', 'app':\n 'FromTo'})\n\n\ndef show_trip(req):\n return render(req, 'data/show_results.html', {'title': 'Show Trip',\n 'app': 'ShowTrip'})\n\n\ndef show_routes(req):\n return render(req, 'data/show_results.html', {'title': 'Show Routes',\n 'app': 'ShowRoutes'})\n\n\ndef route_explorer(req):\n return render(req, 'ui/RouteExplorer.html')\n",
"step-5": "from django.shortcuts import render\nimport codecs\nimport os.path\nfrom django.conf import settings\n\nOFFSET = 20\n\ndef show_raw_data(req):\n filename = req.GET['file']\n lineno = int(req.GET['line'])\n from_lineno = max(0, lineno - OFFSET)\n to_lineno = (lineno + OFFSET)\n ctx = dict()\n cur_lineno = 1\n lines = []\n file_path = os.path.join(settings.BASE_DIR, 'parser/unzip_data/%s' % filename)\n with codecs.open(file_path, encoding=\"windows-1255\") as fh:\n for line in fh:\n if cur_lineno >= from_lineno and cur_lineno <= to_lineno:\n lines.append({'lineno': cur_lineno,\n 'line': line.strip().encode('utf-8', errors='ignore')})\n cur_lineno += 1\n ctx['lines'] = lines\n ctx['filename'] = filename\n ctx['lineno'] = lineno\n ctx['prev'] = '/raw-data?file=%s&line=%s' % (filename, lineno - OFFSET * 2 - 1)\n ctx['next'] = '/raw-data?file=%s&line=%s' % (filename, lineno + OFFSET * 2 + 1)\n return render(req, 'data/raw_data.html', ctx)\n\n\ndef show_results_from_to(req):\n return render(req, 'data/show_results.html', {'title': 'From To',\n 'app' : 'FromTo'})\n\ndef show_trip(req):\n return render(req,'data/show_results.html',{'title' : 'Show Trip',\n 'app' : 'ShowTrip'})\n\ndef show_routes(req):\n return render(req,'data/show_results.html',{'title': 'Show Routes',\n 'app': 'ShowRoutes'})\ndef route_explorer(req):\n return render(req, 'ui/RouteExplorer.html')\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if A == 0:
print('Impossivel calcular')
else:
delta = B ** 2 - 4 * A * C
if delta < 0.0:
print('Impossivel calcular')
else:
raiz = delta ** 0.5
r1 = (-B + raiz) / (2 * A)
r2 = (-B - raiz) / (2 * A)
print('R1 = {:.5f}'.format(r1))
print('R2 = {:.5f}'.format(r2))
<|reserved_special_token_1|>
num = input().split()
A = float(num[0])
B = float(num[1])
C = float(num[2])
if A == 0:
print('Impossivel calcular')
else:
delta = B ** 2 - 4 * A * C
if delta < 0.0:
print('Impossivel calcular')
else:
raiz = delta ** 0.5
r1 = (-B + raiz) / (2 * A)
r2 = (-B - raiz) / (2 * A)
print('R1 = {:.5f}'.format(r1))
print('R2 = {:.5f}'.format(r2))
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
# Solve A*x^2 + B*x + C = 0 for real roots; the three coefficients come from
# a single whitespace-separated input line.
coeffs = input().split()
A = float(coeffs[0])
B = float(coeffs[1])
C = float(coeffs[2])

if A == 0:
    # Degenerate case: not a quadratic equation.
    print("Impossivel calcular")
else:
    delta = B ** 2 - 4 * A * C
    if delta < 0.0:
        # Negative discriminant: no real roots.
        print("Impossivel calcular")
    else:
        raiz = delta ** 0.5
        print("R1 = {:.5f}".format((-B + raiz) / (2 * A)))
        print("R2 = {:.5f}".format((-B - raiz) / (2 * A)))
|
flexible
|
{
"blob_id": "f114a86a3c6bea274b01763ce3e8cd5c8aea44a0",
"index": 3115,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif A == 0:\n print('Impossivel calcular')\nelse:\n delta = B ** 2 - 4 * A * C\n if delta < 0.0:\n print('Impossivel calcular')\n else:\n raiz = delta ** 0.5\n r1 = (-B + raiz) / (2 * A)\n r2 = (-B - raiz) / (2 * A)\n print('R1 = {:.5f}'.format(r1))\n print('R2 = {:.5f}'.format(r2))\n",
"step-3": "num = input().split()\nA = float(num[0])\nB = float(num[1])\nC = float(num[2])\nif A == 0:\n print('Impossivel calcular')\nelse:\n delta = B ** 2 - 4 * A * C\n if delta < 0.0:\n print('Impossivel calcular')\n else:\n raiz = delta ** 0.5\n r1 = (-B + raiz) / (2 * A)\n r2 = (-B - raiz) / (2 * A)\n print('R1 = {:.5f}'.format(r1))\n print('R2 = {:.5f}'.format(r2))\n",
"step-4": "# -*- coding: utf-8 -*-\n\nnum = input().split()\nA = float(num[0])\nB = float(num[1])\nC = float(num[2])\n\nif A == 0:\n print(\"Impossivel calcular\")\nelse:\n delta = B**2 - (4*A*C)\n \n if delta < 0.0:\n print(\"Impossivel calcular\")\n else:\n raiz = delta ** 0.5\n r1 = (-B+raiz)/(2*A)\n r2 = (-B-raiz)/(2*A)\n print(\"R1 = {:.5f}\".format(r1))\n print(\"R2 = {:.5f}\".format(r2))",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
class Tienda:
def __init__(self, nombre_tienda, lista_productos=[]):
self.nombre_tienda = nombre_tienda
self.lista_productos = lista_productos
<|reserved_special_token_0|>
def anhadir_producto(self, producto_nuevo):
self.lista_productos.append(producto_nuevo)
print('# # # # # # # PRODUCTO ANHADIDO # # # # # # #')
producto_nuevo.producto_info()
return self
<|reserved_special_token_0|>
def inflacion(self, porcentaje_incremento):
a = 0
for pro in self.lista_productos:
a += 1
print(f'=================Producto 0{a}:=================')
pro.producto_info()
print('AUMENTA su precio a: ')
pro.actualizar_precio(porcentaje_incremento, True).producto_info()
return self
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Tienda:
def __init__(self, nombre_tienda, lista_productos=[]):
self.nombre_tienda = nombre_tienda
self.lista_productos = lista_productos
def __str__(self):
return f"""Nombre de la Tienda: {self.nombre_tienda}
Lista de Productos: {self.lista_productos}
"""
def anhadir_producto(self, producto_nuevo):
self.lista_productos.append(producto_nuevo)
print('# # # # # # # PRODUCTO ANHADIDO # # # # # # #')
producto_nuevo.producto_info()
return self
<|reserved_special_token_0|>
def inflacion(self, porcentaje_incremento):
a = 0
for pro in self.lista_productos:
a += 1
print(f'=================Producto 0{a}:=================')
pro.producto_info()
print('AUMENTA su precio a: ')
pro.actualizar_precio(porcentaje_incremento, True).producto_info()
return self
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Tienda:
def __init__(self, nombre_tienda, lista_productos=[]):
self.nombre_tienda = nombre_tienda
self.lista_productos = lista_productos
def __str__(self):
return f"""Nombre de la Tienda: {self.nombre_tienda}
Lista de Productos: {self.lista_productos}
"""
def anhadir_producto(self, producto_nuevo):
self.lista_productos.append(producto_nuevo)
print('# # # # # # # PRODUCTO ANHADIDO # # # # # # #')
producto_nuevo.producto_info()
return self
def vender_producto(self, id):
print('\n# # # # # # # PRODUCTO VENDIDO # # # # # # #')
self.lista_productos.pop(id).producto_info()
return self
def inflacion(self, porcentaje_incremento):
a = 0
for pro in self.lista_productos:
a += 1
print(f'=================Producto 0{a}:=================')
pro.producto_info()
print('AUMENTA su precio a: ')
pro.actualizar_precio(porcentaje_incremento, True).producto_info()
return self
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Tienda:
def __init__(self, nombre_tienda, lista_productos=[]):
self.nombre_tienda = nombre_tienda
self.lista_productos = lista_productos
def __str__(self):
return f"""Nombre de la Tienda: {self.nombre_tienda}
Lista de Productos: {self.lista_productos}
"""
def anhadir_producto(self, producto_nuevo):
self.lista_productos.append(producto_nuevo)
print('# # # # # # # PRODUCTO ANHADIDO # # # # # # #')
producto_nuevo.producto_info()
return self
def vender_producto(self, id):
print('\n# # # # # # # PRODUCTO VENDIDO # # # # # # #')
self.lista_productos.pop(id).producto_info()
return self
def inflacion(self, porcentaje_incremento):
a = 0
for pro in self.lista_productos:
a += 1
print(f'=================Producto 0{a}:=================')
pro.producto_info()
print('AUMENTA su precio a: ')
pro.actualizar_precio(porcentaje_incremento, True).producto_info()
return self
def descuentazo(self, categoria, descuentazo_porcentaje):
a = 0
for product in self.lista_productos:
a += 1
if product.cat_producto == categoria:
print(f'=================Producto 0{a}:=================')
product.producto_info()
print('Se REMATA, y su nuevo precio de remate es: ')
product.actualizar_precio(descuentazo_porcentaje, False
).producto_info()
print(
f'Descuento de precios a toda la categoria {categoria}, realizado')
return self
<|reserved_special_token_1|>
class Tienda:
    """In-memory store that manages a list of product objects.

    Products are expected to expose ``cat_producto``, ``producto_info()`` and
    ``actualizar_precio(porcentaje, subir)`` — presumably a Producto class
    defined elsewhere (TODO confirm against the caller).
    """

    def __init__(self, nombre_tienda, lista_productos=None):
        """Create a store.

        ``lista_productos`` previously defaulted to a shared mutable list
        (``[]``), so every Tienda built without an explicit list shared the
        same products. ``None`` plus a fresh list fixes that while staying
        backward compatible for callers that pass their own list.
        """
        self.nombre_tienda = nombre_tienda
        self.lista_productos = [] if lista_productos is None else lista_productos

    def __str__(self):
        return f"Nombre de la Tienda: {self.nombre_tienda}\nLista de Productos: {self.lista_productos}\n"

    def anhadir_producto(self, producto_nuevo):
        """Append a product, echo it, and return self for chaining."""
        self.lista_productos.append(producto_nuevo)
        print("# # # # # # # PRODUCTO ANHADIDO # # # # # # #")
        producto_nuevo.producto_info()
        return self

    def vender_producto(self, id):
        """Remove (sell) the product at position ``id``, echo it, return self."""
        print("\n# # # # # # # PRODUCTO VENDIDO # # # # # # #")
        self.lista_productos.pop(id).producto_info()
        return self

    def inflacion(self, porcentaje_incremento):
        """Raise every product's price by ``porcentaje_incremento`` percent."""
        a = 0
        for pro in self.lista_productos:
            a += 1
            print(f"=================Producto 0{a}:=================")
            pro.producto_info()
            print("AUMENTA su precio a: ")
            pro.actualizar_precio(porcentaje_incremento, True).producto_info()
        return self

    def descuentazo(self, categoria, descuentazo_porcentaje):
        """Discount every product in ``categoria`` by the given percent."""
        a = 0
        for product in self.lista_productos:
            a += 1
            if product.cat_producto == categoria:
                print(f"=================Producto 0{a}:=================")
                product.producto_info()
                print("Se REMATA, y su nuevo precio de remate es: ")
                product.actualizar_precio(descuentazo_porcentaje, False).producto_info()
        # Bug fix: this summary used to sit inside the loop, so it printed once
        # per matching product instead of once per call.
        print(f"Descuento de precios a toda la categoria {categoria}, realizado")
        return self
#########################################################
##### coso = Tienda("VERDULERIA")
##### print(coso)
##### print("anhadir_P")
##### pera = ("PERA", 1000, "FRUTAS")
##### coco = ("COCO", 1511, "FRUTAS")
##### coso.anhadir_producto(pera)
##### coso.anhadir_producto(coco)
##### print(coso)
##### print("#############################")
##### coso.vender_producto(1)
|
flexible
|
{
"blob_id": "0ae5d20b78bf7c23418de55ffd4d81cd5284c6d5",
"index": 8912,
"step-1": "class Tienda:\n\n def __init__(self, nombre_tienda, lista_productos=[]):\n self.nombre_tienda = nombre_tienda\n self.lista_productos = lista_productos\n <mask token>\n\n def anhadir_producto(self, producto_nuevo):\n self.lista_productos.append(producto_nuevo)\n print('# # # # # # # PRODUCTO ANHADIDO # # # # # # #')\n producto_nuevo.producto_info()\n return self\n <mask token>\n\n def inflacion(self, porcentaje_incremento):\n a = 0\n for pro in self.lista_productos:\n a += 1\n print(f'=================Producto 0{a}:=================')\n pro.producto_info()\n print('AUMENTA su precio a: ')\n pro.actualizar_precio(porcentaje_incremento, True).producto_info()\n return self\n <mask token>\n",
"step-2": "class Tienda:\n\n def __init__(self, nombre_tienda, lista_productos=[]):\n self.nombre_tienda = nombre_tienda\n self.lista_productos = lista_productos\n\n def __str__(self):\n return f\"\"\"Nombre de la Tienda: {self.nombre_tienda}\nLista de Productos: {self.lista_productos}\n\"\"\"\n\n def anhadir_producto(self, producto_nuevo):\n self.lista_productos.append(producto_nuevo)\n print('# # # # # # # PRODUCTO ANHADIDO # # # # # # #')\n producto_nuevo.producto_info()\n return self\n <mask token>\n\n def inflacion(self, porcentaje_incremento):\n a = 0\n for pro in self.lista_productos:\n a += 1\n print(f'=================Producto 0{a}:=================')\n pro.producto_info()\n print('AUMENTA su precio a: ')\n pro.actualizar_precio(porcentaje_incremento, True).producto_info()\n return self\n <mask token>\n",
"step-3": "class Tienda:\n\n def __init__(self, nombre_tienda, lista_productos=[]):\n self.nombre_tienda = nombre_tienda\n self.lista_productos = lista_productos\n\n def __str__(self):\n return f\"\"\"Nombre de la Tienda: {self.nombre_tienda}\nLista de Productos: {self.lista_productos}\n\"\"\"\n\n def anhadir_producto(self, producto_nuevo):\n self.lista_productos.append(producto_nuevo)\n print('# # # # # # # PRODUCTO ANHADIDO # # # # # # #')\n producto_nuevo.producto_info()\n return self\n\n def vender_producto(self, id):\n print('\\n# # # # # # # PRODUCTO VENDIDO # # # # # # #')\n self.lista_productos.pop(id).producto_info()\n return self\n\n def inflacion(self, porcentaje_incremento):\n a = 0\n for pro in self.lista_productos:\n a += 1\n print(f'=================Producto 0{a}:=================')\n pro.producto_info()\n print('AUMENTA su precio a: ')\n pro.actualizar_precio(porcentaje_incremento, True).producto_info()\n return self\n <mask token>\n",
"step-4": "class Tienda:\n\n def __init__(self, nombre_tienda, lista_productos=[]):\n self.nombre_tienda = nombre_tienda\n self.lista_productos = lista_productos\n\n def __str__(self):\n return f\"\"\"Nombre de la Tienda: {self.nombre_tienda}\nLista de Productos: {self.lista_productos}\n\"\"\"\n\n def anhadir_producto(self, producto_nuevo):\n self.lista_productos.append(producto_nuevo)\n print('# # # # # # # PRODUCTO ANHADIDO # # # # # # #')\n producto_nuevo.producto_info()\n return self\n\n def vender_producto(self, id):\n print('\\n# # # # # # # PRODUCTO VENDIDO # # # # # # #')\n self.lista_productos.pop(id).producto_info()\n return self\n\n def inflacion(self, porcentaje_incremento):\n a = 0\n for pro in self.lista_productos:\n a += 1\n print(f'=================Producto 0{a}:=================')\n pro.producto_info()\n print('AUMENTA su precio a: ')\n pro.actualizar_precio(porcentaje_incremento, True).producto_info()\n return self\n\n def descuentazo(self, categoria, descuentazo_porcentaje):\n a = 0\n for product in self.lista_productos:\n a += 1\n if product.cat_producto == categoria:\n print(f'=================Producto 0{a}:=================')\n product.producto_info()\n print('Se REMATA, y su nuevo precio de remate es: ')\n product.actualizar_precio(descuentazo_porcentaje, False\n ).producto_info()\n print(\n f'Descuento de precios a toda la categoria {categoria}, realizado')\n return self\n",
"step-5": "class Tienda:\n def __init__(self, nombre_tienda, lista_productos = []):\n self.nombre_tienda = nombre_tienda\n self.lista_productos = lista_productos\n\n def __str__(self):\n return f\"Nombre de la Tienda: {self.nombre_tienda}\\nLista de Productos: {self.lista_productos}\\n\"\n \n def anhadir_producto(self, producto_nuevo):\n self.lista_productos.append(producto_nuevo)\n print(\"# # # # # # # PRODUCTO ANHADIDO # # # # # # #\")\n producto_nuevo.producto_info()\n return self\n\n def vender_producto(self, id):\n print(\"\\n# # # # # # # PRODUCTO VENDIDO # # # # # # #\")\n self.lista_productos.pop(id).producto_info()\n return self\n\n def inflacion(self, porcentaje_incremento):\n a = 0\n for pro in self.lista_productos:\n a += 1\n print(f\"=================Producto 0{a}:=================\")\n pro.producto_info()\n print(\"AUMENTA su precio a: \")\n pro.actualizar_precio(porcentaje_incremento, True).producto_info()\n return self\n\n def descuentazo(self, categoria, descuentazo_porcentaje):\n a = 0\n for product in self.lista_productos:\n a += 1\n if product.cat_producto == categoria:\n print(f\"=================Producto 0{a}:=================\")\n product.producto_info()\n print(\"Se REMATA, y su nuevo precio de remate es: \")\n product.actualizar_precio(descuentazo_porcentaje, False).producto_info()\n print(f\"Descuento de precios a toda la categoria {categoria}, realizado\")\n return self\n\n#########################################################\n##### coso = Tienda(\"VERDULERIA\")\n##### print(coso)\n##### print(\"anhadir_P\")\n##### pera = (\"PERA\", 1000, \"FRUTAS\")\n##### coco = (\"COCO\", 1511, \"FRUTAS\")\n##### coso.anhadir_producto(pera)\n##### coso.anhadir_producto(coco)\n##### print(coso)\n##### print(\"#############################\")\n##### coso.vender_producto(1)",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
<|reserved_special_token_0|>
def data_split_GroupKFold(df, col_index, col_group, n_splits=5, random_state=42
):
"""
:param df:
:param col_index:
:param col_group:
:param n_splits:
:param random_state:
:return:
"""
group = np.sort(df[col_group].unique())
print('num group: {}'.format(len(group)))
np.random.seed(random_state)
group = group[np.random.permutation(len(group))]
fold_list = []
fold = 0
count = 0
fold_list.append([])
for i, item in enumerate(group):
count += (df[col_group] == item).sum()
fold_list[fold].append(item)
if count > len(df) / n_splits * (fold + 1):
fold_list.append([])
fold += 1
df_new = df[[col_index]]
for fold in range(n_splits):
df_new['fold{}_train'.format(fold + 1)] = df[col_group].apply(lambda
x: x not in fold_list[fold]).astype(np.int)
df_new['fold{}_valid'.format(fold + 1)] = 1 - df_new['fold{}_train'
.format(fold + 1)]
for i in range(n_splits):
print('fold: {}, valid: {}. group: {}'.format(i + 1, (df_new[
'fold{}_valid'.format(i + 1)] == 1).sum(), len(fold_list[i])))
return df_new
def main():
df = pd.read_csv('../input/melanoma/train.csv')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def data_split_StratifiedKFold(df, col_index, col_stratified, n_splits=5,
random_state=42):
folds = list(StratifiedKFold(n_splits=n_splits, shuffle=True,
random_state=random_state).split(np.arange(len(df)), y=df[
col_stratified]))
df_new = df[[col_index]]
for fold in range(n_splits):
df_new['fold{}_train'.format(fold + 1)] = 0
df_new['fold{}_train'.format(fold + 1)][folds[fold][0]] = 1
df_new['fold{}_valid'.format(fold + 1)] = 0
df_new['fold{}_valid'.format(fold + 1)][folds[fold][1]] = 1
return df_new
def data_split_KFold(df, col_index, n_splits=5, random_state=42):
folds = list(KFold(n_splits=n_splits, shuffle=True, random_state=
random_state).split(np.arange(len(df))))
df_new = df[[col_index]]
for fold in range(n_splits):
df_new['fold{}_train'.format(fold + 1)] = 0
df_new['fold{}_train'.format(fold + 1)][folds[fold][0]] = 1
df_new['fold{}_valid'.format(fold + 1)] = 0
df_new['fold{}_valid'.format(fold + 1)][folds[fold][1]] = 1
return df_new
def data_split_GroupKFold(df, col_index, col_group, n_splits=5, random_state=42
):
"""
:param df:
:param col_index:
:param col_group:
:param n_splits:
:param random_state:
:return:
"""
group = np.sort(df[col_group].unique())
print('num group: {}'.format(len(group)))
np.random.seed(random_state)
group = group[np.random.permutation(len(group))]
fold_list = []
fold = 0
count = 0
fold_list.append([])
for i, item in enumerate(group):
count += (df[col_group] == item).sum()
fold_list[fold].append(item)
if count > len(df) / n_splits * (fold + 1):
fold_list.append([])
fold += 1
df_new = df[[col_index]]
for fold in range(n_splits):
df_new['fold{}_train'.format(fold + 1)] = df[col_group].apply(lambda
x: x not in fold_list[fold]).astype(np.int)
df_new['fold{}_valid'.format(fold + 1)] = 1 - df_new['fold{}_train'
.format(fold + 1)]
for i in range(n_splits):
print('fold: {}, valid: {}. group: {}'.format(i + 1, (df_new[
'fold{}_valid'.format(i + 1)] == 1).sum(), len(fold_list[i])))
return df_new
def main():
df = pd.read_csv('../input/melanoma/train.csv')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
warnings.filterwarnings('ignore')
def data_split_StratifiedKFold(df, col_index, col_stratified, n_splits=5,
random_state=42):
folds = list(StratifiedKFold(n_splits=n_splits, shuffle=True,
random_state=random_state).split(np.arange(len(df)), y=df[
col_stratified]))
df_new = df[[col_index]]
for fold in range(n_splits):
df_new['fold{}_train'.format(fold + 1)] = 0
df_new['fold{}_train'.format(fold + 1)][folds[fold][0]] = 1
df_new['fold{}_valid'.format(fold + 1)] = 0
df_new['fold{}_valid'.format(fold + 1)][folds[fold][1]] = 1
return df_new
def data_split_KFold(df, col_index, n_splits=5, random_state=42):
folds = list(KFold(n_splits=n_splits, shuffle=True, random_state=
random_state).split(np.arange(len(df))))
df_new = df[[col_index]]
for fold in range(n_splits):
df_new['fold{}_train'.format(fold + 1)] = 0
df_new['fold{}_train'.format(fold + 1)][folds[fold][0]] = 1
df_new['fold{}_valid'.format(fold + 1)] = 0
df_new['fold{}_valid'.format(fold + 1)][folds[fold][1]] = 1
return df_new
def data_split_GroupKFold(df, col_index, col_group, n_splits=5, random_state=42
):
"""
:param df:
:param col_index:
:param col_group:
:param n_splits:
:param random_state:
:return:
"""
group = np.sort(df[col_group].unique())
print('num group: {}'.format(len(group)))
np.random.seed(random_state)
group = group[np.random.permutation(len(group))]
fold_list = []
fold = 0
count = 0
fold_list.append([])
for i, item in enumerate(group):
count += (df[col_group] == item).sum()
fold_list[fold].append(item)
if count > len(df) / n_splits * (fold + 1):
fold_list.append([])
fold += 1
df_new = df[[col_index]]
for fold in range(n_splits):
df_new['fold{}_train'.format(fold + 1)] = df[col_group].apply(lambda
x: x not in fold_list[fold]).astype(np.int)
df_new['fold{}_valid'.format(fold + 1)] = 1 - df_new['fold{}_train'
.format(fold + 1)]
for i in range(n_splits):
print('fold: {}, valid: {}. group: {}'.format(i + 1, (df_new[
'fold{}_valid'.format(i + 1)] == 1).sum(), len(fold_list[i])))
return df_new
def main():
df = pd.read_csv('../input/melanoma/train.csv')
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os, shutil, time, pickle, warnings, logging
import yaml
from sklearn import preprocessing
from sklearn.model_selection import StratifiedKFold, KFold
from sklearn import metrics
from scipy.special import erfinv
from scipy.stats import mode
warnings.filterwarnings('ignore')
def data_split_StratifiedKFold(df, col_index, col_stratified, n_splits=5,
random_state=42):
folds = list(StratifiedKFold(n_splits=n_splits, shuffle=True,
random_state=random_state).split(np.arange(len(df)), y=df[
col_stratified]))
df_new = df[[col_index]]
for fold in range(n_splits):
df_new['fold{}_train'.format(fold + 1)] = 0
df_new['fold{}_train'.format(fold + 1)][folds[fold][0]] = 1
df_new['fold{}_valid'.format(fold + 1)] = 0
df_new['fold{}_valid'.format(fold + 1)][folds[fold][1]] = 1
return df_new
def data_split_KFold(df, col_index, n_splits=5, random_state=42):
folds = list(KFold(n_splits=n_splits, shuffle=True, random_state=
random_state).split(np.arange(len(df))))
df_new = df[[col_index]]
for fold in range(n_splits):
df_new['fold{}_train'.format(fold + 1)] = 0
df_new['fold{}_train'.format(fold + 1)][folds[fold][0]] = 1
df_new['fold{}_valid'.format(fold + 1)] = 0
df_new['fold{}_valid'.format(fold + 1)][folds[fold][1]] = 1
return df_new
def data_split_GroupKFold(df, col_index, col_group, n_splits=5, random_state=42
):
"""
:param df:
:param col_index:
:param col_group:
:param n_splits:
:param random_state:
:return:
"""
group = np.sort(df[col_group].unique())
print('num group: {}'.format(len(group)))
np.random.seed(random_state)
group = group[np.random.permutation(len(group))]
fold_list = []
fold = 0
count = 0
fold_list.append([])
for i, item in enumerate(group):
count += (df[col_group] == item).sum()
fold_list[fold].append(item)
if count > len(df) / n_splits * (fold + 1):
fold_list.append([])
fold += 1
df_new = df[[col_index]]
for fold in range(n_splits):
df_new['fold{}_train'.format(fold + 1)] = df[col_group].apply(lambda
x: x not in fold_list[fold]).astype(np.int)
df_new['fold{}_valid'.format(fold + 1)] = 1 - df_new['fold{}_train'
.format(fold + 1)]
for i in range(n_splits):
print('fold: {}, valid: {}. group: {}'.format(i + 1, (df_new[
'fold{}_valid'.format(i + 1)] == 1).sum(), len(fold_list[i])))
return df_new
def main():
df = pd.read_csv('../input/melanoma/train.csv')
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os, shutil, time, pickle, warnings, logging
import yaml
from sklearn import preprocessing
from sklearn.model_selection import StratifiedKFold, KFold
from sklearn import metrics
from scipy.special import erfinv
from scipy.stats import mode
warnings.filterwarnings('ignore')
def data_split_StratifiedKFold(df, col_index, col_stratified, n_splits=5, random_state=42):
folds = list(StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=random_state).split(
np.arange(len(df)), y=df[col_stratified]))
df_new = df[[col_index]]
for fold in range(n_splits):
df_new['fold{}_train'.format(fold + 1)] = 0
df_new['fold{}_train'.format(fold + 1)][folds[fold][0]] = 1
df_new['fold{}_valid'.format(fold + 1)] = 0
df_new['fold{}_valid'.format(fold + 1)][folds[fold][1]] = 1
return df_new
def data_split_KFold(df, col_index, n_splits=5, random_state=42):
folds = list(KFold(n_splits=n_splits, shuffle=True, random_state=random_state).split(
np.arange(len(df))))
df_new = df[[col_index]]
for fold in range(n_splits):
df_new['fold{}_train'.format(fold + 1)] = 0
df_new['fold{}_train'.format(fold + 1)][folds[fold][0]] = 1
df_new['fold{}_valid'.format(fold + 1)] = 0
df_new['fold{}_valid'.format(fold + 1)][folds[fold][1]] = 1
return df_new
def data_split_GroupKFold(df, col_index, col_group, n_splits=5, random_state=42):
"""
:param df:
:param col_index:
:param col_group:
:param n_splits:
:param random_state:
:return:
"""
group = np.sort(df[col_group].unique())
print("num group: {}".format(len(group)))
np.random.seed(random_state)
group = group[np.random.permutation(len(group))]
fold_list = []
fold = 0
count = 0
fold_list.append([])
for i, item in enumerate(group):
count += (df[col_group] == item).sum()
fold_list[fold].append(item)
if count > len(df) / n_splits * (fold + 1):
fold_list.append([])
fold += 1
df_new = df[[col_index]]
for fold in range(n_splits):
df_new['fold{}_train'.format(fold + 1)] = df[col_group].apply(
lambda x: x not in fold_list[fold]).astype(np.int)
df_new['fold{}_valid'.format(fold + 1)] = 1 - df_new['fold{}_train'.format(fold + 1)]
for i in range(n_splits):
print("fold: {}, valid: {}. group: {}".format(
i + 1,
(df_new['fold{}_valid'.format(i + 1)] == 1).sum(),
len(fold_list[i]))
)
return df_new
def main():
df = pd.read_csv("../input/melanoma/train.csv")
if __name__ == '__main__':
main()
|
flexible
|
{
"blob_id": "4d0b08f8ca77d188aa218442ac0689fd2c057a89",
"index": 8357,
"step-1": "<mask token>\n\n\ndef data_split_GroupKFold(df, col_index, col_group, n_splits=5, random_state=42\n ):\n \"\"\"\n\n :param df:\n :param col_index:\n :param col_group:\n :param n_splits:\n :param random_state:\n :return:\n \"\"\"\n group = np.sort(df[col_group].unique())\n print('num group: {}'.format(len(group)))\n np.random.seed(random_state)\n group = group[np.random.permutation(len(group))]\n fold_list = []\n fold = 0\n count = 0\n fold_list.append([])\n for i, item in enumerate(group):\n count += (df[col_group] == item).sum()\n fold_list[fold].append(item)\n if count > len(df) / n_splits * (fold + 1):\n fold_list.append([])\n fold += 1\n df_new = df[[col_index]]\n for fold in range(n_splits):\n df_new['fold{}_train'.format(fold + 1)] = df[col_group].apply(lambda\n x: x not in fold_list[fold]).astype(np.int)\n df_new['fold{}_valid'.format(fold + 1)] = 1 - df_new['fold{}_train'\n .format(fold + 1)]\n for i in range(n_splits):\n print('fold: {}, valid: {}. group: {}'.format(i + 1, (df_new[\n 'fold{}_valid'.format(i + 1)] == 1).sum(), len(fold_list[i])))\n return df_new\n\n\ndef main():\n df = pd.read_csv('../input/melanoma/train.csv')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef data_split_StratifiedKFold(df, col_index, col_stratified, n_splits=5,\n random_state=42):\n folds = list(StratifiedKFold(n_splits=n_splits, shuffle=True,\n random_state=random_state).split(np.arange(len(df)), y=df[\n col_stratified]))\n df_new = df[[col_index]]\n for fold in range(n_splits):\n df_new['fold{}_train'.format(fold + 1)] = 0\n df_new['fold{}_train'.format(fold + 1)][folds[fold][0]] = 1\n df_new['fold{}_valid'.format(fold + 1)] = 0\n df_new['fold{}_valid'.format(fold + 1)][folds[fold][1]] = 1\n return df_new\n\n\ndef data_split_KFold(df, col_index, n_splits=5, random_state=42):\n folds = list(KFold(n_splits=n_splits, shuffle=True, random_state=\n random_state).split(np.arange(len(df))))\n df_new = df[[col_index]]\n for fold in range(n_splits):\n df_new['fold{}_train'.format(fold + 1)] = 0\n df_new['fold{}_train'.format(fold + 1)][folds[fold][0]] = 1\n df_new['fold{}_valid'.format(fold + 1)] = 0\n df_new['fold{}_valid'.format(fold + 1)][folds[fold][1]] = 1\n return df_new\n\n\ndef data_split_GroupKFold(df, col_index, col_group, n_splits=5, random_state=42\n ):\n \"\"\"\n\n :param df:\n :param col_index:\n :param col_group:\n :param n_splits:\n :param random_state:\n :return:\n \"\"\"\n group = np.sort(df[col_group].unique())\n print('num group: {}'.format(len(group)))\n np.random.seed(random_state)\n group = group[np.random.permutation(len(group))]\n fold_list = []\n fold = 0\n count = 0\n fold_list.append([])\n for i, item in enumerate(group):\n count += (df[col_group] == item).sum()\n fold_list[fold].append(item)\n if count > len(df) / n_splits * (fold + 1):\n fold_list.append([])\n fold += 1\n df_new = df[[col_index]]\n for fold in range(n_splits):\n df_new['fold{}_train'.format(fold + 1)] = df[col_group].apply(lambda\n x: x not in fold_list[fold]).astype(np.int)\n df_new['fold{}_valid'.format(fold + 1)] = 1 - df_new['fold{}_train'\n .format(fold + 1)]\n for i in range(n_splits):\n print('fold: {}, valid: {}. 
group: {}'.format(i + 1, (df_new[\n 'fold{}_valid'.format(i + 1)] == 1).sum(), len(fold_list[i])))\n return df_new\n\n\ndef main():\n df = pd.read_csv('../input/melanoma/train.csv')\n\n\n<mask token>\n",
"step-3": "<mask token>\nwarnings.filterwarnings('ignore')\n\n\ndef data_split_StratifiedKFold(df, col_index, col_stratified, n_splits=5,\n random_state=42):\n folds = list(StratifiedKFold(n_splits=n_splits, shuffle=True,\n random_state=random_state).split(np.arange(len(df)), y=df[\n col_stratified]))\n df_new = df[[col_index]]\n for fold in range(n_splits):\n df_new['fold{}_train'.format(fold + 1)] = 0\n df_new['fold{}_train'.format(fold + 1)][folds[fold][0]] = 1\n df_new['fold{}_valid'.format(fold + 1)] = 0\n df_new['fold{}_valid'.format(fold + 1)][folds[fold][1]] = 1\n return df_new\n\n\ndef data_split_KFold(df, col_index, n_splits=5, random_state=42):\n folds = list(KFold(n_splits=n_splits, shuffle=True, random_state=\n random_state).split(np.arange(len(df))))\n df_new = df[[col_index]]\n for fold in range(n_splits):\n df_new['fold{}_train'.format(fold + 1)] = 0\n df_new['fold{}_train'.format(fold + 1)][folds[fold][0]] = 1\n df_new['fold{}_valid'.format(fold + 1)] = 0\n df_new['fold{}_valid'.format(fold + 1)][folds[fold][1]] = 1\n return df_new\n\n\ndef data_split_GroupKFold(df, col_index, col_group, n_splits=5, random_state=42\n ):\n \"\"\"\n\n :param df:\n :param col_index:\n :param col_group:\n :param n_splits:\n :param random_state:\n :return:\n \"\"\"\n group = np.sort(df[col_group].unique())\n print('num group: {}'.format(len(group)))\n np.random.seed(random_state)\n group = group[np.random.permutation(len(group))]\n fold_list = []\n fold = 0\n count = 0\n fold_list.append([])\n for i, item in enumerate(group):\n count += (df[col_group] == item).sum()\n fold_list[fold].append(item)\n if count > len(df) / n_splits * (fold + 1):\n fold_list.append([])\n fold += 1\n df_new = df[[col_index]]\n for fold in range(n_splits):\n df_new['fold{}_train'.format(fold + 1)] = df[col_group].apply(lambda\n x: x not in fold_list[fold]).astype(np.int)\n df_new['fold{}_valid'.format(fold + 1)] = 1 - df_new['fold{}_train'\n .format(fold + 1)]\n for i in range(n_splits):\n 
print('fold: {}, valid: {}. group: {}'.format(i + 1, (df_new[\n 'fold{}_valid'.format(i + 1)] == 1).sum(), len(fold_list[i])))\n return df_new\n\n\ndef main():\n df = pd.read_csv('../input/melanoma/train.csv')\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport os, shutil, time, pickle, warnings, logging\nimport yaml\nfrom sklearn import preprocessing\nfrom sklearn.model_selection import StratifiedKFold, KFold\nfrom sklearn import metrics\nfrom scipy.special import erfinv\nfrom scipy.stats import mode\nwarnings.filterwarnings('ignore')\n\n\ndef data_split_StratifiedKFold(df, col_index, col_stratified, n_splits=5,\n random_state=42):\n folds = list(StratifiedKFold(n_splits=n_splits, shuffle=True,\n random_state=random_state).split(np.arange(len(df)), y=df[\n col_stratified]))\n df_new = df[[col_index]]\n for fold in range(n_splits):\n df_new['fold{}_train'.format(fold + 1)] = 0\n df_new['fold{}_train'.format(fold + 1)][folds[fold][0]] = 1\n df_new['fold{}_valid'.format(fold + 1)] = 0\n df_new['fold{}_valid'.format(fold + 1)][folds[fold][1]] = 1\n return df_new\n\n\ndef data_split_KFold(df, col_index, n_splits=5, random_state=42):\n folds = list(KFold(n_splits=n_splits, shuffle=True, random_state=\n random_state).split(np.arange(len(df))))\n df_new = df[[col_index]]\n for fold in range(n_splits):\n df_new['fold{}_train'.format(fold + 1)] = 0\n df_new['fold{}_train'.format(fold + 1)][folds[fold][0]] = 1\n df_new['fold{}_valid'.format(fold + 1)] = 0\n df_new['fold{}_valid'.format(fold + 1)][folds[fold][1]] = 1\n return df_new\n\n\ndef data_split_GroupKFold(df, col_index, col_group, n_splits=5, random_state=42\n ):\n \"\"\"\n\n :param df:\n :param col_index:\n :param col_group:\n :param n_splits:\n :param random_state:\n :return:\n \"\"\"\n group = np.sort(df[col_group].unique())\n print('num group: {}'.format(len(group)))\n np.random.seed(random_state)\n group = group[np.random.permutation(len(group))]\n fold_list = []\n fold = 0\n count = 0\n fold_list.append([])\n for i, item in enumerate(group):\n count += (df[col_group] == item).sum()\n fold_list[fold].append(item)\n if count > len(df) / n_splits * (fold + 1):\n 
fold_list.append([])\n fold += 1\n df_new = df[[col_index]]\n for fold in range(n_splits):\n df_new['fold{}_train'.format(fold + 1)] = df[col_group].apply(lambda\n x: x not in fold_list[fold]).astype(np.int)\n df_new['fold{}_valid'.format(fold + 1)] = 1 - df_new['fold{}_train'\n .format(fold + 1)]\n for i in range(n_splits):\n print('fold: {}, valid: {}. group: {}'.format(i + 1, (df_new[\n 'fold{}_valid'.format(i + 1)] == 1).sum(), len(fold_list[i])))\n return df_new\n\n\ndef main():\n df = pd.read_csv('../input/melanoma/train.csv')\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport os, shutil, time, pickle, warnings, logging\nimport yaml\nfrom sklearn import preprocessing\nfrom sklearn.model_selection import StratifiedKFold, KFold\nfrom sklearn import metrics\nfrom scipy.special import erfinv\nfrom scipy.stats import mode\n\nwarnings.filterwarnings('ignore')\n\n\ndef data_split_StratifiedKFold(df, col_index, col_stratified, n_splits=5, random_state=42):\n folds = list(StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=random_state).split(\n np.arange(len(df)), y=df[col_stratified]))\n\n df_new = df[[col_index]]\n for fold in range(n_splits):\n df_new['fold{}_train'.format(fold + 1)] = 0\n df_new['fold{}_train'.format(fold + 1)][folds[fold][0]] = 1\n df_new['fold{}_valid'.format(fold + 1)] = 0\n df_new['fold{}_valid'.format(fold + 1)][folds[fold][1]] = 1\n\n return df_new\n\n\ndef data_split_KFold(df, col_index, n_splits=5, random_state=42):\n folds = list(KFold(n_splits=n_splits, shuffle=True, random_state=random_state).split(\n np.arange(len(df))))\n\n df_new = df[[col_index]]\n for fold in range(n_splits):\n df_new['fold{}_train'.format(fold + 1)] = 0\n df_new['fold{}_train'.format(fold + 1)][folds[fold][0]] = 1\n df_new['fold{}_valid'.format(fold + 1)] = 0\n df_new['fold{}_valid'.format(fold + 1)][folds[fold][1]] = 1\n\n return df_new\n\n\ndef data_split_GroupKFold(df, col_index, col_group, n_splits=5, random_state=42):\n \"\"\"\n\n :param df:\n :param col_index:\n :param col_group:\n :param n_splits:\n :param random_state:\n :return:\n \"\"\"\n group = np.sort(df[col_group].unique())\n print(\"num group: {}\".format(len(group)))\n np.random.seed(random_state)\n group = group[np.random.permutation(len(group))]\n fold_list = []\n fold = 0\n count = 0\n fold_list.append([])\n for i, item in enumerate(group):\n count += (df[col_group] == item).sum()\n fold_list[fold].append(item)\n if count > len(df) / n_splits * (fold + 1):\n 
fold_list.append([])\n fold += 1\n\n df_new = df[[col_index]]\n for fold in range(n_splits):\n df_new['fold{}_train'.format(fold + 1)] = df[col_group].apply(\n lambda x: x not in fold_list[fold]).astype(np.int)\n df_new['fold{}_valid'.format(fold + 1)] = 1 - df_new['fold{}_train'.format(fold + 1)]\n\n for i in range(n_splits):\n print(\"fold: {}, valid: {}. group: {}\".format(\n i + 1,\n (df_new['fold{}_valid'.format(i + 1)] == 1).sum(),\n len(fold_list[i]))\n )\n\n return df_new\n\n\ndef main():\n df = pd.read_csv(\"../input/melanoma/train.csv\")\n\n\nif __name__ == '__main__':\n main()",
"step-ids": [
2,
4,
5,
6,
7
]
}
|
[
2,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
dependencies = [('album', '0013_auto_20160210_1609')]
operations = [migrations.CreateModel(name='Albumname', fields=[('id',
models.AutoField(auto_created=True, primary_key=True, serialize=
False, verbose_name='ID')), ('name', models.CharField(max_length=
100)), ('album_text', models.CharField(blank=True, max_length=1000,
null=True)), ('album_no', models.IntegerField(blank=True, null=True
)), ('lineup', models.ManyToManyField(to='album.Shilpi')), (
'prokashok', models.ManyToManyField(to='album.Prokashok'))])]
<|reserved_special_token_1|>
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [('album', '0013_auto_20160210_1609')]
operations = [migrations.CreateModel(name='Albumname', fields=[('id',
models.AutoField(auto_created=True, primary_key=True, serialize=
False, verbose_name='ID')), ('name', models.CharField(max_length=
100)), ('album_text', models.CharField(blank=True, max_length=1000,
null=True)), ('album_no', models.IntegerField(blank=True, null=True
)), ('lineup', models.ManyToManyField(to='album.Shilpi')), (
'prokashok', models.ManyToManyField(to='album.Prokashok'))])]
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-02-10 11:06
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('album', '0013_auto_20160210_1609'),
]
operations = [
migrations.CreateModel(
name='Albumname',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('album_text', models.CharField(blank=True, max_length=1000, null=True)),
('album_no', models.IntegerField(blank=True, null=True)),
('lineup', models.ManyToManyField(to='album.Shilpi')),
('prokashok', models.ManyToManyField(to='album.Prokashok')),
],
),
]
|
flexible
|
{
"blob_id": "a727502063bd0cd959fdde201832d37b29b4db70",
"index": 4304,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('album', '0013_auto_20160210_1609')]\n operations = [migrations.CreateModel(name='Albumname', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('name', models.CharField(max_length=\n 100)), ('album_text', models.CharField(blank=True, max_length=1000,\n null=True)), ('album_no', models.IntegerField(blank=True, null=True\n )), ('lineup', models.ManyToManyField(to='album.Shilpi')), (\n 'prokashok', models.ManyToManyField(to='album.Prokashok'))])]\n",
"step-4": "from __future__ import unicode_literals\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('album', '0013_auto_20160210_1609')]\n operations = [migrations.CreateModel(name='Albumname', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('name', models.CharField(max_length=\n 100)), ('album_text', models.CharField(blank=True, max_length=1000,\n null=True)), ('album_no', models.IntegerField(blank=True, null=True\n )), ('lineup', models.ManyToManyField(to='album.Shilpi')), (\n 'prokashok', models.ManyToManyField(to='album.Prokashok'))])]\n",
"step-5": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9.2 on 2016-02-10 11:06\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('album', '0013_auto_20160210_1609'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Albumname',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=100)),\n ('album_text', models.CharField(blank=True, max_length=1000, null=True)),\n ('album_no', models.IntegerField(blank=True, null=True)),\n ('lineup', models.ManyToManyField(to='album.Shilpi')),\n ('prokashok', models.ManyToManyField(to='album.Prokashok')),\n ],\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import sys
import os
def my_add(a, b):
return a + b
|
normal
|
{
"blob_id": "cc81e13bba0ea0186966bce7f5aac05bb106e971",
"index": 5935,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef my_add(a, b):\n return a + b\n",
"step-3": "import sys\nimport os\n\n\ndef my_add(a, b):\n return a + b\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
from .net import *
|
normal
|
{
"blob_id": "73337246bd54df53842360510148f3a6f4763ace",
"index": 6251,
"step-1": "<mask token>\n",
"step-2": "from .net import *\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
<|reserved_special_token_0|>
class Agent(object):
def __init__(self, model=None, lr=0.01, gamma=0.99):
self.gamma = gamma
self.AC = model
self.optimizer = Adam(AC.parameters(), lr=lr)
self.logp_as = []
self.values = []
self.rewards = []
def choose_action(self, obs):
action_logits, value = self.AC(obs)
distribution = Categorical(action_logits)
action = distribution.sample()
self.logp_as.append(distribution.log_prob(action))
self.values.append(value)
return action.item()
def learn(self):
R = 0
policy_losses = []
value_losses = []
returns = []
for r in self.rewards[::-1]:
R = r + self.gamma * R
returns.insert(0, R)
returns = torch.tensor(returns).to(Device)
returns = (returns - returns.mean()) / (returns.std() + 1e-05)
for logp_a, value, R in zip(self.logp_as, self.values, returns):
advantage = R - value.item()
policy_losses.append(-logp_a * advantage)
value_losses.append(F.smooth_l1_loss(value, torch.tensor([R]).
to(Device)))
self.optimizer.zero_grad()
loss = torch.stack(policy_losses).sum() + torch.stack(value_losses
).sum()
loss.backward(retain_graph=True)
self.optimizer.step()
self.rewards = []
self.values = []
self.logp_as = []
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ActorCriticNet(nn.Module):
def __init__(self, observation_space, action_space, hidden_sizes=[32,
32], activation=nn.Tanh):
super().__init__()
obs_dim = observation_space.shape[0]
action_dim = action_space.n
self.base_net = nn.Sequential(nn.Linear(obs_dim, hidden_sizes[0]))
self.pi = nn.Linear(hidden_sizes[1], action_dim)
self.vf = nn.Linear(hidden_sizes[1], 1)
self.to(Device)
def forward(self, obs):
obs = torch.Tensor(obs).to(Device)
x = F.relu(self.base_net(obs))
action_logits = F.softmax(self.pi(x), dim=-1)
value = self.vf(x)
return action_logits, value
class Agent(object):
def __init__(self, model=None, lr=0.01, gamma=0.99):
self.gamma = gamma
self.AC = model
self.optimizer = Adam(AC.parameters(), lr=lr)
self.logp_as = []
self.values = []
self.rewards = []
def choose_action(self, obs):
action_logits, value = self.AC(obs)
distribution = Categorical(action_logits)
action = distribution.sample()
self.logp_as.append(distribution.log_prob(action))
self.values.append(value)
return action.item()
def learn(self):
R = 0
policy_losses = []
value_losses = []
returns = []
for r in self.rewards[::-1]:
R = r + self.gamma * R
returns.insert(0, R)
returns = torch.tensor(returns).to(Device)
returns = (returns - returns.mean()) / (returns.std() + 1e-05)
for logp_a, value, R in zip(self.logp_as, self.values, returns):
advantage = R - value.item()
policy_losses.append(-logp_a * advantage)
value_losses.append(F.smooth_l1_loss(value, torch.tensor([R]).
to(Device)))
self.optimizer.zero_grad()
loss = torch.stack(policy_losses).sum() + torch.stack(value_losses
).sum()
loss.backward(retain_graph=True)
self.optimizer.step()
self.rewards = []
self.values = []
self.logp_as = []
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ActorCriticNet(nn.Module):
def __init__(self, observation_space, action_space, hidden_sizes=[32,
32], activation=nn.Tanh):
super().__init__()
obs_dim = observation_space.shape[0]
action_dim = action_space.n
self.base_net = nn.Sequential(nn.Linear(obs_dim, hidden_sizes[0]))
self.pi = nn.Linear(hidden_sizes[1], action_dim)
self.vf = nn.Linear(hidden_sizes[1], 1)
self.to(Device)
def forward(self, obs):
obs = torch.Tensor(obs).to(Device)
x = F.relu(self.base_net(obs))
action_logits = F.softmax(self.pi(x), dim=-1)
value = self.vf(x)
return action_logits, value
class Agent(object):
def __init__(self, model=None, lr=0.01, gamma=0.99):
self.gamma = gamma
self.AC = model
self.optimizer = Adam(AC.parameters(), lr=lr)
self.logp_as = []
self.values = []
self.rewards = []
def choose_action(self, obs):
action_logits, value = self.AC(obs)
distribution = Categorical(action_logits)
action = distribution.sample()
self.logp_as.append(distribution.log_prob(action))
self.values.append(value)
return action.item()
def learn(self):
R = 0
policy_losses = []
value_losses = []
returns = []
for r in self.rewards[::-1]:
R = r + self.gamma * R
returns.insert(0, R)
returns = torch.tensor(returns).to(Device)
returns = (returns - returns.mean()) / (returns.std() + 1e-05)
for logp_a, value, R in zip(self.logp_as, self.values, returns):
advantage = R - value.item()
policy_losses.append(-logp_a * advantage)
value_losses.append(F.smooth_l1_loss(value, torch.tensor([R]).
to(Device)))
self.optimizer.zero_grad()
loss = torch.stack(policy_losses).sum() + torch.stack(value_losses
).sum()
loss.backward(retain_graph=True)
self.optimizer.step()
self.rewards = []
self.values = []
self.logp_as = []
<|reserved_special_token_0|>
for episode in range(EPISODES):
done = False
obs = env.reset()
I = 1
T = 0
episode_reward = 0
running_reward = 0
if episode % show_every == 0:
is_render = True
else:
is_render = False
while not done:
if is_render:
env.render('human')
action = agent.choose_action(obs)
next_obs, reward, done, _ = env.step(action)
obs = next_obs
agent.rewards.append(reward)
T += 1
episode_reward += reward
agent.learn()
running_reward = 0.05 * episode_reward + (1 - 0.05) * running_reward
print(f'episode_{episode} \t ep_reward = {episode_reward} \t ep_len = {T}')
if running_reward > env.spec.reward_threshold:
print(
'Solved! Running reward is now {} and the last episode runs to {} time steps!'
.format(running_reward, T))
break
<|reserved_special_token_1|>
import torch.nn as nn
import torch
from torch.distributions.categorical import Categorical
import torch.nn.functional as F
from torch.optim import Adam
import gym
import numpy as np
Device = torch.device('cuda:0')
class ActorCriticNet(nn.Module):
def __init__(self, observation_space, action_space, hidden_sizes=[32,
32], activation=nn.Tanh):
super().__init__()
obs_dim = observation_space.shape[0]
action_dim = action_space.n
self.base_net = nn.Sequential(nn.Linear(obs_dim, hidden_sizes[0]))
self.pi = nn.Linear(hidden_sizes[1], action_dim)
self.vf = nn.Linear(hidden_sizes[1], 1)
self.to(Device)
def forward(self, obs):
obs = torch.Tensor(obs).to(Device)
x = F.relu(self.base_net(obs))
action_logits = F.softmax(self.pi(x), dim=-1)
value = self.vf(x)
return action_logits, value
class Agent(object):
def __init__(self, model=None, lr=0.01, gamma=0.99):
self.gamma = gamma
self.AC = model
self.optimizer = Adam(AC.parameters(), lr=lr)
self.logp_as = []
self.values = []
self.rewards = []
def choose_action(self, obs):
action_logits, value = self.AC(obs)
distribution = Categorical(action_logits)
action = distribution.sample()
self.logp_as.append(distribution.log_prob(action))
self.values.append(value)
return action.item()
def learn(self):
R = 0
policy_losses = []
value_losses = []
returns = []
for r in self.rewards[::-1]:
R = r + self.gamma * R
returns.insert(0, R)
returns = torch.tensor(returns).to(Device)
returns = (returns - returns.mean()) / (returns.std() + 1e-05)
for logp_a, value, R in zip(self.logp_as, self.values, returns):
advantage = R - value.item()
policy_losses.append(-logp_a * advantage)
value_losses.append(F.smooth_l1_loss(value, torch.tensor([R]).
to(Device)))
self.optimizer.zero_grad()
loss = torch.stack(policy_losses).sum() + torch.stack(value_losses
).sum()
loss.backward(retain_graph=True)
self.optimizer.step()
self.rewards = []
self.values = []
self.logp_as = []
env = gym.make('CartPole-v1')
state = env.reset()
lr = 0.03
EPISODES = 30000
GAMMA = 0.99
hidden_sizes = [128, 128]
show_every = 100
AC = ActorCriticNet(env.observation_space, env.action_space, hidden_sizes)
agent = Agent(AC, lr=lr, gamma=GAMMA)
for episode in range(EPISODES):
done = False
obs = env.reset()
I = 1
T = 0
episode_reward = 0
running_reward = 0
if episode % show_every == 0:
is_render = True
else:
is_render = False
while not done:
if is_render:
env.render('human')
action = agent.choose_action(obs)
next_obs, reward, done, _ = env.step(action)
obs = next_obs
agent.rewards.append(reward)
T += 1
episode_reward += reward
agent.learn()
running_reward = 0.05 * episode_reward + (1 - 0.05) * running_reward
print(f'episode_{episode} \t ep_reward = {episode_reward} \t ep_len = {T}')
if running_reward > env.spec.reward_threshold:
print(
'Solved! Running reward is now {} and the last episode runs to {} time steps!'
.format(running_reward, T))
break
<|reserved_special_token_1|>
import torch.nn as nn
import torch
from torch.distributions.categorical import Categorical
import torch.nn.functional as F
from torch.optim import Adam
import gym
import numpy as np
Device = torch.device("cuda:0")
class ActorCriticNet(nn.Module):
def __init__(self, observation_space, action_space,
hidden_sizes=[32,32], activation=nn.Tanh):
super().__init__()
obs_dim = observation_space.shape[0]
action_dim = action_space.n
self.base_net = nn.Sequential(
nn.Linear(obs_dim, hidden_sizes[0]),
# nn.Linear(hidden_sizes[0], hidden_sizes[1]),
)
self.pi = nn.Linear(hidden_sizes[1], action_dim)
self.vf = nn.Linear(hidden_sizes[1],1)
self.to(Device)
def forward(self, obs):
obs = torch.Tensor(obs).to(Device)
x = F.relu(self.base_net(obs))
action_logits = F.softmax(self.pi(x), dim=-1)
value = self.vf(x)
return action_logits, value
class Agent(object):
    """Monte-Carlo actor-critic agent (REINFORCE with a learned value baseline).

    Buffers per-step log-probabilities, value estimates and rewards during
    an episode, then performs one combined policy/value update in ``learn``.
    The caller is responsible for appending rewards to ``self.rewards``.
    """

    def __init__(self, model=None, lr=1e-2, gamma=0.99):
        self.gamma = gamma
        self.AC = model
        # Bug fix: optimise the model that was passed in (previously this
        # reached for the module-level global ``AC``, which only worked by
        # accident because the script happened to pass that same object).
        self.optimizer = Adam(self.AC.parameters(), lr=lr)
        self.logp_as = []   # log pi(a_t | s_t) for each step of the episode
        self.values = []    # V(s_t) estimates for each step
        self.rewards = []   # r_t for each step (appended by the caller)

    def choose_action(self, obs):
        """Sample an action, recording its log-prob and the state value."""
        action_logits, value = self.AC(obs)
        distribution = Categorical(action_logits)
        action = distribution.sample()
        self.logp_as.append(distribution.log_prob(action))
        self.values.append(value)
        return action.item()

    def learn(self):
        """Run one policy/value update from the buffered episode, then reset buffers."""
        R = 0
        policy_losses = []
        value_losses = []
        returns = []
        # Discounted returns, computed backwards from the final reward.
        for r in self.rewards[::-1]:
            R = r + self.gamma * R
            returns.insert(0, R)
        returns = torch.tensor(returns).to(Device)
        # Normalise returns to reduce gradient variance (epsilon avoids /0
        # for single-step episodes).
        returns = (returns - returns.mean()) / (returns.std() + 0.00001)
        for logp_a, value, R in zip(self.logp_as, self.values, returns):
            advantage = R - value.item()
            # Actor (policy-gradient) loss term.
            policy_losses.append(-logp_a * advantage)
            # Critic loss: smooth L1 between V(s) and the observed return.
            value_losses.append(F.smooth_l1_loss(value, torch.tensor([R]).to(Device)))
        self.optimizer.zero_grad()
        loss = torch.stack(policy_losses).sum() + torch.stack(value_losses).sum()
        loss.backward(retain_graph=True)
        self.optimizer.step()
        # Clear the episode buffers for the next rollout.
        self.rewards = []
        self.values = []
        self.logp_as = []
# Build env
env = gym.make('CartPole-v1')
state = env.reset()  # NOTE(review): unused — the training loop resets per episode

# Learning setting
lr = 3e-2  # Adam learning rate
EPISODES=30000  # maximum number of training episodes
GAMMA = 0.99  # discount factor for returns
hidden_sizes = [128,128]  # hidden-layer widths for ActorCriticNet
show_every = 100  # render every N-th episode

# Shared actor-critic network and the agent that trains it.
AC = ActorCriticNet(env.observation_space, env.action_space, hidden_sizes)
agent = Agent(AC, lr=lr, gamma=GAMMA)
# Exponential moving average of episode rewards, tracked ACROSS episodes.
# Bug fix: this was previously reset to 0 at the top of every episode,
# which collapsed the average to 0.05 * episode_reward and made the
# solve-threshold check effectively unreachable.
running_reward = 0.0
for episode in range(EPISODES):
    # Per-episode init
    done = False
    obs = env.reset()
    T = 0  # episode length (environment steps)
    episode_reward = 0
    # Render only every `show_every`-th episode to keep training fast.
    is_render = episode % show_every == 0

    while not done:
        if is_render:
            env.render("human")
        # Sample an action (also records log-prob and value in the agent).
        action = agent.choose_action(obs)
        next_obs, reward, done, _ = env.step(action)
        obs = next_obs
        agent.rewards.append(reward)
        T += 1
        episode_reward += reward

    # One Monte-Carlo policy/value update per finished episode.
    agent.learn()

    # EMA with alpha = 0.05 of per-episode returns.
    running_reward = 0.05 * episode_reward + (1 - 0.05) * running_reward

    print(f"episode_{episode} \t ep_reward = {episode_reward} \t ep_len = {T}")
    if running_reward > env.spec.reward_threshold:
        print("Solved! Running reward is now {} and "
              "the last episode runs to {} time steps!".format(running_reward, T))
        break
|
flexible
|
{
"blob_id": "e1ab4b034c949b8158c6ccc1e8e3f4a960a38c72",
"index": 4382,
"step-1": "<mask token>\n\n\nclass Agent(object):\n\n def __init__(self, model=None, lr=0.01, gamma=0.99):\n self.gamma = gamma\n self.AC = model\n self.optimizer = Adam(AC.parameters(), lr=lr)\n self.logp_as = []\n self.values = []\n self.rewards = []\n\n def choose_action(self, obs):\n action_logits, value = self.AC(obs)\n distribution = Categorical(action_logits)\n action = distribution.sample()\n self.logp_as.append(distribution.log_prob(action))\n self.values.append(value)\n return action.item()\n\n def learn(self):\n R = 0\n policy_losses = []\n value_losses = []\n returns = []\n for r in self.rewards[::-1]:\n R = r + self.gamma * R\n returns.insert(0, R)\n returns = torch.tensor(returns).to(Device)\n returns = (returns - returns.mean()) / (returns.std() + 1e-05)\n for logp_a, value, R in zip(self.logp_as, self.values, returns):\n advantage = R - value.item()\n policy_losses.append(-logp_a * advantage)\n value_losses.append(F.smooth_l1_loss(value, torch.tensor([R]).\n to(Device)))\n self.optimizer.zero_grad()\n loss = torch.stack(policy_losses).sum() + torch.stack(value_losses\n ).sum()\n loss.backward(retain_graph=True)\n self.optimizer.step()\n self.rewards = []\n self.values = []\n self.logp_as = []\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass ActorCriticNet(nn.Module):\n\n def __init__(self, observation_space, action_space, hidden_sizes=[32, \n 32], activation=nn.Tanh):\n super().__init__()\n obs_dim = observation_space.shape[0]\n action_dim = action_space.n\n self.base_net = nn.Sequential(nn.Linear(obs_dim, hidden_sizes[0]))\n self.pi = nn.Linear(hidden_sizes[1], action_dim)\n self.vf = nn.Linear(hidden_sizes[1], 1)\n self.to(Device)\n\n def forward(self, obs):\n obs = torch.Tensor(obs).to(Device)\n x = F.relu(self.base_net(obs))\n action_logits = F.softmax(self.pi(x), dim=-1)\n value = self.vf(x)\n return action_logits, value\n\n\nclass Agent(object):\n\n def __init__(self, model=None, lr=0.01, gamma=0.99):\n self.gamma = gamma\n self.AC = model\n self.optimizer = Adam(AC.parameters(), lr=lr)\n self.logp_as = []\n self.values = []\n self.rewards = []\n\n def choose_action(self, obs):\n action_logits, value = self.AC(obs)\n distribution = Categorical(action_logits)\n action = distribution.sample()\n self.logp_as.append(distribution.log_prob(action))\n self.values.append(value)\n return action.item()\n\n def learn(self):\n R = 0\n policy_losses = []\n value_losses = []\n returns = []\n for r in self.rewards[::-1]:\n R = r + self.gamma * R\n returns.insert(0, R)\n returns = torch.tensor(returns).to(Device)\n returns = (returns - returns.mean()) / (returns.std() + 1e-05)\n for logp_a, value, R in zip(self.logp_as, self.values, returns):\n advantage = R - value.item()\n policy_losses.append(-logp_a * advantage)\n value_losses.append(F.smooth_l1_loss(value, torch.tensor([R]).\n to(Device)))\n self.optimizer.zero_grad()\n loss = torch.stack(policy_losses).sum() + torch.stack(value_losses\n ).sum()\n loss.backward(retain_graph=True)\n self.optimizer.step()\n self.rewards = []\n self.values = []\n self.logp_as = []\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass ActorCriticNet(nn.Module):\n\n def __init__(self, observation_space, action_space, hidden_sizes=[32, \n 32], activation=nn.Tanh):\n super().__init__()\n obs_dim = observation_space.shape[0]\n action_dim = action_space.n\n self.base_net = nn.Sequential(nn.Linear(obs_dim, hidden_sizes[0]))\n self.pi = nn.Linear(hidden_sizes[1], action_dim)\n self.vf = nn.Linear(hidden_sizes[1], 1)\n self.to(Device)\n\n def forward(self, obs):\n obs = torch.Tensor(obs).to(Device)\n x = F.relu(self.base_net(obs))\n action_logits = F.softmax(self.pi(x), dim=-1)\n value = self.vf(x)\n return action_logits, value\n\n\nclass Agent(object):\n\n def __init__(self, model=None, lr=0.01, gamma=0.99):\n self.gamma = gamma\n self.AC = model\n self.optimizer = Adam(AC.parameters(), lr=lr)\n self.logp_as = []\n self.values = []\n self.rewards = []\n\n def choose_action(self, obs):\n action_logits, value = self.AC(obs)\n distribution = Categorical(action_logits)\n action = distribution.sample()\n self.logp_as.append(distribution.log_prob(action))\n self.values.append(value)\n return action.item()\n\n def learn(self):\n R = 0\n policy_losses = []\n value_losses = []\n returns = []\n for r in self.rewards[::-1]:\n R = r + self.gamma * R\n returns.insert(0, R)\n returns = torch.tensor(returns).to(Device)\n returns = (returns - returns.mean()) / (returns.std() + 1e-05)\n for logp_a, value, R in zip(self.logp_as, self.values, returns):\n advantage = R - value.item()\n policy_losses.append(-logp_a * advantage)\n value_losses.append(F.smooth_l1_loss(value, torch.tensor([R]).\n to(Device)))\n self.optimizer.zero_grad()\n loss = torch.stack(policy_losses).sum() + torch.stack(value_losses\n ).sum()\n loss.backward(retain_graph=True)\n self.optimizer.step()\n self.rewards = []\n self.values = []\n self.logp_as = []\n\n\n<mask token>\nfor episode in range(EPISODES):\n done = False\n obs = env.reset()\n I = 1\n T = 0\n episode_reward = 0\n running_reward = 0\n if episode % 
show_every == 0:\n is_render = True\n else:\n is_render = False\n while not done:\n if is_render:\n env.render('human')\n action = agent.choose_action(obs)\n next_obs, reward, done, _ = env.step(action)\n obs = next_obs\n agent.rewards.append(reward)\n T += 1\n episode_reward += reward\n agent.learn()\n running_reward = 0.05 * episode_reward + (1 - 0.05) * running_reward\n print(f'episode_{episode} \\t ep_reward = {episode_reward} \\t ep_len = {T}')\n if running_reward > env.spec.reward_threshold:\n print(\n 'Solved! Running reward is now {} and the last episode runs to {} time steps!'\n .format(running_reward, T))\n break\n",
"step-4": "import torch.nn as nn\nimport torch\nfrom torch.distributions.categorical import Categorical\nimport torch.nn.functional as F\nfrom torch.optim import Adam\nimport gym\nimport numpy as np\nDevice = torch.device('cuda:0')\n\n\nclass ActorCriticNet(nn.Module):\n\n def __init__(self, observation_space, action_space, hidden_sizes=[32, \n 32], activation=nn.Tanh):\n super().__init__()\n obs_dim = observation_space.shape[0]\n action_dim = action_space.n\n self.base_net = nn.Sequential(nn.Linear(obs_dim, hidden_sizes[0]))\n self.pi = nn.Linear(hidden_sizes[1], action_dim)\n self.vf = nn.Linear(hidden_sizes[1], 1)\n self.to(Device)\n\n def forward(self, obs):\n obs = torch.Tensor(obs).to(Device)\n x = F.relu(self.base_net(obs))\n action_logits = F.softmax(self.pi(x), dim=-1)\n value = self.vf(x)\n return action_logits, value\n\n\nclass Agent(object):\n\n def __init__(self, model=None, lr=0.01, gamma=0.99):\n self.gamma = gamma\n self.AC = model\n self.optimizer = Adam(AC.parameters(), lr=lr)\n self.logp_as = []\n self.values = []\n self.rewards = []\n\n def choose_action(self, obs):\n action_logits, value = self.AC(obs)\n distribution = Categorical(action_logits)\n action = distribution.sample()\n self.logp_as.append(distribution.log_prob(action))\n self.values.append(value)\n return action.item()\n\n def learn(self):\n R = 0\n policy_losses = []\n value_losses = []\n returns = []\n for r in self.rewards[::-1]:\n R = r + self.gamma * R\n returns.insert(0, R)\n returns = torch.tensor(returns).to(Device)\n returns = (returns - returns.mean()) / (returns.std() + 1e-05)\n for logp_a, value, R in zip(self.logp_as, self.values, returns):\n advantage = R - value.item()\n policy_losses.append(-logp_a * advantage)\n value_losses.append(F.smooth_l1_loss(value, torch.tensor([R]).\n to(Device)))\n self.optimizer.zero_grad()\n loss = torch.stack(policy_losses).sum() + torch.stack(value_losses\n ).sum()\n loss.backward(retain_graph=True)\n self.optimizer.step()\n self.rewards 
= []\n self.values = []\n self.logp_as = []\n\n\nenv = gym.make('CartPole-v1')\nstate = env.reset()\nlr = 0.03\nEPISODES = 30000\nGAMMA = 0.99\nhidden_sizes = [128, 128]\nshow_every = 100\nAC = ActorCriticNet(env.observation_space, env.action_space, hidden_sizes)\nagent = Agent(AC, lr=lr, gamma=GAMMA)\nfor episode in range(EPISODES):\n done = False\n obs = env.reset()\n I = 1\n T = 0\n episode_reward = 0\n running_reward = 0\n if episode % show_every == 0:\n is_render = True\n else:\n is_render = False\n while not done:\n if is_render:\n env.render('human')\n action = agent.choose_action(obs)\n next_obs, reward, done, _ = env.step(action)\n obs = next_obs\n agent.rewards.append(reward)\n T += 1\n episode_reward += reward\n agent.learn()\n running_reward = 0.05 * episode_reward + (1 - 0.05) * running_reward\n print(f'episode_{episode} \\t ep_reward = {episode_reward} \\t ep_len = {T}')\n if running_reward > env.spec.reward_threshold:\n print(\n 'Solved! Running reward is now {} and the last episode runs to {} time steps!'\n .format(running_reward, T))\n break\n",
"step-5": "import torch.nn as nn\nimport torch\nfrom torch.distributions.categorical import Categorical\nimport torch.nn.functional as F\nfrom torch.optim import Adam\n\nimport gym\nimport numpy as np\n\nDevice = torch.device(\"cuda:0\")\n\nclass ActorCriticNet(nn.Module):\n def __init__(self, observation_space, action_space,\n hidden_sizes=[32,32], activation=nn.Tanh):\n super().__init__()\n\n obs_dim = observation_space.shape[0]\n action_dim = action_space.n\n self.base_net = nn.Sequential(\n nn.Linear(obs_dim, hidden_sizes[0]),\n # nn.Linear(hidden_sizes[0], hidden_sizes[1]),\n )\n self.pi = nn.Linear(hidden_sizes[1], action_dim)\n self.vf = nn.Linear(hidden_sizes[1],1)\n self.to(Device)\n \n def forward(self, obs):\n obs = torch.Tensor(obs).to(Device)\n x = F.relu(self.base_net(obs))\n action_logits = F.softmax(self.pi(x), dim=-1)\n value = self.vf(x)\n return action_logits, value\n\nclass Agent(object):\n def __init__(self, model=None, lr=1e-2, gamma=0.99):\n self.gamma = gamma\n self.AC = model\n self.optimizer = Adam(AC.parameters(), lr=lr)\n self.logp_as = []\n self.values = []\n self.rewards = []\n\n def choose_action(self, obs):\n action_logits, value = self.AC(obs)\n distribution = Categorical(action_logits)\n action = distribution.sample()\n self.logp_as.append(distribution.log_prob(action))\n self.values.append(value)\n return action.item()\n \n def learn(self):\n\n R = 0\n policy_losses = []\n value_losses = []\n returns = []\n\n for r in self.rewards[::-1]:\n R = r + self.gamma * R\n returns.insert(0, R)\n returns = torch.tensor(returns).to(Device)\n returns = (returns - returns.mean()) / (returns.std() + 0.00001)\n\n for logp_a, value, R in zip(self.logp_as, self.values, returns):\n advantage = R - value.item()\n # calculate actor (policy) loss \n policy_losses.append(-logp_a * advantage)\n # calculate critic (value) loss using L1 smooth loss\n value_losses.append(F.smooth_l1_loss(value, torch.tensor([R]).to(Device)))\n\n 
self.optimizer.zero_grad()\n loss = torch.stack(policy_losses).sum() + torch.stack(value_losses).sum()\n loss.backward(retain_graph=True)\n self.optimizer.step()\n\n self.rewards = []\n self.values = []\n self.logp_as = []\n \n\n# Build env\nenv = gym.make('CartPole-v1')\nstate = env.reset()\n\n# Learning setting\nlr = 3e-2\nEPISODES=30000\nGAMMA = 0.99\nhidden_sizes = [128,128]\nshow_every = 100\n\nAC = ActorCriticNet(env.observation_space, env.action_space, hidden_sizes)\nagent = Agent(AC, lr=lr, gamma=GAMMA)\n\nfor episode in range(EPISODES):\n # For every episode init\n done = False\n obs = env.reset()\n I = 1\n T = 0\n\n # Logs\n episode_reward = 0\n running_reward = 0\n if episode % show_every == 0:\n is_render = True\n else:\n is_render = False\n\n while not done:\n # Render\n if is_render:\n env.render(\"human\")\n \n # Predict action and value\n action = agent.choose_action(obs)\n\n # Step the env\n next_obs, reward, done, _ = env.step(action)\n\n # Update obs\n obs = next_obs\n agent.rewards.append(reward)\n T += 1\n\n # Logs\n episode_reward += reward\n \n # Learn once\n agent.learn()\n\n # Update cumulative reward\n running_reward = 0.05 * episode_reward + (1 - 0.05) * running_reward\n \n print(f\"episode_{episode} \\t ep_reward = {episode_reward} \\t ep_len = {T}\")\n if running_reward > env.spec.reward_threshold:\n print(\"Solved! Running reward is now {} and \"\n \"the last episode runs to {} time steps!\".format(running_reward, T))\n break\n",
"step-ids": [
4,
7,
8,
10,
11
]
}
|
[
4,
7,
8,
10,
11
] |
import pymysql
def get_list(sql, args):
    """Execute a query and return every result row as a dict.

    Parameters
    ----------
    sql : str
        SQL statement with %s placeholders.
    args : tuple | list | dict
        Parameters bound into the statement (avoids SQL injection).

    Returns
    -------
    list[dict]
        All result rows (empty list if none).
    """
    conn = pymysql.connect(host='127.0.0.1', port=3306, user='root',
                           passwd='chen0918', db='web')
    try:
        cursor = conn.cursor(cursor=pymysql.cursors.DictCursor)
        try:
            # Ensure cursor/connection are released even if execute() raises
            # (previously they leaked on any exception).
            cursor.execute(sql, args)
            return cursor.fetchall()
        finally:
            cursor.close()
    finally:
        conn.close()
def get_one(sql, args):
    """Execute a query and return the first result row as a dict.

    Parameters
    ----------
    sql : str
        SQL statement with %s placeholders.
    args : tuple | list | dict
        Parameters bound into the statement (avoids SQL injection).

    Returns
    -------
    dict | None
        The first row, or None if the query returned nothing.
    """
    conn = pymysql.connect(host='127.0.0.1', port=3306, user='root',
                           passwd='chen0918', db='web')
    try:
        cursor = conn.cursor(cursor=pymysql.cursors.DictCursor)
        try:
            # Ensure cursor/connection are released even if execute() raises
            # (previously they leaked on any exception).
            cursor.execute(sql, args)
            return cursor.fetchone()
        finally:
            cursor.close()
    finally:
        conn.close()
def modify(sql, args):
    """Execute an INSERT/UPDATE/DELETE statement and commit it.

    Parameters
    ----------
    sql : str
        SQL statement with %s placeholders.
    args : tuple | list | dict
        Parameters bound into the statement (avoids SQL injection).
    """
    conn = pymysql.connect(host='127.0.0.1', port=3306, user='root',
                           passwd='chen0918', db='web')
    try:
        cursor = conn.cursor(cursor=pymysql.cursors.DictCursor)
        try:
            cursor.execute(sql, args)
            # Commit only after a successful execute; on failure the
            # connection is still closed (previously it leaked).
            conn.commit()
        finally:
            cursor.close()
    finally:
        conn.close()
|
normal
|
{
"blob_id": "80819ec83572737c89044936fc269154b190751a",
"index": 2372,
"step-1": "<mask token>\n\n\ndef modify(sql, args):\n conn = pymysql.connect(host='127.0.0.1', port=3306, user='root', passwd\n ='chen0918', db='web')\n cursor = conn.cursor(cursor=pymysql.cursors.DictCursor)\n cursor.execute(sql, args)\n conn.commit()\n cursor.close()\n conn.close()\n",
"step-2": "<mask token>\n\n\ndef get_list(sql, args):\n conn = pymysql.connect(host='127.0.0.1', port=3306, user='root', passwd\n ='chen0918', db='web')\n cursor = conn.cursor(cursor=pymysql.cursors.DictCursor)\n cursor.execute(sql, args)\n result = cursor.fetchall()\n cursor.close()\n conn.close()\n return result\n\n\n<mask token>\n\n\ndef modify(sql, args):\n conn = pymysql.connect(host='127.0.0.1', port=3306, user='root', passwd\n ='chen0918', db='web')\n cursor = conn.cursor(cursor=pymysql.cursors.DictCursor)\n cursor.execute(sql, args)\n conn.commit()\n cursor.close()\n conn.close()\n",
"step-3": "<mask token>\n\n\ndef get_list(sql, args):\n conn = pymysql.connect(host='127.0.0.1', port=3306, user='root', passwd\n ='chen0918', db='web')\n cursor = conn.cursor(cursor=pymysql.cursors.DictCursor)\n cursor.execute(sql, args)\n result = cursor.fetchall()\n cursor.close()\n conn.close()\n return result\n\n\ndef get_one(sql, args):\n conn = pymysql.connect(host='127.0.0.1', port=3306, user='root', passwd\n ='chen0918', db='web')\n cursor = conn.cursor(cursor=pymysql.cursors.DictCursor)\n cursor.execute(sql, args)\n result = cursor.fetchone()\n cursor.close()\n conn.close()\n return result\n\n\ndef modify(sql, args):\n conn = pymysql.connect(host='127.0.0.1', port=3306, user='root', passwd\n ='chen0918', db='web')\n cursor = conn.cursor(cursor=pymysql.cursors.DictCursor)\n cursor.execute(sql, args)\n conn.commit()\n cursor.close()\n conn.close()\n",
"step-4": "import pymysql\n\n\ndef get_list(sql, args):\n conn = pymysql.connect(host='127.0.0.1', port=3306, user='root', passwd\n ='chen0918', db='web')\n cursor = conn.cursor(cursor=pymysql.cursors.DictCursor)\n cursor.execute(sql, args)\n result = cursor.fetchall()\n cursor.close()\n conn.close()\n return result\n\n\ndef get_one(sql, args):\n conn = pymysql.connect(host='127.0.0.1', port=3306, user='root', passwd\n ='chen0918', db='web')\n cursor = conn.cursor(cursor=pymysql.cursors.DictCursor)\n cursor.execute(sql, args)\n result = cursor.fetchone()\n cursor.close()\n conn.close()\n return result\n\n\ndef modify(sql, args):\n conn = pymysql.connect(host='127.0.0.1', port=3306, user='root', passwd\n ='chen0918', db='web')\n cursor = conn.cursor(cursor=pymysql.cursors.DictCursor)\n cursor.execute(sql, args)\n conn.commit()\n cursor.close()\n conn.close()\n",
"step-5": null,
"step-ids": [
1,
2,
3,
4
]
}
|
[
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
cv2.imshow('Original', img)
cv2.imshow('Contorno', contorno)
cv2.waitKey()
cv2.destroyAllWindows()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
img = cv2.imread('data/j.png', cv2.IMREAD_GRAYSCALE)
kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (7, 7))
erode = cv2.erode(img, kernel)
contorno = img - erode
cv2.imshow('Original', img)
cv2.imshow('Contorno', contorno)
cv2.waitKey()
cv2.destroyAllWindows()
<|reserved_special_token_1|>
import cv2
import numpy as np

# Outer contour via morphological erosion: original minus its eroded image.
source = cv2.imread('data/j.png', cv2.IMREAD_GRAYSCALE)
cross_kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (7, 7))
eroded = cv2.erode(source, cross_kernel)
outline = source - eroded

# Display both images until a key is pressed.
for title, image in (('Original', source), ('Contorno', outline)):
    cv2.imshow(title, image)
cv2.waitKey()
cv2.destroyAllWindows()
|
flexible
|
{
"blob_id": "809c9ce2b017612bedd1eb889c2b017275ee8b6f",
"index": 1729,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ncv2.imshow('Original', img)\ncv2.imshow('Contorno', contorno)\ncv2.waitKey()\ncv2.destroyAllWindows()\n",
"step-3": "<mask token>\nimg = cv2.imread('data/j.png', cv2.IMREAD_GRAYSCALE)\nkernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (7, 7))\nerode = cv2.erode(img, kernel)\ncontorno = img - erode\ncv2.imshow('Original', img)\ncv2.imshow('Contorno', contorno)\ncv2.waitKey()\ncv2.destroyAllWindows()\n",
"step-4": "import cv2\nimport numpy as np\nimg = cv2.imread('data/j.png', cv2.IMREAD_GRAYSCALE)\nkernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (7, 7))\nerode = cv2.erode(img, kernel)\ncontorno = img - erode\ncv2.imshow('Original', img)\ncv2.imshow('Contorno', contorno)\ncv2.waitKey()\ncv2.destroyAllWindows()\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def CharlietheDog(strArr):
def walk(food_home, dog, matriz, steps=0):
food_home_dx = food_home[0][0] - dog[0]
food_home_dy = food_home[0][1] - dog[1]
walk_x = food_home_dx / (abs(food_home_dx) if food_home_dx != 0 else 1)
walk_y = food_home_dy / (abs(food_home_dy) if food_home_dy != 0 else 1)
steps += abs(walk_x) + abs(walk_y)
dog = dog[0] + walk_x, dog[1] + walk_y
if food_home[0] == dog:
food_home = food_home[1:]
food_home_size = len(food_home)
if food_home_size <= 0:
return steps
return walk(food_home, dog, matriz, steps)
food = []
home = None
dog = None
for i in range(len(strArr)):
for j in range(len(strArr[i])):
if strArr[i][j] == 'F':
food.append((i, j))
if strArr[i][j] == 'H':
home = i, j
if strArr[i][j] == 'C':
dog = i, j
foods = permutations(food)
min_steps = None
for food in foods:
food_home = food + (home,)
steps = walk(food_home, dog, strArr)
if min_steps == None or steps < min_steps:
min_steps = steps
return int(min_steps)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def CharlietheDog(strArr):
def walk(food_home, dog, matriz, steps=0):
food_home_dx = food_home[0][0] - dog[0]
food_home_dy = food_home[0][1] - dog[1]
walk_x = food_home_dx / (abs(food_home_dx) if food_home_dx != 0 else 1)
walk_y = food_home_dy / (abs(food_home_dy) if food_home_dy != 0 else 1)
steps += abs(walk_x) + abs(walk_y)
dog = dog[0] + walk_x, dog[1] + walk_y
if food_home[0] == dog:
food_home = food_home[1:]
food_home_size = len(food_home)
if food_home_size <= 0:
return steps
return walk(food_home, dog, matriz, steps)
food = []
home = None
dog = None
for i in range(len(strArr)):
for j in range(len(strArr[i])):
if strArr[i][j] == 'F':
food.append((i, j))
if strArr[i][j] == 'H':
home = i, j
if strArr[i][j] == 'C':
dog = i, j
foods = permutations(food)
min_steps = None
for food in foods:
food_home = food + (home,)
steps = walk(food_home, dog, strArr)
if min_steps == None or steps < min_steps:
min_steps = steps
return int(min_steps)
print(CharlietheDog(raw_input()))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from itertools import permutations
def CharlietheDog(strArr):
    """Minimum number of moves to collect every 'F' and then reach 'H'."""

    def walk(targets, position, grid, steps=0):
        # Advance one cell per iteration toward the current target
        # (diagonally when both axes differ), popping targets as reached.
        while targets:
            goal = targets[0]
            dx = goal[0] - position[0]
            dy = goal[1] - position[1]
            move_x = dx / (abs(dx) if dx != 0 else 1)
            move_y = dy / (abs(dy) if dy != 0 else 1)
            steps += abs(move_x) + abs(move_y)
            position = (position[0] + move_x, position[1] + move_y)
            if targets[0] == position:
                targets = targets[1:]
        return steps

    # Locate the food cells, home and the dog in the grid.
    food = []
    home = None
    dog = None
    for row in range(len(strArr)):
        for col in range(len(strArr[row])):
            mark = strArr[row][col]
            if mark == 'F':
                food.append((row, col))
            if mark == 'H':
                home = (row, col)
            if mark == 'C':
                dog = (row, col)

    # Try every visiting order of the food, always ending at home.
    min_steps = None
    for ordering in permutations(food):
        total = walk(ordering + (home,), dog, strArr)
        if min_steps is None or total < min_steps:
            min_steps = total
    return int(min_steps)
# NOTE(review): raw_input is Python 2 only; on Python 3 this line needs input().
print(CharlietheDog(raw_input()))
<|reserved_special_token_1|>
"""
Have the function CharlietheDog(strArr) read the array of strings stored in strArr which
will be a 4x4 matrix of the characters 'C', 'H', 'F', 'O', where C represents Charlie the dog,
H represents its home, F represents dog food, and O represents and empty space in the grid.
Your goal is to figure out the least amount of moves required to get Charlie to grab each piece of food in the grid by moving
up, down, left, or right, and then make it home right after.
Charlie cannot move onto the home before all pieces of food have been collected.
For example: if strArr is ["FOOF", "OCOO", "OOOH", "FOOO"], then this looks like the following grid:
F O O F
O C O O
O O O H
F O O O
For the input above, the least amount of steps where the dog can reach each piece of food,
and then return home is 11 steps, so your program should return the number 11.
The grid will always contain between 1 and 8 pieces of food.
Use the Parameter Testing feature in the box below to test your code with different arguments.
"""
from itertools import permutations
def CharlietheDog(strArr):
    """Return the minimum number of grid moves for Charlie ('C') to collect
    every piece of food ('F') and then reach home ('H').

    The grid is open (no obstacles), so travelling between two cells costs
    exactly their Manhattan distance; the answer is the cheapest tour over
    all orderings of the food cells, ending at home. This replaces the
    previous one-cell-at-a-time recursive walk (float division, recursion
    per cell) with a direct integer distance sum — same result, simpler
    and more robust.

    NOTE(review): like the original implementation, this does not enforce
    the "cannot step onto H early" rule for cells merely passed through —
    only the visit order is constrained.
    """
    # Locate the food cells, home and the dog.
    food = []
    home = None
    dog = None
    for i in range(len(strArr)):
        for j in range(len(strArr[i])):
            cell = strArr[i][j]
            if cell == 'F':
                food.append((i, j))
            elif cell == 'H':
                home = (i, j)
            elif cell == 'C':
                dog = (i, j)

    def manhattan(a, b):
        # Grid distance with 4-directional moves.
        return abs(a[0] - b[0]) + abs(a[1] - b[1])

    # Brute-force all visiting orders (at most 8 foods -> 8! = 40320).
    best = None
    for order in permutations(food):
        route = (dog,) + order + (home,)
        steps = sum(manhattan(route[k], route[k + 1])
                    for k in range(len(route) - 1))
        if best is None or steps < best:
            best = steps
    return best
# keep this function call here
# NOTE(review): raw_input is Python 2 only; on Python 3 this line needs input().
print (CharlietheDog(raw_input())) 
|
flexible
|
{
"blob_id": "731110b02c8a09dc84042a99c14eef990ae33cd2",
"index": 5913,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef CharlietheDog(strArr):\n\n def walk(food_home, dog, matriz, steps=0):\n food_home_dx = food_home[0][0] - dog[0]\n food_home_dy = food_home[0][1] - dog[1]\n walk_x = food_home_dx / (abs(food_home_dx) if food_home_dx != 0 else 1)\n walk_y = food_home_dy / (abs(food_home_dy) if food_home_dy != 0 else 1)\n steps += abs(walk_x) + abs(walk_y)\n dog = dog[0] + walk_x, dog[1] + walk_y\n if food_home[0] == dog:\n food_home = food_home[1:]\n food_home_size = len(food_home)\n if food_home_size <= 0:\n return steps\n return walk(food_home, dog, matriz, steps)\n food = []\n home = None\n dog = None\n for i in range(len(strArr)):\n for j in range(len(strArr[i])):\n if strArr[i][j] == 'F':\n food.append((i, j))\n if strArr[i][j] == 'H':\n home = i, j\n if strArr[i][j] == 'C':\n dog = i, j\n foods = permutations(food)\n min_steps = None\n for food in foods:\n food_home = food + (home,)\n steps = walk(food_home, dog, strArr)\n if min_steps == None or steps < min_steps:\n min_steps = steps\n return int(min_steps)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef CharlietheDog(strArr):\n\n def walk(food_home, dog, matriz, steps=0):\n food_home_dx = food_home[0][0] - dog[0]\n food_home_dy = food_home[0][1] - dog[1]\n walk_x = food_home_dx / (abs(food_home_dx) if food_home_dx != 0 else 1)\n walk_y = food_home_dy / (abs(food_home_dy) if food_home_dy != 0 else 1)\n steps += abs(walk_x) + abs(walk_y)\n dog = dog[0] + walk_x, dog[1] + walk_y\n if food_home[0] == dog:\n food_home = food_home[1:]\n food_home_size = len(food_home)\n if food_home_size <= 0:\n return steps\n return walk(food_home, dog, matriz, steps)\n food = []\n home = None\n dog = None\n for i in range(len(strArr)):\n for j in range(len(strArr[i])):\n if strArr[i][j] == 'F':\n food.append((i, j))\n if strArr[i][j] == 'H':\n home = i, j\n if strArr[i][j] == 'C':\n dog = i, j\n foods = permutations(food)\n min_steps = None\n for food in foods:\n food_home = food + (home,)\n steps = walk(food_home, dog, strArr)\n if min_steps == None or steps < min_steps:\n min_steps = steps\n return int(min_steps)\n\n\nprint(CharlietheDog(raw_input()))\n",
"step-4": "<mask token>\nfrom itertools import permutations\n\n\ndef CharlietheDog(strArr):\n\n def walk(food_home, dog, matriz, steps=0):\n food_home_dx = food_home[0][0] - dog[0]\n food_home_dy = food_home[0][1] - dog[1]\n walk_x = food_home_dx / (abs(food_home_dx) if food_home_dx != 0 else 1)\n walk_y = food_home_dy / (abs(food_home_dy) if food_home_dy != 0 else 1)\n steps += abs(walk_x) + abs(walk_y)\n dog = dog[0] + walk_x, dog[1] + walk_y\n if food_home[0] == dog:\n food_home = food_home[1:]\n food_home_size = len(food_home)\n if food_home_size <= 0:\n return steps\n return walk(food_home, dog, matriz, steps)\n food = []\n home = None\n dog = None\n for i in range(len(strArr)):\n for j in range(len(strArr[i])):\n if strArr[i][j] == 'F':\n food.append((i, j))\n if strArr[i][j] == 'H':\n home = i, j\n if strArr[i][j] == 'C':\n dog = i, j\n foods = permutations(food)\n min_steps = None\n for food in foods:\n food_home = food + (home,)\n steps = walk(food_home, dog, strArr)\n if min_steps == None or steps < min_steps:\n min_steps = steps\n return int(min_steps)\n\n\nprint(CharlietheDog(raw_input()))\n",
"step-5": "\"\"\"\nHave the function CharlietheDog(strArr) read the array of strings stored in strArr which \nwill be a 4x4 matrix of the characters 'C', 'H', 'F', 'O', where C represents Charlie the dog,\n H represents its home, F represents dog food, and O represents and empty space in the grid. \n Your goal is to figure out the least amount of moves required to get Charlie to grab each piece of food in the grid by moving\n up, down, left, or right, and then make it home right after. \n Charlie cannot move onto the home before all pieces of food have been collected. \n For example: if strArr is [\"FOOF\", \"OCOO\", \"OOOH\", \"FOOO\"], then this looks like the following grid: \n \n F O O F\n O C O O\n O O O H\n F O O O \n\nFor the input above, the least amount of steps where the dog can reach each piece of food, \nand then return home is 11 steps, so your program should return the number 11. \nThe grid will always contain between 1 and 8 pieces of food. \n\nUse the Parameter Testing feature in the box below to test your code with different arguments.\n\"\"\"\n\nfrom itertools import permutations \n\ndef CharlietheDog(strArr):\n def walk(food_home, dog, matriz, steps=0):\n\n food_home_dx = food_home[0][0] - dog[0]\n food_home_dy = food_home[0][1] - dog[1]\n\n walk_x = food_home_dx/(abs(food_home_dx) if food_home_dx != 0 else 1)\n walk_y = food_home_dy/(abs(food_home_dy) if food_home_dy != 0 else 1)\n\n steps += abs(walk_x) + abs(walk_y)\n\n dog = (dog[0] + walk_x, dog[1] + walk_y)\n\n if food_home[0] == dog:\n food_home = food_home[1:]\n\n food_home_size = len(food_home)\n if food_home_size <= 0:\n return steps\n \n return walk(food_home, dog, matriz, steps)\n\n food = []\n home = None\n dog = None\n\n for i in range(len(strArr)):\n for j in range(len(strArr[i])):\n if strArr[i][j] == 'F':\n food.append((i, j))\n if strArr[i][j] == 'H':\n home = (i, j)\n if strArr[i][j] == 'C':\n dog = (i, j)\n\n foods = permutations(food)\n\n min_steps = None\n for food in 
foods:\n food_home = food + (home, )\n steps = walk(food_home, dog, strArr)\n if min_steps == None or steps < min_steps:\n min_steps = steps\n\n return int(min_steps)\n\n\n# keep this function call here \nprint (CharlietheDog(raw_input())) \n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class LIScore(Serializable['LIScore'], Score):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __str__(self):
return '<LIScore {!r}>'.format(self.name)
class EIScore(Serializable['EIScore'], Score):
    """
    [ALPHA] Evaluates the expected magnitude of improvement beyond baselines for given objectives.

    Parameters
    ----------
    name: str
        the name of the score
    description: str
        the description of the score
    objectives: list[Objective]
        objectives (e.g., maximize, minimize, tune, etc.)
    baselines: list[float]
        best-so-far values for the various objectives (there must be one for each objective)
    constraints: list[Constraint]
        constraints limiting the allowed values that material instances can have

    """
    # Serialization descriptors: map each attribute to a field name in the
    # platform's JSON representation.
    name = properties.String('name')
    description = properties.String('description')
    baselines = properties.List(properties.Float, 'baselines')
    objectives = properties.List(properties.Object(Objective), 'objectives')
    constraints = properties.List(properties.Object(Constraint), 'constraints')
    # Type discriminator used by the platform ("MEI" = expected improvement).
    typ = properties.String('type', default='MEI')

    def __init__(self, name: str, description: str, objectives: List[
        Objective], baselines: List[float], constraints: Optional[List[
        Constraint]]=None, session: Optional[Session]=None):
        self.name: str = name
        self.description: str = description
        self.objectives: List[Objective] = objectives
        self.baselines: List[float] = baselines
        # Default to an empty constraint list rather than sharing a mutable default.
        self.constraints: List[Constraint] = constraints or []
        self.session: Optional[Session] = session

    def __str__(self):
        return '<EIScore {!r}>'.format(self.name)
class EVScore(Serializable['EVScore'], Score):
"""
[ALPHA] Evaluates the expected value for given objectives.
Parameters
----------
name: str
the name of the score
description: str
the description of the score
objectives: list[Objective]
objectives (e.g., maximize, minimize, tune, etc.)
constraints: list[Constraint]
constraints limiting the allowed values that material instances can have
"""
name = properties.String('name')
description = properties.String('description')
objectives = properties.List(properties.Object(Objective), 'objectives')
constraints = properties.List(properties.Object(Constraint), 'constraints')
typ = properties.String('type', default='MEV')
def __init__(self, name: str, description: str, objectives: List[
Objective], constraints: Optional[List[Constraint]]=None, session:
Optional[Session]=None):
self.name: str = name
self.description: str = description
self.objectives: List[Objective] = objectives
self.constraints: List[Constraint] = constraints or []
self.session: Optional[Session] = session
def __str__(self):
return '<EVScore {!r}>'.format(self.name)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class LIScore(Serializable['LIScore'], Score):
<|reserved_special_token_0|>
name = properties.String('name')
description = properties.String('description')
baselines = properties.List(properties.Float, 'baselines')
objectives = properties.List(properties.Object(Objective), 'objectives')
constraints = properties.List(properties.Object(Constraint), 'constraints')
typ = properties.String('type', default='MLI')
def __init__(self, name: str, description: str, objectives: List[
Objective], baselines: List[float], constraints: Optional[List[
Constraint]]=None, session: Optional[Session]=None):
self.name: str = name
self.description: str = description
self.objectives: List[Objective] = objectives
self.baselines: List[float] = baselines
self.constraints: List[Constraint] = constraints or []
self.session: Optional[Session] = session
def __str__(self):
return '<LIScore {!r}>'.format(self.name)
class EIScore(Serializable['EIScore'], Score):
"""
[ALPHA] Evaluates the expected magnitude of improvement beyond baselines for given objectives.
Parameters
----------
name: str
the name of the score
description: str
the description of the score
objectives: list[Objective]
objectives (e.g., maximize, minimize, tune, etc.)
baselines: list[float]
best-so-far values for the various objectives (there must be one for each objective)
constraints: list[Constraint]
constraints limiting the allowed values that material instances can have
"""
name = properties.String('name')
description = properties.String('description')
baselines = properties.List(properties.Float, 'baselines')
objectives = properties.List(properties.Object(Objective), 'objectives')
constraints = properties.List(properties.Object(Constraint), 'constraints')
typ = properties.String('type', default='MEI')
def __init__(self, name: str, description: str, objectives: List[
Objective], baselines: List[float], constraints: Optional[List[
Constraint]]=None, session: Optional[Session]=None):
self.name: str = name
self.description: str = description
self.objectives: List[Objective] = objectives
self.baselines: List[float] = baselines
self.constraints: List[Constraint] = constraints or []
self.session: Optional[Session] = session
def __str__(self):
return '<EIScore {!r}>'.format(self.name)
class EVScore(Serializable['EVScore'], Score):
"""
[ALPHA] Evaluates the expected value for given objectives.
Parameters
----------
name: str
the name of the score
description: str
the description of the score
objectives: list[Objective]
objectives (e.g., maximize, minimize, tune, etc.)
constraints: list[Constraint]
constraints limiting the allowed values that material instances can have
"""
name = properties.String('name')
description = properties.String('description')
objectives = properties.List(properties.Object(Objective), 'objectives')
constraints = properties.List(properties.Object(Constraint), 'constraints')
typ = properties.String('type', default='MEV')
def __init__(self, name: str, description: str, objectives: List[
Objective], constraints: Optional[List[Constraint]]=None, session:
Optional[Session]=None):
self.name: str = name
self.description: str = description
self.objectives: List[Objective] = objectives
self.constraints: List[Constraint] = constraints or []
self.session: Optional[Session] = session
def __str__(self):
return '<EVScore {!r}>'.format(self.name)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Score(PolymorphicSerializable['Score']):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class LIScore(Serializable['LIScore'], Score):
"""[ALPHA] Evaluates the likelihood of scoring better than some baselines for given objectives.
Parameters
----------
name: str
the name of the score
description: str
the description of the score
objectives: list[Objective]
objectives (e.g., maximize, minimize, tune, etc.)
baselines: list[float]
best-so-far values for the various objectives (there must be one for each objective)
constraints: list[Constraint]
constraints limiting the allowed values that material instances can have
"""
name = properties.String('name')
description = properties.String('description')
baselines = properties.List(properties.Float, 'baselines')
objectives = properties.List(properties.Object(Objective), 'objectives')
constraints = properties.List(properties.Object(Constraint), 'constraints')
typ = properties.String('type', default='MLI')
def __init__(self, name: str, description: str, objectives: List[
Objective], baselines: List[float], constraints: Optional[List[
Constraint]]=None, session: Optional[Session]=None):
self.name: str = name
self.description: str = description
self.objectives: List[Objective] = objectives
self.baselines: List[float] = baselines
self.constraints: List[Constraint] = constraints or []
self.session: Optional[Session] = session
def __str__(self):
return '<LIScore {!r}>'.format(self.name)
class EIScore(Serializable['EIScore'], Score):
"""
[ALPHA] Evaluates the expected magnitude of improvement beyond baselines for given objectives.
Parameters
----------
name: str
the name of the score
description: str
the description of the score
objectives: list[Objective]
objectives (e.g., maximize, minimize, tune, etc.)
baselines: list[float]
best-so-far values for the various objectives (there must be one for each objective)
constraints: list[Constraint]
constraints limiting the allowed values that material instances can have
"""
name = properties.String('name')
description = properties.String('description')
baselines = properties.List(properties.Float, 'baselines')
objectives = properties.List(properties.Object(Objective), 'objectives')
constraints = properties.List(properties.Object(Constraint), 'constraints')
typ = properties.String('type', default='MEI')
def __init__(self, name: str, description: str, objectives: List[
Objective], baselines: List[float], constraints: Optional[List[
Constraint]]=None, session: Optional[Session]=None):
self.name: str = name
self.description: str = description
self.objectives: List[Objective] = objectives
self.baselines: List[float] = baselines
self.constraints: List[Constraint] = constraints or []
self.session: Optional[Session] = session
def __str__(self):
return '<EIScore {!r}>'.format(self.name)
class EVScore(Serializable['EVScore'], Score):
"""
[ALPHA] Evaluates the expected value for given objectives.
Parameters
----------
name: str
the name of the score
description: str
the description of the score
objectives: list[Objective]
objectives (e.g., maximize, minimize, tune, etc.)
constraints: list[Constraint]
constraints limiting the allowed values that material instances can have
"""
name = properties.String('name')
description = properties.String('description')
objectives = properties.List(properties.Object(Objective), 'objectives')
constraints = properties.List(properties.Object(Constraint), 'constraints')
typ = properties.String('type', default='MEV')
def __init__(self, name: str, description: str, objectives: List[
Objective], constraints: Optional[List[Constraint]]=None, session:
Optional[Session]=None):
self.name: str = name
self.description: str = description
self.objectives: List[Objective] = objectives
self.constraints: List[Constraint] = constraints or []
self.session: Optional[Session] = session
def __str__(self):
return '<EVScore {!r}>'.format(self.name)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from typing import List, Optional
from citrine._serialization import properties
from citrine._serialization.polymorphic_serializable import PolymorphicSerializable
from citrine._serialization.serializable import Serializable
from citrine._session import Session
from citrine.informatics.constraints import Constraint
from citrine.informatics.objectives import Objective
__all__ = ['Score', 'LIScore', 'EIScore', 'EVScore']
class Score(PolymorphicSerializable['Score']):
"""[ALPHA] A Citrine Score is used to rank materials according to objectives and constraints.
Abstract type that returns the proper type given a serialized dict.
"""
@classmethod
def get_type(cls, data):
"""Return the subtype."""
return {'MLI': LIScore, 'MEI': EIScore, 'MEV': EVScore}[data['type']]
class LIScore(Serializable['LIScore'], Score):
"""[ALPHA] Evaluates the likelihood of scoring better than some baselines for given objectives.
Parameters
----------
name: str
the name of the score
description: str
the description of the score
objectives: list[Objective]
objectives (e.g., maximize, minimize, tune, etc.)
baselines: list[float]
best-so-far values for the various objectives (there must be one for each objective)
constraints: list[Constraint]
constraints limiting the allowed values that material instances can have
"""
name = properties.String('name')
description = properties.String('description')
baselines = properties.List(properties.Float, 'baselines')
objectives = properties.List(properties.Object(Objective), 'objectives')
constraints = properties.List(properties.Object(Constraint), 'constraints')
typ = properties.String('type', default='MLI')
def __init__(self, name: str, description: str, objectives: List[
Objective], baselines: List[float], constraints: Optional[List[
Constraint]]=None, session: Optional[Session]=None):
self.name: str = name
self.description: str = description
self.objectives: List[Objective] = objectives
self.baselines: List[float] = baselines
self.constraints: List[Constraint] = constraints or []
self.session: Optional[Session] = session
def __str__(self):
return '<LIScore {!r}>'.format(self.name)
class EIScore(Serializable['EIScore'], Score):
"""
[ALPHA] Evaluates the expected magnitude of improvement beyond baselines for given objectives.
Parameters
----------
name: str
the name of the score
description: str
the description of the score
objectives: list[Objective]
objectives (e.g., maximize, minimize, tune, etc.)
baselines: list[float]
best-so-far values for the various objectives (there must be one for each objective)
constraints: list[Constraint]
constraints limiting the allowed values that material instances can have
"""
name = properties.String('name')
description = properties.String('description')
baselines = properties.List(properties.Float, 'baselines')
objectives = properties.List(properties.Object(Objective), 'objectives')
constraints = properties.List(properties.Object(Constraint), 'constraints')
typ = properties.String('type', default='MEI')
def __init__(self, name: str, description: str, objectives: List[
Objective], baselines: List[float], constraints: Optional[List[
Constraint]]=None, session: Optional[Session]=None):
self.name: str = name
self.description: str = description
self.objectives: List[Objective] = objectives
self.baselines: List[float] = baselines
self.constraints: List[Constraint] = constraints or []
self.session: Optional[Session] = session
def __str__(self):
return '<EIScore {!r}>'.format(self.name)
class EVScore(Serializable['EVScore'], Score):
"""
[ALPHA] Evaluates the expected value for given objectives.
Parameters
----------
name: str
the name of the score
description: str
the description of the score
objectives: list[Objective]
objectives (e.g., maximize, minimize, tune, etc.)
constraints: list[Constraint]
constraints limiting the allowed values that material instances can have
"""
name = properties.String('name')
description = properties.String('description')
objectives = properties.List(properties.Object(Objective), 'objectives')
constraints = properties.List(properties.Object(Constraint), 'constraints')
typ = properties.String('type', default='MEV')
def __init__(self, name: str, description: str, objectives: List[
Objective], constraints: Optional[List[Constraint]]=None, session:
Optional[Session]=None):
self.name: str = name
self.description: str = description
self.objectives: List[Objective] = objectives
self.constraints: List[Constraint] = constraints or []
self.session: Optional[Session] = session
def __str__(self):
return '<EVScore {!r}>'.format(self.name)
<|reserved_special_token_1|>
"""Tools for working with Scores."""
from typing import List, Optional
from citrine._serialization import properties
from citrine._serialization.polymorphic_serializable import PolymorphicSerializable
from citrine._serialization.serializable import Serializable
from citrine._session import Session
from citrine.informatics.constraints import Constraint
from citrine.informatics.objectives import Objective
__all__ = ['Score', 'LIScore', 'EIScore', 'EVScore']
class Score(PolymorphicSerializable['Score']):
"""[ALPHA] A Citrine Score is used to rank materials according to objectives and constraints.
Abstract type that returns the proper type given a serialized dict.
"""
@classmethod
def get_type(cls, data):
"""Return the subtype."""
return {
'MLI': LIScore,
'MEI': EIScore,
'MEV': EVScore
}[data['type']]
class LIScore(Serializable['LIScore'], Score):
"""[ALPHA] Evaluates the likelihood of scoring better than some baselines for given objectives.
Parameters
----------
name: str
the name of the score
description: str
the description of the score
objectives: list[Objective]
objectives (e.g., maximize, minimize, tune, etc.)
baselines: list[float]
best-so-far values for the various objectives (there must be one for each objective)
constraints: list[Constraint]
constraints limiting the allowed values that material instances can have
"""
name = properties.String('name')
description = properties.String('description')
baselines = properties.List(properties.Float, 'baselines')
objectives = properties.List(properties.Object(Objective), 'objectives')
constraints = properties.List(properties.Object(Constraint), 'constraints')
typ = properties.String('type', default='MLI')
def __init__(self,
name: str,
description: str,
objectives: List[Objective],
baselines: List[float],
constraints: Optional[List[Constraint]] = None,
session: Optional[Session] = None):
self.name: str = name
self.description: str = description
self.objectives: List[Objective] = objectives
self.baselines: List[float] = baselines
self.constraints: List[Constraint] = constraints or []
self.session: Optional[Session] = session
def __str__(self):
return '<LIScore {!r}>'.format(self.name)
class EIScore(Serializable['EIScore'], Score):
"""
[ALPHA] Evaluates the expected magnitude of improvement beyond baselines for given objectives.
Parameters
----------
name: str
the name of the score
description: str
the description of the score
objectives: list[Objective]
objectives (e.g., maximize, minimize, tune, etc.)
baselines: list[float]
best-so-far values for the various objectives (there must be one for each objective)
constraints: list[Constraint]
constraints limiting the allowed values that material instances can have
"""
name = properties.String('name')
description = properties.String('description')
baselines = properties.List(properties.Float, 'baselines')
objectives = properties.List(properties.Object(Objective), 'objectives')
constraints = properties.List(properties.Object(Constraint), 'constraints')
typ = properties.String('type', default='MEI')
def __init__(self,
name: str,
description: str,
objectives: List[Objective],
baselines: List[float],
constraints: Optional[List[Constraint]] = None,
session: Optional[Session] = None):
self.name: str = name
self.description: str = description
self.objectives: List[Objective] = objectives
self.baselines: List[float] = baselines
self.constraints: List[Constraint] = constraints or []
self.session: Optional[Session] = session
def __str__(self):
return '<EIScore {!r}>'.format(self.name)
class EVScore(Serializable['EVScore'], Score):
    """[ALPHA] Score that ranks candidates by the expected value of the objectives.

    Parameters
    ----------
    name: str
        the name of the score
    description: str
        the description of the score
    objectives: list[Objective]
        objectives (e.g., maximize, minimize, tune, etc.)
    constraints: list[Constraint]
        constraints limiting the allowed values that material instances can have

    """

    # Serialized-field descriptors; declaration order mirrors the other scores.
    name = properties.String('name')
    description = properties.String('description')
    objectives = properties.List(properties.Object(Objective), 'objectives')
    constraints = properties.List(properties.Object(Constraint), 'constraints')
    # 'MEV' tags this score type in its serialized form.
    typ = properties.String('type', default='MEV')

    def __init__(self,
                 name: str,
                 description: str,
                 objectives: List[Objective],
                 constraints: Optional[List[Constraint]] = None,
                 session: Optional[Session] = None):
        """Record the score's configuration on the instance."""
        self.name = name
        self.description = description
        self.objectives = objectives
        # An omitted constraint list means "no constraints".
        self.constraints = constraints or []
        self.session = session

    def __str__(self):
        """Return a short, repr-quoted tag for this score."""
        return f'<EVScore {self.name!r}>'
|
flexible
|
{
"blob_id": "a0086a9d27a091776378cd8bde31c59899fc07ac",
"index": 3122,
"step-1": "<mask token>\n\n\nclass LIScore(Serializable['LIScore'], Score):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __str__(self):\n return '<LIScore {!r}>'.format(self.name)\n\n\nclass EIScore(Serializable['EIScore'], Score):\n \"\"\"\n [ALPHA] Evaluates the expected magnitude of improvement beyond baselines for given objectives.\n\n Parameters\n ----------\n name: str\n the name of the score\n description: str\n the description of the score\n objectives: list[Objective]\n objectives (e.g., maximize, minimize, tune, etc.)\n baselines: list[float]\n best-so-far values for the various objectives (there must be one for each objective)\n constraints: list[Constraint]\n constraints limiting the allowed values that material instances can have\n\n \"\"\"\n name = properties.String('name')\n description = properties.String('description')\n baselines = properties.List(properties.Float, 'baselines')\n objectives = properties.List(properties.Object(Objective), 'objectives')\n constraints = properties.List(properties.Object(Constraint), 'constraints')\n typ = properties.String('type', default='MEI')\n\n def __init__(self, name: str, description: str, objectives: List[\n Objective], baselines: List[float], constraints: Optional[List[\n Constraint]]=None, session: Optional[Session]=None):\n self.name: str = name\n self.description: str = description\n self.objectives: List[Objective] = objectives\n self.baselines: List[float] = baselines\n self.constraints: List[Constraint] = constraints or []\n self.session: Optional[Session] = session\n\n def __str__(self):\n return '<EIScore {!r}>'.format(self.name)\n\n\nclass EVScore(Serializable['EVScore'], Score):\n \"\"\"\n [ALPHA] Evaluates the expected value for given objectives.\n\n Parameters\n ----------\n name: str\n the name of the score\n description: str\n the description of the score\n objectives: list[Objective]\n objectives (e.g., 
maximize, minimize, tune, etc.)\n constraints: list[Constraint]\n constraints limiting the allowed values that material instances can have\n\n \"\"\"\n name = properties.String('name')\n description = properties.String('description')\n objectives = properties.List(properties.Object(Objective), 'objectives')\n constraints = properties.List(properties.Object(Constraint), 'constraints')\n typ = properties.String('type', default='MEV')\n\n def __init__(self, name: str, description: str, objectives: List[\n Objective], constraints: Optional[List[Constraint]]=None, session:\n Optional[Session]=None):\n self.name: str = name\n self.description: str = description\n self.objectives: List[Objective] = objectives\n self.constraints: List[Constraint] = constraints or []\n self.session: Optional[Session] = session\n\n def __str__(self):\n return '<EVScore {!r}>'.format(self.name)\n",
"step-2": "<mask token>\n\n\nclass LIScore(Serializable['LIScore'], Score):\n <mask token>\n name = properties.String('name')\n description = properties.String('description')\n baselines = properties.List(properties.Float, 'baselines')\n objectives = properties.List(properties.Object(Objective), 'objectives')\n constraints = properties.List(properties.Object(Constraint), 'constraints')\n typ = properties.String('type', default='MLI')\n\n def __init__(self, name: str, description: str, objectives: List[\n Objective], baselines: List[float], constraints: Optional[List[\n Constraint]]=None, session: Optional[Session]=None):\n self.name: str = name\n self.description: str = description\n self.objectives: List[Objective] = objectives\n self.baselines: List[float] = baselines\n self.constraints: List[Constraint] = constraints or []\n self.session: Optional[Session] = session\n\n def __str__(self):\n return '<LIScore {!r}>'.format(self.name)\n\n\nclass EIScore(Serializable['EIScore'], Score):\n \"\"\"\n [ALPHA] Evaluates the expected magnitude of improvement beyond baselines for given objectives.\n\n Parameters\n ----------\n name: str\n the name of the score\n description: str\n the description of the score\n objectives: list[Objective]\n objectives (e.g., maximize, minimize, tune, etc.)\n baselines: list[float]\n best-so-far values for the various objectives (there must be one for each objective)\n constraints: list[Constraint]\n constraints limiting the allowed values that material instances can have\n\n \"\"\"\n name = properties.String('name')\n description = properties.String('description')\n baselines = properties.List(properties.Float, 'baselines')\n objectives = properties.List(properties.Object(Objective), 'objectives')\n constraints = properties.List(properties.Object(Constraint), 'constraints')\n typ = properties.String('type', default='MEI')\n\n def __init__(self, name: str, description: str, objectives: List[\n Objective], baselines: List[float], 
constraints: Optional[List[\n Constraint]]=None, session: Optional[Session]=None):\n self.name: str = name\n self.description: str = description\n self.objectives: List[Objective] = objectives\n self.baselines: List[float] = baselines\n self.constraints: List[Constraint] = constraints or []\n self.session: Optional[Session] = session\n\n def __str__(self):\n return '<EIScore {!r}>'.format(self.name)\n\n\nclass EVScore(Serializable['EVScore'], Score):\n \"\"\"\n [ALPHA] Evaluates the expected value for given objectives.\n\n Parameters\n ----------\n name: str\n the name of the score\n description: str\n the description of the score\n objectives: list[Objective]\n objectives (e.g., maximize, minimize, tune, etc.)\n constraints: list[Constraint]\n constraints limiting the allowed values that material instances can have\n\n \"\"\"\n name = properties.String('name')\n description = properties.String('description')\n objectives = properties.List(properties.Object(Objective), 'objectives')\n constraints = properties.List(properties.Object(Constraint), 'constraints')\n typ = properties.String('type', default='MEV')\n\n def __init__(self, name: str, description: str, objectives: List[\n Objective], constraints: Optional[List[Constraint]]=None, session:\n Optional[Session]=None):\n self.name: str = name\n self.description: str = description\n self.objectives: List[Objective] = objectives\n self.constraints: List[Constraint] = constraints or []\n self.session: Optional[Session] = session\n\n def __str__(self):\n return '<EVScore {!r}>'.format(self.name)\n",
"step-3": "<mask token>\n\n\nclass Score(PolymorphicSerializable['Score']):\n <mask token>\n <mask token>\n\n\nclass LIScore(Serializable['LIScore'], Score):\n \"\"\"[ALPHA] Evaluates the likelihood of scoring better than some baselines for given objectives.\n\n Parameters\n ----------\n name: str\n the name of the score\n description: str\n the description of the score\n objectives: list[Objective]\n objectives (e.g., maximize, minimize, tune, etc.)\n baselines: list[float]\n best-so-far values for the various objectives (there must be one for each objective)\n constraints: list[Constraint]\n constraints limiting the allowed values that material instances can have\n\n \"\"\"\n name = properties.String('name')\n description = properties.String('description')\n baselines = properties.List(properties.Float, 'baselines')\n objectives = properties.List(properties.Object(Objective), 'objectives')\n constraints = properties.List(properties.Object(Constraint), 'constraints')\n typ = properties.String('type', default='MLI')\n\n def __init__(self, name: str, description: str, objectives: List[\n Objective], baselines: List[float], constraints: Optional[List[\n Constraint]]=None, session: Optional[Session]=None):\n self.name: str = name\n self.description: str = description\n self.objectives: List[Objective] = objectives\n self.baselines: List[float] = baselines\n self.constraints: List[Constraint] = constraints or []\n self.session: Optional[Session] = session\n\n def __str__(self):\n return '<LIScore {!r}>'.format(self.name)\n\n\nclass EIScore(Serializable['EIScore'], Score):\n \"\"\"\n [ALPHA] Evaluates the expected magnitude of improvement beyond baselines for given objectives.\n\n Parameters\n ----------\n name: str\n the name of the score\n description: str\n the description of the score\n objectives: list[Objective]\n objectives (e.g., maximize, minimize, tune, etc.)\n baselines: list[float]\n best-so-far values for the various objectives (there must be one for each 
objective)\n constraints: list[Constraint]\n constraints limiting the allowed values that material instances can have\n\n \"\"\"\n name = properties.String('name')\n description = properties.String('description')\n baselines = properties.List(properties.Float, 'baselines')\n objectives = properties.List(properties.Object(Objective), 'objectives')\n constraints = properties.List(properties.Object(Constraint), 'constraints')\n typ = properties.String('type', default='MEI')\n\n def __init__(self, name: str, description: str, objectives: List[\n Objective], baselines: List[float], constraints: Optional[List[\n Constraint]]=None, session: Optional[Session]=None):\n self.name: str = name\n self.description: str = description\n self.objectives: List[Objective] = objectives\n self.baselines: List[float] = baselines\n self.constraints: List[Constraint] = constraints or []\n self.session: Optional[Session] = session\n\n def __str__(self):\n return '<EIScore {!r}>'.format(self.name)\n\n\nclass EVScore(Serializable['EVScore'], Score):\n \"\"\"\n [ALPHA] Evaluates the expected value for given objectives.\n\n Parameters\n ----------\n name: str\n the name of the score\n description: str\n the description of the score\n objectives: list[Objective]\n objectives (e.g., maximize, minimize, tune, etc.)\n constraints: list[Constraint]\n constraints limiting the allowed values that material instances can have\n\n \"\"\"\n name = properties.String('name')\n description = properties.String('description')\n objectives = properties.List(properties.Object(Objective), 'objectives')\n constraints = properties.List(properties.Object(Constraint), 'constraints')\n typ = properties.String('type', default='MEV')\n\n def __init__(self, name: str, description: str, objectives: List[\n Objective], constraints: Optional[List[Constraint]]=None, session:\n Optional[Session]=None):\n self.name: str = name\n self.description: str = description\n self.objectives: List[Objective] = objectives\n 
self.constraints: List[Constraint] = constraints or []\n self.session: Optional[Session] = session\n\n def __str__(self):\n return '<EVScore {!r}>'.format(self.name)\n",
"step-4": "<mask token>\nfrom typing import List, Optional\nfrom citrine._serialization import properties\nfrom citrine._serialization.polymorphic_serializable import PolymorphicSerializable\nfrom citrine._serialization.serializable import Serializable\nfrom citrine._session import Session\nfrom citrine.informatics.constraints import Constraint\nfrom citrine.informatics.objectives import Objective\n__all__ = ['Score', 'LIScore', 'EIScore', 'EVScore']\n\n\nclass Score(PolymorphicSerializable['Score']):\n \"\"\"[ALPHA] A Citrine Score is used to rank materials according to objectives and constraints.\n\n Abstract type that returns the proper type given a serialized dict.\n\n\n \"\"\"\n\n @classmethod\n def get_type(cls, data):\n \"\"\"Return the subtype.\"\"\"\n return {'MLI': LIScore, 'MEI': EIScore, 'MEV': EVScore}[data['type']]\n\n\nclass LIScore(Serializable['LIScore'], Score):\n \"\"\"[ALPHA] Evaluates the likelihood of scoring better than some baselines for given objectives.\n\n Parameters\n ----------\n name: str\n the name of the score\n description: str\n the description of the score\n objectives: list[Objective]\n objectives (e.g., maximize, minimize, tune, etc.)\n baselines: list[float]\n best-so-far values for the various objectives (there must be one for each objective)\n constraints: list[Constraint]\n constraints limiting the allowed values that material instances can have\n\n \"\"\"\n name = properties.String('name')\n description = properties.String('description')\n baselines = properties.List(properties.Float, 'baselines')\n objectives = properties.List(properties.Object(Objective), 'objectives')\n constraints = properties.List(properties.Object(Constraint), 'constraints')\n typ = properties.String('type', default='MLI')\n\n def __init__(self, name: str, description: str, objectives: List[\n Objective], baselines: List[float], constraints: Optional[List[\n Constraint]]=None, session: Optional[Session]=None):\n self.name: str = name\n 
self.description: str = description\n self.objectives: List[Objective] = objectives\n self.baselines: List[float] = baselines\n self.constraints: List[Constraint] = constraints or []\n self.session: Optional[Session] = session\n\n def __str__(self):\n return '<LIScore {!r}>'.format(self.name)\n\n\nclass EIScore(Serializable['EIScore'], Score):\n \"\"\"\n [ALPHA] Evaluates the expected magnitude of improvement beyond baselines for given objectives.\n\n Parameters\n ----------\n name: str\n the name of the score\n description: str\n the description of the score\n objectives: list[Objective]\n objectives (e.g., maximize, minimize, tune, etc.)\n baselines: list[float]\n best-so-far values for the various objectives (there must be one for each objective)\n constraints: list[Constraint]\n constraints limiting the allowed values that material instances can have\n\n \"\"\"\n name = properties.String('name')\n description = properties.String('description')\n baselines = properties.List(properties.Float, 'baselines')\n objectives = properties.List(properties.Object(Objective), 'objectives')\n constraints = properties.List(properties.Object(Constraint), 'constraints')\n typ = properties.String('type', default='MEI')\n\n def __init__(self, name: str, description: str, objectives: List[\n Objective], baselines: List[float], constraints: Optional[List[\n Constraint]]=None, session: Optional[Session]=None):\n self.name: str = name\n self.description: str = description\n self.objectives: List[Objective] = objectives\n self.baselines: List[float] = baselines\n self.constraints: List[Constraint] = constraints or []\n self.session: Optional[Session] = session\n\n def __str__(self):\n return '<EIScore {!r}>'.format(self.name)\n\n\nclass EVScore(Serializable['EVScore'], Score):\n \"\"\"\n [ALPHA] Evaluates the expected value for given objectives.\n\n Parameters\n ----------\n name: str\n the name of the score\n description: str\n the description of the score\n objectives: 
list[Objective]\n objectives (e.g., maximize, minimize, tune, etc.)\n constraints: list[Constraint]\n constraints limiting the allowed values that material instances can have\n\n \"\"\"\n name = properties.String('name')\n description = properties.String('description')\n objectives = properties.List(properties.Object(Objective), 'objectives')\n constraints = properties.List(properties.Object(Constraint), 'constraints')\n typ = properties.String('type', default='MEV')\n\n def __init__(self, name: str, description: str, objectives: List[\n Objective], constraints: Optional[List[Constraint]]=None, session:\n Optional[Session]=None):\n self.name: str = name\n self.description: str = description\n self.objectives: List[Objective] = objectives\n self.constraints: List[Constraint] = constraints or []\n self.session: Optional[Session] = session\n\n def __str__(self):\n return '<EVScore {!r}>'.format(self.name)\n",
"step-5": "\"\"\"Tools for working with Scores.\"\"\"\nfrom typing import List, Optional\n\nfrom citrine._serialization import properties\nfrom citrine._serialization.polymorphic_serializable import PolymorphicSerializable\nfrom citrine._serialization.serializable import Serializable\nfrom citrine._session import Session\nfrom citrine.informatics.constraints import Constraint\nfrom citrine.informatics.objectives import Objective\n\n__all__ = ['Score', 'LIScore', 'EIScore', 'EVScore']\n\n\nclass Score(PolymorphicSerializable['Score']):\n \"\"\"[ALPHA] A Citrine Score is used to rank materials according to objectives and constraints.\n\n Abstract type that returns the proper type given a serialized dict.\n\n\n \"\"\"\n\n @classmethod\n def get_type(cls, data):\n \"\"\"Return the subtype.\"\"\"\n return {\n 'MLI': LIScore,\n 'MEI': EIScore,\n 'MEV': EVScore\n }[data['type']]\n\n\nclass LIScore(Serializable['LIScore'], Score):\n \"\"\"[ALPHA] Evaluates the likelihood of scoring better than some baselines for given objectives.\n\n Parameters\n ----------\n name: str\n the name of the score\n description: str\n the description of the score\n objectives: list[Objective]\n objectives (e.g., maximize, minimize, tune, etc.)\n baselines: list[float]\n best-so-far values for the various objectives (there must be one for each objective)\n constraints: list[Constraint]\n constraints limiting the allowed values that material instances can have\n\n \"\"\"\n\n name = properties.String('name')\n description = properties.String('description')\n baselines = properties.List(properties.Float, 'baselines')\n objectives = properties.List(properties.Object(Objective), 'objectives')\n constraints = properties.List(properties.Object(Constraint), 'constraints')\n typ = properties.String('type', default='MLI')\n\n def __init__(self,\n name: str,\n description: str,\n objectives: List[Objective],\n baselines: List[float],\n constraints: Optional[List[Constraint]] = None,\n session: 
Optional[Session] = None):\n self.name: str = name\n self.description: str = description\n self.objectives: List[Objective] = objectives\n self.baselines: List[float] = baselines\n self.constraints: List[Constraint] = constraints or []\n self.session: Optional[Session] = session\n\n def __str__(self):\n return '<LIScore {!r}>'.format(self.name)\n\n\nclass EIScore(Serializable['EIScore'], Score):\n \"\"\"\n [ALPHA] Evaluates the expected magnitude of improvement beyond baselines for given objectives.\n\n Parameters\n ----------\n name: str\n the name of the score\n description: str\n the description of the score\n objectives: list[Objective]\n objectives (e.g., maximize, minimize, tune, etc.)\n baselines: list[float]\n best-so-far values for the various objectives (there must be one for each objective)\n constraints: list[Constraint]\n constraints limiting the allowed values that material instances can have\n\n \"\"\"\n\n name = properties.String('name')\n description = properties.String('description')\n baselines = properties.List(properties.Float, 'baselines')\n objectives = properties.List(properties.Object(Objective), 'objectives')\n constraints = properties.List(properties.Object(Constraint), 'constraints')\n typ = properties.String('type', default='MEI')\n\n def __init__(self,\n name: str,\n description: str,\n objectives: List[Objective],\n baselines: List[float],\n constraints: Optional[List[Constraint]] = None,\n session: Optional[Session] = None):\n self.name: str = name\n self.description: str = description\n self.objectives: List[Objective] = objectives\n self.baselines: List[float] = baselines\n self.constraints: List[Constraint] = constraints or []\n self.session: Optional[Session] = session\n\n def __str__(self):\n return '<EIScore {!r}>'.format(self.name)\n\n\nclass EVScore(Serializable['EVScore'], Score):\n \"\"\"\n [ALPHA] Evaluates the expected value for given objectives.\n\n Parameters\n ----------\n name: str\n the name of the score\n 
description: str\n the description of the score\n objectives: list[Objective]\n objectives (e.g., maximize, minimize, tune, etc.)\n constraints: list[Constraint]\n constraints limiting the allowed values that material instances can have\n\n \"\"\"\n\n name = properties.String('name')\n description = properties.String('description')\n objectives = properties.List(properties.Object(Objective), 'objectives')\n constraints = properties.List(properties.Object(Constraint), 'constraints')\n typ = properties.String('type', default='MEV')\n\n def __init__(self,\n name: str,\n description: str,\n objectives: List[Objective],\n constraints: Optional[List[Constraint]] = None,\n session: Optional[Session] = None):\n self.name: str = name\n self.description: str = description\n self.objectives: List[Objective] = objectives\n self.constraints: List[Constraint] = constraints or []\n self.session: Optional[Session] = session\n\n def __str__(self):\n return '<EVScore {!r}>'.format(self.name)\n",
"step-ids": [
12,
14,
16,
20,
21
]
}
|
[
12,
14,
16,
20,
21
] |
# coding: utf-8
# In[1]:
import matplotlib.pyplot as plt
import matplotlib
import pandas as pd
import numpy as np
from pandas import DataFrame, Series
import os, re
# In[2]:
# Output workbook for the monthly in-hospital drug usage summary.
OUTPUT_EXCEL = '월별원내약품사용현황.xlsx'
# In[3]:
# Prepare the dataset: read every monthly usage workbook in the source
# directory and tag each row with the month taken from the file name
# (presumably 'YYYY-MM' — it is later compared against 사용개시년월, which is
# built in that format below; confirm the file-naming convention).
data_source_dir = '사용량월별통계/원내'
dfs = []
for fname in os.listdir(data_source_dir):
    fn, ext = os.path.splitext(fname)
    if ext in ['.xls', '.xlsx']:
        df = pd.read_excel(os.path.join(data_source_dir, fname))
        df['사용(개시)년월'] = fn  # month key = workbook file name without extension
        dfs.append(df)
use_amount_df = pd.concat(dfs, ignore_index=True)
# In[4]:
# Enrich the usage rows with the drug master data (drug.json, transposed so
# drugs are rows) and the in-house drug information sheet, left-joining on
# the drug code and then on the insurance (EDI) code.
drug_standard_df = pd.read_json('drug.json').T
drug_info_df = pd.read_excel('약품정보.xls')
use_amount_df = pd.merge(drug_info_df, use_amount_df[['사용량', '약품코드', '사용(개시)년월']], on='약품코드', how='left')
use_amount_df = pd.merge(use_amount_df, drug_standard_df[['보험코드', '제품명', '판매사', '성분/함량']], left_on='EDI코드', right_on='보험코드', how='left')
# Fill gaps from the left joins with in-house fallbacks: product name,
# start month derived from the fee start date, and ingredient name.
use_amount_df['제품명'] = use_amount_df['제품명'].fillna(use_amount_df['약품명(한글)'])
# First 4 chars = year, next 2 = month (assumes a YYYYMMDD-style 수가시작일자).
use_amount_df['사용개시년월'] = use_amount_df['수가시작일자'].map(lambda x: str(x)[0:4]+'-'+str(x)[4:6])
use_amount_df['사용(개시)년월'] = use_amount_df['사용(개시)년월'].fillna(use_amount_df['사용개시년월'])
use_amount_df['성분명'] = use_amount_df['성분명'].fillna(use_amount_df['성분/함량'])
# Decode the numeric prescription-type and legal-class codes into labels.
use_amount_df['원내/원외 처방구분'] = use_amount_df['원내/원외 처방구분'].map({1: '원외', 2: '원외/원내', 3: '원내'})
use_amount_df['약품법적구분'] = use_amount_df['약품법적구분'].map({0: '일반', 1: '마약', 2: '향정약', 3: '독약', 4: '한방약', 5: '고가약'})
# In[5]:
def get_last(s):
    """Return the maximum element of *s*, or *s* itself when no maximum exists.

    Used as a groupby transform to find the latest usage month per product.
    Falls back to the input unchanged for empty or non-iterable groups.
    """
    try:
        return max(s)
    except (TypeError, ValueError):
        # TypeError: s is not iterable / elements are not comparable.
        # ValueError: s is an empty iterable.
        # (Was a bare `except:`, which also swallowed KeyboardInterrupt.)
        return s
# In[6]:
# Distinct usage months ranked newest first (index 0 = most recent month).
# `reverse=True` replaces the non-idiomatic `reverse=1`.
months = use_amount_df['사용(개시)년월'].unique()
months = sorted(months.tolist(), reverse=True)
# Latest month in which each product was used at all.
use_amount_df['최후사용월'] = use_amount_df.groupby(['제품명'])['사용(개시)년월'].transform(get_last)
# Months since last use: position of the last-used month in the newest-first
# ranking, or -1 when the month is unknown.  A precomputed dict lookup
# replaces the original per-row `x in months` + `months.index(x)` double
# O(n) scan; behavior is identical because the months are unique.
month_rank = {m: i for i, m in enumerate(months)}
use_amount_df['최근미사용월수'] = use_amount_df['최후사용월'].map(lambda x: month_rank.get(x, -1))
# In[7]:
# Keep only rows that can be dispensed in-house ('원외' = outpatient-only is
# excluded).  `.copy()` detaches the slice so the column assignments below
# modify a real frame instead of triggering SettingWithCopy (where they
# could silently fail to stick).
use_amount_in_df = use_amount_df[use_amount_df['원내/원외 처방구분'] != '원외'].copy()
# In[8]:
# NOTE(review): this puts a marker *string* into an otherwise numeric usage
# column, so the `aggfunc=sum` pivot below mixes types — confirm the pivot
# output is as intended for never-used drugs.
use_amount_in_df['사용량'] = use_amount_in_df['사용량'].fillna('오픈후미사용')
# In[9]:
# Regex stripping parenthesized annotations and incentive-program suffixes
# (퇴장방지/생산원가보전/사용장려…) from the drug name.  Raw string avoids
# invalid-escape SyntaxWarnings for \( \d \s on modern Python.
pat = r'(\(([^\d].*?)\)+\s*)|퇴장방지\s*|생산원가보전,*\s*|사용장려(비\s*\d+원|및|비용지급,*\s*)'
use_amount_in_df = use_amount_in_df.rename(columns={'제품명': '약품명(드럭인포)', '약품명(한글)': '약품명(원내)'})
# `regex=True` is explicit: pandas >= 2.0 defaults str.replace to literal
# matching, which would leave the pattern unapplied.
use_amount_in_df['약품명(드럭인포)'] = use_amount_in_df['약품명(드럭인포)'].str.replace(pat, '', regex=True)
# In[10]:
# One row per drug, one column per month, summed usage in the cells.
pvt = use_amount_in_df.pivot_table(index=['EDI코드', '약품명(드럭인포)', '성분명', '약품코드', '약품명(원내)', '효능코드명', '규격단위', '최근미사용월수'], columns=['사용(개시)년월'], values=['사용량'], aggfunc=sum)
# In[11]:
pvt.to_excel(OUTPUT_EXCEL)
os.startfile(OUTPUT_EXCEL)  # Windows-only: open the result in Excel
# In[ ]:
|
normal
|
{
"blob_id": "16b425d7b8cde1aabe038ccae6922091afb84415",
"index": 411,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_last(s):\n try:\n return max(s)\n except:\n return s\n\n\n<mask token>\n",
"step-3": "<mask token>\nOUTPUT_EXCEL = '월별원내약품사용현황.xlsx'\ndata_source_dir = '사용량월별통계/원내'\ndfs = []\nfor fname in os.listdir(data_source_dir):\n fn, ext = os.path.splitext(fname)\n if ext in ['.xls', '.xlsx']:\n df = pd.read_excel(os.path.join(data_source_dir, fname))\n df['사용(개시)년월'] = fn\n dfs.append(df)\nuse_amount_df = pd.concat(dfs, ignore_index=True)\ndrug_standard_df = pd.read_json('drug.json').T\ndrug_info_df = pd.read_excel('약품정보.xls')\nuse_amount_df = pd.merge(drug_info_df, use_amount_df[['사용량', '약품코드',\n '사용(개시)년월']], on='약품코드', how='left')\nuse_amount_df = pd.merge(use_amount_df, drug_standard_df[['보험코드', '제품명',\n '판매사', '성분/함량']], left_on='EDI코드', right_on='보험코드', how='left')\nuse_amount_df['제품명'] = use_amount_df['제품명'].fillna(use_amount_df['약품명(한글)'])\nuse_amount_df['사용개시년월'] = use_amount_df['수가시작일자'].map(lambda x: str(x)[0:4] +\n '-' + str(x)[4:6])\nuse_amount_df['사용(개시)년월'] = use_amount_df['사용(개시)년월'].fillna(use_amount_df[\n '사용개시년월'])\nuse_amount_df['성분명'] = use_amount_df['성분명'].fillna(use_amount_df['성분/함량'])\nuse_amount_df['원내/원외 처방구분'] = use_amount_df['원내/원외 처방구분'].map({(1): '원외', (\n 2): '원외/원내', (3): '원내'})\nuse_amount_df['약품법적구분'] = use_amount_df['약품법적구분'].map({(0): '일반', (1): '마약',\n (2): '향정약', (3): '독약', (4): '한방약', (5): '고가약'})\n\n\ndef get_last(s):\n try:\n return max(s)\n except:\n return s\n\n\nmonths = use_amount_df['사용(개시)년월'].unique()\nmonths = sorted(months.tolist(), reverse=1)\nuse_amount_df['최후사용월'] = use_amount_df.groupby(['제품명'])['사용(개시)년월'].transform(\n get_last)\nuse_amount_df['최근미사용월수'] = use_amount_df['최후사용월'].map(lambda x: months.\n index(x) if x in months else -1)\nuse_amount_in_df = use_amount_df[use_amount_df['원내/원외 처방구분'] != '원외']\nuse_amount_in_df['사용량'] = use_amount_in_df['사용량'].fillna('오픈후미사용')\npat = (\n '(\\\\(([^\\\\d].*?)\\\\)+\\\\s*)|퇴장방지\\\\s*|생산원가보전,*\\\\s*|사용장려(비\\\\s*\\\\d+원|및|비용지급,*\\\\s*)'\n )\nuse_amount_in_df = use_amount_in_df.rename(columns={'제품명': '약품명(드럭인포)',\n '약품명(한글)': 
'약품명(원내)'})\nuse_amount_in_df['약품명(드럭인포)'] = use_amount_in_df['약품명(드럭인포)'].str.replace(pat,\n '')\npvt = use_amount_in_df.pivot_table(index=['EDI코드', '약품명(드럭인포)', '성분명',\n '약품코드', '약품명(원내)', '효능코드명', '규격단위', '최근미사용월수'], columns=['사용(개시)년월'],\n values=['사용량'], aggfunc=sum)\npvt.to_excel(OUTPUT_EXCEL)\nos.startfile(OUTPUT_EXCEL)\n",
"step-4": "import matplotlib.pyplot as plt\nimport matplotlib\nimport pandas as pd\nimport numpy as np\nfrom pandas import DataFrame, Series\nimport os, re\nOUTPUT_EXCEL = '월별원내약품사용현황.xlsx'\ndata_source_dir = '사용량월별통계/원내'\ndfs = []\nfor fname in os.listdir(data_source_dir):\n fn, ext = os.path.splitext(fname)\n if ext in ['.xls', '.xlsx']:\n df = pd.read_excel(os.path.join(data_source_dir, fname))\n df['사용(개시)년월'] = fn\n dfs.append(df)\nuse_amount_df = pd.concat(dfs, ignore_index=True)\ndrug_standard_df = pd.read_json('drug.json').T\ndrug_info_df = pd.read_excel('약품정보.xls')\nuse_amount_df = pd.merge(drug_info_df, use_amount_df[['사용량', '약품코드',\n '사용(개시)년월']], on='약품코드', how='left')\nuse_amount_df = pd.merge(use_amount_df, drug_standard_df[['보험코드', '제품명',\n '판매사', '성분/함량']], left_on='EDI코드', right_on='보험코드', how='left')\nuse_amount_df['제품명'] = use_amount_df['제품명'].fillna(use_amount_df['약품명(한글)'])\nuse_amount_df['사용개시년월'] = use_amount_df['수가시작일자'].map(lambda x: str(x)[0:4] +\n '-' + str(x)[4:6])\nuse_amount_df['사용(개시)년월'] = use_amount_df['사용(개시)년월'].fillna(use_amount_df[\n '사용개시년월'])\nuse_amount_df['성분명'] = use_amount_df['성분명'].fillna(use_amount_df['성분/함량'])\nuse_amount_df['원내/원외 처방구분'] = use_amount_df['원내/원외 처방구분'].map({(1): '원외', (\n 2): '원외/원내', (3): '원내'})\nuse_amount_df['약품법적구분'] = use_amount_df['약품법적구분'].map({(0): '일반', (1): '마약',\n (2): '향정약', (3): '독약', (4): '한방약', (5): '고가약'})\n\n\ndef get_last(s):\n try:\n return max(s)\n except:\n return s\n\n\nmonths = use_amount_df['사용(개시)년월'].unique()\nmonths = sorted(months.tolist(), reverse=1)\nuse_amount_df['최후사용월'] = use_amount_df.groupby(['제품명'])['사용(개시)년월'].transform(\n get_last)\nuse_amount_df['최근미사용월수'] = use_amount_df['최후사용월'].map(lambda x: months.\n index(x) if x in months else -1)\nuse_amount_in_df = use_amount_df[use_amount_df['원내/원외 처방구분'] != '원외']\nuse_amount_in_df['사용량'] = use_amount_in_df['사용량'].fillna('오픈후미사용')\npat = (\n 
'(\\\\(([^\\\\d].*?)\\\\)+\\\\s*)|퇴장방지\\\\s*|생산원가보전,*\\\\s*|사용장려(비\\\\s*\\\\d+원|및|비용지급,*\\\\s*)'\n )\nuse_amount_in_df = use_amount_in_df.rename(columns={'제품명': '약품명(드럭인포)',\n '약품명(한글)': '약품명(원내)'})\nuse_amount_in_df['약품명(드럭인포)'] = use_amount_in_df['약품명(드럭인포)'].str.replace(pat,\n '')\npvt = use_amount_in_df.pivot_table(index=['EDI코드', '약품명(드럭인포)', '성분명',\n '약품코드', '약품명(원내)', '효능코드명', '규격단위', '최근미사용월수'], columns=['사용(개시)년월'],\n values=['사용량'], aggfunc=sum)\npvt.to_excel(OUTPUT_EXCEL)\nos.startfile(OUTPUT_EXCEL)\n",
"step-5": "\n# coding: utf-8\n\n# In[1]:\n\nimport matplotlib.pyplot as plt\nimport matplotlib\nimport pandas as pd\nimport numpy as np\nfrom pandas import DataFrame, Series\nimport os, re\n\n\n# In[2]:\n\nOUTPUT_EXCEL = '월별원내약품사용현황.xlsx'\n\n\n# In[3]:\n\n# 데이타셋 준비\ndata_source_dir = '사용량월별통계/원내'\ndfs = []\nfor fname in os.listdir(data_source_dir):\n fn, ext = os.path.splitext(fname)\n if ext in ['.xls', '.xlsx']:\n df = pd.read_excel(os.path.join(data_source_dir, fname))\n df['사용(개시)년월'] = fn\n dfs.append(df)\nuse_amount_df = pd.concat(dfs, ignore_index=True)\n\n\n# In[4]:\n\ndrug_standard_df = pd.read_json('drug.json').T\n\ndrug_info_df = pd.read_excel('약품정보.xls')\n\nuse_amount_df = pd.merge(drug_info_df, use_amount_df[['사용량', '약품코드', '사용(개시)년월']], on='약품코드', how='left')\n\nuse_amount_df = pd.merge(use_amount_df, drug_standard_df[['보험코드', '제품명', '판매사', '성분/함량']], left_on='EDI코드', right_on='보험코드', how='left')\n\nuse_amount_df['제품명'] = use_amount_df['제품명'].fillna(use_amount_df['약품명(한글)'])\n\nuse_amount_df['사용개시년월'] = use_amount_df['수가시작일자'].map(lambda x: str(x)[0:4]+'-'+str(x)[4:6])\n\nuse_amount_df['사용(개시)년월'] = use_amount_df['사용(개시)년월'].fillna(use_amount_df['사용개시년월'])\nuse_amount_df['성분명'] = use_amount_df['성분명'].fillna(use_amount_df['성분/함량'])\n\nuse_amount_df['원내/원외 처방구분'] = use_amount_df['원내/원외 처방구분'].map({1: '원외', 2: '원외/원내', 3: '원내'})\nuse_amount_df['약품법적구분'] = use_amount_df['약품법적구분'].map({0: '일반', 1: '마약', 2: '향정약', 3: '독약', 4: '한방약', 5: '고가약'})\n\n\n# In[5]:\n\ndef get_last(s):\n try:\n return max(s)\n except:\n return s\n\n\n# In[6]:\n\nmonths = use_amount_df['사용(개시)년월'].unique()\nmonths = sorted(months.tolist(), reverse=1)\nuse_amount_df['최후사용월'] = use_amount_df.groupby(['제품명'])['사용(개시)년월'].transform(get_last)\nuse_amount_df['최근미사용월수'] = use_amount_df['최후사용월'].map(lambda x: months.index(x) if x in months else -1)\n\n\n# In[7]:\n\nuse_amount_in_df = use_amount_df[use_amount_df['원내/원외 처방구분'] != '원외']\n\n\n# In[8]:\n\nuse_amount_in_df['사용량'] = 
use_amount_in_df['사용량'].fillna('오픈후미사용')\n\n\n# In[9]:\n\npat = '(\\(([^\\d].*?)\\)+\\s*)|퇴장방지\\s*|생산원가보전,*\\s*|사용장려(비\\s*\\d+원|및|비용지급,*\\s*)'\nuse_amount_in_df = use_amount_in_df.rename(columns={'제품명': '약품명(드럭인포)', '약품명(한글)': '약품명(원내)'})\nuse_amount_in_df['약품명(드럭인포)'] = use_amount_in_df['약품명(드럭인포)'].str.replace(pat, '')\n\n\n# In[10]:\n\npvt = use_amount_in_df.pivot_table(index = ['EDI코드','약품명(드럭인포)', '성분명','약품코드','약품명(원내)','효능코드명','규격단위', '최근미사용월수'], columns=['사용(개시)년월'], values=['사용량'], aggfunc=sum)\n\n\n# In[11]:\n\npvt.to_excel(OUTPUT_EXCEL)\nos.startfile(OUTPUT_EXCEL)\n\n\n# In[ ]:\n\n\n\n",
"step-ids": [
0,
1,
3,
4,
5
]
}
|
[
0,
1,
3,
4,
5
] |
<|reserved_special_token_0|>
class TestClass01(TestCase):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def test_case01(self):
"""Version extraction"""
version = versions.extract_version(self.any_string)
self.assertEqual(version, '1.1.1')
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def test_case04(self):
"""Datetime version"""
version = versions.get_version_power('2021.1.1')
self.assertTrue(isinstance(version, datetime))
def test_case05(self):
"""Datetime versions compare"""
version = versions.get_version_power('2020.1.1')
version2 = versions.get_version_power('2021.1.1')
self.assertGreater(version2, version)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestClass01(TestCase):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def test_case01(self):
"""Version extraction"""
version = versions.extract_version(self.any_string)
self.assertEqual(version, '1.1.1')
def test_case02(self):
"""Version power calculation"""
version = versions.get_version_power('1.1.1')
self.assertEqual(version, 111)
<|reserved_special_token_0|>
def test_case04(self):
"""Datetime version"""
version = versions.get_version_power('2021.1.1')
self.assertTrue(isinstance(version, datetime))
def test_case05(self):
"""Datetime versions compare"""
version = versions.get_version_power('2020.1.1')
version2 = versions.get_version_power('2021.1.1')
self.assertGreater(version2, version)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestClass01(TestCase):
"""Software dependency versions compared"""
def setUp(self) ->None:
super().setUp()
self.any_string = 'Some string containing v1.1.1'
def test_case01(self):
"""Version extraction"""
version = versions.extract_version(self.any_string)
self.assertEqual(version, '1.1.1')
def test_case02(self):
"""Version power calculation"""
version = versions.get_version_power('1.1.1')
self.assertEqual(version, 111)
def test_case03(self):
"""Version power calculation compared"""
version1 = versions.get_version_power('1.1.1')
version2 = versions.get_version_power('0.2.1')
self.assertGreater(version1, version2)
def test_case04(self):
"""Datetime version"""
version = versions.get_version_power('2021.1.1')
self.assertTrue(isinstance(version, datetime))
def test_case05(self):
"""Datetime versions compare"""
version = versions.get_version_power('2020.1.1')
version2 = versions.get_version_power('2021.1.1')
self.assertGreater(version2, version)
<|reserved_special_token_1|>
from datetime import datetime
from unittest import TestCase
from vpnmupd import versions
class TestClass01(TestCase):
"""Software dependency versions compared"""
def setUp(self) ->None:
super().setUp()
self.any_string = 'Some string containing v1.1.1'
def test_case01(self):
"""Version extraction"""
version = versions.extract_version(self.any_string)
self.assertEqual(version, '1.1.1')
def test_case02(self):
"""Version power calculation"""
version = versions.get_version_power('1.1.1')
self.assertEqual(version, 111)
def test_case03(self):
"""Version power calculation compared"""
version1 = versions.get_version_power('1.1.1')
version2 = versions.get_version_power('0.2.1')
self.assertGreater(version1, version2)
def test_case04(self):
"""Datetime version"""
version = versions.get_version_power('2021.1.1')
self.assertTrue(isinstance(version, datetime))
def test_case05(self):
"""Datetime versions compare"""
version = versions.get_version_power('2020.1.1')
version2 = versions.get_version_power('2021.1.1')
self.assertGreater(version2, version)
<|reserved_special_token_1|>
from datetime import datetime
from unittest import TestCase
from vpnmupd import versions
class TestClass01(TestCase):
"""Software dependency versions compared"""
def setUp(self) -> None:
super().setUp()
self.any_string = "Some string containing v1.1.1"
def test_case01(self):
"""Version extraction"""
version = versions.extract_version(self.any_string)
self.assertEqual(version, "1.1.1")
def test_case02(self):
"""Version power calculation"""
version = versions.get_version_power("1.1.1")
self.assertEqual(version, 111)
def test_case03(self):
"""Version power calculation compared"""
version1 = versions.get_version_power("1.1.1")
version2 = versions.get_version_power("0.2.1")
self.assertGreater(version1, version2)
def test_case04(self):
"""Datetime version"""
version = versions.get_version_power("2021.1.1")
self.assertTrue(isinstance(version, datetime))
def test_case05(self):
"""Datetime versions compare"""
version = versions.get_version_power("2020.1.1")
version2 = versions.get_version_power("2021.1.1")
self.assertGreater(version2, version)
|
flexible
|
{
"blob_id": "21d2de5719fafd94605f31bc07231644f4be18c5",
"index": 8749,
"step-1": "<mask token>\n\n\nclass TestClass01(TestCase):\n <mask token>\n <mask token>\n\n def test_case01(self):\n \"\"\"Version extraction\"\"\"\n version = versions.extract_version(self.any_string)\n self.assertEqual(version, '1.1.1')\n <mask token>\n <mask token>\n\n def test_case04(self):\n \"\"\"Datetime version\"\"\"\n version = versions.get_version_power('2021.1.1')\n self.assertTrue(isinstance(version, datetime))\n\n def test_case05(self):\n \"\"\"Datetime versions compare\"\"\"\n version = versions.get_version_power('2020.1.1')\n version2 = versions.get_version_power('2021.1.1')\n self.assertGreater(version2, version)\n",
"step-2": "<mask token>\n\n\nclass TestClass01(TestCase):\n <mask token>\n <mask token>\n\n def test_case01(self):\n \"\"\"Version extraction\"\"\"\n version = versions.extract_version(self.any_string)\n self.assertEqual(version, '1.1.1')\n\n def test_case02(self):\n \"\"\"Version power calculation\"\"\"\n version = versions.get_version_power('1.1.1')\n self.assertEqual(version, 111)\n <mask token>\n\n def test_case04(self):\n \"\"\"Datetime version\"\"\"\n version = versions.get_version_power('2021.1.1')\n self.assertTrue(isinstance(version, datetime))\n\n def test_case05(self):\n \"\"\"Datetime versions compare\"\"\"\n version = versions.get_version_power('2020.1.1')\n version2 = versions.get_version_power('2021.1.1')\n self.assertGreater(version2, version)\n",
"step-3": "<mask token>\n\n\nclass TestClass01(TestCase):\n \"\"\"Software dependency versions compared\"\"\"\n\n def setUp(self) ->None:\n super().setUp()\n self.any_string = 'Some string containing v1.1.1'\n\n def test_case01(self):\n \"\"\"Version extraction\"\"\"\n version = versions.extract_version(self.any_string)\n self.assertEqual(version, '1.1.1')\n\n def test_case02(self):\n \"\"\"Version power calculation\"\"\"\n version = versions.get_version_power('1.1.1')\n self.assertEqual(version, 111)\n\n def test_case03(self):\n \"\"\"Version power calculation compared\"\"\"\n version1 = versions.get_version_power('1.1.1')\n version2 = versions.get_version_power('0.2.1')\n self.assertGreater(version1, version2)\n\n def test_case04(self):\n \"\"\"Datetime version\"\"\"\n version = versions.get_version_power('2021.1.1')\n self.assertTrue(isinstance(version, datetime))\n\n def test_case05(self):\n \"\"\"Datetime versions compare\"\"\"\n version = versions.get_version_power('2020.1.1')\n version2 = versions.get_version_power('2021.1.1')\n self.assertGreater(version2, version)\n",
"step-4": "from datetime import datetime\nfrom unittest import TestCase\nfrom vpnmupd import versions\n\n\nclass TestClass01(TestCase):\n \"\"\"Software dependency versions compared\"\"\"\n\n def setUp(self) ->None:\n super().setUp()\n self.any_string = 'Some string containing v1.1.1'\n\n def test_case01(self):\n \"\"\"Version extraction\"\"\"\n version = versions.extract_version(self.any_string)\n self.assertEqual(version, '1.1.1')\n\n def test_case02(self):\n \"\"\"Version power calculation\"\"\"\n version = versions.get_version_power('1.1.1')\n self.assertEqual(version, 111)\n\n def test_case03(self):\n \"\"\"Version power calculation compared\"\"\"\n version1 = versions.get_version_power('1.1.1')\n version2 = versions.get_version_power('0.2.1')\n self.assertGreater(version1, version2)\n\n def test_case04(self):\n \"\"\"Datetime version\"\"\"\n version = versions.get_version_power('2021.1.1')\n self.assertTrue(isinstance(version, datetime))\n\n def test_case05(self):\n \"\"\"Datetime versions compare\"\"\"\n version = versions.get_version_power('2020.1.1')\n version2 = versions.get_version_power('2021.1.1')\n self.assertGreater(version2, version)\n",
"step-5": "from datetime import datetime\nfrom unittest import TestCase\n\nfrom vpnmupd import versions\n\n\nclass TestClass01(TestCase):\n \"\"\"Software dependency versions compared\"\"\"\n\n def setUp(self) -> None:\n super().setUp()\n self.any_string = \"Some string containing v1.1.1\"\n\n def test_case01(self):\n \"\"\"Version extraction\"\"\"\n version = versions.extract_version(self.any_string)\n self.assertEqual(version, \"1.1.1\")\n\n def test_case02(self):\n \"\"\"Version power calculation\"\"\"\n version = versions.get_version_power(\"1.1.1\")\n self.assertEqual(version, 111)\n\n def test_case03(self):\n \"\"\"Version power calculation compared\"\"\"\n version1 = versions.get_version_power(\"1.1.1\")\n version2 = versions.get_version_power(\"0.2.1\")\n self.assertGreater(version1, version2)\n\n def test_case04(self):\n \"\"\"Datetime version\"\"\"\n version = versions.get_version_power(\"2021.1.1\")\n self.assertTrue(isinstance(version, datetime))\n\n def test_case05(self):\n \"\"\"Datetime versions compare\"\"\"\n version = versions.get_version_power(\"2020.1.1\")\n version2 = versions.get_version_power(\"2021.1.1\")\n self.assertGreater(version2, version)\n",
"step-ids": [
4,
5,
8,
9,
10
]
}
|
[
4,
5,
8,
9,
10
] |
""" Contains different comparator classes for model output data structures.
"""
import copy
def tuple_to_string(tuptup):
""" Converts a tuple to its string representation. Uses different separators (;, /, |) for
different depths of the representation.
Parameters
----------
tuptup : list
Tuple to convert to its string representation.
Returns
-------
str
String representation of the input tuple.
"""
def join_deepest(tup, sep=';'):
""" Recursive function to create the string representation for the deepest level of the
tuptup list.
Parameters
----------
tup : object
Element to join if list or list of lists.
sep : str, optional
Separation character to join the list elements by.
Returns
-------
object
List containing joined string in max depth. Str if input depth = 1.
"""
if not isinstance(tup, list):
return tup
if not isinstance(tup[0], list):
return sep.join(tup)
for idx, val in enumerate(tup):
tup[idx] = join_deepest(val, sep)
return tup
tup = copy.deepcopy(tuptup)
tup = join_deepest(tup, ';')
tup = join_deepest(tup, '/')
tup = join_deepest(tup, '|')
return tup
class Comparator():
""" Comparator base class.
"""
def compare(self, obj_a, obj_b):
""" Base comparison method.
Parameters
----------
obj_a : object
Object A for comparison.
obj_b : object
Object B for comparison.
Returns
-------
object
Comparison result.
"""
raise NotImplementedError()
class EqualityComparator():
""" Equality comparator. Checks if both responses are equal.
"""
@staticmethod
def compare(obj_a, obj_b):
""" Compares two response objects based on equality.
Parameters
----------
obj_a : tuple
Response tuple A for comparison.
obj_b : tuple
Response tuple B for comparison.
Returns
-------
bool
True if both objects are equal, false otherwise.
"""
return tuple_to_string(obj_a) == tuple_to_string(obj_b)
class NVCComparator():
""" NVC response comparator. Performs the evaluation based on NVC and non-NVC classes.
"""
@staticmethod
def compare(obj_a, obj_b):
""" Compares two response objects based on their NVCness. Only returns true if both
responses are in agreement with either responding NVC or not NVC.
Parameters
----------
obj_a : tuple
Response tuple A for comparison.
obj_b : tuple
Response tuple B for comparison.
Returns
-------
bool
True only if both objects agree on whether the response is NVC or not.
"""
return (tuple_to_string(obj_a) == 'NVC') == (tuple_to_string(obj_b) == 'NVC')
|
normal
|
{
"blob_id": "9c935e9ef298484d565256a420b867e800c3df55",
"index": 3243,
"step-1": "<mask token>\n\n\nclass NVCComparator:\n \"\"\" NVC response comparator. Performs the evaluation based on NVC and non-NVC classes.\n\n \"\"\"\n\n @staticmethod\n def compare(obj_a, obj_b):\n \"\"\" Compares two response objects based on their NVCness. Only returns true if both\n responses are in agreement with either responding NVC or not NVC.\n\n Parameters\n ----------\n obj_a : tuple\n Response tuple A for comparison.\n\n obj_b : tuple\n Response tuple B for comparison.\n\n Returns\n -------\n bool\n True only if both objects agree on whether the response is NVC or not.\n\n \"\"\"\n return (tuple_to_string(obj_a) == 'NVC') == (tuple_to_string(obj_b) ==\n 'NVC')\n",
"step-2": "<mask token>\n\n\nclass EqualityComparator:\n \"\"\" Equality comparator. Checks if both responses are equal.\n\n \"\"\"\n\n @staticmethod\n def compare(obj_a, obj_b):\n \"\"\" Compares two response objects based on equality.\n\n Parameters\n ----------\n obj_a : tuple\n Response tuple A for comparison.\n\n obj_b : tuple\n Response tuple B for comparison.\n\n Returns\n -------\n bool\n True if both objects are equal, false otherwise.\n\n \"\"\"\n return tuple_to_string(obj_a) == tuple_to_string(obj_b)\n\n\nclass NVCComparator:\n \"\"\" NVC response comparator. Performs the evaluation based on NVC and non-NVC classes.\n\n \"\"\"\n\n @staticmethod\n def compare(obj_a, obj_b):\n \"\"\" Compares two response objects based on their NVCness. Only returns true if both\n responses are in agreement with either responding NVC or not NVC.\n\n Parameters\n ----------\n obj_a : tuple\n Response tuple A for comparison.\n\n obj_b : tuple\n Response tuple B for comparison.\n\n Returns\n -------\n bool\n True only if both objects agree on whether the response is NVC or not.\n\n \"\"\"\n return (tuple_to_string(obj_a) == 'NVC') == (tuple_to_string(obj_b) ==\n 'NVC')\n",
"step-3": "<mask token>\n\n\nclass Comparator:\n <mask token>\n <mask token>\n\n\nclass EqualityComparator:\n \"\"\" Equality comparator. Checks if both responses are equal.\n\n \"\"\"\n\n @staticmethod\n def compare(obj_a, obj_b):\n \"\"\" Compares two response objects based on equality.\n\n Parameters\n ----------\n obj_a : tuple\n Response tuple A for comparison.\n\n obj_b : tuple\n Response tuple B for comparison.\n\n Returns\n -------\n bool\n True if both objects are equal, false otherwise.\n\n \"\"\"\n return tuple_to_string(obj_a) == tuple_to_string(obj_b)\n\n\nclass NVCComparator:\n \"\"\" NVC response comparator. Performs the evaluation based on NVC and non-NVC classes.\n\n \"\"\"\n\n @staticmethod\n def compare(obj_a, obj_b):\n \"\"\" Compares two response objects based on their NVCness. Only returns true if both\n responses are in agreement with either responding NVC or not NVC.\n\n Parameters\n ----------\n obj_a : tuple\n Response tuple A for comparison.\n\n obj_b : tuple\n Response tuple B for comparison.\n\n Returns\n -------\n bool\n True only if both objects agree on whether the response is NVC or not.\n\n \"\"\"\n return (tuple_to_string(obj_a) == 'NVC') == (tuple_to_string(obj_b) ==\n 'NVC')\n",
"step-4": "<mask token>\n\n\ndef tuple_to_string(tuptup):\n \"\"\" Converts a tuple to its string representation. Uses different separators (;, /, |) for\n different depths of the representation.\n\n Parameters\n ----------\n tuptup : list\n Tuple to convert to its string representation.\n\n Returns\n -------\n str\n String representation of the input tuple.\n\n \"\"\"\n\n def join_deepest(tup, sep=';'):\n \"\"\" Recursive function to create the string representation for the deepest level of the\n tuptup list.\n\n Parameters\n ----------\n tup : object\n Element to join if list or list of lists.\n\n sep : str, optional\n Separation character to join the list elements by.\n\n Returns\n -------\n object\n List containing joined string in max depth. Str if input depth = 1.\n\n \"\"\"\n if not isinstance(tup, list):\n return tup\n if not isinstance(tup[0], list):\n return sep.join(tup)\n for idx, val in enumerate(tup):\n tup[idx] = join_deepest(val, sep)\n return tup\n tup = copy.deepcopy(tuptup)\n tup = join_deepest(tup, ';')\n tup = join_deepest(tup, '/')\n tup = join_deepest(tup, '|')\n return tup\n\n\nclass Comparator:\n \"\"\" Comparator base class.\n\n \"\"\"\n\n def compare(self, obj_a, obj_b):\n \"\"\" Base comparison method.\n\n Parameters\n ----------\n obj_a : object\n Object A for comparison.\n\n obj_b : object\n Object B for comparison.\n\n Returns\n -------\n object\n Comparison result.\n\n \"\"\"\n raise NotImplementedError()\n\n\nclass EqualityComparator:\n \"\"\" Equality comparator. 
Checks if both responses are equal.\n\n \"\"\"\n\n @staticmethod\n def compare(obj_a, obj_b):\n \"\"\" Compares two response objects based on equality.\n\n Parameters\n ----------\n obj_a : tuple\n Response tuple A for comparison.\n\n obj_b : tuple\n Response tuple B for comparison.\n\n Returns\n -------\n bool\n True if both objects are equal, false otherwise.\n\n \"\"\"\n return tuple_to_string(obj_a) == tuple_to_string(obj_b)\n\n\nclass NVCComparator:\n \"\"\" NVC response comparator. Performs the evaluation based on NVC and non-NVC classes.\n\n \"\"\"\n\n @staticmethod\n def compare(obj_a, obj_b):\n \"\"\" Compares two response objects based on their NVCness. Only returns true if both\n responses are in agreement with either responding NVC or not NVC.\n\n Parameters\n ----------\n obj_a : tuple\n Response tuple A for comparison.\n\n obj_b : tuple\n Response tuple B for comparison.\n\n Returns\n -------\n bool\n True only if both objects agree on whether the response is NVC or not.\n\n \"\"\"\n return (tuple_to_string(obj_a) == 'NVC') == (tuple_to_string(obj_b) ==\n 'NVC')\n",
"step-5": "\"\"\" Contains different comparator classes for model output data structures.\n\n\"\"\"\n\nimport copy\n\ndef tuple_to_string(tuptup):\n \"\"\" Converts a tuple to its string representation. Uses different separators (;, /, |) for\n different depths of the representation.\n\n Parameters\n ----------\n tuptup : list\n Tuple to convert to its string representation.\n\n Returns\n -------\n str\n String representation of the input tuple.\n\n \"\"\"\n\n def join_deepest(tup, sep=';'):\n \"\"\" Recursive function to create the string representation for the deepest level of the\n tuptup list.\n\n Parameters\n ----------\n tup : object\n Element to join if list or list of lists.\n\n sep : str, optional\n Separation character to join the list elements by.\n\n Returns\n -------\n object\n List containing joined string in max depth. Str if input depth = 1.\n\n \"\"\"\n\n if not isinstance(tup, list):\n return tup\n if not isinstance(tup[0], list):\n return sep.join(tup)\n\n for idx, val in enumerate(tup):\n tup[idx] = join_deepest(val, sep)\n return tup\n\n tup = copy.deepcopy(tuptup)\n tup = join_deepest(tup, ';')\n tup = join_deepest(tup, '/')\n tup = join_deepest(tup, '|')\n return tup\n\nclass Comparator():\n \"\"\" Comparator base class.\n\n \"\"\"\n\n def compare(self, obj_a, obj_b):\n \"\"\" Base comparison method.\n\n Parameters\n ----------\n obj_a : object\n Object A for comparison.\n\n obj_b : object\n Object B for comparison.\n\n Returns\n -------\n object\n Comparison result.\n\n \"\"\"\n\n raise NotImplementedError()\n\nclass EqualityComparator():\n \"\"\" Equality comparator. 
Checks if both responses are equal.\n\n \"\"\"\n\n @staticmethod\n def compare(obj_a, obj_b):\n \"\"\" Compares two response objects based on equality.\n\n Parameters\n ----------\n obj_a : tuple\n Response tuple A for comparison.\n\n obj_b : tuple\n Response tuple B for comparison.\n\n Returns\n -------\n bool\n True if both objects are equal, false otherwise.\n\n \"\"\"\n\n return tuple_to_string(obj_a) == tuple_to_string(obj_b)\n\nclass NVCComparator():\n \"\"\" NVC response comparator. Performs the evaluation based on NVC and non-NVC classes.\n\n \"\"\"\n\n @staticmethod\n def compare(obj_a, obj_b):\n \"\"\" Compares two response objects based on their NVCness. Only returns true if both\n responses are in agreement with either responding NVC or not NVC.\n\n Parameters\n ----------\n obj_a : tuple\n Response tuple A for comparison.\n\n obj_b : tuple\n Response tuple B for comparison.\n\n Returns\n -------\n bool\n True only if both objects agree on whether the response is NVC or not.\n\n \"\"\"\n\n return (tuple_to_string(obj_a) == 'NVC') == (tuple_to_string(obj_b) == 'NVC')\n",
"step-ids": [
3,
6,
7,
10,
12
]
}
|
[
3,
6,
7,
10,
12
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print('----------------')
print('{} x {:2} = {:2}'.format(n, 1, 1 * n))
print('{} x {:2} = {:2}'.format(n, 2, 2 * n))
print('{} x {:2} = {:2}'.format(n, 3, 3 * n))
print('{} x {:2} = {:2}'.format(n, 4, 4 * n))
print('{} x {:2} = {:2}'.format(n, 5, 5 * n))
print('{} x {:2} = {:2}'.format(n, 6, 6 * n))
print('{} x {:2} = {:2}'.format(n, 7, 7 * n))
print('{} x {:2} = {:2}'.format(n, 8, 8 * n))
print('{} x {:2} = {:2}'.format(n, 9, 9 * n))
print('{} x {:2} = {:2}'.format(n, 10, 10 * n))
print('----------------')
<|reserved_special_token_1|>
n = int(input('Informe um numero: '))
print('----------------')
print('{} x {:2} = {:2}'.format(n, 1, 1 * n))
print('{} x {:2} = {:2}'.format(n, 2, 2 * n))
print('{} x {:2} = {:2}'.format(n, 3, 3 * n))
print('{} x {:2} = {:2}'.format(n, 4, 4 * n))
print('{} x {:2} = {:2}'.format(n, 5, 5 * n))
print('{} x {:2} = {:2}'.format(n, 6, 6 * n))
print('{} x {:2} = {:2}'.format(n, 7, 7 * n))
print('{} x {:2} = {:2}'.format(n, 8, 8 * n))
print('{} x {:2} = {:2}'.format(n, 9, 9 * n))
print('{} x {:2} = {:2}'.format(n, 10, 10 * n))
print('----------------')
|
flexible
|
{
"blob_id": "9e814e3f1162e248c5d778c2df9960b199854a27",
"index": 9306,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('----------------')\nprint('{} x {:2} = {:2}'.format(n, 1, 1 * n))\nprint('{} x {:2} = {:2}'.format(n, 2, 2 * n))\nprint('{} x {:2} = {:2}'.format(n, 3, 3 * n))\nprint('{} x {:2} = {:2}'.format(n, 4, 4 * n))\nprint('{} x {:2} = {:2}'.format(n, 5, 5 * n))\nprint('{} x {:2} = {:2}'.format(n, 6, 6 * n))\nprint('{} x {:2} = {:2}'.format(n, 7, 7 * n))\nprint('{} x {:2} = {:2}'.format(n, 8, 8 * n))\nprint('{} x {:2} = {:2}'.format(n, 9, 9 * n))\nprint('{} x {:2} = {:2}'.format(n, 10, 10 * n))\nprint('----------------')\n",
"step-3": "n = int(input('Informe um numero: '))\nprint('----------------')\nprint('{} x {:2} = {:2}'.format(n, 1, 1 * n))\nprint('{} x {:2} = {:2}'.format(n, 2, 2 * n))\nprint('{} x {:2} = {:2}'.format(n, 3, 3 * n))\nprint('{} x {:2} = {:2}'.format(n, 4, 4 * n))\nprint('{} x {:2} = {:2}'.format(n, 5, 5 * n))\nprint('{} x {:2} = {:2}'.format(n, 6, 6 * n))\nprint('{} x {:2} = {:2}'.format(n, 7, 7 * n))\nprint('{} x {:2} = {:2}'.format(n, 8, 8 * n))\nprint('{} x {:2} = {:2}'.format(n, 9, 9 * n))\nprint('{} x {:2} = {:2}'.format(n, 10, 10 * n))\nprint('----------------')\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
# -*- coding:utf-8 -*-
import requests
from lxml import etree
import codecs
from transfrom import del_extra
import re
MODIFIED_TEXT = [r'一秒记住.*?。', r'(看书.*?)', r'纯文字.*?问', r'热门.*?>', r'最新章节.*?新',
r'は防§.*?e', r'&.*?>', r'r.*?>', r'c.*?>',
r'复制.*?>', r'字-符.*?>', r'最新最快,无.*?。',
r' .Shumilou.Co M.Shumilou.Co<br /><br />', r'[Ww]{3}.*[mM]',
r'&nbsp; &nbsp; &nbsp; &nbsp; ']
HEADER = {'user-agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:51.0) Gecko/20100101 Firefox/51.0 '}
URL = 'http://www.xxbiquge.com/5_5422/'
def crawl_urls(u):
response = requests.get(u, headers=HEADER)
body = etree.HTML(response.content)
content_urls = body.xpath('//div[@class="box_con"]/div/dl//dd/a/@href')
for pk_id, u in enumerate(content_urls):
content_url = 'http://www.xxbiquge.com' + u
yield pk_id, content_url
def crwal(content_url):
""" 爬出目标网站的目标文章,并过滤文章"""
content_response = requests.get(content_url, headers=HEADER)
content_body = etree.HTML(content_response.content)
try:
chapter = content_body.xpath('//div[@class="bookname"]/h1/text()')[0]
content = content_body.xpath('//div[@id="content"]')[0]
except IndexError:
raise IndexError('rules haved change in %s' % content_url)
final_content, need_confirm = transform_content(etree.tounicode(content))
final_content = content_filter(final_content)
return chapter, final_content, need_confirm
def transform_content(txt):
need_confirm = 0
if 'div' in txt:
txt = txt.split('<div id="content">')[-1].split('</div>')[0]
if len(txt) > 0:
while True:
if txt.startswith(' ') or txt.startswith(' '):
break
if '\u4e00' <= txt[0] <= '\u9fff':
break
txt = txt[1:]
txt = del_extra(txt)
if '\\' in txt or len(txt) < 100:
need_confirm = 1
return txt, need_confirm
def content_filter(content):
""" 正则去除文章中间的广告,乱码"""
m_content = content
for ccc in MODIFIED_TEXT:
m_content = re.sub(ccc, '', m_content)
return m_content
if __name__ == '__main__':
pass
|
normal
|
{
"blob_id": "7539042b92a5188a11f625cdfc0f341941f751f0",
"index": 6937,
"step-1": "<mask token>\n\n\ndef crawl_urls(u):\n response = requests.get(u, headers=HEADER)\n body = etree.HTML(response.content)\n content_urls = body.xpath('//div[@class=\"box_con\"]/div/dl//dd/a/@href')\n for pk_id, u in enumerate(content_urls):\n content_url = 'http://www.xxbiquge.com' + u\n yield pk_id, content_url\n\n\ndef crwal(content_url):\n \"\"\" 爬出目标网站的目标文章,并过滤文章\"\"\"\n content_response = requests.get(content_url, headers=HEADER)\n content_body = etree.HTML(content_response.content)\n try:\n chapter = content_body.xpath('//div[@class=\"bookname\"]/h1/text()')[0]\n content = content_body.xpath('//div[@id=\"content\"]')[0]\n except IndexError:\n raise IndexError('rules haved change in %s' % content_url)\n final_content, need_confirm = transform_content(etree.tounicode(content))\n final_content = content_filter(final_content)\n return chapter, final_content, need_confirm\n\n\ndef transform_content(txt):\n need_confirm = 0\n if 'div' in txt:\n txt = txt.split('<div id=\"content\">')[-1].split('</div>')[0]\n if len(txt) > 0:\n while True:\n if txt.startswith('\\xa0') or txt.startswith('\\u3000'):\n break\n if '一' <= txt[0] <= '鿿':\n break\n txt = txt[1:]\n txt = del_extra(txt)\n if '\\\\' in txt or len(txt) < 100:\n need_confirm = 1\n return txt, need_confirm\n\n\ndef content_filter(content):\n \"\"\" 正则去除文章中间的广告,乱码\"\"\"\n m_content = content\n for ccc in MODIFIED_TEXT:\n m_content = re.sub(ccc, '', m_content)\n return m_content\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef crawl_urls(u):\n response = requests.get(u, headers=HEADER)\n body = etree.HTML(response.content)\n content_urls = body.xpath('//div[@class=\"box_con\"]/div/dl//dd/a/@href')\n for pk_id, u in enumerate(content_urls):\n content_url = 'http://www.xxbiquge.com' + u\n yield pk_id, content_url\n\n\ndef crwal(content_url):\n \"\"\" 爬出目标网站的目标文章,并过滤文章\"\"\"\n content_response = requests.get(content_url, headers=HEADER)\n content_body = etree.HTML(content_response.content)\n try:\n chapter = content_body.xpath('//div[@class=\"bookname\"]/h1/text()')[0]\n content = content_body.xpath('//div[@id=\"content\"]')[0]\n except IndexError:\n raise IndexError('rules haved change in %s' % content_url)\n final_content, need_confirm = transform_content(etree.tounicode(content))\n final_content = content_filter(final_content)\n return chapter, final_content, need_confirm\n\n\ndef transform_content(txt):\n need_confirm = 0\n if 'div' in txt:\n txt = txt.split('<div id=\"content\">')[-1].split('</div>')[0]\n if len(txt) > 0:\n while True:\n if txt.startswith('\\xa0') or txt.startswith('\\u3000'):\n break\n if '一' <= txt[0] <= '鿿':\n break\n txt = txt[1:]\n txt = del_extra(txt)\n if '\\\\' in txt or len(txt) < 100:\n need_confirm = 1\n return txt, need_confirm\n\n\ndef content_filter(content):\n \"\"\" 正则去除文章中间的广告,乱码\"\"\"\n m_content = content\n for ccc in MODIFIED_TEXT:\n m_content = re.sub(ccc, '', m_content)\n return m_content\n\n\nif __name__ == '__main__':\n pass\n",
"step-3": "<mask token>\nMODIFIED_TEXT = ['一秒记住.*?。', '(看书.*?)', '纯文字.*?问', '热门.*?>', '最新章节.*?新',\n 'は防§.*?e', '&.*?>', 'r.*?>', 'c.*?>', '复制.*?>', '字-符.*?>', '最新最快,无.*?。',\n '\\xa0\\xa0\\xa0\\xa0.Shumilou.Co\\xa0\\xa0M.Shumilou.Co<br /><br />',\n '[Ww]{3}.*[mM]',\n '&nbsp;\\xa0\\xa0\\xa0\\xa0&nbsp;\\xa0\\xa0\\xa0\\xa0&nbsp;\\xa0\\xa0\\xa0\\xa0&nbsp;\\xa0\\xa0'\n ]\nHEADER = {'user-agent':\n 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:51.0) Gecko/20100101 Firefox/51.0 '\n }\nURL = 'http://www.xxbiquge.com/5_5422/'\n\n\ndef crawl_urls(u):\n response = requests.get(u, headers=HEADER)\n body = etree.HTML(response.content)\n content_urls = body.xpath('//div[@class=\"box_con\"]/div/dl//dd/a/@href')\n for pk_id, u in enumerate(content_urls):\n content_url = 'http://www.xxbiquge.com' + u\n yield pk_id, content_url\n\n\ndef crwal(content_url):\n \"\"\" 爬出目标网站的目标文章,并过滤文章\"\"\"\n content_response = requests.get(content_url, headers=HEADER)\n content_body = etree.HTML(content_response.content)\n try:\n chapter = content_body.xpath('//div[@class=\"bookname\"]/h1/text()')[0]\n content = content_body.xpath('//div[@id=\"content\"]')[0]\n except IndexError:\n raise IndexError('rules haved change in %s' % content_url)\n final_content, need_confirm = transform_content(etree.tounicode(content))\n final_content = content_filter(final_content)\n return chapter, final_content, need_confirm\n\n\ndef transform_content(txt):\n need_confirm = 0\n if 'div' in txt:\n txt = txt.split('<div id=\"content\">')[-1].split('</div>')[0]\n if len(txt) > 0:\n while True:\n if txt.startswith('\\xa0') or txt.startswith('\\u3000'):\n break\n if '一' <= txt[0] <= '鿿':\n break\n txt = txt[1:]\n txt = del_extra(txt)\n if '\\\\' in txt or len(txt) < 100:\n need_confirm = 1\n return txt, need_confirm\n\n\ndef content_filter(content):\n \"\"\" 正则去除文章中间的广告,乱码\"\"\"\n m_content = content\n for ccc in MODIFIED_TEXT:\n m_content = re.sub(ccc, '', m_content)\n return m_content\n\n\nif __name__ == '__main__':\n pass\n",
"step-4": "import requests\nfrom lxml import etree\nimport codecs\nfrom transfrom import del_extra\nimport re\nMODIFIED_TEXT = ['一秒记住.*?。', '(看书.*?)', '纯文字.*?问', '热门.*?>', '最新章节.*?新',\n 'は防§.*?e', '&.*?>', 'r.*?>', 'c.*?>', '复制.*?>', '字-符.*?>', '最新最快,无.*?。',\n '\\xa0\\xa0\\xa0\\xa0.Shumilou.Co\\xa0\\xa0M.Shumilou.Co<br /><br />',\n '[Ww]{3}.*[mM]',\n '&nbsp;\\xa0\\xa0\\xa0\\xa0&nbsp;\\xa0\\xa0\\xa0\\xa0&nbsp;\\xa0\\xa0\\xa0\\xa0&nbsp;\\xa0\\xa0'\n ]\nHEADER = {'user-agent':\n 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:51.0) Gecko/20100101 Firefox/51.0 '\n }\nURL = 'http://www.xxbiquge.com/5_5422/'\n\n\ndef crawl_urls(u):\n response = requests.get(u, headers=HEADER)\n body = etree.HTML(response.content)\n content_urls = body.xpath('//div[@class=\"box_con\"]/div/dl//dd/a/@href')\n for pk_id, u in enumerate(content_urls):\n content_url = 'http://www.xxbiquge.com' + u\n yield pk_id, content_url\n\n\ndef crwal(content_url):\n \"\"\" 爬出目标网站的目标文章,并过滤文章\"\"\"\n content_response = requests.get(content_url, headers=HEADER)\n content_body = etree.HTML(content_response.content)\n try:\n chapter = content_body.xpath('//div[@class=\"bookname\"]/h1/text()')[0]\n content = content_body.xpath('//div[@id=\"content\"]')[0]\n except IndexError:\n raise IndexError('rules haved change in %s' % content_url)\n final_content, need_confirm = transform_content(etree.tounicode(content))\n final_content = content_filter(final_content)\n return chapter, final_content, need_confirm\n\n\ndef transform_content(txt):\n need_confirm = 0\n if 'div' in txt:\n txt = txt.split('<div id=\"content\">')[-1].split('</div>')[0]\n if len(txt) > 0:\n while True:\n if txt.startswith('\\xa0') or txt.startswith('\\u3000'):\n break\n if '一' <= txt[0] <= '鿿':\n break\n txt = txt[1:]\n txt = del_extra(txt)\n if '\\\\' in txt or len(txt) < 100:\n need_confirm = 1\n return txt, need_confirm\n\n\ndef content_filter(content):\n \"\"\" 正则去除文章中间的广告,乱码\"\"\"\n m_content = content\n for ccc in MODIFIED_TEXT:\n m_content = 
re.sub(ccc, '', m_content)\n return m_content\n\n\nif __name__ == '__main__':\n pass\n",
"step-5": "# -*- coding:utf-8 -*-\n\nimport requests\nfrom lxml import etree\nimport codecs\nfrom transfrom import del_extra\nimport re\n\nMODIFIED_TEXT = [r'一秒记住.*?。', r'(看书.*?)', r'纯文字.*?问', r'热门.*?>', r'最新章节.*?新',\n r'は防§.*?e', r'&.*?>', r'r.*?>', r'c.*?>',\n r'复制.*?>', r'字-符.*?>', r'最新最快,无.*?。',\n r' .Shumilou.Co M.Shumilou.Co<br /><br />', r'[Ww]{3}.*[mM]',\n r'&nbsp; &nbsp; &nbsp; &nbsp; ']\n\nHEADER = {'user-agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:51.0) Gecko/20100101 Firefox/51.0 '}\nURL = 'http://www.xxbiquge.com/5_5422/'\n\n\ndef crawl_urls(u):\n response = requests.get(u, headers=HEADER)\n body = etree.HTML(response.content)\n content_urls = body.xpath('//div[@class=\"box_con\"]/div/dl//dd/a/@href')\n for pk_id, u in enumerate(content_urls):\n content_url = 'http://www.xxbiquge.com' + u\n yield pk_id, content_url\n\n\ndef crwal(content_url):\n \"\"\" 爬出目标网站的目标文章,并过滤文章\"\"\"\n content_response = requests.get(content_url, headers=HEADER)\n content_body = etree.HTML(content_response.content)\n try:\n chapter = content_body.xpath('//div[@class=\"bookname\"]/h1/text()')[0]\n content = content_body.xpath('//div[@id=\"content\"]')[0]\n except IndexError:\n raise IndexError('rules haved change in %s' % content_url)\n final_content, need_confirm = transform_content(etree.tounicode(content))\n final_content = content_filter(final_content)\n return chapter, final_content, need_confirm\n\n\ndef transform_content(txt):\n need_confirm = 0\n if 'div' in txt:\n txt = txt.split('<div id=\"content\">')[-1].split('</div>')[0]\n if len(txt) > 0:\n while True:\n if txt.startswith(' ') or txt.startswith(' '):\n break\n if '\\u4e00' <= txt[0] <= '\\u9fff':\n break\n txt = txt[1:]\n txt = del_extra(txt)\n if '\\\\' in txt or len(txt) < 100:\n need_confirm = 1\n return txt, need_confirm\n\n\ndef content_filter(content):\n \"\"\" 正则去除文章中间的广告,乱码\"\"\"\n m_content = content\n for ccc in MODIFIED_TEXT:\n m_content = re.sub(ccc, '', m_content)\n return m_content\n\nif 
__name__ == '__main__':\n pass\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
import torch
import torchvision
from torch import nn
def get_resnet18(pre_imgnet=False, num_classes=64):
model = torchvision.models.resnet18(pretrained=pre_imgnet)
model.fc = nn.Linear(512, 64)
return model
|
normal
|
{
"blob_id": "8e05b2723d8c50354e785b4bc7c5de8860aa706d",
"index": 5355,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_resnet18(pre_imgnet=False, num_classes=64):\n model = torchvision.models.resnet18(pretrained=pre_imgnet)\n model.fc = nn.Linear(512, 64)\n return model\n",
"step-3": "import torch\nimport torchvision\nfrom torch import nn\n\n\ndef get_resnet18(pre_imgnet=False, num_classes=64):\n model = torchvision.models.resnet18(pretrained=pre_imgnet)\n model.fc = nn.Linear(512, 64)\n return model\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def decrypt(key, ciphertext):
plaintext = ''
for i in ciphertext:
if i.isalpha():
alphabet = ord(i) - key
if alphabet < ord('A'):
alphabet += 26
letter = chr(alphabet)
plaintext += letter
return plaintext
<|reserved_special_token_1|>
def encrypt(key, plaintext):
ciphertext = ''
for i in plaintext:
if i.isalpha():
alphabet = ord(i) + key
if alphabet > ord('Z'):
alphabet -= 26
letter = chr(alphabet)
ciphertext += letter
return ciphertext
def decrypt(key, ciphertext):
plaintext = ''
for i in ciphertext:
if i.isalpha():
alphabet = ord(i) - key
if alphabet < ord('A'):
alphabet += 26
letter = chr(alphabet)
plaintext += letter
return plaintext
<|reserved_special_token_1|>
def encrypt(key,plaintext):
ciphertext=""
for i in plaintext:
if i.isalpha():
alphabet = ord(i)+key
if alphabet > ord("Z"):
alphabet -= 26
letter = chr(alphabet)
ciphertext+=letter
return ciphertext
def decrypt(key,ciphertext):
plaintext=""
for i in ciphertext:
if i.isalpha():
alphabet = ord(i)-key
if alphabet < ord("A"):
alphabet += 26
letter = chr(alphabet)
plaintext+=letter
return plaintext
|
flexible
|
{
"blob_id": "ac31cba94ee8ff7a2903a675954c937c567b5a56",
"index": 6739,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef decrypt(key, ciphertext):\n plaintext = ''\n for i in ciphertext:\n if i.isalpha():\n alphabet = ord(i) - key\n if alphabet < ord('A'):\n alphabet += 26\n letter = chr(alphabet)\n plaintext += letter\n return plaintext\n",
"step-3": "def encrypt(key, plaintext):\n ciphertext = ''\n for i in plaintext:\n if i.isalpha():\n alphabet = ord(i) + key\n if alphabet > ord('Z'):\n alphabet -= 26\n letter = chr(alphabet)\n ciphertext += letter\n return ciphertext\n\n\ndef decrypt(key, ciphertext):\n plaintext = ''\n for i in ciphertext:\n if i.isalpha():\n alphabet = ord(i) - key\n if alphabet < ord('A'):\n alphabet += 26\n letter = chr(alphabet)\n plaintext += letter\n return plaintext\n",
"step-4": "\ndef encrypt(key,plaintext):\n ciphertext=\"\"\n\n for i in plaintext:\n if i.isalpha():\n alphabet = ord(i)+key\n if alphabet > ord(\"Z\"):\n alphabet -= 26\n letter = chr(alphabet)\n ciphertext+=letter\n\n return ciphertext\n\ndef decrypt(key,ciphertext):\n plaintext=\"\"\n for i in ciphertext:\n if i.isalpha():\n alphabet = ord(i)-key\n if alphabet < ord(\"A\"):\n alphabet += 26\n letter = chr(alphabet)\n plaintext+=letter\n\n return plaintext\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from mesa import Model
from mesa.space import SingleGrid
from mesa.time import BaseScheduler, RandomActivation, SimultaneousActivation
from pdpython_model.fixed_model.agents import PDAgent
from mesa.datacollection import DataCollector
class PDModel(Model):
schedule_types = {"Sequential": BaseScheduler,
"Random": RandomActivation,
"Simultaneous": SimultaneousActivation}
def __init__(self, height=8, width=8,
number_of_agents=2,
schedule_type="Simultaneous",
rounds=1,):
# Model Parameters
self.height = height
self.width = width
self.number_of_agents = number_of_agents
self.step_count = 0
self.schedule_type = schedule_type
self.payoffs = {("C", "C"): 3,
("C", "D"): 0,
("D", "C"): 5,
("D", "D"): 2}
# Model Functions
self.schedule = self.schedule_types[self.schedule_type](self)
self.grid = SingleGrid(self.height, self.width, torus=True)
# Find list of empty cells
self.coordinates = [(x, y) for x in range(self.width) for y in range(self.height)]
self.agentIDs = list(range(1, (number_of_agents + 1)))
self.make_agents()
self.running = True
def make_agents(self):
for i in range(self.number_of_agents):
x, y = self.coordinates.pop(0)
# print("x, y:", x, y)
# x, y = self.grid.find_empty()
pdagent = PDAgent((x, y), self, True)
self.grid.place_agent(pdagent, (x, y))
self.schedule.add(pdagent)
def step(self):
self.schedule.step()
self.step_count += 1
def run_model(self, rounds=200):
for i in range(rounds):
self.step()
|
normal
|
{
"blob_id": "446c438b79f9957289fa85f21516c13d67e2cfaf",
"index": 3270,
"step-1": "<mask token>\n\n\nclass PDModel(Model):\n <mask token>\n <mask token>\n\n def make_agents(self):\n for i in range(self.number_of_agents):\n x, y = self.coordinates.pop(0)\n pdagent = PDAgent((x, y), self, True)\n self.grid.place_agent(pdagent, (x, y))\n self.schedule.add(pdagent)\n\n def step(self):\n self.schedule.step()\n self.step_count += 1\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass PDModel(Model):\n <mask token>\n\n def __init__(self, height=8, width=8, number_of_agents=2, schedule_type\n ='Simultaneous', rounds=1):\n self.height = height\n self.width = width\n self.number_of_agents = number_of_agents\n self.step_count = 0\n self.schedule_type = schedule_type\n self.payoffs = {('C', 'C'): 3, ('C', 'D'): 0, ('D', 'C'): 5, ('D',\n 'D'): 2}\n self.schedule = self.schedule_types[self.schedule_type](self)\n self.grid = SingleGrid(self.height, self.width, torus=True)\n self.coordinates = [(x, y) for x in range(self.width) for y in\n range(self.height)]\n self.agentIDs = list(range(1, number_of_agents + 1))\n self.make_agents()\n self.running = True\n\n def make_agents(self):\n for i in range(self.number_of_agents):\n x, y = self.coordinates.pop(0)\n pdagent = PDAgent((x, y), self, True)\n self.grid.place_agent(pdagent, (x, y))\n self.schedule.add(pdagent)\n\n def step(self):\n self.schedule.step()\n self.step_count += 1\n\n def run_model(self, rounds=200):\n for i in range(rounds):\n self.step()\n",
"step-3": "<mask token>\n\n\nclass PDModel(Model):\n schedule_types = {'Sequential': BaseScheduler, 'Random':\n RandomActivation, 'Simultaneous': SimultaneousActivation}\n\n def __init__(self, height=8, width=8, number_of_agents=2, schedule_type\n ='Simultaneous', rounds=1):\n self.height = height\n self.width = width\n self.number_of_agents = number_of_agents\n self.step_count = 0\n self.schedule_type = schedule_type\n self.payoffs = {('C', 'C'): 3, ('C', 'D'): 0, ('D', 'C'): 5, ('D',\n 'D'): 2}\n self.schedule = self.schedule_types[self.schedule_type](self)\n self.grid = SingleGrid(self.height, self.width, torus=True)\n self.coordinates = [(x, y) for x in range(self.width) for y in\n range(self.height)]\n self.agentIDs = list(range(1, number_of_agents + 1))\n self.make_agents()\n self.running = True\n\n def make_agents(self):\n for i in range(self.number_of_agents):\n x, y = self.coordinates.pop(0)\n pdagent = PDAgent((x, y), self, True)\n self.grid.place_agent(pdagent, (x, y))\n self.schedule.add(pdagent)\n\n def step(self):\n self.schedule.step()\n self.step_count += 1\n\n def run_model(self, rounds=200):\n for i in range(rounds):\n self.step()\n",
"step-4": "from mesa import Model\nfrom mesa.space import SingleGrid\nfrom mesa.time import BaseScheduler, RandomActivation, SimultaneousActivation\nfrom pdpython_model.fixed_model.agents import PDAgent\nfrom mesa.datacollection import DataCollector\n\n\nclass PDModel(Model):\n schedule_types = {'Sequential': BaseScheduler, 'Random':\n RandomActivation, 'Simultaneous': SimultaneousActivation}\n\n def __init__(self, height=8, width=8, number_of_agents=2, schedule_type\n ='Simultaneous', rounds=1):\n self.height = height\n self.width = width\n self.number_of_agents = number_of_agents\n self.step_count = 0\n self.schedule_type = schedule_type\n self.payoffs = {('C', 'C'): 3, ('C', 'D'): 0, ('D', 'C'): 5, ('D',\n 'D'): 2}\n self.schedule = self.schedule_types[self.schedule_type](self)\n self.grid = SingleGrid(self.height, self.width, torus=True)\n self.coordinates = [(x, y) for x in range(self.width) for y in\n range(self.height)]\n self.agentIDs = list(range(1, number_of_agents + 1))\n self.make_agents()\n self.running = True\n\n def make_agents(self):\n for i in range(self.number_of_agents):\n x, y = self.coordinates.pop(0)\n pdagent = PDAgent((x, y), self, True)\n self.grid.place_agent(pdagent, (x, y))\n self.schedule.add(pdagent)\n\n def step(self):\n self.schedule.step()\n self.step_count += 1\n\n def run_model(self, rounds=200):\n for i in range(rounds):\n self.step()\n",
"step-5": "from mesa import Model\nfrom mesa.space import SingleGrid\nfrom mesa.time import BaseScheduler, RandomActivation, SimultaneousActivation\nfrom pdpython_model.fixed_model.agents import PDAgent\n\nfrom mesa.datacollection import DataCollector\n\n\nclass PDModel(Model):\n\n schedule_types = {\"Sequential\": BaseScheduler,\n \"Random\": RandomActivation,\n \"Simultaneous\": SimultaneousActivation}\n\n def __init__(self, height=8, width=8,\n number_of_agents=2,\n schedule_type=\"Simultaneous\",\n rounds=1,):\n\n\n # Model Parameters\n self.height = height\n self.width = width\n self.number_of_agents = number_of_agents\n self.step_count = 0\n self.schedule_type = schedule_type\n self.payoffs = {(\"C\", \"C\"): 3,\n (\"C\", \"D\"): 0,\n (\"D\", \"C\"): 5,\n (\"D\", \"D\"): 2}\n\n\n # Model Functions\n self.schedule = self.schedule_types[self.schedule_type](self)\n self.grid = SingleGrid(self.height, self.width, torus=True)\n\n # Find list of empty cells\n self.coordinates = [(x, y) for x in range(self.width) for y in range(self.height)]\n\n self.agentIDs = list(range(1, (number_of_agents + 1)))\n\n self.make_agents()\n self.running = True\n\n def make_agents(self):\n for i in range(self.number_of_agents):\n x, y = self.coordinates.pop(0)\n # print(\"x, y:\", x, y)\n # x, y = self.grid.find_empty()\n pdagent = PDAgent((x, y), self, True)\n self.grid.place_agent(pdagent, (x, y))\n self.schedule.add(pdagent)\n\n def step(self):\n self.schedule.step()\n self.step_count += 1\n\n def run_model(self, rounds=200):\n for i in range(rounds):\n self.step()\n\n",
"step-ids": [
3,
5,
6,
7,
8
]
}
|
[
3,
5,
6,
7,
8
] |
from django.shortcuts import render
class Person(object):
def __init__(self,username):
self.username = username
def index(request):
# p = Person("张三")
# context = {
# 'person': p
# }
# context = {
# 'person': {
# 'username':'zhiliao',
# }
# }
# person.keys()
context = {
'persons': (
'鲁班一号',
'程咬金',
'阿珂'
)
}
return render(request,'index.html',context=context)
|
normal
|
{
"blob_id": "6d2bc28e7742f1063a04ae96fc195515ad70598b",
"index": 5666,
"step-1": "<mask token>\n\n\nclass Person(object):\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Person(object):\n\n def __init__(self, username):\n self.username = username\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Person(object):\n\n def __init__(self, username):\n self.username = username\n\n\ndef index(request):\n context = {'persons': ('鲁班一号', '程咬金', '阿珂')}\n return render(request, 'index.html', context=context)\n",
"step-4": "from django.shortcuts import render\n\n\nclass Person(object):\n\n def __init__(self, username):\n self.username = username\n\n\ndef index(request):\n context = {'persons': ('鲁班一号', '程咬金', '阿珂')}\n return render(request, 'index.html', context=context)\n",
"step-5": "from django.shortcuts import render\n\nclass Person(object):\n def __init__(self,username):\n self.username = username\n\ndef index(request):\n # p = Person(\"张三\")\n # context = {\n # 'person': p\n # }\n # context = {\n # 'person': {\n # 'username':'zhiliao',\n # }\n # }\n # person.keys()\n context = {\n 'persons': (\n '鲁班一号',\n '程咬金',\n '阿珂'\n )\n }\n return render(request,'index.html',context=context)",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
my_func = lambda x, y: x ** y
|
flexible
|
{
"blob_id": "93baa6ba14d06661731dce3e34ea93d49c06001b",
"index": 9043,
"step-1": "<mask token>\n",
"step-2": "my_func = lambda x, y: x ** y\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
from .signals import get_restaurant_coordinates, count_average_price, count_total_calories
from .dish import Dish
from .ingredients import Ingredient
from .restaurants import Restaurant
|
normal
|
{
"blob_id": "1935cab249bf559aeadf785ce7abcecb03344c04",
"index": 6058,
"step-1": "<mask token>\n",
"step-2": "from .signals import get_restaurant_coordinates, count_average_price, count_total_calories\nfrom .dish import Dish\nfrom .ingredients import Ingredient\nfrom .restaurants import Restaurant\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_security import SQLAlchemySessionUserDatastore, Security
app = Flask(__name__, instance_relative_config=True)
app.config.from_pyfile("config.py", silent=True)
db = SQLAlchemy(app)
from .blueprints.cart.views import cart_blueprint
from .blueprints.admin.views import admin_blueprint
from .blueprints.products.views import product_blueprint
from .blueprints.orders.views import order_blueprint
from .blueprints.account.views import account_blueprint
from .blueprints.categories.views import category_blueprint
from .blueprints.static_pages.views import static_blueprint
app.register_blueprint(static_blueprint)
app.register_blueprint(admin_blueprint)
app.register_blueprint(cart_blueprint)
app.register_blueprint(product_blueprint)
app.register_blueprint(account_blueprint)
app.register_blueprint(category_blueprint)
app.register_blueprint(order_blueprint)
from .blueprints.account.models import AccountUser, AccountRole
from .blueprints.account.forms import RegistrationForm, LoginForm
try:
AccountUser.query.first()
except Exception as e:
db.create_all()
user_datastore = SQLAlchemySessionUserDatastore(db.session, AccountUser, AccountRole)
security = Security(
app, user_datastore, register_form=RegistrationForm, login_form=LoginForm
)
|
normal
|
{
"blob_id": "5d97a2afed26ec4826c8bce30c84863d21f86001",
"index": 9370,
"step-1": "<mask token>\n",
"step-2": "<mask token>\napp.config.from_pyfile('config.py', silent=True)\n<mask token>\napp.register_blueprint(static_blueprint)\napp.register_blueprint(admin_blueprint)\napp.register_blueprint(cart_blueprint)\napp.register_blueprint(product_blueprint)\napp.register_blueprint(account_blueprint)\napp.register_blueprint(category_blueprint)\napp.register_blueprint(order_blueprint)\n<mask token>\ntry:\n AccountUser.query.first()\nexcept Exception as e:\n db.create_all()\n<mask token>\n",
"step-3": "<mask token>\napp = Flask(__name__, instance_relative_config=True)\napp.config.from_pyfile('config.py', silent=True)\ndb = SQLAlchemy(app)\n<mask token>\napp.register_blueprint(static_blueprint)\napp.register_blueprint(admin_blueprint)\napp.register_blueprint(cart_blueprint)\napp.register_blueprint(product_blueprint)\napp.register_blueprint(account_blueprint)\napp.register_blueprint(category_blueprint)\napp.register_blueprint(order_blueprint)\n<mask token>\ntry:\n AccountUser.query.first()\nexcept Exception as e:\n db.create_all()\nuser_datastore = SQLAlchemySessionUserDatastore(db.session, AccountUser,\n AccountRole)\nsecurity = Security(app, user_datastore, register_form=RegistrationForm,\n login_form=LoginForm)\n",
"step-4": "from flask import Flask\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_security import SQLAlchemySessionUserDatastore, Security\napp = Flask(__name__, instance_relative_config=True)\napp.config.from_pyfile('config.py', silent=True)\ndb = SQLAlchemy(app)\nfrom .blueprints.cart.views import cart_blueprint\nfrom .blueprints.admin.views import admin_blueprint\nfrom .blueprints.products.views import product_blueprint\nfrom .blueprints.orders.views import order_blueprint\nfrom .blueprints.account.views import account_blueprint\nfrom .blueprints.categories.views import category_blueprint\nfrom .blueprints.static_pages.views import static_blueprint\napp.register_blueprint(static_blueprint)\napp.register_blueprint(admin_blueprint)\napp.register_blueprint(cart_blueprint)\napp.register_blueprint(product_blueprint)\napp.register_blueprint(account_blueprint)\napp.register_blueprint(category_blueprint)\napp.register_blueprint(order_blueprint)\nfrom .blueprints.account.models import AccountUser, AccountRole\nfrom .blueprints.account.forms import RegistrationForm, LoginForm\ntry:\n AccountUser.query.first()\nexcept Exception as e:\n db.create_all()\nuser_datastore = SQLAlchemySessionUserDatastore(db.session, AccountUser,\n AccountRole)\nsecurity = Security(app, user_datastore, register_form=RegistrationForm,\n login_form=LoginForm)\n",
"step-5": "from flask import Flask\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_security import SQLAlchemySessionUserDatastore, Security\n\napp = Flask(__name__, instance_relative_config=True)\napp.config.from_pyfile(\"config.py\", silent=True)\n\ndb = SQLAlchemy(app)\n\nfrom .blueprints.cart.views import cart_blueprint\nfrom .blueprints.admin.views import admin_blueprint\nfrom .blueprints.products.views import product_blueprint\nfrom .blueprints.orders.views import order_blueprint\nfrom .blueprints.account.views import account_blueprint\nfrom .blueprints.categories.views import category_blueprint\nfrom .blueprints.static_pages.views import static_blueprint\n\n\napp.register_blueprint(static_blueprint)\napp.register_blueprint(admin_blueprint)\napp.register_blueprint(cart_blueprint)\napp.register_blueprint(product_blueprint)\napp.register_blueprint(account_blueprint)\napp.register_blueprint(category_blueprint)\napp.register_blueprint(order_blueprint)\n\nfrom .blueprints.account.models import AccountUser, AccountRole\nfrom .blueprints.account.forms import RegistrationForm, LoginForm\n\ntry:\n AccountUser.query.first()\nexcept Exception as e:\n db.create_all()\n\nuser_datastore = SQLAlchemySessionUserDatastore(db.session, AccountUser, AccountRole)\nsecurity = Security(\n app, user_datastore, register_form=RegistrationForm, login_form=LoginForm\n)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class PaperReader:
<|reserved_special_token_0|>
def __init__(self, _threshold=0.001, _length_limit=20000):
with open(config.wordlist, 'r') as f:
self.wordlist = [w for w in list(f.readlines()) if len(w) >= 4]
self.tfu_pdf = TrueFormatUpmarkerPdf2HTMLEX()
self.tfu_html = TrueFormatUpmarkerHTML()
self.length_limit = _length_limit
self.threshold = _threshold
self.normal_data = list(
'used are variants of the predicate calculus. He even says, Lately those who think they ought to be so regarded seem to be winning. Under these circumstances, it does seem odd for McDermott to devote much space to complaining about the logical basis of a book whose very title proclaims it is about logical foundations. In any case, given such a title, it wouldnt seem necessary that readers should be warned that the foundations being explored are not In competition with this diversity is the idea of a unified model of inference. The desire for such a model is strong among those who study declarative representations, and Genesereth and Nilsson are no exception. As are most of their colleagues, they are drawn to the model of inference as the derivation of conclusions that are entailed by a set of beliefs. They wander from this idea in a few places but not for long. It is not hard to see why: Deduction is one of the fews kinds of inference for which we have an interesting general theory. '
.lower())
def just_extract_text_from_html(self, adress):
logging.info(f'extracting text from {adress}')
try:
with urlopen(adress).read().decode('utf-8') as fdoc:
soup = bs4.BeautifulSoup(fdoc, parent='lxml')
return self.get_only_real_words(soup.get_text(), self.wordlist)
except ValueError:
with open(adress, 'r') as fdoc:
soup = bs4.BeautifulSoup(fdoc, features='lxml')
return self.get_only_real_words(soup.get_text(), self.wordlist)
def parse_file_format(self, adress):
if adress.endswith('pdf'):
paths = self.pdfpath2htmlpaths(adress)
if config.parse_pdf2htmlEX:
os.system(
f'pdf2htmlEX --optimize-text 1 --fit-width {config.reader_width} "{adress}" "{paths.html_before_indexing}"'
)
tfu = self.tfu_pdf
elif adress.endswith('html'):
tfu = self.tfu_html
paths = self.htmlpath2htmlpaths(adress)
logging.warning('trying with html...')
else:
logging.error(f"File '{adress}' could not be processed")
return None
tfu.convert_and_index(paths.html_before_indexing, paths.
html_after_indexing)
tfu.save_doc_json(paths.json_path)
os.system(f'cp "{paths.html_after_indexing}" "{paths.apache_path}"')
self.text = ' '.join(list(tfu.indexed_words.values()))
with open(paths.txt_path, 'w') as f:
f.write(self.text)
logging.debug(paths)
self.paths = paths
time.sleep(2)
logging.info(f'extracted text: {self.text[100:]}')
return None
def load_url(self, adress):
response = urllib.request.urlopen(adress)
data = response.read()
self.text = parser.from_buffer(data)
def analyse(self):
"""
Extracts prose text from the loaded texts, that may contain line numbers somewhere, adresses, journal links etc.
:return str: prose text
"""
logging.info('transferring text to CorpusCook...')
paragraphs = self.text.split('\n\n')
print('mean length of splitted lines', mean([len(p) for p in
paragraphs]))
if mean([len(p) for p in paragraphs]) > 80:
paragraphs = [re.sub('- *\\n', '', p) for p in paragraphs]
paragraphs = [p.replace('\n', ' ') for p in paragraphs]
paragraphs = [p.replace(';', ' ') for p in paragraphs]
joiner = ' '
else:
joiner = ' '
processed_text = joiner.join([p for p in paragraphs if p and
ks_2samp(self.normal_data, list(p)).pvalue > self.threshold])
return processed_text.strip()[:self.length_limit]
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def get_only_real_words(self, text, wordlist):
return text
def htmlpath2htmlpaths(self, adress):
filename = os.path.basename(adress)
html_before_indexing = (config.appcorpuscook_diff_document_dir +
filename)
filename = remove_ugly_chars(filename)
html_after_indexing = (config.appcorpuscook_diff_html_dir +
filename + '.pdf2htmlEX.html')
json_path = config.appcorpuscook_diff_json_dir + filename + '.json'
txt_path = config.appcorpuscook_docs_txt_dir + filename + '.txt'
apache_path = config.apache_dir_document + filename + '.html'
return self.DocPaths(html_before_indexing, html_after_indexing,
apache_path, json_path, txt_path)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class PaperReader:
<|reserved_special_token_0|>
def __init__(self, _threshold=0.001, _length_limit=20000):
with open(config.wordlist, 'r') as f:
self.wordlist = [w for w in list(f.readlines()) if len(w) >= 4]
self.tfu_pdf = TrueFormatUpmarkerPdf2HTMLEX()
self.tfu_html = TrueFormatUpmarkerHTML()
self.length_limit = _length_limit
self.threshold = _threshold
self.normal_data = list(
'used are variants of the predicate calculus. He even says, Lately those who think they ought to be so regarded seem to be winning. Under these circumstances, it does seem odd for McDermott to devote much space to complaining about the logical basis of a book whose very title proclaims it is about logical foundations. In any case, given such a title, it wouldnt seem necessary that readers should be warned that the foundations being explored are not In competition with this diversity is the idea of a unified model of inference. The desire for such a model is strong among those who study declarative representations, and Genesereth and Nilsson are no exception. As are most of their colleagues, they are drawn to the model of inference as the derivation of conclusions that are entailed by a set of beliefs. They wander from this idea in a few places but not for long. It is not hard to see why: Deduction is one of the fews kinds of inference for which we have an interesting general theory. '
.lower())
def just_extract_text_from_html(self, adress):
logging.info(f'extracting text from {adress}')
try:
with urlopen(adress).read().decode('utf-8') as fdoc:
soup = bs4.BeautifulSoup(fdoc, parent='lxml')
return self.get_only_real_words(soup.get_text(), self.wordlist)
except ValueError:
with open(adress, 'r') as fdoc:
soup = bs4.BeautifulSoup(fdoc, features='lxml')
return self.get_only_real_words(soup.get_text(), self.wordlist)
def parse_file_format(self, adress):
if adress.endswith('pdf'):
paths = self.pdfpath2htmlpaths(adress)
if config.parse_pdf2htmlEX:
os.system(
f'pdf2htmlEX --optimize-text 1 --fit-width {config.reader_width} "{adress}" "{paths.html_before_indexing}"'
)
tfu = self.tfu_pdf
elif adress.endswith('html'):
tfu = self.tfu_html
paths = self.htmlpath2htmlpaths(adress)
logging.warning('trying with html...')
else:
logging.error(f"File '{adress}' could not be processed")
return None
tfu.convert_and_index(paths.html_before_indexing, paths.
html_after_indexing)
tfu.save_doc_json(paths.json_path)
os.system(f'cp "{paths.html_after_indexing}" "{paths.apache_path}"')
self.text = ' '.join(list(tfu.indexed_words.values()))
with open(paths.txt_path, 'w') as f:
f.write(self.text)
logging.debug(paths)
self.paths = paths
time.sleep(2)
logging.info(f'extracted text: {self.text[100:]}')
return None
def load_url(self, adress):
response = urllib.request.urlopen(adress)
data = response.read()
self.text = parser.from_buffer(data)
def analyse(self):
"""
Extracts prose text from the loaded texts, that may contain line numbers somewhere, adresses, journal links etc.
:return str: prose text
"""
logging.info('transferring text to CorpusCook...')
paragraphs = self.text.split('\n\n')
print('mean length of splitted lines', mean([len(p) for p in
paragraphs]))
if mean([len(p) for p in paragraphs]) > 80:
paragraphs = [re.sub('- *\\n', '', p) for p in paragraphs]
paragraphs = [p.replace('\n', ' ') for p in paragraphs]
paragraphs = [p.replace(';', ' ') for p in paragraphs]
joiner = ' '
else:
joiner = ' '
processed_text = joiner.join([p for p in paragraphs if p and
ks_2samp(self.normal_data, list(p)).pvalue > self.threshold])
return processed_text.strip()[:self.length_limit]
DocPaths = namedtuple('DocPaths', ['html_before_indexing',
'html_after_indexing', 'apache_path', 'json_path', 'txt_path'])
def pdfpath2htmlpaths(self, adress):
filename = os.path.basename(adress)
html_before_indexing = (config.appcorpuscook_docs_document_dir +
filename + '.html')
filename = remove_ugly_chars(filename)
html_after_indexing = (config.appcorpuscook_docs_document_dir +
filename + '.pdf2htmlEX.html')
json_path = config.appcorpuscook_docs_json_dir + filename + '.json'
txt_path = config.appcorpuscook_docs_txt_dir + filename + '.txt'
apache_path = config.apache_dir_document + filename + '.html'
return self.DocPaths(html_before_indexing, html_after_indexing,
apache_path, json_path, txt_path)
def get_only_real_words(self, text, wordlist):
return text
def htmlpath2htmlpaths(self, adress):
filename = os.path.basename(adress)
html_before_indexing = (config.appcorpuscook_diff_document_dir +
filename)
filename = remove_ugly_chars(filename)
html_after_indexing = (config.appcorpuscook_diff_html_dir +
filename + '.pdf2htmlEX.html')
json_path = config.appcorpuscook_diff_json_dir + filename + '.json'
txt_path = config.appcorpuscook_docs_txt_dir + filename + '.txt'
apache_path = config.apache_dir_document + filename + '.html'
return self.DocPaths(html_before_indexing, html_after_indexing,
apache_path, json_path, txt_path)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class PaperReader:
""" multimedial extractor. it reads text from papers in pdfs, urls, html and other things.
Formatting of text makes processing harder, text is cluttered up with remarks of the punlisher on every page,
page and line numbers and other stuff, that must be ignored with the processing, especially, when joining the
texts of different pages, where sentences continue.
detecting text by comparing to the letter distribution of normal prose to parts of the text extracted.
"""
def __init__(self, _threshold=0.001, _length_limit=20000):
with open(config.wordlist, 'r') as f:
self.wordlist = [w for w in list(f.readlines()) if len(w) >= 4]
self.tfu_pdf = TrueFormatUpmarkerPdf2HTMLEX()
self.tfu_html = TrueFormatUpmarkerHTML()
self.length_limit = _length_limit
self.threshold = _threshold
self.normal_data = list(
'used are variants of the predicate calculus. He even says, Lately those who think they ought to be so regarded seem to be winning. Under these circumstances, it does seem odd for McDermott to devote much space to complaining about the logical basis of a book whose very title proclaims it is about logical foundations. In any case, given such a title, it wouldnt seem necessary that readers should be warned that the foundations being explored are not In competition with this diversity is the idea of a unified model of inference. The desire for such a model is strong among those who study declarative representations, and Genesereth and Nilsson are no exception. As are most of their colleagues, they are drawn to the model of inference as the derivation of conclusions that are entailed by a set of beliefs. They wander from this idea in a few places but not for long. It is not hard to see why: Deduction is one of the fews kinds of inference for which we have an interesting general theory. '
.lower())
def just_extract_text_from_html(self, adress):
logging.info(f'extracting text from {adress}')
try:
with urlopen(adress).read().decode('utf-8') as fdoc:
soup = bs4.BeautifulSoup(fdoc, parent='lxml')
return self.get_only_real_words(soup.get_text(), self.wordlist)
except ValueError:
with open(adress, 'r') as fdoc:
soup = bs4.BeautifulSoup(fdoc, features='lxml')
return self.get_only_real_words(soup.get_text(), self.wordlist)
def parse_file_format(self, adress):
if adress.endswith('pdf'):
paths = self.pdfpath2htmlpaths(adress)
if config.parse_pdf2htmlEX:
os.system(
f'pdf2htmlEX --optimize-text 1 --fit-width {config.reader_width} "{adress}" "{paths.html_before_indexing}"'
)
tfu = self.tfu_pdf
elif adress.endswith('html'):
tfu = self.tfu_html
paths = self.htmlpath2htmlpaths(adress)
logging.warning('trying with html...')
else:
logging.error(f"File '{adress}' could not be processed")
return None
tfu.convert_and_index(paths.html_before_indexing, paths.
html_after_indexing)
tfu.save_doc_json(paths.json_path)
os.system(f'cp "{paths.html_after_indexing}" "{paths.apache_path}"')
self.text = ' '.join(list(tfu.indexed_words.values()))
with open(paths.txt_path, 'w') as f:
f.write(self.text)
logging.debug(paths)
self.paths = paths
time.sleep(2)
logging.info(f'extracted text: {self.text[100:]}')
return None
def load_url(self, adress):
response = urllib.request.urlopen(adress)
data = response.read()
self.text = parser.from_buffer(data)
def analyse(self):
"""
Extracts prose text from the loaded texts, that may contain line numbers somewhere, adresses, journal links etc.
:return str: prose text
"""
logging.info('transferring text to CorpusCook...')
paragraphs = self.text.split('\n\n')
print('mean length of splitted lines', mean([len(p) for p in
paragraphs]))
if mean([len(p) for p in paragraphs]) > 80:
paragraphs = [re.sub('- *\\n', '', p) for p in paragraphs]
paragraphs = [p.replace('\n', ' ') for p in paragraphs]
paragraphs = [p.replace(';', ' ') for p in paragraphs]
joiner = ' '
else:
joiner = ' '
processed_text = joiner.join([p for p in paragraphs if p and
ks_2samp(self.normal_data, list(p)).pvalue > self.threshold])
return processed_text.strip()[:self.length_limit]
DocPaths = namedtuple('DocPaths', ['html_before_indexing',
'html_after_indexing', 'apache_path', 'json_path', 'txt_path'])
def pdfpath2htmlpaths(self, adress):
filename = os.path.basename(adress)
html_before_indexing = (config.appcorpuscook_docs_document_dir +
filename + '.html')
filename = remove_ugly_chars(filename)
html_after_indexing = (config.appcorpuscook_docs_document_dir +
filename + '.pdf2htmlEX.html')
json_path = config.appcorpuscook_docs_json_dir + filename + '.json'
txt_path = config.appcorpuscook_docs_txt_dir + filename + '.txt'
apache_path = config.apache_dir_document + filename + '.html'
return self.DocPaths(html_before_indexing, html_after_indexing,
apache_path, json_path, txt_path)
    def get_only_real_words(self, text, wordlist):
        # Word-list filtering is currently disabled on purpose: the text is
        # returned unchanged and *wordlist* is accepted but unused.
        return text
def htmlpath2htmlpaths(self, adress):
filename = os.path.basename(adress)
html_before_indexing = (config.appcorpuscook_diff_document_dir +
filename)
filename = remove_ugly_chars(filename)
html_after_indexing = (config.appcorpuscook_diff_html_dir +
filename + '.pdf2htmlEX.html')
json_path = config.appcorpuscook_diff_json_dir + filename + '.json'
txt_path = config.appcorpuscook_docs_txt_dir + filename + '.txt'
apache_path = config.apache_dir_document + filename + '.html'
return self.DocPaths(html_before_indexing, html_after_indexing,
apache_path, json_path, txt_path)
<|reserved_special_token_1|>
import logging
import os
import time
import urllib
from collections import namedtuple
from statistics import mean
from urllib.request import urlopen
import bs4
import regex as re
from tika import parser
from scipy.stats import ks_2samp
import config
from TFU.trueformathtml import TrueFormatUpmarkerHTML
from TFU.trueformatpdf2htmlEX import TrueFormatUpmarkerPdf2HTMLEX
from helpers.str_tools import remove_ugly_chars
class PaperReader:
    """Multimedial extractor: reads text from papers in pdfs, urls, html and other sources.

    Formatting makes processing harder: the text is cluttered with publisher
    remarks on every page, page and line numbers and other noise that must be
    ignored, especially when joining the texts of different pages, where
    sentences continue.

    Prose is detected by comparing the letter distribution of normal prose to
    each extracted paragraph (two-sample Kolmogorov-Smirnov test).
    """

    def __init__(self, _threshold=0.001, _length_limit=20000):
        """:param _threshold: minimum KS p-value for a paragraph to count as prose
        :param _length_limit: maximum number of characters returned by analyse()
        """
        with open(config.wordlist, 'r') as f:
            # readlines() keeps the trailing newline, so len(w) >= 4 keeps lines of >= 3 letters.
            self.wordlist = [w for w in list(f.readlines()) if len(w) >= 4]
        self.tfu_pdf = TrueFormatUpmarkerPdf2HTMLEX()
        self.tfu_html = TrueFormatUpmarkerHTML()
        self.length_limit = _length_limit
        self.threshold = _threshold
        # Reference sample: letter distribution of ordinary English prose.
        self.normal_data = list(
            'used are variants of the predicate calculus. He even says, Lately those who think they ought to be so regarded seem to be winning. Under these circumstances, it does seem odd for McDermott to devote much space to complaining about the logical basis of a book whose very title proclaims it is about logical foundations. In any case, given such a title, it wouldnt seem necessary that readers should be warned that the foundations being explored are not In competition with this diversity is the idea of a unified model of inference. The desire for such a model is strong among those who study declarative representations, and Genesereth and Nilsson are no exception. As are most of their colleagues, they are drawn to the model of inference as the derivation of conclusions that are entailed by a set of beliefs. They wander from this idea in a few places but not for long. It is not hard to see why: Deduction is one of the fews kinds of inference for which we have an interesting general theory. '
            .lower())

    def just_extract_text_from_html(self, adress):
        """Return the visible text of an html document given as url or local path.

        BUGFIX: the previous version used the decoded markup *string* as a
        context manager (``with urlopen(...).read().decode() as ...``), which
        raised AttributeError for every real url (and only ValueError was
        caught, so the fallback never ran), and passed the invalid keyword
        ``parent='lxml'`` to BeautifulSoup instead of ``features='lxml'``.
        """
        logging.info(f'extracting text from {adress}')
        try:
            # urlopen raises ValueError for plain filesystem paths.
            markup = urlopen(adress).read().decode('utf-8')
        except ValueError:
            with open(adress, 'r') as fdoc:
                markup = fdoc.read()
        soup = bs4.BeautifulSoup(markup, features='lxml')
        return self.get_only_real_words(soup.get_text(), self.wordlist)

    def parse_file_format(self, adress):
        """Convert a pdf or html document, index its words and publish the results.

        Side effects: may run ``pdf2htmlEX`` and ``cp`` through os.system,
        writes the indexed html, a json word index and a plain-text dump, and
        sets ``self.text`` and ``self.paths``. Always returns None.
        """
        if adress.endswith('pdf'):
            paths = self.pdfpath2htmlpaths(adress)
            if config.parse_pdf2htmlEX:
                # Optional pdf -> html conversion step, controlled by config.
                os.system(
                    f'pdf2htmlEX --optimize-text 1 --fit-width {config.reader_width} "{adress}" "{paths.html_before_indexing}"'
                )
            tfu = self.tfu_pdf
        elif adress.endswith('html'):
            tfu = self.tfu_html
            paths = self.htmlpath2htmlpaths(adress)
            logging.warning('trying with html...')
        else:
            logging.error(f"File '{adress}' could not be processed")
            return None
        tfu.convert_and_index(paths.html_before_indexing,
                              paths.html_after_indexing)
        tfu.save_doc_json(paths.json_path)
        # Publish the indexed html into the apache-served directory.
        os.system(f'cp "{paths.html_after_indexing}" "{paths.apache_path}"')
        self.text = ' '.join(list(tfu.indexed_words.values()))
        # Plain-text dump, needed for topic modelling.
        with open(paths.txt_path, 'w') as f:
            f.write(self.text)
        logging.debug(paths)
        self.paths = paths
        time.sleep(2)
        # BUGFIX: log a short preview ([:100]) instead of dumping everything
        # after the first 100 chars ([100:]) into the log.
        logging.info(f'extracted text: {self.text[:100]}')
        return None

    def load_url(self, adress):
        """Fetch *adress* over http and run tika text extraction into self.text."""
        response = urllib.request.urlopen(adress)
        data = response.read()
        self.text = parser.from_buffer(data)

    def analyse(self):
        """
        Extracts prose text from the loaded texts, that may contain line numbers somewhere, adresses, journal links etc.

        :return str: prose text, truncated to ``self.length_limit`` characters
        """
        logging.info('transferring text to CorpusCook...')
        paragraphs = self.text.split('\n\n')
        mean_length = mean([len(p) for p in paragraphs])
        print('mean length of splitted lines', mean_length)
        if mean_length > 80:
            # Hard line wraps survived extraction: undo hyphenation at line
            # ends and flatten in-paragraph newlines and semicolons.
            paragraphs = [re.sub(r'- *\n', '', p) for p in paragraphs]
            paragraphs = [p.replace('\n', ' ') for p in paragraphs]
            paragraphs = [p.replace(';', ' ') for p in paragraphs]
        joiner = ' '  # identical in both branches of the original code
        processed_text = joiner.join([p for p in paragraphs if p and
            ks_2samp(self.normal_data, list(p)).pvalue > self.threshold])
        return processed_text.strip()[:self.length_limit]

    # Container for all filesystem locations derived for one document.
    DocPaths = namedtuple('DocPaths', ['html_before_indexing',
        'html_after_indexing', 'apache_path', 'json_path', 'txt_path'])

    def pdfpath2htmlpaths(self, adress):
        """Derive every output path for one pdf document from its filename."""
        filename = os.path.basename(adress)
        html_before_indexing = (config.appcorpuscook_docs_document_dir +
            filename + '.html')
        filename = remove_ugly_chars(filename)
        html_after_indexing = (config.appcorpuscook_docs_document_dir +
            filename + '.pdf2htmlEX.html')
        json_path = config.appcorpuscook_docs_json_dir + filename + '.json'
        txt_path = config.appcorpuscook_docs_txt_dir + filename + '.txt'
        apache_path = config.apache_dir_document + filename + '.html'
        return self.DocPaths(html_before_indexing, html_after_indexing,
            apache_path, json_path, txt_path)

    def get_only_real_words(self, text, wordlist):
        # Word-list filtering is deliberately disabled; *wordlist* is unused.
        return text

    def htmlpath2htmlpaths(self, adress):
        """Derive every output path for one html document from its filename."""
        filename = os.path.basename(adress)
        html_before_indexing = (config.appcorpuscook_diff_document_dir +
            filename)
        filename = remove_ugly_chars(filename)
        html_after_indexing = (config.appcorpuscook_diff_html_dir +
            filename + '.pdf2htmlEX.html')
        json_path = config.appcorpuscook_diff_json_dir + filename + '.json'
        txt_path = config.appcorpuscook_docs_txt_dir + filename + '.txt'
        apache_path = config.apache_dir_document + filename + '.html'
        return self.DocPaths(html_before_indexing, html_after_indexing,
            apache_path, json_path, txt_path)
<|reserved_special_token_1|>
import logging
import os
import time
import urllib
from collections import namedtuple
from statistics import mean
from urllib.request import urlopen
import bs4
import regex as re
from tika import parser
from scipy.stats import ks_2samp
import config
from TFU.trueformathtml import TrueFormatUpmarkerHTML
from TFU.trueformatpdf2htmlEX import TrueFormatUpmarkerPdf2HTMLEX
from helpers.str_tools import remove_ugly_chars
class PaperReader:
    """Multimedial extractor: reads text from papers in pdfs, urls, html and other sources.

    Formatting makes processing harder: the text is cluttered with publisher
    remarks on every page, page and line numbers and other noise that must be
    ignored, especially when joining the texts of different pages, where
    sentences continue.

    Prose is detected by comparing the letter distribution of normal prose to
    each extracted paragraph (two-sample Kolmogorov-Smirnov test).
    """

    def __init__(self, _threshold=0.001, _length_limit=20000):
        """:param _threshold: minimum KS p-value for a paragraph to count as prose
        :param _length_limit: maximum number of characters returned by analyse()
        """
        with open(config.wordlist, 'r') as f:
            # readlines() keeps the trailing newline, so len(w) >= 4 keeps lines of >= 3 letters.
            self.wordlist = [w for w in list(f.readlines()) if len(w) >= 4]
        self.tfu_pdf = TrueFormatUpmarkerPdf2HTMLEX()
        self.tfu_html = TrueFormatUpmarkerHTML()
        self.length_limit = _length_limit
        self.threshold = _threshold
        # Reference sample: letter distribution of ordinary English prose.
        self.normal_data = list(
            'used are variants of the predicate calculus. He even says, Lately '
            'those who think they ought to be so regarded seem to be winning. '
            'Under these circumstances, it does seem odd for McDermott to devote '
            'much space to complaining about the logical basis of a book whose '
            'very title proclaims it is about logical foundations. In any '
            'case, given such a title, it wouldnt seem necessary that readers '
            'should be warned that the foundations being explored are not '
            'In competition with this diversity is the idea of a unified model '
            'of inference. The desire for such a model is strong among those '
            'who study declarative representations, and Genesereth and Nilsson '
            'are no exception. As are most of their colleagues, they are drawn '
            'to the model of inference as the derivation of conclusions that '
            'are entailed by a set of beliefs. They wander from this idea in a '
            'few places but not for long. It is not hard to see why: Deduction '
            'is one of the fews kinds of inference for which we have an '
            'interesting general theory. '.lower()
        )

    def just_extract_text_from_html(self, adress):
        """Return the visible text of an html document given as url or local path.

        BUGFIX: the previous version used the decoded markup *string* as a
        context manager (``with urlopen(...).read().decode() as ...``), which
        raised AttributeError for every real url (and only ValueError was
        caught, so the fallback never ran), and passed the invalid keyword
        ``parent='lxml'`` to BeautifulSoup instead of ``features='lxml'``.
        """
        logging.info(f'extracting text from {adress}')
        try:
            # urlopen raises ValueError for plain filesystem paths.
            markup = urlopen(adress).read().decode('utf-8')
        except ValueError:
            with open(adress, 'r') as fdoc:
                markup = fdoc.read()
        soup = bs4.BeautifulSoup(markup, features='lxml')
        return self.get_only_real_words(soup.get_text(), self.wordlist)

    def parse_file_format(self, adress):
        """Convert a pdf or html document, index its words and publish the results.

        Side effects: may run ``pdf2htmlEX`` and ``cp`` through os.system,
        writes the indexed html, a json word index and a plain-text dump, and
        sets ``self.text`` and ``self.paths``. Always returns None.
        """
        if adress.endswith('pdf'):
            paths = self.pdfpath2htmlpaths(adress)
            if config.parse_pdf2htmlEX:
                # Optional pdf -> html conversion step, controlled by config.
                os.system(
                    f'pdf2htmlEX --optimize-text 1 --fit-width {config.reader_width} "{adress}" "{paths.html_before_indexing}"'
                )
            tfu = self.tfu_pdf
        elif adress.endswith('html'):
            tfu = self.tfu_html
            paths = self.htmlpath2htmlpaths(adress)
            logging.warning('trying with html...')
        else:
            logging.error(f"File '{adress}' could not be processed")
            return None
        tfu.convert_and_index(paths.html_before_indexing,
                              paths.html_after_indexing)
        tfu.save_doc_json(paths.json_path)
        # Publish the indexed html into the apache-served directory.
        os.system(f'cp "{paths.html_after_indexing}" "{paths.apache_path}"')
        self.text = ' '.join(list(tfu.indexed_words.values()))
        # Plain-text dump, needed for topic modelling.
        with open(paths.txt_path, 'w') as f:
            f.write(self.text)
        logging.debug(paths)
        self.paths = paths
        time.sleep(2)
        # BUGFIX: log a short preview ([:100]) instead of dumping everything
        # after the first 100 chars ([100:]) into the log.
        logging.info(f'extracted text: {self.text[:100]}')
        return None

    def load_url(self, adress):
        """Fetch *adress* over http and run tika text extraction into self.text."""
        response = urllib.request.urlopen(adress)
        data = response.read()
        self.text = parser.from_buffer(data)

    def analyse(self):
        """
        Extracts prose text from the loaded texts, that may contain line numbers somewhere, adresses, journal links etc.

        :return str: prose text, truncated to ``self.length_limit`` characters
        """
        logging.info('transferring text to CorpusCook...')
        paragraphs = self.text.split('\n\n')
        mean_length = mean([len(p) for p in paragraphs])
        print('mean length of splitted lines', mean_length)
        if mean_length > 80:
            # Hard line wraps survived extraction (TIKA kept '\n'): undo
            # hyphenation at line ends and flatten newlines and semicolons.
            paragraphs = [re.sub(r'- *\n', '', p) for p in paragraphs]
            paragraphs = [p.replace('\n', ' ') for p in paragraphs]
            paragraphs = [p.replace(';', ' ') for p in paragraphs]
        joiner = ' '  # identical in both branches of the original code
        processed_text = joiner.join([p for p in paragraphs if p and
            ks_2samp(self.normal_data, list(p)).pvalue > self.threshold])
        return processed_text.strip()[:self.length_limit]

    # Container for all filesystem locations derived for one document.
    DocPaths = namedtuple('DocPaths', ['html_before_indexing',
        'html_after_indexing', 'apache_path', 'json_path', 'txt_path'])

    def pdfpath2htmlpaths(self, adress):
        """Derive every output path for one pdf document from its filename."""
        filename = os.path.basename(adress)
        html_before_indexing = (config.appcorpuscook_docs_document_dir +
            filename + '.html')
        filename = remove_ugly_chars(filename)
        html_after_indexing = (config.appcorpuscook_docs_document_dir +
            filename + '.pdf2htmlEX.html')
        json_path = config.appcorpuscook_docs_json_dir + filename + '.json'
        txt_path = config.appcorpuscook_docs_txt_dir + filename + '.txt'
        apache_path = config.apache_dir_document + filename + '.html'
        return self.DocPaths(html_before_indexing, html_after_indexing,
            apache_path, json_path, txt_path)

    def get_only_real_words(self, text, wordlist):
        # Word-list filtering is deliberately disabled; *wordlist* is unused.
        return text

    def htmlpath2htmlpaths(self, adress):
        """Derive every output path for one html document from its filename."""
        filename = os.path.basename(adress)
        html_before_indexing = (config.appcorpuscook_diff_document_dir +
            filename)
        filename = remove_ugly_chars(filename)
        html_after_indexing = (config.appcorpuscook_diff_html_dir +
            filename + '.pdf2htmlEX.html')
        json_path = config.appcorpuscook_diff_json_dir + filename + '.json'
        txt_path = config.appcorpuscook_docs_txt_dir + filename + '.txt'
        apache_path = config.apache_dir_document + filename + '.html'
        return self.DocPaths(html_before_indexing, html_after_indexing,
            apache_path, json_path, txt_path)
|
flexible
|
{
"blob_id": "4d2cb3e0bdd331a1de7f07eb0109f02c9cf832a8",
"index": 7441,
"step-1": "<mask token>\n\n\nclass PaperReader:\n <mask token>\n\n def __init__(self, _threshold=0.001, _length_limit=20000):\n with open(config.wordlist, 'r') as f:\n self.wordlist = [w for w in list(f.readlines()) if len(w) >= 4]\n self.tfu_pdf = TrueFormatUpmarkerPdf2HTMLEX()\n self.tfu_html = TrueFormatUpmarkerHTML()\n self.length_limit = _length_limit\n self.threshold = _threshold\n self.normal_data = list(\n 'used are variants of the predicate calculus. He even says, Lately those who think they ought to be so regarded seem to be winning. Under these circumstances, it does seem odd for McDermott to devote much space to complaining about the logical basis of a book whose very title proclaims it is about logical foundations. In any case, given such a title, it wouldnt seem necessary that readers should be warned that the foundations being explored are not In competition with this diversity is the idea of a unified model of inference. The desire for such a model is strong among those who study declarative representations, and Genesereth and Nilsson are no exception. As are most of their colleagues, they are drawn to the model of inference as the derivation of conclusions that are entailed by a set of beliefs. They wander from this idea in a few places but not for long. It is not hard to see why: Deduction is one of the fews kinds of inference for which we have an interesting general theory. 
'\n .lower())\n\n def just_extract_text_from_html(self, adress):\n logging.info(f'extracting text from {adress}')\n try:\n with urlopen(adress).read().decode('utf-8') as fdoc:\n soup = bs4.BeautifulSoup(fdoc, parent='lxml')\n return self.get_only_real_words(soup.get_text(), self.wordlist)\n except ValueError:\n with open(adress, 'r') as fdoc:\n soup = bs4.BeautifulSoup(fdoc, features='lxml')\n return self.get_only_real_words(soup.get_text(), self.wordlist)\n\n def parse_file_format(self, adress):\n if adress.endswith('pdf'):\n paths = self.pdfpath2htmlpaths(adress)\n if config.parse_pdf2htmlEX:\n os.system(\n f'pdf2htmlEX --optimize-text 1 --fit-width {config.reader_width} \"{adress}\" \"{paths.html_before_indexing}\"'\n )\n tfu = self.tfu_pdf\n elif adress.endswith('html'):\n tfu = self.tfu_html\n paths = self.htmlpath2htmlpaths(adress)\n logging.warning('trying with html...')\n else:\n logging.error(f\"File '{adress}' could not be processed\")\n return None\n tfu.convert_and_index(paths.html_before_indexing, paths.\n html_after_indexing)\n tfu.save_doc_json(paths.json_path)\n os.system(f'cp \"{paths.html_after_indexing}\" \"{paths.apache_path}\"')\n self.text = ' '.join(list(tfu.indexed_words.values()))\n with open(paths.txt_path, 'w') as f:\n f.write(self.text)\n logging.debug(paths)\n self.paths = paths\n time.sleep(2)\n logging.info(f'extracted text: {self.text[100:]}')\n return None\n\n def load_url(self, adress):\n response = urllib.request.urlopen(adress)\n data = response.read()\n self.text = parser.from_buffer(data)\n\n def analyse(self):\n \"\"\"\n Extracts prose text from the loaded texts, that may contain line numbers somewhere, adresses, journal links etc.\n :return str: prose text\n \"\"\"\n logging.info('transferring text to CorpusCook...')\n paragraphs = self.text.split('\\n\\n')\n print('mean length of splitted lines', mean([len(p) for p in\n paragraphs]))\n if mean([len(p) for p in paragraphs]) > 80:\n paragraphs = [re.sub('- *\\\\n', '', p) for 
p in paragraphs]\n paragraphs = [p.replace('\\n', ' ') for p in paragraphs]\n paragraphs = [p.replace(';', ' ') for p in paragraphs]\n joiner = ' '\n else:\n joiner = ' '\n processed_text = joiner.join([p for p in paragraphs if p and \n ks_2samp(self.normal_data, list(p)).pvalue > self.threshold])\n return processed_text.strip()[:self.length_limit]\n <mask token>\n <mask token>\n\n def get_only_real_words(self, text, wordlist):\n return text\n\n def htmlpath2htmlpaths(self, adress):\n filename = os.path.basename(adress)\n html_before_indexing = (config.appcorpuscook_diff_document_dir +\n filename)\n filename = remove_ugly_chars(filename)\n html_after_indexing = (config.appcorpuscook_diff_html_dir +\n filename + '.pdf2htmlEX.html')\n json_path = config.appcorpuscook_diff_json_dir + filename + '.json'\n txt_path = config.appcorpuscook_docs_txt_dir + filename + '.txt'\n apache_path = config.apache_dir_document + filename + '.html'\n return self.DocPaths(html_before_indexing, html_after_indexing,\n apache_path, json_path, txt_path)\n",
"step-2": "<mask token>\n\n\nclass PaperReader:\n <mask token>\n\n def __init__(self, _threshold=0.001, _length_limit=20000):\n with open(config.wordlist, 'r') as f:\n self.wordlist = [w for w in list(f.readlines()) if len(w) >= 4]\n self.tfu_pdf = TrueFormatUpmarkerPdf2HTMLEX()\n self.tfu_html = TrueFormatUpmarkerHTML()\n self.length_limit = _length_limit\n self.threshold = _threshold\n self.normal_data = list(\n 'used are variants of the predicate calculus. He even says, Lately those who think they ought to be so regarded seem to be winning. Under these circumstances, it does seem odd for McDermott to devote much space to complaining about the logical basis of a book whose very title proclaims it is about logical foundations. In any case, given such a title, it wouldnt seem necessary that readers should be warned that the foundations being explored are not In competition with this diversity is the idea of a unified model of inference. The desire for such a model is strong among those who study declarative representations, and Genesereth and Nilsson are no exception. As are most of their colleagues, they are drawn to the model of inference as the derivation of conclusions that are entailed by a set of beliefs. They wander from this idea in a few places but not for long. It is not hard to see why: Deduction is one of the fews kinds of inference for which we have an interesting general theory. 
'\n .lower())\n\n def just_extract_text_from_html(self, adress):\n logging.info(f'extracting text from {adress}')\n try:\n with urlopen(adress).read().decode('utf-8') as fdoc:\n soup = bs4.BeautifulSoup(fdoc, parent='lxml')\n return self.get_only_real_words(soup.get_text(), self.wordlist)\n except ValueError:\n with open(adress, 'r') as fdoc:\n soup = bs4.BeautifulSoup(fdoc, features='lxml')\n return self.get_only_real_words(soup.get_text(), self.wordlist)\n\n def parse_file_format(self, adress):\n if adress.endswith('pdf'):\n paths = self.pdfpath2htmlpaths(adress)\n if config.parse_pdf2htmlEX:\n os.system(\n f'pdf2htmlEX --optimize-text 1 --fit-width {config.reader_width} \"{adress}\" \"{paths.html_before_indexing}\"'\n )\n tfu = self.tfu_pdf\n elif adress.endswith('html'):\n tfu = self.tfu_html\n paths = self.htmlpath2htmlpaths(adress)\n logging.warning('trying with html...')\n else:\n logging.error(f\"File '{adress}' could not be processed\")\n return None\n tfu.convert_and_index(paths.html_before_indexing, paths.\n html_after_indexing)\n tfu.save_doc_json(paths.json_path)\n os.system(f'cp \"{paths.html_after_indexing}\" \"{paths.apache_path}\"')\n self.text = ' '.join(list(tfu.indexed_words.values()))\n with open(paths.txt_path, 'w') as f:\n f.write(self.text)\n logging.debug(paths)\n self.paths = paths\n time.sleep(2)\n logging.info(f'extracted text: {self.text[100:]}')\n return None\n\n def load_url(self, adress):\n response = urllib.request.urlopen(adress)\n data = response.read()\n self.text = parser.from_buffer(data)\n\n def analyse(self):\n \"\"\"\n Extracts prose text from the loaded texts, that may contain line numbers somewhere, adresses, journal links etc.\n :return str: prose text\n \"\"\"\n logging.info('transferring text to CorpusCook...')\n paragraphs = self.text.split('\\n\\n')\n print('mean length of splitted lines', mean([len(p) for p in\n paragraphs]))\n if mean([len(p) for p in paragraphs]) > 80:\n paragraphs = [re.sub('- *\\\\n', '', p) for 
p in paragraphs]\n paragraphs = [p.replace('\\n', ' ') for p in paragraphs]\n paragraphs = [p.replace(';', ' ') for p in paragraphs]\n joiner = ' '\n else:\n joiner = ' '\n processed_text = joiner.join([p for p in paragraphs if p and \n ks_2samp(self.normal_data, list(p)).pvalue > self.threshold])\n return processed_text.strip()[:self.length_limit]\n DocPaths = namedtuple('DocPaths', ['html_before_indexing',\n 'html_after_indexing', 'apache_path', 'json_path', 'txt_path'])\n\n def pdfpath2htmlpaths(self, adress):\n filename = os.path.basename(adress)\n html_before_indexing = (config.appcorpuscook_docs_document_dir +\n filename + '.html')\n filename = remove_ugly_chars(filename)\n html_after_indexing = (config.appcorpuscook_docs_document_dir +\n filename + '.pdf2htmlEX.html')\n json_path = config.appcorpuscook_docs_json_dir + filename + '.json'\n txt_path = config.appcorpuscook_docs_txt_dir + filename + '.txt'\n apache_path = config.apache_dir_document + filename + '.html'\n return self.DocPaths(html_before_indexing, html_after_indexing,\n apache_path, json_path, txt_path)\n\n def get_only_real_words(self, text, wordlist):\n return text\n\n def htmlpath2htmlpaths(self, adress):\n filename = os.path.basename(adress)\n html_before_indexing = (config.appcorpuscook_diff_document_dir +\n filename)\n filename = remove_ugly_chars(filename)\n html_after_indexing = (config.appcorpuscook_diff_html_dir +\n filename + '.pdf2htmlEX.html')\n json_path = config.appcorpuscook_diff_json_dir + filename + '.json'\n txt_path = config.appcorpuscook_docs_txt_dir + filename + '.txt'\n apache_path = config.apache_dir_document + filename + '.html'\n return self.DocPaths(html_before_indexing, html_after_indexing,\n apache_path, json_path, txt_path)\n",
"step-3": "<mask token>\n\n\nclass PaperReader:\n \"\"\" multimedial extractor. it reads text from papers in pdfs, urls, html and other things.\n\n Formatting of text makes processing harder, text is cluttered up with remarks of the punlisher on every page,\n page and line numbers and other stuff, that must be ignored with the processing, especially, when joining the\n texts of different pages, where sentences continue.\n\n detecting text by comparing to the letter distribution of normal prose to parts of the text extracted.\n \"\"\"\n\n def __init__(self, _threshold=0.001, _length_limit=20000):\n with open(config.wordlist, 'r') as f:\n self.wordlist = [w for w in list(f.readlines()) if len(w) >= 4]\n self.tfu_pdf = TrueFormatUpmarkerPdf2HTMLEX()\n self.tfu_html = TrueFormatUpmarkerHTML()\n self.length_limit = _length_limit\n self.threshold = _threshold\n self.normal_data = list(\n 'used are variants of the predicate calculus. He even says, Lately those who think they ought to be so regarded seem to be winning. Under these circumstances, it does seem odd for McDermott to devote much space to complaining about the logical basis of a book whose very title proclaims it is about logical foundations. In any case, given such a title, it wouldnt seem necessary that readers should be warned that the foundations being explored are not In competition with this diversity is the idea of a unified model of inference. The desire for such a model is strong among those who study declarative representations, and Genesereth and Nilsson are no exception. As are most of their colleagues, they are drawn to the model of inference as the derivation of conclusions that are entailed by a set of beliefs. They wander from this idea in a few places but not for long. It is not hard to see why: Deduction is one of the fews kinds of inference for which we have an interesting general theory. 
'\n .lower())\n\n def just_extract_text_from_html(self, adress):\n logging.info(f'extracting text from {adress}')\n try:\n with urlopen(adress).read().decode('utf-8') as fdoc:\n soup = bs4.BeautifulSoup(fdoc, parent='lxml')\n return self.get_only_real_words(soup.get_text(), self.wordlist)\n except ValueError:\n with open(adress, 'r') as fdoc:\n soup = bs4.BeautifulSoup(fdoc, features='lxml')\n return self.get_only_real_words(soup.get_text(), self.wordlist)\n\n def parse_file_format(self, adress):\n if adress.endswith('pdf'):\n paths = self.pdfpath2htmlpaths(adress)\n if config.parse_pdf2htmlEX:\n os.system(\n f'pdf2htmlEX --optimize-text 1 --fit-width {config.reader_width} \"{adress}\" \"{paths.html_before_indexing}\"'\n )\n tfu = self.tfu_pdf\n elif adress.endswith('html'):\n tfu = self.tfu_html\n paths = self.htmlpath2htmlpaths(adress)\n logging.warning('trying with html...')\n else:\n logging.error(f\"File '{adress}' could not be processed\")\n return None\n tfu.convert_and_index(paths.html_before_indexing, paths.\n html_after_indexing)\n tfu.save_doc_json(paths.json_path)\n os.system(f'cp \"{paths.html_after_indexing}\" \"{paths.apache_path}\"')\n self.text = ' '.join(list(tfu.indexed_words.values()))\n with open(paths.txt_path, 'w') as f:\n f.write(self.text)\n logging.debug(paths)\n self.paths = paths\n time.sleep(2)\n logging.info(f'extracted text: {self.text[100:]}')\n return None\n\n def load_url(self, adress):\n response = urllib.request.urlopen(adress)\n data = response.read()\n self.text = parser.from_buffer(data)\n\n def analyse(self):\n \"\"\"\n Extracts prose text from the loaded texts, that may contain line numbers somewhere, adresses, journal links etc.\n :return str: prose text\n \"\"\"\n logging.info('transferring text to CorpusCook...')\n paragraphs = self.text.split('\\n\\n')\n print('mean length of splitted lines', mean([len(p) for p in\n paragraphs]))\n if mean([len(p) for p in paragraphs]) > 80:\n paragraphs = [re.sub('- *\\\\n', '', p) for 
p in paragraphs]\n paragraphs = [p.replace('\\n', ' ') for p in paragraphs]\n paragraphs = [p.replace(';', ' ') for p in paragraphs]\n joiner = ' '\n else:\n joiner = ' '\n processed_text = joiner.join([p for p in paragraphs if p and \n ks_2samp(self.normal_data, list(p)).pvalue > self.threshold])\n return processed_text.strip()[:self.length_limit]\n DocPaths = namedtuple('DocPaths', ['html_before_indexing',\n 'html_after_indexing', 'apache_path', 'json_path', 'txt_path'])\n\n def pdfpath2htmlpaths(self, adress):\n filename = os.path.basename(adress)\n html_before_indexing = (config.appcorpuscook_docs_document_dir +\n filename + '.html')\n filename = remove_ugly_chars(filename)\n html_after_indexing = (config.appcorpuscook_docs_document_dir +\n filename + '.pdf2htmlEX.html')\n json_path = config.appcorpuscook_docs_json_dir + filename + '.json'\n txt_path = config.appcorpuscook_docs_txt_dir + filename + '.txt'\n apache_path = config.apache_dir_document + filename + '.html'\n return self.DocPaths(html_before_indexing, html_after_indexing,\n apache_path, json_path, txt_path)\n\n def get_only_real_words(self, text, wordlist):\n return text\n\n def htmlpath2htmlpaths(self, adress):\n filename = os.path.basename(adress)\n html_before_indexing = (config.appcorpuscook_diff_document_dir +\n filename)\n filename = remove_ugly_chars(filename)\n html_after_indexing = (config.appcorpuscook_diff_html_dir +\n filename + '.pdf2htmlEX.html')\n json_path = config.appcorpuscook_diff_json_dir + filename + '.json'\n txt_path = config.appcorpuscook_docs_txt_dir + filename + '.txt'\n apache_path = config.apache_dir_document + filename + '.html'\n return self.DocPaths(html_before_indexing, html_after_indexing,\n apache_path, json_path, txt_path)\n",
"step-4": "import logging\nimport os\nimport time\nimport urllib\nfrom collections import namedtuple\nfrom statistics import mean\nfrom urllib.request import urlopen\nimport bs4\nimport regex as re\nfrom tika import parser\nfrom scipy.stats import ks_2samp\nimport config\nfrom TFU.trueformathtml import TrueFormatUpmarkerHTML\nfrom TFU.trueformatpdf2htmlEX import TrueFormatUpmarkerPdf2HTMLEX\nfrom helpers.str_tools import remove_ugly_chars\n\n\nclass PaperReader:\n \"\"\" multimedial extractor. it reads text from papers in pdfs, urls, html and other things.\n\n Formatting of text makes processing harder, text is cluttered up with remarks of the punlisher on every page,\n page and line numbers and other stuff, that must be ignored with the processing, especially, when joining the\n texts of different pages, where sentences continue.\n\n detecting text by comparing to the letter distribution of normal prose to parts of the text extracted.\n \"\"\"\n\n def __init__(self, _threshold=0.001, _length_limit=20000):\n with open(config.wordlist, 'r') as f:\n self.wordlist = [w for w in list(f.readlines()) if len(w) >= 4]\n self.tfu_pdf = TrueFormatUpmarkerPdf2HTMLEX()\n self.tfu_html = TrueFormatUpmarkerHTML()\n self.length_limit = _length_limit\n self.threshold = _threshold\n self.normal_data = list(\n 'used are variants of the predicate calculus. He even says, Lately those who think they ought to be so regarded seem to be winning. Under these circumstances, it does seem odd for McDermott to devote much space to complaining about the logical basis of a book whose very title proclaims it is about logical foundations. In any case, given such a title, it wouldnt seem necessary that readers should be warned that the foundations being explored are not In competition with this diversity is the idea of a unified model of inference. The desire for such a model is strong among those who study declarative representations, and Genesereth and Nilsson are no exception. 
As are most of their colleagues, they are drawn to the model of inference as the derivation of conclusions that are entailed by a set of beliefs. They wander from this idea in a few places but not for long. It is not hard to see why: Deduction is one of the fews kinds of inference for which we have an interesting general theory. '\n .lower())\n\n def just_extract_text_from_html(self, adress):\n logging.info(f'extracting text from {adress}')\n try:\n with urlopen(adress).read().decode('utf-8') as fdoc:\n soup = bs4.BeautifulSoup(fdoc, parent='lxml')\n return self.get_only_real_words(soup.get_text(), self.wordlist)\n except ValueError:\n with open(adress, 'r') as fdoc:\n soup = bs4.BeautifulSoup(fdoc, features='lxml')\n return self.get_only_real_words(soup.get_text(), self.wordlist)\n\n def parse_file_format(self, adress):\n if adress.endswith('pdf'):\n paths = self.pdfpath2htmlpaths(adress)\n if config.parse_pdf2htmlEX:\n os.system(\n f'pdf2htmlEX --optimize-text 1 --fit-width {config.reader_width} \"{adress}\" \"{paths.html_before_indexing}\"'\n )\n tfu = self.tfu_pdf\n elif adress.endswith('html'):\n tfu = self.tfu_html\n paths = self.htmlpath2htmlpaths(adress)\n logging.warning('trying with html...')\n else:\n logging.error(f\"File '{adress}' could not be processed\")\n return None\n tfu.convert_and_index(paths.html_before_indexing, paths.\n html_after_indexing)\n tfu.save_doc_json(paths.json_path)\n os.system(f'cp \"{paths.html_after_indexing}\" \"{paths.apache_path}\"')\n self.text = ' '.join(list(tfu.indexed_words.values()))\n with open(paths.txt_path, 'w') as f:\n f.write(self.text)\n logging.debug(paths)\n self.paths = paths\n time.sleep(2)\n logging.info(f'extracted text: {self.text[100:]}')\n return None\n\n def load_url(self, adress):\n response = urllib.request.urlopen(adress)\n data = response.read()\n self.text = parser.from_buffer(data)\n\n def analyse(self):\n \"\"\"\n Extracts prose text from the loaded texts, that may contain line numbers 
somewhere, adresses, journal links etc.\n :return str: prose text\n \"\"\"\n logging.info('transferring text to CorpusCook...')\n paragraphs = self.text.split('\\n\\n')\n print('mean length of splitted lines', mean([len(p) for p in\n paragraphs]))\n if mean([len(p) for p in paragraphs]) > 80:\n paragraphs = [re.sub('- *\\\\n', '', p) for p in paragraphs]\n paragraphs = [p.replace('\\n', ' ') for p in paragraphs]\n paragraphs = [p.replace(';', ' ') for p in paragraphs]\n joiner = ' '\n else:\n joiner = ' '\n processed_text = joiner.join([p for p in paragraphs if p and \n ks_2samp(self.normal_data, list(p)).pvalue > self.threshold])\n return processed_text.strip()[:self.length_limit]\n DocPaths = namedtuple('DocPaths', ['html_before_indexing',\n 'html_after_indexing', 'apache_path', 'json_path', 'txt_path'])\n\n def pdfpath2htmlpaths(self, adress):\n filename = os.path.basename(adress)\n html_before_indexing = (config.appcorpuscook_docs_document_dir +\n filename + '.html')\n filename = remove_ugly_chars(filename)\n html_after_indexing = (config.appcorpuscook_docs_document_dir +\n filename + '.pdf2htmlEX.html')\n json_path = config.appcorpuscook_docs_json_dir + filename + '.json'\n txt_path = config.appcorpuscook_docs_txt_dir + filename + '.txt'\n apache_path = config.apache_dir_document + filename + '.html'\n return self.DocPaths(html_before_indexing, html_after_indexing,\n apache_path, json_path, txt_path)\n\n def get_only_real_words(self, text, wordlist):\n return text\n\n def htmlpath2htmlpaths(self, adress):\n filename = os.path.basename(adress)\n html_before_indexing = (config.appcorpuscook_diff_document_dir +\n filename)\n filename = remove_ugly_chars(filename)\n html_after_indexing = (config.appcorpuscook_diff_html_dir +\n filename + '.pdf2htmlEX.html')\n json_path = config.appcorpuscook_diff_json_dir + filename + '.json'\n txt_path = config.appcorpuscook_docs_txt_dir + filename + '.txt'\n apache_path = config.apache_dir_document + filename + '.html'\n return 
self.DocPaths(html_before_indexing, html_after_indexing,\n apache_path, json_path, txt_path)\n",
"step-5": "import logging\nimport os\nimport time\nimport urllib\nfrom collections import namedtuple\nfrom statistics import mean\nfrom urllib.request import urlopen\nimport bs4\nimport regex as re\nfrom tika import parser\nfrom scipy.stats import ks_2samp\n\nimport config\nfrom TFU.trueformathtml import TrueFormatUpmarkerHTML\nfrom TFU.trueformatpdf2htmlEX import TrueFormatUpmarkerPdf2HTMLEX\nfrom helpers.str_tools import remove_ugly_chars\n\nclass PaperReader:\n \"\"\" multimedial extractor. it reads text from papers in pdfs, urls, html and other things.\n\n Formatting of text makes processing harder, text is cluttered up with remarks of the punlisher on every page,\n page and line numbers and other stuff, that must be ignored with the processing, especially, when joining the\n texts of different pages, where sentences continue.\n\n detecting text by comparing to the letter distribution of normal prose to parts of the text extracted.\n \"\"\"\n\n def __init__(self, _threshold=0.001, _length_limit=20000):\n with open(config.wordlist, 'r') as f:\n self.wordlist = [w for w in list(f.readlines()) if len(w) >= 4]\n self.tfu_pdf = TrueFormatUpmarkerPdf2HTMLEX()\n self.tfu_html = TrueFormatUpmarkerHTML()\n\n self.length_limit = _length_limit\n self.threshold = _threshold\n self.normal_data = list(\n 'used are variants of the predicate calculus. He even says, Lately '\n 'those who think they ought to be so regarded seem to be winning. '\n 'Under these circumstances, it does seem odd for McDermott to devote '\n 'much space to complaining about the logical basis of a book whose '\n 'very title proclaims it is about logical foundations. In any '\n 'case, given such a title, it wouldnt seem necessary that readers '\n 'should be warned that the foundations being explored are not '\n 'In competition with this diversity is the idea of a unified model '\n 'of inference. 
The desire for such a model is strong among those '\n 'who study declarative representations, and Genesereth and Nilsson '\n 'are no exception. As are most of their colleagues, they are drawn '\n 'to the model of inference as the derivation of conclusions that '\n 'are entailed by a set of beliefs. They wander from this idea in a '\n 'few places but not for long. It is not hard to see why: Deduction '\n 'is one of the fews kinds of inference for which we have an '\n 'interesting general theory. '.lower()\n )\n\n def just_extract_text_from_html(self, adress):\n logging.info(f\"extracting text from {adress}\")\n try:\n with urlopen(adress).read().decode('utf-8') as fdoc:\n soup = bs4.BeautifulSoup(fdoc, parent=\"lxml\")\n return self.get_only_real_words(soup.get_text(), self.wordlist)\n except ValueError:\n with open(adress, \"r\") as fdoc:\n soup = bs4.BeautifulSoup(fdoc, features='lxml')\n return self.get_only_real_words(soup.get_text(), self.wordlist)\n\n def parse_file_format(self, adress):\n if adress.endswith('pdf'):\n paths = self.pdfpath2htmlpaths(adress)\n\n if config.parse_pdf2htmlEX:\n os.system(f\"pdf2htmlEX \"\n f\"--optimize-text 1 \"\n f\"--fit-width {config.reader_width} \"\n f\"\\\"{adress}\\\" \\\"{paths.html_before_indexing}\\\"\")\n tfu = self.tfu_pdf\n elif adress.endswith('html'):\n tfu = self.tfu_html\n paths = self.htmlpath2htmlpaths(adress)\n logging.warning(\"trying with html...\")\n else:\n logging.error(f\"File '{adress}' could not be processed\")\n return None\n\n tfu.convert_and_index(paths.html_before_indexing, paths.html_after_indexing)\n tfu.save_doc_json(paths.json_path)\n os.system(f\"cp \\\"{paths.html_after_indexing}\\\" \\\"{paths.apache_path}\\\"\")\n self.text = \" \".join(list(tfu.indexed_words.values()))\n\n\n # needed for topic modelling\n with open(paths.txt_path, \"w\") as f:\n f.write(self.text)\n logging.debug(paths)\n self.paths = paths\n time.sleep(2)\n\n logging.info(f\"extracted text: {self.text[100:]}\")\n return 
None\n\n def load_url(self, adress):\n response = urllib.request.urlopen(adress)\n data = response.read() # a `bytes` object\n self.text = parser.from_buffer(data)\n\n def analyse(self):\n \"\"\"\n Extracts prose text from the loaded texts, that may contain line numbers somewhere, adresses, journal links etc.\n :return str: prose text\n \"\"\"\n logging.info(\"transferring text to CorpusCook...\")\n\n paragraphs = self.text.split('\\n\\n')\n print(\"mean length of splitted lines\", (mean([len(p) for p in paragraphs])))\n\n # If TIKA resolved '\\n'\n if (mean([len(p) for p in paragraphs])) > 80:\n paragraphs = [re.sub(r\"- *\\n\", '', p) for p in paragraphs]\n paragraphs = [p.replace('\\n', \" \") for p in paragraphs]\n paragraphs = [p.replace(';', \" \") for p in paragraphs]\n joiner = \" \"\n else:\n # If TIKA did not\n joiner = \" \"\n\n processed_text = joiner.join([p\n for p in paragraphs\n if\n p and\n ks_2samp(self.normal_data, list(p)).pvalue > self.threshold\n ]\n )\n\n return processed_text.strip()[:self.length_limit]\n\n DocPaths = namedtuple(\"DocPaths\", [\"html_before_indexing\",\n \"html_after_indexing\",\n \"apache_path\",\n \"json_path\",\n \"txt_path\"])\n\n def pdfpath2htmlpaths(self, adress):\n # file_extension = os.path.splitext(adress)[1] keep it, but unused\n # path = os.path.dirname(adress)\n filename = os.path.basename(adress)\n html_before_indexing = config.appcorpuscook_docs_document_dir + filename + \".html\"\n filename = remove_ugly_chars(filename)\n html_after_indexing = config.appcorpuscook_docs_document_dir + filename + \".pdf2htmlEX.html\"\n json_path = config.appcorpuscook_docs_json_dir + filename + \".json\"\n txt_path = config.appcorpuscook_docs_txt_dir + filename + \".txt\"\n apache_path = config.apache_dir_document + filename + \".html\"\n\n return self.DocPaths(\n html_before_indexing,\n html_after_indexing,\n apache_path,\n json_path,\n txt_path)\n\n def get_only_real_words(self, text, wordlist):\n return text #\" 
\".join([word for word in text.split() if word in wordlist])\n\n def htmlpath2htmlpaths(self, adress):\n filename = os.path.basename(adress)\n html_before_indexing = config.appcorpuscook_diff_document_dir + filename\n filename = remove_ugly_chars(filename)\n html_after_indexing = config.appcorpuscook_diff_html_dir + filename + \".pdf2htmlEX.html\"\n json_path = config.appcorpuscook_diff_json_dir + filename + \".json\"\n txt_path = config.appcorpuscook_docs_txt_dir + filename + \".txt\"\n apache_path = config.apache_dir_document + filename + \".html\"\n\n return self.DocPaths(\n html_before_indexing,\n html_after_indexing,\n apache_path,\n json_path,\n txt_path)\n\n",
"step-ids": [
8,
10,
11,
12,
13
]
}
|
[
8,
10,
11,
12,
13
] |
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 5 14:23:28 2018
@author: emily
"""
import pipeline
import numpy as np
import matplotlib.pyplot as plt
import pstats
import cProfile
# Profile the whole synthetic-inversion run; the statistics are dumped to a
# text file at the bottom of the script.
pr = cProfile.Profile()
pr.enable()
#def try_running():
# Inversion controls: iteration budget and random seed for JointInversion.
max_it=200000
rnd_sd = 1
# Depth samples (km): 0.2 km spacing to 10 km, 1 km spacing to 60 km,
# then 5 km spacing down to 200 km.
deps = np.concatenate((np.arange(0,10,0.2), np.arange(10,60,1), np.arange(60,201,5)))
# Target ("true") model used to synthesise the observed data below: node
# velocities 3.5-4.7 km/s (13 values) at the depth indices in idep.  The
# noise/smoothing hyperparameters (std_rf, lam_rf, std_swd) are zeroed so
# the synthetic observations are noise-free.
model = pipeline.Model(vs = np.arange(3.5, 4.8, 0.1), all_deps = deps,
                    idep = np.array([25, 50, 60,70,80,90,100,102,104,106,
                                     108,110,112]), 
                    std_rf = 0, lam_rf = 0, std_swd = 0)
# Alternative target models kept for experimentation:
#model = pipeline.Model(vs = np.array([1.8, 2.4, 3.4, 4.5, 4.7, 4.65]), all_deps = deps,
#                    idep = np.array([10, 32, 41, 60, 96, 120]), 
#                    std_rf = 0, lam_rf = 0, std_swd = 0)
#model = pipeline.Model(vs = np.array([3.4, 4.5]), all_deps = deps,
#                    idep = np.array([60, 96]), 
#                    std_rf = 0, lam_rf = 0, std_swd = 0)
# Synthetic "observed" data: a receiver function, plus surface-wave
# dispersion at periods 1/f for f = 0.02..0.09 Hz.  The trailing 1e6 is
# passed straight through to SynthesiseSWD -- presumably a scaling or
# cut-off constant; confirm against pipeline.SynthesiseSWD.
rf_obs = pipeline.SynthesiseRF(pipeline.MakeFullModel(model))
swd_obs = pipeline.SynthesiseSWD(pipeline.MakeFullModel(model), 1/np.arange(0.02,0.1, 0.01), 1e6)
# Prior bounds for every inverted parameter.
all_lims = pipeline.Limits(
        vs = (0.5,5.5), dep = (0,200), std_rf = (0,0.05),
        lam_rf = (0.05, 0.5), std_swd = (0,0.15))
# Run the joint RF + SWD inversion; out[1] holds the saved model ensemble
# (its column 0 is the shared depth axis, used throughout the plotting).
out = pipeline.JointInversion(rf_obs, swd_obs, all_lims, max_it, rnd_sd)
# Resample the true model onto the output depth column for comparison plots.
actual_model = pipeline.SaveModel(pipeline.MakeFullModel(model),out[1][:,0])
#%%
# Post-process the ensemble.  all_models holds one column per saved model;
# column 0 is the shared depth axis (see the plotting code below).
all_models = out[1]
# Keep only the columns flagged as populated (first row > 0)...
good_mods = all_models[:,np.where(all_models[0,]>0)[0]]
nit = good_mods.shape[1]
# ...and of those, only the most recent fifth (earlier burn-in discarded).
good_mods = good_mods[:,-int(nit/5):]
# Pointwise posterior mean and standard deviation over the retained models.
mean_mod = np.mean(good_mods, axis = 1)
std_mod = np.std(good_mods, axis = 1)
# Wrap the mean profile back up as a Model so it can be forward-modelled.
good_mod = pipeline.Model(vs = mean_mod, all_deps = all_models[:,0],
                          idep = np.arange(0,mean_mod.size),
                          lam_rf = 0, std_rf = 0, std_swd = 0)
fullmodel = pipeline.MakeFullModel(good_mod)
# Figure 1, left panel: every saved model in greyscale (later models drawn
# lighter), with the true model overlaid in red.
fig1 = plt.figure();
ax1 = plt.subplot(121)
for k in range(all_models[1,].size-1):
    colstr = str(0.75-k/2/all_models[1,].size)   # grey level per model
    plt.plot(all_models[:,k],all_models[:,0],
             '-',linewidth=1,color=colstr)
ax1.invert_yaxis()   # depth increases downwards
ax1.plot(actual_model,all_models[:,0],'r-',linewidth=3)
ax1.set_xlim((1.5,5))
ax1.set_xlabel('Shear Velocity (km/s)')
ax1.set_ylabel('Depth (km)')
# nit*100 -- presumably one model is saved per 100 iterations; confirm
# against pipeline.JointInversion.
ax1.set_title("{} iterations".format(nit*100))
# Right panel: only the retained (post-burn-in) models, with the posterior
# mean (blue), mean +/- one std dev (cyan) and the true model (dashed red).
ax3 = plt.subplot(122)
for k in range(good_mods[0,].size-1):
    colstr = str(0.85-k/2/good_mods[0,].size)
    ax3.plot(good_mods[:,k],all_models[:,0],
             '-',linewidth=1,color=colstr)
ax3.invert_yaxis()
ax3.plot(mean_mod,all_models[:,0],'b-',linewidth = 2)
ax3.plot(mean_mod+std_mod, all_models[:,0],'c-',linewidth = 1)
ax3.plot(mean_mod-std_mod, all_models[:,0],'c-',linewidth = 1)
ax3.plot(actual_model,all_models[:,0],'r--',linewidth=1)
ax3.set_xlim((1.5,5))
ax3.set_xlabel('Shear Velocity (km/s)')
ax3.set_ylabel('Depth (km)')
ax3.set_title('Most recent {}'.format(good_mods.shape[1]))
# Build a model-space density image: for each (depth, velocity) cell, count
# how many retained models pass through it.
allvels = np.arange(all_lims.vs[0],all_lims.vs[1],0.01)   # 0.01 km/s bins
evendeps = np.arange(0,all_models[-1,0],0.1)              # 0.1 km depth grid
# i_ed[j] = index of the coarse depth sample covering even depth j.  The
# loop runs k downwards, so the smallest k with all_models[k,0] >= depth
# is the assignment that survives.
i_ed = np.zeros(evendeps.shape, dtype = int)
for k in range(all_models[:,0].size-1,0,-1):
    i_ed[all_models[k,0]>=evendeps] = k
mod_space = np.zeros((evendeps.size,allvels.size))
for k in range(1,good_mods.shape[1]):
    even_vels = good_mods[i_ed,-k]
    # Convert velocity to a 0.01 km/s bin index (round first to dodge
    # floating-point error before the integer cast).
    inds = np.round(even_vels-all_lims.vs[0],2)/0.01
    inds = inds.astype(int)
    mod_space[range(mod_space.shape[0]),inds] += 1
plt.tight_layout()
# Figure 2: log-scaled hit-count image (the +1e-1 avoids log10(0)); the
# row flip plus invert_yaxis puts depth increasing downwards.
fig2 = plt.figure()
ax2 = plt.subplot(121)
ax2.imshow(np.log10(mod_space[-1::-1]+1e-1), cmap = 'viridis', aspect = allvels[-1]/evendeps[-1],
           extent = [allvels[0], allvels[-1], evendeps[0], evendeps[-1]])
ax2.invert_yaxis()
ax2.set_xlabel('Shear Velocity (km/s)')
ax2.set_ylabel('Depth (km)')
ax2.xaxis.set_label_position('top')
ax2.xaxis.tick_top()
ax2.set_xlim((1.5,5))
# Data fits: observed data (red) versus data synthesised from the posterior
# mean model (grey).
plt.figure(); plt.title('Receiver Function - real: red; synth: grey')
rft = np.arange(0,rf_obs.dt*rf_obs.amp.size,rf_obs.dt)   # RF time axis (s)
plt.plot(rft, rf_obs.amp, 'r-', linewidth=2)
synth_rf = pipeline.SynthesiseRF(fullmodel)
plt.plot(rft,synth_rf.amp, '-',color = '0.25', linewidth=1)
synth_swd = pipeline.SynthesiseSWD(fullmodel, swd_obs.period, 1e6)
plt.figure(); plt.title('Surface Wave Dispersion - real: red; synth: grey')
plt.plot(swd_obs.period, swd_obs.c, 'r-', linewidth=2)
plt.plot(synth_swd.period, synth_swd.c, '-',color = '0.25', linewidth=1)
# Convergence diagnostics returned by JointInversion.
plt.figure(); plt.title("Mahalanobis distance (least squares misfit - phi)")
plt.plot(np.log10(out[2]))
plt.figure(); plt.title("Likelihood of accepting new model - alpha(m|m0)")
plt.plot(np.log10(out[3]))
# Mean of out[4] -- presumably the overall acceptance rate; confirm against
# pipeline.JointInversion.
print(np.mean(out[4]))
#%%
# Stop profiling and dump the statistics, sorted by cumulative time, to a
# text file for offline inspection.
pr.disable()
# Use a context manager so the output file is closed even if printing the
# stats raises (the original left the handle open on error).
with open('thingy4.txt', 'w') as stats_file:
    ps = pstats.Stats(pr, stream=stats_file).sort_stats('cumulative')
    ps.print_stats()
|
normal
|
{
"blob_id": "cfe5d013c968afdbf1fc80e3c8c3233a3678450b",
"index": 9848,
"step-1": "<mask token>\n",
"step-2": "<mask token>\npr.enable()\n<mask token>\nfor k in range(all_models[1,].size - 1):\n colstr = str(0.75 - k / 2 / all_models[1,].size)\n plt.plot(all_models[:, k], all_models[:, 0], '-', linewidth=1, color=colstr\n )\nax1.invert_yaxis()\nax1.plot(actual_model, all_models[:, 0], 'r-', linewidth=3)\nax1.set_xlim((1.5, 5))\nax1.set_xlabel('Shear Velocity (km/s)')\nax1.set_ylabel('Depth (km)')\nax1.set_title('{} iterations'.format(nit * 100))\n<mask token>\nfor k in range(good_mods[0,].size - 1):\n colstr = str(0.85 - k / 2 / good_mods[0,].size)\n ax3.plot(good_mods[:, k], all_models[:, 0], '-', linewidth=1, color=colstr)\nax3.invert_yaxis()\nax3.plot(mean_mod, all_models[:, 0], 'b-', linewidth=2)\nax3.plot(mean_mod + std_mod, all_models[:, 0], 'c-', linewidth=1)\nax3.plot(mean_mod - std_mod, all_models[:, 0], 'c-', linewidth=1)\nax3.plot(actual_model, all_models[:, 0], 'r--', linewidth=1)\nax3.set_xlim((1.5, 5))\nax3.set_xlabel('Shear Velocity (km/s)')\nax3.set_ylabel('Depth (km)')\nax3.set_title('Most recent {}'.format(good_mods.shape[1]))\n<mask token>\nfor k in range(all_models[:, 0].size - 1, 0, -1):\n i_ed[all_models[k, 0] >= evendeps] = k\n<mask token>\nfor k in range(1, good_mods.shape[1]):\n even_vels = good_mods[i_ed, -k]\n inds = np.round(even_vels - all_lims.vs[0], 2) / 0.01\n inds = inds.astype(int)\n mod_space[range(mod_space.shape[0]), inds] += 1\nplt.tight_layout()\n<mask token>\nax2.imshow(np.log10(mod_space[-1::-1] + 0.1), cmap='viridis', aspect=\n allvels[-1] / evendeps[-1], extent=[allvels[0], allvels[-1], evendeps[0\n ], evendeps[-1]])\nax2.invert_yaxis()\nax2.set_xlabel('Shear Velocity (km/s)')\nax2.set_ylabel('Depth (km)')\nax2.xaxis.set_label_position('top')\nax2.xaxis.tick_top()\nax2.set_xlim((1.5, 5))\nplt.figure()\nplt.title('Receiver Function - real: red; synth: grey')\n<mask token>\nplt.plot(rft, rf_obs.amp, 'r-', linewidth=2)\n<mask token>\nplt.plot(rft, synth_rf.amp, '-', color='0.25', linewidth=1)\n<mask 
token>\nplt.figure()\nplt.title('Surface Wave Dispersion - real: red; synth: grey')\nplt.plot(swd_obs.period, swd_obs.c, 'r-', linewidth=2)\nplt.plot(synth_swd.period, synth_swd.c, '-', color='0.25', linewidth=1)\nplt.figure()\nplt.title('Mahalanobis distance (least squares misfit - phi)')\nplt.plot(np.log10(out[2]))\nplt.figure()\nplt.title('Likelihood of accepting new model - alpha(m|m0)')\nplt.plot(np.log10(out[3]))\nprint(np.mean(out[4]))\npr.disable()\n<mask token>\nps.print_stats()\ns.close()\n",
"step-3": "<mask token>\npr = cProfile.Profile()\npr.enable()\nmax_it = 200000\nrnd_sd = 1\ndeps = np.concatenate((np.arange(0, 10, 0.2), np.arange(10, 60, 1), np.\n arange(60, 201, 5)))\nmodel = pipeline.Model(vs=np.arange(3.5, 4.8, 0.1), all_deps=deps, idep=np.\n array([25, 50, 60, 70, 80, 90, 100, 102, 104, 106, 108, 110, 112]),\n std_rf=0, lam_rf=0, std_swd=0)\nrf_obs = pipeline.SynthesiseRF(pipeline.MakeFullModel(model))\nswd_obs = pipeline.SynthesiseSWD(pipeline.MakeFullModel(model), 1 / np.\n arange(0.02, 0.1, 0.01), 1000000.0)\nall_lims = pipeline.Limits(vs=(0.5, 5.5), dep=(0, 200), std_rf=(0, 0.05),\n lam_rf=(0.05, 0.5), std_swd=(0, 0.15))\nout = pipeline.JointInversion(rf_obs, swd_obs, all_lims, max_it, rnd_sd)\nactual_model = pipeline.SaveModel(pipeline.MakeFullModel(model), out[1][:, 0])\nall_models = out[1]\ngood_mods = all_models[:, np.where(all_models[0,] > 0)[0]]\nnit = good_mods.shape[1]\ngood_mods = good_mods[:, -int(nit / 5):]\nmean_mod = np.mean(good_mods, axis=1)\nstd_mod = np.std(good_mods, axis=1)\ngood_mod = pipeline.Model(vs=mean_mod, all_deps=all_models[:, 0], idep=np.\n arange(0, mean_mod.size), lam_rf=0, std_rf=0, std_swd=0)\nfullmodel = pipeline.MakeFullModel(good_mod)\nfig1 = plt.figure()\nax1 = plt.subplot(121)\nfor k in range(all_models[1,].size - 1):\n colstr = str(0.75 - k / 2 / all_models[1,].size)\n plt.plot(all_models[:, k], all_models[:, 0], '-', linewidth=1, color=colstr\n )\nax1.invert_yaxis()\nax1.plot(actual_model, all_models[:, 0], 'r-', linewidth=3)\nax1.set_xlim((1.5, 5))\nax1.set_xlabel('Shear Velocity (km/s)')\nax1.set_ylabel('Depth (km)')\nax1.set_title('{} iterations'.format(nit * 100))\nax3 = plt.subplot(122)\nfor k in range(good_mods[0,].size - 1):\n colstr = str(0.85 - k / 2 / good_mods[0,].size)\n ax3.plot(good_mods[:, k], all_models[:, 0], '-', linewidth=1, color=colstr)\nax3.invert_yaxis()\nax3.plot(mean_mod, all_models[:, 0], 'b-', linewidth=2)\nax3.plot(mean_mod + std_mod, all_models[:, 0], 'c-', 
linewidth=1)\nax3.plot(mean_mod - std_mod, all_models[:, 0], 'c-', linewidth=1)\nax3.plot(actual_model, all_models[:, 0], 'r--', linewidth=1)\nax3.set_xlim((1.5, 5))\nax3.set_xlabel('Shear Velocity (km/s)')\nax3.set_ylabel('Depth (km)')\nax3.set_title('Most recent {}'.format(good_mods.shape[1]))\nallvels = np.arange(all_lims.vs[0], all_lims.vs[1], 0.01)\nevendeps = np.arange(0, all_models[-1, 0], 0.1)\ni_ed = np.zeros(evendeps.shape, dtype=int)\nfor k in range(all_models[:, 0].size - 1, 0, -1):\n i_ed[all_models[k, 0] >= evendeps] = k\nmod_space = np.zeros((evendeps.size, allvels.size))\nfor k in range(1, good_mods.shape[1]):\n even_vels = good_mods[i_ed, -k]\n inds = np.round(even_vels - all_lims.vs[0], 2) / 0.01\n inds = inds.astype(int)\n mod_space[range(mod_space.shape[0]), inds] += 1\nplt.tight_layout()\nfig2 = plt.figure()\nax2 = plt.subplot(121)\nax2.imshow(np.log10(mod_space[-1::-1] + 0.1), cmap='viridis', aspect=\n allvels[-1] / evendeps[-1], extent=[allvels[0], allvels[-1], evendeps[0\n ], evendeps[-1]])\nax2.invert_yaxis()\nax2.set_xlabel('Shear Velocity (km/s)')\nax2.set_ylabel('Depth (km)')\nax2.xaxis.set_label_position('top')\nax2.xaxis.tick_top()\nax2.set_xlim((1.5, 5))\nplt.figure()\nplt.title('Receiver Function - real: red; synth: grey')\nrft = np.arange(0, rf_obs.dt * rf_obs.amp.size, rf_obs.dt)\nplt.plot(rft, rf_obs.amp, 'r-', linewidth=2)\nsynth_rf = pipeline.SynthesiseRF(fullmodel)\nplt.plot(rft, synth_rf.amp, '-', color='0.25', linewidth=1)\nsynth_swd = pipeline.SynthesiseSWD(fullmodel, swd_obs.period, 1000000.0)\nplt.figure()\nplt.title('Surface Wave Dispersion - real: red; synth: grey')\nplt.plot(swd_obs.period, swd_obs.c, 'r-', linewidth=2)\nplt.plot(synth_swd.period, synth_swd.c, '-', color='0.25', linewidth=1)\nplt.figure()\nplt.title('Mahalanobis distance (least squares misfit - phi)')\nplt.plot(np.log10(out[2]))\nplt.figure()\nplt.title('Likelihood of accepting new model - 
alpha(m|m0)')\nplt.plot(np.log10(out[3]))\nprint(np.mean(out[4]))\npr.disable()\ns = open('thingy4.txt', 'w')\nsortby = 'cumulative'\nps = pstats.Stats(pr, stream=s).sort_stats(sortby)\nps.print_stats()\ns.close()\n",
"step-4": "<mask token>\nimport pipeline\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pstats\nimport cProfile\npr = cProfile.Profile()\npr.enable()\nmax_it = 200000\nrnd_sd = 1\ndeps = np.concatenate((np.arange(0, 10, 0.2), np.arange(10, 60, 1), np.\n arange(60, 201, 5)))\nmodel = pipeline.Model(vs=np.arange(3.5, 4.8, 0.1), all_deps=deps, idep=np.\n array([25, 50, 60, 70, 80, 90, 100, 102, 104, 106, 108, 110, 112]),\n std_rf=0, lam_rf=0, std_swd=0)\nrf_obs = pipeline.SynthesiseRF(pipeline.MakeFullModel(model))\nswd_obs = pipeline.SynthesiseSWD(pipeline.MakeFullModel(model), 1 / np.\n arange(0.02, 0.1, 0.01), 1000000.0)\nall_lims = pipeline.Limits(vs=(0.5, 5.5), dep=(0, 200), std_rf=(0, 0.05),\n lam_rf=(0.05, 0.5), std_swd=(0, 0.15))\nout = pipeline.JointInversion(rf_obs, swd_obs, all_lims, max_it, rnd_sd)\nactual_model = pipeline.SaveModel(pipeline.MakeFullModel(model), out[1][:, 0])\nall_models = out[1]\ngood_mods = all_models[:, np.where(all_models[0,] > 0)[0]]\nnit = good_mods.shape[1]\ngood_mods = good_mods[:, -int(nit / 5):]\nmean_mod = np.mean(good_mods, axis=1)\nstd_mod = np.std(good_mods, axis=1)\ngood_mod = pipeline.Model(vs=mean_mod, all_deps=all_models[:, 0], idep=np.\n arange(0, mean_mod.size), lam_rf=0, std_rf=0, std_swd=0)\nfullmodel = pipeline.MakeFullModel(good_mod)\nfig1 = plt.figure()\nax1 = plt.subplot(121)\nfor k in range(all_models[1,].size - 1):\n colstr = str(0.75 - k / 2 / all_models[1,].size)\n plt.plot(all_models[:, k], all_models[:, 0], '-', linewidth=1, color=colstr\n )\nax1.invert_yaxis()\nax1.plot(actual_model, all_models[:, 0], 'r-', linewidth=3)\nax1.set_xlim((1.5, 5))\nax1.set_xlabel('Shear Velocity (km/s)')\nax1.set_ylabel('Depth (km)')\nax1.set_title('{} iterations'.format(nit * 100))\nax3 = plt.subplot(122)\nfor k in range(good_mods[0,].size - 1):\n colstr = str(0.85 - k / 2 / good_mods[0,].size)\n ax3.plot(good_mods[:, k], all_models[:, 0], '-', linewidth=1, color=colstr)\nax3.invert_yaxis()\nax3.plot(mean_mod, 
all_models[:, 0], 'b-', linewidth=2)\nax3.plot(mean_mod + std_mod, all_models[:, 0], 'c-', linewidth=1)\nax3.plot(mean_mod - std_mod, all_models[:, 0], 'c-', linewidth=1)\nax3.plot(actual_model, all_models[:, 0], 'r--', linewidth=1)\nax3.set_xlim((1.5, 5))\nax3.set_xlabel('Shear Velocity (km/s)')\nax3.set_ylabel('Depth (km)')\nax3.set_title('Most recent {}'.format(good_mods.shape[1]))\nallvels = np.arange(all_lims.vs[0], all_lims.vs[1], 0.01)\nevendeps = np.arange(0, all_models[-1, 0], 0.1)\ni_ed = np.zeros(evendeps.shape, dtype=int)\nfor k in range(all_models[:, 0].size - 1, 0, -1):\n i_ed[all_models[k, 0] >= evendeps] = k\nmod_space = np.zeros((evendeps.size, allvels.size))\nfor k in range(1, good_mods.shape[1]):\n even_vels = good_mods[i_ed, -k]\n inds = np.round(even_vels - all_lims.vs[0], 2) / 0.01\n inds = inds.astype(int)\n mod_space[range(mod_space.shape[0]), inds] += 1\nplt.tight_layout()\nfig2 = plt.figure()\nax2 = plt.subplot(121)\nax2.imshow(np.log10(mod_space[-1::-1] + 0.1), cmap='viridis', aspect=\n allvels[-1] / evendeps[-1], extent=[allvels[0], allvels[-1], evendeps[0\n ], evendeps[-1]])\nax2.invert_yaxis()\nax2.set_xlabel('Shear Velocity (km/s)')\nax2.set_ylabel('Depth (km)')\nax2.xaxis.set_label_position('top')\nax2.xaxis.tick_top()\nax2.set_xlim((1.5, 5))\nplt.figure()\nplt.title('Receiver Function - real: red; synth: grey')\nrft = np.arange(0, rf_obs.dt * rf_obs.amp.size, rf_obs.dt)\nplt.plot(rft, rf_obs.amp, 'r-', linewidth=2)\nsynth_rf = pipeline.SynthesiseRF(fullmodel)\nplt.plot(rft, synth_rf.amp, '-', color='0.25', linewidth=1)\nsynth_swd = pipeline.SynthesiseSWD(fullmodel, swd_obs.period, 1000000.0)\nplt.figure()\nplt.title('Surface Wave Dispersion - real: red; synth: grey')\nplt.plot(swd_obs.period, swd_obs.c, 'r-', linewidth=2)\nplt.plot(synth_swd.period, synth_swd.c, '-', color='0.25', linewidth=1)\nplt.figure()\nplt.title('Mahalanobis distance (least squares misfit - 
phi)')\nplt.plot(np.log10(out[2]))\nplt.figure()\nplt.title('Likelihood of accepting new model - alpha(m|m0)')\nplt.plot(np.log10(out[3]))\nprint(np.mean(out[4]))\npr.disable()\ns = open('thingy4.txt', 'w')\nsortby = 'cumulative'\nps = pstats.Stats(pr, stream=s).sort_stats(sortby)\nps.print_stats()\ns.close()\n",
"step-5": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Mar 5 14:23:28 2018\n\n@author: emily\n\"\"\"\n\nimport pipeline\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pstats\nimport cProfile\n \npr = cProfile.Profile()\npr.enable()\n\n\n#def try_running():\nmax_it=200000\nrnd_sd = 1\n\n\ndeps = np.concatenate((np.arange(0,10,0.2), np.arange(10,60,1), np.arange(60,201,5)))\nmodel = pipeline.Model(vs = np.arange(3.5, 4.8, 0.1), all_deps = deps,\n idep = np.array([25, 50, 60,70,80,90,100,102,104,106,\n 108,110,112]), \n std_rf = 0, lam_rf = 0, std_swd = 0)\n\n#model = pipeline.Model(vs = np.array([1.8, 2.4, 3.4, 4.5, 4.7, 4.65]), all_deps = deps,\n# idep = np.array([10, 32, 41, 60, 96, 120]), \n# std_rf = 0, lam_rf = 0, std_swd = 0)\n#model = pipeline.Model(vs = np.array([3.4, 4.5]), all_deps = deps,\n# idep = np.array([60, 96]), \n# std_rf = 0, lam_rf = 0, std_swd = 0)\n\n\nrf_obs = pipeline.SynthesiseRF(pipeline.MakeFullModel(model))\nswd_obs = pipeline.SynthesiseSWD(pipeline.MakeFullModel(model), 1/np.arange(0.02,0.1, 0.01), 1e6)\nall_lims = pipeline.Limits(\n vs = (0.5,5.5), dep = (0,200), std_rf = (0,0.05),\n lam_rf = (0.05, 0.5), std_swd = (0,0.15))\n\nout = pipeline.JointInversion(rf_obs, swd_obs, all_lims, max_it, rnd_sd)\n\nactual_model = pipeline.SaveModel(pipeline.MakeFullModel(model),out[1][:,0])\n#%%\nall_models = out[1]\ngood_mods = all_models[:,np.where(all_models[0,]>0)[0]]\nnit = good_mods.shape[1]\ngood_mods = good_mods[:,-int(nit/5):]\nmean_mod = np.mean(good_mods, axis = 1)\nstd_mod = np.std(good_mods, axis = 1)\n\ngood_mod = pipeline.Model(vs = mean_mod, all_deps = all_models[:,0],\n idep = np.arange(0,mean_mod.size),\n lam_rf = 0, std_rf = 0, std_swd = 0)\nfullmodel = pipeline.MakeFullModel(good_mod)\n\n\n\nfig1 = plt.figure();\n\nax1 = plt.subplot(121)\nfor k in range(all_models[1,].size-1): \n colstr = str(0.75-k/2/all_models[1,].size)\n plt.plot(all_models[:,k],all_models[:,0],\n 
'-',linewidth=1,color=colstr)\nax1.invert_yaxis()\nax1.plot(actual_model,all_models[:,0],'r-',linewidth=3)\nax1.set_xlim((1.5,5))\nax1.set_xlabel('Shear Velocity (km/s)')\nax1.set_ylabel('Depth (km)')\nax1.set_title(\"{} iterations\".format(nit*100))\n\nax3 = plt.subplot(122)\nfor k in range(good_mods[0,].size-1): \n colstr = str(0.85-k/2/good_mods[0,].size)\n ax3.plot(good_mods[:,k],all_models[:,0],\n '-',linewidth=1,color=colstr)\nax3.invert_yaxis()\nax3.plot(mean_mod,all_models[:,0],'b-',linewidth = 2)\nax3.plot(mean_mod+std_mod, all_models[:,0],'c-',linewidth = 1)\nax3.plot(mean_mod-std_mod, all_models[:,0],'c-',linewidth = 1)\nax3.plot(actual_model,all_models[:,0],'r--',linewidth=1)\nax3.set_xlim((1.5,5))\nax3.set_xlabel('Shear Velocity (km/s)')\nax3.set_ylabel('Depth (km)')\n\nax3.set_title('Most recent {}'.format(good_mods.shape[1]))\n\n\nallvels = np.arange(all_lims.vs[0],all_lims.vs[1],0.01)\nevendeps = np.arange(0,all_models[-1,0],0.1)\ni_ed = np.zeros(evendeps.shape, dtype = int)\nfor k in range(all_models[:,0].size-1,0,-1):\n i_ed[all_models[k,0]>=evendeps] = k\n \nmod_space = np.zeros((evendeps.size,allvels.size))\nfor k in range(1,good_mods.shape[1]):\n even_vels = good_mods[i_ed,-k]\n inds = np.round(even_vels-all_lims.vs[0],2)/0.01\n inds = inds.astype(int)\n mod_space[range(mod_space.shape[0]),inds] += 1 \n\nplt.tight_layout()\n\nfig2 = plt.figure()\nax2 = plt.subplot(121)\nax2.imshow(np.log10(mod_space[-1::-1]+1e-1), cmap = 'viridis', aspect = allvels[-1]/evendeps[-1],\n extent = [allvels[0], allvels[-1], evendeps[0], evendeps[-1]])\nax2.invert_yaxis()\nax2.set_xlabel('Shear Velocity (km/s)')\nax2.set_ylabel('Depth (km)')\nax2.xaxis.set_label_position('top')\nax2.xaxis.tick_top()\nax2.set_xlim((1.5,5))\n\nplt.figure(); plt.title('Receiver Function - real: red; synth: grey')\nrft = np.arange(0,rf_obs.dt*rf_obs.amp.size,rf_obs.dt)\nplt.plot(rft, rf_obs.amp, 'r-', linewidth=2)\nsynth_rf = pipeline.SynthesiseRF(fullmodel)\nplt.plot(rft,synth_rf.amp, 
'-',color = '0.25', linewidth=1)\n\nsynth_swd = pipeline.SynthesiseSWD(fullmodel, swd_obs.period, 1e6)\nplt.figure(); plt.title('Surface Wave Dispersion - real: red; synth: grey')\nplt.plot(swd_obs.period, swd_obs.c, 'r-', linewidth=2)\nplt.plot(synth_swd.period, synth_swd.c, '-',color = '0.25', linewidth=1)\n\n\nplt.figure(); plt.title(\"Mahalanobis distance (least squares misfit - phi)\")\nplt.plot(np.log10(out[2]))\n\nplt.figure(); plt.title(\"Likelihood of accepting new model - alpha(m|m0)\")\nplt.plot(np.log10(out[3]))\n\nprint(np.mean(out[4]))\n#%%\npr.disable()\ns=open('thingy4.txt','w')\nsortby = 'cumulative'\nps = pstats.Stats(pr, stream=s).sort_stats(sortby)\nps.print_stats()\ns.close()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# -*- coding: UTF-8 -*-
# File name: ukWorkingDays
# Created by JKChang
# 29/07/2020, 11:20
# Tag:
# Description:
from datetime import date,timedelta,datetime
from workalendar.europe import UnitedKingdom
# Show this year's UK public holidays before listing the working days.
uk_calendar = UnitedKingdom()
print(uk_calendar.holidays(2020))
def workingDate(start,end):
cal = UnitedKingdom()
res = []
delta = end - start
for i in range(delta.days +1):
day = start + timedelta(days=i)
if cal.is_working_day(day) or day.weekday() < 5:
res.append(day)
else:
pass
return res
start = datetime.today()
end = datetime(2020, 12, 23)
r = workingDate(start,end)
for d in r:
print(d.strftime('%d-%B-%Y'))
print('\n'*3)
|
normal
|
{
"blob_id": "feed412278d9e711e49ef209ece0876c1de4a873",
"index": 886,
"step-1": "<mask token>\n\n\ndef workingDate(start, end):\n cal = UnitedKingdom()\n res = []\n delta = end - start\n for i in range(delta.days + 1):\n day = start + timedelta(days=i)\n if cal.is_working_day(day) or day.weekday() < 5:\n res.append(day)\n else:\n pass\n return res\n\n\n<mask token>\n",
"step-2": "<mask token>\nprint(cal.holidays(2020))\n\n\ndef workingDate(start, end):\n cal = UnitedKingdom()\n res = []\n delta = end - start\n for i in range(delta.days + 1):\n day = start + timedelta(days=i)\n if cal.is_working_day(day) or day.weekday() < 5:\n res.append(day)\n else:\n pass\n return res\n\n\n<mask token>\nfor d in r:\n print(d.strftime('%d-%B-%Y'))\n print('\\n' * 3)\n",
"step-3": "<mask token>\ncal = UnitedKingdom()\nprint(cal.holidays(2020))\n\n\ndef workingDate(start, end):\n cal = UnitedKingdom()\n res = []\n delta = end - start\n for i in range(delta.days + 1):\n day = start + timedelta(days=i)\n if cal.is_working_day(day) or day.weekday() < 5:\n res.append(day)\n else:\n pass\n return res\n\n\nstart = datetime.today()\nend = datetime(2020, 12, 23)\nr = workingDate(start, end)\nfor d in r:\n print(d.strftime('%d-%B-%Y'))\n print('\\n' * 3)\n",
"step-4": "from datetime import date, timedelta, datetime\nfrom workalendar.europe import UnitedKingdom\ncal = UnitedKingdom()\nprint(cal.holidays(2020))\n\n\ndef workingDate(start, end):\n cal = UnitedKingdom()\n res = []\n delta = end - start\n for i in range(delta.days + 1):\n day = start + timedelta(days=i)\n if cal.is_working_day(day) or day.weekday() < 5:\n res.append(day)\n else:\n pass\n return res\n\n\nstart = datetime.today()\nend = datetime(2020, 12, 23)\nr = workingDate(start, end)\nfor d in r:\n print(d.strftime('%d-%B-%Y'))\n print('\\n' * 3)\n",
"step-5": "# -*- coding: UTF-8 -*-\n# File name: ukWorkingDays\n# Created by JKChang\n# 29/07/2020, 11:20\n# Tag:\n# Description:\n\nfrom datetime import date,timedelta,datetime\nfrom workalendar.europe import UnitedKingdom\n\ncal = UnitedKingdom()\nprint(cal.holidays(2020))\n\ndef workingDate(start,end):\n cal = UnitedKingdom()\n res = []\n delta = end - start\n for i in range(delta.days +1):\n day = start + timedelta(days=i)\n if cal.is_working_day(day) or day.weekday() < 5:\n res.append(day)\n else:\n pass\n return res\n\nstart = datetime.today()\nend = datetime(2020, 12, 23)\nr = workingDate(start,end)\nfor d in r:\n print(d.strftime('%d-%B-%Y'))\n print('\\n'*3)\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
context.set_state('ProjectSummary', 'proposal_code', '2017.1.01355.L')
context.set_state('ProjectSummary', 'piname', 'unknown')
context.set_state('ProjectSummary', 'proposal_title', 'unknown')
context.set_state('ProjectStructure', 'ous_part_id', 'X113490217')
context.set_state('ProjectStructure', 'ous_title', 'Undefined')
context.set_state('ProjectStructure', 'ppr_file',
'/opt/dared/opt/qa56.1712.1/mnt/dataproc/2017.1.01355.L_2018_07_02T11_45_04.304/SOUS_uid___A001_X1296_X1f5/GOUS_uid___A001_X1296_X1f6/MOUS_uid___A001_X1296_X1fd/working/PPR_uid___A001_X1296_X1fe.xml'
)
context.set_state('ProjectStructure', 'ps_entity_id', 'uid://A001/X1220/Xddd')
context.set_state('ProjectStructure', 'recipe_name', 'hsd_calimage')
context.set_state('ProjectStructure', 'ous_entity_id', 'uid://A001/X1220/Xdd9')
context.set_state('ProjectStructure', 'ousstatus_entity_id',
'uid://A001/X1296/X1fd')
try:
hsd_importdata(vis=['uid___A002_Xcf3a9c_X2efb',
'uid___A002_Xcf4672_X1a17'], session=['session_1', 'session_2'])
hsd_flagdata(pipelinemode='automatic')
h_tsyscal(pipelinemode='automatic')
hsd_tsysflag(pipelinemode='automatic')
hsd_skycal(pipelinemode='automatic')
hsd_k2jycal(pipelinemode='automatic')
hsd_applycal(pipelinemode='automatic')
hsd_baseline(pipelinemode='automatic')
hsd_blflag(pipelinemode='automatic')
hsd_baseline(pipelinemode='automatic')
hsd_blflag(pipelinemode='automatic')
hsd_imaging(pipelinemode='automatic')
finally:
h_save()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
__rethrow_casa_exceptions = True
context = h_init()
context.set_state('ProjectSummary', 'proposal_code', '2017.1.01355.L')
context.set_state('ProjectSummary', 'piname', 'unknown')
context.set_state('ProjectSummary', 'proposal_title', 'unknown')
context.set_state('ProjectStructure', 'ous_part_id', 'X113490217')
context.set_state('ProjectStructure', 'ous_title', 'Undefined')
context.set_state('ProjectStructure', 'ppr_file',
'/opt/dared/opt/qa56.1712.1/mnt/dataproc/2017.1.01355.L_2018_07_02T11_45_04.304/SOUS_uid___A001_X1296_X1f5/GOUS_uid___A001_X1296_X1f6/MOUS_uid___A001_X1296_X1fd/working/PPR_uid___A001_X1296_X1fe.xml'
)
context.set_state('ProjectStructure', 'ps_entity_id', 'uid://A001/X1220/Xddd')
context.set_state('ProjectStructure', 'recipe_name', 'hsd_calimage')
context.set_state('ProjectStructure', 'ous_entity_id', 'uid://A001/X1220/Xdd9')
context.set_state('ProjectStructure', 'ousstatus_entity_id',
'uid://A001/X1296/X1fd')
try:
hsd_importdata(vis=['uid___A002_Xcf3a9c_X2efb',
'uid___A002_Xcf4672_X1a17'], session=['session_1', 'session_2'])
hsd_flagdata(pipelinemode='automatic')
h_tsyscal(pipelinemode='automatic')
hsd_tsysflag(pipelinemode='automatic')
hsd_skycal(pipelinemode='automatic')
hsd_k2jycal(pipelinemode='automatic')
hsd_applycal(pipelinemode='automatic')
hsd_baseline(pipelinemode='automatic')
hsd_blflag(pipelinemode='automatic')
hsd_baseline(pipelinemode='automatic')
hsd_blflag(pipelinemode='automatic')
hsd_imaging(pipelinemode='automatic')
finally:
h_save()
<|reserved_special_token_1|>
from recipes.almahelpers import fixsyscaltimes
__rethrow_casa_exceptions = True
context = h_init()
context.set_state('ProjectSummary', 'proposal_code', '2017.1.01355.L')
context.set_state('ProjectSummary', 'piname', 'unknown')
context.set_state('ProjectSummary', 'proposal_title', 'unknown')
context.set_state('ProjectStructure', 'ous_part_id', 'X113490217')
context.set_state('ProjectStructure', 'ous_title', 'Undefined')
context.set_state('ProjectStructure', 'ppr_file',
'/opt/dared/opt/qa56.1712.1/mnt/dataproc/2017.1.01355.L_2018_07_02T11_45_04.304/SOUS_uid___A001_X1296_X1f5/GOUS_uid___A001_X1296_X1f6/MOUS_uid___A001_X1296_X1fd/working/PPR_uid___A001_X1296_X1fe.xml'
)
context.set_state('ProjectStructure', 'ps_entity_id', 'uid://A001/X1220/Xddd')
context.set_state('ProjectStructure', 'recipe_name', 'hsd_calimage')
context.set_state('ProjectStructure', 'ous_entity_id', 'uid://A001/X1220/Xdd9')
context.set_state('ProjectStructure', 'ousstatus_entity_id',
'uid://A001/X1296/X1fd')
try:
hsd_importdata(vis=['uid___A002_Xcf3a9c_X2efb',
'uid___A002_Xcf4672_X1a17'], session=['session_1', 'session_2'])
hsd_flagdata(pipelinemode='automatic')
h_tsyscal(pipelinemode='automatic')
hsd_tsysflag(pipelinemode='automatic')
hsd_skycal(pipelinemode='automatic')
hsd_k2jycal(pipelinemode='automatic')
hsd_applycal(pipelinemode='automatic')
hsd_baseline(pipelinemode='automatic')
hsd_blflag(pipelinemode='automatic')
hsd_baseline(pipelinemode='automatic')
hsd_blflag(pipelinemode='automatic')
hsd_imaging(pipelinemode='automatic')
finally:
h_save()
<|reserved_special_token_1|>
from recipes.almahelpers import fixsyscaltimes # SACM/JAO - Fixes
__rethrow_casa_exceptions = True
context = h_init()
context.set_state('ProjectSummary', 'proposal_code', '2017.1.01355.L')
context.set_state('ProjectSummary', 'piname', 'unknown')
context.set_state('ProjectSummary', 'proposal_title', 'unknown')
context.set_state('ProjectStructure', 'ous_part_id', 'X113490217')
context.set_state('ProjectStructure', 'ous_title', 'Undefined')
context.set_state('ProjectStructure', 'ppr_file', '/opt/dared/opt/qa56.1712.1/mnt/dataproc/2017.1.01355.L_2018_07_02T11_45_04.304/SOUS_uid___A001_X1296_X1f5/GOUS_uid___A001_X1296_X1f6/MOUS_uid___A001_X1296_X1fd/working/PPR_uid___A001_X1296_X1fe.xml')
context.set_state('ProjectStructure', 'ps_entity_id', 'uid://A001/X1220/Xddd')
context.set_state('ProjectStructure', 'recipe_name', 'hsd_calimage')
context.set_state('ProjectStructure', 'ous_entity_id', 'uid://A001/X1220/Xdd9')
context.set_state('ProjectStructure', 'ousstatus_entity_id', 'uid://A001/X1296/X1fd')
try:
hsd_importdata(vis=['uid___A002_Xcf3a9c_X2efb', 'uid___A002_Xcf4672_X1a17'], session=['session_1', 'session_2'])
hsd_flagdata(pipelinemode="automatic")
h_tsyscal(pipelinemode="automatic")
hsd_tsysflag(pipelinemode="automatic")
hsd_skycal(pipelinemode="automatic")
hsd_k2jycal(pipelinemode="automatic")
hsd_applycal(pipelinemode="automatic")
hsd_baseline(pipelinemode="automatic")
hsd_blflag(pipelinemode="automatic")
hsd_baseline(pipelinemode="automatic")
hsd_blflag(pipelinemode="automatic")
hsd_imaging(pipelinemode="automatic")
finally:
h_save()
|
flexible
|
{
"blob_id": "290811317ddb49a7d2a9f44ab7e0b6d201db12e1",
"index": 7532,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ncontext.set_state('ProjectSummary', 'proposal_code', '2017.1.01355.L')\ncontext.set_state('ProjectSummary', 'piname', 'unknown')\ncontext.set_state('ProjectSummary', 'proposal_title', 'unknown')\ncontext.set_state('ProjectStructure', 'ous_part_id', 'X113490217')\ncontext.set_state('ProjectStructure', 'ous_title', 'Undefined')\ncontext.set_state('ProjectStructure', 'ppr_file',\n '/opt/dared/opt/qa56.1712.1/mnt/dataproc/2017.1.01355.L_2018_07_02T11_45_04.304/SOUS_uid___A001_X1296_X1f5/GOUS_uid___A001_X1296_X1f6/MOUS_uid___A001_X1296_X1fd/working/PPR_uid___A001_X1296_X1fe.xml'\n )\ncontext.set_state('ProjectStructure', 'ps_entity_id', 'uid://A001/X1220/Xddd')\ncontext.set_state('ProjectStructure', 'recipe_name', 'hsd_calimage')\ncontext.set_state('ProjectStructure', 'ous_entity_id', 'uid://A001/X1220/Xdd9')\ncontext.set_state('ProjectStructure', 'ousstatus_entity_id',\n 'uid://A001/X1296/X1fd')\ntry:\n hsd_importdata(vis=['uid___A002_Xcf3a9c_X2efb',\n 'uid___A002_Xcf4672_X1a17'], session=['session_1', 'session_2'])\n hsd_flagdata(pipelinemode='automatic')\n h_tsyscal(pipelinemode='automatic')\n hsd_tsysflag(pipelinemode='automatic')\n hsd_skycal(pipelinemode='automatic')\n hsd_k2jycal(pipelinemode='automatic')\n hsd_applycal(pipelinemode='automatic')\n hsd_baseline(pipelinemode='automatic')\n hsd_blflag(pipelinemode='automatic')\n hsd_baseline(pipelinemode='automatic')\n hsd_blflag(pipelinemode='automatic')\n hsd_imaging(pipelinemode='automatic')\nfinally:\n h_save()\n",
"step-3": "<mask token>\n__rethrow_casa_exceptions = True\ncontext = h_init()\ncontext.set_state('ProjectSummary', 'proposal_code', '2017.1.01355.L')\ncontext.set_state('ProjectSummary', 'piname', 'unknown')\ncontext.set_state('ProjectSummary', 'proposal_title', 'unknown')\ncontext.set_state('ProjectStructure', 'ous_part_id', 'X113490217')\ncontext.set_state('ProjectStructure', 'ous_title', 'Undefined')\ncontext.set_state('ProjectStructure', 'ppr_file',\n '/opt/dared/opt/qa56.1712.1/mnt/dataproc/2017.1.01355.L_2018_07_02T11_45_04.304/SOUS_uid___A001_X1296_X1f5/GOUS_uid___A001_X1296_X1f6/MOUS_uid___A001_X1296_X1fd/working/PPR_uid___A001_X1296_X1fe.xml'\n )\ncontext.set_state('ProjectStructure', 'ps_entity_id', 'uid://A001/X1220/Xddd')\ncontext.set_state('ProjectStructure', 'recipe_name', 'hsd_calimage')\ncontext.set_state('ProjectStructure', 'ous_entity_id', 'uid://A001/X1220/Xdd9')\ncontext.set_state('ProjectStructure', 'ousstatus_entity_id',\n 'uid://A001/X1296/X1fd')\ntry:\n hsd_importdata(vis=['uid___A002_Xcf3a9c_X2efb',\n 'uid___A002_Xcf4672_X1a17'], session=['session_1', 'session_2'])\n hsd_flagdata(pipelinemode='automatic')\n h_tsyscal(pipelinemode='automatic')\n hsd_tsysflag(pipelinemode='automatic')\n hsd_skycal(pipelinemode='automatic')\n hsd_k2jycal(pipelinemode='automatic')\n hsd_applycal(pipelinemode='automatic')\n hsd_baseline(pipelinemode='automatic')\n hsd_blflag(pipelinemode='automatic')\n hsd_baseline(pipelinemode='automatic')\n hsd_blflag(pipelinemode='automatic')\n hsd_imaging(pipelinemode='automatic')\nfinally:\n h_save()\n",
"step-4": "from recipes.almahelpers import fixsyscaltimes\n__rethrow_casa_exceptions = True\ncontext = h_init()\ncontext.set_state('ProjectSummary', 'proposal_code', '2017.1.01355.L')\ncontext.set_state('ProjectSummary', 'piname', 'unknown')\ncontext.set_state('ProjectSummary', 'proposal_title', 'unknown')\ncontext.set_state('ProjectStructure', 'ous_part_id', 'X113490217')\ncontext.set_state('ProjectStructure', 'ous_title', 'Undefined')\ncontext.set_state('ProjectStructure', 'ppr_file',\n '/opt/dared/opt/qa56.1712.1/mnt/dataproc/2017.1.01355.L_2018_07_02T11_45_04.304/SOUS_uid___A001_X1296_X1f5/GOUS_uid___A001_X1296_X1f6/MOUS_uid___A001_X1296_X1fd/working/PPR_uid___A001_X1296_X1fe.xml'\n )\ncontext.set_state('ProjectStructure', 'ps_entity_id', 'uid://A001/X1220/Xddd')\ncontext.set_state('ProjectStructure', 'recipe_name', 'hsd_calimage')\ncontext.set_state('ProjectStructure', 'ous_entity_id', 'uid://A001/X1220/Xdd9')\ncontext.set_state('ProjectStructure', 'ousstatus_entity_id',\n 'uid://A001/X1296/X1fd')\ntry:\n hsd_importdata(vis=['uid___A002_Xcf3a9c_X2efb',\n 'uid___A002_Xcf4672_X1a17'], session=['session_1', 'session_2'])\n hsd_flagdata(pipelinemode='automatic')\n h_tsyscal(pipelinemode='automatic')\n hsd_tsysflag(pipelinemode='automatic')\n hsd_skycal(pipelinemode='automatic')\n hsd_k2jycal(pipelinemode='automatic')\n hsd_applycal(pipelinemode='automatic')\n hsd_baseline(pipelinemode='automatic')\n hsd_blflag(pipelinemode='automatic')\n hsd_baseline(pipelinemode='automatic')\n hsd_blflag(pipelinemode='automatic')\n hsd_imaging(pipelinemode='automatic')\nfinally:\n h_save()\n",
"step-5": "from recipes.almahelpers import fixsyscaltimes # SACM/JAO - Fixes\n__rethrow_casa_exceptions = True\ncontext = h_init()\ncontext.set_state('ProjectSummary', 'proposal_code', '2017.1.01355.L')\ncontext.set_state('ProjectSummary', 'piname', 'unknown')\ncontext.set_state('ProjectSummary', 'proposal_title', 'unknown')\ncontext.set_state('ProjectStructure', 'ous_part_id', 'X113490217')\ncontext.set_state('ProjectStructure', 'ous_title', 'Undefined')\ncontext.set_state('ProjectStructure', 'ppr_file', '/opt/dared/opt/qa56.1712.1/mnt/dataproc/2017.1.01355.L_2018_07_02T11_45_04.304/SOUS_uid___A001_X1296_X1f5/GOUS_uid___A001_X1296_X1f6/MOUS_uid___A001_X1296_X1fd/working/PPR_uid___A001_X1296_X1fe.xml')\ncontext.set_state('ProjectStructure', 'ps_entity_id', 'uid://A001/X1220/Xddd')\ncontext.set_state('ProjectStructure', 'recipe_name', 'hsd_calimage')\ncontext.set_state('ProjectStructure', 'ous_entity_id', 'uid://A001/X1220/Xdd9')\ncontext.set_state('ProjectStructure', 'ousstatus_entity_id', 'uid://A001/X1296/X1fd')\ntry:\n hsd_importdata(vis=['uid___A002_Xcf3a9c_X2efb', 'uid___A002_Xcf4672_X1a17'], session=['session_1', 'session_2'])\n hsd_flagdata(pipelinemode=\"automatic\")\n h_tsyscal(pipelinemode=\"automatic\")\n hsd_tsysflag(pipelinemode=\"automatic\")\n hsd_skycal(pipelinemode=\"automatic\")\n hsd_k2jycal(pipelinemode=\"automatic\")\n hsd_applycal(pipelinemode=\"automatic\")\n hsd_baseline(pipelinemode=\"automatic\")\n hsd_blflag(pipelinemode=\"automatic\")\n hsd_baseline(pipelinemode=\"automatic\")\n hsd_blflag(pipelinemode=\"automatic\")\n hsd_imaging(pipelinemode=\"automatic\")\nfinally:\n h_save()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import sys
import numpy as np
from pymongo import MongoClient
from sklearn import linear_model, preprocessing
assert str(sys.argv[1]) is not None
client = MongoClient(str(sys.argv[1]))
db = client.nba_py
variables = ['0', '1', '2', '3', '4',
'5', '6', '7', '8', '9',
'10', '11', '12', '13', '14',
'15', '16', '17', '18', '19', ]
ITERATIONS = 5
MINUTE_RESTRICTION = 15
ALPHA_VALS = [0, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 1]
best_error = 999
best_k = 0
for k in ALPHA_VALS:
total_train_error = 0
total_train_variance = 0
total_test_error = 0
total_test_variance = 0
dumb_total_train_error = 0
dumb_total_train_variance = 0
dumb_total_test_error = 0
dumb_total_test_variance = 0
baseline_error = 0
total_count = 0
for j in range(ITERATIONS):
for i in range(len(variables)):
allData = []
allDumbData = []
cursor = db.playtime_model.find({"PLAYER_GROUP": i, "AVG_MIN": {"$gt": MINUTE_RESTRICTION}})
count = 0
for document in cursor:
dataRow = []
for variable in variables:
dataRow.append(document[variable])
dataRow.append(document['AVG_MIN'])
dataRow.append((document['WIN_CHANCE'])**2)
dataRow.append(document['MIN'])
allData.append(dataRow)
allDumbData.append([document['AVG_MIN'], document['MIN']])
count = count + 1
print("player group: %d, game count: %d" % (i, count))
if (count > 600):
total_count += count
Xy = np.array(allData)
np.random.shuffle(Xy)
X = Xy[ :, range(0, Xy.shape[1]-1) ]
y = Xy[ :, Xy.shape[1]-1 ]
X_normalized = preprocessing.scale(X)
# Separate into Train and Test datasets
train_test_split = int(round(len(y) * 0.7))
X_normalized_train = X_normalized[:train_test_split]
X_normalized_test = X_normalized[train_test_split:]
y_train = y[:train_test_split]
y_test = y[train_test_split:]
# train model
if k == 0:
regr = linear_model.LinearRegression(fit_intercept=True)
else:
regr = linear_model.Lasso(alpha=k, fit_intercept=True)
regr.fit(X_normalized_train, y_train)
# Coefficients
# print('Intercept: ', regr.intercept_) ------------------------------------
# for i in range(regr.coef_.size): -----------------------------------------
# print (variables[i], regr.coef_[i]) ----------------------------------
# print("================") ------------------------------------------------
# Error Analysis
train_error = np.mean((regr.predict(X_normalized_train) - y_train) ** 2)
train_variance = regr.score(X_normalized_train, y_train)
test_error = np.mean((regr.predict(X_normalized_test) - y_test) ** 2)
test_variance = regr.score(X_normalized_test, y_test)
# print("Residual sum of squares for training set: %.2f" % train_error) ----
# print('Variance score: %.2f' % train_variance) ---------------------------
# print("Residual sum of squares for test set: %.2f" % test_error) -
# print('Variance score: %.2f' % test_variance) --------------------
total_train_error += train_error * count
total_train_variance += train_variance * count
total_test_error += test_error * count
total_test_variance += test_variance * count
#~~~~calculate against baseline~~~~~~~~~~~
# Xy = np.array(allDumbData) -----------------------------------
# np.random.shuffle(Xy) ----------------------------------------
# X = Xy[ :, range(0, Xy.shape[1]-1) ] -------------------------
# y = Xy[ :, Xy.shape[1]-1 ] -----------------------------------
# -----------------------------------------------------------------------------
# X_normalized = (X) -------------------------------------------
# -----------------------------------------------------------------------------
# # Separate into Train and Test datasets ----------------------
# train_test_split = int(round(len(y) * 0.7)) ------------------
# X_normalized_train = X_normalized[:train_test_split] ---------
# X_normalized_test = X_normalized[train_test_split:] ----------
# y_train = y[:train_test_split] -------------------------------
# y_test = y[train_test_split:] --------------------------------
# -----------------------------------------------------------------------------
# regr = linear_model.LinearRegression(fit_intercept=True) -----
# regr.fit(X_normalized_train, y_train) ------------------------
# -----------------------------------------------------------------------------
# # Error Analysis ---------------------------------------------
# train_error = np.mean((regr.predict(X_normalized_train) - y_train) ** 2)
# train_variance = regr.score(X_normalized_train, y_train) -----
# test_error = np.mean((regr.predict(X_normalized_test) - y_test) ** 2)
# test_variance = regr.score(X_normalized_test, y_test) --------
# # print("Residual sum of squares for training set: %.2f" % train_error) ----
# # print('Variance score: %.2f' % train_variance) ---------------------------
# # print("Residual sum of squares for dumb test set: %.2f" % test_error)
# # print('Variance score for dumb test set: %.2f' % test_variance) --
# dumb_total_train_error += train_error * count ----------------
# dumb_total_train_variance += train_variance * count ----------
# dumb_total_test_error += test_error * count ------------------
# dumb_total_test_variance += test_variance * count ------------
total_train_error = total_train_error / total_count
total_train_variance = total_train_variance / total_count
total_test_error = total_test_error / total_count
total_test_variance = total_test_variance / total_count
# dumb_total_train_error = dumb_total_train_error / total_count ------------
# dumb_total_train_variance = dumb_total_train_variance / total_count ------
# dumb_total_test_error = dumb_total_test_error / total_count --------------
# dumb_total_test_variance = dumb_total_test_variance / total_count --------
print("alpha-value: %.2f" % k)
print("total_train_error: %.2f" % total_train_error)
print("total_train_variance: %.2f" % total_train_variance)
print("total_test_error: %.2f" % total_test_error)
print("total_test_variance: %.2f" % total_test_variance)
# print("dumb_total_train_error: %.2f" % dumb_total_train_error) -----------
# print("dumb_total_train_variance: %.2f" % dumb_total_train_variance) -----
# print("dumb_total_test_error: %.2f" % dumb_total_test_error) -------------
# print("dumb_total_test_variance: %.2f" % dumb_total_test_variance) -------
# print("total_count: %d" % (total_count / ITERATIONS)) --------------------
if (total_test_error < best_error):
best_error = total_test_error
best_k = k
# Calculate against baseline ---------------------------------------------------
cursor = db.playtime_model.find({"AVG_MIN": {"$gt": MINUTE_RESTRICTION}})
baseline_error = 0.0
count = 0
for document in cursor:
baseline_error += (document['AVG_MIN'] - document['MIN'])**2
count += 1
baseline_error = baseline_error / count
print("baseline error: %.2f" % baseline_error)
print("best error: %.2f, best alpha: %.2f" % (best_error, best_k))
|
normal
|
{
"blob_id": "36682c4ab90cdd22b644906e22ede71254eb42ff",
"index": 2091,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nassert str(sys.argv[1]) is not None\n<mask token>\nfor k in ALPHA_VALS:\n total_train_error = 0\n total_train_variance = 0\n total_test_error = 0\n total_test_variance = 0\n dumb_total_train_error = 0\n dumb_total_train_variance = 0\n dumb_total_test_error = 0\n dumb_total_test_variance = 0\n baseline_error = 0\n total_count = 0\n for j in range(ITERATIONS):\n for i in range(len(variables)):\n allData = []\n allDumbData = []\n cursor = db.playtime_model.find({'PLAYER_GROUP': i, 'AVG_MIN':\n {'$gt': MINUTE_RESTRICTION}})\n count = 0\n for document in cursor:\n dataRow = []\n for variable in variables:\n dataRow.append(document[variable])\n dataRow.append(document['AVG_MIN'])\n dataRow.append(document['WIN_CHANCE'] ** 2)\n dataRow.append(document['MIN'])\n allData.append(dataRow)\n allDumbData.append([document['AVG_MIN'], document['MIN']])\n count = count + 1\n print('player group: %d, game count: %d' % (i, count))\n if count > 600:\n total_count += count\n Xy = np.array(allData)\n np.random.shuffle(Xy)\n X = Xy[:, range(0, Xy.shape[1] - 1)]\n y = Xy[:, Xy.shape[1] - 1]\n X_normalized = preprocessing.scale(X)\n train_test_split = int(round(len(y) * 0.7))\n X_normalized_train = X_normalized[:train_test_split]\n X_normalized_test = X_normalized[train_test_split:]\n y_train = y[:train_test_split]\n y_test = y[train_test_split:]\n if k == 0:\n regr = linear_model.LinearRegression(fit_intercept=True)\n else:\n regr = linear_model.Lasso(alpha=k, fit_intercept=True)\n regr.fit(X_normalized_train, y_train)\n train_error = np.mean((regr.predict(X_normalized_train) -\n y_train) ** 2)\n train_variance = regr.score(X_normalized_train, y_train)\n test_error = np.mean((regr.predict(X_normalized_test) -\n y_test) ** 2)\n test_variance = regr.score(X_normalized_test, y_test)\n total_train_error += train_error * count\n total_train_variance += train_variance * count\n total_test_error += test_error * count\n total_test_variance += test_variance * count\n 
total_train_error = total_train_error / total_count\n total_train_variance = total_train_variance / total_count\n total_test_error = total_test_error / total_count\n total_test_variance = total_test_variance / total_count\n print('alpha-value: %.2f' % k)\n print('total_train_error: %.2f' % total_train_error)\n print('total_train_variance: %.2f' % total_train_variance)\n print('total_test_error: %.2f' % total_test_error)\n print('total_test_variance: %.2f' % total_test_variance)\n if total_test_error < best_error:\n best_error = total_test_error\n best_k = k\n<mask token>\nfor document in cursor:\n baseline_error += (document['AVG_MIN'] - document['MIN']) ** 2\n count += 1\n<mask token>\nprint('baseline error: %.2f' % baseline_error)\nprint('best error: %.2f, best alpha: %.2f' % (best_error, best_k))\n",
"step-3": "<mask token>\nassert str(sys.argv[1]) is not None\nclient = MongoClient(str(sys.argv[1]))\ndb = client.nba_py\nvariables = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11',\n '12', '13', '14', '15', '16', '17', '18', '19']\nITERATIONS = 5\nMINUTE_RESTRICTION = 15\nALPHA_VALS = [0, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 1]\nbest_error = 999\nbest_k = 0\nfor k in ALPHA_VALS:\n total_train_error = 0\n total_train_variance = 0\n total_test_error = 0\n total_test_variance = 0\n dumb_total_train_error = 0\n dumb_total_train_variance = 0\n dumb_total_test_error = 0\n dumb_total_test_variance = 0\n baseline_error = 0\n total_count = 0\n for j in range(ITERATIONS):\n for i in range(len(variables)):\n allData = []\n allDumbData = []\n cursor = db.playtime_model.find({'PLAYER_GROUP': i, 'AVG_MIN':\n {'$gt': MINUTE_RESTRICTION}})\n count = 0\n for document in cursor:\n dataRow = []\n for variable in variables:\n dataRow.append(document[variable])\n dataRow.append(document['AVG_MIN'])\n dataRow.append(document['WIN_CHANCE'] ** 2)\n dataRow.append(document['MIN'])\n allData.append(dataRow)\n allDumbData.append([document['AVG_MIN'], document['MIN']])\n count = count + 1\n print('player group: %d, game count: %d' % (i, count))\n if count > 600:\n total_count += count\n Xy = np.array(allData)\n np.random.shuffle(Xy)\n X = Xy[:, range(0, Xy.shape[1] - 1)]\n y = Xy[:, Xy.shape[1] - 1]\n X_normalized = preprocessing.scale(X)\n train_test_split = int(round(len(y) * 0.7))\n X_normalized_train = X_normalized[:train_test_split]\n X_normalized_test = X_normalized[train_test_split:]\n y_train = y[:train_test_split]\n y_test = y[train_test_split:]\n if k == 0:\n regr = linear_model.LinearRegression(fit_intercept=True)\n else:\n regr = linear_model.Lasso(alpha=k, fit_intercept=True)\n regr.fit(X_normalized_train, y_train)\n train_error = np.mean((regr.predict(X_normalized_train) -\n y_train) ** 2)\n train_variance = regr.score(X_normalized_train, y_train)\n 
test_error = np.mean((regr.predict(X_normalized_test) -\n y_test) ** 2)\n test_variance = regr.score(X_normalized_test, y_test)\n total_train_error += train_error * count\n total_train_variance += train_variance * count\n total_test_error += test_error * count\n total_test_variance += test_variance * count\n total_train_error = total_train_error / total_count\n total_train_variance = total_train_variance / total_count\n total_test_error = total_test_error / total_count\n total_test_variance = total_test_variance / total_count\n print('alpha-value: %.2f' % k)\n print('total_train_error: %.2f' % total_train_error)\n print('total_train_variance: %.2f' % total_train_variance)\n print('total_test_error: %.2f' % total_test_error)\n print('total_test_variance: %.2f' % total_test_variance)\n if total_test_error < best_error:\n best_error = total_test_error\n best_k = k\ncursor = db.playtime_model.find({'AVG_MIN': {'$gt': MINUTE_RESTRICTION}})\nbaseline_error = 0.0\ncount = 0\nfor document in cursor:\n baseline_error += (document['AVG_MIN'] - document['MIN']) ** 2\n count += 1\nbaseline_error = baseline_error / count\nprint('baseline error: %.2f' % baseline_error)\nprint('best error: %.2f, best alpha: %.2f' % (best_error, best_k))\n",
"step-4": "import sys\nimport numpy as np\nfrom pymongo import MongoClient\nfrom sklearn import linear_model, preprocessing\nassert str(sys.argv[1]) is not None\nclient = MongoClient(str(sys.argv[1]))\ndb = client.nba_py\nvariables = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11',\n '12', '13', '14', '15', '16', '17', '18', '19']\nITERATIONS = 5\nMINUTE_RESTRICTION = 15\nALPHA_VALS = [0, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 1]\nbest_error = 999\nbest_k = 0\nfor k in ALPHA_VALS:\n total_train_error = 0\n total_train_variance = 0\n total_test_error = 0\n total_test_variance = 0\n dumb_total_train_error = 0\n dumb_total_train_variance = 0\n dumb_total_test_error = 0\n dumb_total_test_variance = 0\n baseline_error = 0\n total_count = 0\n for j in range(ITERATIONS):\n for i in range(len(variables)):\n allData = []\n allDumbData = []\n cursor = db.playtime_model.find({'PLAYER_GROUP': i, 'AVG_MIN':\n {'$gt': MINUTE_RESTRICTION}})\n count = 0\n for document in cursor:\n dataRow = []\n for variable in variables:\n dataRow.append(document[variable])\n dataRow.append(document['AVG_MIN'])\n dataRow.append(document['WIN_CHANCE'] ** 2)\n dataRow.append(document['MIN'])\n allData.append(dataRow)\n allDumbData.append([document['AVG_MIN'], document['MIN']])\n count = count + 1\n print('player group: %d, game count: %d' % (i, count))\n if count > 600:\n total_count += count\n Xy = np.array(allData)\n np.random.shuffle(Xy)\n X = Xy[:, range(0, Xy.shape[1] - 1)]\n y = Xy[:, Xy.shape[1] - 1]\n X_normalized = preprocessing.scale(X)\n train_test_split = int(round(len(y) * 0.7))\n X_normalized_train = X_normalized[:train_test_split]\n X_normalized_test = X_normalized[train_test_split:]\n y_train = y[:train_test_split]\n y_test = y[train_test_split:]\n if k == 0:\n regr = linear_model.LinearRegression(fit_intercept=True)\n else:\n regr = linear_model.Lasso(alpha=k, fit_intercept=True)\n regr.fit(X_normalized_train, y_train)\n train_error = 
np.mean((regr.predict(X_normalized_train) -\n y_train) ** 2)\n train_variance = regr.score(X_normalized_train, y_train)\n test_error = np.mean((regr.predict(X_normalized_test) -\n y_test) ** 2)\n test_variance = regr.score(X_normalized_test, y_test)\n total_train_error += train_error * count\n total_train_variance += train_variance * count\n total_test_error += test_error * count\n total_test_variance += test_variance * count\n total_train_error = total_train_error / total_count\n total_train_variance = total_train_variance / total_count\n total_test_error = total_test_error / total_count\n total_test_variance = total_test_variance / total_count\n print('alpha-value: %.2f' % k)\n print('total_train_error: %.2f' % total_train_error)\n print('total_train_variance: %.2f' % total_train_variance)\n print('total_test_error: %.2f' % total_test_error)\n print('total_test_variance: %.2f' % total_test_variance)\n if total_test_error < best_error:\n best_error = total_test_error\n best_k = k\ncursor = db.playtime_model.find({'AVG_MIN': {'$gt': MINUTE_RESTRICTION}})\nbaseline_error = 0.0\ncount = 0\nfor document in cursor:\n baseline_error += (document['AVG_MIN'] - document['MIN']) ** 2\n count += 1\nbaseline_error = baseline_error / count\nprint('baseline error: %.2f' % baseline_error)\nprint('best error: %.2f, best alpha: %.2f' % (best_error, best_k))\n",
"step-5": "import sys\nimport numpy as np\nfrom pymongo import MongoClient\nfrom sklearn import linear_model, preprocessing\n\nassert str(sys.argv[1]) is not None\nclient = MongoClient(str(sys.argv[1]))\ndb = client.nba_py\n\nvariables = ['0', '1', '2', '3', '4', \n '5', '6', '7', '8', '9', \n '10', '11', '12', '13', '14', \n '15', '16', '17', '18', '19', ]\n\nITERATIONS = 5\nMINUTE_RESTRICTION = 15\nALPHA_VALS = [0, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 1]\n\nbest_error = 999\nbest_k = 0\n\nfor k in ALPHA_VALS: \n total_train_error = 0\n total_train_variance = 0\n total_test_error = 0\n total_test_variance = 0\n dumb_total_train_error = 0\n dumb_total_train_variance = 0\n dumb_total_test_error = 0\n dumb_total_test_variance = 0\n baseline_error = 0\n total_count = 0\n for j in range(ITERATIONS):\n for i in range(len(variables)):\n \n allData = []\n allDumbData = []\n \n cursor = db.playtime_model.find({\"PLAYER_GROUP\": i, \"AVG_MIN\": {\"$gt\": MINUTE_RESTRICTION}})\n \n count = 0\n for document in cursor:\n dataRow = []\n for variable in variables:\n dataRow.append(document[variable])\n dataRow.append(document['AVG_MIN'])\n dataRow.append((document['WIN_CHANCE'])**2)\n dataRow.append(document['MIN'])\n allData.append(dataRow)\n allDumbData.append([document['AVG_MIN'], document['MIN']])\n count = count + 1\n \n print(\"player group: %d, game count: %d\" % (i, count))\n if (count > 600):\n total_count += count\n \n Xy = np.array(allData)\n np.random.shuffle(Xy)\n X = Xy[ :, range(0, Xy.shape[1]-1) ]\n y = Xy[ :, Xy.shape[1]-1 ]\n \n X_normalized = preprocessing.scale(X)\n \n # Separate into Train and Test datasets\n train_test_split = int(round(len(y) * 0.7))\n X_normalized_train = X_normalized[:train_test_split]\n X_normalized_test = X_normalized[train_test_split:]\n y_train = y[:train_test_split]\n y_test = y[train_test_split:]\n \n # train model\n if k == 0: \n regr = linear_model.LinearRegression(fit_intercept=True)\n else: \n regr = 
linear_model.Lasso(alpha=k, fit_intercept=True)\n regr.fit(X_normalized_train, y_train)\n \n # Coefficients\n # print('Intercept: ', regr.intercept_) ------------------------------------\n # for i in range(regr.coef_.size): -----------------------------------------\n # print (variables[i], regr.coef_[i]) ----------------------------------\n # print(\"================\") ------------------------------------------------\n # Error Analysis\n train_error = np.mean((regr.predict(X_normalized_train) - y_train) ** 2)\n train_variance = regr.score(X_normalized_train, y_train)\n test_error = np.mean((regr.predict(X_normalized_test) - y_test) ** 2)\n test_variance = regr.score(X_normalized_test, y_test)\n # print(\"Residual sum of squares for training set: %.2f\" % train_error) ----\n # print('Variance score: %.2f' % train_variance) ---------------------------\n # print(\"Residual sum of squares for test set: %.2f\" % test_error) -\n # print('Variance score: %.2f' % test_variance) --------------------\n total_train_error += train_error * count\n total_train_variance += train_variance * count\n total_test_error += test_error * count\n total_test_variance += test_variance * count\n \n #~~~~calculate against baseline~~~~~~~~~~~\n \n # Xy = np.array(allDumbData) -----------------------------------\n # np.random.shuffle(Xy) ----------------------------------------\n # X = Xy[ :, range(0, Xy.shape[1]-1) ] -------------------------\n # y = Xy[ :, Xy.shape[1]-1 ] -----------------------------------\n# -----------------------------------------------------------------------------\n # X_normalized = (X) -------------------------------------------\n# -----------------------------------------------------------------------------\n # # Separate into Train and Test datasets ----------------------\n # train_test_split = int(round(len(y) * 0.7)) ------------------\n # X_normalized_train = X_normalized[:train_test_split] ---------\n # X_normalized_test = X_normalized[train_test_split:] 
----------\n # y_train = y[:train_test_split] -------------------------------\n # y_test = y[train_test_split:] --------------------------------\n# -----------------------------------------------------------------------------\n # regr = linear_model.LinearRegression(fit_intercept=True) -----\n # regr.fit(X_normalized_train, y_train) ------------------------\n# -----------------------------------------------------------------------------\n # # Error Analysis ---------------------------------------------\n # train_error = np.mean((regr.predict(X_normalized_train) - y_train) ** 2) \n # train_variance = regr.score(X_normalized_train, y_train) -----\n # test_error = np.mean((regr.predict(X_normalized_test) - y_test) ** 2) \n # test_variance = regr.score(X_normalized_test, y_test) --------\n # # print(\"Residual sum of squares for training set: %.2f\" % train_error) ---- \n # # print('Variance score: %.2f' % train_variance) --------------------------- \n # # print(\"Residual sum of squares for dumb test set: %.2f\" % test_error) \n # # print('Variance score for dumb test set: %.2f' % test_variance) -- \n # dumb_total_train_error += train_error * count ----------------\n # dumb_total_train_variance += train_variance * count ----------\n # dumb_total_test_error += test_error * count ------------------\n # dumb_total_test_variance += test_variance * count ------------\n \n total_train_error = total_train_error / total_count\n total_train_variance = total_train_variance / total_count\n total_test_error = total_test_error / total_count\n total_test_variance = total_test_variance / total_count\n # dumb_total_train_error = dumb_total_train_error / total_count ------------\n # dumb_total_train_variance = dumb_total_train_variance / total_count ------\n # dumb_total_test_error = dumb_total_test_error / total_count --------------\n # dumb_total_test_variance = dumb_total_test_variance / total_count --------\n print(\"alpha-value: %.2f\" % k)\n print(\"total_train_error: %.2f\" % 
total_train_error)\n print(\"total_train_variance: %.2f\" % total_train_variance)\n print(\"total_test_error: %.2f\" % total_test_error)\n print(\"total_test_variance: %.2f\" % total_test_variance)\n # print(\"dumb_total_train_error: %.2f\" % dumb_total_train_error) -----------\n # print(\"dumb_total_train_variance: %.2f\" % dumb_total_train_variance) -----\n # print(\"dumb_total_test_error: %.2f\" % dumb_total_test_error) -------------\n # print(\"dumb_total_test_variance: %.2f\" % dumb_total_test_variance) -------\n # print(\"total_count: %d\" % (total_count / ITERATIONS)) --------------------\n \n if (total_test_error < best_error):\n best_error = total_test_error\n best_k = k\n \n# Calculate against baseline ---------------------------------------------------\ncursor = db.playtime_model.find({\"AVG_MIN\": {\"$gt\": MINUTE_RESTRICTION}})\nbaseline_error = 0.0\ncount = 0\nfor document in cursor:\n baseline_error += (document['AVG_MIN'] - document['MIN'])**2\n count += 1\nbaseline_error = baseline_error / count\nprint(\"baseline error: %.2f\" % baseline_error)\nprint(\"best error: %.2f, best alpha: %.2f\" % (best_error, best_k))",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class User(Document):
surname = StringField(required=True)
name = StringField(required=True)
middle_name = StringField(required=True)
phone = StringField(required=True)
email = StringField(required=True)
address = StringField(required=True)
wishes = StringField(required=True)
@bot.message_handler(commands=['start'])
def start(message):
kb = InlineKeyboardMarkup()
button1 = InlineKeyboardButton('Да', callback_data='yes')
button2 = InlineKeyboardButton('Нет', callback_data='no')
kb.add(button1, button2)
bot.send_message(message.chat.id, 'Здравствуйте. Пройдете опрос?',
reply_markup=kb)
<|reserved_special_token_0|>
@bot.callback_query_handler(func=lambda call: call.data == 'yes')
def start(call):
bot.send_message(call.from_user.id, 'Хорошо')
bot.send_message(call.from_user.id, 'Как Вас зовут?')
@bot.message_handler(content_types=['text'])
def response(message):
data['name'] = message.text
bot.send_message(message.chat.id, 'Какая у Вас фамилия?')
bot.register_next_step_handler(message, get_surname)
def get_surname(message):
data['surname'] = message.text
bot.send_message(message.chat.id, 'Как Ваше отчество?')
bot.register_next_step_handler(message, get_middle_name)
<|reserved_special_token_0|>
@bot.message_handler(content_types=['contact'])
def get_phone(message):
data['phone'] = message.contact.phone_number
bot.send_message(message.chat.id, 'Какой у Вас e-mail?')
bot.register_next_step_handler(message, get_email)
def get_email(message):
data['email'] = message.text
bot.send_message(message.chat.id, 'Какой Ваш адрес?')
bot.register_next_step_handler(message, get_address)
def get_address(message):
data['address'] = message.text
bot.send_message(message.chat.id, 'Какие у Вас пожелания?')
bot.register_next_step_handler(message, get_wishes)
def get_wishes(message):
data['wishes'] = message.text
User.objects.create(**data)
bot.send_message(message.chat.id, 'Спасибо.')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class User(Document):
surname = StringField(required=True)
name = StringField(required=True)
middle_name = StringField(required=True)
phone = StringField(required=True)
email = StringField(required=True)
address = StringField(required=True)
wishes = StringField(required=True)
@bot.message_handler(commands=['start'])
def start(message):
kb = InlineKeyboardMarkup()
button1 = InlineKeyboardButton('Да', callback_data='yes')
button2 = InlineKeyboardButton('Нет', callback_data='no')
kb.add(button1, button2)
bot.send_message(message.chat.id, 'Здравствуйте. Пройдете опрос?',
reply_markup=kb)
@bot.callback_query_handler(func=lambda call: call.data == 'no')
def bye(call):
bot.send_message(call.from_user.id, 'До свидания')
@bot.callback_query_handler(func=lambda call: call.data == 'yes')
def start(call):
bot.send_message(call.from_user.id, 'Хорошо')
bot.send_message(call.from_user.id, 'Как Вас зовут?')
@bot.message_handler(content_types=['text'])
def response(message):
data['name'] = message.text
bot.send_message(message.chat.id, 'Какая у Вас фамилия?')
bot.register_next_step_handler(message, get_surname)
def get_surname(message):
data['surname'] = message.text
bot.send_message(message.chat.id, 'Как Ваше отчество?')
bot.register_next_step_handler(message, get_middle_name)
def get_middle_name(message):
data['middle_name'] = message.text
kb = ReplyKeyboardMarkup(resize_keyboard=True)
button = KeyboardButton(text='поделиться контактом', request_contact=True)
kb.add(button)
bot.send_message(message.chat.id, 'Поделитесь номером телефона?',
reply_markup=kb)
bot.register_next_step_handler(message, get_phone)
@bot.message_handler(content_types=['contact'])
def get_phone(message):
data['phone'] = message.contact.phone_number
bot.send_message(message.chat.id, 'Какой у Вас e-mail?')
bot.register_next_step_handler(message, get_email)
def get_email(message):
data['email'] = message.text
bot.send_message(message.chat.id, 'Какой Ваш адрес?')
bot.register_next_step_handler(message, get_address)
def get_address(message):
data['address'] = message.text
bot.send_message(message.chat.id, 'Какие у Вас пожелания?')
bot.register_next_step_handler(message, get_wishes)
def get_wishes(message):
data['wishes'] = message.text
User.objects.create(**data)
bot.send_message(message.chat.id, 'Спасибо.')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
connect('bot_db')
class User(Document):
surname = StringField(required=True)
name = StringField(required=True)
middle_name = StringField(required=True)
phone = StringField(required=True)
email = StringField(required=True)
address = StringField(required=True)
wishes = StringField(required=True)
@bot.message_handler(commands=['start'])
def start(message):
kb = InlineKeyboardMarkup()
button1 = InlineKeyboardButton('Да', callback_data='yes')
button2 = InlineKeyboardButton('Нет', callback_data='no')
kb.add(button1, button2)
bot.send_message(message.chat.id, 'Здравствуйте. Пройдете опрос?',
reply_markup=kb)
@bot.callback_query_handler(func=lambda call: call.data == 'no')
def bye(call):
bot.send_message(call.from_user.id, 'До свидания')
@bot.callback_query_handler(func=lambda call: call.data == 'yes')
def start(call):
bot.send_message(call.from_user.id, 'Хорошо')
bot.send_message(call.from_user.id, 'Как Вас зовут?')
@bot.message_handler(content_types=['text'])
def response(message):
data['name'] = message.text
bot.send_message(message.chat.id, 'Какая у Вас фамилия?')
bot.register_next_step_handler(message, get_surname)
def get_surname(message):
data['surname'] = message.text
bot.send_message(message.chat.id, 'Как Ваше отчество?')
bot.register_next_step_handler(message, get_middle_name)
def get_middle_name(message):
data['middle_name'] = message.text
kb = ReplyKeyboardMarkup(resize_keyboard=True)
button = KeyboardButton(text='поделиться контактом', request_contact=True)
kb.add(button)
bot.send_message(message.chat.id, 'Поделитесь номером телефона?',
reply_markup=kb)
bot.register_next_step_handler(message, get_phone)
@bot.message_handler(content_types=['contact'])
def get_phone(message):
data['phone'] = message.contact.phone_number
bot.send_message(message.chat.id, 'Какой у Вас e-mail?')
bot.register_next_step_handler(message, get_email)
def get_email(message):
data['email'] = message.text
bot.send_message(message.chat.id, 'Какой Ваш адрес?')
bot.register_next_step_handler(message, get_address)
def get_address(message):
data['address'] = message.text
bot.send_message(message.chat.id, 'Какие у Вас пожелания?')
bot.register_next_step_handler(message, get_wishes)
def get_wishes(message):
data['wishes'] = message.text
User.objects.create(**data)
bot.send_message(message.chat.id, 'Спасибо.')
bot.polling()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
bot = telebot.TeleBot(TOKEN)
data = {}
connect('bot_db')
class User(Document):
surname = StringField(required=True)
name = StringField(required=True)
middle_name = StringField(required=True)
phone = StringField(required=True)
email = StringField(required=True)
address = StringField(required=True)
wishes = StringField(required=True)
@bot.message_handler(commands=['start'])
def start(message):
kb = InlineKeyboardMarkup()
button1 = InlineKeyboardButton('Да', callback_data='yes')
button2 = InlineKeyboardButton('Нет', callback_data='no')
kb.add(button1, button2)
bot.send_message(message.chat.id, 'Здравствуйте. Пройдете опрос?',
reply_markup=kb)
@bot.callback_query_handler(func=lambda call: call.data == 'no')
def bye(call):
bot.send_message(call.from_user.id, 'До свидания')
@bot.callback_query_handler(func=lambda call: call.data == 'yes')
def start(call):
bot.send_message(call.from_user.id, 'Хорошо')
bot.send_message(call.from_user.id, 'Как Вас зовут?')
@bot.message_handler(content_types=['text'])
def response(message):
data['name'] = message.text
bot.send_message(message.chat.id, 'Какая у Вас фамилия?')
bot.register_next_step_handler(message, get_surname)
def get_surname(message):
data['surname'] = message.text
bot.send_message(message.chat.id, 'Как Ваше отчество?')
bot.register_next_step_handler(message, get_middle_name)
def get_middle_name(message):
data['middle_name'] = message.text
kb = ReplyKeyboardMarkup(resize_keyboard=True)
button = KeyboardButton(text='поделиться контактом', request_contact=True)
kb.add(button)
bot.send_message(message.chat.id, 'Поделитесь номером телефона?',
reply_markup=kb)
bot.register_next_step_handler(message, get_phone)
@bot.message_handler(content_types=['contact'])
def get_phone(message):
data['phone'] = message.contact.phone_number
bot.send_message(message.chat.id, 'Какой у Вас e-mail?')
bot.register_next_step_handler(message, get_email)
def get_email(message):
data['email'] = message.text
bot.send_message(message.chat.id, 'Какой Ваш адрес?')
bot.register_next_step_handler(message, get_address)
def get_address(message):
data['address'] = message.text
bot.send_message(message.chat.id, 'Какие у Вас пожелания?')
bot.register_next_step_handler(message, get_wishes)
def get_wishes(message):
data['wishes'] = message.text
User.objects.create(**data)
bot.send_message(message.chat.id, 'Спасибо.')
bot.polling()
<|reserved_special_token_1|>
"""1) Написать бота-консультанта, который будет собирать информацию с
пользователя (его ФИО, номер телефона, почта, адресс, пожелания).
Записывать сформированную заявку в БД (по желанию SQl/NOSQL).)."""
import telebot
from .config import TOKEN
from telebot.types import ReplyKeyboardMarkup, KeyboardButton, InlineKeyboardMarkup, InlineKeyboardButton
from mongoengine import *
bot = telebot.TeleBot(TOKEN)
data = {}
connect('bot_db')
class User(Document):
surname = StringField(required=True)
name = StringField(required=True)
middle_name = StringField(required=True)
phone = StringField(required=True)
email = StringField(required=True)
address = StringField(required=True)
wishes = StringField(required=True)
@bot.message_handler(commands=['start'])
def start(message):
kb = InlineKeyboardMarkup()
button1 = InlineKeyboardButton('Да', callback_data='yes')
button2 = InlineKeyboardButton('Нет', callback_data='no')
kb.add(button1, button2)
bot.send_message(message.chat.id, 'Здравствуйте. Пройдете опрос?', reply_markup=kb)
@bot.callback_query_handler(func=lambda call: call.data == 'no')
def bye(call):
bot.send_message(call.from_user.id, 'До свидания')
@bot.callback_query_handler(func=lambda call: call.data == 'yes')
def start(call):
bot.send_message(call.from_user.id, 'Хорошо')
bot.send_message(call.from_user.id, 'Как Вас зовут?')
@bot.message_handler(content_types=['text'])
def response(message):
data['name'] = message.text
bot.send_message(message.chat.id, 'Какая у Вас фамилия?')
bot.register_next_step_handler(message, get_surname)
def get_surname(message):
data['surname'] = message.text
bot.send_message(message.chat.id, 'Как Ваше отчество?')
bot.register_next_step_handler(message, get_middle_name)
def get_middle_name(message):
data['middle_name'] = message.text
kb = ReplyKeyboardMarkup(resize_keyboard=True)
button = KeyboardButton(text='поделиться контактом', request_contact=True)
kb.add(button)
bot.send_message(message.chat.id, 'Поделитесь номером телефона?', reply_markup=kb)
bot.register_next_step_handler(message, get_phone)
@bot.message_handler(content_types=['contact'])
def get_phone(message):
data['phone'] = message.contact.phone_number
bot.send_message(message.chat.id, 'Какой у Вас e-mail?')
bot.register_next_step_handler(message, get_email)
def get_email(message):
data['email'] = message.text
bot.send_message(message.chat.id, 'Какой Ваш адрес?')
bot.register_next_step_handler(message, get_address)
def get_address(message):
data['address'] = message.text
bot.send_message(message.chat.id, 'Какие у Вас пожелания?')
bot.register_next_step_handler(message, get_wishes)
def get_wishes(message):
data['wishes'] = message.text
User.objects.create(**data)
bot.send_message(message.chat.id, 'Спасибо.')
bot.polling()
|
flexible
|
{
"blob_id": "dcb2351f9489815fbec8694b446d0a93972a6590",
"index": 6388,
"step-1": "<mask token>\n\n\nclass User(Document):\n surname = StringField(required=True)\n name = StringField(required=True)\n middle_name = StringField(required=True)\n phone = StringField(required=True)\n email = StringField(required=True)\n address = StringField(required=True)\n wishes = StringField(required=True)\n\n\n@bot.message_handler(commands=['start'])\ndef start(message):\n kb = InlineKeyboardMarkup()\n button1 = InlineKeyboardButton('Да', callback_data='yes')\n button2 = InlineKeyboardButton('Нет', callback_data='no')\n kb.add(button1, button2)\n bot.send_message(message.chat.id, 'Здравствуйте. Пройдете опрос?',\n reply_markup=kb)\n\n\n<mask token>\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'yes')\ndef start(call):\n bot.send_message(call.from_user.id, 'Хорошо')\n bot.send_message(call.from_user.id, 'Как Вас зовут?')\n\n\n@bot.message_handler(content_types=['text'])\ndef response(message):\n data['name'] = message.text\n bot.send_message(message.chat.id, 'Какая у Вас фамилия?')\n bot.register_next_step_handler(message, get_surname)\n\n\ndef get_surname(message):\n data['surname'] = message.text\n bot.send_message(message.chat.id, 'Как Ваше отчество?')\n bot.register_next_step_handler(message, get_middle_name)\n\n\n<mask token>\n\n\n@bot.message_handler(content_types=['contact'])\ndef get_phone(message):\n data['phone'] = message.contact.phone_number\n bot.send_message(message.chat.id, 'Какой у Вас e-mail?')\n bot.register_next_step_handler(message, get_email)\n\n\ndef get_email(message):\n data['email'] = message.text\n bot.send_message(message.chat.id, 'Какой Ваш адрес?')\n bot.register_next_step_handler(message, get_address)\n\n\ndef get_address(message):\n data['address'] = message.text\n bot.send_message(message.chat.id, 'Какие у Вас пожелания?')\n bot.register_next_step_handler(message, get_wishes)\n\n\ndef get_wishes(message):\n data['wishes'] = message.text\n User.objects.create(**data)\n bot.send_message(message.chat.id, 
'Спасибо.')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass User(Document):\n surname = StringField(required=True)\n name = StringField(required=True)\n middle_name = StringField(required=True)\n phone = StringField(required=True)\n email = StringField(required=True)\n address = StringField(required=True)\n wishes = StringField(required=True)\n\n\n@bot.message_handler(commands=['start'])\ndef start(message):\n kb = InlineKeyboardMarkup()\n button1 = InlineKeyboardButton('Да', callback_data='yes')\n button2 = InlineKeyboardButton('Нет', callback_data='no')\n kb.add(button1, button2)\n bot.send_message(message.chat.id, 'Здравствуйте. Пройдете опрос?',\n reply_markup=kb)\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'no')\ndef bye(call):\n bot.send_message(call.from_user.id, 'До свидания')\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'yes')\ndef start(call):\n bot.send_message(call.from_user.id, 'Хорошо')\n bot.send_message(call.from_user.id, 'Как Вас зовут?')\n\n\n@bot.message_handler(content_types=['text'])\ndef response(message):\n data['name'] = message.text\n bot.send_message(message.chat.id, 'Какая у Вас фамилия?')\n bot.register_next_step_handler(message, get_surname)\n\n\ndef get_surname(message):\n data['surname'] = message.text\n bot.send_message(message.chat.id, 'Как Ваше отчество?')\n bot.register_next_step_handler(message, get_middle_name)\n\n\ndef get_middle_name(message):\n data['middle_name'] = message.text\n kb = ReplyKeyboardMarkup(resize_keyboard=True)\n button = KeyboardButton(text='поделиться контактом', request_contact=True)\n kb.add(button)\n bot.send_message(message.chat.id, 'Поделитесь номером телефона?',\n reply_markup=kb)\n bot.register_next_step_handler(message, get_phone)\n\n\n@bot.message_handler(content_types=['contact'])\ndef get_phone(message):\n data['phone'] = message.contact.phone_number\n bot.send_message(message.chat.id, 'Какой у Вас e-mail?')\n bot.register_next_step_handler(message, get_email)\n\n\ndef 
get_email(message):\n data['email'] = message.text\n bot.send_message(message.chat.id, 'Какой Ваш адрес?')\n bot.register_next_step_handler(message, get_address)\n\n\ndef get_address(message):\n data['address'] = message.text\n bot.send_message(message.chat.id, 'Какие у Вас пожелания?')\n bot.register_next_step_handler(message, get_wishes)\n\n\ndef get_wishes(message):\n data['wishes'] = message.text\n User.objects.create(**data)\n bot.send_message(message.chat.id, 'Спасибо.')\n\n\n<mask token>\n",
"step-3": "<mask token>\nconnect('bot_db')\n\n\nclass User(Document):\n surname = StringField(required=True)\n name = StringField(required=True)\n middle_name = StringField(required=True)\n phone = StringField(required=True)\n email = StringField(required=True)\n address = StringField(required=True)\n wishes = StringField(required=True)\n\n\n@bot.message_handler(commands=['start'])\ndef start(message):\n kb = InlineKeyboardMarkup()\n button1 = InlineKeyboardButton('Да', callback_data='yes')\n button2 = InlineKeyboardButton('Нет', callback_data='no')\n kb.add(button1, button2)\n bot.send_message(message.chat.id, 'Здравствуйте. Пройдете опрос?',\n reply_markup=kb)\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'no')\ndef bye(call):\n bot.send_message(call.from_user.id, 'До свидания')\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'yes')\ndef start(call):\n bot.send_message(call.from_user.id, 'Хорошо')\n bot.send_message(call.from_user.id, 'Как Вас зовут?')\n\n\n@bot.message_handler(content_types=['text'])\ndef response(message):\n data['name'] = message.text\n bot.send_message(message.chat.id, 'Какая у Вас фамилия?')\n bot.register_next_step_handler(message, get_surname)\n\n\ndef get_surname(message):\n data['surname'] = message.text\n bot.send_message(message.chat.id, 'Как Ваше отчество?')\n bot.register_next_step_handler(message, get_middle_name)\n\n\ndef get_middle_name(message):\n data['middle_name'] = message.text\n kb = ReplyKeyboardMarkup(resize_keyboard=True)\n button = KeyboardButton(text='поделиться контактом', request_contact=True)\n kb.add(button)\n bot.send_message(message.chat.id, 'Поделитесь номером телефона?',\n reply_markup=kb)\n bot.register_next_step_handler(message, get_phone)\n\n\n@bot.message_handler(content_types=['contact'])\ndef get_phone(message):\n data['phone'] = message.contact.phone_number\n bot.send_message(message.chat.id, 'Какой у Вас e-mail?')\n bot.register_next_step_handler(message, 
get_email)\n\n\ndef get_email(message):\n data['email'] = message.text\n bot.send_message(message.chat.id, 'Какой Ваш адрес?')\n bot.register_next_step_handler(message, get_address)\n\n\ndef get_address(message):\n data['address'] = message.text\n bot.send_message(message.chat.id, 'Какие у Вас пожелания?')\n bot.register_next_step_handler(message, get_wishes)\n\n\ndef get_wishes(message):\n data['wishes'] = message.text\n User.objects.create(**data)\n bot.send_message(message.chat.id, 'Спасибо.')\n\n\nbot.polling()\n",
"step-4": "<mask token>\nbot = telebot.TeleBot(TOKEN)\ndata = {}\nconnect('bot_db')\n\n\nclass User(Document):\n surname = StringField(required=True)\n name = StringField(required=True)\n middle_name = StringField(required=True)\n phone = StringField(required=True)\n email = StringField(required=True)\n address = StringField(required=True)\n wishes = StringField(required=True)\n\n\n@bot.message_handler(commands=['start'])\ndef start(message):\n kb = InlineKeyboardMarkup()\n button1 = InlineKeyboardButton('Да', callback_data='yes')\n button2 = InlineKeyboardButton('Нет', callback_data='no')\n kb.add(button1, button2)\n bot.send_message(message.chat.id, 'Здравствуйте. Пройдете опрос?',\n reply_markup=kb)\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'no')\ndef bye(call):\n bot.send_message(call.from_user.id, 'До свидания')\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'yes')\ndef start(call):\n bot.send_message(call.from_user.id, 'Хорошо')\n bot.send_message(call.from_user.id, 'Как Вас зовут?')\n\n\n@bot.message_handler(content_types=['text'])\ndef response(message):\n data['name'] = message.text\n bot.send_message(message.chat.id, 'Какая у Вас фамилия?')\n bot.register_next_step_handler(message, get_surname)\n\n\ndef get_surname(message):\n data['surname'] = message.text\n bot.send_message(message.chat.id, 'Как Ваше отчество?')\n bot.register_next_step_handler(message, get_middle_name)\n\n\ndef get_middle_name(message):\n data['middle_name'] = message.text\n kb = ReplyKeyboardMarkup(resize_keyboard=True)\n button = KeyboardButton(text='поделиться контактом', request_contact=True)\n kb.add(button)\n bot.send_message(message.chat.id, 'Поделитесь номером телефона?',\n reply_markup=kb)\n bot.register_next_step_handler(message, get_phone)\n\n\n@bot.message_handler(content_types=['contact'])\ndef get_phone(message):\n data['phone'] = message.contact.phone_number\n bot.send_message(message.chat.id, 'Какой у Вас e-mail?')\n 
bot.register_next_step_handler(message, get_email)\n\n\ndef get_email(message):\n data['email'] = message.text\n bot.send_message(message.chat.id, 'Какой Ваш адрес?')\n bot.register_next_step_handler(message, get_address)\n\n\ndef get_address(message):\n data['address'] = message.text\n bot.send_message(message.chat.id, 'Какие у Вас пожелания?')\n bot.register_next_step_handler(message, get_wishes)\n\n\ndef get_wishes(message):\n data['wishes'] = message.text\n User.objects.create(**data)\n bot.send_message(message.chat.id, 'Спасибо.')\n\n\nbot.polling()\n",
"step-5": "\"\"\"1) Написать бота-консультанта, который будет собирать информацию с\nпользователя (его ФИО, номер телефона, почта, адресс, пожелания).\nЗаписывать сформированную заявку в БД (по желанию SQl/NOSQL).).\"\"\"\n\n\nimport telebot\nfrom .config import TOKEN\nfrom telebot.types import ReplyKeyboardMarkup, KeyboardButton, InlineKeyboardMarkup, InlineKeyboardButton\nfrom mongoengine import *\n\n\nbot = telebot.TeleBot(TOKEN)\ndata = {}\nconnect('bot_db')\n\n\nclass User(Document):\n\n surname = StringField(required=True)\n name = StringField(required=True)\n middle_name = StringField(required=True)\n phone = StringField(required=True)\n email = StringField(required=True)\n address = StringField(required=True)\n wishes = StringField(required=True)\n\n\n@bot.message_handler(commands=['start'])\ndef start(message):\n kb = InlineKeyboardMarkup()\n button1 = InlineKeyboardButton('Да', callback_data='yes')\n button2 = InlineKeyboardButton('Нет', callback_data='no')\n kb.add(button1, button2)\n\n bot.send_message(message.chat.id, 'Здравствуйте. 
Пройдете опрос?', reply_markup=kb)\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'no')\ndef bye(call):\n bot.send_message(call.from_user.id, 'До свидания')\n\n\n@bot.callback_query_handler(func=lambda call: call.data == 'yes')\ndef start(call):\n bot.send_message(call.from_user.id, 'Хорошо')\n bot.send_message(call.from_user.id, 'Как Вас зовут?')\n\n\n@bot.message_handler(content_types=['text'])\ndef response(message):\n data['name'] = message.text\n bot.send_message(message.chat.id, 'Какая у Вас фамилия?')\n bot.register_next_step_handler(message, get_surname)\n\n\ndef get_surname(message):\n data['surname'] = message.text\n bot.send_message(message.chat.id, 'Как Ваше отчество?')\n bot.register_next_step_handler(message, get_middle_name)\n\n\ndef get_middle_name(message):\n data['middle_name'] = message.text\n kb = ReplyKeyboardMarkup(resize_keyboard=True)\n button = KeyboardButton(text='поделиться контактом', request_contact=True)\n kb.add(button)\n bot.send_message(message.chat.id, 'Поделитесь номером телефона?', reply_markup=kb)\n bot.register_next_step_handler(message, get_phone)\n\n\n@bot.message_handler(content_types=['contact'])\ndef get_phone(message):\n data['phone'] = message.contact.phone_number\n bot.send_message(message.chat.id, 'Какой у Вас e-mail?')\n bot.register_next_step_handler(message, get_email)\n\n\ndef get_email(message):\n data['email'] = message.text\n bot.send_message(message.chat.id, 'Какой Ваш адрес?')\n bot.register_next_step_handler(message, get_address)\n\n\ndef get_address(message):\n data['address'] = message.text\n bot.send_message(message.chat.id, 'Какие у Вас пожелания?')\n bot.register_next_step_handler(message, get_wishes)\n\n\ndef get_wishes(message):\n data['wishes'] = message.text\n User.objects.create(**data)\n bot.send_message(message.chat.id, 'Спасибо.')\n\n\nbot.polling()\n",
"step-ids": [
10,
12,
13,
14,
16
]
}
|
[
10,
12,
13,
14,
16
] |
<|reserved_special_token_0|>
class initDATA(webapp.RequestHandler):
<|reserved_special_token_0|>
def get(self):
user = users.get_current_user()
if user == None:
self.redirect(users.create_login_url(self.request.uri))
return
for ht in hts:
htdate = datetime.date(datetime.strptime(ht[0], '%Y/%m/%d'))
obj1 = HistoricalTable(date=htdate, title=ht[1], url=ht[2])
obj1.save()
for pr in prs:
prdate = datetime.date(datetime.strptime(pr[0], '%Y/%m/%d'))
obj2 = PollRating(date=prdate, approval_rate=float(pr[3]),
unknown_rate=float(pr[2]), disapproval_rate=float(pr[1]))
obj2.save()
for pgo in gos:
gosdate = datetime.date(datetime.strptime(pgo[1], '%Y/%m/%d'))
try:
goedate = datetime.date(datetime.strptime(pgo[2], '%Y/%m/%d'))
except:
goedate = None
obj3 = Government(name=pgo[0], begin=gosdate, end=goedate)
obj3.save()
class clearDATA(webapp.RequestHandler):
def get(self):
user = users.get_current_user()
if user == None:
self.redirect(users.create_login_url(self.request.uri))
return
for ht in HistoricalTable.all():
ht.delete()
for pr in PollRating.all():
pr.delete()
for pgo in Government.all():
pgo.delete()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class initDATA(webapp.RequestHandler):
"""
classdocs
"""
def get(self):
user = users.get_current_user()
if user == None:
self.redirect(users.create_login_url(self.request.uri))
return
for ht in hts:
htdate = datetime.date(datetime.strptime(ht[0], '%Y/%m/%d'))
obj1 = HistoricalTable(date=htdate, title=ht[1], url=ht[2])
obj1.save()
for pr in prs:
prdate = datetime.date(datetime.strptime(pr[0], '%Y/%m/%d'))
obj2 = PollRating(date=prdate, approval_rate=float(pr[3]),
unknown_rate=float(pr[2]), disapproval_rate=float(pr[1]))
obj2.save()
for pgo in gos:
gosdate = datetime.date(datetime.strptime(pgo[1], '%Y/%m/%d'))
try:
goedate = datetime.date(datetime.strptime(pgo[2], '%Y/%m/%d'))
except:
goedate = None
obj3 = Government(name=pgo[0], begin=gosdate, end=goedate)
obj3.save()
class clearDATA(webapp.RequestHandler):
    """Remove all seeded entities from the datastore.

    Deletes every HistoricalTable, PollRating and Government entity.
    Anonymous visitors are redirected to the login page.
    """

    def get(self):
        user = users.get_current_user()
        if user is None:  # compare to None with 'is', not '=='
            self.redirect(users.create_login_url(self.request.uri))
            return
        for ht in HistoricalTable.all():
            ht.delete()
        for pr in PollRating.all():
            pr.delete()
        for pgo in Government.all():
            pgo.delete()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
hts = [['2014/7/1', '集団的自衛権行使容認の閣議決定',
'http://www.47news.jp/47topics/e/254919.php'], ['2014/3/18',
'ロシア、クリミアを編入', 'http://www.47news.jp/CN/201403/CN2014031801002413.html'
], ['2014/2/9', '舛添氏が圧勝、東京都知事選',
'http://www.47news.jp/CN/201402/CN2014020901001630.html'], ['2014/1/7',
'国家安全保障局を設置', 'http://www.47news.jp/CN/201401/CN2014010701001086.html'],
['2013/12/26', '安倍首相が靖国神社参拝',
'http://www.47news.jp/CN/201312/CN2013122601000987.html'], ['2013/12/6',
'特定秘密保護法が成立', 'http://www.47news.jp/CN/201312/CN2013120601002724.html'],
['2013/11/3', '東北楽天がプロ野球日本一',
'http://www.47news.jp/CN/201311/CN2013110301002118.html'], ['2013/10/1',
'消費税率引き上げ決定、4月8%',
'http://www.47news.jp/CN/201310/CN2013100101002292.html'], ['2013/9/8',
'2020年東京五輪開催決定',
'http://www.47news.jp/CN/201309/CN2013090401001495.html'], ['2013/7/21',
'参院選で自民圧勝、ねじれ解消',
'http://www.47news.jp/CN/201307/CN2013072101001638.html'], ['2013/3/15',
'TPP交渉に参加表明', 'http://www.47news.jp/CN/201303/CN2013031501001566.html'],
['2013/2/12', '北朝鮮が3度目の核実験',
'http://www.47news.jp/CN/201302/CN2013021201001987.html'], ['2013/1/16',
'アルジェリア人質事件発生',
'http://www.47news.jp/CN/201301/CN2013011601001649.html'], [
'2012/12/26', '第2次安倍内閣発足',
'http://www.47news.jp/CN/201212/CN2012122601001577.html'], ['2012/12/6',
'自公が政権奪還、衆院選', 'http://www.47news.jp/CN/201212/CN2012121601001041.html'
], ['2012/11/15', '習近平新指導部発足、中国',
'http://www.47news.jp/CN/201211/CN2012111501001203.html'], ['2012/11/6',
'オバマ米大統領が再選', 'http://www.47news.jp/CN/201211/CN2012110701000867.html'],
['2012/10/1', '新型輸送機オスプレイを沖縄配備',
'http://www.47news.jp/CN/201210/CN2012100101001335.html'], ['2012/9/11',
'尖閣諸島の魚釣島など3島国有化',
'http://www.47news.jp/CN/201209/CN2012091101001254.html'], ['2012/8/10',
'消費税増税法が成立、10%へ',
'http://www.47news.jp/CN/201208/CN2012081001002702.html'], ['2012/6/27',
'東京電力を国有化、公的資金注入',
'http://www.47news.jp/CN/201206/CN2012062701001601.html'], [
'2011/12/19', '北朝鮮の金正日総書記が死去発表',
'http://www.47news.jp/CN/201112/CN2011121901001386.html'], [
'2011/11/27', '大阪ダブル選で「維新の会」勝利',
'http://www.47news.jp/CN/201111/CN2011112701001230.html'], [
'2011/10/20', 'リビアのカダフィ大佐が死亡',
'http://www.47news.jp/CN/201110/CN2011102001000912.html'], ['2011/10/5',
'米アップル創業者ジョブズ氏死去',
'http://www.47news.jp/CN/201110/CN2011100601000102.html'], ['2011/9/2',
'野田内閣が発足', 'http://www.47news.jp/CN/201109/CN2011090201000656.html'], [
'2011/8/19', '円が戦後最高値更新、75円95銭',
'http://www.47news.jp/CN/201108/CN2011081901001116.html'], ['2011/7/17',
'なでしこジャパン女子W杯初優勝',
'http://www.47news.jp/CN/201107/CN2011071801000025.html'], ['2011/5/6',
'首相、浜岡原発停止要請', 'http://www.47news.jp/CN/201105/CN2011050601000847.html'
], ['2011/3/11', '東日本大震災',
'http://www.47news.jp/CN/201103/CN2011031101000455.html'], ['2011/2/22',
'NZ地震、日本人28人も死亡',
'http://www.47news.jp/CN/201104/CN2011040401001017.html'], ['2011/1/31',
'民主党小沢一郎元代表を強制起訴',
'http://www.47news.jp/CN/201101/CN2011013101000352.html'], [
'2010/11/23', '北朝鮮が韓国・延坪島砲撃',
'http://www.47news.jp/CN/201011/CN2010112301000213.html'], ['2010/10/6',
'ノーベル化学賞に根岸、鈴木両氏',
'http://www.47news.jp/CN/201010/CN2010100601000811.html'], ['2010/9/15',
'政府が為替介入、6年半ぶり',
'http://www.47news.jp/CN/201009/CN2010091501000138.html'], ['2010/9/7',
'尖閣で中国漁船が巡視船に衝突',
'http://www.47news.jp/CN/201009/CN2010090701000382.html'], ['2010/7/11',
'参院選で民主党大敗、ねじれ国会',
'http://www.47news.jp/CN/201007/CN2010071101000032.html'], ['2010/6/8',
'鳩山首相退陣、菅内閣発足',
'http://www.47news.jp/CN/201006/CN2010060801000756.html'], ['2010/5/28',
'普天間移設で日米合意', 'http://www.47news.jp/CN/201005/CN2010052801000165.html'],
['2010/4/20', '宮崎県で口蹄疫、被害拡大',
'http://www.47news.jp/CN/201004/CN2010042001000207.html'], [
'2009/11/20', 'デフレ宣言、3年5カ月ぶり',
'http://www.47news.jp/CN/200911/CN2009112001000267.html'], ['2009/10/2',
'2016年五輪はリオ、東京落選',
'http://www.47news.jp/CN/200910/CN2009100201000542.html'], ['2009/9/16',
'鳩山内閣発足', 'http://www.47news.jp/CN/200909/CN2009091601000915.html'], [
'2009/8/30', '民主党圧勝で政権交代、衆院選',
'http://www.47news.jp/CN/200908/CN2009083001000015.html'], ['2009/8/3',
'全国初の裁判員裁判、東京地裁',
'http://www.47news.jp/CN/200908/CN2009080301000461.html'], ['2009/6/25',
'歌手M・ジャクソンさん急死',
'http://www.47news.jp/CN/200906/CN2009062601000067.html'], ['2009/5/25',
'北朝鮮が2回目の核実験', 'http://www.47news.jp/CN/200905/CN2009052501000261.html'
], ['2009/3/23', 'WBCで「侍ジャパン」が連覇',
'http://www.47news.jp/CN/200903/CN2009032401000025.html'], ['2009/1/20',
'米、オバマ新政権が発足', 'http://www.47news.jp/CN/200901/CN2009012001000945.html'
], ['2008/10/31', '田母神俊雄航空幕僚長を更迭',
'http://www.47news.jp/CN/200810/CN2008103101000632.html'], ['2008/9/24',
'麻生内閣発足', 'http://www.47news.jp/CN/200809/CN2008092401000025.html'], [
'2008/9/15', 'リーマン・ショック',
'http://www.47news.jp/CN/200809/CN2008091501000215.html'], ['2008/9/1',
'福田首相、退陣表明', 'http://www.47news.jp/CN/200809/CN2008090101000736.html'],
['2008/7/7', '北海道・洞爺湖サミット~9日',
'http://www.47news.jp/CN/200807/CN2008070901000704.html'], ['2008/6/11',
'福田首相の問責決議が可決',
'http://www.47news.jp/CN/200806/CN2008061101000609.html'], ['2008/5/12',
'中国・四川大地震', 'http://www.47news.jp/CN/200805/CN2008051201000871.html'],
['2008/4/9', '日銀総裁に白川副総裁が昇格',
'http://www.47news.jp/CN/200804/CN2008040901000924.html'], ['2008/2/19',
'海自イージス艦が漁船と衝突',
'http://www.47news.jp/CN/200802/CN2008021901000329.html'], ['2008/1/27',
'大阪府知事選で橋下徹氏初当選',
'http://www.47news.jp/CN/200801/CN2008012801000076.html'], [
'2007/11/28', '防衛装備疑惑で前防衛次官を逮捕',
'http://www.47news.jp/CN/200711/CN2007112801000463.html'], ['2007/11/2',
'テロ特措法期限切れ海自撤収命令',
'http://www.47news.jp/CN/200710/CN2007102901000620.html'], ['2007/9/12',
'安倍首相が退陣。後任に福田氏',
'http://www.47news.jp/CN/200709/CN2007091201000426.html'], ['2007/7/29',
'参院選で自民党が歴史的惨敗',
'http://www.47news.jp/CN/200707/CN2007072901000697.html'], ['2007/5/28',
'松岡農相が自殺', 'http://www.47news.jp/CN/200705/CN2007052801000693.html'], [
'2007/5/14', '改憲手続き定めた国民投票法成立',
'http://www.47news.jp/CN/200705/CN2007051401000231.html']]
prs = [['2007/4/16', '38.3 ', '17.5 ', '44.2 '], ['2007/5/12', '38.2 ',
'14.2 ', '47.6 '], ['2007/6/1', '48.7 ', '15.5 ', '35.8 '], [
'2007/7/30', '59.0 ', '12.0 ', '29.0 '], ['2007/8/27', '45.5 ', '14.0 ',
'40.5 '], ['2007/9/13', '46.6 ', '7.9 ', '45.5 '], ['2007/9/25',
'25.6 ', '16.6 ', '57.8 '], ['2007/10/27', '29.6 ', '20.2 ', '50.2 '],
['2007/11/5', '36.6 ', '16.4 ', '47.0 '], ['2007/12/15', '47.6 ',
'17.1 ', '35.3 '], ['2008/1/11', '42.8 ', '15.8 ', '41.4 '], [
'2008/2/9', '44.6 ', '19.9 ', '35.5 '], ['2008/3/15', '50.6 ', '16.0 ',
'33.4 '], ['2008/4/4', '59.6 ', '13.8 ', '26.6 '], ['2008/5/1', '66.6 ',
'13.6 ', '19.8 '], ['2008/6/12', '60.2 ', '14.8 ', '25.0 '], [
'2008/7/11', '53.5 ', '19.7 ', '26.8 '], ['2008/8/1', '48.2 ', '20.4 ',
'31.5 '], ['2008/9/2', '28.0 ', '4.1 ', '67.9 '], ['2008/9/24', '32.9 ',
'18.5 ', '48.6 '], ['2008/10/18', '39.0 ', '18.5 ', '42.5 '], [
'2008/11/8', '42.1 ', '16.9 ', '40.9 '], ['2008/12/6', '61.4 ', '13.2 ',
'25.4 '], ['2009/1/10', '70.2 ', '10.6 ', '19.2 '], ['2009/2/7',
'70.9 ', '11.0 ', '18.1 '], ['2009/2/17', '76.6 ', '10.0 ', '13.4 '], [
'2009/3/7', '70.8 ', '13.2 ', '16.0 '], ['2009/3/25', '63.4 ', '12.8 ',
'23.7 '], ['2009/4/28', '56.2 ', '14.2 ', '29.6 '], ['2009/5/11',
'55.1 ', '16.9 ', '28.0 '], ['2009/5/16', '60.2 ', '13.5 ', '26.2 '], [
'2009/6/13', '70.5 ', '12.0 ', '17.5 '], ['2009/7/3', '60.9 ', '15.7 ',
'23.4 '], ['2009/9/16', '13.1 ', '14.9 ', '72.0 '], ['2009/10/31',
'22.9 ', '15.3 ', '61.8 '], ['2009/11/28', '25.1 ', '11.2 ', '63.6 '],
['2009/12/25', '38.1 ', '14.7 ', '47.1 '], ['2010/1/10', '33.2 ',
'16.0 ', '50.8 '], ['2010/1/17', '44.1 ', '14.4 ', '41.5 '], [
'2010/2/5', '45.1 ', '13.5 ', '41.4 '], ['2010/3/6', '48.9 ', '14.8 ',
'36.4 '], ['2010/4/3', '53.3 ', '13.7 ', '33.0 '], ['2010/4/28',
'64.4 ', '14.9 ', '20.7 '], ['2010/5/29', '73.1 ', '7.7 ', '19.1 '], [
'2010/6/4', '37.2 ', '5.2 ', '57.7 '], ['2010/7/12', '52.2 ', '11.5 ',
'36.2 '], ['2010/8/7', '44.8 ', '16.5 ', '38.7 '], ['2010/8/27',
'36.2 ', '15.7 ', '48.1 '], ['2010/9/9', '31.5 ', '13.8 ', '54.7 '], [
'2010/9/17', '21.2 ', '14.3 ', '64.5 '], ['2010/10/5', '36.6 ', '15.8 ',
'47.6 '], ['2010/11/6', '48.6 ', '18.7 ', '32.7 '], ['2010/11/23',
'61.9 ', '14.5 ', '23.6 '], ['2010/12/25', '67.0 ', '9.4 ', '23.7 '], [
'2011/1/14', '53.9 ', '13.9 ', '32.1 '], ['2011/2/11', '63.3 ', '16.7 ',
'19.9 '], ['2011/3/26', '55.6 ', '16.1 ', '28.3 '], ['2011/4/29',
'58.6 ', '14.5 ', '26.8 '], ['2011/5/14', '57.3 ', '14.6 ', '28.1 '], [
'2011/6/28', '61.1 ', '15.6 ', '23.2 '], ['2011/7/23', '70.6 ', '12.3 ',
'17.1 '], ['2011/8/20', '70.0 ', '14.2 ', '15.8 '], ['2011/9/2',
'18.1 ', '19.1 ', '62.7 '], ['2011/10/1', '27.8 ', '17.6 ', '54.6 '], [
'2011/11/5', '34.3 ', '18.6 ', '47.1 '], ['2011/12/3', '40.3 ', '15.1 ',
'44.6 '], ['2012/1/7', '50.6 ', '13.7 ', '35.7 '], ['2012/1/13',
'47.8 ', '16.4 ', '35.8 '], ['2012/2/18', '55.2 ', '15.8 ', '29.0 '], [
'2012/3/19', '50.2 ', '18.2 ', '31.6 '], ['2012/4/28', '60.0 ', '13.6 ',
'26.4 '], ['2012/5/26', '58.1 ', '13.9 ', '28.0 '], ['2012/6/4',
'50.0 ', '18.0 ', '32.0 '], ['2012/6/26', '54.4 ', '15.8 ', '29.9 '], [
'2012/7/14', '59.9 ', '11.9 ', '28.2 '], ['2012/8/11', '59.0 ', '13.1 ',
'27.9 '], ['2012/9/1', '59.4 ', '14.3 ', '26.3 '], ['2012/10/1',
'55.3 ', '15.5 ', '29.2 '], ['2012/11/3', '66.0 ', '16.2 ', '17.7 '], [
'2012/12/26', '21.8 ', '16.2 ', '62.0 '], ['2013/1/26', '22.1 ',
'11.2 ', '66.7 '], ['2013/2/23', '16.2 ', '11.1 ', '72.7 '], [
'2013/3/23', '16.7 ', '12.2 ', '71.1 '], ['2013/3/30', '20.8 ', '7.2 ',
'72.0 '], ['2013/4/20', '16.0 ', '11.9 ', '72.1 '], ['2013/5/18',
'16.2 ', '12.9 ', '70.9 '], ['2013/6/1', '16.3 ', '15.7 ', '68.0 '], [
'2013/6/8', '20.4 ', '8.4 ', '71.2 '], ['2013/7/22', '31.7 ', '12.1 ',
'56.2 '], ['2013/8/24', '25.6 ', '16.7 ', '57.7 '], ['2013/9/14',
'20.4 ', '17.8 ', '61.8 '], ['2013/9/28', '21.8 ', '7.5 ', '70.7 '], [
'2013/10/1', '24.1 ', '12.6 ', '63.3 '], ['2013/10/26', '27.0 ',
'12.3 ', '60.7 '], ['2013/11/23', '26.2 ', '15.9 ', '57.9 '], [
'2013/12/8', '38.4 ', '14.0 ', '47.6 '], ['2013/12/14', '35.9 ', '7.2 ',
'56.9 '], ['2013/12/22', '33.0 ', '12.8 ', '54.2 '], ['2013/12/28',
'32.6 ', '12.2 ', '55.2 '], ['2014/1/25', '31.0 ', '13.1 ', '55.9 '], [
'2014/2/22', '29.7 ', '16.4 ', '53.9 '], ['2014/4/11', '26.7 ', '13.5 ',
'59.8 '], ['2014/5/17', '32.5 ', '12.8 ', '54.7 '], ['2014/6/21',
'33.0 ', '14.9 ', '52.1 '], ['2014/7/1', '40.6 ', '11.6 ', '47.8 ']]
gos = [['野田', '2011/09/02', '2012/12/26'], ['菅', '2010/06/08', '2011/09/02'
], ['鳩山', '2009/09/16', '2010/06/08'], ['麻生', '2008/09/24',
'2009/09/16'], ['福田', '2007/09/26', '2008/09/24'], ['安倍', '2007/04/01',
'2007/09/26']]
class initDATA(webapp.RequestHandler):
    """Load the seed data above (``hts``, ``prs``, ``gos``) into the datastore.

    Requires a logged-in user; anonymous visitors are redirected to the
    login page.  Datasets:

    * ``hts`` -> HistoricalTable (date, title, url)
    * ``prs`` -> PollRating (date; disapproval/unknown/approval rates)
    * ``gos`` -> Government (name, begin, end; ``end`` may be absent)
    """

    def get(self):
        user = users.get_current_user()
        if user is None:  # 'is None' rather than '== None'
            self.redirect(users.create_login_url(self.request.uri))
            return
        for ht in hts:
            # ht = [date-string, title, url]
            htdate = datetime.strptime(ht[0], '%Y/%m/%d').date()
            obj1 = HistoricalTable(date=htdate, title=ht[1], url=ht[2])
            obj1.save()
        for pr in prs:
            # pr = [date-string, disapproval, unknown, approval]
            prdate = datetime.strptime(pr[0], '%Y/%m/%d').date()
            obj2 = PollRating(date=prdate, approval_rate=float(pr[3]),
                              unknown_rate=float(pr[2]),
                              disapproval_rate=float(pr[1]))
            obj2.save()
        for pgo in gos:
            # pgo = [name, begin-date, end-date]; a missing/unparsable end
            # date means the government is still in office.
            gosdate = datetime.strptime(pgo[1], '%Y/%m/%d').date()
            try:
                goedate = datetime.strptime(pgo[2], '%Y/%m/%d').date()
            except (IndexError, TypeError, ValueError):
                # narrowed from a bare 'except:'; only parse/shape errors
                # fall back to "no end date"
                goedate = None
            obj3 = Government(name=pgo[0], begin=gosdate, end=goedate)
            obj3.save()
class clearDATA(webapp.RequestHandler):
    """Delete all seeded HistoricalTable, PollRating and Government entities.

    Anonymous visitors are redirected to the login page.
    """

    def get(self):
        user = users.get_current_user()
        if user is None:  # 'is None' rather than '== None'
            self.redirect(users.create_login_url(self.request.uri))
            return
        for ht in HistoricalTable.all():
            ht.delete()
        for pr in PollRating.all():
            pr.delete()
        for pgo in Government.all():
            pgo.delete()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from google.appengine.api import users
from google.appengine.ext import webapp
from MyModel import HistoricalTable, PollRating, Government
from datetime import datetime
hts = [['2014/7/1', '集団的自衛権行使容認の閣議決定',
'http://www.47news.jp/47topics/e/254919.php'], ['2014/3/18',
'ロシア、クリミアを編入', 'http://www.47news.jp/CN/201403/CN2014031801002413.html'
], ['2014/2/9', '舛添氏が圧勝、東京都知事選',
'http://www.47news.jp/CN/201402/CN2014020901001630.html'], ['2014/1/7',
'国家安全保障局を設置', 'http://www.47news.jp/CN/201401/CN2014010701001086.html'],
['2013/12/26', '安倍首相が靖国神社参拝',
'http://www.47news.jp/CN/201312/CN2013122601000987.html'], ['2013/12/6',
'特定秘密保護法が成立', 'http://www.47news.jp/CN/201312/CN2013120601002724.html'],
['2013/11/3', '東北楽天がプロ野球日本一',
'http://www.47news.jp/CN/201311/CN2013110301002118.html'], ['2013/10/1',
'消費税率引き上げ決定、4月8%',
'http://www.47news.jp/CN/201310/CN2013100101002292.html'], ['2013/9/8',
'2020年東京五輪開催決定',
'http://www.47news.jp/CN/201309/CN2013090401001495.html'], ['2013/7/21',
'参院選で自民圧勝、ねじれ解消',
'http://www.47news.jp/CN/201307/CN2013072101001638.html'], ['2013/3/15',
'TPP交渉に参加表明', 'http://www.47news.jp/CN/201303/CN2013031501001566.html'],
['2013/2/12', '北朝鮮が3度目の核実験',
'http://www.47news.jp/CN/201302/CN2013021201001987.html'], ['2013/1/16',
'アルジェリア人質事件発生',
'http://www.47news.jp/CN/201301/CN2013011601001649.html'], [
'2012/12/26', '第2次安倍内閣発足',
'http://www.47news.jp/CN/201212/CN2012122601001577.html'], ['2012/12/6',
'自公が政権奪還、衆院選', 'http://www.47news.jp/CN/201212/CN2012121601001041.html'
], ['2012/11/15', '習近平新指導部発足、中国',
'http://www.47news.jp/CN/201211/CN2012111501001203.html'], ['2012/11/6',
'オバマ米大統領が再選', 'http://www.47news.jp/CN/201211/CN2012110701000867.html'],
['2012/10/1', '新型輸送機オスプレイを沖縄配備',
'http://www.47news.jp/CN/201210/CN2012100101001335.html'], ['2012/9/11',
'尖閣諸島の魚釣島など3島国有化',
'http://www.47news.jp/CN/201209/CN2012091101001254.html'], ['2012/8/10',
'消費税増税法が成立、10%へ',
'http://www.47news.jp/CN/201208/CN2012081001002702.html'], ['2012/6/27',
'東京電力を国有化、公的資金注入',
'http://www.47news.jp/CN/201206/CN2012062701001601.html'], [
'2011/12/19', '北朝鮮の金正日総書記が死去発表',
'http://www.47news.jp/CN/201112/CN2011121901001386.html'], [
'2011/11/27', '大阪ダブル選で「維新の会」勝利',
'http://www.47news.jp/CN/201111/CN2011112701001230.html'], [
'2011/10/20', 'リビアのカダフィ大佐が死亡',
'http://www.47news.jp/CN/201110/CN2011102001000912.html'], ['2011/10/5',
'米アップル創業者ジョブズ氏死去',
'http://www.47news.jp/CN/201110/CN2011100601000102.html'], ['2011/9/2',
'野田内閣が発足', 'http://www.47news.jp/CN/201109/CN2011090201000656.html'], [
'2011/8/19', '円が戦後最高値更新、75円95銭',
'http://www.47news.jp/CN/201108/CN2011081901001116.html'], ['2011/7/17',
'なでしこジャパン女子W杯初優勝',
'http://www.47news.jp/CN/201107/CN2011071801000025.html'], ['2011/5/6',
'首相、浜岡原発停止要請', 'http://www.47news.jp/CN/201105/CN2011050601000847.html'
], ['2011/3/11', '東日本大震災',
'http://www.47news.jp/CN/201103/CN2011031101000455.html'], ['2011/2/22',
'NZ地震、日本人28人も死亡',
'http://www.47news.jp/CN/201104/CN2011040401001017.html'], ['2011/1/31',
'民主党小沢一郎元代表を強制起訴',
'http://www.47news.jp/CN/201101/CN2011013101000352.html'], [
'2010/11/23', '北朝鮮が韓国・延坪島砲撃',
'http://www.47news.jp/CN/201011/CN2010112301000213.html'], ['2010/10/6',
'ノーベル化学賞に根岸、鈴木両氏',
'http://www.47news.jp/CN/201010/CN2010100601000811.html'], ['2010/9/15',
'政府が為替介入、6年半ぶり',
'http://www.47news.jp/CN/201009/CN2010091501000138.html'], ['2010/9/7',
'尖閣で中国漁船が巡視船に衝突',
'http://www.47news.jp/CN/201009/CN2010090701000382.html'], ['2010/7/11',
'参院選で民主党大敗、ねじれ国会',
'http://www.47news.jp/CN/201007/CN2010071101000032.html'], ['2010/6/8',
'鳩山首相退陣、菅内閣発足',
'http://www.47news.jp/CN/201006/CN2010060801000756.html'], ['2010/5/28',
'普天間移設で日米合意', 'http://www.47news.jp/CN/201005/CN2010052801000165.html'],
['2010/4/20', '宮崎県で口蹄疫、被害拡大',
'http://www.47news.jp/CN/201004/CN2010042001000207.html'], [
'2009/11/20', 'デフレ宣言、3年5カ月ぶり',
'http://www.47news.jp/CN/200911/CN2009112001000267.html'], ['2009/10/2',
'2016年五輪はリオ、東京落選',
'http://www.47news.jp/CN/200910/CN2009100201000542.html'], ['2009/9/16',
'鳩山内閣発足', 'http://www.47news.jp/CN/200909/CN2009091601000915.html'], [
'2009/8/30', '民主党圧勝で政権交代、衆院選',
'http://www.47news.jp/CN/200908/CN2009083001000015.html'], ['2009/8/3',
'全国初の裁判員裁判、東京地裁',
'http://www.47news.jp/CN/200908/CN2009080301000461.html'], ['2009/6/25',
'歌手M・ジャクソンさん急死',
'http://www.47news.jp/CN/200906/CN2009062601000067.html'], ['2009/5/25',
'北朝鮮が2回目の核実験', 'http://www.47news.jp/CN/200905/CN2009052501000261.html'
], ['2009/3/23', 'WBCで「侍ジャパン」が連覇',
'http://www.47news.jp/CN/200903/CN2009032401000025.html'], ['2009/1/20',
'米、オバマ新政権が発足', 'http://www.47news.jp/CN/200901/CN2009012001000945.html'
], ['2008/10/31', '田母神俊雄航空幕僚長を更迭',
'http://www.47news.jp/CN/200810/CN2008103101000632.html'], ['2008/9/24',
'麻生内閣発足', 'http://www.47news.jp/CN/200809/CN2008092401000025.html'], [
'2008/9/15', 'リーマン・ショック',
'http://www.47news.jp/CN/200809/CN2008091501000215.html'], ['2008/9/1',
'福田首相、退陣表明', 'http://www.47news.jp/CN/200809/CN2008090101000736.html'],
['2008/7/7', '北海道・洞爺湖サミット~9日',
'http://www.47news.jp/CN/200807/CN2008070901000704.html'], ['2008/6/11',
'福田首相の問責決議が可決',
'http://www.47news.jp/CN/200806/CN2008061101000609.html'], ['2008/5/12',
'中国・四川大地震', 'http://www.47news.jp/CN/200805/CN2008051201000871.html'],
['2008/4/9', '日銀総裁に白川副総裁が昇格',
'http://www.47news.jp/CN/200804/CN2008040901000924.html'], ['2008/2/19',
'海自イージス艦が漁船と衝突',
'http://www.47news.jp/CN/200802/CN2008021901000329.html'], ['2008/1/27',
'大阪府知事選で橋下徹氏初当選',
'http://www.47news.jp/CN/200801/CN2008012801000076.html'], [
'2007/11/28', '防衛装備疑惑で前防衛次官を逮捕',
'http://www.47news.jp/CN/200711/CN2007112801000463.html'], ['2007/11/2',
'テロ特措法期限切れ海自撤収命令',
'http://www.47news.jp/CN/200710/CN2007102901000620.html'], ['2007/9/12',
'安倍首相が退陣。後任に福田氏',
'http://www.47news.jp/CN/200709/CN2007091201000426.html'], ['2007/7/29',
'参院選で自民党が歴史的惨敗',
'http://www.47news.jp/CN/200707/CN2007072901000697.html'], ['2007/5/28',
'松岡農相が自殺', 'http://www.47news.jp/CN/200705/CN2007052801000693.html'], [
'2007/5/14', '改憲手続き定めた国民投票法成立',
'http://www.47news.jp/CN/200705/CN2007051401000231.html']]
prs = [['2007/4/16', '38.3 ', '17.5 ', '44.2 '], ['2007/5/12', '38.2 ',
'14.2 ', '47.6 '], ['2007/6/1', '48.7 ', '15.5 ', '35.8 '], [
'2007/7/30', '59.0 ', '12.0 ', '29.0 '], ['2007/8/27', '45.5 ', '14.0 ',
'40.5 '], ['2007/9/13', '46.6 ', '7.9 ', '45.5 '], ['2007/9/25',
'25.6 ', '16.6 ', '57.8 '], ['2007/10/27', '29.6 ', '20.2 ', '50.2 '],
['2007/11/5', '36.6 ', '16.4 ', '47.0 '], ['2007/12/15', '47.6 ',
'17.1 ', '35.3 '], ['2008/1/11', '42.8 ', '15.8 ', '41.4 '], [
'2008/2/9', '44.6 ', '19.9 ', '35.5 '], ['2008/3/15', '50.6 ', '16.0 ',
'33.4 '], ['2008/4/4', '59.6 ', '13.8 ', '26.6 '], ['2008/5/1', '66.6 ',
'13.6 ', '19.8 '], ['2008/6/12', '60.2 ', '14.8 ', '25.0 '], [
'2008/7/11', '53.5 ', '19.7 ', '26.8 '], ['2008/8/1', '48.2 ', '20.4 ',
'31.5 '], ['2008/9/2', '28.0 ', '4.1 ', '67.9 '], ['2008/9/24', '32.9 ',
'18.5 ', '48.6 '], ['2008/10/18', '39.0 ', '18.5 ', '42.5 '], [
'2008/11/8', '42.1 ', '16.9 ', '40.9 '], ['2008/12/6', '61.4 ', '13.2 ',
'25.4 '], ['2009/1/10', '70.2 ', '10.6 ', '19.2 '], ['2009/2/7',
'70.9 ', '11.0 ', '18.1 '], ['2009/2/17', '76.6 ', '10.0 ', '13.4 '], [
'2009/3/7', '70.8 ', '13.2 ', '16.0 '], ['2009/3/25', '63.4 ', '12.8 ',
'23.7 '], ['2009/4/28', '56.2 ', '14.2 ', '29.6 '], ['2009/5/11',
'55.1 ', '16.9 ', '28.0 '], ['2009/5/16', '60.2 ', '13.5 ', '26.2 '], [
'2009/6/13', '70.5 ', '12.0 ', '17.5 '], ['2009/7/3', '60.9 ', '15.7 ',
'23.4 '], ['2009/9/16', '13.1 ', '14.9 ', '72.0 '], ['2009/10/31',
'22.9 ', '15.3 ', '61.8 '], ['2009/11/28', '25.1 ', '11.2 ', '63.6 '],
['2009/12/25', '38.1 ', '14.7 ', '47.1 '], ['2010/1/10', '33.2 ',
'16.0 ', '50.8 '], ['2010/1/17', '44.1 ', '14.4 ', '41.5 '], [
'2010/2/5', '45.1 ', '13.5 ', '41.4 '], ['2010/3/6', '48.9 ', '14.8 ',
'36.4 '], ['2010/4/3', '53.3 ', '13.7 ', '33.0 '], ['2010/4/28',
'64.4 ', '14.9 ', '20.7 '], ['2010/5/29', '73.1 ', '7.7 ', '19.1 '], [
'2010/6/4', '37.2 ', '5.2 ', '57.7 '], ['2010/7/12', '52.2 ', '11.5 ',
'36.2 '], ['2010/8/7', '44.8 ', '16.5 ', '38.7 '], ['2010/8/27',
'36.2 ', '15.7 ', '48.1 '], ['2010/9/9', '31.5 ', '13.8 ', '54.7 '], [
'2010/9/17', '21.2 ', '14.3 ', '64.5 '], ['2010/10/5', '36.6 ', '15.8 ',
'47.6 '], ['2010/11/6', '48.6 ', '18.7 ', '32.7 '], ['2010/11/23',
'61.9 ', '14.5 ', '23.6 '], ['2010/12/25', '67.0 ', '9.4 ', '23.7 '], [
'2011/1/14', '53.9 ', '13.9 ', '32.1 '], ['2011/2/11', '63.3 ', '16.7 ',
'19.9 '], ['2011/3/26', '55.6 ', '16.1 ', '28.3 '], ['2011/4/29',
'58.6 ', '14.5 ', '26.8 '], ['2011/5/14', '57.3 ', '14.6 ', '28.1 '], [
'2011/6/28', '61.1 ', '15.6 ', '23.2 '], ['2011/7/23', '70.6 ', '12.3 ',
'17.1 '], ['2011/8/20', '70.0 ', '14.2 ', '15.8 '], ['2011/9/2',
'18.1 ', '19.1 ', '62.7 '], ['2011/10/1', '27.8 ', '17.6 ', '54.6 '], [
'2011/11/5', '34.3 ', '18.6 ', '47.1 '], ['2011/12/3', '40.3 ', '15.1 ',
'44.6 '], ['2012/1/7', '50.6 ', '13.7 ', '35.7 '], ['2012/1/13',
'47.8 ', '16.4 ', '35.8 '], ['2012/2/18', '55.2 ', '15.8 ', '29.0 '], [
'2012/3/19', '50.2 ', '18.2 ', '31.6 '], ['2012/4/28', '60.0 ', '13.6 ',
'26.4 '], ['2012/5/26', '58.1 ', '13.9 ', '28.0 '], ['2012/6/4',
'50.0 ', '18.0 ', '32.0 '], ['2012/6/26', '54.4 ', '15.8 ', '29.9 '], [
'2012/7/14', '59.9 ', '11.9 ', '28.2 '], ['2012/8/11', '59.0 ', '13.1 ',
'27.9 '], ['2012/9/1', '59.4 ', '14.3 ', '26.3 '], ['2012/10/1',
'55.3 ', '15.5 ', '29.2 '], ['2012/11/3', '66.0 ', '16.2 ', '17.7 '], [
'2012/12/26', '21.8 ', '16.2 ', '62.0 '], ['2013/1/26', '22.1 ',
'11.2 ', '66.7 '], ['2013/2/23', '16.2 ', '11.1 ', '72.7 '], [
'2013/3/23', '16.7 ', '12.2 ', '71.1 '], ['2013/3/30', '20.8 ', '7.2 ',
'72.0 '], ['2013/4/20', '16.0 ', '11.9 ', '72.1 '], ['2013/5/18',
'16.2 ', '12.9 ', '70.9 '], ['2013/6/1', '16.3 ', '15.7 ', '68.0 '], [
'2013/6/8', '20.4 ', '8.4 ', '71.2 '], ['2013/7/22', '31.7 ', '12.1 ',
'56.2 '], ['2013/8/24', '25.6 ', '16.7 ', '57.7 '], ['2013/9/14',
'20.4 ', '17.8 ', '61.8 '], ['2013/9/28', '21.8 ', '7.5 ', '70.7 '], [
'2013/10/1', '24.1 ', '12.6 ', '63.3 '], ['2013/10/26', '27.0 ',
'12.3 ', '60.7 '], ['2013/11/23', '26.2 ', '15.9 ', '57.9 '], [
'2013/12/8', '38.4 ', '14.0 ', '47.6 '], ['2013/12/14', '35.9 ', '7.2 ',
'56.9 '], ['2013/12/22', '33.0 ', '12.8 ', '54.2 '], ['2013/12/28',
'32.6 ', '12.2 ', '55.2 '], ['2014/1/25', '31.0 ', '13.1 ', '55.9 '], [
'2014/2/22', '29.7 ', '16.4 ', '53.9 '], ['2014/4/11', '26.7 ', '13.5 ',
'59.8 '], ['2014/5/17', '32.5 ', '12.8 ', '54.7 '], ['2014/6/21',
'33.0 ', '14.9 ', '52.1 '], ['2014/7/1', '40.6 ', '11.6 ', '47.8 ']]
gos = [['野田', '2011/09/02', '2012/12/26'], ['菅', '2010/06/08', '2011/09/02'
], ['鳩山', '2009/09/16', '2010/06/08'], ['麻生', '2008/09/24',
'2009/09/16'], ['福田', '2007/09/26', '2008/09/24'], ['安倍', '2007/04/01',
'2007/09/26']]
class initDATA(webapp.RequestHandler):
    """Seed the datastore from the module-level lists ``hts``, ``prs``, ``gos``.

    Anonymous visitors are redirected to the login page.  Mapping:

    * ``hts`` -> HistoricalTable (date, title, url)
    * ``prs`` -> PollRating (date; disapproval/unknown/approval rates)
    * ``gos`` -> Government (name, begin, end; ``end`` may be absent)
    """

    def get(self):
        user = users.get_current_user()
        if user is None:  # singleton comparison uses 'is'
            self.redirect(users.create_login_url(self.request.uri))
            return
        for ht in hts:
            # ht = [date-string, title, url]
            htdate = datetime.strptime(ht[0], '%Y/%m/%d').date()
            obj1 = HistoricalTable(date=htdate, title=ht[1], url=ht[2])
            obj1.save()
        for pr in prs:
            # pr = [date-string, disapproval, unknown, approval]
            prdate = datetime.strptime(pr[0], '%Y/%m/%d').date()
            obj2 = PollRating(date=prdate, approval_rate=float(pr[3]),
                              unknown_rate=float(pr[2]),
                              disapproval_rate=float(pr[1]))
            obj2.save()
        for pgo in gos:
            # pgo = [name, begin-date, end-date]; no parsable end date
            # means the government is still in office.
            gosdate = datetime.strptime(pgo[1], '%Y/%m/%d').date()
            try:
                goedate = datetime.strptime(pgo[2], '%Y/%m/%d').date()
            except (IndexError, TypeError, ValueError):
                # narrowed from a bare 'except:' so real bugs propagate
                goedate = None
            obj3 = Government(name=pgo[0], begin=gosdate, end=goedate)
            obj3.save()
class clearDATA(webapp.RequestHandler):
    """Wipe the seeded entities (HistoricalTable, PollRating, Government).

    Anonymous visitors are redirected to the login page.
    """

    def get(self):
        user = users.get_current_user()
        if user is None:  # singleton comparison uses 'is'
            self.redirect(users.create_login_url(self.request.uri))
            return
        for ht in HistoricalTable.all():
            ht.delete()
        for pr in PollRating.all():
            pr.delete()
        for pgo in Government.all():
            pgo.delete()
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
'''
Created on 2014/07/24
@author: seigo
'''
from google.appengine.api import users
from google.appengine.ext import webapp
from MyModel import HistoricalTable, PollRating, Government
from datetime import datetime
hts = [["2014/7/1","集団的自衛権行使容認の閣議決定","http://www.47news.jp/47topics/e/254919.php"],["2014/3/18","ロシア、クリミアを編入","http://www.47news.jp/CN/201403/CN2014031801002413.html"],["2014/2/9","舛添氏が圧勝、東京都知事選","http://www.47news.jp/CN/201402/CN2014020901001630.html"],["2014/1/7","国家安全保障局を設置","http://www.47news.jp/CN/201401/CN2014010701001086.html"],["2013/12/26","安倍首相が靖国神社参拝","http://www.47news.jp/CN/201312/CN2013122601000987.html"],["2013/12/6","特定秘密保護法が成立","http://www.47news.jp/CN/201312/CN2013120601002724.html"],["2013/11/3","東北楽天がプロ野球日本一","http://www.47news.jp/CN/201311/CN2013110301002118.html"],["2013/10/1","消費税率引き上げ決定、4月8%","http://www.47news.jp/CN/201310/CN2013100101002292.html"],["2013/9/8","2020年東京五輪開催決定","http://www.47news.jp/CN/201309/CN2013090401001495.html"],["2013/7/21","参院選で自民圧勝、ねじれ解消","http://www.47news.jp/CN/201307/CN2013072101001638.html"],["2013/3/15","TPP交渉に参加表明","http://www.47news.jp/CN/201303/CN2013031501001566.html"],["2013/2/12","北朝鮮が3度目の核実験","http://www.47news.jp/CN/201302/CN2013021201001987.html"],["2013/1/16","アルジェリア人質事件発生","http://www.47news.jp/CN/201301/CN2013011601001649.html"],["2012/12/26","第2次安倍内閣発足","http://www.47news.jp/CN/201212/CN2012122601001577.html"],["2012/12/6","自公が政権奪還、衆院選","http://www.47news.jp/CN/201212/CN2012121601001041.html"],["2012/11/15","習近平新指導部発足、中国","http://www.47news.jp/CN/201211/CN2012111501001203.html"],["2012/11/6","オバマ米大統領が再選","http://www.47news.jp/CN/201211/CN2012110701000867.html"],["2012/10/1","新型輸送機オスプレイを沖縄配備","http://www.47news.jp/CN/201210/CN2012100101001335.html"],["2012/9/11","尖閣諸島の魚釣島など3島国有化","http://www.47news.jp/CN/201209/CN2012091101001254.html"],["2012/8/10","消費税増税法が成立、10%へ","http://www.47news.jp/CN/201208/CN2012081001002702.html"],["2012/6/27","東京電力を国有化、公的資金注入","http://www.47news.jp/CN/201206/CN2012062701001601.html"],["2011/12/19","北朝鮮の金正日総書記が死去発表","http://www.47news.jp/CN/201112/CN2011121901001386.html"],["2011/11/27","大阪ダブル選で「維新の会」勝利","http://www.47news.jp/CN/201111/CN2011112701001230.html"],["2011/10/20",
"リビアのカダフィ大佐が死亡","http://www.47news.jp/CN/201110/CN2011102001000912.html"],["2011/10/5","米アップル創業者ジョブズ氏死去","http://www.47news.jp/CN/201110/CN2011100601000102.html"],["2011/9/2","野田内閣が発足","http://www.47news.jp/CN/201109/CN2011090201000656.html"],["2011/8/19","円が戦後最高値更新、75円95銭","http://www.47news.jp/CN/201108/CN2011081901001116.html"],["2011/7/17","なでしこジャパン女子W杯初優勝","http://www.47news.jp/CN/201107/CN2011071801000025.html"],["2011/5/6","首相、浜岡原発停止要請","http://www.47news.jp/CN/201105/CN2011050601000847.html"],["2011/3/11","東日本大震災","http://www.47news.jp/CN/201103/CN2011031101000455.html"],["2011/2/22","NZ地震、日本人28人も死亡","http://www.47news.jp/CN/201104/CN2011040401001017.html"],["2011/1/31","民主党小沢一郎元代表を強制起訴","http://www.47news.jp/CN/201101/CN2011013101000352.html"],["2010/11/23","北朝鮮が韓国・延坪島砲撃","http://www.47news.jp/CN/201011/CN2010112301000213.html"],["2010/10/6","ノーベル化学賞に根岸、鈴木両氏","http://www.47news.jp/CN/201010/CN2010100601000811.html"],["2010/9/15","政府が為替介入、6年半ぶり","http://www.47news.jp/CN/201009/CN2010091501000138.html"],["2010/9/7","尖閣で中国漁船が巡視船に衝突","http://www.47news.jp/CN/201009/CN2010090701000382.html"],["2010/7/11","参院選で民主党大敗、ねじれ国会","http://www.47news.jp/CN/201007/CN2010071101000032.html"],["2010/6/8","鳩山首相退陣、菅内閣発足","http://www.47news.jp/CN/201006/CN2010060801000756.html"],["2010/5/28","普天間移設で日米合意","http://www.47news.jp/CN/201005/CN2010052801000165.html"],["2010/4/20","宮崎県で口蹄疫、被害拡大","http://www.47news.jp/CN/201004/CN2010042001000207.html"],["2009/11/20","デフレ宣言、3年5カ月ぶり","http://www.47news.jp/CN/200911/CN2009112001000267.html"],["2009/10/2","2016年五輪はリオ、東京落選","http://www.47news.jp/CN/200910/CN2009100201000542.html"],["2009/9/16","鳩山内閣発足","http://www.47news.jp/CN/200909/CN2009091601000915.html"],["2009/8/30","民主党圧勝で政権交代、衆院選","http://www.47news.jp/CN/200908/CN2009083001000015.html"],["2009/8/3","全国初の裁判員裁判、東京地裁","http://www.47news.jp/CN/200908/CN2009080301000461.html"],["2009/6/25","歌手M・ジャクソンさん急死","http://www.47news.jp/CN/200906/CN2009062601000067.html"],["2009/5/25","北朝鮮が2回目の核実
験","http://www.47news.jp/CN/200905/CN2009052501000261.html"],["2009/3/23","WBCで「侍ジャパン」が連覇","http://www.47news.jp/CN/200903/CN2009032401000025.html"],["2009/1/20","米、オバマ新政権が発足","http://www.47news.jp/CN/200901/CN2009012001000945.html"],["2008/10/31","田母神俊雄航空幕僚長を更迭","http://www.47news.jp/CN/200810/CN2008103101000632.html"],["2008/9/24","麻生内閣発足","http://www.47news.jp/CN/200809/CN2008092401000025.html"],["2008/9/15","リーマン・ショック","http://www.47news.jp/CN/200809/CN2008091501000215.html"],["2008/9/1","福田首相、退陣表明","http://www.47news.jp/CN/200809/CN2008090101000736.html"],["2008/7/7","北海道・洞爺湖サミット~9日","http://www.47news.jp/CN/200807/CN2008070901000704.html"],["2008/6/11","福田首相の問責決議が可決","http://www.47news.jp/CN/200806/CN2008061101000609.html"],["2008/5/12","中国・四川大地震","http://www.47news.jp/CN/200805/CN2008051201000871.html"],["2008/4/9","日銀総裁に白川副総裁が昇格","http://www.47news.jp/CN/200804/CN2008040901000924.html"],["2008/2/19","海自イージス艦が漁船と衝突","http://www.47news.jp/CN/200802/CN2008021901000329.html"],["2008/1/27","大阪府知事選で橋下徹氏初当選","http://www.47news.jp/CN/200801/CN2008012801000076.html"],["2007/11/28","防衛装備疑惑で前防衛次官を逮捕","http://www.47news.jp/CN/200711/CN2007112801000463.html"],["2007/11/2","テロ特措法期限切れ海自撤収命令","http://www.47news.jp/CN/200710/CN2007102901000620.html"],["2007/9/12","安倍首相が退陣。後任に福田氏","http://www.47news.jp/CN/200709/CN2007091201000426.html"],["2007/7/29","参院選で自民党が歴史的惨敗","http://www.47news.jp/CN/200707/CN2007072901000697.html"],["2007/5/28","松岡農相が自殺","http://www.47news.jp/CN/200705/CN2007052801000693.html"],["2007/5/14","改憲手続き定めた国民投票法成立","http://www.47news.jp/CN/200705/CN2007051401000231.html"]]
prs = [["2007/4/16","38.3 ","17.5 ","44.2 "],["2007/5/12","38.2 ","14.2 ","47.6 "],["2007/6/1","48.7 ","15.5 ","35.8 "],["2007/7/30","59.0 ","12.0 ","29.0 "],["2007/8/27","45.5 ","14.0 ","40.5 "],["2007/9/13","46.6 ","7.9 ","45.5 "],["2007/9/25","25.6 ","16.6 ","57.8 "],["2007/10/27","29.6 ","20.2 ","50.2 "],["2007/11/5","36.6 ","16.4 ","47.0 "],["2007/12/15","47.6 ","17.1 ","35.3 "],["2008/1/11","42.8 ","15.8 ","41.4 "],["2008/2/9","44.6 ","19.9 ","35.5 "],["2008/3/15","50.6 ","16.0 ","33.4 "],["2008/4/4","59.6 ","13.8 ","26.6 "],["2008/5/1","66.6 ","13.6 ","19.8 "],["2008/6/12","60.2 ","14.8 ","25.0 "],["2008/7/11","53.5 ","19.7 ","26.8 "],["2008/8/1","48.2 ","20.4 ","31.5 "],["2008/9/2","28.0 ","4.1 ","67.9 "],["2008/9/24","32.9 ","18.5 ","48.6 "],["2008/10/18","39.0 ","18.5 ","42.5 "],["2008/11/8","42.1 ","16.9 ","40.9 "],["2008/12/6","61.4 ","13.2 ","25.4 "],["2009/1/10","70.2 ","10.6 ","19.2 "],["2009/2/7","70.9 ","11.0 ","18.1 "],["2009/2/17","76.6 ","10.0 ","13.4 "],["2009/3/7","70.8 ","13.2 ","16.0 "],["2009/3/25","63.4 ","12.8 ","23.7 "],["2009/4/28","56.2 ","14.2 ","29.6 "],["2009/5/11","55.1 ","16.9 ","28.0 "],["2009/5/16","60.2 ","13.5 ","26.2 "],["2009/6/13","70.5 ","12.0 ","17.5 "],["2009/7/3","60.9 ","15.7 ","23.4 "],["2009/9/16","13.1 ","14.9 ","72.0 "],["2009/10/31","22.9 ","15.3 ","61.8 "],["2009/11/28","25.1 ","11.2 ","63.6 "],["2009/12/25","38.1 ","14.7 ","47.1 "],["2010/1/10","33.2 ","16.0 ","50.8 "],["2010/1/17","44.1 ","14.4 ","41.5 "],["2010/2/5","45.1 ","13.5 ","41.4 "],["2010/3/6","48.9 ","14.8 ","36.4 "],["2010/4/3","53.3 ","13.7 ","33.0 "],["2010/4/28","64.4 ","14.9 ","20.7 "],["2010/5/29","73.1 ","7.7 ","19.1 "],["2010/6/4","37.2 ","5.2 ","57.7 "],["2010/7/12","52.2 ","11.5 ","36.2 "],["2010/8/7","44.8 ","16.5 ","38.7 "],["2010/8/27","36.2 ","15.7 ","48.1 "],["2010/9/9","31.5 ","13.8 ","54.7 "],["2010/9/17","21.2 ","14.3 ","64.5 "],["2010/10/5","36.6 ","15.8 ","47.6 "],["2010/11/6","48.6 ","18.7 ","32.7 "],["2010/11/23","61.9 ","14.5 
","23.6 "],["2010/12/25","67.0 ","9.4 ","23.7 "],["2011/1/14","53.9 ","13.9 ","32.1 "],["2011/2/11","63.3 ","16.7 ","19.9 "],["2011/3/26","55.6 ","16.1 ","28.3 "],["2011/4/29","58.6 ","14.5 ","26.8 "],["2011/5/14","57.3 ","14.6 ","28.1 "],["2011/6/28","61.1 ","15.6 ","23.2 "],["2011/7/23","70.6 ","12.3 ","17.1 "],["2011/8/20","70.0 ","14.2 ","15.8 "],["2011/9/2","18.1 ","19.1 ","62.7 "],["2011/10/1","27.8 ","17.6 ","54.6 "],["2011/11/5","34.3 ","18.6 ","47.1 "],["2011/12/3","40.3 ","15.1 ","44.6 "],["2012/1/7","50.6 ","13.7 ","35.7 "],["2012/1/13","47.8 ","16.4 ","35.8 "],["2012/2/18","55.2 ","15.8 ","29.0 "],["2012/3/19","50.2 ","18.2 ","31.6 "],["2012/4/28","60.0 ","13.6 ","26.4 "],["2012/5/26","58.1 ","13.9 ","28.0 "],["2012/6/4","50.0 ","18.0 ","32.0 "],["2012/6/26","54.4 ","15.8 ","29.9 "],["2012/7/14","59.9 ","11.9 ","28.2 "],["2012/8/11","59.0 ","13.1 ","27.9 "],["2012/9/1","59.4 ","14.3 ","26.3 "],["2012/10/1","55.3 ","15.5 ","29.2 "],["2012/11/3","66.0 ","16.2 ","17.7 "],["2012/12/26","21.8 ","16.2 ","62.0 "],["2013/1/26","22.1 ","11.2 ","66.7 "],["2013/2/23","16.2 ","11.1 ","72.7 "],["2013/3/23","16.7 ","12.2 ","71.1 "],["2013/3/30","20.8 ","7.2 ","72.0 "],["2013/4/20","16.0 ","11.9 ","72.1 "],["2013/5/18","16.2 ","12.9 ","70.9 "],["2013/6/1","16.3 ","15.7 ","68.0 "],["2013/6/8","20.4 ","8.4 ","71.2 "],["2013/7/22","31.7 ","12.1 ","56.2 "],["2013/8/24","25.6 ","16.7 ","57.7 "],["2013/9/14","20.4 ","17.8 ","61.8 "],["2013/9/28","21.8 ","7.5 ","70.7 "],["2013/10/1","24.1 ","12.6 ","63.3 "],["2013/10/26","27.0 ","12.3 ","60.7 "],["2013/11/23","26.2 ","15.9 ","57.9 "],["2013/12/8","38.4 ","14.0 ","47.6 "],["2013/12/14","35.9 ","7.2 ","56.9 "],["2013/12/22","33.0 ","12.8 ","54.2 "],["2013/12/28","32.6 ","12.2 ","55.2 "],["2014/1/25","31.0 ","13.1 ","55.9 "],["2014/2/22","29.7 ","16.4 ","53.9 "],["2014/4/11","26.7 ","13.5 ","59.8 "],["2014/5/17","32.5 ","12.8 ","54.7 "],["2014/6/21","33.0 ","14.9 ","52.1 "],["2014/7/1","40.6 ","11.6 ","47.8 "]]
gos = [["野田","2011/09/02","2012/12/26"],["菅","2010/06/08","2011/09/02"],["鳩山","2009/09/16","2010/06/08"],["麻生","2008/09/24","2009/09/16"],["福田","2007/09/26","2008/09/24"],["安倍","2007/04/01","2007/09/26"]]
class initDATA(webapp.RequestHandler):
    """Seed the datastore from the module-level datasets.

    GET handler that loads the static ``hts`` (historical events),
    ``prs`` (poll ratings) and ``gos`` (governments) lists into their
    respective datastore models.  Requires a logged-in user; anonymous
    requests are redirected to the login page.
    """

    def get(self):
        # Require authentication before writing to the datastore.
        user = users.get_current_user()
        if user is None:
            self.redirect(users.create_login_url(self.request.uri))
            return
        for ht in hts:
            # ht = [date, title, url]
            htdate = datetime.strptime(ht[0], '%Y/%m/%d').date()
            obj1 = HistoricalTable(date=htdate, title=ht[1], url=ht[2])
            obj1.save()
        for pr in prs:
            # pr = [date, disapproval, unknown, approval] -- percentages as
            # strings with trailing whitespace; float() tolerates the padding.
            prdate = datetime.strptime(pr[0], '%Y/%m/%d').date()
            obj2 = PollRating(date=prdate,
                              approval_rate=float(pr[3]),
                              unknown_rate=float(pr[2]),
                              disapproval_rate=float(pr[1]))
            obj2.save()
        for pgo in gos:
            # pgo = [name, term begin, term end]; the end date may be absent
            # or unparsable (sitting government), in which case store None.
            gosdate = datetime.strptime(pgo[1], '%Y/%m/%d').date()
            try:
                goedate = datetime.strptime(pgo[2], '%Y/%m/%d').date()
            except (IndexError, ValueError):
                # Narrowed from a bare ``except:`` so genuine programming
                # errors (NameError, AttributeError, ...) are not swallowed.
                goedate = None
            obj3 = Government(name=pgo[0], begin=gosdate, end=goedate)
            obj3.save()
class clearDATA(webapp.RequestHandler):
    """Wipe all seeded entities from the datastore.

    GET handler that deletes every HistoricalTable, PollRating and
    Government entity.  Requires a logged-in user; anonymous requests
    are redirected to the login page.
    """

    def get(self):
        # Require authentication before destructive datastore access.
        current_user = users.get_current_user()
        if current_user == None:
            self.redirect(users.create_login_url(self.request.uri))
            return
        # Delete the entities of each seeded model kind in turn.
        for model_kind in (HistoricalTable, PollRating, Government):
            for entity in model_kind.all():
                entity.delete()
|
flexible
|
{
"blob_id": "b8957acb71d435a93b4397a24d3b5cf4b2a817f8",
"index": 2602,
"step-1": "<mask token>\n\n\nclass initDATA(webapp.RequestHandler):\n <mask token>\n\n def get(self):\n user = users.get_current_user()\n if user == None:\n self.redirect(users.create_login_url(self.request.uri))\n return\n for ht in hts:\n htdate = datetime.date(datetime.strptime(ht[0], '%Y/%m/%d'))\n obj1 = HistoricalTable(date=htdate, title=ht[1], url=ht[2])\n obj1.save()\n for pr in prs:\n prdate = datetime.date(datetime.strptime(pr[0], '%Y/%m/%d'))\n obj2 = PollRating(date=prdate, approval_rate=float(pr[3]),\n unknown_rate=float(pr[2]), disapproval_rate=float(pr[1]))\n obj2.save()\n for pgo in gos:\n gosdate = datetime.date(datetime.strptime(pgo[1], '%Y/%m/%d'))\n try:\n goedate = datetime.date(datetime.strptime(pgo[2], '%Y/%m/%d'))\n except:\n goedate = None\n obj3 = Government(name=pgo[0], begin=gosdate, end=goedate)\n obj3.save()\n\n\nclass clearDATA(webapp.RequestHandler):\n\n def get(self):\n user = users.get_current_user()\n if user == None:\n self.redirect(users.create_login_url(self.request.uri))\n return\n for ht in HistoricalTable.all():\n ht.delete()\n for pr in PollRating.all():\n pr.delete()\n for pgo in Government.all():\n pgo.delete()\n",
"step-2": "<mask token>\n\n\nclass initDATA(webapp.RequestHandler):\n \"\"\"\n classdocs\n \"\"\"\n\n def get(self):\n user = users.get_current_user()\n if user == None:\n self.redirect(users.create_login_url(self.request.uri))\n return\n for ht in hts:\n htdate = datetime.date(datetime.strptime(ht[0], '%Y/%m/%d'))\n obj1 = HistoricalTable(date=htdate, title=ht[1], url=ht[2])\n obj1.save()\n for pr in prs:\n prdate = datetime.date(datetime.strptime(pr[0], '%Y/%m/%d'))\n obj2 = PollRating(date=prdate, approval_rate=float(pr[3]),\n unknown_rate=float(pr[2]), disapproval_rate=float(pr[1]))\n obj2.save()\n for pgo in gos:\n gosdate = datetime.date(datetime.strptime(pgo[1], '%Y/%m/%d'))\n try:\n goedate = datetime.date(datetime.strptime(pgo[2], '%Y/%m/%d'))\n except:\n goedate = None\n obj3 = Government(name=pgo[0], begin=gosdate, end=goedate)\n obj3.save()\n\n\nclass clearDATA(webapp.RequestHandler):\n\n def get(self):\n user = users.get_current_user()\n if user == None:\n self.redirect(users.create_login_url(self.request.uri))\n return\n for ht in HistoricalTable.all():\n ht.delete()\n for pr in PollRating.all():\n pr.delete()\n for pgo in Government.all():\n pgo.delete()\n",
"step-3": "<mask token>\nhts = [['2014/7/1', '集団的自衛権行使容認の閣議決定',\n 'http://www.47news.jp/47topics/e/254919.php'], ['2014/3/18',\n 'ロシア、クリミアを編入', 'http://www.47news.jp/CN/201403/CN2014031801002413.html'\n ], ['2014/2/9', '舛添氏が圧勝、東京都知事選',\n 'http://www.47news.jp/CN/201402/CN2014020901001630.html'], ['2014/1/7',\n '国家安全保障局を設置', 'http://www.47news.jp/CN/201401/CN2014010701001086.html'],\n ['2013/12/26', '安倍首相が靖国神社参拝',\n 'http://www.47news.jp/CN/201312/CN2013122601000987.html'], ['2013/12/6',\n '特定秘密保護法が成立', 'http://www.47news.jp/CN/201312/CN2013120601002724.html'],\n ['2013/11/3', '東北楽天がプロ野球日本一',\n 'http://www.47news.jp/CN/201311/CN2013110301002118.html'], ['2013/10/1',\n '消費税率引き上げ決定、4月8%',\n 'http://www.47news.jp/CN/201310/CN2013100101002292.html'], ['2013/9/8',\n '2020年東京五輪開催決定',\n 'http://www.47news.jp/CN/201309/CN2013090401001495.html'], ['2013/7/21',\n '参院選で自民圧勝、ねじれ解消',\n 'http://www.47news.jp/CN/201307/CN2013072101001638.html'], ['2013/3/15',\n 'TPP交渉に参加表明', 'http://www.47news.jp/CN/201303/CN2013031501001566.html'],\n ['2013/2/12', '北朝鮮が3度目の核実験',\n 'http://www.47news.jp/CN/201302/CN2013021201001987.html'], ['2013/1/16',\n 'アルジェリア人質事件発生',\n 'http://www.47news.jp/CN/201301/CN2013011601001649.html'], [\n '2012/12/26', '第2次安倍内閣発足',\n 'http://www.47news.jp/CN/201212/CN2012122601001577.html'], ['2012/12/6',\n '自公が政権奪還、衆院選', 'http://www.47news.jp/CN/201212/CN2012121601001041.html'\n ], ['2012/11/15', '習近平新指導部発足、中国',\n 'http://www.47news.jp/CN/201211/CN2012111501001203.html'], ['2012/11/6',\n 'オバマ米大統領が再選', 'http://www.47news.jp/CN/201211/CN2012110701000867.html'],\n ['2012/10/1', '新型輸送機オスプレイを沖縄配備',\n 'http://www.47news.jp/CN/201210/CN2012100101001335.html'], ['2012/9/11',\n '尖閣諸島の魚釣島など3島国有化',\n 'http://www.47news.jp/CN/201209/CN2012091101001254.html'], ['2012/8/10',\n '消費税増税法が成立、10%へ',\n 'http://www.47news.jp/CN/201208/CN2012081001002702.html'], ['2012/6/27',\n '東京電力を国有化、公的資金注入',\n 'http://www.47news.jp/CN/201206/CN2012062701001601.html'], [\n '2011/12/19', 
'北朝鮮の金正日総書記が死去発表',\n 'http://www.47news.jp/CN/201112/CN2011121901001386.html'], [\n '2011/11/27', '大阪ダブル選で「維新の会」勝利',\n 'http://www.47news.jp/CN/201111/CN2011112701001230.html'], [\n '2011/10/20', 'リビアのカダフィ大佐が死亡',\n 'http://www.47news.jp/CN/201110/CN2011102001000912.html'], ['2011/10/5',\n '米アップル創業者ジョブズ氏死去',\n 'http://www.47news.jp/CN/201110/CN2011100601000102.html'], ['2011/9/2',\n '野田内閣が発足', 'http://www.47news.jp/CN/201109/CN2011090201000656.html'], [\n '2011/8/19', '円が戦後最高値更新、75円95銭',\n 'http://www.47news.jp/CN/201108/CN2011081901001116.html'], ['2011/7/17',\n 'なでしこジャパン女子W杯初優勝',\n 'http://www.47news.jp/CN/201107/CN2011071801000025.html'], ['2011/5/6',\n '首相、浜岡原発停止要請', 'http://www.47news.jp/CN/201105/CN2011050601000847.html'\n ], ['2011/3/11', '東日本大震災',\n 'http://www.47news.jp/CN/201103/CN2011031101000455.html'], ['2011/2/22',\n 'NZ地震、日本人28人も死亡',\n 'http://www.47news.jp/CN/201104/CN2011040401001017.html'], ['2011/1/31',\n '民主党小沢一郎元代表を強制起訴',\n 'http://www.47news.jp/CN/201101/CN2011013101000352.html'], [\n '2010/11/23', '北朝鮮が韓国・延坪島砲撃',\n 'http://www.47news.jp/CN/201011/CN2010112301000213.html'], ['2010/10/6',\n 'ノーベル化学賞に根岸、鈴木両氏',\n 'http://www.47news.jp/CN/201010/CN2010100601000811.html'], ['2010/9/15',\n '政府が為替介入、6年半ぶり',\n 'http://www.47news.jp/CN/201009/CN2010091501000138.html'], ['2010/9/7',\n '尖閣で中国漁船が巡視船に衝突',\n 'http://www.47news.jp/CN/201009/CN2010090701000382.html'], ['2010/7/11',\n '参院選で民主党大敗、ねじれ国会',\n 'http://www.47news.jp/CN/201007/CN2010071101000032.html'], ['2010/6/8',\n '鳩山首相退陣、菅内閣発足',\n 'http://www.47news.jp/CN/201006/CN2010060801000756.html'], ['2010/5/28',\n '普天間移設で日米合意', 'http://www.47news.jp/CN/201005/CN2010052801000165.html'],\n ['2010/4/20', '宮崎県で口蹄疫、被害拡大',\n 'http://www.47news.jp/CN/201004/CN2010042001000207.html'], [\n '2009/11/20', 'デフレ宣言、3年5カ月ぶり',\n 'http://www.47news.jp/CN/200911/CN2009112001000267.html'], ['2009/10/2',\n '2016年五輪はリオ、東京落選',\n 'http://www.47news.jp/CN/200910/CN2009100201000542.html'], ['2009/9/16',\n '鳩山内閣発足', 
'http://www.47news.jp/CN/200909/CN2009091601000915.html'], [\n '2009/8/30', '民主党圧勝で政権交代、衆院選',\n 'http://www.47news.jp/CN/200908/CN2009083001000015.html'], ['2009/8/3',\n '全国初の裁判員裁判、東京地裁',\n 'http://www.47news.jp/CN/200908/CN2009080301000461.html'], ['2009/6/25',\n '歌手M・ジャクソンさん急死',\n 'http://www.47news.jp/CN/200906/CN2009062601000067.html'], ['2009/5/25',\n '北朝鮮が2回目の核実験', 'http://www.47news.jp/CN/200905/CN2009052501000261.html'\n ], ['2009/3/23', 'WBCで「侍ジャパン」が連覇',\n 'http://www.47news.jp/CN/200903/CN2009032401000025.html'], ['2009/1/20',\n '米、オバマ新政権が発足', 'http://www.47news.jp/CN/200901/CN2009012001000945.html'\n ], ['2008/10/31', '田母神俊雄航空幕僚長を更迭',\n 'http://www.47news.jp/CN/200810/CN2008103101000632.html'], ['2008/9/24',\n '麻生内閣発足', 'http://www.47news.jp/CN/200809/CN2008092401000025.html'], [\n '2008/9/15', 'リーマン・ショック',\n 'http://www.47news.jp/CN/200809/CN2008091501000215.html'], ['2008/9/1',\n '福田首相、退陣表明', 'http://www.47news.jp/CN/200809/CN2008090101000736.html'],\n ['2008/7/7', '北海道・洞爺湖サミット~9日',\n 'http://www.47news.jp/CN/200807/CN2008070901000704.html'], ['2008/6/11',\n '福田首相の問責決議が可決',\n 'http://www.47news.jp/CN/200806/CN2008061101000609.html'], ['2008/5/12',\n '中国・四川大地震', 'http://www.47news.jp/CN/200805/CN2008051201000871.html'],\n ['2008/4/9', '日銀総裁に白川副総裁が昇格',\n 'http://www.47news.jp/CN/200804/CN2008040901000924.html'], ['2008/2/19',\n '海自イージス艦が漁船と衝突',\n 'http://www.47news.jp/CN/200802/CN2008021901000329.html'], ['2008/1/27',\n '大阪府知事選で橋下徹氏初当選',\n 'http://www.47news.jp/CN/200801/CN2008012801000076.html'], [\n '2007/11/28', '防衛装備疑惑で前防衛次官を逮捕',\n 'http://www.47news.jp/CN/200711/CN2007112801000463.html'], ['2007/11/2',\n 'テロ特措法期限切れ海自撤収命令',\n 'http://www.47news.jp/CN/200710/CN2007102901000620.html'], ['2007/9/12',\n '安倍首相が退陣。後任に福田氏',\n 'http://www.47news.jp/CN/200709/CN2007091201000426.html'], ['2007/7/29',\n '参院選で自民党が歴史的惨敗',\n 'http://www.47news.jp/CN/200707/CN2007072901000697.html'], ['2007/5/28',\n '松岡農相が自殺', 
'http://www.47news.jp/CN/200705/CN2007052801000693.html'], [\n '2007/5/14', '改憲手続き定めた国民投票法成立',\n 'http://www.47news.jp/CN/200705/CN2007051401000231.html']]\nprs = [['2007/4/16', '38.3 ', '17.5 ', '44.2 '], ['2007/5/12', '38.2 ',\n '14.2 ', '47.6 '], ['2007/6/1', '48.7 ', '15.5 ', '35.8 '], [\n '2007/7/30', '59.0 ', '12.0 ', '29.0 '], ['2007/8/27', '45.5 ', '14.0 ',\n '40.5 '], ['2007/9/13', '46.6 ', '7.9 ', '45.5 '], ['2007/9/25',\n '25.6 ', '16.6 ', '57.8 '], ['2007/10/27', '29.6 ', '20.2 ', '50.2 '],\n ['2007/11/5', '36.6 ', '16.4 ', '47.0 '], ['2007/12/15', '47.6 ',\n '17.1 ', '35.3 '], ['2008/1/11', '42.8 ', '15.8 ', '41.4 '], [\n '2008/2/9', '44.6 ', '19.9 ', '35.5 '], ['2008/3/15', '50.6 ', '16.0 ',\n '33.4 '], ['2008/4/4', '59.6 ', '13.8 ', '26.6 '], ['2008/5/1', '66.6 ',\n '13.6 ', '19.8 '], ['2008/6/12', '60.2 ', '14.8 ', '25.0 '], [\n '2008/7/11', '53.5 ', '19.7 ', '26.8 '], ['2008/8/1', '48.2 ', '20.4 ',\n '31.5 '], ['2008/9/2', '28.0 ', '4.1 ', '67.9 '], ['2008/9/24', '32.9 ',\n '18.5 ', '48.6 '], ['2008/10/18', '39.0 ', '18.5 ', '42.5 '], [\n '2008/11/8', '42.1 ', '16.9 ', '40.9 '], ['2008/12/6', '61.4 ', '13.2 ',\n '25.4 '], ['2009/1/10', '70.2 ', '10.6 ', '19.2 '], ['2009/2/7',\n '70.9 ', '11.0 ', '18.1 '], ['2009/2/17', '76.6 ', '10.0 ', '13.4 '], [\n '2009/3/7', '70.8 ', '13.2 ', '16.0 '], ['2009/3/25', '63.4 ', '12.8 ',\n '23.7 '], ['2009/4/28', '56.2 ', '14.2 ', '29.6 '], ['2009/5/11',\n '55.1 ', '16.9 ', '28.0 '], ['2009/5/16', '60.2 ', '13.5 ', '26.2 '], [\n '2009/6/13', '70.5 ', '12.0 ', '17.5 '], ['2009/7/3', '60.9 ', '15.7 ',\n '23.4 '], ['2009/9/16', '13.1 ', '14.9 ', '72.0 '], ['2009/10/31',\n '22.9 ', '15.3 ', '61.8 '], ['2009/11/28', '25.1 ', '11.2 ', '63.6 '],\n ['2009/12/25', '38.1 ', '14.7 ', '47.1 '], ['2010/1/10', '33.2 ',\n '16.0 ', '50.8 '], ['2010/1/17', '44.1 ', '14.4 ', '41.5 '], [\n '2010/2/5', '45.1 ', '13.5 ', '41.4 '], ['2010/3/6', '48.9 ', '14.8 ',\n '36.4 '], ['2010/4/3', '53.3 ', '13.7 ', '33.0 '], ['2010/4/28',\n '64.4 
', '14.9 ', '20.7 '], ['2010/5/29', '73.1 ', '7.7 ', '19.1 '], [\n '2010/6/4', '37.2 ', '5.2 ', '57.7 '], ['2010/7/12', '52.2 ', '11.5 ',\n '36.2 '], ['2010/8/7', '44.8 ', '16.5 ', '38.7 '], ['2010/8/27',\n '36.2 ', '15.7 ', '48.1 '], ['2010/9/9', '31.5 ', '13.8 ', '54.7 '], [\n '2010/9/17', '21.2 ', '14.3 ', '64.5 '], ['2010/10/5', '36.6 ', '15.8 ',\n '47.6 '], ['2010/11/6', '48.6 ', '18.7 ', '32.7 '], ['2010/11/23',\n '61.9 ', '14.5 ', '23.6 '], ['2010/12/25', '67.0 ', '9.4 ', '23.7 '], [\n '2011/1/14', '53.9 ', '13.9 ', '32.1 '], ['2011/2/11', '63.3 ', '16.7 ',\n '19.9 '], ['2011/3/26', '55.6 ', '16.1 ', '28.3 '], ['2011/4/29',\n '58.6 ', '14.5 ', '26.8 '], ['2011/5/14', '57.3 ', '14.6 ', '28.1 '], [\n '2011/6/28', '61.1 ', '15.6 ', '23.2 '], ['2011/7/23', '70.6 ', '12.3 ',\n '17.1 '], ['2011/8/20', '70.0 ', '14.2 ', '15.8 '], ['2011/9/2',\n '18.1 ', '19.1 ', '62.7 '], ['2011/10/1', '27.8 ', '17.6 ', '54.6 '], [\n '2011/11/5', '34.3 ', '18.6 ', '47.1 '], ['2011/12/3', '40.3 ', '15.1 ',\n '44.6 '], ['2012/1/7', '50.6 ', '13.7 ', '35.7 '], ['2012/1/13',\n '47.8 ', '16.4 ', '35.8 '], ['2012/2/18', '55.2 ', '15.8 ', '29.0 '], [\n '2012/3/19', '50.2 ', '18.2 ', '31.6 '], ['2012/4/28', '60.0 ', '13.6 ',\n '26.4 '], ['2012/5/26', '58.1 ', '13.9 ', '28.0 '], ['2012/6/4',\n '50.0 ', '18.0 ', '32.0 '], ['2012/6/26', '54.4 ', '15.8 ', '29.9 '], [\n '2012/7/14', '59.9 ', '11.9 ', '28.2 '], ['2012/8/11', '59.0 ', '13.1 ',\n '27.9 '], ['2012/9/1', '59.4 ', '14.3 ', '26.3 '], ['2012/10/1',\n '55.3 ', '15.5 ', '29.2 '], ['2012/11/3', '66.0 ', '16.2 ', '17.7 '], [\n '2012/12/26', '21.8 ', '16.2 ', '62.0 '], ['2013/1/26', '22.1 ',\n '11.2 ', '66.7 '], ['2013/2/23', '16.2 ', '11.1 ', '72.7 '], [\n '2013/3/23', '16.7 ', '12.2 ', '71.1 '], ['2013/3/30', '20.8 ', '7.2 ',\n '72.0 '], ['2013/4/20', '16.0 ', '11.9 ', '72.1 '], ['2013/5/18',\n '16.2 ', '12.9 ', '70.9 '], ['2013/6/1', '16.3 ', '15.7 ', '68.0 '], [\n '2013/6/8', '20.4 ', '8.4 ', '71.2 '], ['2013/7/22', '31.7 ', '12.1 ',\n 
'56.2 '], ['2013/8/24', '25.6 ', '16.7 ', '57.7 '], ['2013/9/14',\n '20.4 ', '17.8 ', '61.8 '], ['2013/9/28', '21.8 ', '7.5 ', '70.7 '], [\n '2013/10/1', '24.1 ', '12.6 ', '63.3 '], ['2013/10/26', '27.0 ',\n '12.3 ', '60.7 '], ['2013/11/23', '26.2 ', '15.9 ', '57.9 '], [\n '2013/12/8', '38.4 ', '14.0 ', '47.6 '], ['2013/12/14', '35.9 ', '7.2 ',\n '56.9 '], ['2013/12/22', '33.0 ', '12.8 ', '54.2 '], ['2013/12/28',\n '32.6 ', '12.2 ', '55.2 '], ['2014/1/25', '31.0 ', '13.1 ', '55.9 '], [\n '2014/2/22', '29.7 ', '16.4 ', '53.9 '], ['2014/4/11', '26.7 ', '13.5 ',\n '59.8 '], ['2014/5/17', '32.5 ', '12.8 ', '54.7 '], ['2014/6/21',\n '33.0 ', '14.9 ', '52.1 '], ['2014/7/1', '40.6 ', '11.6 ', '47.8 ']]\ngos = [['野田', '2011/09/02', '2012/12/26'], ['菅', '2010/06/08', '2011/09/02'\n ], ['鳩山', '2009/09/16', '2010/06/08'], ['麻生', '2008/09/24',\n '2009/09/16'], ['福田', '2007/09/26', '2008/09/24'], ['安倍', '2007/04/01',\n '2007/09/26']]\n\n\nclass initDATA(webapp.RequestHandler):\n \"\"\"\n classdocs\n \"\"\"\n\n def get(self):\n user = users.get_current_user()\n if user == None:\n self.redirect(users.create_login_url(self.request.uri))\n return\n for ht in hts:\n htdate = datetime.date(datetime.strptime(ht[0], '%Y/%m/%d'))\n obj1 = HistoricalTable(date=htdate, title=ht[1], url=ht[2])\n obj1.save()\n for pr in prs:\n prdate = datetime.date(datetime.strptime(pr[0], '%Y/%m/%d'))\n obj2 = PollRating(date=prdate, approval_rate=float(pr[3]),\n unknown_rate=float(pr[2]), disapproval_rate=float(pr[1]))\n obj2.save()\n for pgo in gos:\n gosdate = datetime.date(datetime.strptime(pgo[1], '%Y/%m/%d'))\n try:\n goedate = datetime.date(datetime.strptime(pgo[2], '%Y/%m/%d'))\n except:\n goedate = None\n obj3 = Government(name=pgo[0], begin=gosdate, end=goedate)\n obj3.save()\n\n\nclass clearDATA(webapp.RequestHandler):\n\n def get(self):\n user = users.get_current_user()\n if user == None:\n self.redirect(users.create_login_url(self.request.uri))\n return\n for ht in HistoricalTable.all():\n 
ht.delete()\n for pr in PollRating.all():\n pr.delete()\n for pgo in Government.all():\n pgo.delete()\n",
"step-4": "<mask token>\nfrom google.appengine.api import users\nfrom google.appengine.ext import webapp\nfrom MyModel import HistoricalTable, PollRating, Government\nfrom datetime import datetime\nhts = [['2014/7/1', '集団的自衛権行使容認の閣議決定',\n 'http://www.47news.jp/47topics/e/254919.php'], ['2014/3/18',\n 'ロシア、クリミアを編入', 'http://www.47news.jp/CN/201403/CN2014031801002413.html'\n ], ['2014/2/9', '舛添氏が圧勝、東京都知事選',\n 'http://www.47news.jp/CN/201402/CN2014020901001630.html'], ['2014/1/7',\n '国家安全保障局を設置', 'http://www.47news.jp/CN/201401/CN2014010701001086.html'],\n ['2013/12/26', '安倍首相が靖国神社参拝',\n 'http://www.47news.jp/CN/201312/CN2013122601000987.html'], ['2013/12/6',\n '特定秘密保護法が成立', 'http://www.47news.jp/CN/201312/CN2013120601002724.html'],\n ['2013/11/3', '東北楽天がプロ野球日本一',\n 'http://www.47news.jp/CN/201311/CN2013110301002118.html'], ['2013/10/1',\n '消費税率引き上げ決定、4月8%',\n 'http://www.47news.jp/CN/201310/CN2013100101002292.html'], ['2013/9/8',\n '2020年東京五輪開催決定',\n 'http://www.47news.jp/CN/201309/CN2013090401001495.html'], ['2013/7/21',\n '参院選で自民圧勝、ねじれ解消',\n 'http://www.47news.jp/CN/201307/CN2013072101001638.html'], ['2013/3/15',\n 'TPP交渉に参加表明', 'http://www.47news.jp/CN/201303/CN2013031501001566.html'],\n ['2013/2/12', '北朝鮮が3度目の核実験',\n 'http://www.47news.jp/CN/201302/CN2013021201001987.html'], ['2013/1/16',\n 'アルジェリア人質事件発生',\n 'http://www.47news.jp/CN/201301/CN2013011601001649.html'], [\n '2012/12/26', '第2次安倍内閣発足',\n 'http://www.47news.jp/CN/201212/CN2012122601001577.html'], ['2012/12/6',\n '自公が政権奪還、衆院選', 'http://www.47news.jp/CN/201212/CN2012121601001041.html'\n ], ['2012/11/15', '習近平新指導部発足、中国',\n 'http://www.47news.jp/CN/201211/CN2012111501001203.html'], ['2012/11/6',\n 'オバマ米大統領が再選', 'http://www.47news.jp/CN/201211/CN2012110701000867.html'],\n ['2012/10/1', '新型輸送機オスプレイを沖縄配備',\n 'http://www.47news.jp/CN/201210/CN2012100101001335.html'], ['2012/9/11',\n '尖閣諸島の魚釣島など3島国有化',\n 'http://www.47news.jp/CN/201209/CN2012091101001254.html'], ['2012/8/10',\n '消費税増税法が成立、10%へ',\n 
'http://www.47news.jp/CN/201208/CN2012081001002702.html'], ['2012/6/27',\n '東京電力を国有化、公的資金注入',\n 'http://www.47news.jp/CN/201206/CN2012062701001601.html'], [\n '2011/12/19', '北朝鮮の金正日総書記が死去発表',\n 'http://www.47news.jp/CN/201112/CN2011121901001386.html'], [\n '2011/11/27', '大阪ダブル選で「維新の会」勝利',\n 'http://www.47news.jp/CN/201111/CN2011112701001230.html'], [\n '2011/10/20', 'リビアのカダフィ大佐が死亡',\n 'http://www.47news.jp/CN/201110/CN2011102001000912.html'], ['2011/10/5',\n '米アップル創業者ジョブズ氏死去',\n 'http://www.47news.jp/CN/201110/CN2011100601000102.html'], ['2011/9/2',\n '野田内閣が発足', 'http://www.47news.jp/CN/201109/CN2011090201000656.html'], [\n '2011/8/19', '円が戦後最高値更新、75円95銭',\n 'http://www.47news.jp/CN/201108/CN2011081901001116.html'], ['2011/7/17',\n 'なでしこジャパン女子W杯初優勝',\n 'http://www.47news.jp/CN/201107/CN2011071801000025.html'], ['2011/5/6',\n '首相、浜岡原発停止要請', 'http://www.47news.jp/CN/201105/CN2011050601000847.html'\n ], ['2011/3/11', '東日本大震災',\n 'http://www.47news.jp/CN/201103/CN2011031101000455.html'], ['2011/2/22',\n 'NZ地震、日本人28人も死亡',\n 'http://www.47news.jp/CN/201104/CN2011040401001017.html'], ['2011/1/31',\n '民主党小沢一郎元代表を強制起訴',\n 'http://www.47news.jp/CN/201101/CN2011013101000352.html'], [\n '2010/11/23', '北朝鮮が韓国・延坪島砲撃',\n 'http://www.47news.jp/CN/201011/CN2010112301000213.html'], ['2010/10/6',\n 'ノーベル化学賞に根岸、鈴木両氏',\n 'http://www.47news.jp/CN/201010/CN2010100601000811.html'], ['2010/9/15',\n '政府が為替介入、6年半ぶり',\n 'http://www.47news.jp/CN/201009/CN2010091501000138.html'], ['2010/9/7',\n '尖閣で中国漁船が巡視船に衝突',\n 'http://www.47news.jp/CN/201009/CN2010090701000382.html'], ['2010/7/11',\n '参院選で民主党大敗、ねじれ国会',\n 'http://www.47news.jp/CN/201007/CN2010071101000032.html'], ['2010/6/8',\n '鳩山首相退陣、菅内閣発足',\n 'http://www.47news.jp/CN/201006/CN2010060801000756.html'], ['2010/5/28',\n '普天間移設で日米合意', 'http://www.47news.jp/CN/201005/CN2010052801000165.html'],\n ['2010/4/20', '宮崎県で口蹄疫、被害拡大',\n 'http://www.47news.jp/CN/201004/CN2010042001000207.html'], [\n '2009/11/20', 'デフレ宣言、3年5カ月ぶり',\n 
'http://www.47news.jp/CN/200911/CN2009112001000267.html'], ['2009/10/2',\n '2016年五輪はリオ、東京落選',\n 'http://www.47news.jp/CN/200910/CN2009100201000542.html'], ['2009/9/16',\n '鳩山内閣発足', 'http://www.47news.jp/CN/200909/CN2009091601000915.html'], [\n '2009/8/30', '民主党圧勝で政権交代、衆院選',\n 'http://www.47news.jp/CN/200908/CN2009083001000015.html'], ['2009/8/3',\n '全国初の裁判員裁判、東京地裁',\n 'http://www.47news.jp/CN/200908/CN2009080301000461.html'], ['2009/6/25',\n '歌手M・ジャクソンさん急死',\n 'http://www.47news.jp/CN/200906/CN2009062601000067.html'], ['2009/5/25',\n '北朝鮮が2回目の核実験', 'http://www.47news.jp/CN/200905/CN2009052501000261.html'\n ], ['2009/3/23', 'WBCで「侍ジャパン」が連覇',\n 'http://www.47news.jp/CN/200903/CN2009032401000025.html'], ['2009/1/20',\n '米、オバマ新政権が発足', 'http://www.47news.jp/CN/200901/CN2009012001000945.html'\n ], ['2008/10/31', '田母神俊雄航空幕僚長を更迭',\n 'http://www.47news.jp/CN/200810/CN2008103101000632.html'], ['2008/9/24',\n '麻生内閣発足', 'http://www.47news.jp/CN/200809/CN2008092401000025.html'], [\n '2008/9/15', 'リーマン・ショック',\n 'http://www.47news.jp/CN/200809/CN2008091501000215.html'], ['2008/9/1',\n '福田首相、退陣表明', 'http://www.47news.jp/CN/200809/CN2008090101000736.html'],\n ['2008/7/7', '北海道・洞爺湖サミット~9日',\n 'http://www.47news.jp/CN/200807/CN2008070901000704.html'], ['2008/6/11',\n '福田首相の問責決議が可決',\n 'http://www.47news.jp/CN/200806/CN2008061101000609.html'], ['2008/5/12',\n '中国・四川大地震', 'http://www.47news.jp/CN/200805/CN2008051201000871.html'],\n ['2008/4/9', '日銀総裁に白川副総裁が昇格',\n 'http://www.47news.jp/CN/200804/CN2008040901000924.html'], ['2008/2/19',\n '海自イージス艦が漁船と衝突',\n 'http://www.47news.jp/CN/200802/CN2008021901000329.html'], ['2008/1/27',\n '大阪府知事選で橋下徹氏初当選',\n 'http://www.47news.jp/CN/200801/CN2008012801000076.html'], [\n '2007/11/28', '防衛装備疑惑で前防衛次官を逮捕',\n 'http://www.47news.jp/CN/200711/CN2007112801000463.html'], ['2007/11/2',\n 'テロ特措法期限切れ海自撤収命令',\n 'http://www.47news.jp/CN/200710/CN2007102901000620.html'], ['2007/9/12',\n '安倍首相が退陣。後任に福田氏',\n 
'http://www.47news.jp/CN/200709/CN2007091201000426.html'], ['2007/7/29',\n '参院選で自民党が歴史的惨敗',\n 'http://www.47news.jp/CN/200707/CN2007072901000697.html'], ['2007/5/28',\n '松岡農相が自殺', 'http://www.47news.jp/CN/200705/CN2007052801000693.html'], [\n '2007/5/14', '改憲手続き定めた国民投票法成立',\n 'http://www.47news.jp/CN/200705/CN2007051401000231.html']]\nprs = [['2007/4/16', '38.3 ', '17.5 ', '44.2 '], ['2007/5/12', '38.2 ',\n '14.2 ', '47.6 '], ['2007/6/1', '48.7 ', '15.5 ', '35.8 '], [\n '2007/7/30', '59.0 ', '12.0 ', '29.0 '], ['2007/8/27', '45.5 ', '14.0 ',\n '40.5 '], ['2007/9/13', '46.6 ', '7.9 ', '45.5 '], ['2007/9/25',\n '25.6 ', '16.6 ', '57.8 '], ['2007/10/27', '29.6 ', '20.2 ', '50.2 '],\n ['2007/11/5', '36.6 ', '16.4 ', '47.0 '], ['2007/12/15', '47.6 ',\n '17.1 ', '35.3 '], ['2008/1/11', '42.8 ', '15.8 ', '41.4 '], [\n '2008/2/9', '44.6 ', '19.9 ', '35.5 '], ['2008/3/15', '50.6 ', '16.0 ',\n '33.4 '], ['2008/4/4', '59.6 ', '13.8 ', '26.6 '], ['2008/5/1', '66.6 ',\n '13.6 ', '19.8 '], ['2008/6/12', '60.2 ', '14.8 ', '25.0 '], [\n '2008/7/11', '53.5 ', '19.7 ', '26.8 '], ['2008/8/1', '48.2 ', '20.4 ',\n '31.5 '], ['2008/9/2', '28.0 ', '4.1 ', '67.9 '], ['2008/9/24', '32.9 ',\n '18.5 ', '48.6 '], ['2008/10/18', '39.0 ', '18.5 ', '42.5 '], [\n '2008/11/8', '42.1 ', '16.9 ', '40.9 '], ['2008/12/6', '61.4 ', '13.2 ',\n '25.4 '], ['2009/1/10', '70.2 ', '10.6 ', '19.2 '], ['2009/2/7',\n '70.9 ', '11.0 ', '18.1 '], ['2009/2/17', '76.6 ', '10.0 ', '13.4 '], [\n '2009/3/7', '70.8 ', '13.2 ', '16.0 '], ['2009/3/25', '63.4 ', '12.8 ',\n '23.7 '], ['2009/4/28', '56.2 ', '14.2 ', '29.6 '], ['2009/5/11',\n '55.1 ', '16.9 ', '28.0 '], ['2009/5/16', '60.2 ', '13.5 ', '26.2 '], [\n '2009/6/13', '70.5 ', '12.0 ', '17.5 '], ['2009/7/3', '60.9 ', '15.7 ',\n '23.4 '], ['2009/9/16', '13.1 ', '14.9 ', '72.0 '], ['2009/10/31',\n '22.9 ', '15.3 ', '61.8 '], ['2009/11/28', '25.1 ', '11.2 ', '63.6 '],\n ['2009/12/25', '38.1 ', '14.7 ', '47.1 '], ['2010/1/10', '33.2 ',\n '16.0 ', '50.8 '], 
['2010/1/17', '44.1 ', '14.4 ', '41.5 '], [\n '2010/2/5', '45.1 ', '13.5 ', '41.4 '], ['2010/3/6', '48.9 ', '14.8 ',\n '36.4 '], ['2010/4/3', '53.3 ', '13.7 ', '33.0 '], ['2010/4/28',\n '64.4 ', '14.9 ', '20.7 '], ['2010/5/29', '73.1 ', '7.7 ', '19.1 '], [\n '2010/6/4', '37.2 ', '5.2 ', '57.7 '], ['2010/7/12', '52.2 ', '11.5 ',\n '36.2 '], ['2010/8/7', '44.8 ', '16.5 ', '38.7 '], ['2010/8/27',\n '36.2 ', '15.7 ', '48.1 '], ['2010/9/9', '31.5 ', '13.8 ', '54.7 '], [\n '2010/9/17', '21.2 ', '14.3 ', '64.5 '], ['2010/10/5', '36.6 ', '15.8 ',\n '47.6 '], ['2010/11/6', '48.6 ', '18.7 ', '32.7 '], ['2010/11/23',\n '61.9 ', '14.5 ', '23.6 '], ['2010/12/25', '67.0 ', '9.4 ', '23.7 '], [\n '2011/1/14', '53.9 ', '13.9 ', '32.1 '], ['2011/2/11', '63.3 ', '16.7 ',\n '19.9 '], ['2011/3/26', '55.6 ', '16.1 ', '28.3 '], ['2011/4/29',\n '58.6 ', '14.5 ', '26.8 '], ['2011/5/14', '57.3 ', '14.6 ', '28.1 '], [\n '2011/6/28', '61.1 ', '15.6 ', '23.2 '], ['2011/7/23', '70.6 ', '12.3 ',\n '17.1 '], ['2011/8/20', '70.0 ', '14.2 ', '15.8 '], ['2011/9/2',\n '18.1 ', '19.1 ', '62.7 '], ['2011/10/1', '27.8 ', '17.6 ', '54.6 '], [\n '2011/11/5', '34.3 ', '18.6 ', '47.1 '], ['2011/12/3', '40.3 ', '15.1 ',\n '44.6 '], ['2012/1/7', '50.6 ', '13.7 ', '35.7 '], ['2012/1/13',\n '47.8 ', '16.4 ', '35.8 '], ['2012/2/18', '55.2 ', '15.8 ', '29.0 '], [\n '2012/3/19', '50.2 ', '18.2 ', '31.6 '], ['2012/4/28', '60.0 ', '13.6 ',\n '26.4 '], ['2012/5/26', '58.1 ', '13.9 ', '28.0 '], ['2012/6/4',\n '50.0 ', '18.0 ', '32.0 '], ['2012/6/26', '54.4 ', '15.8 ', '29.9 '], [\n '2012/7/14', '59.9 ', '11.9 ', '28.2 '], ['2012/8/11', '59.0 ', '13.1 ',\n '27.9 '], ['2012/9/1', '59.4 ', '14.3 ', '26.3 '], ['2012/10/1',\n '55.3 ', '15.5 ', '29.2 '], ['2012/11/3', '66.0 ', '16.2 ', '17.7 '], [\n '2012/12/26', '21.8 ', '16.2 ', '62.0 '], ['2013/1/26', '22.1 ',\n '11.2 ', '66.7 '], ['2013/2/23', '16.2 ', '11.1 ', '72.7 '], [\n '2013/3/23', '16.7 ', '12.2 ', '71.1 '], ['2013/3/30', '20.8 ', '7.2 ',\n '72.0 '], 
['2013/4/20', '16.0 ', '11.9 ', '72.1 '], ['2013/5/18',\n '16.2 ', '12.9 ', '70.9 '], ['2013/6/1', '16.3 ', '15.7 ', '68.0 '], [\n '2013/6/8', '20.4 ', '8.4 ', '71.2 '], ['2013/7/22', '31.7 ', '12.1 ',\n '56.2 '], ['2013/8/24', '25.6 ', '16.7 ', '57.7 '], ['2013/9/14',\n '20.4 ', '17.8 ', '61.8 '], ['2013/9/28', '21.8 ', '7.5 ', '70.7 '], [\n '2013/10/1', '24.1 ', '12.6 ', '63.3 '], ['2013/10/26', '27.0 ',\n '12.3 ', '60.7 '], ['2013/11/23', '26.2 ', '15.9 ', '57.9 '], [\n '2013/12/8', '38.4 ', '14.0 ', '47.6 '], ['2013/12/14', '35.9 ', '7.2 ',\n '56.9 '], ['2013/12/22', '33.0 ', '12.8 ', '54.2 '], ['2013/12/28',\n '32.6 ', '12.2 ', '55.2 '], ['2014/1/25', '31.0 ', '13.1 ', '55.9 '], [\n '2014/2/22', '29.7 ', '16.4 ', '53.9 '], ['2014/4/11', '26.7 ', '13.5 ',\n '59.8 '], ['2014/5/17', '32.5 ', '12.8 ', '54.7 '], ['2014/6/21',\n '33.0 ', '14.9 ', '52.1 '], ['2014/7/1', '40.6 ', '11.6 ', '47.8 ']]\ngos = [['野田', '2011/09/02', '2012/12/26'], ['菅', '2010/06/08', '2011/09/02'\n ], ['鳩山', '2009/09/16', '2010/06/08'], ['麻生', '2008/09/24',\n '2009/09/16'], ['福田', '2007/09/26', '2008/09/24'], ['安倍', '2007/04/01',\n '2007/09/26']]\n\n\nclass initDATA(webapp.RequestHandler):\n \"\"\"\n classdocs\n \"\"\"\n\n def get(self):\n user = users.get_current_user()\n if user == None:\n self.redirect(users.create_login_url(self.request.uri))\n return\n for ht in hts:\n htdate = datetime.date(datetime.strptime(ht[0], '%Y/%m/%d'))\n obj1 = HistoricalTable(date=htdate, title=ht[1], url=ht[2])\n obj1.save()\n for pr in prs:\n prdate = datetime.date(datetime.strptime(pr[0], '%Y/%m/%d'))\n obj2 = PollRating(date=prdate, approval_rate=float(pr[3]),\n unknown_rate=float(pr[2]), disapproval_rate=float(pr[1]))\n obj2.save()\n for pgo in gos:\n gosdate = datetime.date(datetime.strptime(pgo[1], '%Y/%m/%d'))\n try:\n goedate = datetime.date(datetime.strptime(pgo[2], '%Y/%m/%d'))\n except:\n goedate = None\n obj3 = Government(name=pgo[0], begin=gosdate, end=goedate)\n obj3.save()\n\n\nclass 
clearDATA(webapp.RequestHandler):\n\n def get(self):\n user = users.get_current_user()\n if user == None:\n self.redirect(users.create_login_url(self.request.uri))\n return\n for ht in HistoricalTable.all():\n ht.delete()\n for pr in PollRating.all():\n pr.delete()\n for pgo in Government.all():\n pgo.delete()\n",
"step-5": "# -*- coding: utf-8 -*-\n'''\nCreated on 2014/07/24\n\n@author: seigo\n'''\n\nfrom google.appengine.api import users\nfrom google.appengine.ext import webapp\nfrom MyModel import HistoricalTable, PollRating, Government\nfrom datetime import datetime\n\nhts = [[\"2014/7/1\",\"集団的自衛権行使容認の閣議決定\",\"http://www.47news.jp/47topics/e/254919.php\"],[\"2014/3/18\",\"ロシア、クリミアを編入\",\"http://www.47news.jp/CN/201403/CN2014031801002413.html\"],[\"2014/2/9\",\"舛添氏が圧勝、東京都知事選\",\"http://www.47news.jp/CN/201402/CN2014020901001630.html\"],[\"2014/1/7\",\"国家安全保障局を設置\",\"http://www.47news.jp/CN/201401/CN2014010701001086.html\"],[\"2013/12/26\",\"安倍首相が靖国神社参拝\",\"http://www.47news.jp/CN/201312/CN2013122601000987.html\"],[\"2013/12/6\",\"特定秘密保護法が成立\",\"http://www.47news.jp/CN/201312/CN2013120601002724.html\"],[\"2013/11/3\",\"東北楽天がプロ野球日本一\",\"http://www.47news.jp/CN/201311/CN2013110301002118.html\"],[\"2013/10/1\",\"消費税率引き上げ決定、4月8%\",\"http://www.47news.jp/CN/201310/CN2013100101002292.html\"],[\"2013/9/8\",\"2020年東京五輪開催決定\",\"http://www.47news.jp/CN/201309/CN2013090401001495.html\"],[\"2013/7/21\",\"参院選で自民圧勝、ねじれ解消\",\"http://www.47news.jp/CN/201307/CN2013072101001638.html\"],[\"2013/3/15\",\"TPP交渉に参加表明\",\"http://www.47news.jp/CN/201303/CN2013031501001566.html\"],[\"2013/2/12\",\"北朝鮮が3度目の核実験\",\"http://www.47news.jp/CN/201302/CN2013021201001987.html\"],[\"2013/1/16\",\"アルジェリア人質事件発生\",\"http://www.47news.jp/CN/201301/CN2013011601001649.html\"],[\"2012/12/26\",\"第2次安倍内閣発足\",\"http://www.47news.jp/CN/201212/CN2012122601001577.html\"],[\"2012/12/6\",\"自公が政権奪還、衆院選\",\"http://www.47news.jp/CN/201212/CN2012121601001041.html\"],[\"2012/11/15\",\"習近平新指導部発足、中国\",\"http://www.47news.jp/CN/201211/CN2012111501001203.html\"],[\"2012/11/6\",\"オバマ米大統領が再選\",\"http://www.47news.jp/CN/201211/CN2012110701000867.html\"],[\"2012/10/1\",\"新型輸送機オスプレイを沖縄配備\",\"http://www.47news.jp/CN/201210/CN2012100101001335.html\"],[\"2012/9/11\",\"尖閣諸島の魚釣島など3島国有化\",\"http://www.47news.jp/CN/201209/CN2012091101001254.h
tml\"],[\"2012/8/10\",\"消費税増税法が成立、10%へ\",\"http://www.47news.jp/CN/201208/CN2012081001002702.html\"],[\"2012/6/27\",\"東京電力を国有化、公的資金注入\",\"http://www.47news.jp/CN/201206/CN2012062701001601.html\"],[\"2011/12/19\",\"北朝鮮の金正日総書記が死去発表\",\"http://www.47news.jp/CN/201112/CN2011121901001386.html\"],[\"2011/11/27\",\"大阪ダブル選で「維新の会」勝利\",\"http://www.47news.jp/CN/201111/CN2011112701001230.html\"],[\"2011/10/20\",\"リビアのカダフィ大佐が死亡\",\"http://www.47news.jp/CN/201110/CN2011102001000912.html\"],[\"2011/10/5\",\"米アップル創業者ジョブズ氏死去\",\"http://www.47news.jp/CN/201110/CN2011100601000102.html\"],[\"2011/9/2\",\"野田内閣が発足\",\"http://www.47news.jp/CN/201109/CN2011090201000656.html\"],[\"2011/8/19\",\"円が戦後最高値更新、75円95銭\",\"http://www.47news.jp/CN/201108/CN2011081901001116.html\"],[\"2011/7/17\",\"なでしこジャパン女子W杯初優勝\",\"http://www.47news.jp/CN/201107/CN2011071801000025.html\"],[\"2011/5/6\",\"首相、浜岡原発停止要請\",\"http://www.47news.jp/CN/201105/CN2011050601000847.html\"],[\"2011/3/11\",\"東日本大震災\",\"http://www.47news.jp/CN/201103/CN2011031101000455.html\"],[\"2011/2/22\",\"NZ地震、日本人28人も死亡\",\"http://www.47news.jp/CN/201104/CN2011040401001017.html\"],[\"2011/1/31\",\"民主党小沢一郎元代表を強制起訴\",\"http://www.47news.jp/CN/201101/CN2011013101000352.html\"],[\"2010/11/23\",\"北朝鮮が韓国・延坪島砲撃\",\"http://www.47news.jp/CN/201011/CN2010112301000213.html\"],[\"2010/10/6\",\"ノーベル化学賞に根岸、鈴木両氏\",\"http://www.47news.jp/CN/201010/CN2010100601000811.html\"],[\"2010/9/15\",\"政府が為替介入、6年半ぶり\",\"http://www.47news.jp/CN/201009/CN2010091501000138.html\"],[\"2010/9/7\",\"尖閣で中国漁船が巡視船に衝突\",\"http://www.47news.jp/CN/201009/CN2010090701000382.html\"],[\"2010/7/11\",\"参院選で民主党大敗、ねじれ国会\",\"http://www.47news.jp/CN/201007/CN2010071101000032.html\"],[\"2010/6/8\",\"鳩山首相退陣、菅内閣発足\",\"http://www.47news.jp/CN/201006/CN2010060801000756.html\"],[\"2010/5/28\",\"普天間移設で日米合意\",\"http://www.47news.jp/CN/201005/CN2010052801000165.html\"],[\"2010/4/20\",\"宮崎県で口蹄疫、被害拡大\",\"http://www.47news.jp/CN/201004/CN2010042001000207.html\"],[\"2009/11/20\",\"デフレ宣言、3年5カ月ぶり\",\"htt
p://www.47news.jp/CN/200911/CN2009112001000267.html\"],[\"2009/10/2\",\"2016年五輪はリオ、東京落選\",\"http://www.47news.jp/CN/200910/CN2009100201000542.html\"],[\"2009/9/16\",\"鳩山内閣発足\",\"http://www.47news.jp/CN/200909/CN2009091601000915.html\"],[\"2009/8/30\",\"民主党圧勝で政権交代、衆院選\",\"http://www.47news.jp/CN/200908/CN2009083001000015.html\"],[\"2009/8/3\",\"全国初の裁判員裁判、東京地裁\",\"http://www.47news.jp/CN/200908/CN2009080301000461.html\"],[\"2009/6/25\",\"歌手M・ジャクソンさん急死\",\"http://www.47news.jp/CN/200906/CN2009062601000067.html\"],[\"2009/5/25\",\"北朝鮮が2回目の核実験\",\"http://www.47news.jp/CN/200905/CN2009052501000261.html\"],[\"2009/3/23\",\"WBCで「侍ジャパン」が連覇\",\"http://www.47news.jp/CN/200903/CN2009032401000025.html\"],[\"2009/1/20\",\"米、オバマ新政権が発足\",\"http://www.47news.jp/CN/200901/CN2009012001000945.html\"],[\"2008/10/31\",\"田母神俊雄航空幕僚長を更迭\",\"http://www.47news.jp/CN/200810/CN2008103101000632.html\"],[\"2008/9/24\",\"麻生内閣発足\",\"http://www.47news.jp/CN/200809/CN2008092401000025.html\"],[\"2008/9/15\",\"リーマン・ショック\",\"http://www.47news.jp/CN/200809/CN2008091501000215.html\"],[\"2008/9/1\",\"福田首相、退陣表明\",\"http://www.47news.jp/CN/200809/CN2008090101000736.html\"],[\"2008/7/7\",\"北海道・洞爺湖サミット~9日\",\"http://www.47news.jp/CN/200807/CN2008070901000704.html\"],[\"2008/6/11\",\"福田首相の問責決議が可決\",\"http://www.47news.jp/CN/200806/CN2008061101000609.html\"],[\"2008/5/12\",\"中国・四川大地震\",\"http://www.47news.jp/CN/200805/CN2008051201000871.html\"],[\"2008/4/9\",\"日銀総裁に白川副総裁が昇格\",\"http://www.47news.jp/CN/200804/CN2008040901000924.html\"],[\"2008/2/19\",\"海自イージス艦が漁船と衝突\",\"http://www.47news.jp/CN/200802/CN2008021901000329.html\"],[\"2008/1/27\",\"大阪府知事選で橋下徹氏初当選\",\"http://www.47news.jp/CN/200801/CN2008012801000076.html\"],[\"2007/11/28\",\"防衛装備疑惑で前防衛次官を逮捕\",\"http://www.47news.jp/CN/200711/CN2007112801000463.html\"],[\"2007/11/2\",\"テロ特措法期限切れ海自撤収命令\",\"http://www.47news.jp/CN/200710/CN2007102901000620.html\"],[\"2007/9/12\",\"安倍首相が退陣。後任に福田氏\",\"http://www.47news.jp/CN/200709/CN2007091201000426.html\"],[\"2007/7/29\"
,\"参院選で自民党が歴史的惨敗\",\"http://www.47news.jp/CN/200707/CN2007072901000697.html\"],[\"2007/5/28\",\"松岡農相が自殺\",\"http://www.47news.jp/CN/200705/CN2007052801000693.html\"],[\"2007/5/14\",\"改憲手続き定めた国民投票法成立\",\"http://www.47news.jp/CN/200705/CN2007051401000231.html\"]]\nprs = [[\"2007/4/16\",\"38.3 \",\"17.5 \",\"44.2 \"],[\"2007/5/12\",\"38.2 \",\"14.2 \",\"47.6 \"],[\"2007/6/1\",\"48.7 \",\"15.5 \",\"35.8 \"],[\"2007/7/30\",\"59.0 \",\"12.0 \",\"29.0 \"],[\"2007/8/27\",\"45.5 \",\"14.0 \",\"40.5 \"],[\"2007/9/13\",\"46.6 \",\"7.9 \",\"45.5 \"],[\"2007/9/25\",\"25.6 \",\"16.6 \",\"57.8 \"],[\"2007/10/27\",\"29.6 \",\"20.2 \",\"50.2 \"],[\"2007/11/5\",\"36.6 \",\"16.4 \",\"47.0 \"],[\"2007/12/15\",\"47.6 \",\"17.1 \",\"35.3 \"],[\"2008/1/11\",\"42.8 \",\"15.8 \",\"41.4 \"],[\"2008/2/9\",\"44.6 \",\"19.9 \",\"35.5 \"],[\"2008/3/15\",\"50.6 \",\"16.0 \",\"33.4 \"],[\"2008/4/4\",\"59.6 \",\"13.8 \",\"26.6 \"],[\"2008/5/1\",\"66.6 \",\"13.6 \",\"19.8 \"],[\"2008/6/12\",\"60.2 \",\"14.8 \",\"25.0 \"],[\"2008/7/11\",\"53.5 \",\"19.7 \",\"26.8 \"],[\"2008/8/1\",\"48.2 \",\"20.4 \",\"31.5 \"],[\"2008/9/2\",\"28.0 \",\"4.1 \",\"67.9 \"],[\"2008/9/24\",\"32.9 \",\"18.5 \",\"48.6 \"],[\"2008/10/18\",\"39.0 \",\"18.5 \",\"42.5 \"],[\"2008/11/8\",\"42.1 \",\"16.9 \",\"40.9 \"],[\"2008/12/6\",\"61.4 \",\"13.2 \",\"25.4 \"],[\"2009/1/10\",\"70.2 \",\"10.6 \",\"19.2 \"],[\"2009/2/7\",\"70.9 \",\"11.0 \",\"18.1 \"],[\"2009/2/17\",\"76.6 \",\"10.0 \",\"13.4 \"],[\"2009/3/7\",\"70.8 \",\"13.2 \",\"16.0 \"],[\"2009/3/25\",\"63.4 \",\"12.8 \",\"23.7 \"],[\"2009/4/28\",\"56.2 \",\"14.2 \",\"29.6 \"],[\"2009/5/11\",\"55.1 \",\"16.9 \",\"28.0 \"],[\"2009/5/16\",\"60.2 \",\"13.5 \",\"26.2 \"],[\"2009/6/13\",\"70.5 \",\"12.0 \",\"17.5 \"],[\"2009/7/3\",\"60.9 \",\"15.7 \",\"23.4 \"],[\"2009/9/16\",\"13.1 \",\"14.9 \",\"72.0 \"],[\"2009/10/31\",\"22.9 \",\"15.3 \",\"61.8 \"],[\"2009/11/28\",\"25.1 \",\"11.2 \",\"63.6 \"],[\"2009/12/25\",\"38.1 \",\"14.7 \",\"47.1 \"],[\"2010/1/10\",\"33.2 \",\"16.0 
\",\"50.8 \"],[\"2010/1/17\",\"44.1 \",\"14.4 \",\"41.5 \"],[\"2010/2/5\",\"45.1 \",\"13.5 \",\"41.4 \"],[\"2010/3/6\",\"48.9 \",\"14.8 \",\"36.4 \"],[\"2010/4/3\",\"53.3 \",\"13.7 \",\"33.0 \"],[\"2010/4/28\",\"64.4 \",\"14.9 \",\"20.7 \"],[\"2010/5/29\",\"73.1 \",\"7.7 \",\"19.1 \"],[\"2010/6/4\",\"37.2 \",\"5.2 \",\"57.7 \"],[\"2010/7/12\",\"52.2 \",\"11.5 \",\"36.2 \"],[\"2010/8/7\",\"44.8 \",\"16.5 \",\"38.7 \"],[\"2010/8/27\",\"36.2 \",\"15.7 \",\"48.1 \"],[\"2010/9/9\",\"31.5 \",\"13.8 \",\"54.7 \"],[\"2010/9/17\",\"21.2 \",\"14.3 \",\"64.5 \"],[\"2010/10/5\",\"36.6 \",\"15.8 \",\"47.6 \"],[\"2010/11/6\",\"48.6 \",\"18.7 \",\"32.7 \"],[\"2010/11/23\",\"61.9 \",\"14.5 \",\"23.6 \"],[\"2010/12/25\",\"67.0 \",\"9.4 \",\"23.7 \"],[\"2011/1/14\",\"53.9 \",\"13.9 \",\"32.1 \"],[\"2011/2/11\",\"63.3 \",\"16.7 \",\"19.9 \"],[\"2011/3/26\",\"55.6 \",\"16.1 \",\"28.3 \"],[\"2011/4/29\",\"58.6 \",\"14.5 \",\"26.8 \"],[\"2011/5/14\",\"57.3 \",\"14.6 \",\"28.1 \"],[\"2011/6/28\",\"61.1 \",\"15.6 \",\"23.2 \"],[\"2011/7/23\",\"70.6 \",\"12.3 \",\"17.1 \"],[\"2011/8/20\",\"70.0 \",\"14.2 \",\"15.8 \"],[\"2011/9/2\",\"18.1 \",\"19.1 \",\"62.7 \"],[\"2011/10/1\",\"27.8 \",\"17.6 \",\"54.6 \"],[\"2011/11/5\",\"34.3 \",\"18.6 \",\"47.1 \"],[\"2011/12/3\",\"40.3 \",\"15.1 \",\"44.6 \"],[\"2012/1/7\",\"50.6 \",\"13.7 \",\"35.7 \"],[\"2012/1/13\",\"47.8 \",\"16.4 \",\"35.8 \"],[\"2012/2/18\",\"55.2 \",\"15.8 \",\"29.0 \"],[\"2012/3/19\",\"50.2 \",\"18.2 \",\"31.6 \"],[\"2012/4/28\",\"60.0 \",\"13.6 \",\"26.4 \"],[\"2012/5/26\",\"58.1 \",\"13.9 \",\"28.0 \"],[\"2012/6/4\",\"50.0 \",\"18.0 \",\"32.0 \"],[\"2012/6/26\",\"54.4 \",\"15.8 \",\"29.9 \"],[\"2012/7/14\",\"59.9 \",\"11.9 \",\"28.2 \"],[\"2012/8/11\",\"59.0 \",\"13.1 \",\"27.9 \"],[\"2012/9/1\",\"59.4 \",\"14.3 \",\"26.3 \"],[\"2012/10/1\",\"55.3 \",\"15.5 \",\"29.2 \"],[\"2012/11/3\",\"66.0 \",\"16.2 \",\"17.7 \"],[\"2012/12/26\",\"21.8 \",\"16.2 \",\"62.0 \"],[\"2013/1/26\",\"22.1 \",\"11.2 \",\"66.7 
\"],[\"2013/2/23\",\"16.2 \",\"11.1 \",\"72.7 \"],[\"2013/3/23\",\"16.7 \",\"12.2 \",\"71.1 \"],[\"2013/3/30\",\"20.8 \",\"7.2 \",\"72.0 \"],[\"2013/4/20\",\"16.0 \",\"11.9 \",\"72.1 \"],[\"2013/5/18\",\"16.2 \",\"12.9 \",\"70.9 \"],[\"2013/6/1\",\"16.3 \",\"15.7 \",\"68.0 \"],[\"2013/6/8\",\"20.4 \",\"8.4 \",\"71.2 \"],[\"2013/7/22\",\"31.7 \",\"12.1 \",\"56.2 \"],[\"2013/8/24\",\"25.6 \",\"16.7 \",\"57.7 \"],[\"2013/9/14\",\"20.4 \",\"17.8 \",\"61.8 \"],[\"2013/9/28\",\"21.8 \",\"7.5 \",\"70.7 \"],[\"2013/10/1\",\"24.1 \",\"12.6 \",\"63.3 \"],[\"2013/10/26\",\"27.0 \",\"12.3 \",\"60.7 \"],[\"2013/11/23\",\"26.2 \",\"15.9 \",\"57.9 \"],[\"2013/12/8\",\"38.4 \",\"14.0 \",\"47.6 \"],[\"2013/12/14\",\"35.9 \",\"7.2 \",\"56.9 \"],[\"2013/12/22\",\"33.0 \",\"12.8 \",\"54.2 \"],[\"2013/12/28\",\"32.6 \",\"12.2 \",\"55.2 \"],[\"2014/1/25\",\"31.0 \",\"13.1 \",\"55.9 \"],[\"2014/2/22\",\"29.7 \",\"16.4 \",\"53.9 \"],[\"2014/4/11\",\"26.7 \",\"13.5 \",\"59.8 \"],[\"2014/5/17\",\"32.5 \",\"12.8 \",\"54.7 \"],[\"2014/6/21\",\"33.0 \",\"14.9 \",\"52.1 \"],[\"2014/7/1\",\"40.6 \",\"11.6 \",\"47.8 \"]]\ngos = [[\"野田\",\"2011/09/02\",\"2012/12/26\"],[\"菅\",\"2010/06/08\",\"2011/09/02\"],[\"鳩山\",\"2009/09/16\",\"2010/06/08\"],[\"麻生\",\"2008/09/24\",\"2009/09/16\"],[\"福田\",\"2007/09/26\",\"2008/09/24\"],[\"安倍\",\"2007/04/01\",\"2007/09/26\"]]\n\n\nclass initDATA(webapp.RequestHandler):\n '''\n classdocs\n '''\n def get(self):\n user = users.get_current_user()\n if user == None:\n self.redirect(users.create_login_url(self.request.uri))\n return\n for ht in hts:\n htdate = datetime.date(datetime.strptime(ht[0], '%Y/%m/%d'))\n obj1 = HistoricalTable(date=htdate,title=ht[1],url=ht[2])\n obj1.save()\n \n for pr in prs:\n prdate = datetime.date(datetime.strptime(pr[0], '%Y/%m/%d'))\n obj2 = PollRating(date=prdate,approval_rate=float(pr[3]),unknown_rate=float(pr[2]),disapproval_rate=float(pr[1]))\n obj2.save()\n \n for pgo in gos:\n gosdate = datetime.date(datetime.strptime(pgo[1], 
'%Y/%m/%d'))\n try:\n goedate = datetime.date(datetime.strptime(pgo[2], '%Y/%m/%d'))\n except:\n goedate = None\n obj3 = Government(name=pgo[0], begin=gosdate, end=goedate)\n obj3.save()\n \nclass clearDATA(webapp.RequestHandler):\n def get(self):\n user = users.get_current_user()\n if user == None:\n self.redirect(users.create_login_url(self.request.uri))\n return\n for ht in HistoricalTable.all():\n ht.delete()\n \n for pr in PollRating.all():\n pr.delete()\n \n for pgo in Government.all():\n pgo.delete()\n \n \n ",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
# Copyright (c) 2016, the GPyOpt Authors
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as np
from scipy.special import erfc
import time
from ..core.errors import InvalidConfigError
def compute_integrated_acquisition(acquisition, x):
    '''
    Average the acquisition value over the HMC hyper-parameter samples
    stored in a GP_MCMC model.

    :param acquisition: acquisition function with GpyOpt model type GP_MCMC.
    :param x: location where the acquisition is evaluated.
    '''
    num_samples = acquisition.model.num_hmc_samples
    total = 0
    for sample_idx in range(num_samples):
        # Load one hyper-parameter sample into the kernel before evaluating.
        acquisition.model.model.kern[:] = acquisition.model.hmc_samples[sample_idx, :]
        total += acquisition.acquisition_function(x)
    return total / num_samples
def compute_integrated_acquisition_withGradients(acquisition, x):
    '''
    Average the acquisition value and its gradient over the HMC
    hyper-parameter samples stored in a GP_MCMC model.

    :param acquisition: acquisition function with GpyOpt model type GP_MCMC.
    :param x: location where the acquisition is evaluated.
    '''
    num_samples = acquisition.model.num_hmc_samples
    total_value = 0
    total_gradient = 0
    for sample_idx in range(num_samples):
        # Load one hyper-parameter sample into the kernel before evaluating.
        acquisition.model.model.kern[:] = acquisition.model.hmc_samples[sample_idx, :]
        value, gradient = acquisition.acquisition_function_withGradients(x)
        total_value += value
        total_gradient += gradient
    return total_value / num_samples, total_gradient / num_samples
def best_guess(f, X):
    '''
    Running best (minimum) value of *f* along the rows of X.

    Component i is the minimum of f evaluated at X[0], ..., X[i].

    :param f: function to evaluate; maps an (n, d) array of locations to
        n values (one per row) -- the same contract the original
        prefix-evaluation loop relied on.
    :param X: (n, d) array of locations.
    :return: 1-d float array of running minima, length n.
    '''
    # Evaluate f once over all rows and take a cumulative minimum: same
    # result as re-evaluating every prefix X[:i+1], but O(n) evaluations
    # of f instead of O(n^2).
    evaluations = np.ravel(f(X))
    return np.minimum.accumulate(evaluations).astype(float)
def samples_multidimensional_uniform(bounds, num_data):
    '''
    Generates a multidimensional grid uniformly distributed.

    :param bounds: tuple defining the box constraints, one (low, high)
        pair per dimension.
    :num_data: number of data points to generate.
    '''
    n_dims = len(bounds)
    grid = np.zeros((num_data, n_dims))
    # Fill one column per dimension; the per-dimension draw order matches
    # the original implementation, so seeded runs are reproducible.
    for col, box in enumerate(bounds):
        grid[:, col] = np.random.uniform(low=box[0], high=box[1], size=num_data)
    return grid
def reshape(x, input_dim):
    '''
    Reshapes x into a matrix with input_dim columns.

    A flat point of exactly input_dim entries becomes a single-row
    matrix; anything else is returned converted to an array, unchanged.
    '''
    arr = np.array(x)
    return arr.reshape((1, input_dim)) if arr.size == input_dim else arr
def get_moments(model, x):
    '''
    Moments (mean and sdev.) of a GP model at x, together with the
    minimum of the predictive mean over the model's training inputs.

    :param model: model exposing X, predict().
    :param x: location(s) where the moments are computed.
    :return: (mean, stdev, fmin).
    '''
    x = reshape(x, model.X.shape[1])
    # Incumbent: best predicted value over the observed inputs.
    fmin = min(model.predict(model.X)[0])
    mean, variance = model.predict(x)
    # Clip to avoid sqrt of tiny negative variances from numerical noise.
    stdev = np.sqrt(np.clip(variance, 0, np.inf))
    return (mean, stdev, fmin)
def get_d_moments(model, x):
    '''
    Gradients with respect to x of the moments (mean and sdev.) of the GP.

    :param model: GPy model.
    :param x: location where the gradients are evaluated.
    :return: (dmdx, dsdx) gradients of the mean and standard deviation.
    '''
    x = reshape(x, model.input_dim)
    _, variance = model.predict(x)
    dmdx, dvdx = model.predictive_gradients(x)
    # Keep the single-output slice of the mean gradient, and turn the
    # variance gradient into a deviation gradient via the chain rule
    # d(sqrt(v))/dx = dv/dx / (2*sqrt(v)).
    mean_grad = dmdx[:, :, 0]
    stdev_grad = dvdx / (2 * np.sqrt(variance))
    return (mean_grad, stdev_grad)
def get_quantiles(acquisition_par, fmin, m, s):
    '''
    Quantiles of the Gaussian distribution useful to determine the
    acquisition function values.

    :param acquisition_par: parameter of the acquisition function.
    :param fmin: current minimum.
    :param m: vector of means.
    :param s: vector of standard deviations.
    :return: (phi, Phi, u) -- the standard normal pdf, cdf, and the
        standardized improvement.
    '''
    tiny = 1e-10
    if isinstance(s, np.ndarray):
        # In-place clamp (mutates the caller's array, as the original did).
        s[s < tiny] = tiny
    else:
        s = max(s, tiny)
    standardized = (fmin - m - acquisition_par) / s
    pdf = np.exp(-0.5 * np.square(standardized)) / np.sqrt(2.0 * np.pi)
    cdf = 0.5 * erfc(-standardized / np.sqrt(2.0))
    return (pdf, cdf, standardized)
def best_value(Y, sign=1):
    '''
    Running optimum of Y: component i is the minimum (default, sign=1)
    or the maximum (any other sign) of Y[:(i+1)].

    :param Y: array-like of function values, shape (n,) or (n, m).
    :param sign: 1 for a running minimum, anything else for a running
        maximum.
    :return: 1-d float array of length n.
    '''
    Y = np.asarray(Y, dtype=float)
    n = Y.shape[0]
    if n == 0:
        return np.ones(0)
    # Optimum within each row first, then a cumulative optimum over rows:
    # equivalent to min/max over every prefix Y[:i+1], but O(n) rather
    # than the O(n^2) of re-scanning each prefix.
    rows = Y.reshape(n, -1)
    if sign == 1:
        return np.minimum.accumulate(rows.min(axis=1))
    return np.maximum.accumulate(rows.max(axis=1))
def spawn(f):
    '''
    Function for parallel evaluation of the acquisition function.

    Wraps *f* so the wrapped callable evaluates f(x), sends the result
    down a pipe connection, and closes it.
    '''
    def _worker(conn, x):
        conn.send(f(x))
        conn.close()
    return _worker
def evaluate_function(f, X):
    '''
    Returns the evaluation of a function *f* and the time per evaluation.

    :param f: function mapping one row of X to a row of outputs.
    :param X: 2-d array of evaluation locations.
    :return: (Y_eval, Y_time) where Y_eval has the same shape as X and
        Y_time is an (n, 1) array of per-evaluation wall-clock seconds.
    '''
    num_data, dim_data = X.shape
    Y_eval = np.zeros((num_data, dim_data))
    Y_time = np.zeros((num_data, 1))
    for row, point in enumerate(X):
        started = time.time()
        Y_eval[row, :] = f(point)
        Y_time[row, 0] = time.time() - started
    return Y_eval, Y_time
def values_to_array(input_values):
    '''
    Transforms values of int, float, tuple or array type into a 2-d
    numpy array.

    Tuples become a column vector; ndarrays are promoted to at least
    2-d; anything else array-convertible (ints, floats, numpy scalars,
    lists) is converted and promoted to at least 2-d.
    '''
    if isinstance(input_values, tuple):
        values = np.array(input_values).reshape(-1, 1)
    elif isinstance(input_values, np.ndarray):
        values = np.atleast_2d(input_values)
    else:
        # The original guard `... or type(np.int64)` was always truthy,
        # so in practice every remaining input type was converted here
        # (which also made lists work) and the error branch was
        # unreachable; keep that de-facto behaviour.
        values = np.atleast_2d(np.array(input_values))
    return values
def merge_values(values1, values2):
    '''
    Merges two numpy arrays by calculating all possible combinations of
    rows: every row of values1 concatenated with every row of values2.
    '''
    left = values_to_array(values1)
    right = values_to_array(values2)
    # An empty side contributes nothing; return the other side as-is.
    if left.size == 0:
        return right
    if right.size == 0:
        return left
    combined = [np.hstack((row_l, row_r))
                for row_l in left
                for row_r in right]
    return np.atleast_2d(combined)
def normalize(Y, normalization_type='stats'):
"""Normalize the vector Y using statistics or its range.
:param Y: Row or column vector that you want to normalize.
:param normalization_type: String specifying the kind of normalization
to use. Options are 'stats' to use mean and standard deviation,
or 'maxmin' to use the range of function values.
:return Y_normalized: The normalized vector.
"""
Y = np.asarray(Y, dtype=float)
if np.max(Y.shape) != Y.size:
raise NotImplementedError('Only 1-dimensional arrays are supported.')
# Only normalize with non null sdev (divide by zero). For only one
# data point both std and ptp return 0.
if normalization_type == 'stats':
Y_norm = Y - Y.mean()
std = Y.std()
if std > 0:
Y_norm /= std
elif normalization_type == 'maxmin':
Y_norm = Y - Y.min()
y_range = np.ptp(Y)
if y_range > 0:
Y_norm /= y_range
# A range of [-1, 1] is more natural for a zero-mean GP
Y_norm = 2 * (Y_norm - 0.5)
else:
raise ValueError('Unknown normalization type: {}'.format(normalization_type))
return Y_norm
|
normal
|
{
"blob_id": "4e7cfbf51ec9bad691d8dd9f103f22728cf5e952",
"index": 1229,
"step-1": "<mask token>\n\n\ndef compute_integrated_acquisition(acquisition, x):\n \"\"\"\n Used to compute the acquisition function when samples of the hyper-parameters have been generated (used in GP_MCMC model).\n\n :param acquisition: acquisition function with GpyOpt model type GP_MCMC.\n :param x: location where the acquisition is evaluated.\n \"\"\"\n acqu_x = 0\n for i in range(acquisition.model.num_hmc_samples):\n acquisition.model.model.kern[:] = acquisition.model.hmc_samples[i, :]\n acqu_x += acquisition.acquisition_function(x)\n acqu_x = acqu_x / acquisition.model.num_hmc_samples\n return acqu_x\n\n\ndef compute_integrated_acquisition_withGradients(acquisition, x):\n \"\"\"\n Used to compute the acquisition function with gradients when samples of the hyper-parameters have been generated (used in GP_MCMC model).\n\n :param acquisition: acquisition function with GpyOpt model type GP_MCMC.\n :param x: location where the acquisition is evaluated.\n \"\"\"\n acqu_x = 0\n d_acqu_x = 0\n for i in range(acquisition.model.num_hmc_samples):\n acquisition.model.model.kern[:] = acquisition.model.hmc_samples[i, :]\n acqu_x_sample, d_acqu_x_sample = (acquisition.\n acquisition_function_withGradients(x))\n acqu_x += acqu_x_sample\n d_acqu_x += d_acqu_x_sample\n acqu_x = acqu_x / acquisition.model.num_hmc_samples\n d_acqu_x = d_acqu_x / acquisition.model.num_hmc_samples\n return acqu_x, d_acqu_x\n\n\ndef best_guess(f, X):\n \"\"\"\n Gets the best current guess from a vector.\n :param f: function to evaluate.\n :param X: locations.\n \"\"\"\n n = X.shape[0]\n xbest = np.zeros(n)\n for i in range(n):\n ff = f(X[0:i + 1])\n xbest[i] = ff[np.argmin(ff)]\n return xbest\n\n\n<mask token>\n\n\ndef best_value(Y, sign=1):\n \"\"\"\n Returns a vector whose components i are the minimum (default) or maximum of Y[:i]\n \"\"\"\n n = Y.shape[0]\n Y_best = np.ones(n)\n for i in range(n):\n if sign == 1:\n Y_best[i] = Y[:i + 1].min()\n else:\n Y_best[i] = Y[:i + 1].max()\n return 
Y_best\n\n\n<mask token>\n\n\ndef evaluate_function(f, X):\n \"\"\"\n Returns the evaluation of a function *f* and the time per evaluation\n \"\"\"\n num_data, dim_data = X.shape\n Y_eval = np.zeros((num_data, dim_data))\n Y_time = np.zeros((num_data, 1))\n for i in range(num_data):\n time_zero = time.time()\n Y_eval[i, :] = f(X[i, :])\n Y_time[i, :] = time.time() - time_zero\n return Y_eval, Y_time\n\n\n<mask token>\n\n\ndef merge_values(values1, values2):\n \"\"\"\n Merges two numpy arrays by calculating all possible combinations of rows\n \"\"\"\n array1 = values_to_array(values1)\n array2 = values_to_array(values2)\n if array1.size == 0:\n return array2\n if array2.size == 0:\n return array1\n merged_array = []\n for row_array1 in array1:\n for row_array2 in array2:\n merged_row = np.hstack((row_array1, row_array2))\n merged_array.append(merged_row)\n return np.atleast_2d(merged_array)\n\n\ndef normalize(Y, normalization_type='stats'):\n \"\"\"Normalize the vector Y using statistics or its range.\n\n :param Y: Row or column vector that you want to normalize.\n :param normalization_type: String specifying the kind of normalization\n to use. Options are 'stats' to use mean and standard deviation,\n or 'maxmin' to use the range of function values.\n :return Y_normalized: The normalized vector.\n \"\"\"\n Y = np.asarray(Y, dtype=float)\n if np.max(Y.shape) != Y.size:\n raise NotImplementedError('Only 1-dimensional arrays are supported.')\n if normalization_type == 'stats':\n Y_norm = Y - Y.mean()\n std = Y.std()\n if std > 0:\n Y_norm /= std\n elif normalization_type == 'maxmin':\n Y_norm = Y - Y.min()\n y_range = np.ptp(Y)\n if y_range > 0:\n Y_norm /= y_range\n Y_norm = 2 * (Y_norm - 0.5)\n else:\n raise ValueError('Unknown normalization type: {}'.format(\n normalization_type))\n return Y_norm\n",
"step-2": "<mask token>\n\n\ndef compute_integrated_acquisition(acquisition, x):\n \"\"\"\n Used to compute the acquisition function when samples of the hyper-parameters have been generated (used in GP_MCMC model).\n\n :param acquisition: acquisition function with GpyOpt model type GP_MCMC.\n :param x: location where the acquisition is evaluated.\n \"\"\"\n acqu_x = 0\n for i in range(acquisition.model.num_hmc_samples):\n acquisition.model.model.kern[:] = acquisition.model.hmc_samples[i, :]\n acqu_x += acquisition.acquisition_function(x)\n acqu_x = acqu_x / acquisition.model.num_hmc_samples\n return acqu_x\n\n\ndef compute_integrated_acquisition_withGradients(acquisition, x):\n \"\"\"\n Used to compute the acquisition function with gradients when samples of the hyper-parameters have been generated (used in GP_MCMC model).\n\n :param acquisition: acquisition function with GpyOpt model type GP_MCMC.\n :param x: location where the acquisition is evaluated.\n \"\"\"\n acqu_x = 0\n d_acqu_x = 0\n for i in range(acquisition.model.num_hmc_samples):\n acquisition.model.model.kern[:] = acquisition.model.hmc_samples[i, :]\n acqu_x_sample, d_acqu_x_sample = (acquisition.\n acquisition_function_withGradients(x))\n acqu_x += acqu_x_sample\n d_acqu_x += d_acqu_x_sample\n acqu_x = acqu_x / acquisition.model.num_hmc_samples\n d_acqu_x = d_acqu_x / acquisition.model.num_hmc_samples\n return acqu_x, d_acqu_x\n\n\ndef best_guess(f, X):\n \"\"\"\n Gets the best current guess from a vector.\n :param f: function to evaluate.\n :param X: locations.\n \"\"\"\n n = X.shape[0]\n xbest = np.zeros(n)\n for i in range(n):\n ff = f(X[0:i + 1])\n xbest[i] = ff[np.argmin(ff)]\n return xbest\n\n\ndef samples_multidimensional_uniform(bounds, num_data):\n \"\"\"\n Generates a multidimensional grid uniformly distributed.\n :param bounds: tuple defining the box constraints.\n :num_data: number of data points to generate.\n\n \"\"\"\n dim = len(bounds)\n Z_rand = np.zeros(shape=(num_data, dim))\n for k 
in range(0, dim):\n Z_rand[:, k] = np.random.uniform(low=bounds[k][0], high=bounds[k][1\n ], size=num_data)\n return Z_rand\n\n\ndef reshape(x, input_dim):\n \"\"\"\n Reshapes x into a matrix with input_dim columns\n\n \"\"\"\n x = np.array(x)\n if x.size == input_dim:\n x = x.reshape((1, input_dim))\n return x\n\n\n<mask token>\n\n\ndef best_value(Y, sign=1):\n \"\"\"\n Returns a vector whose components i are the minimum (default) or maximum of Y[:i]\n \"\"\"\n n = Y.shape[0]\n Y_best = np.ones(n)\n for i in range(n):\n if sign == 1:\n Y_best[i] = Y[:i + 1].min()\n else:\n Y_best[i] = Y[:i + 1].max()\n return Y_best\n\n\n<mask token>\n\n\ndef evaluate_function(f, X):\n \"\"\"\n Returns the evaluation of a function *f* and the time per evaluation\n \"\"\"\n num_data, dim_data = X.shape\n Y_eval = np.zeros((num_data, dim_data))\n Y_time = np.zeros((num_data, 1))\n for i in range(num_data):\n time_zero = time.time()\n Y_eval[i, :] = f(X[i, :])\n Y_time[i, :] = time.time() - time_zero\n return Y_eval, Y_time\n\n\n<mask token>\n\n\ndef merge_values(values1, values2):\n \"\"\"\n Merges two numpy arrays by calculating all possible combinations of rows\n \"\"\"\n array1 = values_to_array(values1)\n array2 = values_to_array(values2)\n if array1.size == 0:\n return array2\n if array2.size == 0:\n return array1\n merged_array = []\n for row_array1 in array1:\n for row_array2 in array2:\n merged_row = np.hstack((row_array1, row_array2))\n merged_array.append(merged_row)\n return np.atleast_2d(merged_array)\n\n\ndef normalize(Y, normalization_type='stats'):\n \"\"\"Normalize the vector Y using statistics or its range.\n\n :param Y: Row or column vector that you want to normalize.\n :param normalization_type: String specifying the kind of normalization\n to use. 
Options are 'stats' to use mean and standard deviation,\n or 'maxmin' to use the range of function values.\n :return Y_normalized: The normalized vector.\n \"\"\"\n Y = np.asarray(Y, dtype=float)\n if np.max(Y.shape) != Y.size:\n raise NotImplementedError('Only 1-dimensional arrays are supported.')\n if normalization_type == 'stats':\n Y_norm = Y - Y.mean()\n std = Y.std()\n if std > 0:\n Y_norm /= std\n elif normalization_type == 'maxmin':\n Y_norm = Y - Y.min()\n y_range = np.ptp(Y)\n if y_range > 0:\n Y_norm /= y_range\n Y_norm = 2 * (Y_norm - 0.5)\n else:\n raise ValueError('Unknown normalization type: {}'.format(\n normalization_type))\n return Y_norm\n",
"step-3": "<mask token>\n\n\ndef compute_integrated_acquisition(acquisition, x):\n \"\"\"\n Used to compute the acquisition function when samples of the hyper-parameters have been generated (used in GP_MCMC model).\n\n :param acquisition: acquisition function with GpyOpt model type GP_MCMC.\n :param x: location where the acquisition is evaluated.\n \"\"\"\n acqu_x = 0\n for i in range(acquisition.model.num_hmc_samples):\n acquisition.model.model.kern[:] = acquisition.model.hmc_samples[i, :]\n acqu_x += acquisition.acquisition_function(x)\n acqu_x = acqu_x / acquisition.model.num_hmc_samples\n return acqu_x\n\n\ndef compute_integrated_acquisition_withGradients(acquisition, x):\n \"\"\"\n Used to compute the acquisition function with gradients when samples of the hyper-parameters have been generated (used in GP_MCMC model).\n\n :param acquisition: acquisition function with GpyOpt model type GP_MCMC.\n :param x: location where the acquisition is evaluated.\n \"\"\"\n acqu_x = 0\n d_acqu_x = 0\n for i in range(acquisition.model.num_hmc_samples):\n acquisition.model.model.kern[:] = acquisition.model.hmc_samples[i, :]\n acqu_x_sample, d_acqu_x_sample = (acquisition.\n acquisition_function_withGradients(x))\n acqu_x += acqu_x_sample\n d_acqu_x += d_acqu_x_sample\n acqu_x = acqu_x / acquisition.model.num_hmc_samples\n d_acqu_x = d_acqu_x / acquisition.model.num_hmc_samples\n return acqu_x, d_acqu_x\n\n\ndef best_guess(f, X):\n \"\"\"\n Gets the best current guess from a vector.\n :param f: function to evaluate.\n :param X: locations.\n \"\"\"\n n = X.shape[0]\n xbest = np.zeros(n)\n for i in range(n):\n ff = f(X[0:i + 1])\n xbest[i] = ff[np.argmin(ff)]\n return xbest\n\n\ndef samples_multidimensional_uniform(bounds, num_data):\n \"\"\"\n Generates a multidimensional grid uniformly distributed.\n :param bounds: tuple defining the box constraints.\n :num_data: number of data points to generate.\n\n \"\"\"\n dim = len(bounds)\n Z_rand = np.zeros(shape=(num_data, dim))\n for k 
in range(0, dim):\n Z_rand[:, k] = np.random.uniform(low=bounds[k][0], high=bounds[k][1\n ], size=num_data)\n return Z_rand\n\n\ndef reshape(x, input_dim):\n \"\"\"\n Reshapes x into a matrix with input_dim columns\n\n \"\"\"\n x = np.array(x)\n if x.size == input_dim:\n x = x.reshape((1, input_dim))\n return x\n\n\n<mask token>\n\n\ndef get_d_moments(model, x):\n \"\"\"\n Gradients with respect to x of the moments (mean and sdev.) of the GP\n :param model: GPy model.\n :param x: location where the gradients are evaluated.\n \"\"\"\n input_dim = model.input_dim\n x = reshape(x, input_dim)\n _, v = model.predict(x)\n dmdx, dvdx = model.predictive_gradients(x)\n dmdx = dmdx[:, :, 0]\n dsdx = dvdx / (2 * np.sqrt(v))\n return dmdx, dsdx\n\n\ndef get_quantiles(acquisition_par, fmin, m, s):\n \"\"\"\n Quantiles of the Gaussian distribution useful to determine the acquisition function values\n :param acquisition_par: parameter of the acquisition function\n :param fmin: current minimum.\n :param m: vector of means.\n :param s: vector of standard deviations.\n \"\"\"\n if isinstance(s, np.ndarray):\n s[s < 1e-10] = 1e-10\n elif s < 1e-10:\n s = 1e-10\n u = (fmin - m - acquisition_par) / s\n phi = np.exp(-0.5 * u ** 2) / np.sqrt(2 * np.pi)\n Phi = 0.5 * erfc(-u / np.sqrt(2))\n return phi, Phi, u\n\n\ndef best_value(Y, sign=1):\n \"\"\"\n Returns a vector whose components i are the minimum (default) or maximum of Y[:i]\n \"\"\"\n n = Y.shape[0]\n Y_best = np.ones(n)\n for i in range(n):\n if sign == 1:\n Y_best[i] = Y[:i + 1].min()\n else:\n Y_best[i] = Y[:i + 1].max()\n return Y_best\n\n\n<mask token>\n\n\ndef evaluate_function(f, X):\n \"\"\"\n Returns the evaluation of a function *f* and the time per evaluation\n \"\"\"\n num_data, dim_data = X.shape\n Y_eval = np.zeros((num_data, dim_data))\n Y_time = np.zeros((num_data, 1))\n for i in range(num_data):\n time_zero = time.time()\n Y_eval[i, :] = f(X[i, :])\n Y_time[i, :] = time.time() - time_zero\n return Y_eval, 
Y_time\n\n\ndef values_to_array(input_values):\n \"\"\"\n Transforms a values of int, float and tuples to a column vector numpy array\n \"\"\"\n if type(input_values) == tuple:\n values = np.array(input_values).reshape(-1, 1)\n elif type(input_values) == np.ndarray:\n values = np.atleast_2d(input_values)\n elif type(input_values) == int or type(input_values) == float or type(np\n .int64):\n values = np.atleast_2d(np.array(input_values))\n else:\n print('Type to transform not recognized')\n return values\n\n\ndef merge_values(values1, values2):\n \"\"\"\n Merges two numpy arrays by calculating all possible combinations of rows\n \"\"\"\n array1 = values_to_array(values1)\n array2 = values_to_array(values2)\n if array1.size == 0:\n return array2\n if array2.size == 0:\n return array1\n merged_array = []\n for row_array1 in array1:\n for row_array2 in array2:\n merged_row = np.hstack((row_array1, row_array2))\n merged_array.append(merged_row)\n return np.atleast_2d(merged_array)\n\n\ndef normalize(Y, normalization_type='stats'):\n \"\"\"Normalize the vector Y using statistics or its range.\n\n :param Y: Row or column vector that you want to normalize.\n :param normalization_type: String specifying the kind of normalization\n to use. Options are 'stats' to use mean and standard deviation,\n or 'maxmin' to use the range of function values.\n :return Y_normalized: The normalized vector.\n \"\"\"\n Y = np.asarray(Y, dtype=float)\n if np.max(Y.shape) != Y.size:\n raise NotImplementedError('Only 1-dimensional arrays are supported.')\n if normalization_type == 'stats':\n Y_norm = Y - Y.mean()\n std = Y.std()\n if std > 0:\n Y_norm /= std\n elif normalization_type == 'maxmin':\n Y_norm = Y - Y.min()\n y_range = np.ptp(Y)\n if y_range > 0:\n Y_norm /= y_range\n Y_norm = 2 * (Y_norm - 0.5)\n else:\n raise ValueError('Unknown normalization type: {}'.format(\n normalization_type))\n return Y_norm\n",
"step-4": "import numpy as np\nfrom scipy.special import erfc\nimport time\nfrom ..core.errors import InvalidConfigError\n\n\ndef compute_integrated_acquisition(acquisition, x):\n \"\"\"\n Used to compute the acquisition function when samples of the hyper-parameters have been generated (used in GP_MCMC model).\n\n :param acquisition: acquisition function with GpyOpt model type GP_MCMC.\n :param x: location where the acquisition is evaluated.\n \"\"\"\n acqu_x = 0\n for i in range(acquisition.model.num_hmc_samples):\n acquisition.model.model.kern[:] = acquisition.model.hmc_samples[i, :]\n acqu_x += acquisition.acquisition_function(x)\n acqu_x = acqu_x / acquisition.model.num_hmc_samples\n return acqu_x\n\n\ndef compute_integrated_acquisition_withGradients(acquisition, x):\n \"\"\"\n Used to compute the acquisition function with gradients when samples of the hyper-parameters have been generated (used in GP_MCMC model).\n\n :param acquisition: acquisition function with GpyOpt model type GP_MCMC.\n :param x: location where the acquisition is evaluated.\n \"\"\"\n acqu_x = 0\n d_acqu_x = 0\n for i in range(acquisition.model.num_hmc_samples):\n acquisition.model.model.kern[:] = acquisition.model.hmc_samples[i, :]\n acqu_x_sample, d_acqu_x_sample = (acquisition.\n acquisition_function_withGradients(x))\n acqu_x += acqu_x_sample\n d_acqu_x += d_acqu_x_sample\n acqu_x = acqu_x / acquisition.model.num_hmc_samples\n d_acqu_x = d_acqu_x / acquisition.model.num_hmc_samples\n return acqu_x, d_acqu_x\n\n\ndef best_guess(f, X):\n \"\"\"\n Gets the best current guess from a vector.\n :param f: function to evaluate.\n :param X: locations.\n \"\"\"\n n = X.shape[0]\n xbest = np.zeros(n)\n for i in range(n):\n ff = f(X[0:i + 1])\n xbest[i] = ff[np.argmin(ff)]\n return xbest\n\n\ndef samples_multidimensional_uniform(bounds, num_data):\n \"\"\"\n Generates a multidimensional grid uniformly distributed.\n :param bounds: tuple defining the box constraints.\n :num_data: number of data 
points to generate.\n\n \"\"\"\n dim = len(bounds)\n Z_rand = np.zeros(shape=(num_data, dim))\n for k in range(0, dim):\n Z_rand[:, k] = np.random.uniform(low=bounds[k][0], high=bounds[k][1\n ], size=num_data)\n return Z_rand\n\n\ndef reshape(x, input_dim):\n \"\"\"\n Reshapes x into a matrix with input_dim columns\n\n \"\"\"\n x = np.array(x)\n if x.size == input_dim:\n x = x.reshape((1, input_dim))\n return x\n\n\ndef get_moments(model, x):\n \"\"\"\n Moments (mean and sdev.) of a GP model at x\n\n \"\"\"\n input_dim = model.X.shape[1]\n x = reshape(x, input_dim)\n fmin = min(model.predict(model.X)[0])\n m, v = model.predict(x)\n s = np.sqrt(np.clip(v, 0, np.inf))\n return m, s, fmin\n\n\ndef get_d_moments(model, x):\n \"\"\"\n Gradients with respect to x of the moments (mean and sdev.) of the GP\n :param model: GPy model.\n :param x: location where the gradients are evaluated.\n \"\"\"\n input_dim = model.input_dim\n x = reshape(x, input_dim)\n _, v = model.predict(x)\n dmdx, dvdx = model.predictive_gradients(x)\n dmdx = dmdx[:, :, 0]\n dsdx = dvdx / (2 * np.sqrt(v))\n return dmdx, dsdx\n\n\ndef get_quantiles(acquisition_par, fmin, m, s):\n \"\"\"\n Quantiles of the Gaussian distribution useful to determine the acquisition function values\n :param acquisition_par: parameter of the acquisition function\n :param fmin: current minimum.\n :param m: vector of means.\n :param s: vector of standard deviations.\n \"\"\"\n if isinstance(s, np.ndarray):\n s[s < 1e-10] = 1e-10\n elif s < 1e-10:\n s = 1e-10\n u = (fmin - m - acquisition_par) / s\n phi = np.exp(-0.5 * u ** 2) / np.sqrt(2 * np.pi)\n Phi = 0.5 * erfc(-u / np.sqrt(2))\n return phi, Phi, u\n\n\ndef best_value(Y, sign=1):\n \"\"\"\n Returns a vector whose components i are the minimum (default) or maximum of Y[:i]\n \"\"\"\n n = Y.shape[0]\n Y_best = np.ones(n)\n for i in range(n):\n if sign == 1:\n Y_best[i] = Y[:i + 1].min()\n else:\n Y_best[i] = Y[:i + 1].max()\n return Y_best\n\n\ndef spawn(f):\n \"\"\"\n 
Function for parallel evaluation of the acquisition function\n \"\"\"\n\n def fun(pipe, x):\n pipe.send(f(x))\n pipe.close()\n return fun\n\n\ndef evaluate_function(f, X):\n \"\"\"\n Returns the evaluation of a function *f* and the time per evaluation\n \"\"\"\n num_data, dim_data = X.shape\n Y_eval = np.zeros((num_data, dim_data))\n Y_time = np.zeros((num_data, 1))\n for i in range(num_data):\n time_zero = time.time()\n Y_eval[i, :] = f(X[i, :])\n Y_time[i, :] = time.time() - time_zero\n return Y_eval, Y_time\n\n\ndef values_to_array(input_values):\n \"\"\"\n Transforms a values of int, float and tuples to a column vector numpy array\n \"\"\"\n if type(input_values) == tuple:\n values = np.array(input_values).reshape(-1, 1)\n elif type(input_values) == np.ndarray:\n values = np.atleast_2d(input_values)\n elif type(input_values) == int or type(input_values) == float or type(np\n .int64):\n values = np.atleast_2d(np.array(input_values))\n else:\n print('Type to transform not recognized')\n return values\n\n\ndef merge_values(values1, values2):\n \"\"\"\n Merges two numpy arrays by calculating all possible combinations of rows\n \"\"\"\n array1 = values_to_array(values1)\n array2 = values_to_array(values2)\n if array1.size == 0:\n return array2\n if array2.size == 0:\n return array1\n merged_array = []\n for row_array1 in array1:\n for row_array2 in array2:\n merged_row = np.hstack((row_array1, row_array2))\n merged_array.append(merged_row)\n return np.atleast_2d(merged_array)\n\n\ndef normalize(Y, normalization_type='stats'):\n \"\"\"Normalize the vector Y using statistics or its range.\n\n :param Y: Row or column vector that you want to normalize.\n :param normalization_type: String specifying the kind of normalization\n to use. 
Options are 'stats' to use mean and standard deviation,\n or 'maxmin' to use the range of function values.\n :return Y_normalized: The normalized vector.\n \"\"\"\n Y = np.asarray(Y, dtype=float)\n if np.max(Y.shape) != Y.size:\n raise NotImplementedError('Only 1-dimensional arrays are supported.')\n if normalization_type == 'stats':\n Y_norm = Y - Y.mean()\n std = Y.std()\n if std > 0:\n Y_norm /= std\n elif normalization_type == 'maxmin':\n Y_norm = Y - Y.min()\n y_range = np.ptp(Y)\n if y_range > 0:\n Y_norm /= y_range\n Y_norm = 2 * (Y_norm - 0.5)\n else:\n raise ValueError('Unknown normalization type: {}'.format(\n normalization_type))\n return Y_norm\n",
"step-5": "# Copyright (c) 2016, the GPyOpt Authors\n# Licensed under the BSD 3-clause license (see LICENSE.txt)\n\nimport numpy as np\nfrom scipy.special import erfc\nimport time\nfrom ..core.errors import InvalidConfigError\n\ndef compute_integrated_acquisition(acquisition,x):\n '''\n Used to compute the acquisition function when samples of the hyper-parameters have been generated (used in GP_MCMC model).\n\n :param acquisition: acquisition function with GpyOpt model type GP_MCMC.\n :param x: location where the acquisition is evaluated.\n '''\n\n acqu_x = 0\n\n for i in range(acquisition.model.num_hmc_samples):\n acquisition.model.model.kern[:] = acquisition.model.hmc_samples[i,:]\n acqu_x += acquisition.acquisition_function(x)\n\n acqu_x = acqu_x/acquisition.model.num_hmc_samples\n return acqu_x\n\ndef compute_integrated_acquisition_withGradients(acquisition,x):\n '''\n Used to compute the acquisition function with gradients when samples of the hyper-parameters have been generated (used in GP_MCMC model).\n\n :param acquisition: acquisition function with GpyOpt model type GP_MCMC.\n :param x: location where the acquisition is evaluated.\n '''\n\n acqu_x = 0\n d_acqu_x = 0\n\n for i in range(acquisition.model.num_hmc_samples):\n acquisition.model.model.kern[:] = acquisition.model.hmc_samples[i,:]\n acqu_x_sample, d_acqu_x_sample = acquisition.acquisition_function_withGradients(x)\n acqu_x += acqu_x_sample\n d_acqu_x += d_acqu_x_sample\n\n acqu_x = acqu_x/acquisition.model.num_hmc_samples\n d_acqu_x = d_acqu_x/acquisition.model.num_hmc_samples\n\n return acqu_x, d_acqu_x\n\n\ndef best_guess(f,X):\n '''\n Gets the best current guess from a vector.\n :param f: function to evaluate.\n :param X: locations.\n '''\n n = X.shape[0]\n xbest = np.zeros(n)\n for i in range(n):\n ff = f(X[0:(i+1)])\n xbest[i] = ff[np.argmin(ff)]\n return xbest\n\n\ndef samples_multidimensional_uniform(bounds,num_data):\n '''\n Generates a multidimensional grid uniformly distributed.\n :param 
bounds: tuple defining the box constraints.\n :num_data: number of data points to generate.\n\n '''\n dim = len(bounds)\n Z_rand = np.zeros(shape=(num_data,dim))\n for k in range(0,dim): Z_rand[:,k] = np.random.uniform(low=bounds[k][0],high=bounds[k][1],size=num_data)\n return Z_rand\n\n\ndef reshape(x,input_dim):\n '''\n Reshapes x into a matrix with input_dim columns\n\n '''\n x = np.array(x)\n if x.size ==input_dim:\n x = x.reshape((1,input_dim))\n return x\n\ndef get_moments(model,x):\n '''\n Moments (mean and sdev.) of a GP model at x\n\n '''\n input_dim = model.X.shape[1]\n x = reshape(x,input_dim)\n fmin = min(model.predict(model.X)[0])\n m, v = model.predict(x)\n s = np.sqrt(np.clip(v, 0, np.inf))\n return (m,s, fmin)\n\ndef get_d_moments(model,x):\n '''\n Gradients with respect to x of the moments (mean and sdev.) of the GP\n :param model: GPy model.\n :param x: location where the gradients are evaluated.\n '''\n input_dim = model.input_dim\n x = reshape(x,input_dim)\n _, v = model.predict(x)\n dmdx, dvdx = model.predictive_gradients(x)\n dmdx = dmdx[:,:,0]\n dsdx = dvdx / (2*np.sqrt(v))\n return (dmdx, dsdx)\n\n\ndef get_quantiles(acquisition_par, fmin, m, s):\n '''\n Quantiles of the Gaussian distribution useful to determine the acquisition function values\n :param acquisition_par: parameter of the acquisition function\n :param fmin: current minimum.\n :param m: vector of means.\n :param s: vector of standard deviations.\n '''\n if isinstance(s, np.ndarray):\n s[s<1e-10] = 1e-10\n elif s< 1e-10:\n s = 1e-10\n u = (fmin - m - acquisition_par)/s\n phi = np.exp(-0.5 * u**2) / np.sqrt(2*np.pi)\n Phi = 0.5 * erfc(-u / np.sqrt(2))\n return (phi, Phi, u)\n\n\ndef best_value(Y,sign=1):\n '''\n Returns a vector whose components i are the minimum (default) or maximum of Y[:i]\n '''\n n = Y.shape[0]\n Y_best = np.ones(n)\n for i in range(n):\n if sign == 1:\n Y_best[i]=Y[:(i+1)].min()\n else:\n Y_best[i]=Y[:(i+1)].max()\n return Y_best\n\ndef spawn(f):\n '''\n 
Function for parallel evaluation of the acquisition function\n '''\n def fun(pipe,x):\n pipe.send(f(x))\n pipe.close()\n return fun\n\n\ndef evaluate_function(f,X):\n '''\n Returns the evaluation of a function *f* and the time per evaluation\n '''\n num_data, dim_data = X.shape\n Y_eval = np.zeros((num_data, dim_data))\n Y_time = np.zeros((num_data, 1))\n for i in range(num_data):\n time_zero = time.time()\n Y_eval[i,:] = f(X[i,:])\n Y_time[i,:] = time.time() - time_zero\n return Y_eval, Y_time\n\n\ndef values_to_array(input_values):\n '''\n Transforms a values of int, float and tuples to a column vector numpy array\n '''\n if type(input_values)==tuple:\n values = np.array(input_values).reshape(-1,1)\n elif type(input_values) == np.ndarray:\n values = np.atleast_2d(input_values)\n elif type(input_values)==int or type(input_values)==float or type(np.int64):\n values = np.atleast_2d(np.array(input_values))\n else:\n print('Type to transform not recognized')\n return values\n\n\ndef merge_values(values1,values2):\n '''\n Merges two numpy arrays by calculating all possible combinations of rows\n '''\n array1 = values_to_array(values1)\n array2 = values_to_array(values2)\n\n if array1.size == 0:\n return array2\n if array2.size == 0:\n return array1\n\n merged_array = []\n for row_array1 in array1:\n for row_array2 in array2:\n merged_row = np.hstack((row_array1,row_array2))\n merged_array.append(merged_row)\n return np.atleast_2d(merged_array)\n\n\ndef normalize(Y, normalization_type='stats'):\n \"\"\"Normalize the vector Y using statistics or its range.\n\n :param Y: Row or column vector that you want to normalize.\n :param normalization_type: String specifying the kind of normalization\n to use. 
Options are 'stats' to use mean and standard deviation,\n or 'maxmin' to use the range of function values.\n :return Y_normalized: The normalized vector.\n \"\"\"\n Y = np.asarray(Y, dtype=float)\n\n if np.max(Y.shape) != Y.size:\n raise NotImplementedError('Only 1-dimensional arrays are supported.')\n\n # Only normalize with non null sdev (divide by zero). For only one\n # data point both std and ptp return 0.\n if normalization_type == 'stats':\n Y_norm = Y - Y.mean()\n std = Y.std()\n if std > 0:\n Y_norm /= std\n elif normalization_type == 'maxmin':\n Y_norm = Y - Y.min()\n y_range = np.ptp(Y)\n if y_range > 0:\n Y_norm /= y_range\n # A range of [-1, 1] is more natural for a zero-mean GP\n Y_norm = 2 * (Y_norm - 0.5)\n else:\n raise ValueError('Unknown normalization type: {}'.format(normalization_type))\n\n return Y_norm\n",
"step-ids": [
7,
9,
12,
15,
16
]
}
|
[
7,
9,
12,
15,
16
] |
from ad_api.base import Client, sp_endpoint, fill_query_params, ApiResponse
class CampaignNegativeKeywords(Client):
    """Sponsored Products client for campaign-level negative keywords.

    Each method is bound to its API route via the ``sp_endpoint`` decorator,
    which injects the endpoint path into ``kwargs`` under the ``'path'`` key.
    """

    @sp_endpoint('/v2/sp/campaignNegativeKeywords/{}', method='GET')
    def get_campaign_negative_keyword(self, keywordId, **kwargs) -> ApiResponse:
        r"""
        get_campaign_negative_keyword(self, keywordId, \*\*kwargs) -> ApiResponse

        Retrieve a single campaign negative keyword by its identifier.

        path **keywordId**:*number* | Required. The identifier of an existing keyword.

        Returns:
            ApiResponse
        """
        # Substitute the keyword id into the templated endpoint path.
        endpoint = fill_query_params(kwargs.pop('path'), keywordId)
        return self._request(endpoint, params=kwargs)

    @sp_endpoint('/v2/sp/campaignNegativeKeywords/{}', method='DELETE')
    def delete_campaign_negative_keyword(self, keywordId, **kwargs) -> ApiResponse:
        r"""
        delete_campaign_negative_keyword(self, keywordId, \*\*kwargs) -> ApiResponse

        Archive (soft-delete) a campaign negative keyword.

        path **keywordId**:*number* | Required. The identifier of an existing keyword.

        Returns:
            ApiResponse
        """
        endpoint = fill_query_params(kwargs.pop('path'), keywordId)
        return self._request(endpoint, params=kwargs)

    @sp_endpoint('/v2/sp/campaignNegativeKeywords/extended/{}', method='GET')
    def get_campaign_negative_keyword_extended(self, keywordId, **kwargs) -> ApiResponse:
        r"""
        get_campaign_negative_keyword_extended(self, keywordId, \*\*kwargs) -> ApiResponse

        Retrieve a campaign negative keyword including its extended data fields.

        path **keywordId**:*number* | Required. The identifier of an existing keyword.

        Returns:
            ApiResponse
        """
        endpoint = fill_query_params(kwargs.pop('path'), keywordId)
        return self._request(endpoint, params=kwargs)

    @sp_endpoint('/v2/sp/campaignNegativeKeywords/extended', method='GET')
    def list_campaign_negative_keywords_extended(self, **kwargs) -> ApiResponse:
        r"""
        list_campaign_negative_keywords_extended(self, \*\*kwargs) -> ApiResponse

        List campaign negative keywords including their extended data fields.

        query **startIndex**:*integer* | Optional. 0-indexed record offset for the result set. Default value : 0

        query **count**:*integer* | Optional. Number of records to include in the paged response. Defaults to max page size.

        query **matchTypeFilter**:*string* | Optional. Restricts results to keywords with match types within the specified comma-separated list. Available values : negativePhrase, negativeExact.

        query **keywordText**:*string* | Optional. Restricts results to keywords that match the specified text exactly.

        query **campaignIdFilter**:*string* | Optional. A comma-delimited list of campaign identifiers.

        query **keywordIdFilter**:*string* | Optional. Restricts results to keywords associated with campaigns specified by identifier in the comma-delimited list.

        Returns:
            ApiResponse
        """
        endpoint = kwargs.pop('path')
        return self._request(endpoint, params=kwargs)

    @sp_endpoint('/v2/sp/campaignNegativeKeywords', method='GET')
    def list_campaign_negative_keywords(self, **kwargs) -> ApiResponse:
        r"""
        list_campaign_negative_keywords(self, \*\*kwargs) -> ApiResponse

        List campaign negative keywords.

        query **startIndex**:*integer* | Optional. 0-indexed record offset for the result set. Default value : 0

        query **count**:*integer* | Optional. Number of records to include in the paged response. Defaults to max page size.

        query **matchTypeFilter**:*string* | Optional. Restricts results to keywords with match types within the specified comma-separated list. Available values : negativePhrase, negativeExact.

        query **keywordText**:*string* | Optional. Restricts results to keywords that match the specified text exactly.

        query **campaignIdFilter**:*string* | Optional. A comma-delimited list of campaign identifiers.

        query **keywordIdFilter**:*string* | Optional. Restricts results to keywords associated with campaigns specified by identifier in the comma-delimited list.

        Returns:
            ApiResponse
        """
        endpoint = kwargs.pop('path')
        return self._request(endpoint, params=kwargs)

    @sp_endpoint('/v2/sp/campaignNegativeKeywords', method='POST')
    def create_campaign_negative_keywords(self, **kwargs) -> ApiResponse:
        r"""
        create_campaign_negative_keywords(self, \*\*kwargs) -> ApiResponse:

        Create one or more campaign negative keywords.

        body: | REQUIRED {'description': 'An array of keyword objects.}'

            | '**campaignId**': *number*, {'description': 'The identifer of the campaign to which the keyword is associated.'}
            | '**state**': *string*, {'description': 'The current resource state.' , 'Enum': '[ enabled ]'}
            | '**keywordText**': *string*, {'description': 'The text of the expression to match against a search query.'}
            | '**matchType**': *string*, {'description': 'The type of match.' , 'Enum': '[ negativeExact, negativePhrase ]'}

        Returns:
            ApiResponse
        """
        # The request body is carried separately from the query parameters.
        payload = kwargs.pop('body')
        return self._request(kwargs.pop('path'), data=payload, params=kwargs)

    @sp_endpoint('/v2/sp/campaignNegativeKeywords', method='PUT')
    def edit_campaign_negative_keywords(self, **kwargs) -> ApiResponse:
        r"""
        edit_campaign_negative_keywords(self, \*\*kwargs) -> ApiResponse:

        Update one or more campaign negative keywords.

        body: | REQUIRED {'description': 'An array of campaign negative keywords with updated values.'}

            | '**keywordId**': *number*, {'description': 'The identifer of the campaign to which the keyword is associated.'}
            | '**state**': *string*, {'description': 'The current resource state.' , 'Enum': '[ deleted ]'}

        Returns:
            ApiResponse
        """
        payload = kwargs.pop('body')
        return self._request(kwargs.pop('path'), data=payload, params=kwargs)
|
normal
|
{
"blob_id": "f6e0215f9992ceab51887aab6a19f58a5d013eb4",
"index": 7829,
"step-1": "<mask token>\n\n\nclass CampaignNegativeKeywords(Client):\n <mask token>\n\n @sp_endpoint('/v2/sp/campaignNegativeKeywords/{}', method='DELETE')\n def delete_campaign_negative_keyword(self, keywordId, **kwargs\n ) ->ApiResponse:\n \"\"\"\n\n delete_campaign_negative_keyword(self, keywordId, \\\\*\\\\*kwargs) -> ApiResponse\n\n Archives a campaign negative keyword.\n\n path **keywordId**:*number* | Required. The identifier of an existing keyword.\n\n\n Returns:\n\n ApiResponse\n\n \"\"\"\n return self._request(fill_query_params(kwargs.pop('path'),\n keywordId), params=kwargs)\n\n @sp_endpoint('/v2/sp/campaignNegativeKeywords/extended/{}', method='GET')\n def get_campaign_negative_keyword_extended(self, keywordId, **kwargs\n ) ->ApiResponse:\n \"\"\"\n\n get_campaign_negative_keyword_extended(self, keywordId, \\\\*\\\\*kwargs) -> ApiResponse\n\n Gets a campaign negative keyword that has extended data fields.\n\n path **keywordId**:*number* | Required. The identifier of an existing keyword.\n\n Returns:\n\n ApiResponse\n\n \"\"\"\n return self._request(fill_query_params(kwargs.pop('path'),\n keywordId), params=kwargs)\n\n @sp_endpoint('/v2/sp/campaignNegativeKeywords/extended', method='GET')\n def list_campaign_negative_keywords_extended(self, **kwargs) ->ApiResponse:\n \"\"\"\n list_campaign_negative_keywords_extended(self, \\\\*\\\\*kwargs) -> ApiResponse\n\n Gets a list of campaign negative keywords that have extended data fields.\n\n query **startIndex**:*integer* | Optional. 0-indexed record offset for the result set. Default value : 0\n\n query **count**:*integer* | Optional. Number of records to include in the paged response. Defaults to max page size.\n\n query **matchTypeFilter**:*string* | Optional. Restricts results to keywords with match types within the specified comma-separated list. Available values : negativePhrase, negativeExact.\n\n query **keywordText**:*string* | Optional. 
Restricts results to keywords that match the specified text exactly.\n\n query **campaignIdFilter**:*string* | Optional. A comma-delimited list of campaign identifiers.\n\n query **keywordIdFilter**:*string* | Optional. Restricts results to keywords associated with campaigns specified by identifier in the comma-delimited list.\n\n Returns:\n\n ApiResponse\n\n \"\"\"\n return self._request(kwargs.pop('path'), params=kwargs)\n <mask token>\n <mask token>\n\n @sp_endpoint('/v2/sp/campaignNegativeKeywords', method='PUT')\n def edit_campaign_negative_keywords(self, **kwargs) ->ApiResponse:\n \"\"\"\n edit_campaign_negative_keywords(self, \\\\*\\\\*kwargs) -> ApiResponse:\n\n Updates one or more campaign negative keywords.\n\n body: | REQUIRED {'description': 'An array of campaign negative keywords with updated values.'}\n\n | '**keywordId**': *number*, {'description': 'The identifer of the campaign to which the keyword is associated.'}\n | '**state**': *string*, {'description': 'The current resource state.' , 'Enum': '[ deleted ]'}\n\n Returns:\n\n ApiResponse\n\n \"\"\"\n return self._request(kwargs.pop('path'), data=kwargs.pop('body'),\n params=kwargs)\n",
"step-2": "<mask token>\n\n\nclass CampaignNegativeKeywords(Client):\n\n @sp_endpoint('/v2/sp/campaignNegativeKeywords/{}', method='GET')\n def get_campaign_negative_keyword(self, keywordId, **kwargs) ->ApiResponse:\n \"\"\"\n\n get_campaign_negative_keyword(self, keywordId, \\\\*\\\\*kwargs) -> ApiResponse\n\n Gets a campaign negative keyword specified by identifier.\n\n path **keywordId**:*number* | Required. The identifier of an existing keyword.\n\n\n Returns:\n\n ApiResponse\n\n \"\"\"\n return self._request(fill_query_params(kwargs.pop('path'),\n keywordId), params=kwargs)\n\n @sp_endpoint('/v2/sp/campaignNegativeKeywords/{}', method='DELETE')\n def delete_campaign_negative_keyword(self, keywordId, **kwargs\n ) ->ApiResponse:\n \"\"\"\n\n delete_campaign_negative_keyword(self, keywordId, \\\\*\\\\*kwargs) -> ApiResponse\n\n Archives a campaign negative keyword.\n\n path **keywordId**:*number* | Required. The identifier of an existing keyword.\n\n\n Returns:\n\n ApiResponse\n\n \"\"\"\n return self._request(fill_query_params(kwargs.pop('path'),\n keywordId), params=kwargs)\n\n @sp_endpoint('/v2/sp/campaignNegativeKeywords/extended/{}', method='GET')\n def get_campaign_negative_keyword_extended(self, keywordId, **kwargs\n ) ->ApiResponse:\n \"\"\"\n\n get_campaign_negative_keyword_extended(self, keywordId, \\\\*\\\\*kwargs) -> ApiResponse\n\n Gets a campaign negative keyword that has extended data fields.\n\n path **keywordId**:*number* | Required. 
The identifier of an existing keyword.\n\n Returns:\n\n ApiResponse\n\n \"\"\"\n return self._request(fill_query_params(kwargs.pop('path'),\n keywordId), params=kwargs)\n\n @sp_endpoint('/v2/sp/campaignNegativeKeywords/extended', method='GET')\n def list_campaign_negative_keywords_extended(self, **kwargs) ->ApiResponse:\n \"\"\"\n list_campaign_negative_keywords_extended(self, \\\\*\\\\*kwargs) -> ApiResponse\n\n Gets a list of campaign negative keywords that have extended data fields.\n\n query **startIndex**:*integer* | Optional. 0-indexed record offset for the result set. Default value : 0\n\n query **count**:*integer* | Optional. Number of records to include in the paged response. Defaults to max page size.\n\n query **matchTypeFilter**:*string* | Optional. Restricts results to keywords with match types within the specified comma-separated list. Available values : negativePhrase, negativeExact.\n\n query **keywordText**:*string* | Optional. Restricts results to keywords that match the specified text exactly.\n\n query **campaignIdFilter**:*string* | Optional. A comma-delimited list of campaign identifiers.\n\n query **keywordIdFilter**:*string* | Optional. Restricts results to keywords associated with campaigns specified by identifier in the comma-delimited list.\n\n Returns:\n\n ApiResponse\n\n \"\"\"\n return self._request(kwargs.pop('path'), params=kwargs)\n <mask token>\n <mask token>\n\n @sp_endpoint('/v2/sp/campaignNegativeKeywords', method='PUT')\n def edit_campaign_negative_keywords(self, **kwargs) ->ApiResponse:\n \"\"\"\n edit_campaign_negative_keywords(self, \\\\*\\\\*kwargs) -> ApiResponse:\n\n Updates one or more campaign negative keywords.\n\n body: | REQUIRED {'description': 'An array of campaign negative keywords with updated values.'}\n\n | '**keywordId**': *number*, {'description': 'The identifer of the campaign to which the keyword is associated.'}\n | '**state**': *string*, {'description': 'The current resource state.' 
, 'Enum': '[ deleted ]'}\n\n Returns:\n\n ApiResponse\n\n \"\"\"\n return self._request(kwargs.pop('path'), data=kwargs.pop('body'),\n params=kwargs)\n",
"step-3": "<mask token>\n\n\nclass CampaignNegativeKeywords(Client):\n\n @sp_endpoint('/v2/sp/campaignNegativeKeywords/{}', method='GET')\n def get_campaign_negative_keyword(self, keywordId, **kwargs) ->ApiResponse:\n \"\"\"\n\n get_campaign_negative_keyword(self, keywordId, \\\\*\\\\*kwargs) -> ApiResponse\n\n Gets a campaign negative keyword specified by identifier.\n\n path **keywordId**:*number* | Required. The identifier of an existing keyword.\n\n\n Returns:\n\n ApiResponse\n\n \"\"\"\n return self._request(fill_query_params(kwargs.pop('path'),\n keywordId), params=kwargs)\n\n @sp_endpoint('/v2/sp/campaignNegativeKeywords/{}', method='DELETE')\n def delete_campaign_negative_keyword(self, keywordId, **kwargs\n ) ->ApiResponse:\n \"\"\"\n\n delete_campaign_negative_keyword(self, keywordId, \\\\*\\\\*kwargs) -> ApiResponse\n\n Archives a campaign negative keyword.\n\n path **keywordId**:*number* | Required. The identifier of an existing keyword.\n\n\n Returns:\n\n ApiResponse\n\n \"\"\"\n return self._request(fill_query_params(kwargs.pop('path'),\n keywordId), params=kwargs)\n\n @sp_endpoint('/v2/sp/campaignNegativeKeywords/extended/{}', method='GET')\n def get_campaign_negative_keyword_extended(self, keywordId, **kwargs\n ) ->ApiResponse:\n \"\"\"\n\n get_campaign_negative_keyword_extended(self, keywordId, \\\\*\\\\*kwargs) -> ApiResponse\n\n Gets a campaign negative keyword that has extended data fields.\n\n path **keywordId**:*number* | Required. 
The identifier of an existing keyword.\n\n Returns:\n\n ApiResponse\n\n \"\"\"\n return self._request(fill_query_params(kwargs.pop('path'),\n keywordId), params=kwargs)\n\n @sp_endpoint('/v2/sp/campaignNegativeKeywords/extended', method='GET')\n def list_campaign_negative_keywords_extended(self, **kwargs) ->ApiResponse:\n \"\"\"\n list_campaign_negative_keywords_extended(self, \\\\*\\\\*kwargs) -> ApiResponse\n\n Gets a list of campaign negative keywords that have extended data fields.\n\n query **startIndex**:*integer* | Optional. 0-indexed record offset for the result set. Default value : 0\n\n query **count**:*integer* | Optional. Number of records to include in the paged response. Defaults to max page size.\n\n query **matchTypeFilter**:*string* | Optional. Restricts results to keywords with match types within the specified comma-separated list. Available values : negativePhrase, negativeExact.\n\n query **keywordText**:*string* | Optional. Restricts results to keywords that match the specified text exactly.\n\n query **campaignIdFilter**:*string* | Optional. A comma-delimited list of campaign identifiers.\n\n query **keywordIdFilter**:*string* | Optional. Restricts results to keywords associated with campaigns specified by identifier in the comma-delimited list.\n\n Returns:\n\n ApiResponse\n\n \"\"\"\n return self._request(kwargs.pop('path'), params=kwargs)\n\n @sp_endpoint('/v2/sp/campaignNegativeKeywords', method='GET')\n def list_campaign_negative_keywords(self, **kwargs) ->ApiResponse:\n \"\"\"\n list_campaign_negative_keywords(self, \\\\*\\\\*kwargs) -> ApiResponse\n\n Gets a list of campaign negative keywords.\n\n query **startIndex**:*integer* | Optional. 0-indexed record offset for the result set. Default value : 0\n\n query **count**:*integer* | Optional. Number of records to include in the paged response. Defaults to max page size.\n\n query **matchTypeFilter**:*string* | Optional. 
Restricts results to keywords with match types within the specified comma-separated list. Available values : negativePhrase, negativeExact.\n\n query **keywordText**:*string* | Optional. Restricts results to keywords that match the specified text exactly.\n\n query **campaignIdFilter**:*string* | Optional. A comma-delimited list of campaign identifiers.\n\n query **keywordIdFilter**:*string* | Optional. Restricts results to keywords associated with campaigns specified by identifier in the comma-delimited list.\n\n Returns:\n\n ApiResponse\n\n \"\"\"\n return self._request(kwargs.pop('path'), params=kwargs)\n\n @sp_endpoint('/v2/sp/campaignNegativeKeywords', method='POST')\n def create_campaign_negative_keywords(self, **kwargs) ->ApiResponse:\n \"\"\"\n create_campaign_negative_keywords(self, \\\\*\\\\*kwargs) -> ApiResponse:\n\n Creates one or more campaign negative keywords.\n\n body: | REQUIRED {'description': 'An array of keyword objects.}'\n\n | '**campaignId**': *number*, {'description': 'The identifer of the campaign to which the keyword is associated.'}\n | '**state**': *string*, {'description': 'The current resource state.' , 'Enum': '[ enabled ]'}\n | '**keywordText**': *string*, {'description': 'The text of the expression to match against a search query.'}\n | '**matchType**': *string*, {'description': 'The type of match.' 
, 'Enum': '[ negativeExact, negativePhrase ]'}\n\n Returns:\n\n ApiResponse\n\n \"\"\"\n return self._request(kwargs.pop('path'), data=kwargs.pop('body'),\n params=kwargs)\n\n @sp_endpoint('/v2/sp/campaignNegativeKeywords', method='PUT')\n def edit_campaign_negative_keywords(self, **kwargs) ->ApiResponse:\n \"\"\"\n edit_campaign_negative_keywords(self, \\\\*\\\\*kwargs) -> ApiResponse:\n\n Updates one or more campaign negative keywords.\n\n body: | REQUIRED {'description': 'An array of campaign negative keywords with updated values.'}\n\n | '**keywordId**': *number*, {'description': 'The identifer of the campaign to which the keyword is associated.'}\n | '**state**': *string*, {'description': 'The current resource state.' , 'Enum': '[ deleted ]'}\n\n Returns:\n\n ApiResponse\n\n \"\"\"\n return self._request(kwargs.pop('path'), data=kwargs.pop('body'),\n params=kwargs)\n",
"step-4": "from ad_api.base import Client, sp_endpoint, fill_query_params, ApiResponse\n\n\nclass CampaignNegativeKeywords(Client):\n\n @sp_endpoint('/v2/sp/campaignNegativeKeywords/{}', method='GET')\n def get_campaign_negative_keyword(self, keywordId, **kwargs) ->ApiResponse:\n \"\"\"\n\n get_campaign_negative_keyword(self, keywordId, \\\\*\\\\*kwargs) -> ApiResponse\n\n Gets a campaign negative keyword specified by identifier.\n\n path **keywordId**:*number* | Required. The identifier of an existing keyword.\n\n\n Returns:\n\n ApiResponse\n\n \"\"\"\n return self._request(fill_query_params(kwargs.pop('path'),\n keywordId), params=kwargs)\n\n @sp_endpoint('/v2/sp/campaignNegativeKeywords/{}', method='DELETE')\n def delete_campaign_negative_keyword(self, keywordId, **kwargs\n ) ->ApiResponse:\n \"\"\"\n\n delete_campaign_negative_keyword(self, keywordId, \\\\*\\\\*kwargs) -> ApiResponse\n\n Archives a campaign negative keyword.\n\n path **keywordId**:*number* | Required. The identifier of an existing keyword.\n\n\n Returns:\n\n ApiResponse\n\n \"\"\"\n return self._request(fill_query_params(kwargs.pop('path'),\n keywordId), params=kwargs)\n\n @sp_endpoint('/v2/sp/campaignNegativeKeywords/extended/{}', method='GET')\n def get_campaign_negative_keyword_extended(self, keywordId, **kwargs\n ) ->ApiResponse:\n \"\"\"\n\n get_campaign_negative_keyword_extended(self, keywordId, \\\\*\\\\*kwargs) -> ApiResponse\n\n Gets a campaign negative keyword that has extended data fields.\n\n path **keywordId**:*number* | Required. 
The identifier of an existing keyword.\n\n Returns:\n\n ApiResponse\n\n \"\"\"\n return self._request(fill_query_params(kwargs.pop('path'),\n keywordId), params=kwargs)\n\n @sp_endpoint('/v2/sp/campaignNegativeKeywords/extended', method='GET')\n def list_campaign_negative_keywords_extended(self, **kwargs) ->ApiResponse:\n \"\"\"\n list_campaign_negative_keywords_extended(self, \\\\*\\\\*kwargs) -> ApiResponse\n\n Gets a list of campaign negative keywords that have extended data fields.\n\n query **startIndex**:*integer* | Optional. 0-indexed record offset for the result set. Default value : 0\n\n query **count**:*integer* | Optional. Number of records to include in the paged response. Defaults to max page size.\n\n query **matchTypeFilter**:*string* | Optional. Restricts results to keywords with match types within the specified comma-separated list. Available values : negativePhrase, negativeExact.\n\n query **keywordText**:*string* | Optional. Restricts results to keywords that match the specified text exactly.\n\n query **campaignIdFilter**:*string* | Optional. A comma-delimited list of campaign identifiers.\n\n query **keywordIdFilter**:*string* | Optional. Restricts results to keywords associated with campaigns specified by identifier in the comma-delimited list.\n\n Returns:\n\n ApiResponse\n\n \"\"\"\n return self._request(kwargs.pop('path'), params=kwargs)\n\n @sp_endpoint('/v2/sp/campaignNegativeKeywords', method='GET')\n def list_campaign_negative_keywords(self, **kwargs) ->ApiResponse:\n \"\"\"\n list_campaign_negative_keywords(self, \\\\*\\\\*kwargs) -> ApiResponse\n\n Gets a list of campaign negative keywords.\n\n query **startIndex**:*integer* | Optional. 0-indexed record offset for the result set. Default value : 0\n\n query **count**:*integer* | Optional. Number of records to include in the paged response. Defaults to max page size.\n\n query **matchTypeFilter**:*string* | Optional. 
Restricts results to keywords with match types within the specified comma-separated list. Available values : negativePhrase, negativeExact.\n\n query **keywordText**:*string* | Optional. Restricts results to keywords that match the specified text exactly.\n\n query **campaignIdFilter**:*string* | Optional. A comma-delimited list of campaign identifiers.\n\n query **keywordIdFilter**:*string* | Optional. Restricts results to keywords associated with campaigns specified by identifier in the comma-delimited list.\n\n Returns:\n\n ApiResponse\n\n \"\"\"\n return self._request(kwargs.pop('path'), params=kwargs)\n\n @sp_endpoint('/v2/sp/campaignNegativeKeywords', method='POST')\n def create_campaign_negative_keywords(self, **kwargs) ->ApiResponse:\n \"\"\"\n create_campaign_negative_keywords(self, \\\\*\\\\*kwargs) -> ApiResponse:\n\n Creates one or more campaign negative keywords.\n\n body: | REQUIRED {'description': 'An array of keyword objects.}'\n\n | '**campaignId**': *number*, {'description': 'The identifer of the campaign to which the keyword is associated.'}\n | '**state**': *string*, {'description': 'The current resource state.' , 'Enum': '[ enabled ]'}\n | '**keywordText**': *string*, {'description': 'The text of the expression to match against a search query.'}\n | '**matchType**': *string*, {'description': 'The type of match.' 
, 'Enum': '[ negativeExact, negativePhrase ]'}\n\n Returns:\n\n ApiResponse\n\n \"\"\"\n return self._request(kwargs.pop('path'), data=kwargs.pop('body'),\n params=kwargs)\n\n @sp_endpoint('/v2/sp/campaignNegativeKeywords', method='PUT')\n def edit_campaign_negative_keywords(self, **kwargs) ->ApiResponse:\n \"\"\"\n edit_campaign_negative_keywords(self, \\\\*\\\\*kwargs) -> ApiResponse:\n\n Updates one or more campaign negative keywords.\n\n body: | REQUIRED {'description': 'An array of campaign negative keywords with updated values.'}\n\n | '**keywordId**': *number*, {'description': 'The identifer of the campaign to which the keyword is associated.'}\n | '**state**': *string*, {'description': 'The current resource state.' , 'Enum': '[ deleted ]'}\n\n Returns:\n\n ApiResponse\n\n \"\"\"\n return self._request(kwargs.pop('path'), data=kwargs.pop('body'),\n params=kwargs)\n",
"step-5": "from ad_api.base import Client, sp_endpoint, fill_query_params, ApiResponse\n\nclass CampaignNegativeKeywords(Client):\n\n @sp_endpoint('/v2/sp/campaignNegativeKeywords/{}', method='GET')\n def get_campaign_negative_keyword(self, keywordId, **kwargs) -> ApiResponse:\n r\"\"\"\n\n get_campaign_negative_keyword(self, keywordId, \\*\\*kwargs) -> ApiResponse\n\n Gets a campaign negative keyword specified by identifier.\n\n path **keywordId**:*number* | Required. The identifier of an existing keyword.\n\n\n Returns:\n\n ApiResponse\n\n \"\"\"\n return self._request(fill_query_params(kwargs.pop('path'), keywordId), params=kwargs)\n\n @sp_endpoint('/v2/sp/campaignNegativeKeywords/{}', method='DELETE')\n def delete_campaign_negative_keyword(self, keywordId, **kwargs) -> ApiResponse:\n r\"\"\"\n\n delete_campaign_negative_keyword(self, keywordId, \\*\\*kwargs) -> ApiResponse\n\n Archives a campaign negative keyword.\n\n path **keywordId**:*number* | Required. The identifier of an existing keyword.\n\n\n Returns:\n\n ApiResponse\n\n \"\"\"\n return self._request(fill_query_params(kwargs.pop('path'), keywordId), params=kwargs)\n\n @sp_endpoint('/v2/sp/campaignNegativeKeywords/extended/{}', method='GET')\n def get_campaign_negative_keyword_extended(self, keywordId, **kwargs) -> ApiResponse:\n r\"\"\"\n\n get_campaign_negative_keyword_extended(self, keywordId, \\*\\*kwargs) -> ApiResponse\n\n Gets a campaign negative keyword that has extended data fields.\n\n path **keywordId**:*number* | Required. 
The identifier of an existing keyword.\n\n Returns:\n\n ApiResponse\n\n \"\"\"\n return self._request(fill_query_params(kwargs.pop('path'), keywordId), params=kwargs)\n\n @sp_endpoint('/v2/sp/campaignNegativeKeywords/extended', method='GET')\n def list_campaign_negative_keywords_extended(self, **kwargs) -> ApiResponse:\n r\"\"\"\n list_campaign_negative_keywords_extended(self, \\*\\*kwargs) -> ApiResponse\n\n Gets a list of campaign negative keywords that have extended data fields.\n\n query **startIndex**:*integer* | Optional. 0-indexed record offset for the result set. Default value : 0\n\n query **count**:*integer* | Optional. Number of records to include in the paged response. Defaults to max page size.\n\n query **matchTypeFilter**:*string* | Optional. Restricts results to keywords with match types within the specified comma-separated list. Available values : negativePhrase, negativeExact.\n\n query **keywordText**:*string* | Optional. Restricts results to keywords that match the specified text exactly.\n\n query **campaignIdFilter**:*string* | Optional. A comma-delimited list of campaign identifiers.\n\n query **keywordIdFilter**:*string* | Optional. Restricts results to keywords associated with campaigns specified by identifier in the comma-delimited list.\n\n Returns:\n\n ApiResponse\n\n \"\"\"\n return self._request(kwargs.pop('path'), params=kwargs)\n\n @sp_endpoint('/v2/sp/campaignNegativeKeywords', method='GET')\n def list_campaign_negative_keywords(self, **kwargs) -> ApiResponse:\n r\"\"\"\n list_campaign_negative_keywords(self, \\*\\*kwargs) -> ApiResponse\n\n Gets a list of campaign negative keywords.\n\n query **startIndex**:*integer* | Optional. 0-indexed record offset for the result set. Default value : 0\n\n query **count**:*integer* | Optional. Number of records to include in the paged response. Defaults to max page size.\n\n query **matchTypeFilter**:*string* | Optional. 
Restricts results to keywords with match types within the specified comma-separated list. Available values : negativePhrase, negativeExact.\n\n query **keywordText**:*string* | Optional. Restricts results to keywords that match the specified text exactly.\n\n query **campaignIdFilter**:*string* | Optional. A comma-delimited list of campaign identifiers.\n\n query **keywordIdFilter**:*string* | Optional. Restricts results to keywords associated with campaigns specified by identifier in the comma-delimited list.\n\n Returns:\n\n ApiResponse\n\n \"\"\"\n return self._request(kwargs.pop('path'), params=kwargs)\n\n @sp_endpoint('/v2/sp/campaignNegativeKeywords', method='POST')\n def create_campaign_negative_keywords(self, **kwargs) -> ApiResponse:\n r\"\"\"\n create_campaign_negative_keywords(self, \\*\\*kwargs) -> ApiResponse:\n\n Creates one or more campaign negative keywords.\n\n body: | REQUIRED {'description': 'An array of keyword objects.}'\n\n | '**campaignId**': *number*, {'description': 'The identifer of the campaign to which the keyword is associated.'}\n | '**state**': *string*, {'description': 'The current resource state.' , 'Enum': '[ enabled ]'}\n | '**keywordText**': *string*, {'description': 'The text of the expression to match against a search query.'}\n | '**matchType**': *string*, {'description': 'The type of match.' 
, 'Enum': '[ negativeExact, negativePhrase ]'}\n\n Returns:\n\n ApiResponse\n\n \"\"\"\n return self._request(kwargs.pop('path'), data=kwargs.pop('body'), params=kwargs)\n\n @sp_endpoint('/v2/sp/campaignNegativeKeywords', method='PUT')\n def edit_campaign_negative_keywords(self, **kwargs) -> ApiResponse:\n r\"\"\"\n edit_campaign_negative_keywords(self, \\*\\*kwargs) -> ApiResponse:\n\n Updates one or more campaign negative keywords.\n\n body: | REQUIRED {'description': 'An array of campaign negative keywords with updated values.'}\n\n | '**keywordId**': *number*, {'description': 'The identifer of the campaign to which the keyword is associated.'}\n | '**state**': *string*, {'description': 'The current resource state.' , 'Enum': '[ deleted ]'}\n\n Returns:\n\n ApiResponse\n\n \"\"\"\n return self._request(kwargs.pop('path'), data=kwargs.pop('body'), params=kwargs)\n",
"step-ids": [
5,
6,
8,
9,
10
]
}
|
[
5,
6,
8,
9,
10
] |
#!/usr/bin/env python
# https://github.com/git/git/blob/master/Documentation/githooks.txt#L181
# This hook is called by 'git push' and can be used to prevent a push from taking
# place. The hook is called with two parameters which provide the name and
# location of the destination remote, if a named remote is not being used both
# values will be the same.
# Information about what is to be pushed is provided on the hook's standard
# input with lines of the form:
# <local ref> SP <local sha1> SP <remote ref> SP <remote sha1> LF
# For instance, if the command +git push origin master:foreign+ were run the
# hook would receive a line like the following:
# refs/heads/master 67890 refs/heads/foreign 12345
# although the full, 40-character SHA-1s would be supplied. If the foreign ref
# does not yet exist the `<remote SHA-1>` will be 40 `0`. If a ref is to be
# deleted, the `<local ref>` will be supplied as `(delete)` and the `<local
# SHA-1>` will be 40 `0`. If the local commit was specified by something other
# than a name which could be expanded (such as `HEAD~`, or a SHA-1) it will be
# supplied as it was originally given.
# If this hook exits with a non-zero status, 'git push' will abort without
# pushing anything. Information about why the push is rejected may be sent
# to the user by writing to standard error.
import gitta_hook
import sys
# git's pre-push protocol: argv[1:3] carry the remote name and url, while the
# per-ref details ("<local ref> <local sha1> <remote ref> <remote sha1>", one
# line per pushed ref) arrive on stdin. Fold the stdin tokens onto argv so the
# positional reads below work.
sys.argv.extend(sys.stdin.read().split())  # add stdin arguments
remote_dest_uri = sys.argv[1]  # generally, uri is the name of the remote
                               # but may instead be the url
remote_dest_url = sys.argv[2]  # remote destination location
kwargs = {'remote_dest_uri': remote_dest_uri,
          'remote_dest_url': remote_dest_url}
if len(sys.argv) > 3:  # stdin was empty (nothing being pushed) when this is false
    # NOTE(review): only the FIRST pushed ref's four tokens land in kwargs;
    # tokens for any additional refs remain in sys.argv but are not parsed.
    local_ref = sys.argv[3]  # there was no 4th argument in sys.argv. Had to
                             # add by reading from stdin
    local_sha1 = sys.argv[4]
    remote_ref = sys.argv[5]
    remote_sha1 = sys.argv[6]
    kwargs.update({'local_ref': local_ref, 'local_sha1': local_sha1,
                   'remote_ref': remote_ref, 'remote_sha1': remote_sha1})
# trigger receives the full (extended) argv positionally plus the parsed
# kwargs; presumably it exits non-zero to reject the push — confirm in
# gitta_hook.
gitta_hook.trigger(*sys.argv, **kwargs)
|
normal
|
{
"blob_id": "eabc81cacacc40d55234b60927b17069980a08f8",
"index": 7245,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nsys.argv.extend(sys.stdin.read().split())\n<mask token>\nif len(sys.argv) > 3:\n local_ref = sys.argv[3]\n local_sha1 = sys.argv[4]\n remote_ref = sys.argv[5]\n remote_sha1 = sys.argv[6]\n kwargs.update({'local_ref': local_ref, 'local_sha1': local_sha1,\n 'remote_ref': remote_ref, 'remote_sha1': remote_sha1})\ngitta_hook.trigger(*sys.argv, **kwargs)\n",
"step-3": "<mask token>\nsys.argv.extend(sys.stdin.read().split())\nremote_dest_uri = sys.argv[1]\nremote_dest_url = sys.argv[2]\nkwargs = {'remote_dest_uri': remote_dest_uri, 'remote_dest_url':\n remote_dest_url}\nif len(sys.argv) > 3:\n local_ref = sys.argv[3]\n local_sha1 = sys.argv[4]\n remote_ref = sys.argv[5]\n remote_sha1 = sys.argv[6]\n kwargs.update({'local_ref': local_ref, 'local_sha1': local_sha1,\n 'remote_ref': remote_ref, 'remote_sha1': remote_sha1})\ngitta_hook.trigger(*sys.argv, **kwargs)\n",
"step-4": "import gitta_hook\nimport sys\nsys.argv.extend(sys.stdin.read().split())\nremote_dest_uri = sys.argv[1]\nremote_dest_url = sys.argv[2]\nkwargs = {'remote_dest_uri': remote_dest_uri, 'remote_dest_url':\n remote_dest_url}\nif len(sys.argv) > 3:\n local_ref = sys.argv[3]\n local_sha1 = sys.argv[4]\n remote_ref = sys.argv[5]\n remote_sha1 = sys.argv[6]\n kwargs.update({'local_ref': local_ref, 'local_sha1': local_sha1,\n 'remote_ref': remote_ref, 'remote_sha1': remote_sha1})\ngitta_hook.trigger(*sys.argv, **kwargs)\n",
"step-5": "#!/usr/bin/env python\n\n# https://github.com/git/git/blob/master/Documentation/githooks.txt#L181\n# This hook is called by 'git push' and can be used to prevent a push from taking\n# place. The hook is called with two parameters which provide the name and\n# location of the destination remote, if a named remote is not being used both\n# values will be the same.\n \n# Information about what is to be pushed is provided on the hook's standard\n# input with lines of the form:\n \n# <local ref> SP <local sha1> SP <remote ref> SP <remote sha1> LF\n \n# For instance, if the command +git push origin master:foreign+ were run the\n# hook would receive a line like the following:\n \n# refs/heads/master 67890 refs/heads/foreign 12345\n \n# although the full, 40-character SHA-1s would be supplied. If the foreign ref\n# does not yet exist the `<remote SHA-1>` will be 40 `0`. If a ref is to be\n# deleted, the `<local ref>` will be supplied as `(delete)` and the `<local\n# SHA-1>` will be 40 `0`. If the local commit was specified by something other\n# than a name which could be expanded (such as `HEAD~`, or a SHA-1) it will be\n# supplied as it was originally given.\n \n# If this hook exits with a non-zero status, 'git push' will abort without\n# pushing anything. Information about why the push is rejected may be sent\n# to the user by writing to standard error.\nimport gitta_hook\nimport sys\nsys.argv.extend(sys.stdin.read().split()) # add stdin arguments\nremote_dest_uri = sys.argv[1] # generally, uri is the name of the remote\n # but may instead be the url\nremote_dest_url = sys.argv[2] # remote destination location\nkwargs = {'remote_dest_uri': remote_dest_uri, \n 'remote_dest_url': remote_dest_url}\nif len(sys.argv) > 3: # this can fail if pre-push is going to fail anyways?\n local_ref = sys.argv[3] # there was no 4th argument in sys.argv. 
Had to\n # add by reading from stdin\n local_sha1 = sys.argv[4]\n remote_ref = sys.argv[5]\n remote_sha1 = sys.argv[6]\n kwargs.update({'local_ref': local_ref, 'local_sha1': local_sha1,\n 'remote_ref': remote_ref, 'remote_sha1': remote_sha1})\ngitta_hook.trigger(*sys.argv, **kwargs)\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import socket
import json
from typing import Dict
listadionica = ["GS", "MS", "WFC", "VALBZ", "BOND", "VALE", "XLF"]
class Burza:
    """Minimal JSON-lines client for the exchange socket protocol.

    Messages are exchanged one JSON document per line over a TCP socket.
    """

    def __init__(self, test):
        """Connect to the test or production exchange and perform the hello handshake.

        test -- when truthy, connect to the team's test exchange host.
        """
        if test:
            host_name = "test-exch-partitivnisumari"
        else:
            host_name = "production"
        port = 25000  # same port on both hosts

        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.connect((host_name, port))
        # Line-buffered read/write text stream layered over the socket.
        self.stream = s.makefile('rw', 1)

        # Fixed: the original `self.zapisi(("type": "hello", ...))` used
        # parentheses with colons, which is a syntax error; a dict is intended.
        self.zapisi({"type": "hello", "team": 'PARTITIVNISUMARI'})
        assert self.citaj()['type'] == 'hello'
        self.order_id = 0

    def citaj(self, store_last=True):
        """Read and decode one JSON message; return None at end of stream.

        store_last -- when true (default), also remember the decoded message
        in self.last_data. (The flag was previously accepted but ignored.)
        """
        data = self.stream.readline()
        if data == "":
            return None  # EOF: the exchange closed the connection
        data = json.loads(data)
        if store_last:
            self.last_data = data
        return data

    def zapisi(self, data):
        """Serialize *data* as a single JSON line onto the exchange stream."""
        json.dump(data, self.stream)
        self.stream.write("\n")

    def kupi(self, buy_sell, symbol, price, size):
        """Submit an 'add' order and bump the local order-id counter.

        buy_sell -- order direction, "BUY" or "SELL"; other values are ignored.
        """
        trade = {'type': 'add', 'order_id': self.order_id,
                 'symbol': symbol, 'dir': buy_sell, 'price': price, 'size': size}
        self.order_id += 1
        # Both directions sent the same message in the original; the invalid
        # `!!!` placeholders that followed each send have been dropped.
        if buy_sell in ("SELL", "BUY"):
            self.zapisi(trade)
def logger(dicc, ord):
    """Record volume-weighted average book prices for a 'book' message.

    For a message of type 'book', appends the tuple
    (avg buy price, avg sell price) — integer-divided averages weighted by
    size — to dicc[symbol]. Messages of any other type, or books that are
    empty on either side, leave dicc untouched.
    """
    if ord['type'] != 'book':
        return
    buy_value = sum(price * size for price, size in ord['buy'])
    buy_count = sum(size for _, size in ord['buy'])
    sell_value = sum(price * size for price, size in ord['sell'])
    sell_count = sum(size for _, size in ord['sell'])
    if buy_count and sell_count:
        dicc[ord['symbol']].append(
            (buy_value // buy_count, sell_value // sell_count))
def logN(burza, n):
dicc = {}
readed_results = []
for i in range(n):
readed_results.append(burza.citaj())
for ord in readed_results:
if ord['type'] == 'book':
buy = ord['buy']
sell = ord['sell']
count_buy = 0
value_buy = 0
for p, n in buy:
value_buy +=
|
normal
|
{
"blob_id": "5a895c864c496e1073d75937909c994432a71d75",
"index": 9760,
"step-1": "import socket\nimport json\n\nfrom typing import Dict\n\nlistadionica = [\"GS\", \"MS\", \"WFC\", \"VALBZ\", \"BOND\", \"VALE\", \"XLF\"]\n\nclass Burza:\n def __init__ (self, test):\n\n if test:\n host_name = \"test-exch-partitivnisumari\"\n port = 25000\n else:\n host_name = \"production\"\n port = 25000\n\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((host_name, port))\n self.stream = s.makefile('rw', 1)\n\n self.zapisi((\"type\": \"hello\", \"team\": 'PARTITIVNISUMARI'))\n assert self.citaj()['type'] == 'hello'\n self.order_id = 0\n\n def citaj(self, store_last=True):\n data = self.stream.readline()\n if(data == \"\"):\n return None\n else:\n data = json.loads(data)\n self.last_data = data\n !!!\n return data\n\n def zapisi(self, data):\n json.dump(data, self.stream)\n self.stream.write(\"\\n\")\n\n def kupi(self, buy_sell, symbol, price, size):\n trade = {'type': 'add', 'order_id': self.order_id,\n 'symbol': symbol, 'dir': buy_sell, 'price': price, 'size': size}\n self.order_id += 1\n\n if buy_sell == \"SELL\":\n self.zapisi(trade)\n !!!\n elif buy_sell == \"BUY\":\n self.zapisi(trade)\n !!!\n\ndef logger(dicc, ord):\n if ord['type'] == 'book':\n buy = ord['buy']\n sell = ord['sell']\n\n count_buy = 0\n value_buy = 0\n for p, n in buy:\n value_buy += p * n\n count_buy += n\n\n count_sell = 0\n value_sell = 0\n for p, n in sell:\n value_sell += p * n\n count_sell += n\n if count_buy != 0 and count_sell != 0:\n dicc[ord['symbol']].append((value_buy//count_buy, value_sell//count_sell))\n\ndef logN(burza, n):\n dicc = {}\n readed_results = []\n for i in range(n):\n readed_results.append(burza.citaj())\n for ord in readed_results:\n if ord['type'] == 'book':\n buy = ord['buy']\n sell = ord['sell']\n\n count_buy = 0\n value_buy = 0\n for p, n in buy:\n value_buy +=",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
class Solution(object):

    def removeNthFromEnd(self, head, n):
        """Remove the n-th node from the end of a singly linked list.

        Uses a sentinel head plus two pointers kept n nodes apart, so the
        trailing pointer stops just before the node to delete.
        """
        sentinel = ListNode(-1)
        sentinel.next = head
        lead = sentinel
        trail = sentinel
        # Advance the lead pointer n steps so the gap between the two
        # pointers equals n.
        for _ in range(n):
            lead = lead.next
        # Walk both pointers until lead reaches the final node; trail then
        # sits just before the node to remove.
        while lead.next:
            lead = lead.next
            trail = trail.next
        trail.next = trail.next.next
        return sentinel.next
|
normal
|
{
"blob_id": "7e71c97070285b051b23448c755e3d41b2909dda",
"index": 3884,
"step-1": "<mask token>\n",
"step-2": "class Solution(object):\n <mask token>\n",
"step-3": "class Solution(object):\n\n def removeNthFromEnd(self, head, n):\n dummy = ListNode(-1)\n dummy.next = head\n first, second = dummy, dummy\n for i in range(n):\n first = first.next\n while first.next:\n first = first.next\n second = second.next\n second.next = second.next.next\n return dummy.next\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
from flask import url_for
from bs4 import BeautifulSoup
from unittest.mock import ANY
import app
from app.notify_client.models import InvitedUser
from tests.conftest import sample_invite as create_sample_invite
from tests.conftest import mock_check_invite_token as mock_check_token_invite
def test_existing_user_accept_invite_calls_api_and_redirects_to_dashboard(
    client,
    service_one,
    api_user_active,
    sample_invite,
    mock_get_service,
    mock_check_invite_token,
    mock_get_user_by_email,
    mock_get_users_by_service,
    mock_accept_invite,
    mock_add_user_to_service,
):
    """An existing user who accepts an invite is added to the service and
    redirected straight to that service's dashboard."""
    service_id = service_one['id']
    permissions = ['send_messages', 'manage_service', 'manage_api_keys']
    dashboard_url = 'http://localhost/services/{}/dashboard'.format(service_id)

    response = client.get(url_for('main.accept_invite', token='thisisnotarealtoken'))

    mock_check_invite_token.assert_called_with('thisisnotarealtoken')
    mock_get_user_by_email.assert_called_with('invited_user@test.gov.uk')
    assert mock_accept_invite.call_count == 1
    mock_add_user_to_service.assert_called_with(service_id, api_user_active.id, permissions)
    assert response.status_code == 302
    assert response.location == dashboard_url
def test_existing_user_with_no_permissions_accept_invite(
    client,
    mocker,
    service_one,
    api_user_active,
    sample_invite,
    mock_check_invite_token,
    mock_get_user_by_email,
    mock_get_users_by_service,
    mock_add_user_to_service,
    mock_get_service,
):
    """An invite carrying no permissions still adds the user to the
    service, with an empty permission list."""
    sample_invite['permissions'] = ''
    mocker.patch('app.invite_api_client.accept_invite', return_value=sample_invite)

    response = client.get(url_for('main.accept_invite', token='thisisnotarealtoken'))

    mock_add_user_to_service.assert_called_with(service_one['id'], api_user_active.id, [])
    assert response.status_code == 302
def test_if_existing_user_accepts_twice_they_redirect_to_sign_in(
    client,
    mocker,
    sample_invite,
    mock_get_service,
):
    """Re-using an already-accepted invite sends the user to sign in again."""
    sample_invite['status'] = 'accepted'
    mocker.patch(
        'app.invite_api_client.check_token',
        return_value=InvitedUser(**sample_invite),
    )

    response = client.get(
        url_for('main.accept_invite', token='thisisnotarealtoken'),
        follow_redirects=True,
    )

    assert response.status_code == 200
    page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')
    heading = page.h1.string
    first_paragraph = page.select('main p')[0].text.strip()
    assert heading == 'You need to sign in again'
    assert first_paragraph == 'We signed you out because you haven’t used Notify for a while.'
def test_existing_user_of_service_get_redirected_to_signin(
    client,
    mocker,
    api_user_active,
    sample_invite,
    mock_get_service,
    mock_get_user_by_email,
    mock_accept_invite,
):
    """A user who already belongs to the service is asked to sign in again
    when they follow the invite link."""
    sample_invite['email_address'] = api_user_active.email_address
    mocker.patch(
        'app.invite_api_client.check_token',
        return_value=InvitedUser(**sample_invite),
    )
    mocker.patch('app.user_api_client.get_users_for_service', return_value=[api_user_active])

    response = client.get(
        url_for('main.accept_invite', token='thisisnotarealtoken'),
        follow_redirects=True,
    )

    assert response.status_code == 200
    page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')
    assert page.h1.string == 'You need to sign in again'
    assert page.select('main p')[0].text.strip() == (
        'We signed you out because you haven’t used Notify for a while.'
    )
    assert mock_accept_invite.call_count == 1
def test_existing_signed_out_user_accept_invite_redirects_to_sign_in(
    client,
    service_one,
    api_user_active,
    sample_invite,
    mock_check_invite_token,
    mock_get_user_by_email,
    mock_get_users_by_service,
    mock_add_user_to_service,
    mock_accept_invite,
    mock_get_service,
):
    """A signed-out existing user is added to the service but must sign in
    again before reaching it."""
    permissions = ['send_messages', 'manage_service', 'manage_api_keys']

    response = client.get(
        url_for('main.accept_invite', token='thisisnotarealtoken'),
        follow_redirects=True,
    )

    mock_check_invite_token.assert_called_with('thisisnotarealtoken')
    mock_get_user_by_email.assert_called_with('invited_user@test.gov.uk')
    mock_add_user_to_service.assert_called_with(service_one['id'], api_user_active.id, permissions)
    assert mock_accept_invite.call_count == 1
    assert response.status_code == 200
    page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')
    assert page.h1.string == 'You need to sign in again'
    assert page.select('main p')[0].text.strip() == (
        'We signed you out because you haven’t used Notify for a while.'
    )
def test_new_user_accept_invite_calls_api_and_redirects_to_registration(
    client,
    service_one,
    mock_check_invite_token,
    mock_dont_get_user_by_email,
    mock_add_user_to_service,
    mock_get_users_by_service,
    mock_get_service,
):
    """An invite for an email with no existing account redirects to the
    register-from-invite page."""
    response = client.get(url_for('main.accept_invite', token='thisisnotarealtoken'))

    mock_check_invite_token.assert_called_with('thisisnotarealtoken')
    mock_dont_get_user_by_email.assert_called_with('invited_user@test.gov.uk')
    assert response.status_code == 302
    assert response.location == 'http://localhost/register-from-invite'
def test_new_user_accept_invite_calls_api_and_views_registration_page(
    client,
    service_one,
    mock_check_invite_token,
    mock_dont_get_user_by_email,
    mock_add_user_to_service,
    mock_get_users_by_service,
    mock_get_service,
):
    """Following the invite through shows a registration form pre-filled
    with the invited email address and the service id as hidden fields."""
    response = client.get(
        url_for('main.accept_invite', token='thisisnotarealtoken'),
        follow_redirects=True,
    )

    mock_check_invite_token.assert_called_with('thisisnotarealtoken')
    mock_dont_get_user_by_email.assert_called_with('invited_user@test.gov.uk')
    assert response.status_code == 200

    page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')
    assert page.h1.string.strip() == 'Create an account'
    assert page.find('main').find('p').text.strip() == (
        'Your account will be created with this email: invited_user@test.gov.uk'
    )

    form = page.find('form')
    # The hidden inputs carry the invite details through registration.
    email = form.find('input', type='hidden', id='email_address')
    service = form.find('input', type='hidden', id='service')
    assert email
    assert email.attrs['value'] == 'invited_user@test.gov.uk'
    assert form.find('input', id='name')
    assert form.find('input', id='password')
    assert service
    assert service.attrs['value'] == service_one['id']
def test_cancelled_invited_user_accepts_invited_redirect_to_cancelled_invitation(
    client,
    service_one,
    mocker,
    mock_get_user,
    mock_get_service,
):
    """Following a cancelled invite shows the 'invitation cancelled' page."""
    mock_check_token_invite(
        mocker,
        create_sample_invite(mocker, service_one, status='cancelled'),
    )

    response = client.get(url_for('main.accept_invite', token='thisisnotarealtoken'))

    app.invite_api_client.check_token.assert_called_with('thisisnotarealtoken')
    assert response.status_code == 200
    page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')
    assert page.h1.string.strip() == 'The invitation you were sent has been cancelled'
def test_new_user_accept_invite_completes_new_registration_redirects_to_verify(
    client,
    service_one,
    sample_invite,
    api_user_active,
    mock_check_invite_token,
    mock_dont_get_user_by_email,
    mock_is_email_unique,
    mock_register_user,
    mock_send_verify_code,
    mock_accept_invite,
    mock_get_users_by_service,
    mock_add_user_to_service,
    mock_get_service,
):
    """End-to-end for a brand-new user: following the invite link stashes
    the invite in the session and redirects to registration; submitting the
    registration form registers the user, sends an SMS verify code and
    redirects to the verify page.
    """
    expected_service = service_one['id']
    expected_email = sample_invite['email_address']
    expected_from_user = service_one['users'][0]
    # Step 1: the invite link redirects an unknown email to registration
    # and stores the invite details under 'invited_user' in the session.
    expected_redirect_location = 'http://localhost/register-from-invite'
    response = client.get(url_for('main.accept_invite', token='thisisnotarealtoken'))
    with client.session_transaction() as session:
        assert response.status_code == 302
        assert response.location == expected_redirect_location
        invited_user = session.get('invited_user')
        assert invited_user
        assert expected_service == invited_user['service']
        assert expected_email == invited_user['email_address']
        assert expected_from_user == invited_user['from_user']
    # Step 2: posting the registration form registers the user, triggers
    # an SMS verification code, and moves on to the verify page.
    data = {'service': invited_user['service'],
            'email_address': invited_user['email_address'],
            'from_user': invited_user['from_user'],
            'password': 'longpassword',
            'mobile_number': '+447890123456',
            'name': 'Invited User'
            }
    expected_redirect_location = 'http://localhost/verify'
    response = client.post(url_for('main.register_from_invite'), data=data)
    assert response.status_code == 302
    assert response.location == expected_redirect_location
    mock_send_verify_code.assert_called_once_with(ANY, 'sms', data['mobile_number'])
    mock_register_user.assert_called_with(data['name'],
                                          data['email_address'],
                                          data['mobile_number'],
                                          data['password'])
    assert mock_accept_invite.call_count == 1
def test_signed_in_existing_user_cannot_use_anothers_invite(
    logged_in_client,
    mocker,
    api_user_active,
    sample_invite,
    mock_get_user,
    mock_accept_invite,
    mock_get_service,
):
    """Using an invite addressed to a different email while signed in yields
    a 403 with an explanatory banner, and the invite is not accepted."""
    mocker.patch(
        'app.invite_api_client.check_token',
        return_value=InvitedUser(**sample_invite),
    )
    mocker.patch('app.user_api_client.get_users_for_service', return_value=[api_user_active])

    response = logged_in_client.get(
        url_for('main.accept_invite', token='thisisnotarealtoken'),
        follow_redirects=True,
    )

    assert response.status_code == 403
    page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')
    assert page.h1.string.strip() == '403'
    banners = page.find_all('div', class_='banner-dangerous')
    assert len(banners) == 1
    banner_text = banners[0].text.strip()
    assert "You’re signed in as test@user.gov.uk." in banner_text
    assert "This invite is for another email address." in banner_text
    assert "Sign out and click the link again to accept this invite." in banner_text
    assert mock_accept_invite.call_count == 0
def test_new_invited_user_verifies_and_added_to_service(
    client,
    service_one,
    sample_invite,
    api_user_active,
    mock_check_invite_token,
    mock_dont_get_user_by_email,
    mock_is_email_unique,
    mock_register_user,
    mock_send_verify_code,
    mock_check_verify_code,
    mock_get_user,
    mock_update_user,
    mock_add_user_to_service,
    mock_accept_invite,
    mock_get_service,
    mock_get_service_templates,
    mock_get_template_statistics,
    mock_get_jobs,
    mock_has_permissions,
    mock_get_users_by_service,
    mock_get_detailed_service,
    mock_get_usage,
):
    """Full happy path for a new invited user: accept the invite, register,
    verify the SMS code, get added to the service with the invite's
    permissions, and land on the service dashboard.
    """
    # visit accept token page
    response = client.get(url_for('main.accept_invite', token='thisisnotarealtoken'))
    data = {'service': sample_invite['service'],
            'email_address': sample_invite['email_address'],
            'from_user': sample_invite['from_user'],
            'password': 'longpassword',
            'mobile_number': '+447890123456',
            'name': 'Invited User'
            }
    # get redirected to register from invite
    response = client.post(url_for('main.register_from_invite'), data=data)
    # that sends user on to verify
    response = client.post(url_for('main.verify'), data={'sms_code': '12345'}, follow_redirects=True)
    # when they post codes back to admin user should be added to
    # service and sent on to dash board
    expected_permissions = ['send_messages', 'manage_service', 'manage_api_keys']
    with client.session_transaction() as session:
        new_user_id = session['user_id']
        mock_add_user_to_service.assert_called_with(data['service'], new_user_id, expected_permissions)
        mock_accept_invite.assert_called_with(data['service'], sample_invite['id'])
        mock_check_verify_code.assert_called_once_with(new_user_id, '12345', 'sms')
        # The session should now be scoped to the invited service.
        assert service_one['id'] == session['service_id']
    raw_html = response.data.decode('utf-8')
    page = BeautifulSoup(raw_html, 'html.parser')
    assert page.find('h1').text == 'Dashboard'
|
normal
|
{
"blob_id": "0baa133bd9eb8a162a82b23ba4d26cdd34f701c4",
"index": 1507,
"step-1": "<mask token>\n\n\ndef test_existing_user_accept_invite_calls_api_and_redirects_to_dashboard(\n client, service_one, api_user_active, sample_invite, mock_get_service,\n mock_check_invite_token, mock_get_user_by_email,\n mock_get_users_by_service, mock_accept_invite, mock_add_user_to_service):\n expected_service = service_one['id']\n expected_redirect_location = ('http://localhost/services/{}/dashboard'.\n format(expected_service))\n expected_permissions = ['send_messages', 'manage_service',\n 'manage_api_keys']\n response = client.get(url_for('main.accept_invite', token=\n 'thisisnotarealtoken'))\n mock_check_invite_token.assert_called_with('thisisnotarealtoken')\n mock_get_user_by_email.assert_called_with('invited_user@test.gov.uk')\n assert mock_accept_invite.call_count == 1\n mock_add_user_to_service.assert_called_with(expected_service,\n api_user_active.id, expected_permissions)\n assert response.status_code == 302\n assert response.location == expected_redirect_location\n\n\ndef test_existing_user_with_no_permissions_accept_invite(client, mocker,\n service_one, api_user_active, sample_invite, mock_check_invite_token,\n mock_get_user_by_email, mock_get_users_by_service,\n mock_add_user_to_service, mock_get_service):\n expected_service = service_one['id']\n sample_invite['permissions'] = ''\n expected_permissions = []\n mocker.patch('app.invite_api_client.accept_invite', return_value=\n sample_invite)\n response = client.get(url_for('main.accept_invite', token=\n 'thisisnotarealtoken'))\n mock_add_user_to_service.assert_called_with(expected_service,\n api_user_active.id, expected_permissions)\n assert response.status_code == 302\n\n\ndef test_if_existing_user_accepts_twice_they_redirect_to_sign_in(client,\n mocker, sample_invite, mock_get_service):\n sample_invite['status'] = 'accepted'\n invite = InvitedUser(**sample_invite)\n mocker.patch('app.invite_api_client.check_token', return_value=invite)\n response = client.get(url_for('main.accept_invite', 
token=\n 'thisisnotarealtoken'), follow_redirects=True)\n assert response.status_code == 200\n page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')\n assert (page.h1.string, page.select('main p')[0].text.strip()) == (\n 'You need to sign in again',\n 'We signed you out because you haven’t used Notify for a while.')\n\n\ndef test_existing_user_of_service_get_redirected_to_signin(client, mocker,\n api_user_active, sample_invite, mock_get_service,\n mock_get_user_by_email, mock_accept_invite):\n sample_invite['email_address'] = api_user_active.email_address\n invite = InvitedUser(**sample_invite)\n mocker.patch('app.invite_api_client.check_token', return_value=invite)\n mocker.patch('app.user_api_client.get_users_for_service', return_value=\n [api_user_active])\n response = client.get(url_for('main.accept_invite', token=\n 'thisisnotarealtoken'), follow_redirects=True)\n assert response.status_code == 200\n page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')\n assert (page.h1.string, page.select('main p')[0].text.strip()) == (\n 'You need to sign in again',\n 'We signed you out because you haven’t used Notify for a while.')\n assert mock_accept_invite.call_count == 1\n\n\ndef test_existing_signed_out_user_accept_invite_redirects_to_sign_in(client,\n service_one, api_user_active, sample_invite, mock_check_invite_token,\n mock_get_user_by_email, mock_get_users_by_service,\n mock_add_user_to_service, mock_accept_invite, mock_get_service):\n expected_service = service_one['id']\n expected_permissions = ['send_messages', 'manage_service',\n 'manage_api_keys']\n response = client.get(url_for('main.accept_invite', token=\n 'thisisnotarealtoken'), follow_redirects=True)\n mock_check_invite_token.assert_called_with('thisisnotarealtoken')\n mock_get_user_by_email.assert_called_with('invited_user@test.gov.uk')\n mock_add_user_to_service.assert_called_with(expected_service,\n api_user_active.id, expected_permissions)\n assert 
mock_accept_invite.call_count == 1\n assert response.status_code == 200\n page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')\n assert (page.h1.string, page.select('main p')[0].text.strip()) == (\n 'You need to sign in again',\n 'We signed you out because you haven’t used Notify for a while.')\n\n\ndef test_new_user_accept_invite_calls_api_and_redirects_to_registration(client,\n service_one, mock_check_invite_token, mock_dont_get_user_by_email,\n mock_add_user_to_service, mock_get_users_by_service, mock_get_service):\n expected_redirect_location = 'http://localhost/register-from-invite'\n response = client.get(url_for('main.accept_invite', token=\n 'thisisnotarealtoken'))\n mock_check_invite_token.assert_called_with('thisisnotarealtoken')\n mock_dont_get_user_by_email.assert_called_with('invited_user@test.gov.uk')\n assert response.status_code == 302\n assert response.location == expected_redirect_location\n\n\ndef test_new_user_accept_invite_calls_api_and_views_registration_page(client,\n service_one, mock_check_invite_token, mock_dont_get_user_by_email,\n mock_add_user_to_service, mock_get_users_by_service, mock_get_service):\n response = client.get(url_for('main.accept_invite', token=\n 'thisisnotarealtoken'), follow_redirects=True)\n mock_check_invite_token.assert_called_with('thisisnotarealtoken')\n mock_dont_get_user_by_email.assert_called_with('invited_user@test.gov.uk')\n assert response.status_code == 200\n page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')\n assert page.h1.string.strip() == 'Create an account'\n email_in_page = page.find('main').find('p')\n assert email_in_page.text.strip(\n ) == 'Your account will be created with this email: invited_user@test.gov.uk'\n form = page.find('form')\n name = form.find('input', id='name')\n password = form.find('input', id='password')\n service = form.find('input', type='hidden', id='service')\n email = form.find('input', type='hidden', id='email_address')\n assert email\n assert 
email.attrs['value'] == 'invited_user@test.gov.uk'\n assert name\n assert password\n assert service\n assert service.attrs['value'] == service_one['id']\n\n\n<mask token>\n\n\ndef test_signed_in_existing_user_cannot_use_anothers_invite(logged_in_client,\n mocker, api_user_active, sample_invite, mock_get_user,\n mock_accept_invite, mock_get_service):\n invite = InvitedUser(**sample_invite)\n mocker.patch('app.invite_api_client.check_token', return_value=invite)\n mocker.patch('app.user_api_client.get_users_for_service', return_value=\n [api_user_active])\n response = logged_in_client.get(url_for('main.accept_invite', token=\n 'thisisnotarealtoken'), follow_redirects=True)\n assert response.status_code == 403\n page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')\n assert page.h1.string.strip() == '403'\n flash_banners = page.find_all('div', class_='banner-dangerous')\n assert len(flash_banners) == 1\n banner_contents = flash_banners[0].text.strip()\n assert 'You’re signed in as test@user.gov.uk.' in banner_contents\n assert 'This invite is for another email address.' in banner_contents\n assert 'Sign out and click the link again to accept this invite.' in banner_contents\n assert mock_accept_invite.call_count == 0\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef test_existing_user_accept_invite_calls_api_and_redirects_to_dashboard(\n client, service_one, api_user_active, sample_invite, mock_get_service,\n mock_check_invite_token, mock_get_user_by_email,\n mock_get_users_by_service, mock_accept_invite, mock_add_user_to_service):\n expected_service = service_one['id']\n expected_redirect_location = ('http://localhost/services/{}/dashboard'.\n format(expected_service))\n expected_permissions = ['send_messages', 'manage_service',\n 'manage_api_keys']\n response = client.get(url_for('main.accept_invite', token=\n 'thisisnotarealtoken'))\n mock_check_invite_token.assert_called_with('thisisnotarealtoken')\n mock_get_user_by_email.assert_called_with('invited_user@test.gov.uk')\n assert mock_accept_invite.call_count == 1\n mock_add_user_to_service.assert_called_with(expected_service,\n api_user_active.id, expected_permissions)\n assert response.status_code == 302\n assert response.location == expected_redirect_location\n\n\ndef test_existing_user_with_no_permissions_accept_invite(client, mocker,\n service_one, api_user_active, sample_invite, mock_check_invite_token,\n mock_get_user_by_email, mock_get_users_by_service,\n mock_add_user_to_service, mock_get_service):\n expected_service = service_one['id']\n sample_invite['permissions'] = ''\n expected_permissions = []\n mocker.patch('app.invite_api_client.accept_invite', return_value=\n sample_invite)\n response = client.get(url_for('main.accept_invite', token=\n 'thisisnotarealtoken'))\n mock_add_user_to_service.assert_called_with(expected_service,\n api_user_active.id, expected_permissions)\n assert response.status_code == 302\n\n\ndef test_if_existing_user_accepts_twice_they_redirect_to_sign_in(client,\n mocker, sample_invite, mock_get_service):\n sample_invite['status'] = 'accepted'\n invite = InvitedUser(**sample_invite)\n mocker.patch('app.invite_api_client.check_token', return_value=invite)\n response = client.get(url_for('main.accept_invite', 
token=\n 'thisisnotarealtoken'), follow_redirects=True)\n assert response.status_code == 200\n page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')\n assert (page.h1.string, page.select('main p')[0].text.strip()) == (\n 'You need to sign in again',\n 'We signed you out because you haven’t used Notify for a while.')\n\n\ndef test_existing_user_of_service_get_redirected_to_signin(client, mocker,\n api_user_active, sample_invite, mock_get_service,\n mock_get_user_by_email, mock_accept_invite):\n sample_invite['email_address'] = api_user_active.email_address\n invite = InvitedUser(**sample_invite)\n mocker.patch('app.invite_api_client.check_token', return_value=invite)\n mocker.patch('app.user_api_client.get_users_for_service', return_value=\n [api_user_active])\n response = client.get(url_for('main.accept_invite', token=\n 'thisisnotarealtoken'), follow_redirects=True)\n assert response.status_code == 200\n page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')\n assert (page.h1.string, page.select('main p')[0].text.strip()) == (\n 'You need to sign in again',\n 'We signed you out because you haven’t used Notify for a while.')\n assert mock_accept_invite.call_count == 1\n\n\ndef test_existing_signed_out_user_accept_invite_redirects_to_sign_in(client,\n service_one, api_user_active, sample_invite, mock_check_invite_token,\n mock_get_user_by_email, mock_get_users_by_service,\n mock_add_user_to_service, mock_accept_invite, mock_get_service):\n expected_service = service_one['id']\n expected_permissions = ['send_messages', 'manage_service',\n 'manage_api_keys']\n response = client.get(url_for('main.accept_invite', token=\n 'thisisnotarealtoken'), follow_redirects=True)\n mock_check_invite_token.assert_called_with('thisisnotarealtoken')\n mock_get_user_by_email.assert_called_with('invited_user@test.gov.uk')\n mock_add_user_to_service.assert_called_with(expected_service,\n api_user_active.id, expected_permissions)\n assert 
mock_accept_invite.call_count == 1\n assert response.status_code == 200\n page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')\n assert (page.h1.string, page.select('main p')[0].text.strip()) == (\n 'You need to sign in again',\n 'We signed you out because you haven’t used Notify for a while.')\n\n\ndef test_new_user_accept_invite_calls_api_and_redirects_to_registration(client,\n service_one, mock_check_invite_token, mock_dont_get_user_by_email,\n mock_add_user_to_service, mock_get_users_by_service, mock_get_service):\n expected_redirect_location = 'http://localhost/register-from-invite'\n response = client.get(url_for('main.accept_invite', token=\n 'thisisnotarealtoken'))\n mock_check_invite_token.assert_called_with('thisisnotarealtoken')\n mock_dont_get_user_by_email.assert_called_with('invited_user@test.gov.uk')\n assert response.status_code == 302\n assert response.location == expected_redirect_location\n\n\ndef test_new_user_accept_invite_calls_api_and_views_registration_page(client,\n service_one, mock_check_invite_token, mock_dont_get_user_by_email,\n mock_add_user_to_service, mock_get_users_by_service, mock_get_service):\n response = client.get(url_for('main.accept_invite', token=\n 'thisisnotarealtoken'), follow_redirects=True)\n mock_check_invite_token.assert_called_with('thisisnotarealtoken')\n mock_dont_get_user_by_email.assert_called_with('invited_user@test.gov.uk')\n assert response.status_code == 200\n page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')\n assert page.h1.string.strip() == 'Create an account'\n email_in_page = page.find('main').find('p')\n assert email_in_page.text.strip(\n ) == 'Your account will be created with this email: invited_user@test.gov.uk'\n form = page.find('form')\n name = form.find('input', id='name')\n password = form.find('input', id='password')\n service = form.find('input', type='hidden', id='service')\n email = form.find('input', type='hidden', id='email_address')\n assert email\n assert 
email.attrs['value'] == 'invited_user@test.gov.uk'\n assert name\n assert password\n assert service\n assert service.attrs['value'] == service_one['id']\n\n\ndef test_cancelled_invited_user_accepts_invited_redirect_to_cancelled_invitation(\n client, service_one, mocker, mock_get_user, mock_get_service):\n cancelled_invitation = create_sample_invite(mocker, service_one, status\n ='cancelled')\n mock_check_token_invite(mocker, cancelled_invitation)\n response = client.get(url_for('main.accept_invite', token=\n 'thisisnotarealtoken'))\n app.invite_api_client.check_token.assert_called_with('thisisnotarealtoken')\n assert response.status_code == 200\n page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')\n assert page.h1.string.strip(\n ) == 'The invitation you were sent has been cancelled'\n\n\n<mask token>\n\n\ndef test_signed_in_existing_user_cannot_use_anothers_invite(logged_in_client,\n mocker, api_user_active, sample_invite, mock_get_user,\n mock_accept_invite, mock_get_service):\n invite = InvitedUser(**sample_invite)\n mocker.patch('app.invite_api_client.check_token', return_value=invite)\n mocker.patch('app.user_api_client.get_users_for_service', return_value=\n [api_user_active])\n response = logged_in_client.get(url_for('main.accept_invite', token=\n 'thisisnotarealtoken'), follow_redirects=True)\n assert response.status_code == 403\n page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')\n assert page.h1.string.strip() == '403'\n flash_banners = page.find_all('div', class_='banner-dangerous')\n assert len(flash_banners) == 1\n banner_contents = flash_banners[0].text.strip()\n assert 'You’re signed in as test@user.gov.uk.' in banner_contents\n assert 'This invite is for another email address.' in banner_contents\n assert 'Sign out and click the link again to accept this invite.' 
in banner_contents\n assert mock_accept_invite.call_count == 0\n\n\ndef test_new_invited_user_verifies_and_added_to_service(client, service_one,\n sample_invite, api_user_active, mock_check_invite_token,\n mock_dont_get_user_by_email, mock_is_email_unique, mock_register_user,\n mock_send_verify_code, mock_check_verify_code, mock_get_user,\n mock_update_user, mock_add_user_to_service, mock_accept_invite,\n mock_get_service, mock_get_service_templates,\n mock_get_template_statistics, mock_get_jobs, mock_has_permissions,\n mock_get_users_by_service, mock_get_detailed_service, mock_get_usage):\n response = client.get(url_for('main.accept_invite', token=\n 'thisisnotarealtoken'))\n data = {'service': sample_invite['service'], 'email_address':\n sample_invite['email_address'], 'from_user': sample_invite[\n 'from_user'], 'password': 'longpassword', 'mobile_number':\n '+447890123456', 'name': 'Invited User'}\n response = client.post(url_for('main.register_from_invite'), data=data)\n response = client.post(url_for('main.verify'), data={'sms_code':\n '12345'}, follow_redirects=True)\n expected_permissions = ['send_messages', 'manage_service',\n 'manage_api_keys']\n with client.session_transaction() as session:\n new_user_id = session['user_id']\n mock_add_user_to_service.assert_called_with(data['service'],\n new_user_id, expected_permissions)\n mock_accept_invite.assert_called_with(data['service'],\n sample_invite['id'])\n mock_check_verify_code.assert_called_once_with(new_user_id, '12345',\n 'sms')\n assert service_one['id'] == session['service_id']\n raw_html = response.data.decode('utf-8')\n page = BeautifulSoup(raw_html, 'html.parser')\n assert page.find('h1').text == 'Dashboard'\n",
"step-3": "<mask token>\n\n\ndef test_existing_user_accept_invite_calls_api_and_redirects_to_dashboard(\n client, service_one, api_user_active, sample_invite, mock_get_service,\n mock_check_invite_token, mock_get_user_by_email,\n mock_get_users_by_service, mock_accept_invite, mock_add_user_to_service):\n expected_service = service_one['id']\n expected_redirect_location = ('http://localhost/services/{}/dashboard'.\n format(expected_service))\n expected_permissions = ['send_messages', 'manage_service',\n 'manage_api_keys']\n response = client.get(url_for('main.accept_invite', token=\n 'thisisnotarealtoken'))\n mock_check_invite_token.assert_called_with('thisisnotarealtoken')\n mock_get_user_by_email.assert_called_with('invited_user@test.gov.uk')\n assert mock_accept_invite.call_count == 1\n mock_add_user_to_service.assert_called_with(expected_service,\n api_user_active.id, expected_permissions)\n assert response.status_code == 302\n assert response.location == expected_redirect_location\n\n\ndef test_existing_user_with_no_permissions_accept_invite(client, mocker,\n service_one, api_user_active, sample_invite, mock_check_invite_token,\n mock_get_user_by_email, mock_get_users_by_service,\n mock_add_user_to_service, mock_get_service):\n expected_service = service_one['id']\n sample_invite['permissions'] = ''\n expected_permissions = []\n mocker.patch('app.invite_api_client.accept_invite', return_value=\n sample_invite)\n response = client.get(url_for('main.accept_invite', token=\n 'thisisnotarealtoken'))\n mock_add_user_to_service.assert_called_with(expected_service,\n api_user_active.id, expected_permissions)\n assert response.status_code == 302\n\n\ndef test_if_existing_user_accepts_twice_they_redirect_to_sign_in(client,\n mocker, sample_invite, mock_get_service):\n sample_invite['status'] = 'accepted'\n invite = InvitedUser(**sample_invite)\n mocker.patch('app.invite_api_client.check_token', return_value=invite)\n response = client.get(url_for('main.accept_invite', 
token=\n 'thisisnotarealtoken'), follow_redirects=True)\n assert response.status_code == 200\n page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')\n assert (page.h1.string, page.select('main p')[0].text.strip()) == (\n 'You need to sign in again',\n 'We signed you out because you haven’t used Notify for a while.')\n\n\ndef test_existing_user_of_service_get_redirected_to_signin(client, mocker,\n api_user_active, sample_invite, mock_get_service,\n mock_get_user_by_email, mock_accept_invite):\n sample_invite['email_address'] = api_user_active.email_address\n invite = InvitedUser(**sample_invite)\n mocker.patch('app.invite_api_client.check_token', return_value=invite)\n mocker.patch('app.user_api_client.get_users_for_service', return_value=\n [api_user_active])\n response = client.get(url_for('main.accept_invite', token=\n 'thisisnotarealtoken'), follow_redirects=True)\n assert response.status_code == 200\n page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')\n assert (page.h1.string, page.select('main p')[0].text.strip()) == (\n 'You need to sign in again',\n 'We signed you out because you haven’t used Notify for a while.')\n assert mock_accept_invite.call_count == 1\n\n\ndef test_existing_signed_out_user_accept_invite_redirects_to_sign_in(client,\n service_one, api_user_active, sample_invite, mock_check_invite_token,\n mock_get_user_by_email, mock_get_users_by_service,\n mock_add_user_to_service, mock_accept_invite, mock_get_service):\n expected_service = service_one['id']\n expected_permissions = ['send_messages', 'manage_service',\n 'manage_api_keys']\n response = client.get(url_for('main.accept_invite', token=\n 'thisisnotarealtoken'), follow_redirects=True)\n mock_check_invite_token.assert_called_with('thisisnotarealtoken')\n mock_get_user_by_email.assert_called_with('invited_user@test.gov.uk')\n mock_add_user_to_service.assert_called_with(expected_service,\n api_user_active.id, expected_permissions)\n assert 
mock_accept_invite.call_count == 1\n assert response.status_code == 200\n page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')\n assert (page.h1.string, page.select('main p')[0].text.strip()) == (\n 'You need to sign in again',\n 'We signed you out because you haven’t used Notify for a while.')\n\n\ndef test_new_user_accept_invite_calls_api_and_redirects_to_registration(client,\n service_one, mock_check_invite_token, mock_dont_get_user_by_email,\n mock_add_user_to_service, mock_get_users_by_service, mock_get_service):\n expected_redirect_location = 'http://localhost/register-from-invite'\n response = client.get(url_for('main.accept_invite', token=\n 'thisisnotarealtoken'))\n mock_check_invite_token.assert_called_with('thisisnotarealtoken')\n mock_dont_get_user_by_email.assert_called_with('invited_user@test.gov.uk')\n assert response.status_code == 302\n assert response.location == expected_redirect_location\n\n\ndef test_new_user_accept_invite_calls_api_and_views_registration_page(client,\n service_one, mock_check_invite_token, mock_dont_get_user_by_email,\n mock_add_user_to_service, mock_get_users_by_service, mock_get_service):\n response = client.get(url_for('main.accept_invite', token=\n 'thisisnotarealtoken'), follow_redirects=True)\n mock_check_invite_token.assert_called_with('thisisnotarealtoken')\n mock_dont_get_user_by_email.assert_called_with('invited_user@test.gov.uk')\n assert response.status_code == 200\n page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')\n assert page.h1.string.strip() == 'Create an account'\n email_in_page = page.find('main').find('p')\n assert email_in_page.text.strip(\n ) == 'Your account will be created with this email: invited_user@test.gov.uk'\n form = page.find('form')\n name = form.find('input', id='name')\n password = form.find('input', id='password')\n service = form.find('input', type='hidden', id='service')\n email = form.find('input', type='hidden', id='email_address')\n assert email\n assert 
email.attrs['value'] == 'invited_user@test.gov.uk'\n assert name\n assert password\n assert service\n assert service.attrs['value'] == service_one['id']\n\n\ndef test_cancelled_invited_user_accepts_invited_redirect_to_cancelled_invitation(\n client, service_one, mocker, mock_get_user, mock_get_service):\n cancelled_invitation = create_sample_invite(mocker, service_one, status\n ='cancelled')\n mock_check_token_invite(mocker, cancelled_invitation)\n response = client.get(url_for('main.accept_invite', token=\n 'thisisnotarealtoken'))\n app.invite_api_client.check_token.assert_called_with('thisisnotarealtoken')\n assert response.status_code == 200\n page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')\n assert page.h1.string.strip(\n ) == 'The invitation you were sent has been cancelled'\n\n\ndef test_new_user_accept_invite_completes_new_registration_redirects_to_verify(\n client, service_one, sample_invite, api_user_active,\n mock_check_invite_token, mock_dont_get_user_by_email,\n mock_is_email_unique, mock_register_user, mock_send_verify_code,\n mock_accept_invite, mock_get_users_by_service, mock_add_user_to_service,\n mock_get_service):\n expected_service = service_one['id']\n expected_email = sample_invite['email_address']\n expected_from_user = service_one['users'][0]\n expected_redirect_location = 'http://localhost/register-from-invite'\n response = client.get(url_for('main.accept_invite', token=\n 'thisisnotarealtoken'))\n with client.session_transaction() as session:\n assert response.status_code == 302\n assert response.location == expected_redirect_location\n invited_user = session.get('invited_user')\n assert invited_user\n assert expected_service == invited_user['service']\n assert expected_email == invited_user['email_address']\n assert expected_from_user == invited_user['from_user']\n data = {'service': invited_user['service'], 'email_address':\n invited_user['email_address'], 'from_user': invited_user[\n 'from_user'], 'password': 
'longpassword', 'mobile_number':\n '+447890123456', 'name': 'Invited User'}\n expected_redirect_location = 'http://localhost/verify'\n response = client.post(url_for('main.register_from_invite'), data=data)\n assert response.status_code == 302\n assert response.location == expected_redirect_location\n mock_send_verify_code.assert_called_once_with(ANY, 'sms', data[\n 'mobile_number'])\n mock_register_user.assert_called_with(data['name'], data[\n 'email_address'], data['mobile_number'], data['password'])\n assert mock_accept_invite.call_count == 1\n\n\ndef test_signed_in_existing_user_cannot_use_anothers_invite(logged_in_client,\n mocker, api_user_active, sample_invite, mock_get_user,\n mock_accept_invite, mock_get_service):\n invite = InvitedUser(**sample_invite)\n mocker.patch('app.invite_api_client.check_token', return_value=invite)\n mocker.patch('app.user_api_client.get_users_for_service', return_value=\n [api_user_active])\n response = logged_in_client.get(url_for('main.accept_invite', token=\n 'thisisnotarealtoken'), follow_redirects=True)\n assert response.status_code == 403\n page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')\n assert page.h1.string.strip() == '403'\n flash_banners = page.find_all('div', class_='banner-dangerous')\n assert len(flash_banners) == 1\n banner_contents = flash_banners[0].text.strip()\n assert 'You’re signed in as test@user.gov.uk.' in banner_contents\n assert 'This invite is for another email address.' in banner_contents\n assert 'Sign out and click the link again to accept this invite.' 
in banner_contents\n assert mock_accept_invite.call_count == 0\n\n\ndef test_new_invited_user_verifies_and_added_to_service(client, service_one,\n sample_invite, api_user_active, mock_check_invite_token,\n mock_dont_get_user_by_email, mock_is_email_unique, mock_register_user,\n mock_send_verify_code, mock_check_verify_code, mock_get_user,\n mock_update_user, mock_add_user_to_service, mock_accept_invite,\n mock_get_service, mock_get_service_templates,\n mock_get_template_statistics, mock_get_jobs, mock_has_permissions,\n mock_get_users_by_service, mock_get_detailed_service, mock_get_usage):\n response = client.get(url_for('main.accept_invite', token=\n 'thisisnotarealtoken'))\n data = {'service': sample_invite['service'], 'email_address':\n sample_invite['email_address'], 'from_user': sample_invite[\n 'from_user'], 'password': 'longpassword', 'mobile_number':\n '+447890123456', 'name': 'Invited User'}\n response = client.post(url_for('main.register_from_invite'), data=data)\n response = client.post(url_for('main.verify'), data={'sms_code':\n '12345'}, follow_redirects=True)\n expected_permissions = ['send_messages', 'manage_service',\n 'manage_api_keys']\n with client.session_transaction() as session:\n new_user_id = session['user_id']\n mock_add_user_to_service.assert_called_with(data['service'],\n new_user_id, expected_permissions)\n mock_accept_invite.assert_called_with(data['service'],\n sample_invite['id'])\n mock_check_verify_code.assert_called_once_with(new_user_id, '12345',\n 'sms')\n assert service_one['id'] == session['service_id']\n raw_html = response.data.decode('utf-8')\n page = BeautifulSoup(raw_html, 'html.parser')\n assert page.find('h1').text == 'Dashboard'\n",
"step-4": "from flask import url_for\nfrom bs4 import BeautifulSoup\nfrom unittest.mock import ANY\nimport app\nfrom app.notify_client.models import InvitedUser\nfrom tests.conftest import sample_invite as create_sample_invite\nfrom tests.conftest import mock_check_invite_token as mock_check_token_invite\n\n\ndef test_existing_user_accept_invite_calls_api_and_redirects_to_dashboard(\n client, service_one, api_user_active, sample_invite, mock_get_service,\n mock_check_invite_token, mock_get_user_by_email,\n mock_get_users_by_service, mock_accept_invite, mock_add_user_to_service):\n expected_service = service_one['id']\n expected_redirect_location = ('http://localhost/services/{}/dashboard'.\n format(expected_service))\n expected_permissions = ['send_messages', 'manage_service',\n 'manage_api_keys']\n response = client.get(url_for('main.accept_invite', token=\n 'thisisnotarealtoken'))\n mock_check_invite_token.assert_called_with('thisisnotarealtoken')\n mock_get_user_by_email.assert_called_with('invited_user@test.gov.uk')\n assert mock_accept_invite.call_count == 1\n mock_add_user_to_service.assert_called_with(expected_service,\n api_user_active.id, expected_permissions)\n assert response.status_code == 302\n assert response.location == expected_redirect_location\n\n\ndef test_existing_user_with_no_permissions_accept_invite(client, mocker,\n service_one, api_user_active, sample_invite, mock_check_invite_token,\n mock_get_user_by_email, mock_get_users_by_service,\n mock_add_user_to_service, mock_get_service):\n expected_service = service_one['id']\n sample_invite['permissions'] = ''\n expected_permissions = []\n mocker.patch('app.invite_api_client.accept_invite', return_value=\n sample_invite)\n response = client.get(url_for('main.accept_invite', token=\n 'thisisnotarealtoken'))\n mock_add_user_to_service.assert_called_with(expected_service,\n api_user_active.id, expected_permissions)\n assert response.status_code == 302\n\n\ndef 
test_if_existing_user_accepts_twice_they_redirect_to_sign_in(client,\n mocker, sample_invite, mock_get_service):\n sample_invite['status'] = 'accepted'\n invite = InvitedUser(**sample_invite)\n mocker.patch('app.invite_api_client.check_token', return_value=invite)\n response = client.get(url_for('main.accept_invite', token=\n 'thisisnotarealtoken'), follow_redirects=True)\n assert response.status_code == 200\n page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')\n assert (page.h1.string, page.select('main p')[0].text.strip()) == (\n 'You need to sign in again',\n 'We signed you out because you haven’t used Notify for a while.')\n\n\ndef test_existing_user_of_service_get_redirected_to_signin(client, mocker,\n api_user_active, sample_invite, mock_get_service,\n mock_get_user_by_email, mock_accept_invite):\n sample_invite['email_address'] = api_user_active.email_address\n invite = InvitedUser(**sample_invite)\n mocker.patch('app.invite_api_client.check_token', return_value=invite)\n mocker.patch('app.user_api_client.get_users_for_service', return_value=\n [api_user_active])\n response = client.get(url_for('main.accept_invite', token=\n 'thisisnotarealtoken'), follow_redirects=True)\n assert response.status_code == 200\n page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')\n assert (page.h1.string, page.select('main p')[0].text.strip()) == (\n 'You need to sign in again',\n 'We signed you out because you haven’t used Notify for a while.')\n assert mock_accept_invite.call_count == 1\n\n\ndef test_existing_signed_out_user_accept_invite_redirects_to_sign_in(client,\n service_one, api_user_active, sample_invite, mock_check_invite_token,\n mock_get_user_by_email, mock_get_users_by_service,\n mock_add_user_to_service, mock_accept_invite, mock_get_service):\n expected_service = service_one['id']\n expected_permissions = ['send_messages', 'manage_service',\n 'manage_api_keys']\n response = client.get(url_for('main.accept_invite', token=\n 
'thisisnotarealtoken'), follow_redirects=True)\n mock_check_invite_token.assert_called_with('thisisnotarealtoken')\n mock_get_user_by_email.assert_called_with('invited_user@test.gov.uk')\n mock_add_user_to_service.assert_called_with(expected_service,\n api_user_active.id, expected_permissions)\n assert mock_accept_invite.call_count == 1\n assert response.status_code == 200\n page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')\n assert (page.h1.string, page.select('main p')[0].text.strip()) == (\n 'You need to sign in again',\n 'We signed you out because you haven’t used Notify for a while.')\n\n\ndef test_new_user_accept_invite_calls_api_and_redirects_to_registration(client,\n service_one, mock_check_invite_token, mock_dont_get_user_by_email,\n mock_add_user_to_service, mock_get_users_by_service, mock_get_service):\n expected_redirect_location = 'http://localhost/register-from-invite'\n response = client.get(url_for('main.accept_invite', token=\n 'thisisnotarealtoken'))\n mock_check_invite_token.assert_called_with('thisisnotarealtoken')\n mock_dont_get_user_by_email.assert_called_with('invited_user@test.gov.uk')\n assert response.status_code == 302\n assert response.location == expected_redirect_location\n\n\ndef test_new_user_accept_invite_calls_api_and_views_registration_page(client,\n service_one, mock_check_invite_token, mock_dont_get_user_by_email,\n mock_add_user_to_service, mock_get_users_by_service, mock_get_service):\n response = client.get(url_for('main.accept_invite', token=\n 'thisisnotarealtoken'), follow_redirects=True)\n mock_check_invite_token.assert_called_with('thisisnotarealtoken')\n mock_dont_get_user_by_email.assert_called_with('invited_user@test.gov.uk')\n assert response.status_code == 200\n page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')\n assert page.h1.string.strip() == 'Create an account'\n email_in_page = page.find('main').find('p')\n assert email_in_page.text.strip(\n ) == 'Your account will be created 
with this email: invited_user@test.gov.uk'\n form = page.find('form')\n name = form.find('input', id='name')\n password = form.find('input', id='password')\n service = form.find('input', type='hidden', id='service')\n email = form.find('input', type='hidden', id='email_address')\n assert email\n assert email.attrs['value'] == 'invited_user@test.gov.uk'\n assert name\n assert password\n assert service\n assert service.attrs['value'] == service_one['id']\n\n\ndef test_cancelled_invited_user_accepts_invited_redirect_to_cancelled_invitation(\n client, service_one, mocker, mock_get_user, mock_get_service):\n cancelled_invitation = create_sample_invite(mocker, service_one, status\n ='cancelled')\n mock_check_token_invite(mocker, cancelled_invitation)\n response = client.get(url_for('main.accept_invite', token=\n 'thisisnotarealtoken'))\n app.invite_api_client.check_token.assert_called_with('thisisnotarealtoken')\n assert response.status_code == 200\n page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')\n assert page.h1.string.strip(\n ) == 'The invitation you were sent has been cancelled'\n\n\ndef test_new_user_accept_invite_completes_new_registration_redirects_to_verify(\n client, service_one, sample_invite, api_user_active,\n mock_check_invite_token, mock_dont_get_user_by_email,\n mock_is_email_unique, mock_register_user, mock_send_verify_code,\n mock_accept_invite, mock_get_users_by_service, mock_add_user_to_service,\n mock_get_service):\n expected_service = service_one['id']\n expected_email = sample_invite['email_address']\n expected_from_user = service_one['users'][0]\n expected_redirect_location = 'http://localhost/register-from-invite'\n response = client.get(url_for('main.accept_invite', token=\n 'thisisnotarealtoken'))\n with client.session_transaction() as session:\n assert response.status_code == 302\n assert response.location == expected_redirect_location\n invited_user = session.get('invited_user')\n assert invited_user\n assert 
expected_service == invited_user['service']\n assert expected_email == invited_user['email_address']\n assert expected_from_user == invited_user['from_user']\n data = {'service': invited_user['service'], 'email_address':\n invited_user['email_address'], 'from_user': invited_user[\n 'from_user'], 'password': 'longpassword', 'mobile_number':\n '+447890123456', 'name': 'Invited User'}\n expected_redirect_location = 'http://localhost/verify'\n response = client.post(url_for('main.register_from_invite'), data=data)\n assert response.status_code == 302\n assert response.location == expected_redirect_location\n mock_send_verify_code.assert_called_once_with(ANY, 'sms', data[\n 'mobile_number'])\n mock_register_user.assert_called_with(data['name'], data[\n 'email_address'], data['mobile_number'], data['password'])\n assert mock_accept_invite.call_count == 1\n\n\ndef test_signed_in_existing_user_cannot_use_anothers_invite(logged_in_client,\n mocker, api_user_active, sample_invite, mock_get_user,\n mock_accept_invite, mock_get_service):\n invite = InvitedUser(**sample_invite)\n mocker.patch('app.invite_api_client.check_token', return_value=invite)\n mocker.patch('app.user_api_client.get_users_for_service', return_value=\n [api_user_active])\n response = logged_in_client.get(url_for('main.accept_invite', token=\n 'thisisnotarealtoken'), follow_redirects=True)\n assert response.status_code == 403\n page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')\n assert page.h1.string.strip() == '403'\n flash_banners = page.find_all('div', class_='banner-dangerous')\n assert len(flash_banners) == 1\n banner_contents = flash_banners[0].text.strip()\n assert 'You’re signed in as test@user.gov.uk.' in banner_contents\n assert 'This invite is for another email address.' in banner_contents\n assert 'Sign out and click the link again to accept this invite.' 
in banner_contents\n assert mock_accept_invite.call_count == 0\n\n\ndef test_new_invited_user_verifies_and_added_to_service(client, service_one,\n sample_invite, api_user_active, mock_check_invite_token,\n mock_dont_get_user_by_email, mock_is_email_unique, mock_register_user,\n mock_send_verify_code, mock_check_verify_code, mock_get_user,\n mock_update_user, mock_add_user_to_service, mock_accept_invite,\n mock_get_service, mock_get_service_templates,\n mock_get_template_statistics, mock_get_jobs, mock_has_permissions,\n mock_get_users_by_service, mock_get_detailed_service, mock_get_usage):\n response = client.get(url_for('main.accept_invite', token=\n 'thisisnotarealtoken'))\n data = {'service': sample_invite['service'], 'email_address':\n sample_invite['email_address'], 'from_user': sample_invite[\n 'from_user'], 'password': 'longpassword', 'mobile_number':\n '+447890123456', 'name': 'Invited User'}\n response = client.post(url_for('main.register_from_invite'), data=data)\n response = client.post(url_for('main.verify'), data={'sms_code':\n '12345'}, follow_redirects=True)\n expected_permissions = ['send_messages', 'manage_service',\n 'manage_api_keys']\n with client.session_transaction() as session:\n new_user_id = session['user_id']\n mock_add_user_to_service.assert_called_with(data['service'],\n new_user_id, expected_permissions)\n mock_accept_invite.assert_called_with(data['service'],\n sample_invite['id'])\n mock_check_verify_code.assert_called_once_with(new_user_id, '12345',\n 'sms')\n assert service_one['id'] == session['service_id']\n raw_html = response.data.decode('utf-8')\n page = BeautifulSoup(raw_html, 'html.parser')\n assert page.find('h1').text == 'Dashboard'\n",
"step-5": "from flask import url_for\nfrom bs4 import BeautifulSoup\nfrom unittest.mock import ANY\n\nimport app\n\nfrom app.notify_client.models import InvitedUser\nfrom tests.conftest import sample_invite as create_sample_invite\nfrom tests.conftest import mock_check_invite_token as mock_check_token_invite\n\n\ndef test_existing_user_accept_invite_calls_api_and_redirects_to_dashboard(\n client,\n service_one,\n api_user_active,\n sample_invite,\n mock_get_service,\n mock_check_invite_token,\n mock_get_user_by_email,\n mock_get_users_by_service,\n mock_accept_invite,\n mock_add_user_to_service,\n):\n\n expected_service = service_one['id']\n expected_redirect_location = 'http://localhost/services/{}/dashboard'.format(expected_service)\n expected_permissions = ['send_messages', 'manage_service', 'manage_api_keys']\n\n response = client.get(url_for('main.accept_invite', token='thisisnotarealtoken'))\n\n mock_check_invite_token.assert_called_with('thisisnotarealtoken')\n mock_get_user_by_email.assert_called_with('invited_user@test.gov.uk')\n assert mock_accept_invite.call_count == 1\n mock_add_user_to_service.assert_called_with(expected_service, api_user_active.id, expected_permissions)\n\n assert response.status_code == 302\n assert response.location == expected_redirect_location\n\n\ndef test_existing_user_with_no_permissions_accept_invite(\n client,\n mocker,\n service_one,\n api_user_active,\n sample_invite,\n mock_check_invite_token,\n mock_get_user_by_email,\n mock_get_users_by_service,\n mock_add_user_to_service,\n mock_get_service,\n):\n\n expected_service = service_one['id']\n sample_invite['permissions'] = ''\n expected_permissions = []\n mocker.patch('app.invite_api_client.accept_invite', return_value=sample_invite)\n\n response = client.get(url_for('main.accept_invite', token='thisisnotarealtoken'))\n mock_add_user_to_service.assert_called_with(expected_service, api_user_active.id, expected_permissions)\n\n assert response.status_code == 302\n\n\ndef 
test_if_existing_user_accepts_twice_they_redirect_to_sign_in(\n client,\n mocker,\n sample_invite,\n mock_get_service,\n):\n\n sample_invite['status'] = 'accepted'\n invite = InvitedUser(**sample_invite)\n mocker.patch('app.invite_api_client.check_token', return_value=invite)\n\n response = client.get(url_for('main.accept_invite', token='thisisnotarealtoken'), follow_redirects=True)\n assert response.status_code == 200\n page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')\n assert (\n page.h1.string,\n page.select('main p')[0].text.strip(),\n ) == (\n 'You need to sign in again',\n 'We signed you out because you haven’t used Notify for a while.',\n )\n\n\ndef test_existing_user_of_service_get_redirected_to_signin(\n client,\n mocker,\n api_user_active,\n sample_invite,\n mock_get_service,\n mock_get_user_by_email,\n mock_accept_invite,\n):\n sample_invite['email_address'] = api_user_active.email_address\n invite = InvitedUser(**sample_invite)\n mocker.patch('app.invite_api_client.check_token', return_value=invite)\n mocker.patch('app.user_api_client.get_users_for_service', return_value=[api_user_active])\n\n response = client.get(url_for('main.accept_invite', token='thisisnotarealtoken'), follow_redirects=True)\n assert response.status_code == 200\n page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')\n assert (\n page.h1.string,\n page.select('main p')[0].text.strip(),\n ) == (\n 'You need to sign in again',\n 'We signed you out because you haven’t used Notify for a while.',\n )\n assert mock_accept_invite.call_count == 1\n\n\ndef test_existing_signed_out_user_accept_invite_redirects_to_sign_in(\n client,\n service_one,\n api_user_active,\n sample_invite,\n mock_check_invite_token,\n mock_get_user_by_email,\n mock_get_users_by_service,\n mock_add_user_to_service,\n mock_accept_invite,\n mock_get_service,\n):\n\n expected_service = service_one['id']\n expected_permissions = ['send_messages', 'manage_service', 'manage_api_keys']\n\n 
response = client.get(url_for('main.accept_invite', token='thisisnotarealtoken'), follow_redirects=True)\n\n mock_check_invite_token.assert_called_with('thisisnotarealtoken')\n mock_get_user_by_email.assert_called_with('invited_user@test.gov.uk')\n mock_add_user_to_service.assert_called_with(expected_service, api_user_active.id, expected_permissions)\n assert mock_accept_invite.call_count == 1\n\n assert response.status_code == 200\n page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')\n assert (\n page.h1.string,\n page.select('main p')[0].text.strip(),\n ) == (\n 'You need to sign in again',\n 'We signed you out because you haven’t used Notify for a while.',\n )\n\n\ndef test_new_user_accept_invite_calls_api_and_redirects_to_registration(\n client,\n service_one,\n mock_check_invite_token,\n mock_dont_get_user_by_email,\n mock_add_user_to_service,\n mock_get_users_by_service,\n mock_get_service,\n):\n\n expected_redirect_location = 'http://localhost/register-from-invite'\n\n response = client.get(url_for('main.accept_invite', token='thisisnotarealtoken'))\n\n mock_check_invite_token.assert_called_with('thisisnotarealtoken')\n mock_dont_get_user_by_email.assert_called_with('invited_user@test.gov.uk')\n\n assert response.status_code == 302\n assert response.location == expected_redirect_location\n\n\ndef test_new_user_accept_invite_calls_api_and_views_registration_page(\n client,\n service_one,\n mock_check_invite_token,\n mock_dont_get_user_by_email,\n mock_add_user_to_service,\n mock_get_users_by_service,\n mock_get_service,\n):\n\n response = client.get(url_for('main.accept_invite', token='thisisnotarealtoken'), follow_redirects=True)\n\n mock_check_invite_token.assert_called_with('thisisnotarealtoken')\n mock_dont_get_user_by_email.assert_called_with('invited_user@test.gov.uk')\n\n assert response.status_code == 200\n page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')\n assert page.h1.string.strip() == 'Create an account'\n\n 
email_in_page = page.find('main').find('p')\n assert email_in_page.text.strip() == 'Your account will be created with this email: invited_user@test.gov.uk' # noqa\n\n form = page.find('form')\n name = form.find('input', id='name')\n password = form.find('input', id='password')\n service = form.find('input', type='hidden', id='service')\n email = form.find('input', type='hidden', id='email_address')\n\n assert email\n assert email.attrs['value'] == 'invited_user@test.gov.uk'\n assert name\n assert password\n assert service\n assert service.attrs['value'] == service_one['id']\n\n\ndef test_cancelled_invited_user_accepts_invited_redirect_to_cancelled_invitation(\n client,\n service_one,\n mocker,\n mock_get_user,\n mock_get_service,\n):\n cancelled_invitation = create_sample_invite(mocker, service_one, status='cancelled')\n mock_check_token_invite(mocker, cancelled_invitation)\n response = client.get(url_for('main.accept_invite', token='thisisnotarealtoken'))\n\n app.invite_api_client.check_token.assert_called_with('thisisnotarealtoken')\n assert response.status_code == 200\n page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')\n assert page.h1.string.strip() == 'The invitation you were sent has been cancelled'\n\n\ndef test_new_user_accept_invite_completes_new_registration_redirects_to_verify(\n client,\n service_one,\n sample_invite,\n api_user_active,\n mock_check_invite_token,\n mock_dont_get_user_by_email,\n mock_is_email_unique,\n mock_register_user,\n mock_send_verify_code,\n mock_accept_invite,\n mock_get_users_by_service,\n mock_add_user_to_service,\n mock_get_service,\n):\n\n expected_service = service_one['id']\n expected_email = sample_invite['email_address']\n expected_from_user = service_one['users'][0]\n expected_redirect_location = 'http://localhost/register-from-invite'\n\n response = client.get(url_for('main.accept_invite', token='thisisnotarealtoken'))\n with client.session_transaction() as session:\n assert response.status_code == 
302\n assert response.location == expected_redirect_location\n invited_user = session.get('invited_user')\n assert invited_user\n assert expected_service == invited_user['service']\n assert expected_email == invited_user['email_address']\n assert expected_from_user == invited_user['from_user']\n\n data = {'service': invited_user['service'],\n 'email_address': invited_user['email_address'],\n 'from_user': invited_user['from_user'],\n 'password': 'longpassword',\n 'mobile_number': '+447890123456',\n 'name': 'Invited User'\n }\n\n expected_redirect_location = 'http://localhost/verify'\n response = client.post(url_for('main.register_from_invite'), data=data)\n assert response.status_code == 302\n assert response.location == expected_redirect_location\n\n mock_send_verify_code.assert_called_once_with(ANY, 'sms', data['mobile_number'])\n\n mock_register_user.assert_called_with(data['name'],\n data['email_address'],\n data['mobile_number'],\n data['password'])\n\n assert mock_accept_invite.call_count == 1\n\n\ndef test_signed_in_existing_user_cannot_use_anothers_invite(\n logged_in_client,\n mocker,\n api_user_active,\n sample_invite,\n mock_get_user,\n mock_accept_invite,\n mock_get_service,\n):\n invite = InvitedUser(**sample_invite)\n mocker.patch('app.invite_api_client.check_token', return_value=invite)\n mocker.patch('app.user_api_client.get_users_for_service', return_value=[api_user_active])\n\n response = logged_in_client.get(url_for('main.accept_invite', token='thisisnotarealtoken'), follow_redirects=True)\n assert response.status_code == 403\n page = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')\n assert page.h1.string.strip() == '403'\n flash_banners = page.find_all('div', class_='banner-dangerous')\n assert len(flash_banners) == 1\n banner_contents = flash_banners[0].text.strip()\n assert \"You’re signed in as test@user.gov.uk.\" in banner_contents\n assert \"This invite is for another email address.\" in banner_contents\n assert \"Sign out and 
click the link again to accept this invite.\" in banner_contents\n assert mock_accept_invite.call_count == 0\n\n\ndef test_new_invited_user_verifies_and_added_to_service(\n client,\n service_one,\n sample_invite,\n api_user_active,\n mock_check_invite_token,\n mock_dont_get_user_by_email,\n mock_is_email_unique,\n mock_register_user,\n mock_send_verify_code,\n mock_check_verify_code,\n mock_get_user,\n mock_update_user,\n mock_add_user_to_service,\n mock_accept_invite,\n mock_get_service,\n mock_get_service_templates,\n mock_get_template_statistics,\n mock_get_jobs,\n mock_has_permissions,\n mock_get_users_by_service,\n mock_get_detailed_service,\n mock_get_usage,\n):\n\n # visit accept token page\n response = client.get(url_for('main.accept_invite', token='thisisnotarealtoken'))\n data = {'service': sample_invite['service'],\n 'email_address': sample_invite['email_address'],\n 'from_user': sample_invite['from_user'],\n 'password': 'longpassword',\n 'mobile_number': '+447890123456',\n 'name': 'Invited User'\n }\n\n # get redirected to register from invite\n response = client.post(url_for('main.register_from_invite'), data=data)\n\n # that sends user on to verify\n response = client.post(url_for('main.verify'), data={'sms_code': '12345'}, follow_redirects=True)\n\n # when they post codes back to admin user should be added to\n # service and sent on to dash board\n expected_permissions = ['send_messages', 'manage_service', 'manage_api_keys']\n\n with client.session_transaction() as session:\n new_user_id = session['user_id']\n mock_add_user_to_service.assert_called_with(data['service'], new_user_id, expected_permissions)\n mock_accept_invite.assert_called_with(data['service'], sample_invite['id'])\n mock_check_verify_code.assert_called_once_with(new_user_id, '12345', 'sms')\n assert service_one['id'] == session['service_id']\n\n raw_html = response.data.decode('utf-8')\n page = BeautifulSoup(raw_html, 'html.parser')\n assert page.find('h1').text == 'Dashboard'\n",
"step-ids": [
8,
10,
11,
12,
13
]
}
|
[
8,
10,
11,
12,
13
] |
<|reserved_special_token_0|>
@pytest.mark.integration
def test_athena_config_query_location_old_plus_new_value_not_allowed():
    """Parsing must fail when both the deprecated ``s3_staging_dir`` and the
    newer ``query_result_location`` settings are supplied together."""
    from datahub.ingestion.source.sql.athena import AthenaConfig

    conflicting_settings = {
        'aws_region': 'us-west-1',
        's3_staging_dir': 's3://sample-staging-dir/',
        'query_result_location': 's3://query_result_location',
        'work_group': 'test-workgroup',
    }
    with pytest.raises(ValueError):
        AthenaConfig.parse_obj(conflicting_settings)
@pytest.mark.integration
def test_athena_config_staging_dir_is_set_as_query_result():
    """The deprecated ``s3_staging_dir`` value must be carried over into
    ``query_result_location``, so the legacy and modern configs serialize
    identically."""
    from datahub.ingestion.source.sql.athena import AthenaConfig

    legacy_config = AthenaConfig.parse_obj({
        'aws_region': 'us-west-1',
        's3_staging_dir': 's3://sample-staging-dir/',
        'work_group': 'test-workgroup',
    })
    modern_config = AthenaConfig.parse_obj({
        'aws_region': 'us-west-1',
        'query_result_location': 's3://sample-staging-dir/',
        'work_group': 'test-workgroup',
    })
    assert legacy_config.json() == modern_config.json()
<|reserved_special_token_0|>
@pytest.mark.integration
@freeze_time(FROZEN_TIME)
def test_athena_get_table_properties():
    """``get_table_properties`` should flatten Athena table metadata into
    string custom properties and derive an S3 URN from the table location.

    The clock is frozen so the ``datetime.now()`` values fed into the canned
    metadata serialize to a known timestamp in the expected properties.
    """
    from pyathena.model import AthenaTableMetadata
    from datahub.ingestion.source.sql.athena import AthenaConfig, AthenaSource

    athena_config = AthenaConfig.parse_obj({
        'aws_region': 'us-west-1',
        's3_staging_dir': 's3://sample-staging-dir/',
        'work_group': 'test-workgroup',
    })

    # Raw Athena API response that the mocked cursor will hand back.
    raw_metadata = {
        'TableMetadata': {
            'Name': 'test',
            'TableType': 'testType',
            'CreateTime': datetime.now(),
            'LastAccessTime': datetime.now(),
            'PartitionKeys': [
                {'Name': 'testKey', 'Type': 'string', 'Comment': 'testComment'}
            ],
            'Parameters': {
                'comment': 'testComment',
                'location': 's3://testLocation',
                'inputformat': 'testInputFormat',
                'outputformat': 'testOutputFormat',
                'serde.serialization.lib': 'testSerde',
            },
        }
    }

    # Wire a fake SQLAlchemy inspector whose raw connection yields a cursor
    # returning the canned metadata above.
    fake_cursor = mock.MagicMock()
    fake_cursor._get_table_metadata.return_value = AthenaTableMetadata(
        response=raw_metadata
    )
    fake_inspector = mock.MagicMock()
    fake_inspector.engine.raw_connection().cursor.return_value = fake_cursor

    source = AthenaSource(config=athena_config, ctx=PipelineContext(run_id='test'))
    _description, custom_properties, location = source.get_table_properties(
        inspector=fake_inspector, table='test_table', schema='test_schema'
    )

    assert custom_properties == {
        'comment': 'testComment',
        'create_time': '2020-04-14 07:00:00',
        'inputformat': 'testInputFormat',
        'last_access_time': '2020-04-14 07:00:00',
        'location': 's3://testLocation',
        'outputformat': 'testOutputFormat',
        'partition_keys':
            '[{"name": "testKey", "type": "string", "comment": "testComment"}]',
        'serde.serialization.lib': 'testSerde',
        'table_type': 'testType',
    }
    assert location == make_s3_urn('s3://testLocation', 'PROD')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@pytest.mark.integration
def test_athena_config_query_location_old_plus_new_value_not_allowed():
from datahub.ingestion.source.sql.athena import AthenaConfig
with pytest.raises(ValueError):
AthenaConfig.parse_obj({'aws_region': 'us-west-1', 's3_staging_dir':
's3://sample-staging-dir/', 'query_result_location':
's3://query_result_location', 'work_group': 'test-workgroup'})
@pytest.mark.integration
def test_athena_config_staging_dir_is_set_as_query_result():
from datahub.ingestion.source.sql.athena import AthenaConfig
config = AthenaConfig.parse_obj({'aws_region': 'us-west-1',
's3_staging_dir': 's3://sample-staging-dir/', 'work_group':
'test-workgroup'})
expected_config = AthenaConfig.parse_obj({'aws_region': 'us-west-1',
'query_result_location': 's3://sample-staging-dir/', 'work_group':
'test-workgroup'})
assert config.json() == expected_config.json()
@pytest.mark.integration
def test_athena_uri():
from datahub.ingestion.source.sql.athena import AthenaConfig
config = AthenaConfig.parse_obj({'aws_region': 'us-west-1',
'query_result_location': 's3://query-result-location/',
'work_group': 'test-workgroup'})
assert config.get_sql_alchemy_url(
) == 'awsathena+rest://@athena.us-west-1.amazonaws.com:443/?s3_staging_dir=s3%3A%2F%2Fquery-result-location%2F&work_group=test-workgroup&catalog_name=awsdatacatalog&duration_seconds=3600'
@pytest.mark.integration
@freeze_time(FROZEN_TIME)
def test_athena_get_table_properties():
from pyathena.model import AthenaTableMetadata
from datahub.ingestion.source.sql.athena import AthenaConfig, AthenaSource
config = AthenaConfig.parse_obj({'aws_region': 'us-west-1',
's3_staging_dir': 's3://sample-staging-dir/', 'work_group':
'test-workgroup'})
schema: str = 'test_schema'
table: str = 'test_table'
table_metadata = {'TableMetadata': {'Name': 'test', 'TableType':
'testType', 'CreateTime': datetime.now(), 'LastAccessTime':
datetime.now(), 'PartitionKeys': [{'Name': 'testKey', 'Type':
'string', 'Comment': 'testComment'}], 'Parameters': {'comment':
'testComment', 'location': 's3://testLocation', 'inputformat':
'testInputFormat', 'outputformat': 'testOutputFormat',
'serde.serialization.lib': 'testSerde'}}}
mock_cursor = mock.MagicMock()
mock_inspector = mock.MagicMock()
mock_inspector.engine.raw_connection().cursor.return_value = mock_cursor
mock_cursor._get_table_metadata.return_value = AthenaTableMetadata(response
=table_metadata)
ctx = PipelineContext(run_id='test')
source = AthenaSource(config=config, ctx=ctx)
description, custom_properties, location = source.get_table_properties(
inspector=mock_inspector, table=table, schema=schema)
assert custom_properties == {'comment': 'testComment', 'create_time':
'2020-04-14 07:00:00', 'inputformat': 'testInputFormat',
'last_access_time': '2020-04-14 07:00:00', 'location':
's3://testLocation', 'outputformat': 'testOutputFormat',
'partition_keys':
'[{"name": "testKey", "type": "string", "comment": "testComment"}]',
'serde.serialization.lib': 'testSerde', 'table_type': 'testType'}
assert location == make_s3_urn('s3://testLocation', 'PROD')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
FROZEN_TIME = '2020-04-14 07:00:00'
@pytest.mark.integration
def test_athena_config_query_location_old_plus_new_value_not_allowed():
from datahub.ingestion.source.sql.athena import AthenaConfig
with pytest.raises(ValueError):
AthenaConfig.parse_obj({'aws_region': 'us-west-1', 's3_staging_dir':
's3://sample-staging-dir/', 'query_result_location':
's3://query_result_location', 'work_group': 'test-workgroup'})
@pytest.mark.integration
def test_athena_config_staging_dir_is_set_as_query_result():
from datahub.ingestion.source.sql.athena import AthenaConfig
config = AthenaConfig.parse_obj({'aws_region': 'us-west-1',
's3_staging_dir': 's3://sample-staging-dir/', 'work_group':
'test-workgroup'})
expected_config = AthenaConfig.parse_obj({'aws_region': 'us-west-1',
'query_result_location': 's3://sample-staging-dir/', 'work_group':
'test-workgroup'})
assert config.json() == expected_config.json()
@pytest.mark.integration
def test_athena_uri():
from datahub.ingestion.source.sql.athena import AthenaConfig
config = AthenaConfig.parse_obj({'aws_region': 'us-west-1',
'query_result_location': 's3://query-result-location/',
'work_group': 'test-workgroup'})
assert config.get_sql_alchemy_url(
) == 'awsathena+rest://@athena.us-west-1.amazonaws.com:443/?s3_staging_dir=s3%3A%2F%2Fquery-result-location%2F&work_group=test-workgroup&catalog_name=awsdatacatalog&duration_seconds=3600'
@pytest.mark.integration
@freeze_time(FROZEN_TIME)
def test_athena_get_table_properties():
from pyathena.model import AthenaTableMetadata
from datahub.ingestion.source.sql.athena import AthenaConfig, AthenaSource
config = AthenaConfig.parse_obj({'aws_region': 'us-west-1',
's3_staging_dir': 's3://sample-staging-dir/', 'work_group':
'test-workgroup'})
schema: str = 'test_schema'
table: str = 'test_table'
table_metadata = {'TableMetadata': {'Name': 'test', 'TableType':
'testType', 'CreateTime': datetime.now(), 'LastAccessTime':
datetime.now(), 'PartitionKeys': [{'Name': 'testKey', 'Type':
'string', 'Comment': 'testComment'}], 'Parameters': {'comment':
'testComment', 'location': 's3://testLocation', 'inputformat':
'testInputFormat', 'outputformat': 'testOutputFormat',
'serde.serialization.lib': 'testSerde'}}}
mock_cursor = mock.MagicMock()
mock_inspector = mock.MagicMock()
mock_inspector.engine.raw_connection().cursor.return_value = mock_cursor
mock_cursor._get_table_metadata.return_value = AthenaTableMetadata(response
=table_metadata)
ctx = PipelineContext(run_id='test')
source = AthenaSource(config=config, ctx=ctx)
description, custom_properties, location = source.get_table_properties(
inspector=mock_inspector, table=table, schema=schema)
assert custom_properties == {'comment': 'testComment', 'create_time':
'2020-04-14 07:00:00', 'inputformat': 'testInputFormat',
'last_access_time': '2020-04-14 07:00:00', 'location':
's3://testLocation', 'outputformat': 'testOutputFormat',
'partition_keys':
'[{"name": "testKey", "type": "string", "comment": "testComment"}]',
'serde.serialization.lib': 'testSerde', 'table_type': 'testType'}
assert location == make_s3_urn('s3://testLocation', 'PROD')
<|reserved_special_token_1|>
from datetime import datetime
from unittest import mock
import pytest
from freezegun import freeze_time
from datahub.ingestion.api.common import PipelineContext
from src.datahub.ingestion.source.aws.s3_util import make_s3_urn
FROZEN_TIME = '2020-04-14 07:00:00'
@pytest.mark.integration
def test_athena_config_query_location_old_plus_new_value_not_allowed():
from datahub.ingestion.source.sql.athena import AthenaConfig
with pytest.raises(ValueError):
AthenaConfig.parse_obj({'aws_region': 'us-west-1', 's3_staging_dir':
's3://sample-staging-dir/', 'query_result_location':
's3://query_result_location', 'work_group': 'test-workgroup'})
@pytest.mark.integration
def test_athena_config_staging_dir_is_set_as_query_result():
from datahub.ingestion.source.sql.athena import AthenaConfig
config = AthenaConfig.parse_obj({'aws_region': 'us-west-1',
's3_staging_dir': 's3://sample-staging-dir/', 'work_group':
'test-workgroup'})
expected_config = AthenaConfig.parse_obj({'aws_region': 'us-west-1',
'query_result_location': 's3://sample-staging-dir/', 'work_group':
'test-workgroup'})
assert config.json() == expected_config.json()
@pytest.mark.integration
def test_athena_uri():
from datahub.ingestion.source.sql.athena import AthenaConfig
config = AthenaConfig.parse_obj({'aws_region': 'us-west-1',
'query_result_location': 's3://query-result-location/',
'work_group': 'test-workgroup'})
assert config.get_sql_alchemy_url(
) == 'awsathena+rest://@athena.us-west-1.amazonaws.com:443/?s3_staging_dir=s3%3A%2F%2Fquery-result-location%2F&work_group=test-workgroup&catalog_name=awsdatacatalog&duration_seconds=3600'
@pytest.mark.integration
@freeze_time(FROZEN_TIME)
def test_athena_get_table_properties():
from pyathena.model import AthenaTableMetadata
from datahub.ingestion.source.sql.athena import AthenaConfig, AthenaSource
config = AthenaConfig.parse_obj({'aws_region': 'us-west-1',
's3_staging_dir': 's3://sample-staging-dir/', 'work_group':
'test-workgroup'})
schema: str = 'test_schema'
table: str = 'test_table'
table_metadata = {'TableMetadata': {'Name': 'test', 'TableType':
'testType', 'CreateTime': datetime.now(), 'LastAccessTime':
datetime.now(), 'PartitionKeys': [{'Name': 'testKey', 'Type':
'string', 'Comment': 'testComment'}], 'Parameters': {'comment':
'testComment', 'location': 's3://testLocation', 'inputformat':
'testInputFormat', 'outputformat': 'testOutputFormat',
'serde.serialization.lib': 'testSerde'}}}
mock_cursor = mock.MagicMock()
mock_inspector = mock.MagicMock()
mock_inspector.engine.raw_connection().cursor.return_value = mock_cursor
mock_cursor._get_table_metadata.return_value = AthenaTableMetadata(response
=table_metadata)
ctx = PipelineContext(run_id='test')
source = AthenaSource(config=config, ctx=ctx)
description, custom_properties, location = source.get_table_properties(
inspector=mock_inspector, table=table, schema=schema)
assert custom_properties == {'comment': 'testComment', 'create_time':
'2020-04-14 07:00:00', 'inputformat': 'testInputFormat',
'last_access_time': '2020-04-14 07:00:00', 'location':
's3://testLocation', 'outputformat': 'testOutputFormat',
'partition_keys':
'[{"name": "testKey", "type": "string", "comment": "testComment"}]',
'serde.serialization.lib': 'testSerde', 'table_type': 'testType'}
assert location == make_s3_urn('s3://testLocation', 'PROD')
<|reserved_special_token_1|>
from datetime import datetime
from unittest import mock
import pytest
from freezegun import freeze_time
from datahub.ingestion.api.common import PipelineContext
from src.datahub.ingestion.source.aws.s3_util import make_s3_urn
FROZEN_TIME = "2020-04-14 07:00:00"
@pytest.mark.integration
def test_athena_config_query_location_old_plus_new_value_not_allowed():
    """Supplying both the legacy ``s3_staging_dir`` and the newer
    ``query_result_location`` must be rejected during config validation."""
    from datahub.ingestion.source.sql.athena import AthenaConfig

    conflicting_settings = {
        "aws_region": "us-west-1",
        "s3_staging_dir": "s3://sample-staging-dir/",
        "query_result_location": "s3://query_result_location",
        "work_group": "test-workgroup",
    }
    with pytest.raises(ValueError):
        AthenaConfig.parse_obj(conflicting_settings)
@pytest.mark.integration
def test_athena_config_staging_dir_is_set_as_query_result():
from datahub.ingestion.source.sql.athena import AthenaConfig
config = AthenaConfig.parse_obj(
{
"aws_region": "us-west-1",
"s3_staging_dir": "s3://sample-staging-dir/",
"work_group": "test-workgroup",
}
)
expected_config = AthenaConfig.parse_obj(
{
"aws_region": "us-west-1",
"query_result_location": "s3://sample-staging-dir/",
"work_group": "test-workgroup",
}
)
assert config.json() == expected_config.json()
@pytest.mark.integration
def test_athena_uri():
    """``get_sql_alchemy_url`` encodes the region, result location, work
    group, catalog and session duration into the pyathena REST URL."""
    from datahub.ingestion.source.sql.athena import AthenaConfig

    config = AthenaConfig.parse_obj(
        {
            "aws_region": "us-west-1",
            "query_result_location": "s3://query-result-location/",
            "work_group": "test-workgroup",
        }
    )
    expected_url = "awsathena+rest://@athena.us-west-1.amazonaws.com:443/?s3_staging_dir=s3%3A%2F%2Fquery-result-location%2F&work_group=test-workgroup&catalog_name=awsdatacatalog&duration_seconds=3600"
    assert config.get_sql_alchemy_url() == expected_url
@pytest.mark.integration
@freeze_time(FROZEN_TIME)  # pins datetime.now() so the timestamp assertions below are deterministic
def test_athena_get_table_properties():
    """get_table_properties() should flatten Athena table metadata into
    string-valued custom properties and derive an S3 urn from the
    table's storage location."""
    from pyathena.model import AthenaTableMetadata

    from datahub.ingestion.source.sql.athena import AthenaConfig, AthenaSource

    config = AthenaConfig.parse_obj(
        {
            "aws_region": "us-west-1",
            "s3_staging_dir": "s3://sample-staging-dir/",
            "work_group": "test-workgroup",
        }
    )
    schema: str = "test_schema"
    table: str = "test_table"

    # Canned response in the raw shape of the Athena GetTableMetadata API;
    # AthenaTableMetadata below parses it into pyathena's model object.
    table_metadata = {
        "TableMetadata": {
            "Name": "test",
            "TableType": "testType",
            "CreateTime": datetime.now(),
            "LastAccessTime": datetime.now(),
            "PartitionKeys": [
                {"Name": "testKey", "Type": "string", "Comment": "testComment"}
            ],
            "Parameters": {
                "comment": "testComment",
                "location": "s3://testLocation",
                "inputformat": "testInputFormat",
                "outputformat": "testOutputFormat",
                "serde.serialization.lib": "testSerde",
            },
        },
    }

    # The source reaches the metadata via inspector -> raw connection ->
    # cursor, so stub that chain and make the cursor's (private)
    # _get_table_metadata call return our canned metadata.
    mock_cursor = mock.MagicMock()
    mock_inspector = mock.MagicMock()
    mock_inspector.engine.raw_connection().cursor.return_value = mock_cursor
    mock_cursor._get_table_metadata.return_value = AthenaTableMetadata(
        response=table_metadata
    )

    ctx = PipelineContext(run_id="test")
    source = AthenaSource(config=config, ctx=ctx)
    description, custom_properties, location = source.get_table_properties(
        inspector=mock_inspector, table=table, schema=schema
    )
    # Timestamps come from FROZEN_TIME; partition keys are JSON-serialized.
    assert custom_properties == {
        "comment": "testComment",
        "create_time": "2020-04-14 07:00:00",
        "inputformat": "testInputFormat",
        "last_access_time": "2020-04-14 07:00:00",
        "location": "s3://testLocation",
        "outputformat": "testOutputFormat",
        "partition_keys": '[{"name": "testKey", "type": "string", "comment": "testComment"}]',
        "serde.serialization.lib": "testSerde",
        "table_type": "testType",
    }

    assert location == make_s3_urn("s3://testLocation", "PROD")
|
flexible
|
{
"blob_id": "1304b6373edeca394070b8a3d144608cf07172e3",
"index": 9448,
"step-1": "<mask token>\n\n\n@pytest.mark.integration\ndef test_athena_config_query_location_old_plus_new_value_not_allowed():\n from datahub.ingestion.source.sql.athena import AthenaConfig\n with pytest.raises(ValueError):\n AthenaConfig.parse_obj({'aws_region': 'us-west-1', 's3_staging_dir':\n 's3://sample-staging-dir/', 'query_result_location':\n 's3://query_result_location', 'work_group': 'test-workgroup'})\n\n\n@pytest.mark.integration\ndef test_athena_config_staging_dir_is_set_as_query_result():\n from datahub.ingestion.source.sql.athena import AthenaConfig\n config = AthenaConfig.parse_obj({'aws_region': 'us-west-1',\n 's3_staging_dir': 's3://sample-staging-dir/', 'work_group':\n 'test-workgroup'})\n expected_config = AthenaConfig.parse_obj({'aws_region': 'us-west-1',\n 'query_result_location': 's3://sample-staging-dir/', 'work_group':\n 'test-workgroup'})\n assert config.json() == expected_config.json()\n\n\n<mask token>\n\n\n@pytest.mark.integration\n@freeze_time(FROZEN_TIME)\ndef test_athena_get_table_properties():\n from pyathena.model import AthenaTableMetadata\n from datahub.ingestion.source.sql.athena import AthenaConfig, AthenaSource\n config = AthenaConfig.parse_obj({'aws_region': 'us-west-1',\n 's3_staging_dir': 's3://sample-staging-dir/', 'work_group':\n 'test-workgroup'})\n schema: str = 'test_schema'\n table: str = 'test_table'\n table_metadata = {'TableMetadata': {'Name': 'test', 'TableType':\n 'testType', 'CreateTime': datetime.now(), 'LastAccessTime':\n datetime.now(), 'PartitionKeys': [{'Name': 'testKey', 'Type':\n 'string', 'Comment': 'testComment'}], 'Parameters': {'comment':\n 'testComment', 'location': 's3://testLocation', 'inputformat':\n 'testInputFormat', 'outputformat': 'testOutputFormat',\n 'serde.serialization.lib': 'testSerde'}}}\n mock_cursor = mock.MagicMock()\n mock_inspector = mock.MagicMock()\n mock_inspector.engine.raw_connection().cursor.return_value = mock_cursor\n mock_cursor._get_table_metadata.return_value = 
AthenaTableMetadata(response\n =table_metadata)\n ctx = PipelineContext(run_id='test')\n source = AthenaSource(config=config, ctx=ctx)\n description, custom_properties, location = source.get_table_properties(\n inspector=mock_inspector, table=table, schema=schema)\n assert custom_properties == {'comment': 'testComment', 'create_time':\n '2020-04-14 07:00:00', 'inputformat': 'testInputFormat',\n 'last_access_time': '2020-04-14 07:00:00', 'location':\n 's3://testLocation', 'outputformat': 'testOutputFormat',\n 'partition_keys':\n '[{\"name\": \"testKey\", \"type\": \"string\", \"comment\": \"testComment\"}]',\n 'serde.serialization.lib': 'testSerde', 'table_type': 'testType'}\n assert location == make_s3_urn('s3://testLocation', 'PROD')\n",
"step-2": "<mask token>\n\n\n@pytest.mark.integration\ndef test_athena_config_query_location_old_plus_new_value_not_allowed():\n from datahub.ingestion.source.sql.athena import AthenaConfig\n with pytest.raises(ValueError):\n AthenaConfig.parse_obj({'aws_region': 'us-west-1', 's3_staging_dir':\n 's3://sample-staging-dir/', 'query_result_location':\n 's3://query_result_location', 'work_group': 'test-workgroup'})\n\n\n@pytest.mark.integration\ndef test_athena_config_staging_dir_is_set_as_query_result():\n from datahub.ingestion.source.sql.athena import AthenaConfig\n config = AthenaConfig.parse_obj({'aws_region': 'us-west-1',\n 's3_staging_dir': 's3://sample-staging-dir/', 'work_group':\n 'test-workgroup'})\n expected_config = AthenaConfig.parse_obj({'aws_region': 'us-west-1',\n 'query_result_location': 's3://sample-staging-dir/', 'work_group':\n 'test-workgroup'})\n assert config.json() == expected_config.json()\n\n\n@pytest.mark.integration\ndef test_athena_uri():\n from datahub.ingestion.source.sql.athena import AthenaConfig\n config = AthenaConfig.parse_obj({'aws_region': 'us-west-1',\n 'query_result_location': 's3://query-result-location/',\n 'work_group': 'test-workgroup'})\n assert config.get_sql_alchemy_url(\n ) == 'awsathena+rest://@athena.us-west-1.amazonaws.com:443/?s3_staging_dir=s3%3A%2F%2Fquery-result-location%2F&work_group=test-workgroup&catalog_name=awsdatacatalog&duration_seconds=3600'\n\n\n@pytest.mark.integration\n@freeze_time(FROZEN_TIME)\ndef test_athena_get_table_properties():\n from pyathena.model import AthenaTableMetadata\n from datahub.ingestion.source.sql.athena import AthenaConfig, AthenaSource\n config = AthenaConfig.parse_obj({'aws_region': 'us-west-1',\n 's3_staging_dir': 's3://sample-staging-dir/', 'work_group':\n 'test-workgroup'})\n schema: str = 'test_schema'\n table: str = 'test_table'\n table_metadata = {'TableMetadata': {'Name': 'test', 'TableType':\n 'testType', 'CreateTime': datetime.now(), 'LastAccessTime':\n datetime.now(), 
'PartitionKeys': [{'Name': 'testKey', 'Type':\n 'string', 'Comment': 'testComment'}], 'Parameters': {'comment':\n 'testComment', 'location': 's3://testLocation', 'inputformat':\n 'testInputFormat', 'outputformat': 'testOutputFormat',\n 'serde.serialization.lib': 'testSerde'}}}\n mock_cursor = mock.MagicMock()\n mock_inspector = mock.MagicMock()\n mock_inspector.engine.raw_connection().cursor.return_value = mock_cursor\n mock_cursor._get_table_metadata.return_value = AthenaTableMetadata(response\n =table_metadata)\n ctx = PipelineContext(run_id='test')\n source = AthenaSource(config=config, ctx=ctx)\n description, custom_properties, location = source.get_table_properties(\n inspector=mock_inspector, table=table, schema=schema)\n assert custom_properties == {'comment': 'testComment', 'create_time':\n '2020-04-14 07:00:00', 'inputformat': 'testInputFormat',\n 'last_access_time': '2020-04-14 07:00:00', 'location':\n 's3://testLocation', 'outputformat': 'testOutputFormat',\n 'partition_keys':\n '[{\"name\": \"testKey\", \"type\": \"string\", \"comment\": \"testComment\"}]',\n 'serde.serialization.lib': 'testSerde', 'table_type': 'testType'}\n assert location == make_s3_urn('s3://testLocation', 'PROD')\n",
"step-3": "<mask token>\nFROZEN_TIME = '2020-04-14 07:00:00'\n\n\n@pytest.mark.integration\ndef test_athena_config_query_location_old_plus_new_value_not_allowed():\n from datahub.ingestion.source.sql.athena import AthenaConfig\n with pytest.raises(ValueError):\n AthenaConfig.parse_obj({'aws_region': 'us-west-1', 's3_staging_dir':\n 's3://sample-staging-dir/', 'query_result_location':\n 's3://query_result_location', 'work_group': 'test-workgroup'})\n\n\n@pytest.mark.integration\ndef test_athena_config_staging_dir_is_set_as_query_result():\n from datahub.ingestion.source.sql.athena import AthenaConfig\n config = AthenaConfig.parse_obj({'aws_region': 'us-west-1',\n 's3_staging_dir': 's3://sample-staging-dir/', 'work_group':\n 'test-workgroup'})\n expected_config = AthenaConfig.parse_obj({'aws_region': 'us-west-1',\n 'query_result_location': 's3://sample-staging-dir/', 'work_group':\n 'test-workgroup'})\n assert config.json() == expected_config.json()\n\n\n@pytest.mark.integration\ndef test_athena_uri():\n from datahub.ingestion.source.sql.athena import AthenaConfig\n config = AthenaConfig.parse_obj({'aws_region': 'us-west-1',\n 'query_result_location': 's3://query-result-location/',\n 'work_group': 'test-workgroup'})\n assert config.get_sql_alchemy_url(\n ) == 'awsathena+rest://@athena.us-west-1.amazonaws.com:443/?s3_staging_dir=s3%3A%2F%2Fquery-result-location%2F&work_group=test-workgroup&catalog_name=awsdatacatalog&duration_seconds=3600'\n\n\n@pytest.mark.integration\n@freeze_time(FROZEN_TIME)\ndef test_athena_get_table_properties():\n from pyathena.model import AthenaTableMetadata\n from datahub.ingestion.source.sql.athena import AthenaConfig, AthenaSource\n config = AthenaConfig.parse_obj({'aws_region': 'us-west-1',\n 's3_staging_dir': 's3://sample-staging-dir/', 'work_group':\n 'test-workgroup'})\n schema: str = 'test_schema'\n table: str = 'test_table'\n table_metadata = {'TableMetadata': {'Name': 'test', 'TableType':\n 'testType', 'CreateTime': datetime.now(), 
'LastAccessTime':\n datetime.now(), 'PartitionKeys': [{'Name': 'testKey', 'Type':\n 'string', 'Comment': 'testComment'}], 'Parameters': {'comment':\n 'testComment', 'location': 's3://testLocation', 'inputformat':\n 'testInputFormat', 'outputformat': 'testOutputFormat',\n 'serde.serialization.lib': 'testSerde'}}}\n mock_cursor = mock.MagicMock()\n mock_inspector = mock.MagicMock()\n mock_inspector.engine.raw_connection().cursor.return_value = mock_cursor\n mock_cursor._get_table_metadata.return_value = AthenaTableMetadata(response\n =table_metadata)\n ctx = PipelineContext(run_id='test')\n source = AthenaSource(config=config, ctx=ctx)\n description, custom_properties, location = source.get_table_properties(\n inspector=mock_inspector, table=table, schema=schema)\n assert custom_properties == {'comment': 'testComment', 'create_time':\n '2020-04-14 07:00:00', 'inputformat': 'testInputFormat',\n 'last_access_time': '2020-04-14 07:00:00', 'location':\n 's3://testLocation', 'outputformat': 'testOutputFormat',\n 'partition_keys':\n '[{\"name\": \"testKey\", \"type\": \"string\", \"comment\": \"testComment\"}]',\n 'serde.serialization.lib': 'testSerde', 'table_type': 'testType'}\n assert location == make_s3_urn('s3://testLocation', 'PROD')\n",
"step-4": "from datetime import datetime\nfrom unittest import mock\nimport pytest\nfrom freezegun import freeze_time\nfrom datahub.ingestion.api.common import PipelineContext\nfrom src.datahub.ingestion.source.aws.s3_util import make_s3_urn\nFROZEN_TIME = '2020-04-14 07:00:00'\n\n\n@pytest.mark.integration\ndef test_athena_config_query_location_old_plus_new_value_not_allowed():\n from datahub.ingestion.source.sql.athena import AthenaConfig\n with pytest.raises(ValueError):\n AthenaConfig.parse_obj({'aws_region': 'us-west-1', 's3_staging_dir':\n 's3://sample-staging-dir/', 'query_result_location':\n 's3://query_result_location', 'work_group': 'test-workgroup'})\n\n\n@pytest.mark.integration\ndef test_athena_config_staging_dir_is_set_as_query_result():\n from datahub.ingestion.source.sql.athena import AthenaConfig\n config = AthenaConfig.parse_obj({'aws_region': 'us-west-1',\n 's3_staging_dir': 's3://sample-staging-dir/', 'work_group':\n 'test-workgroup'})\n expected_config = AthenaConfig.parse_obj({'aws_region': 'us-west-1',\n 'query_result_location': 's3://sample-staging-dir/', 'work_group':\n 'test-workgroup'})\n assert config.json() == expected_config.json()\n\n\n@pytest.mark.integration\ndef test_athena_uri():\n from datahub.ingestion.source.sql.athena import AthenaConfig\n config = AthenaConfig.parse_obj({'aws_region': 'us-west-1',\n 'query_result_location': 's3://query-result-location/',\n 'work_group': 'test-workgroup'})\n assert config.get_sql_alchemy_url(\n ) == 'awsathena+rest://@athena.us-west-1.amazonaws.com:443/?s3_staging_dir=s3%3A%2F%2Fquery-result-location%2F&work_group=test-workgroup&catalog_name=awsdatacatalog&duration_seconds=3600'\n\n\n@pytest.mark.integration\n@freeze_time(FROZEN_TIME)\ndef test_athena_get_table_properties():\n from pyathena.model import AthenaTableMetadata\n from datahub.ingestion.source.sql.athena import AthenaConfig, AthenaSource\n config = AthenaConfig.parse_obj({'aws_region': 'us-west-1',\n 's3_staging_dir': 
's3://sample-staging-dir/', 'work_group':\n 'test-workgroup'})\n schema: str = 'test_schema'\n table: str = 'test_table'\n table_metadata = {'TableMetadata': {'Name': 'test', 'TableType':\n 'testType', 'CreateTime': datetime.now(), 'LastAccessTime':\n datetime.now(), 'PartitionKeys': [{'Name': 'testKey', 'Type':\n 'string', 'Comment': 'testComment'}], 'Parameters': {'comment':\n 'testComment', 'location': 's3://testLocation', 'inputformat':\n 'testInputFormat', 'outputformat': 'testOutputFormat',\n 'serde.serialization.lib': 'testSerde'}}}\n mock_cursor = mock.MagicMock()\n mock_inspector = mock.MagicMock()\n mock_inspector.engine.raw_connection().cursor.return_value = mock_cursor\n mock_cursor._get_table_metadata.return_value = AthenaTableMetadata(response\n =table_metadata)\n ctx = PipelineContext(run_id='test')\n source = AthenaSource(config=config, ctx=ctx)\n description, custom_properties, location = source.get_table_properties(\n inspector=mock_inspector, table=table, schema=schema)\n assert custom_properties == {'comment': 'testComment', 'create_time':\n '2020-04-14 07:00:00', 'inputformat': 'testInputFormat',\n 'last_access_time': '2020-04-14 07:00:00', 'location':\n 's3://testLocation', 'outputformat': 'testOutputFormat',\n 'partition_keys':\n '[{\"name\": \"testKey\", \"type\": \"string\", \"comment\": \"testComment\"}]',\n 'serde.serialization.lib': 'testSerde', 'table_type': 'testType'}\n assert location == make_s3_urn('s3://testLocation', 'PROD')\n",
"step-5": "from datetime import datetime\nfrom unittest import mock\n\nimport pytest\nfrom freezegun import freeze_time\n\nfrom datahub.ingestion.api.common import PipelineContext\nfrom src.datahub.ingestion.source.aws.s3_util import make_s3_urn\n\nFROZEN_TIME = \"2020-04-14 07:00:00\"\n\n\n@pytest.mark.integration\ndef test_athena_config_query_location_old_plus_new_value_not_allowed():\n from datahub.ingestion.source.sql.athena import AthenaConfig\n\n with pytest.raises(ValueError):\n AthenaConfig.parse_obj(\n {\n \"aws_region\": \"us-west-1\",\n \"s3_staging_dir\": \"s3://sample-staging-dir/\",\n \"query_result_location\": \"s3://query_result_location\",\n \"work_group\": \"test-workgroup\",\n }\n )\n\n\n@pytest.mark.integration\ndef test_athena_config_staging_dir_is_set_as_query_result():\n from datahub.ingestion.source.sql.athena import AthenaConfig\n\n config = AthenaConfig.parse_obj(\n {\n \"aws_region\": \"us-west-1\",\n \"s3_staging_dir\": \"s3://sample-staging-dir/\",\n \"work_group\": \"test-workgroup\",\n }\n )\n\n expected_config = AthenaConfig.parse_obj(\n {\n \"aws_region\": \"us-west-1\",\n \"query_result_location\": \"s3://sample-staging-dir/\",\n \"work_group\": \"test-workgroup\",\n }\n )\n\n assert config.json() == expected_config.json()\n\n\n@pytest.mark.integration\ndef test_athena_uri():\n from datahub.ingestion.source.sql.athena import AthenaConfig\n\n config = AthenaConfig.parse_obj(\n {\n \"aws_region\": \"us-west-1\",\n \"query_result_location\": \"s3://query-result-location/\",\n \"work_group\": \"test-workgroup\",\n }\n )\n assert (\n config.get_sql_alchemy_url()\n == \"awsathena+rest://@athena.us-west-1.amazonaws.com:443/?s3_staging_dir=s3%3A%2F%2Fquery-result-location%2F&work_group=test-workgroup&catalog_name=awsdatacatalog&duration_seconds=3600\"\n )\n\n\n@pytest.mark.integration\n@freeze_time(FROZEN_TIME)\ndef test_athena_get_table_properties():\n from pyathena.model import AthenaTableMetadata\n\n from 
datahub.ingestion.source.sql.athena import AthenaConfig, AthenaSource\n\n config = AthenaConfig.parse_obj(\n {\n \"aws_region\": \"us-west-1\",\n \"s3_staging_dir\": \"s3://sample-staging-dir/\",\n \"work_group\": \"test-workgroup\",\n }\n )\n schema: str = \"test_schema\"\n table: str = \"test_table\"\n\n table_metadata = {\n \"TableMetadata\": {\n \"Name\": \"test\",\n \"TableType\": \"testType\",\n \"CreateTime\": datetime.now(),\n \"LastAccessTime\": datetime.now(),\n \"PartitionKeys\": [\n {\"Name\": \"testKey\", \"Type\": \"string\", \"Comment\": \"testComment\"}\n ],\n \"Parameters\": {\n \"comment\": \"testComment\",\n \"location\": \"s3://testLocation\",\n \"inputformat\": \"testInputFormat\",\n \"outputformat\": \"testOutputFormat\",\n \"serde.serialization.lib\": \"testSerde\",\n },\n },\n }\n\n mock_cursor = mock.MagicMock()\n mock_inspector = mock.MagicMock()\n mock_inspector.engine.raw_connection().cursor.return_value = mock_cursor\n mock_cursor._get_table_metadata.return_value = AthenaTableMetadata(\n response=table_metadata\n )\n\n ctx = PipelineContext(run_id=\"test\")\n source = AthenaSource(config=config, ctx=ctx)\n description, custom_properties, location = source.get_table_properties(\n inspector=mock_inspector, table=table, schema=schema\n )\n assert custom_properties == {\n \"comment\": \"testComment\",\n \"create_time\": \"2020-04-14 07:00:00\",\n \"inputformat\": \"testInputFormat\",\n \"last_access_time\": \"2020-04-14 07:00:00\",\n \"location\": \"s3://testLocation\",\n \"outputformat\": \"testOutputFormat\",\n \"partition_keys\": '[{\"name\": \"testKey\", \"type\": \"string\", \"comment\": \"testComment\"}]',\n \"serde.serialization.lib\": \"testSerde\",\n \"table_type\": \"testType\",\n }\n\n assert location == make_s3_urn(\"s3://testLocation\", \"PROD\")\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
def dump_all_sargas(base_dir):
    """Download every sarga of kaandas 1-5 from the IIT-K Valmiki
    Ramayana site, writing each one to <base_dir>/<kaanda>/<NNN>.md."""
    kaanda_indices = book_data.get_subunit_list(
        file_path=unit_info_file, unit_path_list=[])
    for kaanda_index in kaanda_indices:
        if kaanda_index >= 6:
            # Kaandas >= 6 are skipped — reason not visible in this file.
            continue
        for sarga_index in book_data.get_subunit_list(
                file_path=unit_info_file, unit_path_list=[kaanda_index]):
            logging.info('Kanda %d Sarga %d', kaanda_index, sarga_index)
            # Zero-padded file names keep sargas lexically ordered.
            out_path = os.path.join(
                base_dir, '%d' % kaanda_index, '%03d.md' % sarga_index)
            url = (
                'https://www.valmiki.iitk.ac.in/sloka?field_kanda_tid=%d&language=dv&field_sarga_value=%d'
                % (kaanda_index, sarga_index))
            dump_sarga(url=url, out_path=out_path, sarga_id=sarga_index)
def dump_all_sargas(base_dir):
for kaanda_index in book_data.get_subunit_list(file_path=unit_info_file,
unit_path_list=[]):
if kaanda_index >= 6:
continue
sarga_list = book_data.get_subunit_list(file_path=unit_info_file,
unit_path_list=[kaanda_index])
for sarga_index in sarga_list:
logging.info('Kanda %d Sarga %d', kaanda_index, sarga_index)
out_path = os.path.join(base_dir, '%d' % kaanda_index,
'%03d.md' % sarga_index)
url = (
'https://www.valmiki.iitk.ac.in/sloka?field_kanda_tid=%d&language=dv&field_sarga_value=%d'
% (kaanda_index, sarga_index))
dump_sarga(url=url, out_path=out_path, sarga_id=sarga_index)
def dump_commentary(base_dir, commentary_id):
for kaanda_index in book_data.get_subunit_list(file_path=unit_info_file,
unit_path_list=[]):
if kaanda_index >= 6:
continue
sarga_list = book_data.get_subunit_list(file_path=unit_info_file,
unit_path_list=[kaanda_index])
for sarga_index in sarga_list:
logging.info('Kanda %d Sarga %d', kaanda_index, sarga_index)
out_path = os.path.join(base_dir, '%d' % kaanda_index,
'%03d.md' % sarga_index)
url = (
'https://www.valmiki.iitk.ac.in/commentaries?language=dv&field_commnetary_tid=%d&field_kanda_tid=%d&field_sarga_value=%d'
% (commentary_id, kaanda_index, sarga_index))
title_maker = lambda soup, title_prefix: sanscript.transliterate(
'%03d' % sarga_index, sanscript.IAST, sanscript.DEVANAGARI)
iitk.dump_item(item_url=url, outfile_path=out_path, title_maker
=title_maker)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def dump_sarga(url, out_path, sarga_id, dry_run=False):
page_html = urlopen(url)
soup = BeautifulSoup(page_html.read(), 'lxml')
shloka_tags = soup.select('.views-row')
sarga_content = ''
for index, shloka_tag in enumerate(tqdm(shloka_tags)):
fields = shloka_tag.select('.field-content')
if index == 0:
sarga_summary = fields[0].contents[0].replace('[', '').replace(']',
'')
shloka = souper.get_md_paragraph(fields[0].contents[1:])
sarga_content = get_md_with_pandoc(content_in=sarga_summary,
source_format='html')
else:
shloka = souper.get_md_paragraph(fields[0].contents)
shloka = shloka.replace(':', 'ः')
word_meaning = souper.get_md_paragraph(fields[1].contents).replace(':',
'ः')
shloka_meaning = souper.get_md_paragraph(fields[2].contents)
content = textwrap.dedent(
"""
## श्लोकः
### मूलम्
%s
### शब्दार्थः
%s
### आङ्ग्लानुवादः
%s
"""
) % (shloka, word_meaning, shloka_meaning)
sarga_content = '%s\n\n%s' % (sarga_content, content)
md_file = MdFile(file_path=out_path)
sarga_content = sarga_content.replace(':', 'ः').replace('इत्यार्षे',
'\n## समाप्तिः\n')
md_file.dump_to_file(metadata={'title': '%03d' % sarga_id}, content=
sarga_content, dry_run=dry_run)
def dump_all_sargas(base_dir):
for kaanda_index in book_data.get_subunit_list(file_path=unit_info_file,
unit_path_list=[]):
if kaanda_index >= 6:
continue
sarga_list = book_data.get_subunit_list(file_path=unit_info_file,
unit_path_list=[kaanda_index])
for sarga_index in sarga_list:
logging.info('Kanda %d Sarga %d', kaanda_index, sarga_index)
out_path = os.path.join(base_dir, '%d' % kaanda_index,
'%03d.md' % sarga_index)
url = (
'https://www.valmiki.iitk.ac.in/sloka?field_kanda_tid=%d&language=dv&field_sarga_value=%d'
% (kaanda_index, sarga_index))
dump_sarga(url=url, out_path=out_path, sarga_id=sarga_index)
def dump_all_sargas(base_dir):
for kaanda_index in book_data.get_subunit_list(file_path=unit_info_file,
unit_path_list=[]):
if kaanda_index >= 6:
continue
sarga_list = book_data.get_subunit_list(file_path=unit_info_file,
unit_path_list=[kaanda_index])
for sarga_index in sarga_list:
logging.info('Kanda %d Sarga %d', kaanda_index, sarga_index)
out_path = os.path.join(base_dir, '%d' % kaanda_index,
'%03d.md' % sarga_index)
url = (
'https://www.valmiki.iitk.ac.in/sloka?field_kanda_tid=%d&language=dv&field_sarga_value=%d'
% (kaanda_index, sarga_index))
dump_sarga(url=url, out_path=out_path, sarga_id=sarga_index)
def dump_commentary(base_dir, commentary_id):
for kaanda_index in book_data.get_subunit_list(file_path=unit_info_file,
unit_path_list=[]):
if kaanda_index >= 6:
continue
sarga_list = book_data.get_subunit_list(file_path=unit_info_file,
unit_path_list=[kaanda_index])
for sarga_index in sarga_list:
logging.info('Kanda %d Sarga %d', kaanda_index, sarga_index)
out_path = os.path.join(base_dir, '%d' % kaanda_index,
'%03d.md' % sarga_index)
url = (
'https://www.valmiki.iitk.ac.in/commentaries?language=dv&field_commnetary_tid=%d&field_kanda_tid=%d&field_sarga_value=%d'
% (commentary_id, kaanda_index, sarga_index))
title_maker = lambda soup, title_prefix: sanscript.transliterate(
'%03d' % sarga_index, sanscript.IAST, sanscript.DEVANAGARI)
iitk.dump_item(item_url=url, outfile_path=out_path, title_maker
=title_maker)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for handler in logging.root.handlers[:]:
logging.root.removeHandler(handler)
logging.basicConfig(level=logging.DEBUG, format=
'%(levelname)s:%(asctime)s:%(module)s:%(lineno)d %(message)s')
unit_info_file = os.path.join(os.path.dirname(book_data.__file__),
'data/book_data/raamaayanam/andhra.json')
def dump_sarga(url, out_path, sarga_id, dry_run=False):
page_html = urlopen(url)
soup = BeautifulSoup(page_html.read(), 'lxml')
shloka_tags = soup.select('.views-row')
sarga_content = ''
for index, shloka_tag in enumerate(tqdm(shloka_tags)):
fields = shloka_tag.select('.field-content')
if index == 0:
sarga_summary = fields[0].contents[0].replace('[', '').replace(']',
'')
shloka = souper.get_md_paragraph(fields[0].contents[1:])
sarga_content = get_md_with_pandoc(content_in=sarga_summary,
source_format='html')
else:
shloka = souper.get_md_paragraph(fields[0].contents)
shloka = shloka.replace(':', 'ः')
word_meaning = souper.get_md_paragraph(fields[1].contents).replace(':',
'ः')
shloka_meaning = souper.get_md_paragraph(fields[2].contents)
content = textwrap.dedent(
"""
## श्लोकः
### मूलम्
%s
### शब्दार्थः
%s
### आङ्ग्लानुवादः
%s
"""
) % (shloka, word_meaning, shloka_meaning)
sarga_content = '%s\n\n%s' % (sarga_content, content)
md_file = MdFile(file_path=out_path)
sarga_content = sarga_content.replace(':', 'ः').replace('इत्यार्षे',
'\n## समाप्तिः\n')
md_file.dump_to_file(metadata={'title': '%03d' % sarga_id}, content=
sarga_content, dry_run=dry_run)
def dump_all_sargas(base_dir):
for kaanda_index in book_data.get_subunit_list(file_path=unit_info_file,
unit_path_list=[]):
if kaanda_index >= 6:
continue
sarga_list = book_data.get_subunit_list(file_path=unit_info_file,
unit_path_list=[kaanda_index])
for sarga_index in sarga_list:
logging.info('Kanda %d Sarga %d', kaanda_index, sarga_index)
out_path = os.path.join(base_dir, '%d' % kaanda_index,
'%03d.md' % sarga_index)
url = (
'https://www.valmiki.iitk.ac.in/sloka?field_kanda_tid=%d&language=dv&field_sarga_value=%d'
% (kaanda_index, sarga_index))
dump_sarga(url=url, out_path=out_path, sarga_id=sarga_index)
def dump_all_sargas(base_dir):
for kaanda_index in book_data.get_subunit_list(file_path=unit_info_file,
unit_path_list=[]):
if kaanda_index >= 6:
continue
sarga_list = book_data.get_subunit_list(file_path=unit_info_file,
unit_path_list=[kaanda_index])
for sarga_index in sarga_list:
logging.info('Kanda %d Sarga %d', kaanda_index, sarga_index)
out_path = os.path.join(base_dir, '%d' % kaanda_index,
'%03d.md' % sarga_index)
url = (
'https://www.valmiki.iitk.ac.in/sloka?field_kanda_tid=%d&language=dv&field_sarga_value=%d'
% (kaanda_index, sarga_index))
dump_sarga(url=url, out_path=out_path, sarga_id=sarga_index)
def dump_commentary(base_dir, commentary_id):
for kaanda_index in book_data.get_subunit_list(file_path=unit_info_file,
unit_path_list=[]):
if kaanda_index >= 6:
continue
sarga_list = book_data.get_subunit_list(file_path=unit_info_file,
unit_path_list=[kaanda_index])
for sarga_index in sarga_list:
logging.info('Kanda %d Sarga %d', kaanda_index, sarga_index)
out_path = os.path.join(base_dir, '%d' % kaanda_index,
'%03d.md' % sarga_index)
url = (
'https://www.valmiki.iitk.ac.in/commentaries?language=dv&field_commnetary_tid=%d&field_kanda_tid=%d&field_sarga_value=%d'
% (commentary_id, kaanda_index, sarga_index))
title_maker = lambda soup, title_prefix: sanscript.transliterate(
'%03d' % sarga_index, sanscript.IAST, sanscript.DEVANAGARI)
iitk.dump_item(item_url=url, outfile_path=out_path, title_maker
=title_maker)
if __name__ == '__main__':
pass
<|reserved_special_token_1|>
import logging
import os
import textwrap
from urllib.request import urlopen
from bs4 import BeautifulSoup
from tqdm import tqdm
from doc_curation import book_data
from doc_curation.md import get_md_with_pandoc
from doc_curation.md.file import MdFile
from doc_curation.scraping.misc_sites import iitk
from doc_curation.scraping.html_scraper import souper
from indic_transliteration import sanscript
for handler in logging.root.handlers[:]:
logging.root.removeHandler(handler)
logging.basicConfig(level=logging.DEBUG, format=
'%(levelname)s:%(asctime)s:%(module)s:%(lineno)d %(message)s')
unit_info_file = os.path.join(os.path.dirname(book_data.__file__),
'data/book_data/raamaayanam/andhra.json')
def dump_sarga(url, out_path, sarga_id, dry_run=False):
page_html = urlopen(url)
soup = BeautifulSoup(page_html.read(), 'lxml')
shloka_tags = soup.select('.views-row')
sarga_content = ''
for index, shloka_tag in enumerate(tqdm(shloka_tags)):
fields = shloka_tag.select('.field-content')
if index == 0:
sarga_summary = fields[0].contents[0].replace('[', '').replace(']',
'')
shloka = souper.get_md_paragraph(fields[0].contents[1:])
sarga_content = get_md_with_pandoc(content_in=sarga_summary,
source_format='html')
else:
shloka = souper.get_md_paragraph(fields[0].contents)
shloka = shloka.replace(':', 'ः')
word_meaning = souper.get_md_paragraph(fields[1].contents).replace(':',
'ः')
shloka_meaning = souper.get_md_paragraph(fields[2].contents)
content = textwrap.dedent(
"""
## श्लोकः
### मूलम्
%s
### शब्दार्थः
%s
### आङ्ग्लानुवादः
%s
"""
) % (shloka, word_meaning, shloka_meaning)
sarga_content = '%s\n\n%s' % (sarga_content, content)
md_file = MdFile(file_path=out_path)
sarga_content = sarga_content.replace(':', 'ः').replace('इत्यार्षे',
'\n## समाप्तिः\n')
md_file.dump_to_file(metadata={'title': '%03d' % sarga_id}, content=
sarga_content, dry_run=dry_run)
def dump_all_sargas(base_dir):
for kaanda_index in book_data.get_subunit_list(file_path=unit_info_file,
unit_path_list=[]):
if kaanda_index >= 6:
continue
sarga_list = book_data.get_subunit_list(file_path=unit_info_file,
unit_path_list=[kaanda_index])
for sarga_index in sarga_list:
logging.info('Kanda %d Sarga %d', kaanda_index, sarga_index)
out_path = os.path.join(base_dir, '%d' % kaanda_index,
'%03d.md' % sarga_index)
url = (
'https://www.valmiki.iitk.ac.in/sloka?field_kanda_tid=%d&language=dv&field_sarga_value=%d'
% (kaanda_index, sarga_index))
dump_sarga(url=url, out_path=out_path, sarga_id=sarga_index)
def dump_all_sargas(base_dir):
for kaanda_index in book_data.get_subunit_list(file_path=unit_info_file,
unit_path_list=[]):
if kaanda_index >= 6:
continue
sarga_list = book_data.get_subunit_list(file_path=unit_info_file,
unit_path_list=[kaanda_index])
for sarga_index in sarga_list:
logging.info('Kanda %d Sarga %d', kaanda_index, sarga_index)
out_path = os.path.join(base_dir, '%d' % kaanda_index,
'%03d.md' % sarga_index)
url = (
'https://www.valmiki.iitk.ac.in/sloka?field_kanda_tid=%d&language=dv&field_sarga_value=%d'
% (kaanda_index, sarga_index))
dump_sarga(url=url, out_path=out_path, sarga_id=sarga_index)
def dump_commentary(base_dir, commentary_id):
for kaanda_index in book_data.get_subunit_list(file_path=unit_info_file,
unit_path_list=[]):
if kaanda_index >= 6:
continue
sarga_list = book_data.get_subunit_list(file_path=unit_info_file,
unit_path_list=[kaanda_index])
for sarga_index in sarga_list:
logging.info('Kanda %d Sarga %d', kaanda_index, sarga_index)
out_path = os.path.join(base_dir, '%d' % kaanda_index,
'%03d.md' % sarga_index)
url = (
'https://www.valmiki.iitk.ac.in/commentaries?language=dv&field_commnetary_tid=%d&field_kanda_tid=%d&field_sarga_value=%d'
% (commentary_id, kaanda_index, sarga_index))
title_maker = lambda soup, title_prefix: sanscript.transliterate(
'%03d' % sarga_index, sanscript.IAST, sanscript.DEVANAGARI)
iitk.dump_item(item_url=url, outfile_path=out_path, title_maker
=title_maker)
if __name__ == '__main__':
pass
<|reserved_special_token_1|>
import logging
import os
import textwrap
from urllib.request import urlopen
from bs4 import BeautifulSoup
from tqdm import tqdm
from doc_curation import book_data
from doc_curation.md import get_md_with_pandoc
from doc_curation.md.file import MdFile
from doc_curation.scraping.misc_sites import iitk
from doc_curation.scraping.html_scraper import souper
from indic_transliteration import sanscript
# Remove all handlers associated with the root logger object.
for handler in logging.root.handlers[:]:
logging.root.removeHandler(handler)
logging.basicConfig(
level=logging.DEBUG,
format="%(levelname)s:%(asctime)s:%(module)s:%(lineno)d %(message)s")
unit_info_file = os.path.join(os.path.dirname(book_data.__file__), "data/book_data/raamaayanam/andhra.json")
def dump_sarga(url, out_path, sarga_id, dry_run=False):
  """Scrape a single sarga page from the IITK Valmiki-Ramayana site into a markdown file.

  Each ``.views-row`` on the page carries three ``.field-content`` cells:
  the verse, its word-by-word meaning, and an English translation. The very
  first row additionally starts with a bracketed HTML sarga summary, which is
  converted to markdown and placed at the top of the document.

  :param url: sloka-listing URL for one (kaanda, sarga) pair.
  :param out_path: destination path of the markdown file.
  :param sarga_id: sarga number; used (zero-padded) as the document title.
  :param dry_run: forwarded to MdFile.dump_to_file; when True nothing is written.
  """
  verse_template = textwrap.dedent("""
  ## श्लोकः
  ### मूलम्
  %s

  ### शब्दार्थः
  %s

  ### आङ्ग्लानुवादः
  %s
  """)
  markup = BeautifulSoup(urlopen(url).read(), 'lxml')
  pieces = []
  for row_index, row in enumerate(tqdm(markup.select(".views-row"))):
    cells = row.select(".field-content")
    if row_index == 0:
      # First row: contents[0] is the bracketed sarga summary (HTML);
      # the remainder of the cell is the first verse itself.
      summary_html = cells[0].contents[0].replace("[", "").replace("]", "")
      pieces.append(get_md_with_pandoc(content_in=summary_html, source_format="html"))
      verse = souper.get_md_paragraph(cells[0].contents[1:])
    else:
      verse = souper.get_md_paragraph(cells[0].contents)
    # ASCII colons inside the devanagari text are really visargas.
    verse = verse.replace(":", "ः")
    word_meaning = souper.get_md_paragraph(cells[1].contents).replace(":", "ः")
    translation = souper.get_md_paragraph(cells[2].contents)
    pieces.append(verse_template % (verse, word_meaning, translation))
  sarga_content = "\n\n".join(pieces)
  # Global cleanup: visarga fix everywhere, and turn the traditional
  # "इत्यार्षे" colophon marker into an explicit closing section.
  sarga_content = sarga_content.replace(":", "ः").replace("इत्यार्षे", "\n## समाप्तिः\n")
  MdFile(file_path=out_path).dump_to_file(
    metadata={"title": "%03d" % sarga_id}, content=sarga_content, dry_run=dry_run)
def dump_all_sargas(base_dir):
  """Dump every sarga of kaandas 1-5 as markdown under base_dir/<kaanda>/<NNN>.md.

  NOTE(review): an identical ``dump_all_sargas`` definition appears again
  immediately below and shadows this copy at import time; one of the two
  should be removed.
  """
  for kaanda_index in book_data.get_subunit_list(file_path=unit_info_file, unit_path_list=[]):
    # Only the first five kaandas are dumped; anything beyond is skipped.
    if kaanda_index >= 6:
      continue
    sarga_list = book_data.get_subunit_list(file_path=unit_info_file, unit_path_list=[kaanda_index])
    for sarga_index in sarga_list:
      logging.info("Kanda %d Sarga %d", kaanda_index, sarga_index)
      # e.g. <base_dir>/2/031.md for kaanda 2, sarga 31.
      out_path = os.path.join(base_dir, "%d" % kaanda_index, "%03d.md" % sarga_index)
      url = "https://www.valmiki.iitk.ac.in/sloka?field_kanda_tid=%d&language=dv&field_sarga_value=%d" % (
        kaanda_index, sarga_index)
      dump_sarga(url=url, out_path=out_path, sarga_id=sarga_index)
def dump_all_sargas(base_dir):
  """Dump every sarga of kaandas 1-5 as markdown under base_dir/<kaanda>/<NNN>.md.

  NOTE(review): this is a verbatim re-definition of the ``dump_all_sargas``
  defined just above; this second copy is the one that survives at import
  time. The duplicate is dead weight and should be removed.
  """
  for kaanda_index in book_data.get_subunit_list(file_path=unit_info_file, unit_path_list=[]):
    # Only the first five kaandas are dumped; anything beyond is skipped.
    if kaanda_index >= 6:
      continue
    sarga_list = book_data.get_subunit_list(file_path=unit_info_file, unit_path_list=[kaanda_index])
    for sarga_index in sarga_list:
      logging.info("Kanda %d Sarga %d", kaanda_index, sarga_index)
      out_path = os.path.join(base_dir, "%d" % kaanda_index, "%03d.md" % sarga_index)
      url = "https://www.valmiki.iitk.ac.in/sloka?field_kanda_tid=%d&language=dv&field_sarga_value=%d" % (
        kaanda_index, sarga_index)
      dump_sarga(url=url, out_path=out_path, sarga_id=sarga_index)
def dump_commentary(base_dir, commentary_id):
  """Scrape one commentary (identified by the site's id) for every sarga of kaandas 1-5.

  Output layout mirrors dump_all_sargas: base_dir/<kaanda>/<NNN>.md, one file
  per sarga, titled with the zero-padded sarga number rendered in Devanagari.

  :param base_dir: root directory the markdown tree is written under.
  :param commentary_id: the site's field_commnetary_tid value selecting the commentary.
  """
  all_kaandas = book_data.get_subunit_list(file_path=unit_info_file, unit_path_list=[])
  # Only kaandas 1-5 are wanted; filter out anything beyond.
  for kaanda_index in [k for k in all_kaandas if k < 6]:
    sargas = book_data.get_subunit_list(file_path=unit_info_file, unit_path_list=[kaanda_index])
    for sarga_index in sargas:
      logging.info("Kanda %d Sarga %d", kaanda_index, sarga_index)
      out_path = os.path.join(base_dir, "%d" % kaanda_index, "%03d.md" % sarga_index)
      url = "https://www.valmiki.iitk.ac.in/commentaries?language=dv&field_commnetary_tid=%d&field_kanda_tid=%d&field_sarga_value=%d" % (commentary_id, kaanda_index, sarga_index)

      def title_maker(soup, title_prefix, _sarga_index=sarga_index):
        # Title is just the zero-padded sarga number in Devanagari digits.
        return sanscript.transliterate("%03d" % _sarga_index, sanscript.IAST, sanscript.DEVANAGARI)

      iitk.dump_item(item_url=url, outfile_path=out_path, title_maker=title_maker)
if __name__ == '__main__':
  # Manual driver: uncomment exactly one of the calls below to run a dump.
  pass
  # dump_all_sargas(base_dir="/home/vvasuki/sanskrit/raw_etexts/purANam/rAmAyaNam/Andhra-pAThaH_iitk/")
  # aandhra.fix_title_names(base_dir="/home/vvasuki/sanskrit/raw_etexts/purANam/rAmAyaNam/kumbhakona", base_dir_ref="/home/vvasuki/sanskrit/raw_etexts/purANam/rAmAyaNam/goraxapuram/VR_with_errors", dry_run=False)
  # dump_commentary(base_dir="/home/vvasuki/sanskrit/raw_etexts/purANam/rAmAyaNam/TIkA/bhUShaNa_iitk/", commentary_id=14)
  # dump_commentary(base_dir="/home/vvasuki/sanskrit/raw_etexts/purANam/rAmAyaNam/TIkA/shiromaNI_iitk/", commentary_id=10)
  # dump_commentary(base_dir="/home/vvasuki/sanskrit/raw_etexts/purANam/rAmAyaNam/TIkA/tilaka_iitk/", commentary_id=13)
|
flexible
|
{
"blob_id": "f3a63a22f8746d4a1f127bfe9e8c9d822109ab3c",
"index": 463,
"step-1": "<mask token>\n\n\ndef dump_all_sargas(base_dir):\n for kaanda_index in book_data.get_subunit_list(file_path=unit_info_file,\n unit_path_list=[]):\n if kaanda_index >= 6:\n continue\n sarga_list = book_data.get_subunit_list(file_path=unit_info_file,\n unit_path_list=[kaanda_index])\n for sarga_index in sarga_list:\n logging.info('Kanda %d Sarga %d', kaanda_index, sarga_index)\n out_path = os.path.join(base_dir, '%d' % kaanda_index, \n '%03d.md' % sarga_index)\n url = (\n 'https://www.valmiki.iitk.ac.in/sloka?field_kanda_tid=%d&language=dv&field_sarga_value=%d'\n % (kaanda_index, sarga_index))\n dump_sarga(url=url, out_path=out_path, sarga_id=sarga_index)\n\n\ndef dump_all_sargas(base_dir):\n for kaanda_index in book_data.get_subunit_list(file_path=unit_info_file,\n unit_path_list=[]):\n if kaanda_index >= 6:\n continue\n sarga_list = book_data.get_subunit_list(file_path=unit_info_file,\n unit_path_list=[kaanda_index])\n for sarga_index in sarga_list:\n logging.info('Kanda %d Sarga %d', kaanda_index, sarga_index)\n out_path = os.path.join(base_dir, '%d' % kaanda_index, \n '%03d.md' % sarga_index)\n url = (\n 'https://www.valmiki.iitk.ac.in/sloka?field_kanda_tid=%d&language=dv&field_sarga_value=%d'\n % (kaanda_index, sarga_index))\n dump_sarga(url=url, out_path=out_path, sarga_id=sarga_index)\n\n\ndef dump_commentary(base_dir, commentary_id):\n for kaanda_index in book_data.get_subunit_list(file_path=unit_info_file,\n unit_path_list=[]):\n if kaanda_index >= 6:\n continue\n sarga_list = book_data.get_subunit_list(file_path=unit_info_file,\n unit_path_list=[kaanda_index])\n for sarga_index in sarga_list:\n logging.info('Kanda %d Sarga %d', kaanda_index, sarga_index)\n out_path = os.path.join(base_dir, '%d' % kaanda_index, \n '%03d.md' % sarga_index)\n url = (\n 'https://www.valmiki.iitk.ac.in/commentaries?language=dv&field_commnetary_tid=%d&field_kanda_tid=%d&field_sarga_value=%d'\n % (commentary_id, kaanda_index, sarga_index))\n title_maker = lambda soup, 
title_prefix: sanscript.transliterate(\n '%03d' % sarga_index, sanscript.IAST, sanscript.DEVANAGARI)\n iitk.dump_item(item_url=url, outfile_path=out_path, title_maker\n =title_maker)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef dump_sarga(url, out_path, sarga_id, dry_run=False):\n page_html = urlopen(url)\n soup = BeautifulSoup(page_html.read(), 'lxml')\n shloka_tags = soup.select('.views-row')\n sarga_content = ''\n for index, shloka_tag in enumerate(tqdm(shloka_tags)):\n fields = shloka_tag.select('.field-content')\n if index == 0:\n sarga_summary = fields[0].contents[0].replace('[', '').replace(']',\n '')\n shloka = souper.get_md_paragraph(fields[0].contents[1:])\n sarga_content = get_md_with_pandoc(content_in=sarga_summary,\n source_format='html')\n else:\n shloka = souper.get_md_paragraph(fields[0].contents)\n shloka = shloka.replace(':', 'ः')\n word_meaning = souper.get_md_paragraph(fields[1].contents).replace(':',\n 'ः')\n shloka_meaning = souper.get_md_paragraph(fields[2].contents)\n content = textwrap.dedent(\n \"\"\"\n ## श्लोकः\n ### मूलम्\n %s\n \n ### शब्दार्थः\n %s\n \n ### आङ्ग्लानुवादः\n %s\n \"\"\"\n ) % (shloka, word_meaning, shloka_meaning)\n sarga_content = '%s\\n\\n%s' % (sarga_content, content)\n md_file = MdFile(file_path=out_path)\n sarga_content = sarga_content.replace(':', 'ः').replace('इत्यार्षे',\n '\\n## समाप्तिः\\n')\n md_file.dump_to_file(metadata={'title': '%03d' % sarga_id}, content=\n sarga_content, dry_run=dry_run)\n\n\ndef dump_all_sargas(base_dir):\n for kaanda_index in book_data.get_subunit_list(file_path=unit_info_file,\n unit_path_list=[]):\n if kaanda_index >= 6:\n continue\n sarga_list = book_data.get_subunit_list(file_path=unit_info_file,\n unit_path_list=[kaanda_index])\n for sarga_index in sarga_list:\n logging.info('Kanda %d Sarga %d', kaanda_index, sarga_index)\n out_path = os.path.join(base_dir, '%d' % kaanda_index, \n '%03d.md' % sarga_index)\n url = (\n 'https://www.valmiki.iitk.ac.in/sloka?field_kanda_tid=%d&language=dv&field_sarga_value=%d'\n % (kaanda_index, sarga_index))\n dump_sarga(url=url, out_path=out_path, sarga_id=sarga_index)\n\n\ndef dump_all_sargas(base_dir):\n for kaanda_index in 
book_data.get_subunit_list(file_path=unit_info_file,\n unit_path_list=[]):\n if kaanda_index >= 6:\n continue\n sarga_list = book_data.get_subunit_list(file_path=unit_info_file,\n unit_path_list=[kaanda_index])\n for sarga_index in sarga_list:\n logging.info('Kanda %d Sarga %d', kaanda_index, sarga_index)\n out_path = os.path.join(base_dir, '%d' % kaanda_index, \n '%03d.md' % sarga_index)\n url = (\n 'https://www.valmiki.iitk.ac.in/sloka?field_kanda_tid=%d&language=dv&field_sarga_value=%d'\n % (kaanda_index, sarga_index))\n dump_sarga(url=url, out_path=out_path, sarga_id=sarga_index)\n\n\ndef dump_commentary(base_dir, commentary_id):\n for kaanda_index in book_data.get_subunit_list(file_path=unit_info_file,\n unit_path_list=[]):\n if kaanda_index >= 6:\n continue\n sarga_list = book_data.get_subunit_list(file_path=unit_info_file,\n unit_path_list=[kaanda_index])\n for sarga_index in sarga_list:\n logging.info('Kanda %d Sarga %d', kaanda_index, sarga_index)\n out_path = os.path.join(base_dir, '%d' % kaanda_index, \n '%03d.md' % sarga_index)\n url = (\n 'https://www.valmiki.iitk.ac.in/commentaries?language=dv&field_commnetary_tid=%d&field_kanda_tid=%d&field_sarga_value=%d'\n % (commentary_id, kaanda_index, sarga_index))\n title_maker = lambda soup, title_prefix: sanscript.transliterate(\n '%03d' % sarga_index, sanscript.IAST, sanscript.DEVANAGARI)\n iitk.dump_item(item_url=url, outfile_path=out_path, title_maker\n =title_maker)\n\n\n<mask token>\n",
"step-3": "<mask token>\nfor handler in logging.root.handlers[:]:\n logging.root.removeHandler(handler)\nlogging.basicConfig(level=logging.DEBUG, format=\n '%(levelname)s:%(asctime)s:%(module)s:%(lineno)d %(message)s')\nunit_info_file = os.path.join(os.path.dirname(book_data.__file__),\n 'data/book_data/raamaayanam/andhra.json')\n\n\ndef dump_sarga(url, out_path, sarga_id, dry_run=False):\n page_html = urlopen(url)\n soup = BeautifulSoup(page_html.read(), 'lxml')\n shloka_tags = soup.select('.views-row')\n sarga_content = ''\n for index, shloka_tag in enumerate(tqdm(shloka_tags)):\n fields = shloka_tag.select('.field-content')\n if index == 0:\n sarga_summary = fields[0].contents[0].replace('[', '').replace(']',\n '')\n shloka = souper.get_md_paragraph(fields[0].contents[1:])\n sarga_content = get_md_with_pandoc(content_in=sarga_summary,\n source_format='html')\n else:\n shloka = souper.get_md_paragraph(fields[0].contents)\n shloka = shloka.replace(':', 'ः')\n word_meaning = souper.get_md_paragraph(fields[1].contents).replace(':',\n 'ः')\n shloka_meaning = souper.get_md_paragraph(fields[2].contents)\n content = textwrap.dedent(\n \"\"\"\n ## श्लोकः\n ### मूलम्\n %s\n \n ### शब्दार्थः\n %s\n \n ### आङ्ग्लानुवादः\n %s\n \"\"\"\n ) % (shloka, word_meaning, shloka_meaning)\n sarga_content = '%s\\n\\n%s' % (sarga_content, content)\n md_file = MdFile(file_path=out_path)\n sarga_content = sarga_content.replace(':', 'ः').replace('इत्यार्षे',\n '\\n## समाप्तिः\\n')\n md_file.dump_to_file(metadata={'title': '%03d' % sarga_id}, content=\n sarga_content, dry_run=dry_run)\n\n\ndef dump_all_sargas(base_dir):\n for kaanda_index in book_data.get_subunit_list(file_path=unit_info_file,\n unit_path_list=[]):\n if kaanda_index >= 6:\n continue\n sarga_list = book_data.get_subunit_list(file_path=unit_info_file,\n unit_path_list=[kaanda_index])\n for sarga_index in sarga_list:\n logging.info('Kanda %d Sarga %d', kaanda_index, sarga_index)\n out_path = os.path.join(base_dir, '%d' % 
kaanda_index, \n '%03d.md' % sarga_index)\n url = (\n 'https://www.valmiki.iitk.ac.in/sloka?field_kanda_tid=%d&language=dv&field_sarga_value=%d'\n % (kaanda_index, sarga_index))\n dump_sarga(url=url, out_path=out_path, sarga_id=sarga_index)\n\n\ndef dump_all_sargas(base_dir):\n for kaanda_index in book_data.get_subunit_list(file_path=unit_info_file,\n unit_path_list=[]):\n if kaanda_index >= 6:\n continue\n sarga_list = book_data.get_subunit_list(file_path=unit_info_file,\n unit_path_list=[kaanda_index])\n for sarga_index in sarga_list:\n logging.info('Kanda %d Sarga %d', kaanda_index, sarga_index)\n out_path = os.path.join(base_dir, '%d' % kaanda_index, \n '%03d.md' % sarga_index)\n url = (\n 'https://www.valmiki.iitk.ac.in/sloka?field_kanda_tid=%d&language=dv&field_sarga_value=%d'\n % (kaanda_index, sarga_index))\n dump_sarga(url=url, out_path=out_path, sarga_id=sarga_index)\n\n\ndef dump_commentary(base_dir, commentary_id):\n for kaanda_index in book_data.get_subunit_list(file_path=unit_info_file,\n unit_path_list=[]):\n if kaanda_index >= 6:\n continue\n sarga_list = book_data.get_subunit_list(file_path=unit_info_file,\n unit_path_list=[kaanda_index])\n for sarga_index in sarga_list:\n logging.info('Kanda %d Sarga %d', kaanda_index, sarga_index)\n out_path = os.path.join(base_dir, '%d' % kaanda_index, \n '%03d.md' % sarga_index)\n url = (\n 'https://www.valmiki.iitk.ac.in/commentaries?language=dv&field_commnetary_tid=%d&field_kanda_tid=%d&field_sarga_value=%d'\n % (commentary_id, kaanda_index, sarga_index))\n title_maker = lambda soup, title_prefix: sanscript.transliterate(\n '%03d' % sarga_index, sanscript.IAST, sanscript.DEVANAGARI)\n iitk.dump_item(item_url=url, outfile_path=out_path, title_maker\n =title_maker)\n\n\nif __name__ == '__main__':\n pass\n",
"step-4": "import logging\nimport os\nimport textwrap\nfrom urllib.request import urlopen\nfrom bs4 import BeautifulSoup\nfrom tqdm import tqdm\nfrom doc_curation import book_data\nfrom doc_curation.md import get_md_with_pandoc\nfrom doc_curation.md.file import MdFile\nfrom doc_curation.scraping.misc_sites import iitk\nfrom doc_curation.scraping.html_scraper import souper\nfrom indic_transliteration import sanscript\nfor handler in logging.root.handlers[:]:\n logging.root.removeHandler(handler)\nlogging.basicConfig(level=logging.DEBUG, format=\n '%(levelname)s:%(asctime)s:%(module)s:%(lineno)d %(message)s')\nunit_info_file = os.path.join(os.path.dirname(book_data.__file__),\n 'data/book_data/raamaayanam/andhra.json')\n\n\ndef dump_sarga(url, out_path, sarga_id, dry_run=False):\n page_html = urlopen(url)\n soup = BeautifulSoup(page_html.read(), 'lxml')\n shloka_tags = soup.select('.views-row')\n sarga_content = ''\n for index, shloka_tag in enumerate(tqdm(shloka_tags)):\n fields = shloka_tag.select('.field-content')\n if index == 0:\n sarga_summary = fields[0].contents[0].replace('[', '').replace(']',\n '')\n shloka = souper.get_md_paragraph(fields[0].contents[1:])\n sarga_content = get_md_with_pandoc(content_in=sarga_summary,\n source_format='html')\n else:\n shloka = souper.get_md_paragraph(fields[0].contents)\n shloka = shloka.replace(':', 'ः')\n word_meaning = souper.get_md_paragraph(fields[1].contents).replace(':',\n 'ः')\n shloka_meaning = souper.get_md_paragraph(fields[2].contents)\n content = textwrap.dedent(\n \"\"\"\n ## श्लोकः\n ### मूलम्\n %s\n \n ### शब्दार्थः\n %s\n \n ### आङ्ग्लानुवादः\n %s\n \"\"\"\n ) % (shloka, word_meaning, shloka_meaning)\n sarga_content = '%s\\n\\n%s' % (sarga_content, content)\n md_file = MdFile(file_path=out_path)\n sarga_content = sarga_content.replace(':', 'ः').replace('इत्यार्षे',\n '\\n## समाप्तिः\\n')\n md_file.dump_to_file(metadata={'title': '%03d' % sarga_id}, content=\n sarga_content, dry_run=dry_run)\n\n\ndef 
dump_all_sargas(base_dir):\n for kaanda_index in book_data.get_subunit_list(file_path=unit_info_file,\n unit_path_list=[]):\n if kaanda_index >= 6:\n continue\n sarga_list = book_data.get_subunit_list(file_path=unit_info_file,\n unit_path_list=[kaanda_index])\n for sarga_index in sarga_list:\n logging.info('Kanda %d Sarga %d', kaanda_index, sarga_index)\n out_path = os.path.join(base_dir, '%d' % kaanda_index, \n '%03d.md' % sarga_index)\n url = (\n 'https://www.valmiki.iitk.ac.in/sloka?field_kanda_tid=%d&language=dv&field_sarga_value=%d'\n % (kaanda_index, sarga_index))\n dump_sarga(url=url, out_path=out_path, sarga_id=sarga_index)\n\n\ndef dump_all_sargas(base_dir):\n for kaanda_index in book_data.get_subunit_list(file_path=unit_info_file,\n unit_path_list=[]):\n if kaanda_index >= 6:\n continue\n sarga_list = book_data.get_subunit_list(file_path=unit_info_file,\n unit_path_list=[kaanda_index])\n for sarga_index in sarga_list:\n logging.info('Kanda %d Sarga %d', kaanda_index, sarga_index)\n out_path = os.path.join(base_dir, '%d' % kaanda_index, \n '%03d.md' % sarga_index)\n url = (\n 'https://www.valmiki.iitk.ac.in/sloka?field_kanda_tid=%d&language=dv&field_sarga_value=%d'\n % (kaanda_index, sarga_index))\n dump_sarga(url=url, out_path=out_path, sarga_id=sarga_index)\n\n\ndef dump_commentary(base_dir, commentary_id):\n for kaanda_index in book_data.get_subunit_list(file_path=unit_info_file,\n unit_path_list=[]):\n if kaanda_index >= 6:\n continue\n sarga_list = book_data.get_subunit_list(file_path=unit_info_file,\n unit_path_list=[kaanda_index])\n for sarga_index in sarga_list:\n logging.info('Kanda %d Sarga %d', kaanda_index, sarga_index)\n out_path = os.path.join(base_dir, '%d' % kaanda_index, \n '%03d.md' % sarga_index)\n url = (\n 'https://www.valmiki.iitk.ac.in/commentaries?language=dv&field_commnetary_tid=%d&field_kanda_tid=%d&field_sarga_value=%d'\n % (commentary_id, kaanda_index, sarga_index))\n title_maker = lambda soup, title_prefix: 
sanscript.transliterate(\n '%03d' % sarga_index, sanscript.IAST, sanscript.DEVANAGARI)\n iitk.dump_item(item_url=url, outfile_path=out_path, title_maker\n =title_maker)\n\n\nif __name__ == '__main__':\n pass\n",
"step-5": "import logging\nimport os\nimport textwrap\nfrom urllib.request import urlopen\n\nfrom bs4 import BeautifulSoup\nfrom tqdm import tqdm\n\nfrom doc_curation import book_data\nfrom doc_curation.md import get_md_with_pandoc\nfrom doc_curation.md.file import MdFile\nfrom doc_curation.scraping.misc_sites import iitk\nfrom doc_curation.scraping.html_scraper import souper\nfrom indic_transliteration import sanscript\n\n# Remove all handlers associated with the root logger object.\nfor handler in logging.root.handlers[:]:\n logging.root.removeHandler(handler)\nlogging.basicConfig(\n level=logging.DEBUG,\n format=\"%(levelname)s:%(asctime)s:%(module)s:%(lineno)d %(message)s\")\n\nunit_info_file = os.path.join(os.path.dirname(book_data.__file__), \"data/book_data/raamaayanam/andhra.json\")\n\n\ndef dump_sarga(url, out_path, sarga_id, dry_run=False):\n # browser.implicitly_wait(2)\n page_html = urlopen(url)\n soup = BeautifulSoup(page_html.read(), 'lxml')\n shloka_tags = soup.select(\".views-row\")\n sarga_content = \"\"\n for (index, shloka_tag) in enumerate(tqdm(shloka_tags)):\n fields = shloka_tag.select(\".field-content\")\n if index == 0:\n sarga_summary = fields[0].contents[0].replace(\"[\", \"\").replace(\"]\", \"\")\n shloka = souper.get_md_paragraph(fields[0].contents[1:])\n sarga_content = get_md_with_pandoc(content_in=sarga_summary, source_format=\"html\")\n else:\n shloka = souper.get_md_paragraph(fields[0].contents)\n shloka = shloka.replace(\":\", \"ः\")\n word_meaning = souper.get_md_paragraph(fields[1].contents).replace(\":\", \"ः\")\n shloka_meaning = souper.get_md_paragraph(fields[2].contents)\n content = textwrap.dedent(\"\"\"\n ## श्लोकः\n ### मूलम्\n %s\n \n ### शब्दार्थः\n %s\n \n ### आङ्ग्लानुवादः\n %s\n \"\"\") % (shloka, word_meaning, shloka_meaning)\n sarga_content = \"%s\\n\\n%s\" % (sarga_content, content)\n md_file = MdFile(file_path=out_path)\n sarga_content = sarga_content.replace(\":\", \"ः\").replace(\"इत्यार्षे\", \"\\n## 
समाप्तिः\\n\")\n md_file.dump_to_file(metadata={\"title\": \"%03d\" % sarga_id}, content=sarga_content, dry_run=dry_run)\n\n\ndef dump_all_sargas(base_dir):\n for kaanda_index in book_data.get_subunit_list(file_path=unit_info_file, unit_path_list=[]):\n if kaanda_index >= 6:\n continue\n sarga_list = book_data.get_subunit_list(file_path=unit_info_file, unit_path_list=[kaanda_index])\n for sarga_index in sarga_list:\n logging.info(\"Kanda %d Sarga %d\", kaanda_index, sarga_index)\n out_path = os.path.join(base_dir, \"%d\" % kaanda_index, \"%03d.md\" % sarga_index)\n url = \"https://www.valmiki.iitk.ac.in/sloka?field_kanda_tid=%d&language=dv&field_sarga_value=%d\" % (\n kaanda_index, sarga_index)\n dump_sarga(url=url, out_path=out_path, sarga_id=sarga_index)\n\n\ndef dump_all_sargas(base_dir):\n for kaanda_index in book_data.get_subunit_list(file_path=unit_info_file, unit_path_list=[]):\n if kaanda_index >= 6:\n continue\n sarga_list = book_data.get_subunit_list(file_path=unit_info_file, unit_path_list=[kaanda_index])\n for sarga_index in sarga_list:\n logging.info(\"Kanda %d Sarga %d\", kaanda_index, sarga_index)\n out_path = os.path.join(base_dir, \"%d\" % kaanda_index, \"%03d.md\" % sarga_index)\n url = \"https://www.valmiki.iitk.ac.in/sloka?field_kanda_tid=%d&language=dv&field_sarga_value=%d\" % (\n kaanda_index, sarga_index)\n dump_sarga(url=url, out_path=out_path, sarga_id=sarga_index)\n\n\ndef dump_commentary(base_dir, commentary_id):\n for kaanda_index in book_data.get_subunit_list(file_path=unit_info_file, unit_path_list=[]):\n if kaanda_index >= 6:\n continue\n sarga_list = book_data.get_subunit_list(file_path=unit_info_file, unit_path_list=[kaanda_index])\n for sarga_index in sarga_list:\n logging.info(\"Kanda %d Sarga %d\", kaanda_index, sarga_index)\n out_path = os.path.join(base_dir, \"%d\" % kaanda_index, \"%03d.md\" % sarga_index)\n url = 
\"https://www.valmiki.iitk.ac.in/commentaries?language=dv&field_commnetary_tid=%d&field_kanda_tid=%d&field_sarga_value=%d\" % (\n commentary_id, kaanda_index, sarga_index)\n title_maker = lambda soup, title_prefix: sanscript.transliterate(\"%03d\" % sarga_index, sanscript.IAST,\n sanscript.DEVANAGARI)\n iitk.dump_item(item_url=url, outfile_path=out_path, title_maker=title_maker)\n\n\nif __name__ == '__main__':\n pass\n # dump_all_sargas(base_dir=\"/home/vvasuki/sanskrit/raw_etexts/purANam/rAmAyaNam/Andhra-pAThaH_iitk/\")\n # aandhra.fix_title_names(base_dir=\"/home/vvasuki/sanskrit/raw_etexts/purANam/rAmAyaNam/kumbhakona\", base_dir_ref=\"/home/vvasuki/sanskrit/raw_etexts/purANam/rAmAyaNam/goraxapuram/VR_with_errors\", dry_run=False)\n # dump_commentary(base_dir=\"/home/vvasuki/sanskrit/raw_etexts/purANam/rAmAyaNam/TIkA/bhUShaNa_iitk/\", commentary_id=14)\n # dump_commentary(base_dir=\"/home/vvasuki/sanskrit/raw_etexts/purANam/rAmAyaNam/TIkA/shiromaNI_iitk/\", commentary_id=10)\n # dump_commentary(base_dir=\"/home/vvasuki/sanskrit/raw_etexts/purANam/rAmAyaNam/TIkA/tilaka_iitk/\", commentary_id=13)\n",
"step-ids": [
3,
4,
6,
7,
8
]
}
|
[
3,
4,
6,
7,
8
] |
<|reserved_special_token_0|>
class GIFOutput(object):
    """Render-output settings for an animated GIF.

    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.

    Attributes:
        swagger_types (dict): attribute name -> attribute type.
        attribute_map (dict): attribute name -> json key in the API definition.
    """
    swagger_types = {
        'gif_fps': 'float',
        'color_depth': 'float',
        'gif_loop': 'int',
        'height': 'float',
        'start': 'float',
        'duration': 'float',
        'suffix': 'str',
        'overlay': 'str',
        'overlay_alignment': 'list[str]',
        'overlay_scale': 'str',
        'label': 'str'
    }

    attribute_map = {
        'gif_fps': 'gif_fps',
        'color_depth': 'color_depth',
        'gif_loop': 'gif_loop',
        'height': 'height',
        'start': 'start',
        'duration': 'duration',
        'suffix': 'suffix',
        'overlay': 'overlay',
        'overlay_alignment': 'overlay_alignment',
        'overlay_scale': 'overlay_scale',
        'label': 'label'
    }

    def __init__(self, gif_fps=None, color_depth=None, gif_loop=None,
                 height=None, start=None, duration=None, suffix=None,
                 overlay=None, overlay_alignment=None, overlay_scale='fit',
                 label=None):
        """GIFOutput - a model defined in Swagger.

        `height` and `start` are required; all other parameters are optional.
        Values are routed through the property setters so the same validation
        applies on construction and on later assignment.

        :raises ValueError: if a value fails its setter's validation.
        """
        self._gif_fps = None
        self._color_depth = None
        self._gif_loop = None
        self._height = None
        self._start = None
        self._duration = None
        self._suffix = None
        self._overlay = None
        self._overlay_alignment = None
        self._overlay_scale = None
        self._label = None
        self.discriminator = None
        if gif_fps is not None:
            self.gif_fps = gif_fps
        if color_depth is not None:
            self.color_depth = color_depth
        if gif_loop is not None:
            self.gif_loop = gif_loop
        # Required fields: the setters raise ValueError when given None.
        self.height = height
        self.start = start
        if duration is not None:
            self.duration = duration
        if suffix is not None:
            self.suffix = suffix
        if overlay is not None:
            self.overlay = overlay
        if overlay_alignment is not None:
            self.overlay_alignment = overlay_alignment
        if overlay_scale is not None:
            self.overlay_scale = overlay_scale
        if label is not None:
            self.label = label

    @property
    def gif_fps(self):
        """The frame rate of the GIF. Default is the Video frame rate.

        :rtype: float
        """
        return self._gif_fps

    @gif_fps.setter
    def gif_fps(self, gif_fps):
        """Set the frame rate of the GIF; must be <= 30.

        :raises ValueError: if gif_fps is greater than 30.
        """
        if gif_fps is not None and gif_fps > 30:
            raise ValueError(
                'Invalid value for `gif_fps`, must be a value less than or equal to `30`'
            )
        self._gif_fps = gif_fps

    @property
    def color_depth(self):
        """Amount of colors in palette.

        :rtype: float
        """
        return self._color_depth

    @color_depth.setter
    def color_depth(self, color_depth):
        """Set the amount of colors in palette."""
        self._color_depth = color_depth

    @property
    def gif_loop(self):
        """If to loop the GIF. -1 is no loop, 0 is infinite loops, and other
        numbers are number of loops.

        :rtype: int
        """
        return self._gif_loop

    @gif_loop.setter
    def gif_loop(self, gif_loop):
        """Set the loop count; must be >= -1.

        :raises ValueError: if gif_loop is less than -1.
        """
        if gif_loop is not None and gif_loop < -1:
            raise ValueError(
                'Invalid value for `gif_loop`, must be a value greater than or equal to `-1`'
            )
        self._gif_loop = gif_loop

    @property
    def height(self):
        """Height of the media to be rendered, in pixels. The width is
        automatically calculated to keep the aspect ratio.

        :rtype: float
        """
        return self._height

    @height.setter
    def height(self, height):
        """Set the height in pixels (required).

        :raises ValueError: if height is None.
        """
        if height is None:
            raise ValueError('Invalid value for `height`, must not be `None`')
        self._height = height

    @property
    def start(self):
        """What second of the storyboard timeline to start the GIF.

        :rtype: float
        """
        return self._start

    @start.setter
    def start(self, start):
        """Set the start second (required).

        :raises ValueError: if start is None.
        """
        if start is None:
            raise ValueError('Invalid value for `start`, must not be `None`')
        self._start = start

    @property
    def duration(self):
        """Seconds for the duration of the GIF. Can't be longer than the video.

        :rtype: float
        """
        return self._duration

    @duration.setter
    def duration(self, duration):
        """Set the duration in seconds."""
        self._duration = duration

    @property
    def suffix(self):
        """Unique ending of the file name so several outputs can be created
        then identified. Required if there is more than 1 video output.

        :rtype: str
        """
        return self._suffix

    @suffix.setter
    def suffix(self, suffix):
        """Set the file-name suffix."""
        self._suffix = suffix

    @property
    def overlay(self):
        """Path to overlay image, such as: play button or watermark.

        :rtype: str
        """
        return self._overlay

    @overlay.setter
    def overlay(self, overlay):
        """Set the overlay image path."""
        self._overlay = overlay

    @property
    def overlay_alignment(self):
        """Alignment for overlay image in case the image doesn't fit the video
        perfectly. The first item in the array is X. The second is Y.

        :rtype: list[str]
        """
        return self._overlay_alignment

    @overlay_alignment.setter
    def overlay_alignment(self, overlay_alignment):
        """Set the overlay alignment.

        :raises ValueError: if any entry is not one of the allowed keywords.
        """
        allowed_values = ['left', 'center', 'right', 'top', 'middle', 'bottom']
        if not set(overlay_alignment).issubset(set(allowed_values)):
            raise ValueError(
                'Invalid values for `overlay_alignment` [{0}], must be a subset of [{1}]'
                .format(', '.join(map(str, set(overlay_alignment) - set(
                    allowed_values))), ', '.join(map(str, allowed_values))))
        self._overlay_alignment = overlay_alignment

    @property
    def overlay_scale(self):
        """Scale mode for the overlay image: 'fit', 'fill' or 'none'.

        * fit: scale so the image is completely visible; transparency is added
          according to the alignment settings when aspect ratios differ.
        * fill: scale so the image completely fills the video; the image is
          cropped according to the alignment settings when aspect ratios differ.
        * none: don't resize the overlay image.

        :rtype: str
        """
        return self._overlay_scale

    @overlay_scale.setter
    def overlay_scale(self, overlay_scale):
        """Set the overlay scale mode.

        :raises ValueError: if overlay_scale is not 'fit', 'fill' or 'none'.
        """
        allowed_values = ['fit', 'fill', 'none']
        if overlay_scale not in allowed_values:
            raise ValueError(
                'Invalid value for `overlay_scale` ({0}), must be one of {1}'
                .format(overlay_scale, allowed_values))
        self._overlay_scale = overlay_scale

    @property
    def label(self):
        """Another way to identify this specific output. The label is returned
        in the response, but does not appear in the file name.

        :rtype: str
        """
        return self._label

    @label.setter
    def label(self, label):
        """Set the label."""
        self._label = label

    def to_dict(self):
        """Returns the model properties as a dict."""
        result = {}
        # Iterate the declared attributes; nested models expose to_dict().
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, 'to_dict') else item
                    for item in value
                ]
            elif hasattr(value, 'to_dict'):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    k: v.to_dict() if hasattr(v, 'to_dict') else v
                    for k, v in value.items()
                }
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Returns the string representation of the model."""
        import pprint  # local import keeps the block self-contained
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, GIFOutput):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class GIFOutput(object):
    """Render-output settings for an animated GIF.

    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.

    Attributes:
        swagger_types (dict): attribute name -> attribute type.
        attribute_map (dict): attribute name -> json key in the API definition.
    """
    swagger_types = {
        'gif_fps': 'float',
        'color_depth': 'float',
        'gif_loop': 'int',
        'height': 'float',
        'start': 'float',
        'duration': 'float',
        'suffix': 'str',
        'overlay': 'str',
        'overlay_alignment': 'list[str]',
        'overlay_scale': 'str',
        'label': 'str'
    }

    attribute_map = {
        'gif_fps': 'gif_fps',
        'color_depth': 'color_depth',
        'gif_loop': 'gif_loop',
        'height': 'height',
        'start': 'start',
        'duration': 'duration',
        'suffix': 'suffix',
        'overlay': 'overlay',
        'overlay_alignment': 'overlay_alignment',
        'overlay_scale': 'overlay_scale',
        'label': 'label'
    }

    def __init__(self, gif_fps=None, color_depth=None, gif_loop=None,
                 height=None, start=None, duration=None, suffix=None,
                 overlay=None, overlay_alignment=None, overlay_scale='fit',
                 label=None):
        """GIFOutput - a model defined in Swagger.

        `height` and `start` are required; all other parameters are optional.
        Values are routed through the property setters so the same validation
        applies on construction and on later assignment.

        :raises ValueError: if a value fails its setter's validation.
        """
        self._gif_fps = None
        self._color_depth = None
        self._gif_loop = None
        self._height = None
        self._start = None
        self._duration = None
        self._suffix = None
        self._overlay = None
        self._overlay_alignment = None
        self._overlay_scale = None
        self._label = None
        self.discriminator = None
        if gif_fps is not None:
            self.gif_fps = gif_fps
        if color_depth is not None:
            self.color_depth = color_depth
        if gif_loop is not None:
            self.gif_loop = gif_loop
        # Required fields: the setters raise ValueError when given None.
        self.height = height
        self.start = start
        if duration is not None:
            self.duration = duration
        if suffix is not None:
            self.suffix = suffix
        if overlay is not None:
            self.overlay = overlay
        if overlay_alignment is not None:
            self.overlay_alignment = overlay_alignment
        if overlay_scale is not None:
            self.overlay_scale = overlay_scale
        if label is not None:
            self.label = label

    @property
    def gif_fps(self):
        """The frame rate of the GIF. Default is the Video frame rate.

        :rtype: float
        """
        return self._gif_fps

    @gif_fps.setter
    def gif_fps(self, gif_fps):
        """Set the frame rate of the GIF; must be <= 30.

        :raises ValueError: if gif_fps is greater than 30.
        """
        if gif_fps is not None and gif_fps > 30:
            raise ValueError(
                'Invalid value for `gif_fps`, must be a value less than or equal to `30`'
            )
        self._gif_fps = gif_fps

    @property
    def color_depth(self):
        """Amount of colors in palette.

        :rtype: float
        """
        return self._color_depth

    @color_depth.setter
    def color_depth(self, color_depth):
        """Set the amount of colors in palette."""
        self._color_depth = color_depth

    @property
    def gif_loop(self):
        """If to loop the GIF. -1 is no loop, 0 is infinite loops, and other
        numbers are number of loops.

        :rtype: int
        """
        return self._gif_loop

    @gif_loop.setter
    def gif_loop(self, gif_loop):
        """Set the loop count; must be >= -1.

        :raises ValueError: if gif_loop is less than -1.
        """
        if gif_loop is not None and gif_loop < -1:
            raise ValueError(
                'Invalid value for `gif_loop`, must be a value greater than or equal to `-1`'
            )
        self._gif_loop = gif_loop

    @property
    def height(self):
        """Height of the media to be rendered, in pixels. The width is
        automatically calculated to keep the aspect ratio.

        :rtype: float
        """
        return self._height

    @height.setter
    def height(self, height):
        """Set the height in pixels (required).

        :raises ValueError: if height is None.
        """
        if height is None:
            raise ValueError('Invalid value for `height`, must not be `None`')
        self._height = height

    @property
    def start(self):
        """What second of the storyboard timeline to start the GIF.

        :rtype: float
        """
        return self._start

    @start.setter
    def start(self, start):
        """Set the start second (required).

        :raises ValueError: if start is None.
        """
        if start is None:
            raise ValueError('Invalid value for `start`, must not be `None`')
        self._start = start

    @property
    def duration(self):
        """Seconds for the duration of the GIF. Can't be longer than the video.

        :rtype: float
        """
        return self._duration

    @duration.setter
    def duration(self, duration):
        """Set the duration in seconds."""
        self._duration = duration

    @property
    def suffix(self):
        """Unique ending of the file name so several outputs can be created
        then identified. Required if there is more than 1 video output.

        :rtype: str
        """
        return self._suffix

    @suffix.setter
    def suffix(self, suffix):
        """Set the file-name suffix."""
        self._suffix = suffix

    @property
    def overlay(self):
        """Path to overlay image, such as: play button or watermark.

        :rtype: str
        """
        return self._overlay

    @overlay.setter
    def overlay(self, overlay):
        """Set the overlay image path."""
        self._overlay = overlay

    @property
    def overlay_alignment(self):
        """Alignment for overlay image in case the image doesn't fit the video
        perfectly. The first item in the array is X. The second is Y.

        :rtype: list[str]
        """
        return self._overlay_alignment

    @overlay_alignment.setter
    def overlay_alignment(self, overlay_alignment):
        """Set the overlay alignment.

        :raises ValueError: if any entry is not one of the allowed keywords.
        """
        allowed_values = ['left', 'center', 'right', 'top', 'middle', 'bottom']
        if not set(overlay_alignment).issubset(set(allowed_values)):
            raise ValueError(
                'Invalid values for `overlay_alignment` [{0}], must be a subset of [{1}]'
                .format(', '.join(map(str, set(overlay_alignment) - set(
                    allowed_values))), ', '.join(map(str, allowed_values))))
        self._overlay_alignment = overlay_alignment

    @property
    def overlay_scale(self):
        """Scale mode for the overlay image: 'fit', 'fill' or 'none'.

        * fit: scale so the image is completely visible; transparency is added
          according to the alignment settings when aspect ratios differ.
        * fill: scale so the image completely fills the video; the image is
          cropped according to the alignment settings when aspect ratios differ.
        * none: don't resize the overlay image.

        :rtype: str
        """
        return self._overlay_scale

    @overlay_scale.setter
    def overlay_scale(self, overlay_scale):
        """Set the overlay scale mode.

        :raises ValueError: if overlay_scale is not 'fit', 'fill' or 'none'.
        """
        allowed_values = ['fit', 'fill', 'none']
        if overlay_scale not in allowed_values:
            raise ValueError(
                'Invalid value for `overlay_scale` ({0}), must be one of {1}'
                .format(overlay_scale, allowed_values))
        self._overlay_scale = overlay_scale

    @property
    def label(self):
        """Another way to identify this specific output. The label is returned
        in the response, but does not appear in the file name.

        :rtype: str
        """
        return self._label

    @label.setter
    def label(self, label):
        """Set the label."""
        self._label = label

    def to_dict(self):
        """Returns the model properties as a dict."""
        result = {}
        # Iterate the declared attributes; nested models expose to_dict().
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, 'to_dict') else item
                    for item in value
                ]
            elif hasattr(value, 'to_dict'):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    k: v.to_dict() if hasattr(v, 'to_dict') else v
                    for k, v in value.items()
                }
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Returns the string representation of the model."""
        import pprint  # local import keeps the block self-contained
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, GIFOutput):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class GIFOutput(object):
    """Render-output settings for an animated GIF.

    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.

    Attributes:
        swagger_types (dict): attribute name -> attribute type.
        attribute_map (dict): attribute name -> json key in the API definition.
    """
    swagger_types = {
        'gif_fps': 'float',
        'color_depth': 'float',
        'gif_loop': 'int',
        'height': 'float',
        'start': 'float',
        'duration': 'float',
        'suffix': 'str',
        'overlay': 'str',
        'overlay_alignment': 'list[str]',
        'overlay_scale': 'str',
        'label': 'str'
    }

    attribute_map = {
        'gif_fps': 'gif_fps',
        'color_depth': 'color_depth',
        'gif_loop': 'gif_loop',
        'height': 'height',
        'start': 'start',
        'duration': 'duration',
        'suffix': 'suffix',
        'overlay': 'overlay',
        'overlay_alignment': 'overlay_alignment',
        'overlay_scale': 'overlay_scale',
        'label': 'label'
    }

    def __init__(self, gif_fps=None, color_depth=None, gif_loop=None,
                 height=None, start=None, duration=None, suffix=None,
                 overlay=None, overlay_alignment=None, overlay_scale='fit',
                 label=None):
        """GIFOutput - a model defined in Swagger.

        `height` and `start` are required; all other parameters are optional.
        Values are routed through the property setters so the same validation
        applies on construction and on later assignment.

        :raises ValueError: if a value fails its setter's validation.
        """
        self._gif_fps = None
        self._color_depth = None
        self._gif_loop = None
        self._height = None
        self._start = None
        self._duration = None
        self._suffix = None
        self._overlay = None
        self._overlay_alignment = None
        self._overlay_scale = None
        self._label = None
        self.discriminator = None
        if gif_fps is not None:
            self.gif_fps = gif_fps
        if color_depth is not None:
            self.color_depth = color_depth
        if gif_loop is not None:
            self.gif_loop = gif_loop
        # Required fields: the setters raise ValueError when given None.
        self.height = height
        self.start = start
        if duration is not None:
            self.duration = duration
        if suffix is not None:
            self.suffix = suffix
        if overlay is not None:
            self.overlay = overlay
        if overlay_alignment is not None:
            self.overlay_alignment = overlay_alignment
        if overlay_scale is not None:
            self.overlay_scale = overlay_scale
        if label is not None:
            self.label = label

    @property
    def gif_fps(self):
        """The frame rate of the GIF. Default is the Video frame rate.

        :rtype: float
        """
        return self._gif_fps

    @gif_fps.setter
    def gif_fps(self, gif_fps):
        """Set the frame rate of the GIF; must be <= 30.

        :raises ValueError: if gif_fps is greater than 30.
        """
        if gif_fps is not None and gif_fps > 30:
            raise ValueError(
                'Invalid value for `gif_fps`, must be a value less than or equal to `30`'
            )
        self._gif_fps = gif_fps

    @property
    def color_depth(self):
        """Amount of colors in palette.

        :rtype: float
        """
        return self._color_depth

    @color_depth.setter
    def color_depth(self, color_depth):
        """Set the amount of colors in palette."""
        self._color_depth = color_depth

    @property
    def gif_loop(self):
        """If to loop the GIF. -1 is no loop, 0 is infinite loops, and other
        numbers are number of loops.

        :rtype: int
        """
        return self._gif_loop

    @gif_loop.setter
    def gif_loop(self, gif_loop):
        """Set the loop count; must be >= -1.

        :raises ValueError: if gif_loop is less than -1.
        """
        if gif_loop is not None and gif_loop < -1:
            raise ValueError(
                'Invalid value for `gif_loop`, must be a value greater than or equal to `-1`'
            )
        self._gif_loop = gif_loop

    @property
    def height(self):
        """Height of the media to be rendered, in pixels. The width is
        automatically calculated to keep the aspect ratio.

        :rtype: float
        """
        return self._height

    @height.setter
    def height(self, height):
        """Set the height in pixels (required).

        :raises ValueError: if height is None.
        """
        if height is None:
            raise ValueError('Invalid value for `height`, must not be `None`')
        self._height = height

    @property
    def start(self):
        """What second of the storyboard timeline to start the GIF.

        :rtype: float
        """
        return self._start

    @start.setter
    def start(self, start):
        """Set the start second (required).

        :raises ValueError: if start is None.
        """
        if start is None:
            raise ValueError('Invalid value for `start`, must not be `None`')
        self._start = start

    @property
    def duration(self):
        """Seconds for the duration of the GIF. Can't be longer than the video.

        :rtype: float
        """
        return self._duration

    @duration.setter
    def duration(self, duration):
        """Set the duration in seconds."""
        self._duration = duration

    @property
    def suffix(self):
        """Unique ending of the file name so several outputs can be created
        then identified. Required if there is more than 1 video output.

        :rtype: str
        """
        return self._suffix

    @suffix.setter
    def suffix(self, suffix):
        """Set the file-name suffix."""
        self._suffix = suffix

    @property
    def overlay(self):
        """Path to overlay image, such as: play button or watermark.

        :rtype: str
        """
        return self._overlay

    @overlay.setter
    def overlay(self, overlay):
        """Set the overlay image path."""
        self._overlay = overlay

    @property
    def overlay_alignment(self):
        """Alignment for overlay image in case the image doesn't fit the video
        perfectly. The first item in the array is X. The second is Y.

        :rtype: list[str]
        """
        return self._overlay_alignment

    @overlay_alignment.setter
    def overlay_alignment(self, overlay_alignment):
        """Set the overlay alignment.

        :raises ValueError: if any entry is not one of the allowed keywords.
        """
        allowed_values = ['left', 'center', 'right', 'top', 'middle', 'bottom']
        if not set(overlay_alignment).issubset(set(allowed_values)):
            raise ValueError(
                'Invalid values for `overlay_alignment` [{0}], must be a subset of [{1}]'
                .format(', '.join(map(str, set(overlay_alignment) - set(
                    allowed_values))), ', '.join(map(str, allowed_values))))
        self._overlay_alignment = overlay_alignment

    @property
    def overlay_scale(self):
        """Scale mode for the overlay image: 'fit', 'fill' or 'none'.

        * fit: scale so the image is completely visible; transparency is added
          according to the alignment settings when aspect ratios differ.
        * fill: scale so the image completely fills the video; the image is
          cropped according to the alignment settings when aspect ratios differ.
        * none: don't resize the overlay image.

        :rtype: str
        """
        return self._overlay_scale

    @overlay_scale.setter
    def overlay_scale(self, overlay_scale):
        """Set the overlay scale mode.

        :raises ValueError: if overlay_scale is not 'fit', 'fill' or 'none'.
        """
        allowed_values = ['fit', 'fill', 'none']
        if overlay_scale not in allowed_values:
            raise ValueError(
                'Invalid value for `overlay_scale` ({0}), must be one of {1}'
                .format(overlay_scale, allowed_values))
        self._overlay_scale = overlay_scale

    @property
    def label(self):
        """Another way to identify this specific output. The label is returned
        in the response, but does not appear in the file name.

        :rtype: str
        """
        return self._label

    @label.setter
    def label(self, label):
        """Set the label."""
        self._label = label

    def to_dict(self):
        """Returns the model properties as a dict."""
        result = {}
        # Iterate the declared attributes; nested models expose to_dict().
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, 'to_dict') else item
                    for item in value
                ]
            elif hasattr(value, 'to_dict'):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    k: v.to_dict() if hasattr(v, 'to_dict') else v
                    for k, v in value.items()
                }
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Returns the string representation of the model."""
        import pprint  # local import keeps the block self-contained
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, GIFOutput):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class GIFOutput(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {'gif_fps': 'float', 'color_depth': 'float', 'gif_loop':
'int', 'height': 'float', 'start': 'float', 'duration': 'float',
'suffix': 'str', 'overlay': 'str', 'overlay_alignment': 'list[str]',
'overlay_scale': 'str', 'label': 'str'}
attribute_map = {'gif_fps': 'gif_fps', 'color_depth': 'color_depth',
'gif_loop': 'gif_loop', 'height': 'height', 'start': 'start',
'duration': 'duration', 'suffix': 'suffix', 'overlay': 'overlay',
'overlay_alignment': 'overlay_alignment', 'overlay_scale':
'overlay_scale', 'label': 'label'}
def __init__(self, gif_fps=None, color_depth=None, gif_loop=None,
height=None, start=None, duration=None, suffix=None, overlay=None,
overlay_alignment=None, overlay_scale='fit', label=None):
"""GIFOutput - a model defined in Swagger"""
self._gif_fps = None
self._color_depth = None
self._gif_loop = None
self._height = None
self._start = None
self._duration = None
self._suffix = None
self._overlay = None
self._overlay_alignment = None
self._overlay_scale = None
self._label = None
self.discriminator = None
if gif_fps is not None:
self.gif_fps = gif_fps
if color_depth is not None:
self.color_depth = color_depth
if gif_loop is not None:
self.gif_loop = gif_loop
self.height = height
self.start = start
if duration is not None:
self.duration = duration
if suffix is not None:
self.suffix = suffix
if overlay is not None:
self.overlay = overlay
if overlay_alignment is not None:
self.overlay_alignment = overlay_alignment
if overlay_scale is not None:
self.overlay_scale = overlay_scale
if label is not None:
self.label = label
@property
def gif_fps(self):
"""Gets the gif_fps of this GIFOutput.
The frame rate of the GIF. Default is the Video frame rate
:return: The gif_fps of this GIFOutput.
:rtype: float
"""
return self._gif_fps
@gif_fps.setter
def gif_fps(self, gif_fps):
"""Sets the gif_fps of this GIFOutput.
The frame rate of the GIF. Default is the Video frame rate
:param gif_fps: The gif_fps of this GIFOutput.
:type: float
"""
if gif_fps is not None and gif_fps > 30:
raise ValueError(
'Invalid value for `gif_fps`, must be a value less than or equal to `30`'
)
self._gif_fps = gif_fps
@property
def color_depth(self):
"""Gets the color_depth of this GIFOutput.
Amount of colors in palette
:return: The color_depth of this GIFOutput.
:rtype: float
"""
return self._color_depth
@color_depth.setter
def color_depth(self, color_depth):
"""Sets the color_depth of this GIFOutput.
Amount of colors in palette
:param color_depth: The color_depth of this GIFOutput.
:type: float
"""
self._color_depth = color_depth
@property
def gif_loop(self):
"""Gets the gif_loop of this GIFOutput.
If to loop the GIF. -1 is no loop, 0 is infinite loops, and other numbers are number of loops.
:return: The gif_loop of this GIFOutput.
:rtype: int
"""
return self._gif_loop
@gif_loop.setter
def gif_loop(self, gif_loop):
"""Sets the gif_loop of this GIFOutput.
If to loop the GIF. -1 is no loop, 0 is infinite loops, and other numbers are number of loops.
:param gif_loop: The gif_loop of this GIFOutput.
:type: int
"""
if gif_loop is not None and gif_loop < -1:
raise ValueError(
'Invalid value for `gif_loop`, must be a value greater than or equal to `-1`'
)
self._gif_loop = gif_loop
@property
def height(self):
"""Gets the height of this GIFOutput.
Height of the media to be rendered, in pixels. Should be the height of your scenes unless a smaller
resolution is needed. Resolution higher than the scene resolution reduces quality. The width is automatically
calculated to keep the aspect ratio.
:return: The height of this GIFOutput.
:rtype: float
"""
return self._height
@height.setter
def height(self, height):
"""Sets the height of this GIFOutput.
Height of the media to be rendered, in pixels. Should be the height of your scenes unless a smaller
resolution is needed. Resolution higher than the scene resolution reduces quality. The width is automatically
calculated to keep the aspect ratio.
:param height: The height of this GIFOutput.
:type: float
"""
if height is None:
raise ValueError('Invalid value for `height`, must not be `None`')
self._height = height
@property
def start(self):
"""Gets the start of this GIFOutput.
What second of the storyboard timeline to start the GIF.
:return: The start of this GIFOutput.
:rtype: float
"""
return self._start
@start.setter
def start(self, start):
"""Sets the start of this GIFOutput.
What second of the storyboard timeline to start the GIF.
:param start: The start of this GIFOutput.
:type: float
"""
if start is None:
raise ValueError('Invalid value for `start`, must not be `None`')
self._start = start
@property
def duration(self):
"""Gets the duration of this GIFOutput.
Seconds for the duration of the GIF. Can't be longer than the video.
:return: The duration of this GIFOutput.
:rtype: float
"""
return self._duration
@duration.setter
def duration(self, duration):
"""Sets the duration of this GIFOutput.
Seconds for the duration of the GIF. Can't be longer than the video.
:param duration: The duration of this GIFOutput.
:type: float
"""
self._duration = duration
@property
def suffix(self):
"""Gets the suffix of this GIFOutput.
Unique ending of the file name so several outputs can be created then identified. Required if there is more
then 1 video output.
:return: The suffix of this GIFOutput.
:rtype: str
"""
return self._suffix
@suffix.setter
def suffix(self, suffix):
"""Sets the suffix of this GIFOutput.
Unique ending of the file name so several outputs can be created then identified. Required if there is more
then 1 video output.
:param suffix: The suffix of this GIFOutput.
:type: str
"""
self._suffix = suffix
@property
def overlay(self):
"""Gets the overlay of this GIFOutput.
Path to overlay image, such as: play button or watermark.
:return: The overlay of this GIFOutput.
:rtype: str
"""
return self._overlay
@overlay.setter
def overlay(self, overlay):
"""Sets the overlay of this GIFOutput.
Path to overlay image, such as: play button or watermark.
:param overlay: The overlay of this GIFOutput.
:type: str
"""
self._overlay = overlay
@property
def overlay_alignment(self):
"""Gets the overlay_alignment of this GIFOutput.
Alignment for overlay image in case the image doesn't fit the video perfectly. The first item in the array is
X. The second is Y.
:return: The overlay_alignment of this GIFOutput.
:rtype: list[str]
"""
return self._overlay_alignment
@overlay_alignment.setter
def overlay_alignment(self, overlay_alignment):
"""Sets the overlay_alignment of this GIFOutput.
Alignment for overlay image in case the image doesn't fit the video perfectly. The first item in the array is
X. The second is Y.
:param overlay_alignment: The overlay_alignment of this GIFOutput.
:type: list[str]
"""
allowed_values = ['left', 'center', 'right', 'top', 'middle', 'bottom']
if not set(overlay_alignment).issubset(set(allowed_values)):
raise ValueError(
'Invalid values for `overlay_alignment` [{0}], must be a subset of [{1}]'
.format(', '.join(map(str, set(overlay_alignment) - set(
allowed_values))), ', '.join(map(str, allowed_values))))
self._overlay_alignment = overlay_alignment
@property
def overlay_scale(self):
"""Gets the overlay_scale of this GIFOutput.
Scale the overlay image if it's not the same size as the video. * Fit: scale the image up or down so it's
completely visible in the video's resolution. If not the same aspect ratio, transparency is added around the
image according to the alignment settings. * Fill: scale the image up or down so it completely fills the
video. If not the same aspect ratio, the image is cropped according to the alignment settings. * None: don't
resize the overlay image.
:return: The overlay_scale of this GIFOutput.
:rtype: str
"""
return self._overlay_scale
@overlay_scale.setter
def overlay_scale(self, overlay_scale):
"""Sets the overlay_scale of this GIFOutput.
Scale the overlay image if it's not the same size as the video. * Fit: scale the image up or down so it's
completely visible in the video's resolution. If not the same aspect ratio, transparency is added around the
image according to the alignment settings. * Fill: scale the image up or down so it completely fills the
video. If not the same aspect ratio, the image is cropped according to the alignment settings. * None: don't
resize the overlay image.
:param overlay_scale: The overlay_scale of this GIFOutput.
:type: str
"""
allowed_values = ['fit', 'fill', 'none']
if overlay_scale not in allowed_values:
raise ValueError(
'Invalid value for `overlay_scale` ({0}), must be one of {1}'
.format(overlay_scale, allowed_values))
self._overlay_scale = overlay_scale
@property
def label(self):
"""Gets the label of this GIFOutput.
This label is another way to identify this specific output. The label is returned in the response,
but does not appear in the file name.
:return: The label of this GIFOutput.
:rtype: str
"""
return self._label
@label.setter
def label(self, label):
"""Sets the label of this GIFOutput.
This label is another way to identify this specific output. The label is returned in the response,
but does not appear in the file name.
:param label: The label of this GIFOutput.
:type: str
"""
self._label = label
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(lambda x: x.to_dict() if hasattr(x,
'to_dict') else x, value))
elif hasattr(value, 'to_dict'):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(lambda item: (item[0], item[1].
to_dict()) if hasattr(item[1], 'to_dict') else item,
value.items()))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, GIFOutput):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
<|reserved_special_token_1|>
# coding: utf-8
"""
Idomoo API
OpenAPI spec version: 2.0
Contact: dev.support@idomoo.com
"""
import pprint
import six
class GIFOutput(object):
    """Swagger model describing an animated-GIF render output.

    NOTE: originally produced by the swagger code generator; the
    ``swagger_types``/``attribute_map`` tables and the validated-property
    layout follow that convention, so edit with care.
    """

    # Attribute name -> declared swagger type.
    swagger_types = {
        'gif_fps': 'float',
        'color_depth': 'float',
        'gif_loop': 'int',
        'height': 'float',
        'start': 'float',
        'duration': 'float',
        'suffix': 'str',
        'overlay': 'str',
        'overlay_alignment': 'list[str]',
        'overlay_scale': 'str',
        'label': 'str'
    }

    # Attribute name -> JSON key in the API payload (identity mapping here).
    attribute_map = {
        'gif_fps': 'gif_fps',
        'color_depth': 'color_depth',
        'gif_loop': 'gif_loop',
        'height': 'height',
        'start': 'start',
        'duration': 'duration',
        'suffix': 'suffix',
        'overlay': 'overlay',
        'overlay_alignment': 'overlay_alignment',
        'overlay_scale': 'overlay_scale',
        'label': 'label'
    }

    def __init__(self, gif_fps=None, color_depth=None, gif_loop=None,
                 height=None, start=None, duration=None, suffix=None,
                 overlay=None, overlay_alignment=None, overlay_scale='fit',
                 label=None):
        """Create a GIFOutput; ``height`` and ``start`` are mandatory."""
        # Backing fields for the validated properties below.
        self._gif_fps = None
        self._color_depth = None
        self._gif_loop = None
        self._height = None
        self._start = None
        self._duration = None
        self._suffix = None
        self._overlay = None
        self._overlay_alignment = None
        self._overlay_scale = None
        self._label = None
        self.discriminator = None

        # Optional fields are only assigned when supplied (their backing
        # fields stay None otherwise); required fields always go through
        # their setters, which raise when the value is missing.
        if gif_fps is not None:
            self.gif_fps = gif_fps
        if color_depth is not None:
            self.color_depth = color_depth
        if gif_loop is not None:
            self.gif_loop = gif_loop
        self.height = height
        self.start = start
        if duration is not None:
            self.duration = duration
        if suffix is not None:
            self.suffix = suffix
        if overlay is not None:
            self.overlay = overlay
        if overlay_alignment is not None:
            self.overlay_alignment = overlay_alignment
        if overlay_scale is not None:
            self.overlay_scale = overlay_scale
        if label is not None:
            self.label = label

    @property
    def gif_fps(self):
        """Frame rate of the GIF; defaults to the video frame rate (float)."""
        return self._gif_fps

    @gif_fps.setter
    def gif_fps(self, value):
        """Set the frame rate; rates above 30 fps are rejected."""
        if value is not None and value > 30:
            raise ValueError("Invalid value for `gif_fps`, must be a value less than or equal to `30`")
        self._gif_fps = value

    @property
    def color_depth(self):
        """Number of colors in the GIF palette (float)."""
        return self._color_depth

    @color_depth.setter
    def color_depth(self, value):
        """Set the palette size; no validation is applied."""
        self._color_depth = value

    @property
    def gif_loop(self):
        """Loop count: -1 = no loop, 0 = loop forever, N = N loops (int)."""
        return self._gif_loop

    @gif_loop.setter
    def gif_loop(self, value):
        """Set the loop count; anything below -1 is rejected."""
        if value is not None and value < -1:
            raise ValueError("Invalid value for `gif_loop`, must be a value greater than or equal to `-1`")
        self._gif_loop = value

    @property
    def height(self):
        """Output height in pixels; width follows the aspect ratio (float)."""
        return self._height

    @height.setter
    def height(self, value):
        """Set the output height; required, so ``None`` is rejected."""
        if value is None:
            raise ValueError("Invalid value for `height`, must not be `None`")
        self._height = value

    @property
    def start(self):
        """Second of the storyboard timeline where the GIF starts (float)."""
        return self._start

    @start.setter
    def start(self, value):
        """Set the start offset; required, so ``None`` is rejected."""
        if value is None:
            raise ValueError("Invalid value for `start`, must not be `None`")
        self._start = value

    @property
    def duration(self):
        """Duration of the GIF in seconds; cannot exceed the video (float)."""
        return self._duration

    @duration.setter
    def duration(self, value):
        """Set the duration; the length constraint is enforced server-side."""
        self._duration = value

    @property
    def suffix(self):
        """File-name ending distinguishing this output from others (str)."""
        return self._suffix

    @suffix.setter
    def suffix(self, value):
        """Set the file-name suffix."""
        self._suffix = value

    @property
    def overlay(self):
        """Path to an overlay image, e.g. play button or watermark (str)."""
        return self._overlay

    @overlay.setter
    def overlay(self, value):
        """Set the overlay image path."""
        self._overlay = value

    @property
    def overlay_alignment(self):
        """Overlay alignment as an [x, y] keyword pair (list[str])."""
        return self._overlay_alignment

    @overlay_alignment.setter
    def overlay_alignment(self, value):
        """Set the overlay alignment; entries must be known keywords."""
        allowed_values = ["left", "center", "right", "top", "middle", "bottom"]
        unknown = set(value) - set(allowed_values)
        if unknown:
            raise ValueError(
                "Invalid values for `overlay_alignment` [{0}], must be a subset of [{1}]"
                .format(", ".join(map(str, unknown)),
                        ", ".join(map(str, allowed_values))))
        self._overlay_alignment = value

    @property
    def overlay_scale(self):
        """Overlay scaling mode: 'fit', 'fill' or 'none' (str)."""
        return self._overlay_scale

    @overlay_scale.setter
    def overlay_scale(self, value):
        """Set the scaling mode; only 'fit', 'fill' and 'none' are valid."""
        allowed_values = ["fit", "fill", "none"]
        if value not in allowed_values:
            raise ValueError(
                "Invalid value for `overlay_scale` ({0}), must be one of {1}"
                .format(value, allowed_values))
        self._overlay_scale = value

    @property
    def label(self):
        """Label echoed in the response; never part of the file name (str)."""
        return self._label

    @label.setter
    def label(self, value):
        """Set the label."""
        self._label = value

    def to_dict(self):
        """Return the model properties as a dict, recursing into sub-models."""
        def _serialize(value):
            if isinstance(value, list):
                return [item.to_dict() if hasattr(item, "to_dict") else item
                        for item in value]
            if hasattr(value, "to_dict"):
                return value.to_dict()
            if isinstance(value, dict):
                return {key: (val.to_dict() if hasattr(val, "to_dict") else val)
                        for key, val in value.items()}
            return value

        return {attr: _serialize(getattr(self, attr))
                for attr in self.swagger_types}

    def to_str(self):
        """Return a pretty-printed string form of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """Delegate ``repr`` and ``print`` output to :meth:`to_str`."""
        return self.to_str()

    def __eq__(self, other):
        """Equal when *other* is a GIFOutput with identical attribute state."""
        return isinstance(other, GIFOutput) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Inverse of :meth:`__eq__`."""
        return not (self == other)
|
flexible
|
{
"blob_id": "2362c9a12f97f32f6136aaf16a55cf4acbaf9294",
"index": 4753,
"step-1": "<mask token>\n\n\nclass GIFOutput(object):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, gif_fps=None, color_depth=None, gif_loop=None,\n height=None, start=None, duration=None, suffix=None, overlay=None,\n overlay_alignment=None, overlay_scale='fit', label=None):\n \"\"\"GIFOutput - a model defined in Swagger\"\"\"\n self._gif_fps = None\n self._color_depth = None\n self._gif_loop = None\n self._height = None\n self._start = None\n self._duration = None\n self._suffix = None\n self._overlay = None\n self._overlay_alignment = None\n self._overlay_scale = None\n self._label = None\n self.discriminator = None\n if gif_fps is not None:\n self.gif_fps = gif_fps\n if color_depth is not None:\n self.color_depth = color_depth\n if gif_loop is not None:\n self.gif_loop = gif_loop\n self.height = height\n self.start = start\n if duration is not None:\n self.duration = duration\n if suffix is not None:\n self.suffix = suffix\n if overlay is not None:\n self.overlay = overlay\n if overlay_alignment is not None:\n self.overlay_alignment = overlay_alignment\n if overlay_scale is not None:\n self.overlay_scale = overlay_scale\n if label is not None:\n self.label = label\n\n @property\n def gif_fps(self):\n \"\"\"Gets the gif_fps of this GIFOutput.\n\n The frame rate of the GIF. Default is the Video frame rate\n\n :return: The gif_fps of this GIFOutput.\n :rtype: float\n \"\"\"\n return self._gif_fps\n\n @gif_fps.setter\n def gif_fps(self, gif_fps):\n \"\"\"Sets the gif_fps of this GIFOutput.\n\n The frame rate of the GIF. 
Default is the Video frame rate\n\n :param gif_fps: The gif_fps of this GIFOutput.\n :type: float\n \"\"\"\n if gif_fps is not None and gif_fps > 30:\n raise ValueError(\n 'Invalid value for `gif_fps`, must be a value less than or equal to `30`'\n )\n self._gif_fps = gif_fps\n\n @property\n def color_depth(self):\n \"\"\"Gets the color_depth of this GIFOutput.\n\n Amount of colors in palette\n\n :return: The color_depth of this GIFOutput.\n :rtype: float\n \"\"\"\n return self._color_depth\n\n @color_depth.setter\n def color_depth(self, color_depth):\n \"\"\"Sets the color_depth of this GIFOutput.\n\n Amount of colors in palette\n\n :param color_depth: The color_depth of this GIFOutput.\n :type: float\n \"\"\"\n self._color_depth = color_depth\n\n @property\n def gif_loop(self):\n \"\"\"Gets the gif_loop of this GIFOutput.\n\n If to loop the GIF. -1 is no loop, 0 is infinite loops, and other numbers are number of loops.\n\n :return: The gif_loop of this GIFOutput.\n :rtype: int\n \"\"\"\n return self._gif_loop\n\n @gif_loop.setter\n def gif_loop(self, gif_loop):\n \"\"\"Sets the gif_loop of this GIFOutput.\n\n If to loop the GIF. -1 is no loop, 0 is infinite loops, and other numbers are number of loops.\n\n :param gif_loop: The gif_loop of this GIFOutput.\n :type: int\n \"\"\"\n if gif_loop is not None and gif_loop < -1:\n raise ValueError(\n 'Invalid value for `gif_loop`, must be a value greater than or equal to `-1`'\n )\n self._gif_loop = gif_loop\n <mask token>\n\n @height.setter\n def height(self, height):\n \"\"\"Sets the height of this GIFOutput.\n\n Height of the media to be rendered, in pixels. Should be the height of your scenes unless a smaller\n resolution is needed. Resolution higher than the scene resolution reduces quality. 
The width is automatically\n calculated to keep the aspect ratio.\n\n :param height: The height of this GIFOutput.\n :type: float\n \"\"\"\n if height is None:\n raise ValueError('Invalid value for `height`, must not be `None`')\n self._height = height\n\n @property\n def start(self):\n \"\"\"Gets the start of this GIFOutput.\n\n What second of the storyboard timeline to start the GIF.\n\n :return: The start of this GIFOutput.\n :rtype: float\n \"\"\"\n return self._start\n\n @start.setter\n def start(self, start):\n \"\"\"Sets the start of this GIFOutput.\n\n What second of the storyboard timeline to start the GIF.\n\n :param start: The start of this GIFOutput.\n :type: float\n \"\"\"\n if start is None:\n raise ValueError('Invalid value for `start`, must not be `None`')\n self._start = start\n\n @property\n def duration(self):\n \"\"\"Gets the duration of this GIFOutput.\n\n Seconds for the duration of the GIF. Can't be longer than the video.\n\n :return: The duration of this GIFOutput.\n :rtype: float\n \"\"\"\n return self._duration\n\n @duration.setter\n def duration(self, duration):\n \"\"\"Sets the duration of this GIFOutput.\n\n Seconds for the duration of the GIF. Can't be longer than the video.\n\n :param duration: The duration of this GIFOutput.\n :type: float\n \"\"\"\n self._duration = duration\n\n @property\n def suffix(self):\n \"\"\"Gets the suffix of this GIFOutput.\n\n Unique ending of the file name so several outputs can be created then identified. Required if there is more\n then 1 video output.\n\n :return: The suffix of this GIFOutput.\n :rtype: str\n \"\"\"\n return self._suffix\n\n @suffix.setter\n def suffix(self, suffix):\n \"\"\"Sets the suffix of this GIFOutput.\n\n Unique ending of the file name so several outputs can be created then identified. 
Required if there is more\n then 1 video output.\n\n :param suffix: The suffix of this GIFOutput.\n :type: str\n \"\"\"\n self._suffix = suffix\n\n @property\n def overlay(self):\n \"\"\"Gets the overlay of this GIFOutput.\n\n Path to overlay image, such as: play button or watermark.\n\n :return: The overlay of this GIFOutput.\n :rtype: str\n \"\"\"\n return self._overlay\n\n @overlay.setter\n def overlay(self, overlay):\n \"\"\"Sets the overlay of this GIFOutput.\n\n Path to overlay image, such as: play button or watermark.\n\n :param overlay: The overlay of this GIFOutput.\n :type: str\n \"\"\"\n self._overlay = overlay\n\n @property\n def overlay_alignment(self):\n \"\"\"Gets the overlay_alignment of this GIFOutput.\n\n Alignment for overlay image in case the image doesn't fit the video perfectly. The first item in the array is\n X. The second is Y.\n\n :return: The overlay_alignment of this GIFOutput.\n :rtype: list[str]\n \"\"\"\n return self._overlay_alignment\n\n @overlay_alignment.setter\n def overlay_alignment(self, overlay_alignment):\n \"\"\"Sets the overlay_alignment of this GIFOutput.\n\n Alignment for overlay image in case the image doesn't fit the video perfectly. The first item in the array is\n X. The second is Y.\n\n :param overlay_alignment: The overlay_alignment of this GIFOutput.\n :type: list[str]\n \"\"\"\n allowed_values = ['left', 'center', 'right', 'top', 'middle', 'bottom']\n if not set(overlay_alignment).issubset(set(allowed_values)):\n raise ValueError(\n 'Invalid values for `overlay_alignment` [{0}], must be a subset of [{1}]'\n .format(', '.join(map(str, set(overlay_alignment) - set(\n allowed_values))), ', '.join(map(str, allowed_values))))\n self._overlay_alignment = overlay_alignment\n\n @property\n def overlay_scale(self):\n \"\"\"Gets the overlay_scale of this GIFOutput.\n\n Scale the overlay image if it's not the same size as the video. * Fit: scale the image up or down so it's\n completely visible in the video's resolution. 
If not the same aspect ratio, transparency is added around the\n image according to the alignment settings. * Fill: scale the image up or down so it completely fills the\n video. If not the same aspect ratio, the image is cropped according to the alignment settings. * None: don't\n resize the overlay image.\n\n :return: The overlay_scale of this GIFOutput.\n :rtype: str\n \"\"\"\n return self._overlay_scale\n\n @overlay_scale.setter\n def overlay_scale(self, overlay_scale):\n \"\"\"Sets the overlay_scale of this GIFOutput.\n\n Scale the overlay image if it's not the same size as the video. * Fit: scale the image up or down so it's\n completely visible in the video's resolution. If not the same aspect ratio, transparency is added around the\n image according to the alignment settings. * Fill: scale the image up or down so it completely fills the\n video. If not the same aspect ratio, the image is cropped according to the alignment settings. * None: don't\n resize the overlay image.\n\n :param overlay_scale: The overlay_scale of this GIFOutput.\n :type: str\n \"\"\"\n allowed_values = ['fit', 'fill', 'none']\n if overlay_scale not in allowed_values:\n raise ValueError(\n 'Invalid value for `overlay_scale` ({0}), must be one of {1}'\n .format(overlay_scale, allowed_values))\n self._overlay_scale = overlay_scale\n\n @property\n def label(self):\n \"\"\"Gets the label of this GIFOutput.\n\n This label is another way to identify this specific output. The label is returned in the response,\n but does not appear in the file name.\n\n :return: The label of this GIFOutput.\n :rtype: str\n \"\"\"\n return self._label\n\n @label.setter\n def label(self, label):\n \"\"\"Sets the label of this GIFOutput.\n\n This label is another way to identify this specific output. 
The label is returned in the response,\n but does not appear in the file name.\n\n :param label: The label of this GIFOutput.\n :type: str\n \"\"\"\n self._label = label\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n for attr, _ in six.iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(lambda x: x.to_dict() if hasattr(x,\n 'to_dict') else x, value))\n elif hasattr(value, 'to_dict'):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(lambda item: (item[0], item[1].\n to_dict()) if hasattr(item[1], 'to_dict') else item,\n value.items()))\n else:\n result[attr] = value\n return result\n <mask token>\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, GIFOutput):\n return False\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n return not self == other\n",
"step-2": "<mask token>\n\n\nclass GIFOutput(object):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, gif_fps=None, color_depth=None, gif_loop=None,\n height=None, start=None, duration=None, suffix=None, overlay=None,\n overlay_alignment=None, overlay_scale='fit', label=None):\n \"\"\"GIFOutput - a model defined in Swagger\"\"\"\n self._gif_fps = None\n self._color_depth = None\n self._gif_loop = None\n self._height = None\n self._start = None\n self._duration = None\n self._suffix = None\n self._overlay = None\n self._overlay_alignment = None\n self._overlay_scale = None\n self._label = None\n self.discriminator = None\n if gif_fps is not None:\n self.gif_fps = gif_fps\n if color_depth is not None:\n self.color_depth = color_depth\n if gif_loop is not None:\n self.gif_loop = gif_loop\n self.height = height\n self.start = start\n if duration is not None:\n self.duration = duration\n if suffix is not None:\n self.suffix = suffix\n if overlay is not None:\n self.overlay = overlay\n if overlay_alignment is not None:\n self.overlay_alignment = overlay_alignment\n if overlay_scale is not None:\n self.overlay_scale = overlay_scale\n if label is not None:\n self.label = label\n\n @property\n def gif_fps(self):\n \"\"\"Gets the gif_fps of this GIFOutput.\n\n The frame rate of the GIF. Default is the Video frame rate\n\n :return: The gif_fps of this GIFOutput.\n :rtype: float\n \"\"\"\n return self._gif_fps\n\n @gif_fps.setter\n def gif_fps(self, gif_fps):\n \"\"\"Sets the gif_fps of this GIFOutput.\n\n The frame rate of the GIF. 
Default is the Video frame rate\n\n :param gif_fps: The gif_fps of this GIFOutput.\n :type: float\n \"\"\"\n if gif_fps is not None and gif_fps > 30:\n raise ValueError(\n 'Invalid value for `gif_fps`, must be a value less than or equal to `30`'\n )\n self._gif_fps = gif_fps\n\n @property\n def color_depth(self):\n \"\"\"Gets the color_depth of this GIFOutput.\n\n Amount of colors in palette\n\n :return: The color_depth of this GIFOutput.\n :rtype: float\n \"\"\"\n return self._color_depth\n\n @color_depth.setter\n def color_depth(self, color_depth):\n \"\"\"Sets the color_depth of this GIFOutput.\n\n Amount of colors in palette\n\n :param color_depth: The color_depth of this GIFOutput.\n :type: float\n \"\"\"\n self._color_depth = color_depth\n\n @property\n def gif_loop(self):\n \"\"\"Gets the gif_loop of this GIFOutput.\n\n If to loop the GIF. -1 is no loop, 0 is infinite loops, and other numbers are number of loops.\n\n :return: The gif_loop of this GIFOutput.\n :rtype: int\n \"\"\"\n return self._gif_loop\n\n @gif_loop.setter\n def gif_loop(self, gif_loop):\n \"\"\"Sets the gif_loop of this GIFOutput.\n\n If to loop the GIF. -1 is no loop, 0 is infinite loops, and other numbers are number of loops.\n\n :param gif_loop: The gif_loop of this GIFOutput.\n :type: int\n \"\"\"\n if gif_loop is not None and gif_loop < -1:\n raise ValueError(\n 'Invalid value for `gif_loop`, must be a value greater than or equal to `-1`'\n )\n self._gif_loop = gif_loop\n\n @property\n def height(self):\n \"\"\"Gets the height of this GIFOutput.\n\n Height of the media to be rendered, in pixels. Should be the height of your scenes unless a smaller\n resolution is needed. Resolution higher than the scene resolution reduces quality. 
The width is automatically\n calculated to keep the aspect ratio.\n\n :return: The height of this GIFOutput.\n :rtype: float\n \"\"\"\n return self._height\n\n @height.setter\n def height(self, height):\n \"\"\"Sets the height of this GIFOutput.\n\n Height of the media to be rendered, in pixels. Should be the height of your scenes unless a smaller\n resolution is needed. Resolution higher than the scene resolution reduces quality. The width is automatically\n calculated to keep the aspect ratio.\n\n :param height: The height of this GIFOutput.\n :type: float\n \"\"\"\n if height is None:\n raise ValueError('Invalid value for `height`, must not be `None`')\n self._height = height\n\n @property\n def start(self):\n \"\"\"Gets the start of this GIFOutput.\n\n What second of the storyboard timeline to start the GIF.\n\n :return: The start of this GIFOutput.\n :rtype: float\n \"\"\"\n return self._start\n\n @start.setter\n def start(self, start):\n \"\"\"Sets the start of this GIFOutput.\n\n What second of the storyboard timeline to start the GIF.\n\n :param start: The start of this GIFOutput.\n :type: float\n \"\"\"\n if start is None:\n raise ValueError('Invalid value for `start`, must not be `None`')\n self._start = start\n\n @property\n def duration(self):\n \"\"\"Gets the duration of this GIFOutput.\n\n Seconds for the duration of the GIF. Can't be longer than the video.\n\n :return: The duration of this GIFOutput.\n :rtype: float\n \"\"\"\n return self._duration\n\n @duration.setter\n def duration(self, duration):\n \"\"\"Sets the duration of this GIFOutput.\n\n Seconds for the duration of the GIF. Can't be longer than the video.\n\n :param duration: The duration of this GIFOutput.\n :type: float\n \"\"\"\n self._duration = duration\n\n @property\n def suffix(self):\n \"\"\"Gets the suffix of this GIFOutput.\n\n Unique ending of the file name so several outputs can be created then identified. 
Required if there is more\n then 1 video output.\n\n :return: The suffix of this GIFOutput.\n :rtype: str\n \"\"\"\n return self._suffix\n\n @suffix.setter\n def suffix(self, suffix):\n \"\"\"Sets the suffix of this GIFOutput.\n\n Unique ending of the file name so several outputs can be created then identified. Required if there is more\n then 1 video output.\n\n :param suffix: The suffix of this GIFOutput.\n :type: str\n \"\"\"\n self._suffix = suffix\n\n @property\n def overlay(self):\n \"\"\"Gets the overlay of this GIFOutput.\n\n Path to overlay image, such as: play button or watermark.\n\n :return: The overlay of this GIFOutput.\n :rtype: str\n \"\"\"\n return self._overlay\n\n @overlay.setter\n def overlay(self, overlay):\n \"\"\"Sets the overlay of this GIFOutput.\n\n Path to overlay image, such as: play button or watermark.\n\n :param overlay: The overlay of this GIFOutput.\n :type: str\n \"\"\"\n self._overlay = overlay\n\n @property\n def overlay_alignment(self):\n \"\"\"Gets the overlay_alignment of this GIFOutput.\n\n Alignment for overlay image in case the image doesn't fit the video perfectly. The first item in the array is\n X. The second is Y.\n\n :return: The overlay_alignment of this GIFOutput.\n :rtype: list[str]\n \"\"\"\n return self._overlay_alignment\n\n @overlay_alignment.setter\n def overlay_alignment(self, overlay_alignment):\n \"\"\"Sets the overlay_alignment of this GIFOutput.\n\n Alignment for overlay image in case the image doesn't fit the video perfectly. The first item in the array is\n X. 
The second is Y.\n\n :param overlay_alignment: The overlay_alignment of this GIFOutput.\n :type: list[str]\n \"\"\"\n allowed_values = ['left', 'center', 'right', 'top', 'middle', 'bottom']\n if not set(overlay_alignment).issubset(set(allowed_values)):\n raise ValueError(\n 'Invalid values for `overlay_alignment` [{0}], must be a subset of [{1}]'\n .format(', '.join(map(str, set(overlay_alignment) - set(\n allowed_values))), ', '.join(map(str, allowed_values))))\n self._overlay_alignment = overlay_alignment\n\n @property\n def overlay_scale(self):\n \"\"\"Gets the overlay_scale of this GIFOutput.\n\n Scale the overlay image if it's not the same size as the video. * Fit: scale the image up or down so it's\n completely visible in the video's resolution. If not the same aspect ratio, transparency is added around the\n image according to the alignment settings. * Fill: scale the image up or down so it completely fills the\n video. If not the same aspect ratio, the image is cropped according to the alignment settings. * None: don't\n resize the overlay image.\n\n :return: The overlay_scale of this GIFOutput.\n :rtype: str\n \"\"\"\n return self._overlay_scale\n\n @overlay_scale.setter\n def overlay_scale(self, overlay_scale):\n \"\"\"Sets the overlay_scale of this GIFOutput.\n\n Scale the overlay image if it's not the same size as the video. * Fit: scale the image up or down so it's\n completely visible in the video's resolution. If not the same aspect ratio, transparency is added around the\n image according to the alignment settings. * Fill: scale the image up or down so it completely fills the\n video. If not the same aspect ratio, the image is cropped according to the alignment settings. 
* None: don't\n resize the overlay image.\n\n :param overlay_scale: The overlay_scale of this GIFOutput.\n :type: str\n \"\"\"\n allowed_values = ['fit', 'fill', 'none']\n if overlay_scale not in allowed_values:\n raise ValueError(\n 'Invalid value for `overlay_scale` ({0}), must be one of {1}'\n .format(overlay_scale, allowed_values))\n self._overlay_scale = overlay_scale\n\n @property\n def label(self):\n \"\"\"Gets the label of this GIFOutput.\n\n This label is another way to identify this specific output. The label is returned in the response,\n but does not appear in the file name.\n\n :return: The label of this GIFOutput.\n :rtype: str\n \"\"\"\n return self._label\n\n @label.setter\n def label(self, label):\n \"\"\"Sets the label of this GIFOutput.\n\n This label is another way to identify this specific output. The label is returned in the response,\n but does not appear in the file name.\n\n :param label: The label of this GIFOutput.\n :type: str\n \"\"\"\n self._label = label\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n for attr, _ in six.iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(lambda x: x.to_dict() if hasattr(x,\n 'to_dict') else x, value))\n elif hasattr(value, 'to_dict'):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(lambda item: (item[0], item[1].\n to_dict()) if hasattr(item[1], 'to_dict') else item,\n value.items()))\n else:\n result[attr] = value\n return result\n <mask token>\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, GIFOutput):\n return False\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n return not self == other\n",
"step-3": "<mask token>\n\n\nclass GIFOutput(object):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, gif_fps=None, color_depth=None, gif_loop=None,\n height=None, start=None, duration=None, suffix=None, overlay=None,\n overlay_alignment=None, overlay_scale='fit', label=None):\n \"\"\"GIFOutput - a model defined in Swagger\"\"\"\n self._gif_fps = None\n self._color_depth = None\n self._gif_loop = None\n self._height = None\n self._start = None\n self._duration = None\n self._suffix = None\n self._overlay = None\n self._overlay_alignment = None\n self._overlay_scale = None\n self._label = None\n self.discriminator = None\n if gif_fps is not None:\n self.gif_fps = gif_fps\n if color_depth is not None:\n self.color_depth = color_depth\n if gif_loop is not None:\n self.gif_loop = gif_loop\n self.height = height\n self.start = start\n if duration is not None:\n self.duration = duration\n if suffix is not None:\n self.suffix = suffix\n if overlay is not None:\n self.overlay = overlay\n if overlay_alignment is not None:\n self.overlay_alignment = overlay_alignment\n if overlay_scale is not None:\n self.overlay_scale = overlay_scale\n if label is not None:\n self.label = label\n\n @property\n def gif_fps(self):\n \"\"\"Gets the gif_fps of this GIFOutput.\n\n The frame rate of the GIF. Default is the Video frame rate\n\n :return: The gif_fps of this GIFOutput.\n :rtype: float\n \"\"\"\n return self._gif_fps\n\n @gif_fps.setter\n def gif_fps(self, gif_fps):\n \"\"\"Sets the gif_fps of this GIFOutput.\n\n The frame rate of the GIF. 
Default is the Video frame rate\n\n :param gif_fps: The gif_fps of this GIFOutput.\n :type: float\n \"\"\"\n if gif_fps is not None and gif_fps > 30:\n raise ValueError(\n 'Invalid value for `gif_fps`, must be a value less than or equal to `30`'\n )\n self._gif_fps = gif_fps\n\n @property\n def color_depth(self):\n \"\"\"Gets the color_depth of this GIFOutput.\n\n Amount of colors in palette\n\n :return: The color_depth of this GIFOutput.\n :rtype: float\n \"\"\"\n return self._color_depth\n\n @color_depth.setter\n def color_depth(self, color_depth):\n \"\"\"Sets the color_depth of this GIFOutput.\n\n Amount of colors in palette\n\n :param color_depth: The color_depth of this GIFOutput.\n :type: float\n \"\"\"\n self._color_depth = color_depth\n\n @property\n def gif_loop(self):\n \"\"\"Gets the gif_loop of this GIFOutput.\n\n If to loop the GIF. -1 is no loop, 0 is infinite loops, and other numbers are number of loops.\n\n :return: The gif_loop of this GIFOutput.\n :rtype: int\n \"\"\"\n return self._gif_loop\n\n @gif_loop.setter\n def gif_loop(self, gif_loop):\n \"\"\"Sets the gif_loop of this GIFOutput.\n\n If to loop the GIF. -1 is no loop, 0 is infinite loops, and other numbers are number of loops.\n\n :param gif_loop: The gif_loop of this GIFOutput.\n :type: int\n \"\"\"\n if gif_loop is not None and gif_loop < -1:\n raise ValueError(\n 'Invalid value for `gif_loop`, must be a value greater than or equal to `-1`'\n )\n self._gif_loop = gif_loop\n\n @property\n def height(self):\n \"\"\"Gets the height of this GIFOutput.\n\n Height of the media to be rendered, in pixels. Should be the height of your scenes unless a smaller\n resolution is needed. Resolution higher than the scene resolution reduces quality. 
The width is automatically\n calculated to keep the aspect ratio.\n\n :return: The height of this GIFOutput.\n :rtype: float\n \"\"\"\n return self._height\n\n @height.setter\n def height(self, height):\n \"\"\"Sets the height of this GIFOutput.\n\n Height of the media to be rendered, in pixels. Should be the height of your scenes unless a smaller\n resolution is needed. Resolution higher than the scene resolution reduces quality. The width is automatically\n calculated to keep the aspect ratio.\n\n :param height: The height of this GIFOutput.\n :type: float\n \"\"\"\n if height is None:\n raise ValueError('Invalid value for `height`, must not be `None`')\n self._height = height\n\n @property\n def start(self):\n \"\"\"Gets the start of this GIFOutput.\n\n What second of the storyboard timeline to start the GIF.\n\n :return: The start of this GIFOutput.\n :rtype: float\n \"\"\"\n return self._start\n\n @start.setter\n def start(self, start):\n \"\"\"Sets the start of this GIFOutput.\n\n What second of the storyboard timeline to start the GIF.\n\n :param start: The start of this GIFOutput.\n :type: float\n \"\"\"\n if start is None:\n raise ValueError('Invalid value for `start`, must not be `None`')\n self._start = start\n\n @property\n def duration(self):\n \"\"\"Gets the duration of this GIFOutput.\n\n Seconds for the duration of the GIF. Can't be longer than the video.\n\n :return: The duration of this GIFOutput.\n :rtype: float\n \"\"\"\n return self._duration\n\n @duration.setter\n def duration(self, duration):\n \"\"\"Sets the duration of this GIFOutput.\n\n Seconds for the duration of the GIF. Can't be longer than the video.\n\n :param duration: The duration of this GIFOutput.\n :type: float\n \"\"\"\n self._duration = duration\n\n @property\n def suffix(self):\n \"\"\"Gets the suffix of this GIFOutput.\n\n Unique ending of the file name so several outputs can be created then identified. 
Required if there is more\n then 1 video output.\n\n :return: The suffix of this GIFOutput.\n :rtype: str\n \"\"\"\n return self._suffix\n\n @suffix.setter\n def suffix(self, suffix):\n \"\"\"Sets the suffix of this GIFOutput.\n\n Unique ending of the file name so several outputs can be created then identified. Required if there is more\n then 1 video output.\n\n :param suffix: The suffix of this GIFOutput.\n :type: str\n \"\"\"\n self._suffix = suffix\n\n @property\n def overlay(self):\n \"\"\"Gets the overlay of this GIFOutput.\n\n Path to overlay image, such as: play button or watermark.\n\n :return: The overlay of this GIFOutput.\n :rtype: str\n \"\"\"\n return self._overlay\n\n @overlay.setter\n def overlay(self, overlay):\n \"\"\"Sets the overlay of this GIFOutput.\n\n Path to overlay image, such as: play button or watermark.\n\n :param overlay: The overlay of this GIFOutput.\n :type: str\n \"\"\"\n self._overlay = overlay\n\n @property\n def overlay_alignment(self):\n \"\"\"Gets the overlay_alignment of this GIFOutput.\n\n Alignment for overlay image in case the image doesn't fit the video perfectly. The first item in the array is\n X. The second is Y.\n\n :return: The overlay_alignment of this GIFOutput.\n :rtype: list[str]\n \"\"\"\n return self._overlay_alignment\n\n @overlay_alignment.setter\n def overlay_alignment(self, overlay_alignment):\n \"\"\"Sets the overlay_alignment of this GIFOutput.\n\n Alignment for overlay image in case the image doesn't fit the video perfectly. The first item in the array is\n X. 
The second is Y.\n\n :param overlay_alignment: The overlay_alignment of this GIFOutput.\n :type: list[str]\n \"\"\"\n allowed_values = ['left', 'center', 'right', 'top', 'middle', 'bottom']\n if not set(overlay_alignment).issubset(set(allowed_values)):\n raise ValueError(\n 'Invalid values for `overlay_alignment` [{0}], must be a subset of [{1}]'\n .format(', '.join(map(str, set(overlay_alignment) - set(\n allowed_values))), ', '.join(map(str, allowed_values))))\n self._overlay_alignment = overlay_alignment\n\n @property\n def overlay_scale(self):\n \"\"\"Gets the overlay_scale of this GIFOutput.\n\n Scale the overlay image if it's not the same size as the video. * Fit: scale the image up or down so it's\n completely visible in the video's resolution. If not the same aspect ratio, transparency is added around the\n image according to the alignment settings. * Fill: scale the image up or down so it completely fills the\n video. If not the same aspect ratio, the image is cropped according to the alignment settings. * None: don't\n resize the overlay image.\n\n :return: The overlay_scale of this GIFOutput.\n :rtype: str\n \"\"\"\n return self._overlay_scale\n\n @overlay_scale.setter\n def overlay_scale(self, overlay_scale):\n \"\"\"Sets the overlay_scale of this GIFOutput.\n\n Scale the overlay image if it's not the same size as the video. * Fit: scale the image up or down so it's\n completely visible in the video's resolution. If not the same aspect ratio, transparency is added around the\n image according to the alignment settings. * Fill: scale the image up or down so it completely fills the\n video. If not the same aspect ratio, the image is cropped according to the alignment settings. 
* None: don't\n resize the overlay image.\n\n :param overlay_scale: The overlay_scale of this GIFOutput.\n :type: str\n \"\"\"\n allowed_values = ['fit', 'fill', 'none']\n if overlay_scale not in allowed_values:\n raise ValueError(\n 'Invalid value for `overlay_scale` ({0}), must be one of {1}'\n .format(overlay_scale, allowed_values))\n self._overlay_scale = overlay_scale\n\n @property\n def label(self):\n \"\"\"Gets the label of this GIFOutput.\n\n This label is another way to identify this specific output. The label is returned in the response,\n but does not appear in the file name.\n\n :return: The label of this GIFOutput.\n :rtype: str\n \"\"\"\n return self._label\n\n @label.setter\n def label(self, label):\n \"\"\"Sets the label of this GIFOutput.\n\n This label is another way to identify this specific output. The label is returned in the response,\n but does not appear in the file name.\n\n :param label: The label of this GIFOutput.\n :type: str\n \"\"\"\n self._label = label\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n for attr, _ in six.iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(lambda x: x.to_dict() if hasattr(x,\n 'to_dict') else x, value))\n elif hasattr(value, 'to_dict'):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(lambda item: (item[0], item[1].\n to_dict()) if hasattr(item[1], 'to_dict') else item,\n value.items()))\n else:\n result[attr] = value\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, GIFOutput):\n return False\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n 
\"\"\"Returns true if both objects are not equal\"\"\"\n return not self == other\n",
"step-4": "<mask token>\n\n\nclass GIFOutput(object):\n \"\"\"NOTE: This class is auto generated by the swagger code generator program.\n\n Do not edit the class manually.\n \"\"\"\n \"\"\"\n Attributes:\n swagger_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n swagger_types = {'gif_fps': 'float', 'color_depth': 'float', 'gif_loop':\n 'int', 'height': 'float', 'start': 'float', 'duration': 'float',\n 'suffix': 'str', 'overlay': 'str', 'overlay_alignment': 'list[str]',\n 'overlay_scale': 'str', 'label': 'str'}\n attribute_map = {'gif_fps': 'gif_fps', 'color_depth': 'color_depth',\n 'gif_loop': 'gif_loop', 'height': 'height', 'start': 'start',\n 'duration': 'duration', 'suffix': 'suffix', 'overlay': 'overlay',\n 'overlay_alignment': 'overlay_alignment', 'overlay_scale':\n 'overlay_scale', 'label': 'label'}\n\n def __init__(self, gif_fps=None, color_depth=None, gif_loop=None,\n height=None, start=None, duration=None, suffix=None, overlay=None,\n overlay_alignment=None, overlay_scale='fit', label=None):\n \"\"\"GIFOutput - a model defined in Swagger\"\"\"\n self._gif_fps = None\n self._color_depth = None\n self._gif_loop = None\n self._height = None\n self._start = None\n self._duration = None\n self._suffix = None\n self._overlay = None\n self._overlay_alignment = None\n self._overlay_scale = None\n self._label = None\n self.discriminator = None\n if gif_fps is not None:\n self.gif_fps = gif_fps\n if color_depth is not None:\n self.color_depth = color_depth\n if gif_loop is not None:\n self.gif_loop = gif_loop\n self.height = height\n self.start = start\n if duration is not None:\n self.duration = duration\n if suffix is not None:\n self.suffix = suffix\n if overlay is not None:\n self.overlay = overlay\n if overlay_alignment is not None:\n self.overlay_alignment = overlay_alignment\n if overlay_scale is not None:\n self.overlay_scale = 
overlay_scale\n if label is not None:\n self.label = label\n\n @property\n def gif_fps(self):\n \"\"\"Gets the gif_fps of this GIFOutput.\n\n The frame rate of the GIF. Default is the Video frame rate\n\n :return: The gif_fps of this GIFOutput.\n :rtype: float\n \"\"\"\n return self._gif_fps\n\n @gif_fps.setter\n def gif_fps(self, gif_fps):\n \"\"\"Sets the gif_fps of this GIFOutput.\n\n The frame rate of the GIF. Default is the Video frame rate\n\n :param gif_fps: The gif_fps of this GIFOutput.\n :type: float\n \"\"\"\n if gif_fps is not None and gif_fps > 30:\n raise ValueError(\n 'Invalid value for `gif_fps`, must be a value less than or equal to `30`'\n )\n self._gif_fps = gif_fps\n\n @property\n def color_depth(self):\n \"\"\"Gets the color_depth of this GIFOutput.\n\n Amount of colors in palette\n\n :return: The color_depth of this GIFOutput.\n :rtype: float\n \"\"\"\n return self._color_depth\n\n @color_depth.setter\n def color_depth(self, color_depth):\n \"\"\"Sets the color_depth of this GIFOutput.\n\n Amount of colors in palette\n\n :param color_depth: The color_depth of this GIFOutput.\n :type: float\n \"\"\"\n self._color_depth = color_depth\n\n @property\n def gif_loop(self):\n \"\"\"Gets the gif_loop of this GIFOutput.\n\n If to loop the GIF. -1 is no loop, 0 is infinite loops, and other numbers are number of loops.\n\n :return: The gif_loop of this GIFOutput.\n :rtype: int\n \"\"\"\n return self._gif_loop\n\n @gif_loop.setter\n def gif_loop(self, gif_loop):\n \"\"\"Sets the gif_loop of this GIFOutput.\n\n If to loop the GIF. 
-1 is no loop, 0 is infinite loops, and other numbers are number of loops.\n\n :param gif_loop: The gif_loop of this GIFOutput.\n :type: int\n \"\"\"\n if gif_loop is not None and gif_loop < -1:\n raise ValueError(\n 'Invalid value for `gif_loop`, must be a value greater than or equal to `-1`'\n )\n self._gif_loop = gif_loop\n\n @property\n def height(self):\n \"\"\"Gets the height of this GIFOutput.\n\n Height of the media to be rendered, in pixels. Should be the height of your scenes unless a smaller\n resolution is needed. Resolution higher than the scene resolution reduces quality. The width is automatically\n calculated to keep the aspect ratio.\n\n :return: The height of this GIFOutput.\n :rtype: float\n \"\"\"\n return self._height\n\n @height.setter\n def height(self, height):\n \"\"\"Sets the height of this GIFOutput.\n\n Height of the media to be rendered, in pixels. Should be the height of your scenes unless a smaller\n resolution is needed. Resolution higher than the scene resolution reduces quality. The width is automatically\n calculated to keep the aspect ratio.\n\n :param height: The height of this GIFOutput.\n :type: float\n \"\"\"\n if height is None:\n raise ValueError('Invalid value for `height`, must not be `None`')\n self._height = height\n\n @property\n def start(self):\n \"\"\"Gets the start of this GIFOutput.\n\n What second of the storyboard timeline to start the GIF.\n\n :return: The start of this GIFOutput.\n :rtype: float\n \"\"\"\n return self._start\n\n @start.setter\n def start(self, start):\n \"\"\"Sets the start of this GIFOutput.\n\n What second of the storyboard timeline to start the GIF.\n\n :param start: The start of this GIFOutput.\n :type: float\n \"\"\"\n if start is None:\n raise ValueError('Invalid value for `start`, must not be `None`')\n self._start = start\n\n @property\n def duration(self):\n \"\"\"Gets the duration of this GIFOutput.\n\n Seconds for the duration of the GIF. 
Can't be longer than the video.\n\n :return: The duration of this GIFOutput.\n :rtype: float\n \"\"\"\n return self._duration\n\n @duration.setter\n def duration(self, duration):\n \"\"\"Sets the duration of this GIFOutput.\n\n Seconds for the duration of the GIF. Can't be longer than the video.\n\n :param duration: The duration of this GIFOutput.\n :type: float\n \"\"\"\n self._duration = duration\n\n @property\n def suffix(self):\n \"\"\"Gets the suffix of this GIFOutput.\n\n Unique ending of the file name so several outputs can be created then identified. Required if there is more\n then 1 video output.\n\n :return: The suffix of this GIFOutput.\n :rtype: str\n \"\"\"\n return self._suffix\n\n @suffix.setter\n def suffix(self, suffix):\n \"\"\"Sets the suffix of this GIFOutput.\n\n Unique ending of the file name so several outputs can be created then identified. Required if there is more\n then 1 video output.\n\n :param suffix: The suffix of this GIFOutput.\n :type: str\n \"\"\"\n self._suffix = suffix\n\n @property\n def overlay(self):\n \"\"\"Gets the overlay of this GIFOutput.\n\n Path to overlay image, such as: play button or watermark.\n\n :return: The overlay of this GIFOutput.\n :rtype: str\n \"\"\"\n return self._overlay\n\n @overlay.setter\n def overlay(self, overlay):\n \"\"\"Sets the overlay of this GIFOutput.\n\n Path to overlay image, such as: play button or watermark.\n\n :param overlay: The overlay of this GIFOutput.\n :type: str\n \"\"\"\n self._overlay = overlay\n\n @property\n def overlay_alignment(self):\n \"\"\"Gets the overlay_alignment of this GIFOutput.\n\n Alignment for overlay image in case the image doesn't fit the video perfectly. The first item in the array is\n X. 
The second is Y.\n\n :return: The overlay_alignment of this GIFOutput.\n :rtype: list[str]\n \"\"\"\n return self._overlay_alignment\n\n @overlay_alignment.setter\n def overlay_alignment(self, overlay_alignment):\n \"\"\"Sets the overlay_alignment of this GIFOutput.\n\n Alignment for overlay image in case the image doesn't fit the video perfectly. The first item in the array is\n X. The second is Y.\n\n :param overlay_alignment: The overlay_alignment of this GIFOutput.\n :type: list[str]\n \"\"\"\n allowed_values = ['left', 'center', 'right', 'top', 'middle', 'bottom']\n if not set(overlay_alignment).issubset(set(allowed_values)):\n raise ValueError(\n 'Invalid values for `overlay_alignment` [{0}], must be a subset of [{1}]'\n .format(', '.join(map(str, set(overlay_alignment) - set(\n allowed_values))), ', '.join(map(str, allowed_values))))\n self._overlay_alignment = overlay_alignment\n\n @property\n def overlay_scale(self):\n \"\"\"Gets the overlay_scale of this GIFOutput.\n\n Scale the overlay image if it's not the same size as the video. * Fit: scale the image up or down so it's\n completely visible in the video's resolution. If not the same aspect ratio, transparency is added around the\n image according to the alignment settings. * Fill: scale the image up or down so it completely fills the\n video. If not the same aspect ratio, the image is cropped according to the alignment settings. * None: don't\n resize the overlay image.\n\n :return: The overlay_scale of this GIFOutput.\n :rtype: str\n \"\"\"\n return self._overlay_scale\n\n @overlay_scale.setter\n def overlay_scale(self, overlay_scale):\n \"\"\"Sets the overlay_scale of this GIFOutput.\n\n Scale the overlay image if it's not the same size as the video. * Fit: scale the image up or down so it's\n completely visible in the video's resolution. If not the same aspect ratio, transparency is added around the\n image according to the alignment settings. 
* Fill: scale the image up or down so it completely fills the\n video. If not the same aspect ratio, the image is cropped according to the alignment settings. * None: don't\n resize the overlay image.\n\n :param overlay_scale: The overlay_scale of this GIFOutput.\n :type: str\n \"\"\"\n allowed_values = ['fit', 'fill', 'none']\n if overlay_scale not in allowed_values:\n raise ValueError(\n 'Invalid value for `overlay_scale` ({0}), must be one of {1}'\n .format(overlay_scale, allowed_values))\n self._overlay_scale = overlay_scale\n\n @property\n def label(self):\n \"\"\"Gets the label of this GIFOutput.\n\n This label is another way to identify this specific output. The label is returned in the response,\n but does not appear in the file name.\n\n :return: The label of this GIFOutput.\n :rtype: str\n \"\"\"\n return self._label\n\n @label.setter\n def label(self, label):\n \"\"\"Sets the label of this GIFOutput.\n\n This label is another way to identify this specific output. The label is returned in the response,\n but does not appear in the file name.\n\n :param label: The label of this GIFOutput.\n :type: str\n \"\"\"\n self._label = label\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n for attr, _ in six.iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(lambda x: x.to_dict() if hasattr(x,\n 'to_dict') else x, value))\n elif hasattr(value, 'to_dict'):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(lambda item: (item[0], item[1].\n to_dict()) if hasattr(item[1], 'to_dict') else item,\n value.items()))\n else:\n result[attr] = value\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both 
objects are equal\"\"\"\n if not isinstance(other, GIFOutput):\n return False\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n return not self == other\n",
"step-5": "# coding: utf-8\n\n\"\"\"\n Idomoo API\n\n\n\n OpenAPI spec version: 2.0\n Contact: dev.support@idomoo.com\n\n\"\"\"\n\n\nimport pprint\n\nimport six\n\n\nclass GIFOutput(object):\n \"\"\"NOTE: This class is auto generated by the swagger code generator program.\n\n Do not edit the class manually.\n \"\"\"\n\n \"\"\"\n Attributes:\n swagger_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n swagger_types = {\n 'gif_fps': 'float',\n 'color_depth': 'float',\n 'gif_loop': 'int',\n 'height': 'float',\n 'start': 'float',\n 'duration': 'float',\n 'suffix': 'str',\n 'overlay': 'str',\n 'overlay_alignment': 'list[str]',\n 'overlay_scale': 'str',\n 'label': 'str'\n }\n\n attribute_map = {\n 'gif_fps': 'gif_fps',\n 'color_depth': 'color_depth',\n 'gif_loop': 'gif_loop',\n 'height': 'height',\n 'start': 'start',\n 'duration': 'duration',\n 'suffix': 'suffix',\n 'overlay': 'overlay',\n 'overlay_alignment': 'overlay_alignment',\n 'overlay_scale': 'overlay_scale',\n 'label': 'label'\n }\n\n def __init__(self, gif_fps=None, color_depth=None, gif_loop=None, height=None, start=None, duration=None,\n suffix=None, overlay=None, overlay_alignment=None, overlay_scale='fit', label=None):\n \"\"\"GIFOutput - a model defined in Swagger\"\"\"\n\n self._gif_fps = None\n self._color_depth = None\n self._gif_loop = None\n self._height = None\n self._start = None\n self._duration = None\n self._suffix = None\n self._overlay = None\n self._overlay_alignment = None\n self._overlay_scale = None\n self._label = None\n self.discriminator = None\n\n if gif_fps is not None:\n self.gif_fps = gif_fps\n if color_depth is not None:\n self.color_depth = color_depth\n if gif_loop is not None:\n self.gif_loop = gif_loop\n self.height = height\n self.start = start\n if duration is not None:\n self.duration = duration\n if suffix is not None:\n self.suffix = suffix\n if 
overlay is not None:\n self.overlay = overlay\n if overlay_alignment is not None:\n self.overlay_alignment = overlay_alignment\n if overlay_scale is not None:\n self.overlay_scale = overlay_scale\n if label is not None:\n self.label = label\n\n @property\n def gif_fps(self):\n \"\"\"Gets the gif_fps of this GIFOutput.\n\n The frame rate of the GIF. Default is the Video frame rate\n\n :return: The gif_fps of this GIFOutput.\n :rtype: float\n \"\"\"\n return self._gif_fps\n\n @gif_fps.setter\n def gif_fps(self, gif_fps):\n \"\"\"Sets the gif_fps of this GIFOutput.\n\n The frame rate of the GIF. Default is the Video frame rate\n\n :param gif_fps: The gif_fps of this GIFOutput.\n :type: float\n \"\"\"\n if gif_fps is not None and gif_fps > 30:\n raise ValueError(\"Invalid value for `gif_fps`, must be a value less than or equal to `30`\")\n\n self._gif_fps = gif_fps\n\n @property\n def color_depth(self):\n \"\"\"Gets the color_depth of this GIFOutput.\n\n Amount of colors in palette\n\n :return: The color_depth of this GIFOutput.\n :rtype: float\n \"\"\"\n return self._color_depth\n\n @color_depth.setter\n def color_depth(self, color_depth):\n \"\"\"Sets the color_depth of this GIFOutput.\n\n Amount of colors in palette\n\n :param color_depth: The color_depth of this GIFOutput.\n :type: float\n \"\"\"\n\n self._color_depth = color_depth\n\n @property\n def gif_loop(self):\n \"\"\"Gets the gif_loop of this GIFOutput.\n\n If to loop the GIF. -1 is no loop, 0 is infinite loops, and other numbers are number of loops.\n\n :return: The gif_loop of this GIFOutput.\n :rtype: int\n \"\"\"\n return self._gif_loop\n\n @gif_loop.setter\n def gif_loop(self, gif_loop):\n \"\"\"Sets the gif_loop of this GIFOutput.\n\n If to loop the GIF. 
-1 is no loop, 0 is infinite loops, and other numbers are number of loops.\n\n :param gif_loop: The gif_loop of this GIFOutput.\n :type: int\n \"\"\"\n if gif_loop is not None and gif_loop < -1:\n raise ValueError(\"Invalid value for `gif_loop`, must be a value greater than or equal to `-1`\")\n\n self._gif_loop = gif_loop\n\n @property\n def height(self):\n \"\"\"Gets the height of this GIFOutput.\n\n Height of the media to be rendered, in pixels. Should be the height of your scenes unless a smaller\n resolution is needed. Resolution higher than the scene resolution reduces quality. The width is automatically\n calculated to keep the aspect ratio.\n\n :return: The height of this GIFOutput.\n :rtype: float\n \"\"\"\n return self._height\n\n @height.setter\n def height(self, height):\n \"\"\"Sets the height of this GIFOutput.\n\n Height of the media to be rendered, in pixels. Should be the height of your scenes unless a smaller\n resolution is needed. Resolution higher than the scene resolution reduces quality. The width is automatically\n calculated to keep the aspect ratio.\n\n :param height: The height of this GIFOutput.\n :type: float\n \"\"\"\n if height is None:\n raise ValueError(\"Invalid value for `height`, must not be `None`\")\n\n self._height = height\n\n @property\n def start(self):\n \"\"\"Gets the start of this GIFOutput.\n\n What second of the storyboard timeline to start the GIF.\n\n :return: The start of this GIFOutput.\n :rtype: float\n \"\"\"\n return self._start\n\n @start.setter\n def start(self, start):\n \"\"\"Sets the start of this GIFOutput.\n\n What second of the storyboard timeline to start the GIF.\n\n :param start: The start of this GIFOutput.\n :type: float\n \"\"\"\n if start is None:\n raise ValueError(\"Invalid value for `start`, must not be `None`\")\n\n self._start = start\n\n @property\n def duration(self):\n \"\"\"Gets the duration of this GIFOutput.\n\n Seconds for the duration of the GIF. 
Can't be longer than the video.\n\n :return: The duration of this GIFOutput.\n :rtype: float\n \"\"\"\n return self._duration\n\n @duration.setter\n def duration(self, duration):\n \"\"\"Sets the duration of this GIFOutput.\n\n Seconds for the duration of the GIF. Can't be longer than the video.\n\n :param duration: The duration of this GIFOutput.\n :type: float\n \"\"\"\n\n self._duration = duration\n\n @property\n def suffix(self):\n \"\"\"Gets the suffix of this GIFOutput.\n\n Unique ending of the file name so several outputs can be created then identified. Required if there is more\n then 1 video output.\n\n :return: The suffix of this GIFOutput.\n :rtype: str\n \"\"\"\n return self._suffix\n\n @suffix.setter\n def suffix(self, suffix):\n \"\"\"Sets the suffix of this GIFOutput.\n\n Unique ending of the file name so several outputs can be created then identified. Required if there is more\n then 1 video output.\n\n :param suffix: The suffix of this GIFOutput.\n :type: str\n \"\"\"\n\n self._suffix = suffix\n\n @property\n def overlay(self):\n \"\"\"Gets the overlay of this GIFOutput.\n\n Path to overlay image, such as: play button or watermark.\n\n :return: The overlay of this GIFOutput.\n :rtype: str\n \"\"\"\n return self._overlay\n\n @overlay.setter\n def overlay(self, overlay):\n \"\"\"Sets the overlay of this GIFOutput.\n\n Path to overlay image, such as: play button or watermark.\n\n :param overlay: The overlay of this GIFOutput.\n :type: str\n \"\"\"\n\n self._overlay = overlay\n\n @property\n def overlay_alignment(self):\n \"\"\"Gets the overlay_alignment of this GIFOutput.\n\n Alignment for overlay image in case the image doesn't fit the video perfectly. The first item in the array is\n X. 
The second is Y.\n\n :return: The overlay_alignment of this GIFOutput.\n :rtype: list[str]\n \"\"\"\n return self._overlay_alignment\n\n @overlay_alignment.setter\n def overlay_alignment(self, overlay_alignment):\n \"\"\"Sets the overlay_alignment of this GIFOutput.\n\n Alignment for overlay image in case the image doesn't fit the video perfectly. The first item in the array is\n X. The second is Y.\n\n :param overlay_alignment: The overlay_alignment of this GIFOutput.\n :type: list[str]\n \"\"\"\n allowed_values = [\"left\", \"center\", \"right\", \"top\", \"middle\", \"bottom\"]\n if not set(overlay_alignment).issubset(set(allowed_values)):\n raise ValueError(\n \"Invalid values for `overlay_alignment` [{0}], must be a subset of [{1}]\"\n .format(\", \".join(map(str, set(overlay_alignment) - set(allowed_values))),\n \", \".join(map(str, allowed_values)))\n )\n\n self._overlay_alignment = overlay_alignment\n\n @property\n def overlay_scale(self):\n \"\"\"Gets the overlay_scale of this GIFOutput.\n\n Scale the overlay image if it's not the same size as the video. * Fit: scale the image up or down so it's\n completely visible in the video's resolution. If not the same aspect ratio, transparency is added around the\n image according to the alignment settings. * Fill: scale the image up or down so it completely fills the\n video. If not the same aspect ratio, the image is cropped according to the alignment settings. * None: don't\n resize the overlay image.\n\n :return: The overlay_scale of this GIFOutput.\n :rtype: str\n \"\"\"\n return self._overlay_scale\n\n @overlay_scale.setter\n def overlay_scale(self, overlay_scale):\n \"\"\"Sets the overlay_scale of this GIFOutput.\n\n Scale the overlay image if it's not the same size as the video. * Fit: scale the image up or down so it's\n completely visible in the video's resolution. If not the same aspect ratio, transparency is added around the\n image according to the alignment settings. 
* Fill: scale the image up or down so it completely fills the\n video. If not the same aspect ratio, the image is cropped according to the alignment settings. * None: don't\n resize the overlay image.\n\n :param overlay_scale: The overlay_scale of this GIFOutput.\n :type: str\n \"\"\"\n allowed_values = [\"fit\", \"fill\", \"none\"]\n if overlay_scale not in allowed_values:\n raise ValueError(\n \"Invalid value for `overlay_scale` ({0}), must be one of {1}\"\n .format(overlay_scale, allowed_values)\n )\n\n self._overlay_scale = overlay_scale\n\n @property\n def label(self):\n \"\"\"Gets the label of this GIFOutput.\n\n This label is another way to identify this specific output. The label is returned in the response,\n but does not appear in the file name.\n\n :return: The label of this GIFOutput.\n :rtype: str\n \"\"\"\n return self._label\n\n @label.setter\n def label(self, label):\n \"\"\"Sets the label of this GIFOutput.\n\n This label is another way to identify this specific output. 
The label is returned in the response,\n but does not appear in the file name.\n\n :param label: The label of this GIFOutput.\n :type: str\n \"\"\"\n\n self._label = label\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n\n for attr, _ in six.iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, GIFOutput):\n return False\n\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n return not self == other\n",
"step-ids": [
27,
28,
29,
31,
33
]
}
|
[
27,
28,
29,
31,
33
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for i in range(x, y + 1):
if i > 1:
for j in range(2, i):
if i % j == 0:
break
else:
count += 1
print(count)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
x, y = map(int, input().split())
count = 0
for i in range(x, y + 1):
if i > 1:
for j in range(2, i):
if i % j == 0:
break
else:
count += 1
print(count)
<|reserved_special_token_1|>
'''Given a range of 2 numbers (i.e) L and R count the number of prime numbers in the range (inclusive of L and R ).
Input Size : L <= R <= 100000(complexity O(n) read about Sieve of Eratosthenes)
Sample Testcase :
INPUT
2 5
OUTPUT
3'''
x,y=map(int,input().split())
count=0
for i in range(x,y+1):
if i>1:
for j in range(2,i):
if(i%j==0):
break
else:
count+=1
print(count)
|
flexible
|
{
"blob_id": "06848ec0e327fed1da00446cec6392c6f42130af",
"index": 2158,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(x, y + 1):\n if i > 1:\n for j in range(2, i):\n if i % j == 0:\n break\n else:\n count += 1\nprint(count)\n",
"step-3": "<mask token>\nx, y = map(int, input().split())\ncount = 0\nfor i in range(x, y + 1):\n if i > 1:\n for j in range(2, i):\n if i % j == 0:\n break\n else:\n count += 1\nprint(count)\n",
"step-4": "'''Given a range of 2 numbers (i.e) L and R count the number of prime numbers in the range (inclusive of L and R ).\nInput Size : L <= R <= 100000(complexity O(n) read about Sieve of Eratosthenes)\nSample Testcase :\nINPUT\n2 5\nOUTPUT\n3'''\n\nx,y=map(int,input().split())\ncount=0\nfor i in range(x,y+1):\n if i>1:\n for j in range(2,i):\n if(i%j==0):\n break\n else:\n count+=1\nprint(count)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
[x for x in zip(a, b)]
<|reserved_special_token_0|>
[x for x in zip(a, c)]
<|reserved_special_token_0|>
for book, price in zip(books, prices):
print('%s的价格是:%3.1f' % (book, price))
[y for y in reversed(b)]
for book in sorted(books, reverse=True, key=len):
print(book)
<|reserved_special_token_1|>
a = ['a', 'b', 'c']
b = [1, 2, 3]
[x for x in zip(a, b)]
c = ['x', 'y']
[x for x in zip(a, c)]
books = ['简爱', '小王子', '瓦尔登湖']
prices = [56, 78, 66]
for book, price in zip(books, prices):
print('%s的价格是:%3.1f' % (book, price))
[y for y in reversed(b)]
for book in sorted(books, reverse=True, key=len):
print(book)
<|reserved_special_token_1|>
# zip(),可以压缩 N 个列表成为一个zip对象(可迭代对象)。
a =['a', 'b', 'c']
b =[1, 2, 3]
[x for x in zip(a, b)] # [('a', 1), ('b', 2), ('c', 3)]
# 列表长度不等时,以短的为准
c =['x','y']
[x for x in zip(a, c)] # [('a', 'x'), ('b', 'y')]
# 例子
books =['简爱','小王子','瓦尔登湖']
prices =[56, 78, 66]
for book, price in zip(books, prices):
print("%s的价格是:%3.1f"% (book, price))
# reversed() 实现反向遍历,参数可以是各种序列
[y for y in reversed(b)] # [3, 2, 1]
# sorted() 接受一个可迭代对象,返回其升序。可传参数,reverse=True,key=?(排序关键字)
for book in sorted(books, reverse=True, key=len):
print(book)
|
flexible
|
{
"blob_id": "0eab23f4271f724da587707599eb0cbf2144efa1",
"index": 8178,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n[x for x in zip(a, b)]\n<mask token>\n[x for x in zip(a, c)]\n<mask token>\nfor book, price in zip(books, prices):\n print('%s的价格是:%3.1f' % (book, price))\n[y for y in reversed(b)]\nfor book in sorted(books, reverse=True, key=len):\n print(book)\n",
"step-3": "a = ['a', 'b', 'c']\nb = [1, 2, 3]\n[x for x in zip(a, b)]\nc = ['x', 'y']\n[x for x in zip(a, c)]\nbooks = ['简爱', '小王子', '瓦尔登湖']\nprices = [56, 78, 66]\nfor book, price in zip(books, prices):\n print('%s的价格是:%3.1f' % (book, price))\n[y for y in reversed(b)]\nfor book in sorted(books, reverse=True, key=len):\n print(book)\n",
"step-4": "# zip(),可以压缩 N 个列表成为一个zip对象(可迭代对象)。\na =['a', 'b', 'c']\nb =[1, 2, 3]\n[x for x in zip(a, b)] # [('a', 1), ('b', 2), ('c', 3)]\n\n# 列表长度不等时,以短的为准\nc =['x','y']\n[x for x in zip(a, c)] # [('a', 'x'), ('b', 'y')]\n\n# 例子\nbooks =['简爱','小王子','瓦尔登湖']\nprices =[56, 78, 66]\nfor book, price in zip(books, prices):\n print(\"%s的价格是:%3.1f\"% (book, price))\n\n# reversed() 实现反向遍历,参数可以是各种序列\n[y for y in reversed(b)] # [3, 2, 1]\n\n# sorted() 接受一个可迭代对象,返回其升序。可传参数,reverse=True,key=?(排序关键字)\nfor book in sorted(books, reverse=True, key=len):\n print(book)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class TemperatureSensor:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __init__(self, average_temperature, temperature_variation,
min_temperature, max_temperature):
self.average_temperature = average_temperature
self.temperature_variation = temperature_variation
self.min_temperature = min_temperature
self.max_temperature = max_temperature
self.value = 0.0
<|reserved_special_token_0|>
def noise(self):
self.noise_value = np.random.normal(0, 1)
return self.noise_value
def simple_random(self):
value = self.min_temperature + random() * (self.max_temperature -
self.min_temperature)
return value
def complex_random(self):
value = self.average_temperature * (1 + self.temperature_variation /
100 * (1 * random() - 1))
value = max(value, self.min_temperature)
value = min(value, self.max_temperature)
return value
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TemperatureSensor:
sensor_type = 'temperature'
unit = 'celsius'
instance_id = '283h62gsj'
def __init__(self, average_temperature, temperature_variation,
min_temperature, max_temperature):
self.average_temperature = average_temperature
self.temperature_variation = temperature_variation
self.min_temperature = min_temperature
self.max_temperature = max_temperature
self.value = 0.0
def sense(self):
self.value = self.complex_random() + self.noise()
return self.value
def noise(self):
self.noise_value = np.random.normal(0, 1)
return self.noise_value
def simple_random(self):
value = self.min_temperature + random() * (self.max_temperature -
self.min_temperature)
return value
def complex_random(self):
value = self.average_temperature * (1 + self.temperature_variation /
100 * (1 * random() - 1))
value = max(value, self.min_temperature)
value = min(value, self.max_temperature)
return value
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TemperatureSensor:
sensor_type = 'temperature'
unit = 'celsius'
instance_id = '283h62gsj'
def __init__(self, average_temperature, temperature_variation,
min_temperature, max_temperature):
self.average_temperature = average_temperature
self.temperature_variation = temperature_variation
self.min_temperature = min_temperature
self.max_temperature = max_temperature
self.value = 0.0
def sense(self):
self.value = self.complex_random() + self.noise()
return self.value
def noise(self):
self.noise_value = np.random.normal(0, 1)
return self.noise_value
def simple_random(self):
value = self.min_temperature + random() * (self.max_temperature -
self.min_temperature)
return value
def complex_random(self):
value = self.average_temperature * (1 + self.temperature_variation /
100 * (1 * random() - 1))
value = max(value, self.min_temperature)
value = min(value, self.max_temperature)
return value
ts = TemperatureSensor(25, 10, 16, 35)
<|reserved_special_token_1|>
from random import random
import numpy as np
class TemperatureSensor:
sensor_type = 'temperature'
unit = 'celsius'
instance_id = '283h62gsj'
def __init__(self, average_temperature, temperature_variation,
min_temperature, max_temperature):
self.average_temperature = average_temperature
self.temperature_variation = temperature_variation
self.min_temperature = min_temperature
self.max_temperature = max_temperature
self.value = 0.0
def sense(self):
self.value = self.complex_random() + self.noise()
return self.value
def noise(self):
self.noise_value = np.random.normal(0, 1)
return self.noise_value
def simple_random(self):
value = self.min_temperature + random() * (self.max_temperature -
self.min_temperature)
return value
def complex_random(self):
value = self.average_temperature * (1 + self.temperature_variation /
100 * (1 * random() - 1))
value = max(value, self.min_temperature)
value = min(value, self.max_temperature)
return value
ts = TemperatureSensor(25, 10, 16, 35)
<|reserved_special_token_1|>
from random import random
import numpy as np
class TemperatureSensor:
sensor_type = "temperature"
unit="celsius"
instance_id="283h62gsj"
#initialisation
def __init__(self, average_temperature, temperature_variation, min_temperature, max_temperature):
self.average_temperature = average_temperature
self.temperature_variation = temperature_variation
self.min_temperature = min_temperature
self.max_temperature= max_temperature
self.value = 0.0 #initialise current temp value
#sensing
def sense(self):
#self.value = self.value + self.simple_random()
self.value = self.complex_random() + self.noise()
return self.value
#noise
def noise(self):
self.noise_value = np.random.normal(0,1)
return self.noise_value
#helper function for generating values with min temp as its base
def simple_random(self):
value = self.min_temperature + (random() * (self.max_temperature - self.min_temperature)) #so that it is in the range
return value
def complex_random(self):
value = self.average_temperature * (1 + (self.temperature_variation/100) * (1 * random() -1))
value = max(value,self.min_temperature)
value = min(value,self.max_temperature)
return value
#creating instance of sensor
ts = TemperatureSensor(25,10,16,35)
|
flexible
|
{
"blob_id": "bc890f0f40a7e9c916628d491e473b5ecfa9bb9b",
"index": 740,
"step-1": "<mask token>\n\n\nclass TemperatureSensor:\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, average_temperature, temperature_variation,\n min_temperature, max_temperature):\n self.average_temperature = average_temperature\n self.temperature_variation = temperature_variation\n self.min_temperature = min_temperature\n self.max_temperature = max_temperature\n self.value = 0.0\n <mask token>\n\n def noise(self):\n self.noise_value = np.random.normal(0, 1)\n return self.noise_value\n\n def simple_random(self):\n value = self.min_temperature + random() * (self.max_temperature -\n self.min_temperature)\n return value\n\n def complex_random(self):\n value = self.average_temperature * (1 + self.temperature_variation /\n 100 * (1 * random() - 1))\n value = max(value, self.min_temperature)\n value = min(value, self.max_temperature)\n return value\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass TemperatureSensor:\n sensor_type = 'temperature'\n unit = 'celsius'\n instance_id = '283h62gsj'\n\n def __init__(self, average_temperature, temperature_variation,\n min_temperature, max_temperature):\n self.average_temperature = average_temperature\n self.temperature_variation = temperature_variation\n self.min_temperature = min_temperature\n self.max_temperature = max_temperature\n self.value = 0.0\n\n def sense(self):\n self.value = self.complex_random() + self.noise()\n return self.value\n\n def noise(self):\n self.noise_value = np.random.normal(0, 1)\n return self.noise_value\n\n def simple_random(self):\n value = self.min_temperature + random() * (self.max_temperature -\n self.min_temperature)\n return value\n\n def complex_random(self):\n value = self.average_temperature * (1 + self.temperature_variation /\n 100 * (1 * random() - 1))\n value = max(value, self.min_temperature)\n value = min(value, self.max_temperature)\n return value\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass TemperatureSensor:\n sensor_type = 'temperature'\n unit = 'celsius'\n instance_id = '283h62gsj'\n\n def __init__(self, average_temperature, temperature_variation,\n min_temperature, max_temperature):\n self.average_temperature = average_temperature\n self.temperature_variation = temperature_variation\n self.min_temperature = min_temperature\n self.max_temperature = max_temperature\n self.value = 0.0\n\n def sense(self):\n self.value = self.complex_random() + self.noise()\n return self.value\n\n def noise(self):\n self.noise_value = np.random.normal(0, 1)\n return self.noise_value\n\n def simple_random(self):\n value = self.min_temperature + random() * (self.max_temperature -\n self.min_temperature)\n return value\n\n def complex_random(self):\n value = self.average_temperature * (1 + self.temperature_variation /\n 100 * (1 * random() - 1))\n value = max(value, self.min_temperature)\n value = min(value, self.max_temperature)\n return value\n\n\nts = TemperatureSensor(25, 10, 16, 35)\n",
"step-4": "from random import random\nimport numpy as np\n\n\nclass TemperatureSensor:\n sensor_type = 'temperature'\n unit = 'celsius'\n instance_id = '283h62gsj'\n\n def __init__(self, average_temperature, temperature_variation,\n min_temperature, max_temperature):\n self.average_temperature = average_temperature\n self.temperature_variation = temperature_variation\n self.min_temperature = min_temperature\n self.max_temperature = max_temperature\n self.value = 0.0\n\n def sense(self):\n self.value = self.complex_random() + self.noise()\n return self.value\n\n def noise(self):\n self.noise_value = np.random.normal(0, 1)\n return self.noise_value\n\n def simple_random(self):\n value = self.min_temperature + random() * (self.max_temperature -\n self.min_temperature)\n return value\n\n def complex_random(self):\n value = self.average_temperature * (1 + self.temperature_variation /\n 100 * (1 * random() - 1))\n value = max(value, self.min_temperature)\n value = min(value, self.max_temperature)\n return value\n\n\nts = TemperatureSensor(25, 10, 16, 35)\n",
"step-5": "from random import random\r\n\r\nimport numpy as np\r\n\r\nclass TemperatureSensor:\r\n sensor_type = \"temperature\"\r\n unit=\"celsius\"\r\n instance_id=\"283h62gsj\"\r\n \r\n #initialisation\r\n \r\n def __init__(self, average_temperature, temperature_variation, min_temperature, max_temperature):\r\n self.average_temperature = average_temperature\r\n self.temperature_variation = temperature_variation\r\n self.min_temperature = min_temperature \r\n self.max_temperature= max_temperature\r\n self.value = 0.0 #initialise current temp value\r\n \r\n #sensing \r\n def sense(self):\r\n #self.value = self.value + self.simple_random()\r\n self.value = self.complex_random() + self.noise()\r\n return self.value\r\n \r\n #noise\r\n def noise(self):\r\n self.noise_value = np.random.normal(0,1)\r\n return self.noise_value\r\n \r\n #helper function for generating values with min temp as its base\r\n def simple_random(self):\r\n value = self.min_temperature + (random() * (self.max_temperature - self.min_temperature)) #so that it is in the range\r\n return value\r\n \r\n def complex_random(self):\r\n value = self.average_temperature * (1 + (self.temperature_variation/100) * (1 * random() -1))\r\n value = max(value,self.min_temperature)\r\n value = min(value,self.max_temperature)\r\n return value\r\n \r\n#creating instance of sensor\r\nts = TemperatureSensor(25,10,16,35)\r\n\r\n",
"step-ids": [
5,
7,
8,
9,
10
]
}
|
[
5,
7,
8,
9,
10
] |
from django.apps import AppConfig
class FilebasedUniqueConfig(AppConfig):
name = 'papermerge.filebased_unique'
label = 'filebased_unique'
|
normal
|
{
"blob_id": "2d17229afe154937132c1e4f8c138896da34ab61",
"index": 1430,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass FilebasedUniqueConfig(AppConfig):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass FilebasedUniqueConfig(AppConfig):\n name = 'papermerge.filebased_unique'\n label = 'filebased_unique'\n",
"step-4": "from django.apps import AppConfig\n\n\nclass FilebasedUniqueConfig(AppConfig):\n name = 'papermerge.filebased_unique'\n label = 'filebased_unique'\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print('Older: ' + cache['year'])
<|reserved_special_token_0|>
print('Newer: ' + cache['year'])
print(cache)
<|reserved_special_token_0|>
print(cache)
<|reserved_special_token_0|>
print(cache)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
cache = cachetools.LRUCache(maxsize=3)
cache['PyCon'] = 'India'
cache['year'] = '2017'
print('Older: ' + cache['year'])
cache['year'] = '2018'
print('Newer: ' + cache['year'])
print(cache)
cache['sdate'] = '05/09/2018'
print(cache)
cache['edate'] = '09/09/2018'
print(cache)
<|reserved_special_token_1|>
import cachetools
cache = cachetools.LRUCache(maxsize=3)
cache['PyCon'] = 'India'
cache['year'] = '2017'
print('Older: ' + cache['year'])
cache['year'] = '2018'
print('Newer: ' + cache['year'])
print(cache)
cache['sdate'] = '05/09/2018'
print(cache)
cache['edate'] = '09/09/2018'
print(cache)
<|reserved_special_token_1|>
import cachetools
cache = cachetools.LRUCache(maxsize = 3)
cache['PyCon'] = 'India'
cache['year'] = '2017'
print("Older: " + cache['year'])
cache['year'] = '2018'
print("Newer: " + cache['year'])
print(cache)
cache['sdate'] = '05/09/2018'
print(cache)
cache['edate'] = '09/09/2018'
print(cache)
|
flexible
|
{
"blob_id": "aebc918d6a1d1d2473f74d77b8a915ac25548e3a",
"index": 443,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('Older: ' + cache['year'])\n<mask token>\nprint('Newer: ' + cache['year'])\nprint(cache)\n<mask token>\nprint(cache)\n<mask token>\nprint(cache)\n",
"step-3": "<mask token>\ncache = cachetools.LRUCache(maxsize=3)\ncache['PyCon'] = 'India'\ncache['year'] = '2017'\nprint('Older: ' + cache['year'])\ncache['year'] = '2018'\nprint('Newer: ' + cache['year'])\nprint(cache)\ncache['sdate'] = '05/09/2018'\nprint(cache)\ncache['edate'] = '09/09/2018'\nprint(cache)\n",
"step-4": "import cachetools\ncache = cachetools.LRUCache(maxsize=3)\ncache['PyCon'] = 'India'\ncache['year'] = '2017'\nprint('Older: ' + cache['year'])\ncache['year'] = '2018'\nprint('Newer: ' + cache['year'])\nprint(cache)\ncache['sdate'] = '05/09/2018'\nprint(cache)\ncache['edate'] = '09/09/2018'\nprint(cache)\n",
"step-5": "import cachetools\n\ncache = cachetools.LRUCache(maxsize = 3)\ncache['PyCon'] = 'India'\ncache['year'] = '2017'\nprint(\"Older: \" + cache['year'])\n\ncache['year'] = '2018'\nprint(\"Newer: \" + cache['year'])\nprint(cache)\n\ncache['sdate'] = '05/09/2018'\nprint(cache)\n\ncache['edate'] = '09/09/2018'\nprint(cache)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# -*- coding: utf8 -*-
'''
dump data from mysql/hive to load into mysql
'''
from datetime import datetime,timedelta
from optparse import OptionParser
import argparse
import ConfigParser
import sys
import os
import time
import commonutil
def getConf(cfgfile):
config = ConfigParser.ConfigParser()
with open(cfgfile, 'r') as cfgfile:
config.readfp(cfgfile)
return config
def readFile(fileName):
infile = open(fileName, "r")
content = infile.read()
infile.close()
return content
def truncateFile(fileName):
fileTemp = open(fileName, "w")
fileTemp.truncate()
fileTemp.close()
def getConnBySecName(dbConf,secName):
descSec = ''
secs = dbConf.sections()
for sec in secs:
if sec == secName:
descSec = sec
conn = getConnOne(dbConf,descSec)
return conn
def getConns(dbConf):
secs = dbConf.sections()
conns = {}
for sec in secs:
conn = getConnOne(dbConf, sec)
if conn is not None:
conns[sec]=conn
return conns
def getConnOne(dbConf, sec):
host = dbConf.get(sec,"host")
port = dbConf.getint(sec,"port")
user = dbConf.get(sec,"user")
password = dbConf.get(sec,"password")
database = dbConf.get(sec,"database")
conn = commonutil.getConn(host, port, user, password, database)
if conn is None:
time.sleep(5)
conn = commonutil.getConn(host, port, user, password, database)
if conn is None:
commonutil.fatal("fail to get connection : sec=[{0}]".format(sec))
return conn
|
normal
|
{
"blob_id": "df984939c109662bebbd1556c12223fce8f643e6",
"index": 1773,
"step-1": "<mask token>\n\n\ndef truncateFile(fileName):\n fileTemp = open(fileName, 'w')\n fileTemp.truncate()\n fileTemp.close()\n\n\ndef getConnBySecName(dbConf, secName):\n descSec = ''\n secs = dbConf.sections()\n for sec in secs:\n if sec == secName:\n descSec = sec\n conn = getConnOne(dbConf, descSec)\n return conn\n\n\n<mask token>\n\n\ndef getConnOne(dbConf, sec):\n host = dbConf.get(sec, 'host')\n port = dbConf.getint(sec, 'port')\n user = dbConf.get(sec, 'user')\n password = dbConf.get(sec, 'password')\n database = dbConf.get(sec, 'database')\n conn = commonutil.getConn(host, port, user, password, database)\n if conn is None:\n time.sleep(5)\n conn = commonutil.getConn(host, port, user, password, database)\n if conn is None:\n commonutil.fatal('fail to get connection : sec=[{0}]'.format(sec))\n return conn\n",
"step-2": "<mask token>\n\n\ndef readFile(fileName):\n infile = open(fileName, 'r')\n content = infile.read()\n infile.close()\n return content\n\n\ndef truncateFile(fileName):\n fileTemp = open(fileName, 'w')\n fileTemp.truncate()\n fileTemp.close()\n\n\ndef getConnBySecName(dbConf, secName):\n descSec = ''\n secs = dbConf.sections()\n for sec in secs:\n if sec == secName:\n descSec = sec\n conn = getConnOne(dbConf, descSec)\n return conn\n\n\n<mask token>\n\n\ndef getConnOne(dbConf, sec):\n host = dbConf.get(sec, 'host')\n port = dbConf.getint(sec, 'port')\n user = dbConf.get(sec, 'user')\n password = dbConf.get(sec, 'password')\n database = dbConf.get(sec, 'database')\n conn = commonutil.getConn(host, port, user, password, database)\n if conn is None:\n time.sleep(5)\n conn = commonutil.getConn(host, port, user, password, database)\n if conn is None:\n commonutil.fatal('fail to get connection : sec=[{0}]'.format(sec))\n return conn\n",
"step-3": "<mask token>\n\n\ndef getConf(cfgfile):\n config = ConfigParser.ConfigParser()\n with open(cfgfile, 'r') as cfgfile:\n config.readfp(cfgfile)\n return config\n\n\ndef readFile(fileName):\n infile = open(fileName, 'r')\n content = infile.read()\n infile.close()\n return content\n\n\ndef truncateFile(fileName):\n fileTemp = open(fileName, 'w')\n fileTemp.truncate()\n fileTemp.close()\n\n\ndef getConnBySecName(dbConf, secName):\n descSec = ''\n secs = dbConf.sections()\n for sec in secs:\n if sec == secName:\n descSec = sec\n conn = getConnOne(dbConf, descSec)\n return conn\n\n\ndef getConns(dbConf):\n secs = dbConf.sections()\n conns = {}\n for sec in secs:\n conn = getConnOne(dbConf, sec)\n if conn is not None:\n conns[sec] = conn\n return conns\n\n\ndef getConnOne(dbConf, sec):\n host = dbConf.get(sec, 'host')\n port = dbConf.getint(sec, 'port')\n user = dbConf.get(sec, 'user')\n password = dbConf.get(sec, 'password')\n database = dbConf.get(sec, 'database')\n conn = commonutil.getConn(host, port, user, password, database)\n if conn is None:\n time.sleep(5)\n conn = commonutil.getConn(host, port, user, password, database)\n if conn is None:\n commonutil.fatal('fail to get connection : sec=[{0}]'.format(sec))\n return conn\n",
"step-4": "<mask token>\nfrom datetime import datetime, timedelta\nfrom optparse import OptionParser\nimport argparse\nimport ConfigParser\nimport sys\nimport os\nimport time\nimport commonutil\n\n\ndef getConf(cfgfile):\n config = ConfigParser.ConfigParser()\n with open(cfgfile, 'r') as cfgfile:\n config.readfp(cfgfile)\n return config\n\n\ndef readFile(fileName):\n infile = open(fileName, 'r')\n content = infile.read()\n infile.close()\n return content\n\n\ndef truncateFile(fileName):\n fileTemp = open(fileName, 'w')\n fileTemp.truncate()\n fileTemp.close()\n\n\ndef getConnBySecName(dbConf, secName):\n descSec = ''\n secs = dbConf.sections()\n for sec in secs:\n if sec == secName:\n descSec = sec\n conn = getConnOne(dbConf, descSec)\n return conn\n\n\ndef getConns(dbConf):\n secs = dbConf.sections()\n conns = {}\n for sec in secs:\n conn = getConnOne(dbConf, sec)\n if conn is not None:\n conns[sec] = conn\n return conns\n\n\ndef getConnOne(dbConf, sec):\n host = dbConf.get(sec, 'host')\n port = dbConf.getint(sec, 'port')\n user = dbConf.get(sec, 'user')\n password = dbConf.get(sec, 'password')\n database = dbConf.get(sec, 'database')\n conn = commonutil.getConn(host, port, user, password, database)\n if conn is None:\n time.sleep(5)\n conn = commonutil.getConn(host, port, user, password, database)\n if conn is None:\n commonutil.fatal('fail to get connection : sec=[{0}]'.format(sec))\n return conn\n",
"step-5": "# -*- coding: utf8 -*-\r\n'''\r\ndump data from mysql/hive to load into mysql\r\n'''\r\nfrom datetime import datetime,timedelta\r\nfrom optparse import OptionParser\r\nimport argparse\r\nimport ConfigParser\r\nimport sys\r\nimport os\r\nimport time\r\nimport commonutil\r\n\r\ndef getConf(cfgfile):\r\n config = ConfigParser.ConfigParser()\r\n with open(cfgfile, 'r') as cfgfile:\r\n config.readfp(cfgfile)\r\n return config\r\n\r\ndef readFile(fileName):\r\n infile = open(fileName, \"r\")\r\n content = infile.read()\r\n infile.close()\r\n return content\r\n\r\ndef truncateFile(fileName):\r\n fileTemp = open(fileName, \"w\")\r\n fileTemp.truncate()\r\n fileTemp.close()\r\n\r\ndef getConnBySecName(dbConf,secName):\r\n descSec = ''\r\n secs = dbConf.sections()\r\n for sec in secs:\r\n if sec == secName:\r\n descSec = sec\r\n conn = getConnOne(dbConf,descSec)\r\n return conn\r\n \r\ndef getConns(dbConf):\r\n secs = dbConf.sections()\r\n conns = {}\r\n for sec in secs:\r\n conn = getConnOne(dbConf, sec)\r\n if conn is not None:\r\n conns[sec]=conn\r\n return conns\r\n\r\ndef getConnOne(dbConf, sec):\r\n host = dbConf.get(sec,\"host\")\r\n port = dbConf.getint(sec,\"port\")\r\n user = dbConf.get(sec,\"user\")\r\n password = dbConf.get(sec,\"password\")\r\n database = dbConf.get(sec,\"database\")\r\n conn = commonutil.getConn(host, port, user, password, database)\r\n if conn is None:\r\n time.sleep(5)\r\n conn = commonutil.getConn(host, port, user, password, database)\r\n if conn is None:\r\n commonutil.fatal(\"fail to get connection : sec=[{0}]\".format(sec))\r\n return conn\r\n \r\n",
"step-ids": [
3,
4,
6,
7,
8
]
}
|
[
3,
4,
6,
7,
8
] |
"""Support for binary sensor using I2C abelectronicsiopi chip."""
from custom_components.abelectronicsiopi.IOPi import IOPi
import voluptuous as vol
from homeassistant.components.binary_sensor import PLATFORM_SCHEMA, BinarySensorEntity
from homeassistant.const import DEVICE_DEFAULT_NAME
import homeassistant.helpers.config_validation as cv
CONF_INVERT_LOGIC = "invert_logic"
CONF_I2C_ADDRESS = "i2c_address"
CONF_PINS = "pins"
CONF_PULL_MODE = "pull_mode"
DEFAULT_INVERT_LOGIC = False
DEFAULT_I2C_ADDRESS = 0x20
DEFAULT_PULL_MODE = True
_SENSORS_SCHEMA = vol.Schema({cv.positive_int: cv.string})
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_PINS): _SENSORS_SCHEMA,
vol.Optional(CONF_INVERT_LOGIC, default=DEFAULT_INVERT_LOGIC): cv.boolean,
vol.Optional(CONF_PULL_MODE, default=DEFAULT_PULL_MODE): cv.boolean,
vol.Optional(CONF_I2C_ADDRESS, default=DEFAULT_I2C_ADDRESS): vol.Coerce(int),
}
)
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up the abelectronicsiopi binary sensors."""
pull_mode = config[CONF_PULL_MODE]
invert_logic = config[CONF_INVERT_LOGIC]
iopi = IOPi(config.get(CONF_I2C_ADDRESS), True)
binary_sensors = []
pins = config[CONF_PINS]
for pin_num, pin_name in pins.items():
binary_sensors.append(abelectronicsiopiBinarySensor(pin_name, pin_num, pull_mode, invert_logic, iopi))
add_devices(binary_sensors, True)
class abelectronicsiopiBinarySensor(BinarySensorEntity):
"""Represent a binary sensor that uses abelectronicsiopi."""
iobus = None
targetpin = None
_state = False
def __init__(self, pinname, pin, pull_mode, invert_logic, bus):
"""Initialize the pin."""
self._state = None
self._name = pinname
self.targetpin = pin
self.iobus = bus
if pull_mode == True:
self.iobus.set_pin_pullup(self.targetpin, 1)
else:
self.iobus.set_pin_pullup(self.targetpin, 0)
self.iobus.set_pin_direction(self.targetpin, 1)
if invert_logic == True:
self.iobus.invert_pin(self.targetpin, 1)
else:
self.iobus.invert_pin(self.targetpin, 0)
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def is_on(self):
"""Return the state of the entity."""
self._state = self.iobus.read_pin(self.targetpin)
return self._state
def update(self):
"""Update the GPIO state."""
self._state = self.iobus.read_pin(self.targetpin)
|
normal
|
{
"blob_id": "73d056d4ab0d268841156b21dfc2c54b5fb2f5f1",
"index": 5218,
"step-1": "<mask token>\n\n\nclass abelectronicsiopiBinarySensor(BinarySensorEntity):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, pinname, pin, pull_mode, invert_logic, bus):\n \"\"\"Initialize the pin.\"\"\"\n self._state = None\n self._name = pinname\n self.targetpin = pin\n self.iobus = bus\n if pull_mode == True:\n self.iobus.set_pin_pullup(self.targetpin, 1)\n else:\n self.iobus.set_pin_pullup(self.targetpin, 0)\n self.iobus.set_pin_direction(self.targetpin, 1)\n if invert_logic == True:\n self.iobus.invert_pin(self.targetpin, 1)\n else:\n self.iobus.invert_pin(self.targetpin, 0)\n\n @property\n def name(self):\n \"\"\"Return the name of the sensor.\"\"\"\n return self._name\n\n @property\n def is_on(self):\n \"\"\"Return the state of the entity.\"\"\"\n self._state = self.iobus.read_pin(self.targetpin)\n return self._state\n\n def update(self):\n \"\"\"Update the GPIO state.\"\"\"\n self._state = self.iobus.read_pin(self.targetpin)\n",
"step-2": "<mask token>\n\n\nclass abelectronicsiopiBinarySensor(BinarySensorEntity):\n \"\"\"Represent a binary sensor that uses abelectronicsiopi.\"\"\"\n iobus = None\n targetpin = None\n _state = False\n\n def __init__(self, pinname, pin, pull_mode, invert_logic, bus):\n \"\"\"Initialize the pin.\"\"\"\n self._state = None\n self._name = pinname\n self.targetpin = pin\n self.iobus = bus\n if pull_mode == True:\n self.iobus.set_pin_pullup(self.targetpin, 1)\n else:\n self.iobus.set_pin_pullup(self.targetpin, 0)\n self.iobus.set_pin_direction(self.targetpin, 1)\n if invert_logic == True:\n self.iobus.invert_pin(self.targetpin, 1)\n else:\n self.iobus.invert_pin(self.targetpin, 0)\n\n @property\n def name(self):\n \"\"\"Return the name of the sensor.\"\"\"\n return self._name\n\n @property\n def is_on(self):\n \"\"\"Return the state of the entity.\"\"\"\n self._state = self.iobus.read_pin(self.targetpin)\n return self._state\n\n def update(self):\n \"\"\"Update the GPIO state.\"\"\"\n self._state = self.iobus.read_pin(self.targetpin)\n",
"step-3": "<mask token>\n\n\ndef setup_platform(hass, config, add_devices, discovery_info=None):\n \"\"\"Set up the abelectronicsiopi binary sensors.\"\"\"\n pull_mode = config[CONF_PULL_MODE]\n invert_logic = config[CONF_INVERT_LOGIC]\n iopi = IOPi(config.get(CONF_I2C_ADDRESS), True)\n binary_sensors = []\n pins = config[CONF_PINS]\n for pin_num, pin_name in pins.items():\n binary_sensors.append(abelectronicsiopiBinarySensor(pin_name,\n pin_num, pull_mode, invert_logic, iopi))\n add_devices(binary_sensors, True)\n\n\nclass abelectronicsiopiBinarySensor(BinarySensorEntity):\n \"\"\"Represent a binary sensor that uses abelectronicsiopi.\"\"\"\n iobus = None\n targetpin = None\n _state = False\n\n def __init__(self, pinname, pin, pull_mode, invert_logic, bus):\n \"\"\"Initialize the pin.\"\"\"\n self._state = None\n self._name = pinname\n self.targetpin = pin\n self.iobus = bus\n if pull_mode == True:\n self.iobus.set_pin_pullup(self.targetpin, 1)\n else:\n self.iobus.set_pin_pullup(self.targetpin, 0)\n self.iobus.set_pin_direction(self.targetpin, 1)\n if invert_logic == True:\n self.iobus.invert_pin(self.targetpin, 1)\n else:\n self.iobus.invert_pin(self.targetpin, 0)\n\n @property\n def name(self):\n \"\"\"Return the name of the sensor.\"\"\"\n return self._name\n\n @property\n def is_on(self):\n \"\"\"Return the state of the entity.\"\"\"\n self._state = self.iobus.read_pin(self.targetpin)\n return self._state\n\n def update(self):\n \"\"\"Update the GPIO state.\"\"\"\n self._state = self.iobus.read_pin(self.targetpin)\n",
"step-4": "<mask token>\nfrom custom_components.abelectronicsiopi.IOPi import IOPi\nimport voluptuous as vol\nfrom homeassistant.components.binary_sensor import PLATFORM_SCHEMA, BinarySensorEntity\nfrom homeassistant.const import DEVICE_DEFAULT_NAME\nimport homeassistant.helpers.config_validation as cv\nCONF_INVERT_LOGIC = 'invert_logic'\nCONF_I2C_ADDRESS = 'i2c_address'\nCONF_PINS = 'pins'\nCONF_PULL_MODE = 'pull_mode'\nDEFAULT_INVERT_LOGIC = False\nDEFAULT_I2C_ADDRESS = 32\nDEFAULT_PULL_MODE = True\n_SENSORS_SCHEMA = vol.Schema({cv.positive_int: cv.string})\nPLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({vol.Required(CONF_PINS):\n _SENSORS_SCHEMA, vol.Optional(CONF_INVERT_LOGIC, default=\n DEFAULT_INVERT_LOGIC): cv.boolean, vol.Optional(CONF_PULL_MODE, default\n =DEFAULT_PULL_MODE): cv.boolean, vol.Optional(CONF_I2C_ADDRESS, default\n =DEFAULT_I2C_ADDRESS): vol.Coerce(int)})\n\n\ndef setup_platform(hass, config, add_devices, discovery_info=None):\n \"\"\"Set up the abelectronicsiopi binary sensors.\"\"\"\n pull_mode = config[CONF_PULL_MODE]\n invert_logic = config[CONF_INVERT_LOGIC]\n iopi = IOPi(config.get(CONF_I2C_ADDRESS), True)\n binary_sensors = []\n pins = config[CONF_PINS]\n for pin_num, pin_name in pins.items():\n binary_sensors.append(abelectronicsiopiBinarySensor(pin_name,\n pin_num, pull_mode, invert_logic, iopi))\n add_devices(binary_sensors, True)\n\n\nclass abelectronicsiopiBinarySensor(BinarySensorEntity):\n \"\"\"Represent a binary sensor that uses abelectronicsiopi.\"\"\"\n iobus = None\n targetpin = None\n _state = False\n\n def __init__(self, pinname, pin, pull_mode, invert_logic, bus):\n \"\"\"Initialize the pin.\"\"\"\n self._state = None\n self._name = pinname\n self.targetpin = pin\n self.iobus = bus\n if pull_mode == True:\n self.iobus.set_pin_pullup(self.targetpin, 1)\n else:\n self.iobus.set_pin_pullup(self.targetpin, 0)\n self.iobus.set_pin_direction(self.targetpin, 1)\n if invert_logic == True:\n self.iobus.invert_pin(self.targetpin, 1)\n 
else:\n self.iobus.invert_pin(self.targetpin, 0)\n\n @property\n def name(self):\n \"\"\"Return the name of the sensor.\"\"\"\n return self._name\n\n @property\n def is_on(self):\n \"\"\"Return the state of the entity.\"\"\"\n self._state = self.iobus.read_pin(self.targetpin)\n return self._state\n\n def update(self):\n \"\"\"Update the GPIO state.\"\"\"\n self._state = self.iobus.read_pin(self.targetpin)\n",
"step-5": "\"\"\"Support for binary sensor using I2C abelectronicsiopi chip.\"\"\"\r\nfrom custom_components.abelectronicsiopi.IOPi import IOPi\r\nimport voluptuous as vol\r\n\r\nfrom homeassistant.components.binary_sensor import PLATFORM_SCHEMA, BinarySensorEntity\r\nfrom homeassistant.const import DEVICE_DEFAULT_NAME\r\nimport homeassistant.helpers.config_validation as cv\r\n\r\nCONF_INVERT_LOGIC = \"invert_logic\"\r\nCONF_I2C_ADDRESS = \"i2c_address\"\r\nCONF_PINS = \"pins\"\r\nCONF_PULL_MODE = \"pull_mode\"\r\n\r\nDEFAULT_INVERT_LOGIC = False\r\nDEFAULT_I2C_ADDRESS = 0x20\r\nDEFAULT_PULL_MODE = True\r\n\r\n_SENSORS_SCHEMA = vol.Schema({cv.positive_int: cv.string})\r\n\r\nPLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(\r\n {\r\n vol.Required(CONF_PINS): _SENSORS_SCHEMA,\r\n vol.Optional(CONF_INVERT_LOGIC, default=DEFAULT_INVERT_LOGIC): cv.boolean,\r\n vol.Optional(CONF_PULL_MODE, default=DEFAULT_PULL_MODE): cv.boolean,\r\n vol.Optional(CONF_I2C_ADDRESS, default=DEFAULT_I2C_ADDRESS): vol.Coerce(int),\r\n }\r\n)\r\n\r\n\r\ndef setup_platform(hass, config, add_devices, discovery_info=None):\r\n \"\"\"Set up the abelectronicsiopi binary sensors.\"\"\"\r\n pull_mode = config[CONF_PULL_MODE]\r\n invert_logic = config[CONF_INVERT_LOGIC]\r\n\r\n iopi = IOPi(config.get(CONF_I2C_ADDRESS), True)\r\n\r\n binary_sensors = []\r\n pins = config[CONF_PINS]\r\n\r\n for pin_num, pin_name in pins.items():\r\n binary_sensors.append(abelectronicsiopiBinarySensor(pin_name, pin_num, pull_mode, invert_logic, iopi))\r\n add_devices(binary_sensors, True)\r\n\r\n\r\nclass abelectronicsiopiBinarySensor(BinarySensorEntity):\r\n \"\"\"Represent a binary sensor that uses abelectronicsiopi.\"\"\"\r\n\r\n iobus = None\r\n targetpin = None\r\n _state = False\r\n\r\n def __init__(self, pinname, pin, pull_mode, invert_logic, bus):\r\n \"\"\"Initialize the pin.\"\"\"\r\n self._state = None\r\n self._name = pinname\r\n self.targetpin = pin\r\n self.iobus = bus\r\n\r\n if pull_mode == True:\r\n 
self.iobus.set_pin_pullup(self.targetpin, 1)\r\n else:\r\n self.iobus.set_pin_pullup(self.targetpin, 0)\r\n\r\n self.iobus.set_pin_direction(self.targetpin, 1)\r\n\r\n if invert_logic == True:\r\n self.iobus.invert_pin(self.targetpin, 1)\r\n else:\r\n self.iobus.invert_pin(self.targetpin, 0) \r\n\r\n @property\r\n def name(self):\r\n \"\"\"Return the name of the sensor.\"\"\"\r\n return self._name\r\n\r\n @property\r\n def is_on(self):\r\n \"\"\"Return the state of the entity.\"\"\"\r\n self._state = self.iobus.read_pin(self.targetpin)\r\n return self._state\r\n\r\n def update(self):\r\n \"\"\"Update the GPIO state.\"\"\"\r\n self._state = self.iobus.read_pin(self.targetpin)\r\n",
"step-ids": [
5,
7,
8,
10,
11
]
}
|
[
5,
7,
8,
10,
11
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print('O aluno escolhido é {}.'.format(random.choice(names)))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
a = input('Nome do primeiro aluno: ')
b = input('Nome do segundo aluno: ')
c = input('Nome do terceiro aluno: ')
d = input('Nome do quarto aluno: ')
names = [a, b, c, d]
print('O aluno escolhido é {}.'.format(random.choice(names)))
<|reserved_special_token_1|>
import random
a = input('Nome do primeiro aluno: ')
b = input('Nome do segundo aluno: ')
c = input('Nome do terceiro aluno: ')
d = input('Nome do quarto aluno: ')
names = [a, b, c, d]
print('O aluno escolhido é {}.'.format(random.choice(names)))
|
flexible
|
{
"blob_id": "bac3cee5e6d129fcf345d92000cb2a257c303dd5",
"index": 9805,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('O aluno escolhido é {}.'.format(random.choice(names)))\n",
"step-3": "<mask token>\na = input('Nome do primeiro aluno: ')\nb = input('Nome do segundo aluno: ')\nc = input('Nome do terceiro aluno: ')\nd = input('Nome do quarto aluno: ')\nnames = [a, b, c, d]\nprint('O aluno escolhido é {}.'.format(random.choice(names)))\n",
"step-4": "import random\na = input('Nome do primeiro aluno: ')\nb = input('Nome do segundo aluno: ')\nc = input('Nome do terceiro aluno: ')\nd = input('Nome do quarto aluno: ')\nnames = [a, b, c, d]\nprint('O aluno escolhido é {}.'.format(random.choice(names)))\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
MUTATIONS = [ChangeField('ReviewRequest', 'depends_on', initial=None, null=
False), ChangeField('ReviewRequestDraft', 'depends_on', initial=None,
null=False)]
<|reserved_special_token_1|>
from django_evolution.mutations import ChangeField
MUTATIONS = [ChangeField('ReviewRequest', 'depends_on', initial=None, null=
False), ChangeField('ReviewRequestDraft', 'depends_on', initial=None,
null=False)]
<|reserved_special_token_1|>
from django_evolution.mutations import ChangeField
MUTATIONS = [
ChangeField('ReviewRequest', 'depends_on', initial=None, null=False),
ChangeField('ReviewRequestDraft', 'depends_on', initial=None, null=False),
]
|
flexible
|
{
"blob_id": "286953e381d03c0817d57f9ee4e15f2a0ce808a9",
"index": 9776,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nMUTATIONS = [ChangeField('ReviewRequest', 'depends_on', initial=None, null=\n False), ChangeField('ReviewRequestDraft', 'depends_on', initial=None,\n null=False)]\n",
"step-3": "from django_evolution.mutations import ChangeField\nMUTATIONS = [ChangeField('ReviewRequest', 'depends_on', initial=None, null=\n False), ChangeField('ReviewRequestDraft', 'depends_on', initial=None,\n null=False)]\n",
"step-4": "from django_evolution.mutations import ChangeField\n\n\nMUTATIONS = [\n ChangeField('ReviewRequest', 'depends_on', initial=None, null=False),\n ChangeField('ReviewRequestDraft', 'depends_on', initial=None, null=False),\n]\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
"""
Copyright (c) Facebook, Inc. and its affiliates.
"""
# fmt: off
"""
Every template contains an ordered list of TemplateObjects.
TemplateObject is defined in template_objects.py
GetMemory templates are written for filters and have an answer_type
They represent the action of fetching from the memory using the filters.
Examples:
[Human, QueryBotCurrentAction],
- human: what are you doing
- human: what are you up to
[Human, QueryBot, MoveTarget],
- human: where you going
- human: where are you heading
"""
from template_objects import *
ANSWER_WITH_CORRECTION = [
## what is this + the thing at location ##
[[Human, What, Is, BlockObjectThis],
[HumanReplace, The, AbstractDescription, BlockObjectLocation]],
[[Human, What, Is, BlockObjectThis, AbstractDescription],
[HumanReplace, The, AbstractDescription, BlockObjectLocation]],
[[Human, What, Is, BlockObjectThat],
[HumanReplace, The, AbstractDescription, BlockObjectLocation]],
[[Human, What, Is, BlockObjectThat, AbstractDescription],
[HumanReplace, The, AbstractDescription, BlockObjectLocation]],
## what size is X + the thing at location ##
[[Human, AskSize, BlockObjectThis],
[HumanReplace, The, AbstractDescription, BlockObjectLocation]],
[[Human, AskSize, BlockObjectThis, AbstractDescription],
[HumanReplace, The, AbstractDescription, BlockObjectLocation]],
[[Human, AskSize, BlockObjectThis, ConcreteDescription],
[HumanReplace, The, AbstractDescription, BlockObjectLocation]],
[[Human, AskSize, BlockObjectThat],
[HumanReplace, The, AbstractDescription, BlockObjectLocation]],
[[Human, AskSize, BlockObjectThat, AbstractDescription],
[HumanReplace, The, AbstractDescription, BlockObjectLocation]],
[[Human, AskSize, BlockObjectThat, ConcreteDescription],
[HumanReplace, The, AbstractDescription, BlockObjectLocation]],
## what color is X + the thing at location ##
[[Human, AskColour, BlockObjectThis],
[HumanReplace, The, AbstractDescription, BlockObjectLocation]],
[[Human, AskColour, BlockObjectThis, AbstractDescription],
[HumanReplace, The, AbstractDescription, BlockObjectLocation]],
[[Human, AskColour, BlockObjectThis, ConcreteDescription],
[HumanReplace, The, AbstractDescription, BlockObjectLocation]],
[[Human, AskColour, BlockObjectThat],
[HumanReplace, The, AbstractDescription, BlockObjectLocation]],
[[Human, AskColour, BlockObjectThat, AbstractDescription],
[HumanReplace, The, AbstractDescription, BlockObjectLocation]],
[[Human, AskColour, BlockObjectThat, ConcreteDescription],
[HumanReplace, The, AbstractDescription, BlockObjectLocation]],
# Is X Y ##
[[Human, AskIs, BlockObjectThis, Size],
[HumanReplace, The, AbstractDescription, BlockObjectLocation]],
[[Human, AskIs, BlockObjectThis, AbstractDescription, Size],
[HumanReplace, The, AbstractDescription, BlockObjectLocation]],
[[Human, AskIs, BlockObjectThis, ConcreteDescription, Size],
[HumanReplace, The, AbstractDescription, BlockObjectLocation]],
[[Human, AskIs, BlockObjectThat, Size],
[HumanReplace, The, AbstractDescription, BlockObjectLocation]],
[[Human, AskIs, BlockObjectThat, AbstractDescription, Size],
[HumanReplace, The, AbstractDescription, BlockObjectLocation]],
[[Human, AskIs, BlockObjectThat, ConcreteDescription, Size],
[HumanReplace, The, AbstractDescription, BlockObjectLocation]],
[[Human, AskIs, The, AbstractDescription, BlockObjectLocation, Size],
[HumanReplace, The, AbstractDescription, BlockObjectLocation]],
[[Human, AskIs, The, ConcreteDescription, BlockObjectLocation, Size],
[HumanReplace, The, AbstractDescription, BlockObjectLocation]],
[[Human, AskIs, BlockObjectThis, Colour],
[HumanReplace, The, AbstractDescription, BlockObjectLocation]],
[[Human, AskIs, BlockObjectThis, AbstractDescription, Colour],
[HumanReplace, The, AbstractDescription, BlockObjectLocation]],
[[Human, AskIs, BlockObjectThis, ConcreteDescription, Colour],
[HumanReplace, The, AbstractDescription, BlockObjectLocation]],
[[Human, AskIs, BlockObjectThat, Colour],
[HumanReplace, The, AbstractDescription, BlockObjectLocation]],
[[Human, AskIs, BlockObjectThat, AbstractDescription, Colour],
[HumanReplace, The, AbstractDescription, BlockObjectLocation]],
[[Human, AskIs, BlockObjectThat, ConcreteDescription, Colour],
[HumanReplace, The, AbstractDescription, BlockObjectLocation]],
[[Human, AskIs, The, AbstractDescription, BlockObjectLocation, Colour],
[HumanReplace, The, AbstractDescription, BlockObjectLocation]],
[[Human, AskIs, The, ConcreteDescription, BlockObjectLocation, Colour],
[HumanReplace, The, AbstractDescription, BlockObjectLocation]],
## Is X a Y ##
[[Human, AskIs, BlockObjectThis, ConcreteDescription],
[HumanReplace, The, AbstractDescription, BlockObjectLocation]],
[[Human, AskIs, BlockObjectThis, AbstractDescription, ConcreteDescription],
[HumanReplace, The, AbstractDescription, BlockObjectLocation]],
[[Human, AskIs, BlockObjectThat, ConcreteDescription],
[HumanReplace, The, AbstractDescription, BlockObjectLocation]],
[[Human, AskIs, BlockObjectThat, AbstractDescription, ConcreteDescription],
[HumanReplace, The, AbstractDescription, BlockObjectLocation]],
]
ANSWER_TEMPLATES = [
# 1
## What is X ##
[Human, What, Is, BlockObjectThis],
[Human, What, Is, BlockObjectThis, AbstractDescription],
[Human, What, Is, BlockObjectThat],
[Human, What, Is, BlockObjectThat, AbstractDescription],
# 2
## What is at X ##
[Human, What, Is, BlockObjectLocation],
[Human, What, Is, The, AbstractDescription, BlockObjectLocation],
## What do you see at X ##
[Human, WhatSee, BlockObjectLocation],
# 3
# What size is X ##
[Human, AskSize, BlockObjectThis],
[Human, AskSize, BlockObjectThis, AbstractDescription],
[Human, AskSize, BlockObjectThis, ConcreteDescription],
[Human, AskSize, BlockObjectThat],
[Human, AskSize, BlockObjectThat, AbstractDescription],
[Human, AskSize, BlockObjectThat, ConcreteDescription],
# 4
## what size is X at Y ##
[Human, AskSize, The, AbstractDescription, BlockObjectLocation],
[Human, AskSize, The, ConcreteDescription, BlockObjectLocation],
# 5
# What colour is X ##
[Human, AskColour, BlockObjectThis],
[Human, AskColour, BlockObjectThis, AbstractDescription],
[Human, AskColour, BlockObjectThis, ConcreteDescription],
[Human, AskColour, BlockObjectThat],
[Human, AskColour, BlockObjectThat, AbstractDescription],
[Human, AskColour, BlockObjectThat, ConcreteDescription],
# 6
## what colour is X at Y ##
[Human, AskColour, The, AbstractDescription, BlockObjectLocation],
[Human, AskColour, The, ConcreteDescription, BlockObjectLocation],
# 7
## Is X Y ##
[Human, AskIs, BlockObjectThis, Size],
[Human, AskIs, BlockObjectThis, AbstractDescription, Size],
[Human, AskIs, BlockObjectThis, ConcreteDescription, Size],
[Human, AskIs, BlockObjectThat, Size],
[Human, AskIs, BlockObjectThat, AbstractDescription, Size],
[Human, AskIs, BlockObjectThat, ConcreteDescription, Size],
[Human, AskIs, The, AbstractDescription, BlockObjectLocation, Size],
[Human, AskIs, The, ConcreteDescription, BlockObjectLocation, Size],
[Human, AskIs, BlockObjectThis, Colour],
[Human, AskIs, BlockObjectThis, AbstractDescription, Colour],
[Human, AskIs, BlockObjectThis, ConcreteDescription, Colour],
[Human, AskIs, BlockObjectThat, Colour],
[Human, AskIs, BlockObjectThat, AbstractDescription, Colour],
[Human, AskIs, BlockObjectThat, ConcreteDescription, Colour],
[Human, AskIs, The, AbstractDescription, BlockObjectLocation, Colour],
[Human, AskIs, The, ConcreteDescription, BlockObjectLocation, Colour],
# 8
## Is X a Y ##
[Human, AskIs, BlockObjectThis, ConcreteDescription],
[Human, AskIs, BlockObjectThis, AbstractDescription, ConcreteDescription],
[Human, AskIs, BlockObjectThat, ConcreteDescription],
[Human, AskIs, BlockObjectThat, AbstractDescription, ConcreteDescription],
# 9
## IS X at Y Z ##
[Human, AskIs, The, AbstractDescription, BlockObjectLocation, ConcreteDescription],
]
GET_MEMORY_TEMPLATES = [
## What are you Doing (Action name) ##
[Human, QueryBotCurrentAction],
## What are you Building (Action reference object name) ##
[Human, QueryBot, ActionReferenceObjectName],
## Where are you heading (Move target) ##
[Human, QueryBot, MoveTarget],
## Where are you (Bot location) ##
[Human, QueryBot, CurrentLocation],
] + ANSWER_TEMPLATES
|
normal
|
{
"blob_id": "ceb714e949a72f621aec8b8728fbd1201e22afd1",
"index": 8705,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nANSWER_WITH_CORRECTION = [[[Human, What, Is, BlockObjectThis], [\n HumanReplace, The, AbstractDescription, BlockObjectLocation]], [[Human,\n What, Is, BlockObjectThis, AbstractDescription], [HumanReplace, The,\n AbstractDescription, BlockObjectLocation]], [[Human, What, Is,\n BlockObjectThat], [HumanReplace, The, AbstractDescription,\n BlockObjectLocation]], [[Human, What, Is, BlockObjectThat,\n AbstractDescription], [HumanReplace, The, AbstractDescription,\n BlockObjectLocation]], [[Human, AskSize, BlockObjectThis], [\n HumanReplace, The, AbstractDescription, BlockObjectLocation]], [[Human,\n AskSize, BlockObjectThis, AbstractDescription], [HumanReplace, The,\n AbstractDescription, BlockObjectLocation]], [[Human, AskSize,\n BlockObjectThis, ConcreteDescription], [HumanReplace, The,\n AbstractDescription, BlockObjectLocation]], [[Human, AskSize,\n BlockObjectThat], [HumanReplace, The, AbstractDescription,\n BlockObjectLocation]], [[Human, AskSize, BlockObjectThat,\n AbstractDescription], [HumanReplace, The, AbstractDescription,\n BlockObjectLocation]], [[Human, AskSize, BlockObjectThat,\n ConcreteDescription], [HumanReplace, The, AbstractDescription,\n BlockObjectLocation]], [[Human, AskColour, BlockObjectThis], [\n HumanReplace, The, AbstractDescription, BlockObjectLocation]], [[Human,\n AskColour, BlockObjectThis, AbstractDescription], [HumanReplace, The,\n AbstractDescription, BlockObjectLocation]], [[Human, AskColour,\n BlockObjectThis, ConcreteDescription], [HumanReplace, The,\n AbstractDescription, BlockObjectLocation]], [[Human, AskColour,\n BlockObjectThat], [HumanReplace, The, AbstractDescription,\n BlockObjectLocation]], [[Human, AskColour, BlockObjectThat,\n AbstractDescription], [HumanReplace, The, AbstractDescription,\n BlockObjectLocation]], [[Human, AskColour, BlockObjectThat,\n ConcreteDescription], [HumanReplace, The, AbstractDescription,\n BlockObjectLocation]], [[Human, AskIs, BlockObjectThis, Size], [\n HumanReplace, 
The, AbstractDescription, BlockObjectLocation]], [[Human,\n AskIs, BlockObjectThis, AbstractDescription, Size], [HumanReplace, The,\n AbstractDescription, BlockObjectLocation]], [[Human, AskIs,\n BlockObjectThis, ConcreteDescription, Size], [HumanReplace, The,\n AbstractDescription, BlockObjectLocation]], [[Human, AskIs,\n BlockObjectThat, Size], [HumanReplace, The, AbstractDescription,\n BlockObjectLocation]], [[Human, AskIs, BlockObjectThat,\n AbstractDescription, Size], [HumanReplace, The, AbstractDescription,\n BlockObjectLocation]], [[Human, AskIs, BlockObjectThat,\n ConcreteDescription, Size], [HumanReplace, The, AbstractDescription,\n BlockObjectLocation]], [[Human, AskIs, The, AbstractDescription,\n BlockObjectLocation, Size], [HumanReplace, The, AbstractDescription,\n BlockObjectLocation]], [[Human, AskIs, The, ConcreteDescription,\n BlockObjectLocation, Size], [HumanReplace, The, AbstractDescription,\n BlockObjectLocation]], [[Human, AskIs, BlockObjectThis, Colour], [\n HumanReplace, The, AbstractDescription, BlockObjectLocation]], [[Human,\n AskIs, BlockObjectThis, AbstractDescription, Colour], [HumanReplace,\n The, AbstractDescription, BlockObjectLocation]], [[Human, AskIs,\n BlockObjectThis, ConcreteDescription, Colour], [HumanReplace, The,\n AbstractDescription, BlockObjectLocation]], [[Human, AskIs,\n BlockObjectThat, Colour], [HumanReplace, The, AbstractDescription,\n BlockObjectLocation]], [[Human, AskIs, BlockObjectThat,\n AbstractDescription, Colour], [HumanReplace, The, AbstractDescription,\n BlockObjectLocation]], [[Human, AskIs, BlockObjectThat,\n ConcreteDescription, Colour], [HumanReplace, The, AbstractDescription,\n BlockObjectLocation]], [[Human, AskIs, The, AbstractDescription,\n BlockObjectLocation, Colour], [HumanReplace, The, AbstractDescription,\n BlockObjectLocation]], [[Human, AskIs, The, ConcreteDescription,\n BlockObjectLocation, Colour], [HumanReplace, The, AbstractDescription,\n BlockObjectLocation]], [[Human, AskIs, 
BlockObjectThis,\n ConcreteDescription], [HumanReplace, The, AbstractDescription,\n BlockObjectLocation]], [[Human, AskIs, BlockObjectThis,\n AbstractDescription, ConcreteDescription], [HumanReplace, The,\n AbstractDescription, BlockObjectLocation]], [[Human, AskIs,\n BlockObjectThat, ConcreteDescription], [HumanReplace, The,\n AbstractDescription, BlockObjectLocation]], [[Human, AskIs,\n BlockObjectThat, AbstractDescription, ConcreteDescription], [\n HumanReplace, The, AbstractDescription, BlockObjectLocation]]]\nANSWER_TEMPLATES = [[Human, What, Is, BlockObjectThis], [Human, What, Is,\n BlockObjectThis, AbstractDescription], [Human, What, Is,\n BlockObjectThat], [Human, What, Is, BlockObjectThat,\n AbstractDescription], [Human, What, Is, BlockObjectLocation], [Human,\n What, Is, The, AbstractDescription, BlockObjectLocation], [Human,\n WhatSee, BlockObjectLocation], [Human, AskSize, BlockObjectThis], [\n Human, AskSize, BlockObjectThis, AbstractDescription], [Human, AskSize,\n BlockObjectThis, ConcreteDescription], [Human, AskSize, BlockObjectThat\n ], [Human, AskSize, BlockObjectThat, AbstractDescription], [Human,\n AskSize, BlockObjectThat, ConcreteDescription], [Human, AskSize, The,\n AbstractDescription, BlockObjectLocation], [Human, AskSize, The,\n ConcreteDescription, BlockObjectLocation], [Human, AskColour,\n BlockObjectThis], [Human, AskColour, BlockObjectThis,\n AbstractDescription], [Human, AskColour, BlockObjectThis,\n ConcreteDescription], [Human, AskColour, BlockObjectThat], [Human,\n AskColour, BlockObjectThat, AbstractDescription], [Human, AskColour,\n BlockObjectThat, ConcreteDescription], [Human, AskColour, The,\n AbstractDescription, BlockObjectLocation], [Human, AskColour, The,\n ConcreteDescription, BlockObjectLocation], [Human, AskIs,\n BlockObjectThis, Size], [Human, AskIs, BlockObjectThis,\n AbstractDescription, Size], [Human, AskIs, BlockObjectThis,\n ConcreteDescription, Size], [Human, AskIs, BlockObjectThat, Size], [\n Human, AskIs, 
BlockObjectThat, AbstractDescription, Size], [Human,\n AskIs, BlockObjectThat, ConcreteDescription, Size], [Human, AskIs, The,\n AbstractDescription, BlockObjectLocation, Size], [Human, AskIs, The,\n ConcreteDescription, BlockObjectLocation, Size], [Human, AskIs,\n BlockObjectThis, Colour], [Human, AskIs, BlockObjectThis,\n AbstractDescription, Colour], [Human, AskIs, BlockObjectThis,\n ConcreteDescription, Colour], [Human, AskIs, BlockObjectThat, Colour],\n [Human, AskIs, BlockObjectThat, AbstractDescription, Colour], [Human,\n AskIs, BlockObjectThat, ConcreteDescription, Colour], [Human, AskIs,\n The, AbstractDescription, BlockObjectLocation, Colour], [Human, AskIs,\n The, ConcreteDescription, BlockObjectLocation, Colour], [Human, AskIs,\n BlockObjectThis, ConcreteDescription], [Human, AskIs, BlockObjectThis,\n AbstractDescription, ConcreteDescription], [Human, AskIs,\n BlockObjectThat, ConcreteDescription], [Human, AskIs, BlockObjectThat,\n AbstractDescription, ConcreteDescription], [Human, AskIs, The,\n AbstractDescription, BlockObjectLocation, ConcreteDescription]]\nGET_MEMORY_TEMPLATES = [[Human, QueryBotCurrentAction], [Human, QueryBot,\n ActionReferenceObjectName], [Human, QueryBot, MoveTarget], [Human,\n QueryBot, CurrentLocation]] + ANSWER_TEMPLATES\n",
"step-3": "<mask token>\nfrom template_objects import *\nANSWER_WITH_CORRECTION = [[[Human, What, Is, BlockObjectThis], [\n HumanReplace, The, AbstractDescription, BlockObjectLocation]], [[Human,\n What, Is, BlockObjectThis, AbstractDescription], [HumanReplace, The,\n AbstractDescription, BlockObjectLocation]], [[Human, What, Is,\n BlockObjectThat], [HumanReplace, The, AbstractDescription,\n BlockObjectLocation]], [[Human, What, Is, BlockObjectThat,\n AbstractDescription], [HumanReplace, The, AbstractDescription,\n BlockObjectLocation]], [[Human, AskSize, BlockObjectThis], [\n HumanReplace, The, AbstractDescription, BlockObjectLocation]], [[Human,\n AskSize, BlockObjectThis, AbstractDescription], [HumanReplace, The,\n AbstractDescription, BlockObjectLocation]], [[Human, AskSize,\n BlockObjectThis, ConcreteDescription], [HumanReplace, The,\n AbstractDescription, BlockObjectLocation]], [[Human, AskSize,\n BlockObjectThat], [HumanReplace, The, AbstractDescription,\n BlockObjectLocation]], [[Human, AskSize, BlockObjectThat,\n AbstractDescription], [HumanReplace, The, AbstractDescription,\n BlockObjectLocation]], [[Human, AskSize, BlockObjectThat,\n ConcreteDescription], [HumanReplace, The, AbstractDescription,\n BlockObjectLocation]], [[Human, AskColour, BlockObjectThis], [\n HumanReplace, The, AbstractDescription, BlockObjectLocation]], [[Human,\n AskColour, BlockObjectThis, AbstractDescription], [HumanReplace, The,\n AbstractDescription, BlockObjectLocation]], [[Human, AskColour,\n BlockObjectThis, ConcreteDescription], [HumanReplace, The,\n AbstractDescription, BlockObjectLocation]], [[Human, AskColour,\n BlockObjectThat], [HumanReplace, The, AbstractDescription,\n BlockObjectLocation]], [[Human, AskColour, BlockObjectThat,\n AbstractDescription], [HumanReplace, The, AbstractDescription,\n BlockObjectLocation]], [[Human, AskColour, BlockObjectThat,\n ConcreteDescription], [HumanReplace, The, AbstractDescription,\n BlockObjectLocation]], [[Human, AskIs, 
BlockObjectThis, Size], [\n HumanReplace, The, AbstractDescription, BlockObjectLocation]], [[Human,\n AskIs, BlockObjectThis, AbstractDescription, Size], [HumanReplace, The,\n AbstractDescription, BlockObjectLocation]], [[Human, AskIs,\n BlockObjectThis, ConcreteDescription, Size], [HumanReplace, The,\n AbstractDescription, BlockObjectLocation]], [[Human, AskIs,\n BlockObjectThat, Size], [HumanReplace, The, AbstractDescription,\n BlockObjectLocation]], [[Human, AskIs, BlockObjectThat,\n AbstractDescription, Size], [HumanReplace, The, AbstractDescription,\n BlockObjectLocation]], [[Human, AskIs, BlockObjectThat,\n ConcreteDescription, Size], [HumanReplace, The, AbstractDescription,\n BlockObjectLocation]], [[Human, AskIs, The, AbstractDescription,\n BlockObjectLocation, Size], [HumanReplace, The, AbstractDescription,\n BlockObjectLocation]], [[Human, AskIs, The, ConcreteDescription,\n BlockObjectLocation, Size], [HumanReplace, The, AbstractDescription,\n BlockObjectLocation]], [[Human, AskIs, BlockObjectThis, Colour], [\n HumanReplace, The, AbstractDescription, BlockObjectLocation]], [[Human,\n AskIs, BlockObjectThis, AbstractDescription, Colour], [HumanReplace,\n The, AbstractDescription, BlockObjectLocation]], [[Human, AskIs,\n BlockObjectThis, ConcreteDescription, Colour], [HumanReplace, The,\n AbstractDescription, BlockObjectLocation]], [[Human, AskIs,\n BlockObjectThat, Colour], [HumanReplace, The, AbstractDescription,\n BlockObjectLocation]], [[Human, AskIs, BlockObjectThat,\n AbstractDescription, Colour], [HumanReplace, The, AbstractDescription,\n BlockObjectLocation]], [[Human, AskIs, BlockObjectThat,\n ConcreteDescription, Colour], [HumanReplace, The, AbstractDescription,\n BlockObjectLocation]], [[Human, AskIs, The, AbstractDescription,\n BlockObjectLocation, Colour], [HumanReplace, The, AbstractDescription,\n BlockObjectLocation]], [[Human, AskIs, The, ConcreteDescription,\n BlockObjectLocation, Colour], [HumanReplace, The, AbstractDescription,\n 
BlockObjectLocation]], [[Human, AskIs, BlockObjectThis,\n ConcreteDescription], [HumanReplace, The, AbstractDescription,\n BlockObjectLocation]], [[Human, AskIs, BlockObjectThis,\n AbstractDescription, ConcreteDescription], [HumanReplace, The,\n AbstractDescription, BlockObjectLocation]], [[Human, AskIs,\n BlockObjectThat, ConcreteDescription], [HumanReplace, The,\n AbstractDescription, BlockObjectLocation]], [[Human, AskIs,\n BlockObjectThat, AbstractDescription, ConcreteDescription], [\n HumanReplace, The, AbstractDescription, BlockObjectLocation]]]\nANSWER_TEMPLATES = [[Human, What, Is, BlockObjectThis], [Human, What, Is,\n BlockObjectThis, AbstractDescription], [Human, What, Is,\n BlockObjectThat], [Human, What, Is, BlockObjectThat,\n AbstractDescription], [Human, What, Is, BlockObjectLocation], [Human,\n What, Is, The, AbstractDescription, BlockObjectLocation], [Human,\n WhatSee, BlockObjectLocation], [Human, AskSize, BlockObjectThis], [\n Human, AskSize, BlockObjectThis, AbstractDescription], [Human, AskSize,\n BlockObjectThis, ConcreteDescription], [Human, AskSize, BlockObjectThat\n ], [Human, AskSize, BlockObjectThat, AbstractDescription], [Human,\n AskSize, BlockObjectThat, ConcreteDescription], [Human, AskSize, The,\n AbstractDescription, BlockObjectLocation], [Human, AskSize, The,\n ConcreteDescription, BlockObjectLocation], [Human, AskColour,\n BlockObjectThis], [Human, AskColour, BlockObjectThis,\n AbstractDescription], [Human, AskColour, BlockObjectThis,\n ConcreteDescription], [Human, AskColour, BlockObjectThat], [Human,\n AskColour, BlockObjectThat, AbstractDescription], [Human, AskColour,\n BlockObjectThat, ConcreteDescription], [Human, AskColour, The,\n AbstractDescription, BlockObjectLocation], [Human, AskColour, The,\n ConcreteDescription, BlockObjectLocation], [Human, AskIs,\n BlockObjectThis, Size], [Human, AskIs, BlockObjectThis,\n AbstractDescription, Size], [Human, AskIs, BlockObjectThis,\n ConcreteDescription, Size], [Human, AskIs, 
BlockObjectThat, Size], [\n Human, AskIs, BlockObjectThat, AbstractDescription, Size], [Human,\n AskIs, BlockObjectThat, ConcreteDescription, Size], [Human, AskIs, The,\n AbstractDescription, BlockObjectLocation, Size], [Human, AskIs, The,\n ConcreteDescription, BlockObjectLocation, Size], [Human, AskIs,\n BlockObjectThis, Colour], [Human, AskIs, BlockObjectThis,\n AbstractDescription, Colour], [Human, AskIs, BlockObjectThis,\n ConcreteDescription, Colour], [Human, AskIs, BlockObjectThat, Colour],\n [Human, AskIs, BlockObjectThat, AbstractDescription, Colour], [Human,\n AskIs, BlockObjectThat, ConcreteDescription, Colour], [Human, AskIs,\n The, AbstractDescription, BlockObjectLocation, Colour], [Human, AskIs,\n The, ConcreteDescription, BlockObjectLocation, Colour], [Human, AskIs,\n BlockObjectThis, ConcreteDescription], [Human, AskIs, BlockObjectThis,\n AbstractDescription, ConcreteDescription], [Human, AskIs,\n BlockObjectThat, ConcreteDescription], [Human, AskIs, BlockObjectThat,\n AbstractDescription, ConcreteDescription], [Human, AskIs, The,\n AbstractDescription, BlockObjectLocation, ConcreteDescription]]\nGET_MEMORY_TEMPLATES = [[Human, QueryBotCurrentAction], [Human, QueryBot,\n ActionReferenceObjectName], [Human, QueryBot, MoveTarget], [Human,\n QueryBot, CurrentLocation]] + ANSWER_TEMPLATES\n",
"step-4": "\"\"\"\nCopyright (c) Facebook, Inc. and its affiliates.\n\"\"\"\n\n# fmt: off\n\"\"\"\nEvery template contains an ordered list of TemplateObjects.\nTemplateObject is defined in template_objects.py\n\nGetMemory templates are written for filters and have an answer_type\nThey represent the action of fetching from the memory using the filters.\n\nExamples:\n\n[Human, QueryBotCurrentAction],\n- human: what are you doing\n- human: what are you up to\n\n[Human, QueryBot, MoveTarget],\n- human: where you going\n- human: where are you heading\n\"\"\"\nfrom template_objects import *\n\nANSWER_WITH_CORRECTION = [\n ## what is this + the thing at location ##\n [[Human, What, Is, BlockObjectThis],\n [HumanReplace, The, AbstractDescription, BlockObjectLocation]],\n [[Human, What, Is, BlockObjectThis, AbstractDescription],\n [HumanReplace, The, AbstractDescription, BlockObjectLocation]],\n [[Human, What, Is, BlockObjectThat],\n [HumanReplace, The, AbstractDescription, BlockObjectLocation]],\n [[Human, What, Is, BlockObjectThat, AbstractDescription],\n [HumanReplace, The, AbstractDescription, BlockObjectLocation]],\n\n ## what size is X + the thing at location ##\n [[Human, AskSize, BlockObjectThis],\n [HumanReplace, The, AbstractDescription, BlockObjectLocation]],\n [[Human, AskSize, BlockObjectThis, AbstractDescription],\n [HumanReplace, The, AbstractDescription, BlockObjectLocation]],\n [[Human, AskSize, BlockObjectThis, ConcreteDescription],\n [HumanReplace, The, AbstractDescription, BlockObjectLocation]],\n [[Human, AskSize, BlockObjectThat],\n [HumanReplace, The, AbstractDescription, BlockObjectLocation]],\n [[Human, AskSize, BlockObjectThat, AbstractDescription],\n [HumanReplace, The, AbstractDescription, BlockObjectLocation]],\n [[Human, AskSize, BlockObjectThat, ConcreteDescription],\n [HumanReplace, The, AbstractDescription, BlockObjectLocation]],\n\n ## what color is X + the thing at location ##\n [[Human, AskColour, BlockObjectThis],\n [HumanReplace, The, 
AbstractDescription, BlockObjectLocation]],\n [[Human, AskColour, BlockObjectThis, AbstractDescription],\n [HumanReplace, The, AbstractDescription, BlockObjectLocation]],\n [[Human, AskColour, BlockObjectThis, ConcreteDescription],\n [HumanReplace, The, AbstractDescription, BlockObjectLocation]],\n [[Human, AskColour, BlockObjectThat],\n [HumanReplace, The, AbstractDescription, BlockObjectLocation]],\n [[Human, AskColour, BlockObjectThat, AbstractDescription],\n [HumanReplace, The, AbstractDescription, BlockObjectLocation]],\n [[Human, AskColour, BlockObjectThat, ConcreteDescription],\n [HumanReplace, The, AbstractDescription, BlockObjectLocation]],\n\n # Is X Y ##\n [[Human, AskIs, BlockObjectThis, Size],\n [HumanReplace, The, AbstractDescription, BlockObjectLocation]],\n [[Human, AskIs, BlockObjectThis, AbstractDescription, Size],\n [HumanReplace, The, AbstractDescription, BlockObjectLocation]],\n [[Human, AskIs, BlockObjectThis, ConcreteDescription, Size],\n [HumanReplace, The, AbstractDescription, BlockObjectLocation]],\n [[Human, AskIs, BlockObjectThat, Size],\n [HumanReplace, The, AbstractDescription, BlockObjectLocation]],\n [[Human, AskIs, BlockObjectThat, AbstractDescription, Size],\n [HumanReplace, The, AbstractDescription, BlockObjectLocation]],\n [[Human, AskIs, BlockObjectThat, ConcreteDescription, Size],\n [HumanReplace, The, AbstractDescription, BlockObjectLocation]],\n [[Human, AskIs, The, AbstractDescription, BlockObjectLocation, Size],\n [HumanReplace, The, AbstractDescription, BlockObjectLocation]],\n [[Human, AskIs, The, ConcreteDescription, BlockObjectLocation, Size],\n [HumanReplace, The, AbstractDescription, BlockObjectLocation]],\n\n [[Human, AskIs, BlockObjectThis, Colour],\n [HumanReplace, The, AbstractDescription, BlockObjectLocation]],\n [[Human, AskIs, BlockObjectThis, AbstractDescription, Colour],\n [HumanReplace, The, AbstractDescription, BlockObjectLocation]],\n [[Human, AskIs, BlockObjectThis, ConcreteDescription, Colour],\n 
[HumanReplace, The, AbstractDescription, BlockObjectLocation]],\n [[Human, AskIs, BlockObjectThat, Colour],\n [HumanReplace, The, AbstractDescription, BlockObjectLocation]],\n [[Human, AskIs, BlockObjectThat, AbstractDescription, Colour],\n [HumanReplace, The, AbstractDescription, BlockObjectLocation]],\n [[Human, AskIs, BlockObjectThat, ConcreteDescription, Colour],\n [HumanReplace, The, AbstractDescription, BlockObjectLocation]],\n [[Human, AskIs, The, AbstractDescription, BlockObjectLocation, Colour],\n [HumanReplace, The, AbstractDescription, BlockObjectLocation]],\n [[Human, AskIs, The, ConcreteDescription, BlockObjectLocation, Colour],\n [HumanReplace, The, AbstractDescription, BlockObjectLocation]],\n\n ## Is X a Y ##\n [[Human, AskIs, BlockObjectThis, ConcreteDescription],\n [HumanReplace, The, AbstractDescription, BlockObjectLocation]],\n [[Human, AskIs, BlockObjectThis, AbstractDescription, ConcreteDescription],\n [HumanReplace, The, AbstractDescription, BlockObjectLocation]],\n [[Human, AskIs, BlockObjectThat, ConcreteDescription],\n [HumanReplace, The, AbstractDescription, BlockObjectLocation]],\n [[Human, AskIs, BlockObjectThat, AbstractDescription, ConcreteDescription],\n [HumanReplace, The, AbstractDescription, BlockObjectLocation]],\n\n]\n\nANSWER_TEMPLATES = [\n # 1\n ## What is X ##\n [Human, What, Is, BlockObjectThis],\n [Human, What, Is, BlockObjectThis, AbstractDescription],\n [Human, What, Is, BlockObjectThat],\n [Human, What, Is, BlockObjectThat, AbstractDescription],\n\n # 2\n ## What is at X ##\n [Human, What, Is, BlockObjectLocation],\n [Human, What, Is, The, AbstractDescription, BlockObjectLocation],\n\n ## What do you see at X ##\n [Human, WhatSee, BlockObjectLocation],\n\n # 3\n # What size is X ##\n [Human, AskSize, BlockObjectThis],\n [Human, AskSize, BlockObjectThis, AbstractDescription],\n [Human, AskSize, BlockObjectThis, ConcreteDescription],\n [Human, AskSize, BlockObjectThat],\n [Human, AskSize, BlockObjectThat, 
AbstractDescription],\n [Human, AskSize, BlockObjectThat, ConcreteDescription],\n\n # 4\n ## what size is X at Y ##\n [Human, AskSize, The, AbstractDescription, BlockObjectLocation],\n [Human, AskSize, The, ConcreteDescription, BlockObjectLocation],\n\n # 5\n # What colour is X ##\n [Human, AskColour, BlockObjectThis],\n [Human, AskColour, BlockObjectThis, AbstractDescription],\n [Human, AskColour, BlockObjectThis, ConcreteDescription],\n [Human, AskColour, BlockObjectThat],\n [Human, AskColour, BlockObjectThat, AbstractDescription],\n [Human, AskColour, BlockObjectThat, ConcreteDescription],\n\n # 6\n ## what colour is X at Y ##\n [Human, AskColour, The, AbstractDescription, BlockObjectLocation],\n [Human, AskColour, The, ConcreteDescription, BlockObjectLocation],\n\n # 7\n ## Is X Y ##\n [Human, AskIs, BlockObjectThis, Size],\n [Human, AskIs, BlockObjectThis, AbstractDescription, Size],\n [Human, AskIs, BlockObjectThis, ConcreteDescription, Size],\n [Human, AskIs, BlockObjectThat, Size],\n [Human, AskIs, BlockObjectThat, AbstractDescription, Size],\n [Human, AskIs, BlockObjectThat, ConcreteDescription, Size],\n\n [Human, AskIs, The, AbstractDescription, BlockObjectLocation, Size],\n [Human, AskIs, The, ConcreteDescription, BlockObjectLocation, Size],\n\n [Human, AskIs, BlockObjectThis, Colour],\n [Human, AskIs, BlockObjectThis, AbstractDescription, Colour],\n [Human, AskIs, BlockObjectThis, ConcreteDescription, Colour],\n [Human, AskIs, BlockObjectThat, Colour],\n [Human, AskIs, BlockObjectThat, AbstractDescription, Colour],\n [Human, AskIs, BlockObjectThat, ConcreteDescription, Colour],\n\n [Human, AskIs, The, AbstractDescription, BlockObjectLocation, Colour],\n [Human, AskIs, The, ConcreteDescription, BlockObjectLocation, Colour],\n\n # 8\n ## Is X a Y ##\n [Human, AskIs, BlockObjectThis, ConcreteDescription],\n [Human, AskIs, BlockObjectThis, AbstractDescription, ConcreteDescription],\n [Human, AskIs, BlockObjectThat, ConcreteDescription],\n [Human, AskIs, 
BlockObjectThat, AbstractDescription, ConcreteDescription],\n\n # 9\n ## IS X at Y Z ##\n [Human, AskIs, The, AbstractDescription, BlockObjectLocation, ConcreteDescription],\n\n] \n\nGET_MEMORY_TEMPLATES = [\n ## What are you Doing (Action name) ##\n [Human, QueryBotCurrentAction],\n\n ## What are you Building (Action reference object name) ##\n [Human, QueryBot, ActionReferenceObjectName],\n\n ## Where are you heading (Move target) ##\n [Human, QueryBot, MoveTarget],\n\n ## Where are you (Bot location) ##\n [Human, QueryBot, CurrentLocation],\n] + ANSWER_TEMPLATES\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
savecaptcha.write(b64cap[0])
savecaptcha.close()
<|reserved_special_token_0|>
f.close()
<|reserved_special_token_0|>
fincapfile.close()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
gInfo = {'obj': g2.go(capUrl), 'Headers-C-T': g2.response.headers[
'Content-Type'], 'url': g2.response.url, 'urlDetails': g2.response.
url_details()}
capHtml = capHtml = gInfo['obj'].unicode_body(ignore_errors=True,
fix_special_entities=True)
b64cap = re.findall('base64,(.*?)\\\\" id=', capHtml, re.DOTALL)
savecaptcha = open(file='/home/ubuntu/captcha.png', mode='w')
savecaptcha.write(b64cap[0])
savecaptcha.close()
f = open(file='/home/ubuntu/captcha.png', mode='rb')
r = f.read()
i = base64.b64decode(r)
f.close()
fincapfile = open(file='/home/ubuntu/workspace/ffcap.jpeg', mode='wb')
capsave = fincapfile.write(i)
fincapfile.close()
<|reserved_special_token_1|>
import fs
gInfo = {'obj': g2.go(capUrl), 'Headers-C-T': g2.response.headers[
'Content-Type'], 'url': g2.response.url, 'urlDetails': g2.response.
url_details()}
capHtml = capHtml = gInfo['obj'].unicode_body(ignore_errors=True,
fix_special_entities=True)
b64cap = re.findall('base64,(.*?)\\\\" id=', capHtml, re.DOTALL)
savecaptcha = open(file='/home/ubuntu/captcha.png', mode='w')
savecaptcha.write(b64cap[0])
savecaptcha.close()
f = open(file='/home/ubuntu/captcha.png', mode='rb')
r = f.read()
i = base64.b64decode(r)
f.close()
fincapfile = open(file='/home/ubuntu/workspace/ffcap.jpeg', mode='wb')
capsave = fincapfile.write(i)
fincapfile.close()
<|reserved_special_token_1|>
import fs
gInfo = {
'obj': g2.go(capUrl),
'Headers-C-T': g2.response.headers['Content-Type'],
'url': g2.response.url,
'urlDetails': g2.response.url_details()
}
capHtml = capHtml = gInfo['obj'].unicode_body(ignore_errors=True, fix_special_entities=True)
b64cap = re.findall(r'base64,(.*?)\\" id=', capHtml, re.DOTALL)
savecaptcha = open(file="/home/ubuntu/captcha.png", mode="w")
savecaptcha.write(b64cap[0])
savecaptcha.close()
f = open(file="/home/ubuntu/captcha.png", mode="rb")
r = f.read()
i = base64.b64decode(r)
f.close()
fincapfile = open(file="/home/ubuntu/workspace/ffcap.jpeg", mode="wb")
capsave = fincapfile.write(i)
fincapfile.close()
|
flexible
|
{
"blob_id": "2a5f69fbb26bd1f94c10ff0da687391bf5bd3c23",
"index": 6054,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nsavecaptcha.write(b64cap[0])\nsavecaptcha.close()\n<mask token>\nf.close()\n<mask token>\nfincapfile.close()\n",
"step-3": "<mask token>\ngInfo = {'obj': g2.go(capUrl), 'Headers-C-T': g2.response.headers[\n 'Content-Type'], 'url': g2.response.url, 'urlDetails': g2.response.\n url_details()}\ncapHtml = capHtml = gInfo['obj'].unicode_body(ignore_errors=True,\n fix_special_entities=True)\nb64cap = re.findall('base64,(.*?)\\\\\\\\\" id=', capHtml, re.DOTALL)\nsavecaptcha = open(file='/home/ubuntu/captcha.png', mode='w')\nsavecaptcha.write(b64cap[0])\nsavecaptcha.close()\nf = open(file='/home/ubuntu/captcha.png', mode='rb')\nr = f.read()\ni = base64.b64decode(r)\nf.close()\nfincapfile = open(file='/home/ubuntu/workspace/ffcap.jpeg', mode='wb')\ncapsave = fincapfile.write(i)\nfincapfile.close()\n",
"step-4": "import fs\ngInfo = {'obj': g2.go(capUrl), 'Headers-C-T': g2.response.headers[\n 'Content-Type'], 'url': g2.response.url, 'urlDetails': g2.response.\n url_details()}\ncapHtml = capHtml = gInfo['obj'].unicode_body(ignore_errors=True,\n fix_special_entities=True)\nb64cap = re.findall('base64,(.*?)\\\\\\\\\" id=', capHtml, re.DOTALL)\nsavecaptcha = open(file='/home/ubuntu/captcha.png', mode='w')\nsavecaptcha.write(b64cap[0])\nsavecaptcha.close()\nf = open(file='/home/ubuntu/captcha.png', mode='rb')\nr = f.read()\ni = base64.b64decode(r)\nf.close()\nfincapfile = open(file='/home/ubuntu/workspace/ffcap.jpeg', mode='wb')\ncapsave = fincapfile.write(i)\nfincapfile.close()\n",
"step-5": "import fs\n\n\ngInfo = {\n\n'obj': g2.go(capUrl),\n\n'Headers-C-T': g2.response.headers['Content-Type'],\n\n'url': g2.response.url,\n\n'urlDetails': g2.response.url_details()\n\n}\n\ncapHtml = capHtml = gInfo['obj'].unicode_body(ignore_errors=True, fix_special_entities=True)\n\nb64cap = re.findall(r'base64,(.*?)\\\\\" id=', capHtml, re.DOTALL)\n\nsavecaptcha = open(file=\"/home/ubuntu/captcha.png\", mode=\"w\")\n\nsavecaptcha.write(b64cap[0])\n\nsavecaptcha.close()\n\nf = open(file=\"/home/ubuntu/captcha.png\", mode=\"rb\")\n\nr = f.read()\n\ni = base64.b64decode(r)\n\nf.close()\n\nfincapfile = open(file=\"/home/ubuntu/workspace/ffcap.jpeg\", mode=\"wb\")\n\ncapsave = fincapfile.write(i)\n\nfincapfile.close()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def to_rna(str):
strtrans = maketrans('ACGT', 'UGCA')
return str.translate(strtrans)
<|reserved_special_token_1|>
from string import maketrans
def to_rna(str):
strtrans = maketrans('ACGT', 'UGCA')
return str.translate(strtrans)
|
flexible
|
{
"blob_id": "aace7bc6684f4a9cec2f8fe270b5123a375780af",
"index": 8059,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef to_rna(str):\n strtrans = maketrans('ACGT', 'UGCA')\n return str.translate(strtrans)\n",
"step-3": "from string import maketrans\n\n\ndef to_rna(str):\n strtrans = maketrans('ACGT', 'UGCA')\n return str.translate(strtrans)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from .advection_flux_2d import gen_advection_flux_conservative_eno3_pyst_kernel_2d
from .advection_timestep_2d import gen_advection_timestep_euler_forward_conservative_eno3_pyst_kernel_2d
from .brinkmann_penalise_2d import gen_brinkmann_penalise_pyst_kernel_2d, gen_brinkmann_penalise_vs_fixed_val_pyst_kernel_2d
from .char_func_from_level_set_2d import gen_char_func_from_level_set_via_sine_heaviside_pyst_kernel_2d
from .diffusion_flux_2d import gen_diffusion_flux_pyst_kernel_2d
from .diffusion_timestep_2d import gen_diffusion_timestep_euler_forward_pyst_kernel_2d
from .elementwise_ops_2d import gen_add_fixed_val_pyst_kernel_2d, gen_elementwise_complex_product_pyst_kernel_2d, gen_elementwise_copy_pyst_kernel_2d, gen_elementwise_sum_pyst_kernel_2d, gen_set_fixed_val_at_boundaries_pyst_kernel_2d, gen_set_fixed_val_pyst_kernel_2d, gen_elementwise_saxpby_pyst_kernel_2d
from .inplane_field_curl_2d import gen_inplane_field_curl_pyst_kernel_2d
from .outplane_field_curl_2d import gen_outplane_field_curl_pyst_kernel_2d
from .penalise_field_boundary_2d import gen_penalise_field_boundary_pyst_kernel_2d
from .update_vorticity_from_velocity_forcing_2d import gen_update_vorticity_from_penalised_velocity_pyst_kernel_2d, gen_update_vorticity_from_velocity_forcing_pyst_kernel_2d
<|reserved_special_token_1|>
"""Stencil based grid operations in 2D."""
from .advection_flux_2d import gen_advection_flux_conservative_eno3_pyst_kernel_2d
from .advection_timestep_2d import (
gen_advection_timestep_euler_forward_conservative_eno3_pyst_kernel_2d,
)
from .brinkmann_penalise_2d import (
gen_brinkmann_penalise_pyst_kernel_2d,
gen_brinkmann_penalise_vs_fixed_val_pyst_kernel_2d,
)
from .char_func_from_level_set_2d import (
gen_char_func_from_level_set_via_sine_heaviside_pyst_kernel_2d,
)
from .diffusion_flux_2d import gen_diffusion_flux_pyst_kernel_2d
from .diffusion_timestep_2d import gen_diffusion_timestep_euler_forward_pyst_kernel_2d
from .elementwise_ops_2d import (
gen_add_fixed_val_pyst_kernel_2d,
gen_elementwise_complex_product_pyst_kernel_2d,
gen_elementwise_copy_pyst_kernel_2d,
gen_elementwise_sum_pyst_kernel_2d,
gen_set_fixed_val_at_boundaries_pyst_kernel_2d,
gen_set_fixed_val_pyst_kernel_2d,
gen_elementwise_saxpby_pyst_kernel_2d,
)
from .inplane_field_curl_2d import gen_inplane_field_curl_pyst_kernel_2d
from .outplane_field_curl_2d import gen_outplane_field_curl_pyst_kernel_2d
from .penalise_field_boundary_2d import gen_penalise_field_boundary_pyst_kernel_2d
from .update_vorticity_from_velocity_forcing_2d import (
gen_update_vorticity_from_penalised_velocity_pyst_kernel_2d,
gen_update_vorticity_from_velocity_forcing_pyst_kernel_2d,
)
|
flexible
|
{
"blob_id": "2dddee735e23e8cdb7df83f47f63926727cf8963",
"index": 2731,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfrom .advection_flux_2d import gen_advection_flux_conservative_eno3_pyst_kernel_2d\nfrom .advection_timestep_2d import gen_advection_timestep_euler_forward_conservative_eno3_pyst_kernel_2d\nfrom .brinkmann_penalise_2d import gen_brinkmann_penalise_pyst_kernel_2d, gen_brinkmann_penalise_vs_fixed_val_pyst_kernel_2d\nfrom .char_func_from_level_set_2d import gen_char_func_from_level_set_via_sine_heaviside_pyst_kernel_2d\nfrom .diffusion_flux_2d import gen_diffusion_flux_pyst_kernel_2d\nfrom .diffusion_timestep_2d import gen_diffusion_timestep_euler_forward_pyst_kernel_2d\nfrom .elementwise_ops_2d import gen_add_fixed_val_pyst_kernel_2d, gen_elementwise_complex_product_pyst_kernel_2d, gen_elementwise_copy_pyst_kernel_2d, gen_elementwise_sum_pyst_kernel_2d, gen_set_fixed_val_at_boundaries_pyst_kernel_2d, gen_set_fixed_val_pyst_kernel_2d, gen_elementwise_saxpby_pyst_kernel_2d\nfrom .inplane_field_curl_2d import gen_inplane_field_curl_pyst_kernel_2d\nfrom .outplane_field_curl_2d import gen_outplane_field_curl_pyst_kernel_2d\nfrom .penalise_field_boundary_2d import gen_penalise_field_boundary_pyst_kernel_2d\nfrom .update_vorticity_from_velocity_forcing_2d import gen_update_vorticity_from_penalised_velocity_pyst_kernel_2d, gen_update_vorticity_from_velocity_forcing_pyst_kernel_2d\n",
"step-3": "\"\"\"Stencil based grid operations in 2D.\"\"\"\nfrom .advection_flux_2d import gen_advection_flux_conservative_eno3_pyst_kernel_2d\nfrom .advection_timestep_2d import (\n gen_advection_timestep_euler_forward_conservative_eno3_pyst_kernel_2d,\n)\nfrom .brinkmann_penalise_2d import (\n gen_brinkmann_penalise_pyst_kernel_2d,\n gen_brinkmann_penalise_vs_fixed_val_pyst_kernel_2d,\n)\nfrom .char_func_from_level_set_2d import (\n gen_char_func_from_level_set_via_sine_heaviside_pyst_kernel_2d,\n)\nfrom .diffusion_flux_2d import gen_diffusion_flux_pyst_kernel_2d\nfrom .diffusion_timestep_2d import gen_diffusion_timestep_euler_forward_pyst_kernel_2d\nfrom .elementwise_ops_2d import (\n gen_add_fixed_val_pyst_kernel_2d,\n gen_elementwise_complex_product_pyst_kernel_2d,\n gen_elementwise_copy_pyst_kernel_2d,\n gen_elementwise_sum_pyst_kernel_2d,\n gen_set_fixed_val_at_boundaries_pyst_kernel_2d,\n gen_set_fixed_val_pyst_kernel_2d,\n gen_elementwise_saxpby_pyst_kernel_2d,\n)\nfrom .inplane_field_curl_2d import gen_inplane_field_curl_pyst_kernel_2d\nfrom .outplane_field_curl_2d import gen_outplane_field_curl_pyst_kernel_2d\nfrom .penalise_field_boundary_2d import gen_penalise_field_boundary_pyst_kernel_2d\nfrom .update_vorticity_from_velocity_forcing_2d import (\n gen_update_vorticity_from_penalised_velocity_pyst_kernel_2d,\n gen_update_vorticity_from_velocity_forcing_pyst_kernel_2d,\n)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import sqlite3
class announcement:
def __init__(eps_df, revenue_df):
conn = sqlite3.connect("earnings.db", timeout=120)
cur = conn.cursor()
symbol_href = self.driver.find_element_by_class_name("lfkTWp")
symbol = symbol_href.text
eps_history_df = pd.read_sql(
'select * from estimize_eps where Symbol == "%s"' % symbol, conn
)
revenue_history_df = pd.read_sql("select * from estimize_revenue", conn)
price_history_df = pd.read_sql("select * from price_history", conn)
def get_combined_df(eps_df, revenue_df):
del eps_df["Historical Beat Rate"]
del revenue_df["Historical Beat Rate"]
date_reported_df = eps_df["Date Reported"].str.split(" ", n=1, expand=True)
date_reported_df = date_reported_df.rename(
columns={0: "Date Reported", 1: "Time Reported"}
)
date_reported_df["Date Reported"] = pd.to_datetime(
date_reported_df["Date Reported"]
)
eps_df["Date Reported"] = date_reported_df["Date Reported"]
eps_df["Time Reported"] = date_reported_df["Time Reported"]
date_reported_df = revenue_df["Date Reported"].str.split(" ", n=1, expand=True)
date_reported_df = date_reported_df.rename(
columns={0: "Date Reported", 1: "Time Reported"}
)
date_reported_df["Date Reported"] = pd.to_datetime(
date_reported_df["Date Reported"]
)
revenue_df["Date Reported"] = date_reported_df["Date Reported"]
revenue_df["Time Reported"] = date_reported_df["Time Reported"]
eps_df = eps_df.sort_values(by="Date Reported")
revenue_df = revenue_df.sort_values(by="Date Reported")
eps_df = eps_df.set_index(
["Date Reported", "Time Reported", "Symbol"], append=True, drop=True
)
revenue_df = revenue_df.set_index(
["Date Reported", "Time Reported", "Symbol"], append=True, drop=True
)
eps_df.columns = "EPS " + eps_df.columns
revenue_df.columns = "Revenue " + revenue_df.columns
df = eps_df.join(revenue_df)
return df
def get_historical_beat():
df["Historical EPS Beat Ratio"] = None
df["Historical EPS Beat Percent"] = None
for index, row in df.iterrows():
index_num, date_reported, time_reported, symbol = index
this_df = df[df.index.get_level_values("Symbol") == symbol]
beat_rate = this_df[
this_df.index.get_level_values("Date Reported") <= date_reported
].tail(8)
if len(beat_rate) >= 4:
beat_rate_ratio = len(beat_rate[beat_rate["EPS Surprise"] > 0]) / float(
len(beat_rate)
)
beat_rate_percent = beat_rate["EPS Surprise"] / beat_rate["EPS Actual"]
beat_rate_percent = beat_rate_percent.replace([np.inf, -np.inf], np.nan)
beat_rate_percent = beat_rate_percent.mean()
# TODO: Do the same for revenue
df.loc[index_num, ["Historical EPS Beat Ratio"]] = beat_rate_ratio
df.loc[index_num, ["Historical EPS Beat Percent"]] = beat_rate_percent
def get_average_change():
df["Average Change 5 Days"] = None
df["Average Abnormal Change 5 Days"] = None
df["Average Change 10 Days"] = None
df["Average Abnormal Change 10 Days"] = None
for index, row in df.iterrows():
index_num, date_reported, time_reported, symbol = index
returns_df = df[
df.index.get_level_values("Date Reported") < date_reported
].tail(8)
if len(returns_df) >= 4:
df.loc[index_num, ["Average Change 5 Days"]] = returns_df[
"5 Day Change"
].mean()
df.loc[index_num, ["Average Change 10 Days"]] = returns_df[
"10 Day Change"
].mean()
df.loc[index_num, ["Average Abnormal Change 5 Days"]] = returns_df[
"5 Day Change Abnormal"
].mean()
df.loc[index_num, ["Average Abnormal Change 10 Days"]] = returns_df[
"10 Day Change Abnormal"
].mean()
def get_YoY_growth():
df["YoY Growth"] = None
for index, row in df.iterrows():
index_num, date_reported, time_reported, symbol = index
time_reported = time_reported.replace("'", "")
quarter_numer, year = time_reported.split(" ")
this_df = df["EPS Actual"]
try:
this_quarter = this_df[
this_df.index.get_level_values("Time Reported")
== quarter_numer + " '" + year
].values[0]
last_quarter = this_df[
this_df.index.get_level_values("Time Reported")
== quarter_numer + " '" + str(int(year) - 1)
].values[0]
df.loc[index_num, ["YoY Growth"]] = (
this_quarter - last_quarter
) / last_quarter
except Exception as e:
pass
def get_market_cap():
finviz_page = r.get("https://finviz.com/quote.ashx?t=%s" % symbol)
soup = BeautifulSoup(finviz_page.text, features="lxml")
table_row = soup.findAll("tr", attrs={"class": "table-dark-row"})[1]
market_cap = table_row.text.replace("Market Cap", "").split("\n")[1]
if "K" in market_cap:
market_cap = float(market_cap[:-1]) * 1000
elif "M" in market_cap:
market_cap = float(market_cap[:-1]) * 1000000
elif "B" in market_cap:
market_cap = float(market_cap[:-1]) * 1000000000
market_cap = int(market_cap)
if market_cap > 10000000000:
market_cap_text = "Large"
elif market_cap > 2000000000:
market_cap_text = "Medium"
elif market_cap > 300000000:
market_cap_text = "Small"
elif market_cap > 50000000:
market_cap_text = "Micro"
else:
market_cap_text = "Nano"
df["Market Cap Text"] = market_cap_text
def get_estimize_data(self):
# request the estimize website for data
url = "https://www.estimize.com/calendar?tab=equity&date=" + datetime.now().strftime(
"%Y-%m-%d"
)
self.driver.get(url)
# check if there are no companies reporting earnings
myElem = WebDriverWait(self.driver, self.delay).until(
EC.presence_of_element_located((By.CLASS_NAME, "dAViVi"))
)
companies_reporting_div = self.driver.find_element_by_class_name("dAViVi")
if "0 Events" == companies_reporting_div.text.split("\n")[1]:
return
# method to extra the ticker symbols from the webpage
tickers = self.get_tickers()
# method to get the historical data from yahoo
# self.get_yahoo_historical(tickers)
# TODO: update price history table with missing yahoo price data entries
# read the table and make a dataframe out of it
eps_df = pd.read_html(self.driver.page_source)[0]
eps_df["Symbol"] = tickers
eps_df = eps_df.iloc[:, [2, 3, 5, 6, 7, 8, 9, 10, 12]]
eps_df.columns = [
"Date Reported",
"Num of Estimates",
"Delta",
"Surprise",
"Historical Beat Rate",
"Wall St",
"Estimize",
"Actual",
"Symbol",
]
# same as above, but for revenues table instead of EPS table
url = (
"https://www.estimize.com/calendar?tab=equity&metric=revenue&date="
+ self.read_date.strftime("%Y-%m-%d")
)
self.driver.get(url)
myElem = WebDriverWait(self.driver, self.delay).until(
EC.presence_of_element_located((By.TAG_NAME, "table"))
)
revenue_df = pd.read_html(self.driver.page_source)[0]
tickers = self.get_tickers()
revenue_df["Symbol"] = tickers
revenue_df = revenue_df.iloc[:, [2, 3, 5, 6, 7, 8, 9, 10, 12]]
revenue_df.columns = [
"Date Reported",
"Num of Estimates",
"Delta",
"Surprise",
"Historical Beat Rate",
"Wall St",
"Estimize",
"Actual",
"Symbol",
]
return eps_df, revenue_df
def get_tickers(self):
    """Return the ticker symbols currently shown on the calendar page.

    Parses the rendered page source and collects the text content of
    every anchor styled with the ``lfkTWp`` class (one per listed
    company), preserving page order.
    """
    page = BeautifulSoup(self.driver.page_source, features="lxml")
    return [
        link.contents[0]
        for link in page.findAll("a", attrs={"class": "lfkTWp"})
    ]
|
normal
|
{
"blob_id": "b7738c27e11e9566d90157717633312031cdffd6",
"index": 818,
"step-1": "<mask token>\n\n\nclass announcement:\n\n def __init__(eps_df, revenue_df):\n conn = sqlite3.connect('earnings.db', timeout=120)\n cur = conn.cursor()\n symbol_href = self.driver.find_element_by_class_name('lfkTWp')\n symbol = symbol_href.text\n eps_history_df = pd.read_sql(\n 'select * from estimize_eps where Symbol == \"%s\"' % symbol, conn)\n revenue_history_df = pd.read_sql('select * from estimize_revenue', conn\n )\n price_history_df = pd.read_sql('select * from price_history', conn)\n\n def get_combined_df(eps_df, revenue_df):\n del eps_df['Historical Beat Rate']\n del revenue_df['Historical Beat Rate']\n date_reported_df = eps_df['Date Reported'].str.split(' ', n=1,\n expand=True)\n date_reported_df = date_reported_df.rename(columns={(0):\n 'Date Reported', (1): 'Time Reported'})\n date_reported_df['Date Reported'] = pd.to_datetime(date_reported_df\n ['Date Reported'])\n eps_df['Date Reported'] = date_reported_df['Date Reported']\n eps_df['Time Reported'] = date_reported_df['Time Reported']\n date_reported_df = revenue_df['Date Reported'].str.split(' ', n=1,\n expand=True)\n date_reported_df = date_reported_df.rename(columns={(0):\n 'Date Reported', (1): 'Time Reported'})\n date_reported_df['Date Reported'] = pd.to_datetime(date_reported_df\n ['Date Reported'])\n revenue_df['Date Reported'] = date_reported_df['Date Reported']\n revenue_df['Time Reported'] = date_reported_df['Time Reported']\n eps_df = eps_df.sort_values(by='Date Reported')\n revenue_df = revenue_df.sort_values(by='Date Reported')\n eps_df = eps_df.set_index(['Date Reported', 'Time Reported',\n 'Symbol'], append=True, drop=True)\n revenue_df = revenue_df.set_index(['Date Reported', 'Time Reported',\n 'Symbol'], append=True, drop=True)\n eps_df.columns = 'EPS ' + eps_df.columns\n revenue_df.columns = 'Revenue ' + revenue_df.columns\n df = eps_df.join(revenue_df)\n return df\n\n def get_historical_beat():\n df['Historical EPS Beat Ratio'] = None\n df['Historical EPS Beat Percent'] = 
None\n for index, row in df.iterrows():\n index_num, date_reported, time_reported, symbol = index\n this_df = df[df.index.get_level_values('Symbol') == symbol]\n beat_rate = this_df[this_df.index.get_level_values(\n 'Date Reported') <= date_reported].tail(8)\n if len(beat_rate) >= 4:\n beat_rate_ratio = len(beat_rate[beat_rate['EPS Surprise'] > 0]\n ) / float(len(beat_rate))\n beat_rate_percent = beat_rate['EPS Surprise'] / beat_rate[\n 'EPS Actual']\n beat_rate_percent = beat_rate_percent.replace([np.inf, -np.\n inf], np.nan)\n beat_rate_percent = beat_rate_percent.mean()\n df.loc[index_num, ['Historical EPS Beat Ratio']\n ] = beat_rate_ratio\n df.loc[index_num, ['Historical EPS Beat Percent']\n ] = beat_rate_percent\n\n def get_average_change():\n df['Average Change 5 Days'] = None\n df['Average Abnormal Change 5 Days'] = None\n df['Average Change 10 Days'] = None\n df['Average Abnormal Change 10 Days'] = None\n for index, row in df.iterrows():\n index_num, date_reported, time_reported, symbol = index\n returns_df = df[df.index.get_level_values('Date Reported') <\n date_reported].tail(8)\n if len(returns_df) >= 4:\n df.loc[index_num, ['Average Change 5 Days']] = returns_df[\n '5 Day Change'].mean()\n df.loc[index_num, ['Average Change 10 Days']] = returns_df[\n '10 Day Change'].mean()\n df.loc[index_num, ['Average Abnormal Change 5 Days']\n ] = returns_df['5 Day Change Abnormal'].mean()\n df.loc[index_num, ['Average Abnormal Change 10 Days']\n ] = returns_df['10 Day Change Abnormal'].mean()\n\n def get_YoY_growth():\n df['YoY Growth'] = None\n for index, row in df.iterrows():\n index_num, date_reported, time_reported, symbol = index\n time_reported = time_reported.replace(\"'\", '')\n quarter_numer, year = time_reported.split(' ')\n this_df = df['EPS Actual']\n try:\n this_quarter = this_df[this_df.index.get_level_values(\n 'Time Reported') == quarter_numer + \" '\" + year].values[0]\n last_quarter = this_df[this_df.index.get_level_values(\n 'Time Reported') == 
quarter_numer + \" '\" + str(int(year\n ) - 1)].values[0]\n df.loc[index_num, ['YoY Growth']] = (this_quarter -\n last_quarter) / last_quarter\n except Exception as e:\n pass\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass announcement:\n\n def __init__(eps_df, revenue_df):\n conn = sqlite3.connect('earnings.db', timeout=120)\n cur = conn.cursor()\n symbol_href = self.driver.find_element_by_class_name('lfkTWp')\n symbol = symbol_href.text\n eps_history_df = pd.read_sql(\n 'select * from estimize_eps where Symbol == \"%s\"' % symbol, conn)\n revenue_history_df = pd.read_sql('select * from estimize_revenue', conn\n )\n price_history_df = pd.read_sql('select * from price_history', conn)\n\n def get_combined_df(eps_df, revenue_df):\n del eps_df['Historical Beat Rate']\n del revenue_df['Historical Beat Rate']\n date_reported_df = eps_df['Date Reported'].str.split(' ', n=1,\n expand=True)\n date_reported_df = date_reported_df.rename(columns={(0):\n 'Date Reported', (1): 'Time Reported'})\n date_reported_df['Date Reported'] = pd.to_datetime(date_reported_df\n ['Date Reported'])\n eps_df['Date Reported'] = date_reported_df['Date Reported']\n eps_df['Time Reported'] = date_reported_df['Time Reported']\n date_reported_df = revenue_df['Date Reported'].str.split(' ', n=1,\n expand=True)\n date_reported_df = date_reported_df.rename(columns={(0):\n 'Date Reported', (1): 'Time Reported'})\n date_reported_df['Date Reported'] = pd.to_datetime(date_reported_df\n ['Date Reported'])\n revenue_df['Date Reported'] = date_reported_df['Date Reported']\n revenue_df['Time Reported'] = date_reported_df['Time Reported']\n eps_df = eps_df.sort_values(by='Date Reported')\n revenue_df = revenue_df.sort_values(by='Date Reported')\n eps_df = eps_df.set_index(['Date Reported', 'Time Reported',\n 'Symbol'], append=True, drop=True)\n revenue_df = revenue_df.set_index(['Date Reported', 'Time Reported',\n 'Symbol'], append=True, drop=True)\n eps_df.columns = 'EPS ' + eps_df.columns\n revenue_df.columns = 'Revenue ' + revenue_df.columns\n df = eps_df.join(revenue_df)\n return df\n\n def get_historical_beat():\n df['Historical EPS Beat Ratio'] = None\n df['Historical EPS Beat Percent'] = 
None\n for index, row in df.iterrows():\n index_num, date_reported, time_reported, symbol = index\n this_df = df[df.index.get_level_values('Symbol') == symbol]\n beat_rate = this_df[this_df.index.get_level_values(\n 'Date Reported') <= date_reported].tail(8)\n if len(beat_rate) >= 4:\n beat_rate_ratio = len(beat_rate[beat_rate['EPS Surprise'] > 0]\n ) / float(len(beat_rate))\n beat_rate_percent = beat_rate['EPS Surprise'] / beat_rate[\n 'EPS Actual']\n beat_rate_percent = beat_rate_percent.replace([np.inf, -np.\n inf], np.nan)\n beat_rate_percent = beat_rate_percent.mean()\n df.loc[index_num, ['Historical EPS Beat Ratio']\n ] = beat_rate_ratio\n df.loc[index_num, ['Historical EPS Beat Percent']\n ] = beat_rate_percent\n\n def get_average_change():\n df['Average Change 5 Days'] = None\n df['Average Abnormal Change 5 Days'] = None\n df['Average Change 10 Days'] = None\n df['Average Abnormal Change 10 Days'] = None\n for index, row in df.iterrows():\n index_num, date_reported, time_reported, symbol = index\n returns_df = df[df.index.get_level_values('Date Reported') <\n date_reported].tail(8)\n if len(returns_df) >= 4:\n df.loc[index_num, ['Average Change 5 Days']] = returns_df[\n '5 Day Change'].mean()\n df.loc[index_num, ['Average Change 10 Days']] = returns_df[\n '10 Day Change'].mean()\n df.loc[index_num, ['Average Abnormal Change 5 Days']\n ] = returns_df['5 Day Change Abnormal'].mean()\n df.loc[index_num, ['Average Abnormal Change 10 Days']\n ] = returns_df['10 Day Change Abnormal'].mean()\n\n def get_YoY_growth():\n df['YoY Growth'] = None\n for index, row in df.iterrows():\n index_num, date_reported, time_reported, symbol = index\n time_reported = time_reported.replace(\"'\", '')\n quarter_numer, year = time_reported.split(' ')\n this_df = df['EPS Actual']\n try:\n this_quarter = this_df[this_df.index.get_level_values(\n 'Time Reported') == quarter_numer + \" '\" + year].values[0]\n last_quarter = this_df[this_df.index.get_level_values(\n 'Time Reported') == 
quarter_numer + \" '\" + str(int(year\n ) - 1)].values[0]\n df.loc[index_num, ['YoY Growth']] = (this_quarter -\n last_quarter) / last_quarter\n except Exception as e:\n pass\n\n def get_market_cap():\n finviz_page = r.get('https://finviz.com/quote.ashx?t=%s' % symbol)\n soup = BeautifulSoup(finviz_page.text, features='lxml')\n table_row = soup.findAll('tr', attrs={'class': 'table-dark-row'})[1]\n market_cap = table_row.text.replace('Market Cap', '').split('\\n')[1]\n if 'K' in market_cap:\n market_cap = float(market_cap[:-1]) * 1000\n elif 'M' in market_cap:\n market_cap = float(market_cap[:-1]) * 1000000\n elif 'B' in market_cap:\n market_cap = float(market_cap[:-1]) * 1000000000\n market_cap = int(market_cap)\n if market_cap > 10000000000:\n market_cap_text = 'Large'\n elif market_cap > 2000000000:\n market_cap_text = 'Medium'\n elif market_cap > 300000000:\n market_cap_text = 'Small'\n elif market_cap > 50000000:\n market_cap_text = 'Micro'\n else:\n market_cap_text = 'Nano'\n df['Market Cap Text'] = market_cap_text\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass announcement:\n\n def __init__(eps_df, revenue_df):\n conn = sqlite3.connect('earnings.db', timeout=120)\n cur = conn.cursor()\n symbol_href = self.driver.find_element_by_class_name('lfkTWp')\n symbol = symbol_href.text\n eps_history_df = pd.read_sql(\n 'select * from estimize_eps where Symbol == \"%s\"' % symbol, conn)\n revenue_history_df = pd.read_sql('select * from estimize_revenue', conn\n )\n price_history_df = pd.read_sql('select * from price_history', conn)\n\n def get_combined_df(eps_df, revenue_df):\n del eps_df['Historical Beat Rate']\n del revenue_df['Historical Beat Rate']\n date_reported_df = eps_df['Date Reported'].str.split(' ', n=1,\n expand=True)\n date_reported_df = date_reported_df.rename(columns={(0):\n 'Date Reported', (1): 'Time Reported'})\n date_reported_df['Date Reported'] = pd.to_datetime(date_reported_df\n ['Date Reported'])\n eps_df['Date Reported'] = date_reported_df['Date Reported']\n eps_df['Time Reported'] = date_reported_df['Time Reported']\n date_reported_df = revenue_df['Date Reported'].str.split(' ', n=1,\n expand=True)\n date_reported_df = date_reported_df.rename(columns={(0):\n 'Date Reported', (1): 'Time Reported'})\n date_reported_df['Date Reported'] = pd.to_datetime(date_reported_df\n ['Date Reported'])\n revenue_df['Date Reported'] = date_reported_df['Date Reported']\n revenue_df['Time Reported'] = date_reported_df['Time Reported']\n eps_df = eps_df.sort_values(by='Date Reported')\n revenue_df = revenue_df.sort_values(by='Date Reported')\n eps_df = eps_df.set_index(['Date Reported', 'Time Reported',\n 'Symbol'], append=True, drop=True)\n revenue_df = revenue_df.set_index(['Date Reported', 'Time Reported',\n 'Symbol'], append=True, drop=True)\n eps_df.columns = 'EPS ' + eps_df.columns\n revenue_df.columns = 'Revenue ' + revenue_df.columns\n df = eps_df.join(revenue_df)\n return df\n\n def get_historical_beat():\n df['Historical EPS Beat Ratio'] = None\n df['Historical EPS Beat Percent'] = 
None\n for index, row in df.iterrows():\n index_num, date_reported, time_reported, symbol = index\n this_df = df[df.index.get_level_values('Symbol') == symbol]\n beat_rate = this_df[this_df.index.get_level_values(\n 'Date Reported') <= date_reported].tail(8)\n if len(beat_rate) >= 4:\n beat_rate_ratio = len(beat_rate[beat_rate['EPS Surprise'] > 0]\n ) / float(len(beat_rate))\n beat_rate_percent = beat_rate['EPS Surprise'] / beat_rate[\n 'EPS Actual']\n beat_rate_percent = beat_rate_percent.replace([np.inf, -np.\n inf], np.nan)\n beat_rate_percent = beat_rate_percent.mean()\n df.loc[index_num, ['Historical EPS Beat Ratio']\n ] = beat_rate_ratio\n df.loc[index_num, ['Historical EPS Beat Percent']\n ] = beat_rate_percent\n\n def get_average_change():\n df['Average Change 5 Days'] = None\n df['Average Abnormal Change 5 Days'] = None\n df['Average Change 10 Days'] = None\n df['Average Abnormal Change 10 Days'] = None\n for index, row in df.iterrows():\n index_num, date_reported, time_reported, symbol = index\n returns_df = df[df.index.get_level_values('Date Reported') <\n date_reported].tail(8)\n if len(returns_df) >= 4:\n df.loc[index_num, ['Average Change 5 Days']] = returns_df[\n '5 Day Change'].mean()\n df.loc[index_num, ['Average Change 10 Days']] = returns_df[\n '10 Day Change'].mean()\n df.loc[index_num, ['Average Abnormal Change 5 Days']\n ] = returns_df['5 Day Change Abnormal'].mean()\n df.loc[index_num, ['Average Abnormal Change 10 Days']\n ] = returns_df['10 Day Change Abnormal'].mean()\n\n def get_YoY_growth():\n df['YoY Growth'] = None\n for index, row in df.iterrows():\n index_num, date_reported, time_reported, symbol = index\n time_reported = time_reported.replace(\"'\", '')\n quarter_numer, year = time_reported.split(' ')\n this_df = df['EPS Actual']\n try:\n this_quarter = this_df[this_df.index.get_level_values(\n 'Time Reported') == quarter_numer + \" '\" + year].values[0]\n last_quarter = this_df[this_df.index.get_level_values(\n 'Time Reported') == 
quarter_numer + \" '\" + str(int(year\n ) - 1)].values[0]\n df.loc[index_num, ['YoY Growth']] = (this_quarter -\n last_quarter) / last_quarter\n except Exception as e:\n pass\n\n def get_market_cap():\n finviz_page = r.get('https://finviz.com/quote.ashx?t=%s' % symbol)\n soup = BeautifulSoup(finviz_page.text, features='lxml')\n table_row = soup.findAll('tr', attrs={'class': 'table-dark-row'})[1]\n market_cap = table_row.text.replace('Market Cap', '').split('\\n')[1]\n if 'K' in market_cap:\n market_cap = float(market_cap[:-1]) * 1000\n elif 'M' in market_cap:\n market_cap = float(market_cap[:-1]) * 1000000\n elif 'B' in market_cap:\n market_cap = float(market_cap[:-1]) * 1000000000\n market_cap = int(market_cap)\n if market_cap > 10000000000:\n market_cap_text = 'Large'\n elif market_cap > 2000000000:\n market_cap_text = 'Medium'\n elif market_cap > 300000000:\n market_cap_text = 'Small'\n elif market_cap > 50000000:\n market_cap_text = 'Micro'\n else:\n market_cap_text = 'Nano'\n df['Market Cap Text'] = market_cap_text\n\n\ndef get_estimize_data(self):\n url = 'https://www.estimize.com/calendar?tab=equity&date=' + datetime.now(\n ).strftime('%Y-%m-%d')\n self.driver.get(url)\n myElem = WebDriverWait(self.driver, self.delay).until(EC.\n presence_of_element_located((By.CLASS_NAME, 'dAViVi')))\n companies_reporting_div = self.driver.find_element_by_class_name('dAViVi')\n if '0 Events' == companies_reporting_div.text.split('\\n')[1]:\n return\n tickers = self.get_tickers()\n eps_df = pd.read_html(self.driver.page_source)[0]\n eps_df['Symbol'] = tickers\n eps_df = eps_df.iloc[:, [2, 3, 5, 6, 7, 8, 9, 10, 12]]\n eps_df.columns = ['Date Reported', 'Num of Estimates', 'Delta',\n 'Surprise', 'Historical Beat Rate', 'Wall St', 'Estimize', 'Actual',\n 'Symbol']\n url = (\n 'https://www.estimize.com/calendar?tab=equity&metric=revenue&date=' +\n self.read_date.strftime('%Y-%m-%d'))\n self.driver.get(url)\n myElem = WebDriverWait(self.driver, self.delay).until(EC.\n 
presence_of_element_located((By.TAG_NAME, 'table')))\n revenue_df = pd.read_html(self.driver.page_source)[0]\n tickers = self.get_tickers()\n revenue_df['Symbol'] = tickers\n revenue_df = revenue_df.iloc[:, [2, 3, 5, 6, 7, 8, 9, 10, 12]]\n revenue_df.columns = ['Date Reported', 'Num of Estimates', 'Delta',\n 'Surprise', 'Historical Beat Rate', 'Wall St', 'Estimize', 'Actual',\n 'Symbol']\n return eps_df, revenue_df\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass announcement:\n\n def __init__(eps_df, revenue_df):\n conn = sqlite3.connect('earnings.db', timeout=120)\n cur = conn.cursor()\n symbol_href = self.driver.find_element_by_class_name('lfkTWp')\n symbol = symbol_href.text\n eps_history_df = pd.read_sql(\n 'select * from estimize_eps where Symbol == \"%s\"' % symbol, conn)\n revenue_history_df = pd.read_sql('select * from estimize_revenue', conn\n )\n price_history_df = pd.read_sql('select * from price_history', conn)\n\n def get_combined_df(eps_df, revenue_df):\n del eps_df['Historical Beat Rate']\n del revenue_df['Historical Beat Rate']\n date_reported_df = eps_df['Date Reported'].str.split(' ', n=1,\n expand=True)\n date_reported_df = date_reported_df.rename(columns={(0):\n 'Date Reported', (1): 'Time Reported'})\n date_reported_df['Date Reported'] = pd.to_datetime(date_reported_df\n ['Date Reported'])\n eps_df['Date Reported'] = date_reported_df['Date Reported']\n eps_df['Time Reported'] = date_reported_df['Time Reported']\n date_reported_df = revenue_df['Date Reported'].str.split(' ', n=1,\n expand=True)\n date_reported_df = date_reported_df.rename(columns={(0):\n 'Date Reported', (1): 'Time Reported'})\n date_reported_df['Date Reported'] = pd.to_datetime(date_reported_df\n ['Date Reported'])\n revenue_df['Date Reported'] = date_reported_df['Date Reported']\n revenue_df['Time Reported'] = date_reported_df['Time Reported']\n eps_df = eps_df.sort_values(by='Date Reported')\n revenue_df = revenue_df.sort_values(by='Date Reported')\n eps_df = eps_df.set_index(['Date Reported', 'Time Reported',\n 'Symbol'], append=True, drop=True)\n revenue_df = revenue_df.set_index(['Date Reported', 'Time Reported',\n 'Symbol'], append=True, drop=True)\n eps_df.columns = 'EPS ' + eps_df.columns\n revenue_df.columns = 'Revenue ' + revenue_df.columns\n df = eps_df.join(revenue_df)\n return df\n\n def get_historical_beat():\n df['Historical EPS Beat Ratio'] = None\n df['Historical EPS Beat Percent'] = 
None\n for index, row in df.iterrows():\n index_num, date_reported, time_reported, symbol = index\n this_df = df[df.index.get_level_values('Symbol') == symbol]\n beat_rate = this_df[this_df.index.get_level_values(\n 'Date Reported') <= date_reported].tail(8)\n if len(beat_rate) >= 4:\n beat_rate_ratio = len(beat_rate[beat_rate['EPS Surprise'] > 0]\n ) / float(len(beat_rate))\n beat_rate_percent = beat_rate['EPS Surprise'] / beat_rate[\n 'EPS Actual']\n beat_rate_percent = beat_rate_percent.replace([np.inf, -np.\n inf], np.nan)\n beat_rate_percent = beat_rate_percent.mean()\n df.loc[index_num, ['Historical EPS Beat Ratio']\n ] = beat_rate_ratio\n df.loc[index_num, ['Historical EPS Beat Percent']\n ] = beat_rate_percent\n\n def get_average_change():\n df['Average Change 5 Days'] = None\n df['Average Abnormal Change 5 Days'] = None\n df['Average Change 10 Days'] = None\n df['Average Abnormal Change 10 Days'] = None\n for index, row in df.iterrows():\n index_num, date_reported, time_reported, symbol = index\n returns_df = df[df.index.get_level_values('Date Reported') <\n date_reported].tail(8)\n if len(returns_df) >= 4:\n df.loc[index_num, ['Average Change 5 Days']] = returns_df[\n '5 Day Change'].mean()\n df.loc[index_num, ['Average Change 10 Days']] = returns_df[\n '10 Day Change'].mean()\n df.loc[index_num, ['Average Abnormal Change 5 Days']\n ] = returns_df['5 Day Change Abnormal'].mean()\n df.loc[index_num, ['Average Abnormal Change 10 Days']\n ] = returns_df['10 Day Change Abnormal'].mean()\n\n def get_YoY_growth():\n df['YoY Growth'] = None\n for index, row in df.iterrows():\n index_num, date_reported, time_reported, symbol = index\n time_reported = time_reported.replace(\"'\", '')\n quarter_numer, year = time_reported.split(' ')\n this_df = df['EPS Actual']\n try:\n this_quarter = this_df[this_df.index.get_level_values(\n 'Time Reported') == quarter_numer + \" '\" + year].values[0]\n last_quarter = this_df[this_df.index.get_level_values(\n 'Time Reported') == 
quarter_numer + \" '\" + str(int(year\n ) - 1)].values[0]\n df.loc[index_num, ['YoY Growth']] = (this_quarter -\n last_quarter) / last_quarter\n except Exception as e:\n pass\n\n def get_market_cap():\n finviz_page = r.get('https://finviz.com/quote.ashx?t=%s' % symbol)\n soup = BeautifulSoup(finviz_page.text, features='lxml')\n table_row = soup.findAll('tr', attrs={'class': 'table-dark-row'})[1]\n market_cap = table_row.text.replace('Market Cap', '').split('\\n')[1]\n if 'K' in market_cap:\n market_cap = float(market_cap[:-1]) * 1000\n elif 'M' in market_cap:\n market_cap = float(market_cap[:-1]) * 1000000\n elif 'B' in market_cap:\n market_cap = float(market_cap[:-1]) * 1000000000\n market_cap = int(market_cap)\n if market_cap > 10000000000:\n market_cap_text = 'Large'\n elif market_cap > 2000000000:\n market_cap_text = 'Medium'\n elif market_cap > 300000000:\n market_cap_text = 'Small'\n elif market_cap > 50000000:\n market_cap_text = 'Micro'\n else:\n market_cap_text = 'Nano'\n df['Market Cap Text'] = market_cap_text\n\n\ndef get_estimize_data(self):\n url = 'https://www.estimize.com/calendar?tab=equity&date=' + datetime.now(\n ).strftime('%Y-%m-%d')\n self.driver.get(url)\n myElem = WebDriverWait(self.driver, self.delay).until(EC.\n presence_of_element_located((By.CLASS_NAME, 'dAViVi')))\n companies_reporting_div = self.driver.find_element_by_class_name('dAViVi')\n if '0 Events' == companies_reporting_div.text.split('\\n')[1]:\n return\n tickers = self.get_tickers()\n eps_df = pd.read_html(self.driver.page_source)[0]\n eps_df['Symbol'] = tickers\n eps_df = eps_df.iloc[:, [2, 3, 5, 6, 7, 8, 9, 10, 12]]\n eps_df.columns = ['Date Reported', 'Num of Estimates', 'Delta',\n 'Surprise', 'Historical Beat Rate', 'Wall St', 'Estimize', 'Actual',\n 'Symbol']\n url = (\n 'https://www.estimize.com/calendar?tab=equity&metric=revenue&date=' +\n self.read_date.strftime('%Y-%m-%d'))\n self.driver.get(url)\n myElem = WebDriverWait(self.driver, self.delay).until(EC.\n 
presence_of_element_located((By.TAG_NAME, 'table')))\n revenue_df = pd.read_html(self.driver.page_source)[0]\n tickers = self.get_tickers()\n revenue_df['Symbol'] = tickers\n revenue_df = revenue_df.iloc[:, [2, 3, 5, 6, 7, 8, 9, 10, 12]]\n revenue_df.columns = ['Date Reported', 'Num of Estimates', 'Delta',\n 'Surprise', 'Historical Beat Rate', 'Wall St', 'Estimize', 'Actual',\n 'Symbol']\n return eps_df, revenue_df\n\n\ndef get_tickers(self):\n soup = BeautifulSoup(self.driver.page_source, features='lxml')\n ticker_links = soup.findAll('a', attrs={'class': 'lfkTWp'})\n tickers = []\n for ticker in ticker_links:\n tickers.append(ticker.contents[0])\n return tickers\n",
"step-5": "import sqlite3\n\n\nclass announcement:\n def __init__(eps_df, revenue_df):\n conn = sqlite3.connect(\"earnings.db\", timeout=120)\n cur = conn.cursor()\n\n symbol_href = self.driver.find_element_by_class_name(\"lfkTWp\")\n symbol = symbol_href.text\n\n eps_history_df = pd.read_sql(\n 'select * from estimize_eps where Symbol == \"%s\"' % symbol, conn\n )\n revenue_history_df = pd.read_sql(\"select * from estimize_revenue\", conn)\n price_history_df = pd.read_sql(\"select * from price_history\", conn)\n\n def get_combined_df(eps_df, revenue_df):\n del eps_df[\"Historical Beat Rate\"]\n del revenue_df[\"Historical Beat Rate\"]\n\n date_reported_df = eps_df[\"Date Reported\"].str.split(\" \", n=1, expand=True)\n date_reported_df = date_reported_df.rename(\n columns={0: \"Date Reported\", 1: \"Time Reported\"}\n )\n date_reported_df[\"Date Reported\"] = pd.to_datetime(\n date_reported_df[\"Date Reported\"]\n )\n eps_df[\"Date Reported\"] = date_reported_df[\"Date Reported\"]\n eps_df[\"Time Reported\"] = date_reported_df[\"Time Reported\"]\n\n date_reported_df = revenue_df[\"Date Reported\"].str.split(\" \", n=1, expand=True)\n date_reported_df = date_reported_df.rename(\n columns={0: \"Date Reported\", 1: \"Time Reported\"}\n )\n date_reported_df[\"Date Reported\"] = pd.to_datetime(\n date_reported_df[\"Date Reported\"]\n )\n revenue_df[\"Date Reported\"] = date_reported_df[\"Date Reported\"]\n revenue_df[\"Time Reported\"] = date_reported_df[\"Time Reported\"]\n\n eps_df = eps_df.sort_values(by=\"Date Reported\")\n revenue_df = revenue_df.sort_values(by=\"Date Reported\")\n\n eps_df = eps_df.set_index(\n [\"Date Reported\", \"Time Reported\", \"Symbol\"], append=True, drop=True\n )\n revenue_df = revenue_df.set_index(\n [\"Date Reported\", \"Time Reported\", \"Symbol\"], append=True, drop=True\n )\n\n eps_df.columns = \"EPS \" + eps_df.columns\n revenue_df.columns = \"Revenue \" + revenue_df.columns\n\n df = eps_df.join(revenue_df)\n\n return df\n\n def 
get_historical_beat():\n df[\"Historical EPS Beat Ratio\"] = None\n df[\"Historical EPS Beat Percent\"] = None\n for index, row in df.iterrows():\n index_num, date_reported, time_reported, symbol = index\n\n this_df = df[df.index.get_level_values(\"Symbol\") == symbol]\n beat_rate = this_df[\n this_df.index.get_level_values(\"Date Reported\") <= date_reported\n ].tail(8)\n\n if len(beat_rate) >= 4:\n beat_rate_ratio = len(beat_rate[beat_rate[\"EPS Surprise\"] > 0]) / float(\n len(beat_rate)\n )\n beat_rate_percent = beat_rate[\"EPS Surprise\"] / beat_rate[\"EPS Actual\"]\n beat_rate_percent = beat_rate_percent.replace([np.inf, -np.inf], np.nan)\n beat_rate_percent = beat_rate_percent.mean()\n\n # TODO: Do the same for revenue\n df.loc[index_num, [\"Historical EPS Beat Ratio\"]] = beat_rate_ratio\n df.loc[index_num, [\"Historical EPS Beat Percent\"]] = beat_rate_percent\n\n def get_average_change():\n df[\"Average Change 5 Days\"] = None\n df[\"Average Abnormal Change 5 Days\"] = None\n df[\"Average Change 10 Days\"] = None\n df[\"Average Abnormal Change 10 Days\"] = None\n for index, row in df.iterrows():\n index_num, date_reported, time_reported, symbol = index\n\n returns_df = df[\n df.index.get_level_values(\"Date Reported\") < date_reported\n ].tail(8)\n\n if len(returns_df) >= 4:\n df.loc[index_num, [\"Average Change 5 Days\"]] = returns_df[\n \"5 Day Change\"\n ].mean()\n df.loc[index_num, [\"Average Change 10 Days\"]] = returns_df[\n \"10 Day Change\"\n ].mean()\n df.loc[index_num, [\"Average Abnormal Change 5 Days\"]] = returns_df[\n \"5 Day Change Abnormal\"\n ].mean()\n df.loc[index_num, [\"Average Abnormal Change 10 Days\"]] = returns_df[\n \"10 Day Change Abnormal\"\n ].mean()\n\n def get_YoY_growth():\n df[\"YoY Growth\"] = None\n for index, row in df.iterrows():\n index_num, date_reported, time_reported, symbol = index\n time_reported = time_reported.replace(\"'\", \"\")\n quarter_numer, year = time_reported.split(\" \")\n\n this_df = df[\"EPS 
Actual\"]\n try:\n this_quarter = this_df[\n this_df.index.get_level_values(\"Time Reported\")\n == quarter_numer + \" '\" + year\n ].values[0]\n last_quarter = this_df[\n this_df.index.get_level_values(\"Time Reported\")\n == quarter_numer + \" '\" + str(int(year) - 1)\n ].values[0]\n df.loc[index_num, [\"YoY Growth\"]] = (\n this_quarter - last_quarter\n ) / last_quarter\n except Exception as e:\n pass\n\n def get_market_cap():\n finviz_page = r.get(\"https://finviz.com/quote.ashx?t=%s\" % symbol)\n\n soup = BeautifulSoup(finviz_page.text, features=\"lxml\")\n table_row = soup.findAll(\"tr\", attrs={\"class\": \"table-dark-row\"})[1]\n market_cap = table_row.text.replace(\"Market Cap\", \"\").split(\"\\n\")[1]\n if \"K\" in market_cap:\n market_cap = float(market_cap[:-1]) * 1000\n elif \"M\" in market_cap:\n market_cap = float(market_cap[:-1]) * 1000000\n elif \"B\" in market_cap:\n market_cap = float(market_cap[:-1]) * 1000000000\n\n market_cap = int(market_cap)\n if market_cap > 10000000000:\n market_cap_text = \"Large\"\n elif market_cap > 2000000000:\n market_cap_text = \"Medium\"\n elif market_cap > 300000000:\n market_cap_text = \"Small\"\n elif market_cap > 50000000:\n market_cap_text = \"Micro\"\n else:\n market_cap_text = \"Nano\"\n\n df[\"Market Cap Text\"] = market_cap_text\n\n\ndef get_estimize_data(self):\n # request the estimize website for data\n url = \"https://www.estimize.com/calendar?tab=equity&date=\" + datetime.now().strftime(\n \"%Y-%m-%d\"\n )\n self.driver.get(url)\n\n # check if there are no companies reporting earnings\n myElem = WebDriverWait(self.driver, self.delay).until(\n EC.presence_of_element_located((By.CLASS_NAME, \"dAViVi\"))\n )\n companies_reporting_div = self.driver.find_element_by_class_name(\"dAViVi\")\n if \"0 Events\" == companies_reporting_div.text.split(\"\\n\")[1]:\n return\n\n # method to extra the ticker symbols from the webpage\n tickers = self.get_tickers()\n\n # method to get the historical data from yahoo\n # 
self.get_yahoo_historical(tickers)\n # TODO: update price history table with missing yahoo price data entries\n\n # read the table and make a dataframe out of it\n eps_df = pd.read_html(self.driver.page_source)[0]\n eps_df[\"Symbol\"] = tickers\n eps_df = eps_df.iloc[:, [2, 3, 5, 6, 7, 8, 9, 10, 12]]\n eps_df.columns = [\n \"Date Reported\",\n \"Num of Estimates\",\n \"Delta\",\n \"Surprise\",\n \"Historical Beat Rate\",\n \"Wall St\",\n \"Estimize\",\n \"Actual\",\n \"Symbol\",\n ]\n\n # same as above, but for revenues table instead of EPS table\n url = (\n \"https://www.estimize.com/calendar?tab=equity&metric=revenue&date=\"\n + self.read_date.strftime(\"%Y-%m-%d\")\n )\n self.driver.get(url)\n myElem = WebDriverWait(self.driver, self.delay).until(\n EC.presence_of_element_located((By.TAG_NAME, \"table\"))\n )\n\n revenue_df = pd.read_html(self.driver.page_source)[0]\n tickers = self.get_tickers()\n revenue_df[\"Symbol\"] = tickers\n revenue_df = revenue_df.iloc[:, [2, 3, 5, 6, 7, 8, 9, 10, 12]]\n revenue_df.columns = [\n \"Date Reported\",\n \"Num of Estimates\",\n \"Delta\",\n \"Surprise\",\n \"Historical Beat Rate\",\n \"Wall St\",\n \"Estimize\",\n \"Actual\",\n \"Symbol\",\n ]\n\n return eps_df, revenue_df\n\n\ndef get_tickers(self):\n # extract ticker symbopls from the html source\n soup = BeautifulSoup(self.driver.page_source, features=\"lxml\")\n ticker_links = soup.findAll(\"a\", attrs={\"class\": \"lfkTWp\"})\n\n # create list of symbols that were extracted\n tickers = []\n for ticker in ticker_links:\n tickers.append(ticker.contents[0])\n\n return tickers\n",
"step-ids": [
6,
7,
8,
9,
11
]
}
|
[
6,
7,
8,
9,
11
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
async def query(request):
    """Return TLE rows selected by the request's query-string parameters.

    ``filters`` (required) is a JSON object translated into SQLAlchemy
    clauses via ``parse_sa_filter``; ``order`` is either a JSON object or
    a bare column name for ``parse_sa_order``; ``only`` is an optional
    comma-separated column list (unknown names are dropped).  Raises
    HTTPBadRequest on missing/malformed parameters.
    """
    if 'filters' not in request.query:
        raise web.HTTPBadRequest(reason='Query parameter `filters` is required')

    sa_filters = []
    try:
        raw_filters = ujson.loads(request.query.get('filters', '{}'))
        for field, value in raw_filters.items():
            sa_filters.extend(parse_sa_filter(TLE, field, value))
    except ValueError:
        raise web.HTTPBadRequest(
            reason='Query parameter `filters` must contains valid JSON')

    raw_order = request.query.get('order', '{}')
    if not raw_order.startswith('{'):
        # Bare column-name form; passed through to parse_sa_order as-is.
        order_spec = raw_order
    else:
        try:
            order_spec = ujson.loads(raw_order)
        except ValueError:
            raise web.HTTPBadRequest(
                reason='Query parameter `order` must contains valid JSON')
    sa_order = parse_sa_order(TLE, order_spec)

    # Restrict the SELECT to the requested columns; invalid names are
    # silently filtered out by check_sa_column.
    selected = [
        get_sa_column(TLE, key)
        for key in request.query.get('only', '').split(',')
        if check_sa_column(TLE, key)
    ]

    stmt = (
        sa.select(selected or [TLE])
        .where(sa.and_(*sa_filters))
        .order_by(*sa_order)
    )
    async with request.app['pg'].acquire() as conn:
        result = await conn.execute(stmt)
        return [dict(row) async for row in result]
async def index(request):
    """Serve a minimal HTML demo page for watching pushed events.

    The page opens a WebSocket back to this server's ``/subscribe``
    endpoint and appends every received message (parsed as JSON) to a
    definition list in the browser.
    """
    # Static inline markup; no request data is interpolated into it.
    html = """
    <html>
        <head>
            <script src="//ajax.googleapis.com/ajax/libs/jquery/1.9.1/jquery.min.js"></script>
            <script>
                var source = new WebSocket('ws://' + window.location.host + '/subscribe');
                function eventListener(event) {
                    var message = JSON.parse(event.data);
                    $('.messages').append([
                        $('<dt>').text(message.channel),
                        $('<dd>').text(event.data),
                    ]);
                }
                source.onmessage = eventListener;
            </script>
        </head>
        <body>
            <dl class="messages"></dl>
        </body>
    </html>
    """
    return web.Response(text=html, content_type='text/html')
async def subscribe(request):
ws = web.WebSocketResponse()
await ws.prepare(request)
request.app['channels'].add(ws)
logger.debug('Someone joined.')
try:
while True:
msg = await ws.receive_json()
if msg.get('command') == 'close':
await ws.close()
except Exception as exc:
logger.exception(exc)
finally:
request.app['channels'].remove(ws)
if ws.closed:
request.app['channels'].remove(ws)
logger.debug('websocket connection closed')
return ws
<|reserved_special_token_1|>
import sqlalchemy as sa
import ujson
from aiohttp import web, WSMsgType
from .db import TLE
from .log import logger
from .utils import parse_sa_filter, parse_sa_order, check_sa_column, get_sa_column
async def query(request):
filters = []
if 'filters' not in request.query:
raise web.HTTPBadRequest(reason='Query parameter `filters` is required'
)
try:
_filters = ujson.loads(request.query.get('filters', '{}'))
for k, v in _filters.items():
filters.extend(parse_sa_filter(TLE, k, v))
except ValueError:
raise web.HTTPBadRequest(reason=
'Query parameter `filters` must contains valid JSON')
_order = request.query.get('order', '{}')
if _order.startswith('{'):
try:
order = ujson.loads(_order)
except ValueError:
raise web.HTTPBadRequest(reason=
'Query parameter `order` must contains valid JSON')
else:
order = _order
order = parse_sa_order(TLE, order)
only = [get_sa_column(TLE, key) for key in request.query.get('only', ''
).split(',') if check_sa_column(TLE, key)]
async with request.app['pg'].acquire() as conn:
rp = await conn.execute(sa.select(only or [TLE]).where(sa.and_(*
filters)).order_by(*order))
return [dict(r) async for r in rp]
async def index(request):
html = """
<html>
<head>
<script src="//ajax.googleapis.com/ajax/libs/jquery/1.9.1/jquery.min.js"></script>
<script>
var source = new WebSocket('ws://' + window.location.host + '/subscribe');
function eventListener(event) {
var message = JSON.parse(event.data);
$('.messages').append([
$('<dt>').text(message.channel),
$('<dd>').text(event.data),
]);
}
source.onmessage = eventListener;
</script>
</head>
<body>
<dl class="messages"></dl>
</body>
</html>
"""
return web.Response(text=html, content_type='text/html')
async def subscribe(request):
ws = web.WebSocketResponse()
await ws.prepare(request)
request.app['channels'].add(ws)
logger.debug('Someone joined.')
try:
while True:
msg = await ws.receive_json()
if msg.get('command') == 'close':
await ws.close()
except Exception as exc:
logger.exception(exc)
finally:
request.app['channels'].remove(ws)
if ws.closed:
request.app['channels'].remove(ws)
logger.debug('websocket connection closed')
return ws
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
import sqlalchemy as sa
import ujson
from aiohttp import web, WSMsgType
from .db import TLE
from .log import logger
from .utils import parse_sa_filter, parse_sa_order, check_sa_column, get_sa_column
async def query(request):
filters = []
if 'filters' not in request.query:
raise web.HTTPBadRequest(reason='Query parameter `filters` is required')
try:
_filters = ujson.loads(request.query.get('filters', '{}'))
for k, v in _filters.items():
filters.extend(parse_sa_filter(TLE, k, v))
except ValueError:
raise web.HTTPBadRequest(reason='Query parameter `filters` must contains valid JSON')
_order = request.query.get('order', '{}')
if _order.startswith('{'):
try:
order = ujson.loads(_order)
except ValueError:
raise web.HTTPBadRequest(reason='Query parameter `order` must contains valid JSON')
else:
order = _order
order = parse_sa_order(TLE, order)
only = [get_sa_column(TLE, key) for key in request.query.get('only', '').split(',') if check_sa_column(TLE, key)]
async with request.app['pg'].acquire() as conn:
rp = await conn.execute(sa.select(only or [TLE]).where(sa.and_(*filters)).order_by(*order))
return [dict(r) async for r in rp]
async def index(request):
html = '''
<html>
<head>
<script src="//ajax.googleapis.com/ajax/libs/jquery/1.9.1/jquery.min.js"></script>
<script>
var source = new WebSocket('ws://' + window.location.host + '/subscribe');
function eventListener(event) {
var message = JSON.parse(event.data);
$('.messages').append([
$('<dt>').text(message.channel),
$('<dd>').text(event.data),
]);
}
source.onmessage = eventListener;
</script>
</head>
<body>
<dl class="messages"></dl>
</body>
</html>
'''
return web.Response(text=html, content_type='text/html')
async def subscribe(request):
ws = web.WebSocketResponse()
await ws.prepare(request)
request.app['channels'].add(ws)
logger.debug('Someone joined.')
try:
while True:
msg = await ws.receive_json()
if msg.get('command') == 'close':
await ws.close()
except Exception as exc:
logger.exception(exc)
finally:
request.app['channels'].remove(ws)
if ws.closed:
request.app['channels'].remove(ws)
logger.debug('websocket connection closed')
return ws
|
flexible
|
{
"blob_id": "c414e5d3934f741540fb5721a529b48f95e17016",
"index": 5982,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nasync def query(request):\n filters = []\n if 'filters' not in request.query:\n raise web.HTTPBadRequest(reason='Query parameter `filters` is required'\n )\n try:\n _filters = ujson.loads(request.query.get('filters', '{}'))\n for k, v in _filters.items():\n filters.extend(parse_sa_filter(TLE, k, v))\n except ValueError:\n raise web.HTTPBadRequest(reason=\n 'Query parameter `filters` must contains valid JSON')\n _order = request.query.get('order', '{}')\n if _order.startswith('{'):\n try:\n order = ujson.loads(_order)\n except ValueError:\n raise web.HTTPBadRequest(reason=\n 'Query parameter `order` must contains valid JSON')\n else:\n order = _order\n order = parse_sa_order(TLE, order)\n only = [get_sa_column(TLE, key) for key in request.query.get('only', ''\n ).split(',') if check_sa_column(TLE, key)]\n async with request.app['pg'].acquire() as conn:\n rp = await conn.execute(sa.select(only or [TLE]).where(sa.and_(*\n filters)).order_by(*order))\n return [dict(r) async for r in rp]\n\n\nasync def index(request):\n html = \"\"\"\n <html>\n <head>\n <script src=\"//ajax.googleapis.com/ajax/libs/jquery/1.9.1/jquery.min.js\"></script>\n <script>\n var source = new WebSocket('ws://' + window.location.host + '/subscribe');\n function eventListener(event) {\n var message = JSON.parse(event.data);\n $('.messages').append([\n $('<dt>').text(message.channel),\n $('<dd>').text(event.data),\n ]);\n }\n source.onmessage = eventListener;\n </script>\n </head>\n <body>\n <dl class=\"messages\"></dl>\n </body>\n </html>\n \"\"\"\n return web.Response(text=html, content_type='text/html')\n\n\nasync def subscribe(request):\n ws = web.WebSocketResponse()\n await ws.prepare(request)\n request.app['channels'].add(ws)\n logger.debug('Someone joined.')\n try:\n while True:\n msg = await ws.receive_json()\n if msg.get('command') == 'close':\n await ws.close()\n except Exception as exc:\n logger.exception(exc)\n finally:\n request.app['channels'].remove(ws)\n 
if ws.closed:\n request.app['channels'].remove(ws)\n logger.debug('websocket connection closed')\n return ws\n",
"step-3": "import sqlalchemy as sa\nimport ujson\nfrom aiohttp import web, WSMsgType\nfrom .db import TLE\nfrom .log import logger\nfrom .utils import parse_sa_filter, parse_sa_order, check_sa_column, get_sa_column\n\n\nasync def query(request):\n filters = []\n if 'filters' not in request.query:\n raise web.HTTPBadRequest(reason='Query parameter `filters` is required'\n )\n try:\n _filters = ujson.loads(request.query.get('filters', '{}'))\n for k, v in _filters.items():\n filters.extend(parse_sa_filter(TLE, k, v))\n except ValueError:\n raise web.HTTPBadRequest(reason=\n 'Query parameter `filters` must contains valid JSON')\n _order = request.query.get('order', '{}')\n if _order.startswith('{'):\n try:\n order = ujson.loads(_order)\n except ValueError:\n raise web.HTTPBadRequest(reason=\n 'Query parameter `order` must contains valid JSON')\n else:\n order = _order\n order = parse_sa_order(TLE, order)\n only = [get_sa_column(TLE, key) for key in request.query.get('only', ''\n ).split(',') if check_sa_column(TLE, key)]\n async with request.app['pg'].acquire() as conn:\n rp = await conn.execute(sa.select(only or [TLE]).where(sa.and_(*\n filters)).order_by(*order))\n return [dict(r) async for r in rp]\n\n\nasync def index(request):\n html = \"\"\"\n <html>\n <head>\n <script src=\"//ajax.googleapis.com/ajax/libs/jquery/1.9.1/jquery.min.js\"></script>\n <script>\n var source = new WebSocket('ws://' + window.location.host + '/subscribe');\n function eventListener(event) {\n var message = JSON.parse(event.data);\n $('.messages').append([\n $('<dt>').text(message.channel),\n $('<dd>').text(event.data),\n ]);\n }\n source.onmessage = eventListener;\n </script>\n </head>\n <body>\n <dl class=\"messages\"></dl>\n </body>\n </html>\n \"\"\"\n return web.Response(text=html, content_type='text/html')\n\n\nasync def subscribe(request):\n ws = web.WebSocketResponse()\n await ws.prepare(request)\n request.app['channels'].add(ws)\n logger.debug('Someone joined.')\n try:\n while 
True:\n msg = await ws.receive_json()\n if msg.get('command') == 'close':\n await ws.close()\n except Exception as exc:\n logger.exception(exc)\n finally:\n request.app['channels'].remove(ws)\n if ws.closed:\n request.app['channels'].remove(ws)\n logger.debug('websocket connection closed')\n return ws\n",
"step-4": "# -*- coding: utf-8 -*-\n\nimport sqlalchemy as sa\nimport ujson\nfrom aiohttp import web, WSMsgType\n\nfrom .db import TLE\nfrom .log import logger\nfrom .utils import parse_sa_filter, parse_sa_order, check_sa_column, get_sa_column\n\n\nasync def query(request):\n filters = []\n if 'filters' not in request.query:\n raise web.HTTPBadRequest(reason='Query parameter `filters` is required')\n\n try:\n _filters = ujson.loads(request.query.get('filters', '{}'))\n for k, v in _filters.items():\n filters.extend(parse_sa_filter(TLE, k, v))\n except ValueError:\n raise web.HTTPBadRequest(reason='Query parameter `filters` must contains valid JSON')\n\n _order = request.query.get('order', '{}')\n if _order.startswith('{'):\n try:\n order = ujson.loads(_order)\n except ValueError:\n raise web.HTTPBadRequest(reason='Query parameter `order` must contains valid JSON')\n else:\n order = _order\n\n order = parse_sa_order(TLE, order)\n only = [get_sa_column(TLE, key) for key in request.query.get('only', '').split(',') if check_sa_column(TLE, key)]\n\n async with request.app['pg'].acquire() as conn:\n rp = await conn.execute(sa.select(only or [TLE]).where(sa.and_(*filters)).order_by(*order))\n return [dict(r) async for r in rp]\n\n\nasync def index(request):\n html = '''\n <html>\n <head>\n <script src=\"//ajax.googleapis.com/ajax/libs/jquery/1.9.1/jquery.min.js\"></script>\n <script>\n var source = new WebSocket('ws://' + window.location.host + '/subscribe');\n function eventListener(event) {\n var message = JSON.parse(event.data);\n $('.messages').append([\n $('<dt>').text(message.channel),\n $('<dd>').text(event.data),\n ]);\n }\n source.onmessage = eventListener;\n </script>\n </head>\n <body>\n <dl class=\"messages\"></dl>\n </body>\n </html>\n '''\n return web.Response(text=html, content_type='text/html')\n\n\nasync def subscribe(request):\n ws = web.WebSocketResponse()\n await ws.prepare(request)\n request.app['channels'].add(ws)\n logger.debug('Someone joined.')\n 
try:\n while True:\n msg = await ws.receive_json()\n if msg.get('command') == 'close':\n await ws.close()\n except Exception as exc:\n logger.exception(exc)\n finally:\n request.app['channels'].remove(ws)\n\n if ws.closed:\n request.app['channels'].remove(ws)\n\n logger.debug('websocket connection closed')\n return ws\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class StackingModelTester:
def __init__(self, **kwargs):
self.__use_bias = kwargs['use_bias']
self.__use_peepholes = kwargs['use_peepholes']
self.__input_size = kwargs['input_size']
self.__output_size = kwargs['output_size']
self.__binary_train_file_path = kwargs['binary_train_file_path']
self.__binary_test_file_path = kwargs['binary_test_file_path']
self.__seed = kwargs['seed']
self.__cell_type = kwargs['cell_type']
<|reserved_special_token_0|>
def __l2_loss(selfself, z, t):
loss = tf.losses.mean_squared_error(labels=t, predictions=z)
return loss
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class StackingModelTester:
def __init__(self, **kwargs):
self.__use_bias = kwargs['use_bias']
self.__use_peepholes = kwargs['use_peepholes']
self.__input_size = kwargs['input_size']
self.__output_size = kwargs['output_size']
self.__binary_train_file_path = kwargs['binary_train_file_path']
self.__binary_test_file_path = kwargs['binary_test_file_path']
self.__seed = kwargs['seed']
self.__cell_type = kwargs['cell_type']
def __l1_loss(self, z, t):
loss = tf.reduce_mean(tf.abs(t - z))
return loss
def __l2_loss(selfself, z, t):
loss = tf.losses.mean_squared_error(labels=t, predictions=z)
return loss
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class StackingModelTester:
def __init__(self, **kwargs):
self.__use_bias = kwargs['use_bias']
self.__use_peepholes = kwargs['use_peepholes']
self.__input_size = kwargs['input_size']
self.__output_size = kwargs['output_size']
self.__binary_train_file_path = kwargs['binary_train_file_path']
self.__binary_test_file_path = kwargs['binary_test_file_path']
self.__seed = kwargs['seed']
self.__cell_type = kwargs['cell_type']
def __l1_loss(self, z, t):
loss = tf.reduce_mean(tf.abs(t - z))
return loss
def __l2_loss(selfself, z, t):
loss = tf.losses.mean_squared_error(labels=t, predictions=z)
return loss
def test_model(self, **kwargs):
num_hidden_layers = kwargs['num_hidden_layers']
cell_dimension = kwargs['cell_dimension']
minibatch_size = kwargs['minibatch_size']
max_epoch_size = kwargs['max_epoch_size']
max_num_epochs = kwargs['max_num_epochs']
l2_regularization = kwargs['l2_regularization']
gaussian_noise_stdev = kwargs['gaussian_noise_stdev']
optimizer_fn = kwargs['optimizer_fn']
random_normal_initializer_stdev = kwargs[
'random_normal_initializer_stdev']
tf.reset_default_graph()
tf.set_random_seed(self.__seed)
input = tf.placeholder(dtype=tf.float32, shape=[None, None, self.
__input_size])
noise = tf.random_normal(shape=tf.shape(input), mean=0.0, stddev=
gaussian_noise_stdev, dtype=tf.float32)
training_input = input + noise
testing_input = input
true_output = tf.placeholder(dtype=tf.float32, shape=[None, None,
self.__output_size])
sequence_lengths = tf.placeholder(dtype=tf.int64, shape=[None])
weight_initializer = tf.truncated_normal_initializer(stddev=
random_normal_initializer_stdev)
def cell():
if self.__cell_type == 'LSTM':
cell = tf.nn.rnn_cell.LSTMCell(num_units=int(cell_dimension
), use_peepholes=self.__use_peepholes, initializer=
weight_initializer)
elif self.__cell_type == 'GRU':
cell = tf.nn.rnn_cell.GRUCell(num_units=int(cell_dimension),
kernel_initializer=weight_initializer)
elif self.__cell_type == 'RNN':
cell = tf.nn.rnn_cell.BasicRNNCell(num_units=int(
cell_dimension))
return cell
multi_layered_cell = tf.nn.rnn_cell.MultiRNNCell(cells=[cell() for
_ in range(int(num_hidden_layers))])
with tf.variable_scope('train_scope') as train_scope:
training_rnn_outputs, training_rnn_states = tf.nn.dynamic_rnn(cell
=multi_layered_cell, inputs=training_input, sequence_length
=sequence_lengths, dtype=tf.float32)
training_prediction_output = tf.layers.dense(inputs=tf.
convert_to_tensor(value=training_rnn_outputs, dtype=tf.
float32), units=self.__output_size, use_bias=self.
__use_bias, kernel_initializer=weight_initializer, name=
'dense_layer')
with tf.variable_scope(train_scope, reuse=tf.AUTO_REUSE
) as inference_scope:
inference_rnn_outputs, inference_rnn_states = tf.nn.dynamic_rnn(
cell=multi_layered_cell, inputs=testing_input,
sequence_length=sequence_lengths, dtype=tf.float32)
inference_prediction_output = tf.layers.dense(inputs=tf.
convert_to_tensor(value=inference_rnn_outputs, dtype=tf.
float32), units=self.__output_size, use_bias=self.
__use_bias, kernel_initializer=weight_initializer, name=
'dense_layer', reuse=True)
error = self.__l1_loss(training_prediction_output, true_output)
l2_loss = 0.0
for var in tf.trainable_variables():
l2_loss += tf.nn.l2_loss(var)
l2_loss = tf.multiply(tf.cast(l2_regularization, dtype=tf.float64),
tf.cast(l2_loss, dtype=tf.float64))
total_loss = tf.cast(error, dtype=tf.float64) + l2_loss
optimizer = optimizer_fn(total_loss)
training_dataset = tf.data.TFRecordDataset(filenames=[self.
__binary_train_file_path], compression_type='ZLIB')
test_dataset = tf.data.TFRecordDataset([self.
__binary_test_file_path], compression_type='ZLIB')
tfrecord_reader = TFRecordReader(self.__input_size, self.__output_size)
shuffle_seed = tf.placeholder(dtype=tf.int64, shape=[])
training_dataset = training_dataset.repeat(count=int(max_epoch_size))
training_dataset = training_dataset.map(tfrecord_reader.
validation_data_parser)
padded_training_data_batches = training_dataset.padded_batch(batch_size
=int(minibatch_size), padded_shapes=([], [tf.Dimension(None),
self.__input_size], [tf.Dimension(None), self.__output_size], [
tf.Dimension(None), self.__output_size + 2]))
training_data_batch_iterator = (padded_training_data_batches.
make_initializable_iterator())
next_training_data_batch = training_data_batch_iterator.get_next()
test_dataset = test_dataset.map(tfrecord_reader.test_data_parser)
padded_test_input_data = test_dataset.padded_batch(batch_size=int(
minibatch_size), padded_shapes=([], [tf.Dimension(None), self.
__input_size], [tf.Dimension(None), self.__output_size + 2]))
test_input_iterator = padded_test_input_data.make_one_shot_iterator()
test_input_data_batch = test_input_iterator.get_next()
init_op = tf.global_variables_initializer()
with tf.Session() as session:
session.run(init_op)
for epoch in range(int(max_num_epochs)):
print('Epoch->', epoch)
session.run(training_data_batch_iterator.initializer,
feed_dict={shuffle_seed: epoch})
while True:
try:
training_data_batch_value = session.run(
next_training_data_batch, feed_dict={
shuffle_seed: epoch})
session.run(optimizer, feed_dict={input:
training_data_batch_value[1], true_output:
training_data_batch_value[2], sequence_lengths:
training_data_batch_value[0]})
except tf.errors.OutOfRangeError:
break
list_of_forecasts = []
while True:
try:
test_input_batch_value = session.run(test_input_data_batch)
test_output = session.run(inference_prediction_output,
feed_dict={input: test_input_batch_value[1],
sequence_lengths: test_input_batch_value[0]})
last_output_index = test_input_batch_value[0] - 1
array_first_dimension = np.array(range(0,
test_input_batch_value[0].shape[0]))
forecasts = test_output[array_first_dimension,
last_output_index]
list_of_forecasts.extend(forecasts.tolist())
except tf.errors.OutOfRangeError:
break
session.close()
return list_of_forecasts
<|reserved_special_token_1|>
import numpy as np
import tensorflow as tf
from tfrecords_handler.moving_window.tfrecord_mean_reader import TFRecordReader
from configs.global_configs import training_data_configs
class StackingModelTester:
def __init__(self, **kwargs):
self.__use_bias = kwargs['use_bias']
self.__use_peepholes = kwargs['use_peepholes']
self.__input_size = kwargs['input_size']
self.__output_size = kwargs['output_size']
self.__binary_train_file_path = kwargs['binary_train_file_path']
self.__binary_test_file_path = kwargs['binary_test_file_path']
self.__seed = kwargs['seed']
self.__cell_type = kwargs['cell_type']
def __l1_loss(self, z, t):
loss = tf.reduce_mean(tf.abs(t - z))
return loss
def __l2_loss(selfself, z, t):
loss = tf.losses.mean_squared_error(labels=t, predictions=z)
return loss
def test_model(self, **kwargs):
num_hidden_layers = kwargs['num_hidden_layers']
cell_dimension = kwargs['cell_dimension']
minibatch_size = kwargs['minibatch_size']
max_epoch_size = kwargs['max_epoch_size']
max_num_epochs = kwargs['max_num_epochs']
l2_regularization = kwargs['l2_regularization']
gaussian_noise_stdev = kwargs['gaussian_noise_stdev']
optimizer_fn = kwargs['optimizer_fn']
random_normal_initializer_stdev = kwargs[
'random_normal_initializer_stdev']
tf.reset_default_graph()
tf.set_random_seed(self.__seed)
input = tf.placeholder(dtype=tf.float32, shape=[None, None, self.
__input_size])
noise = tf.random_normal(shape=tf.shape(input), mean=0.0, stddev=
gaussian_noise_stdev, dtype=tf.float32)
training_input = input + noise
testing_input = input
true_output = tf.placeholder(dtype=tf.float32, shape=[None, None,
self.__output_size])
sequence_lengths = tf.placeholder(dtype=tf.int64, shape=[None])
weight_initializer = tf.truncated_normal_initializer(stddev=
random_normal_initializer_stdev)
def cell():
if self.__cell_type == 'LSTM':
cell = tf.nn.rnn_cell.LSTMCell(num_units=int(cell_dimension
), use_peepholes=self.__use_peepholes, initializer=
weight_initializer)
elif self.__cell_type == 'GRU':
cell = tf.nn.rnn_cell.GRUCell(num_units=int(cell_dimension),
kernel_initializer=weight_initializer)
elif self.__cell_type == 'RNN':
cell = tf.nn.rnn_cell.BasicRNNCell(num_units=int(
cell_dimension))
return cell
multi_layered_cell = tf.nn.rnn_cell.MultiRNNCell(cells=[cell() for
_ in range(int(num_hidden_layers))])
with tf.variable_scope('train_scope') as train_scope:
training_rnn_outputs, training_rnn_states = tf.nn.dynamic_rnn(cell
=multi_layered_cell, inputs=training_input, sequence_length
=sequence_lengths, dtype=tf.float32)
training_prediction_output = tf.layers.dense(inputs=tf.
convert_to_tensor(value=training_rnn_outputs, dtype=tf.
float32), units=self.__output_size, use_bias=self.
__use_bias, kernel_initializer=weight_initializer, name=
'dense_layer')
with tf.variable_scope(train_scope, reuse=tf.AUTO_REUSE
) as inference_scope:
inference_rnn_outputs, inference_rnn_states = tf.nn.dynamic_rnn(
cell=multi_layered_cell, inputs=testing_input,
sequence_length=sequence_lengths, dtype=tf.float32)
inference_prediction_output = tf.layers.dense(inputs=tf.
convert_to_tensor(value=inference_rnn_outputs, dtype=tf.
float32), units=self.__output_size, use_bias=self.
__use_bias, kernel_initializer=weight_initializer, name=
'dense_layer', reuse=True)
error = self.__l1_loss(training_prediction_output, true_output)
l2_loss = 0.0
for var in tf.trainable_variables():
l2_loss += tf.nn.l2_loss(var)
l2_loss = tf.multiply(tf.cast(l2_regularization, dtype=tf.float64),
tf.cast(l2_loss, dtype=tf.float64))
total_loss = tf.cast(error, dtype=tf.float64) + l2_loss
optimizer = optimizer_fn(total_loss)
training_dataset = tf.data.TFRecordDataset(filenames=[self.
__binary_train_file_path], compression_type='ZLIB')
test_dataset = tf.data.TFRecordDataset([self.
__binary_test_file_path], compression_type='ZLIB')
tfrecord_reader = TFRecordReader(self.__input_size, self.__output_size)
shuffle_seed = tf.placeholder(dtype=tf.int64, shape=[])
training_dataset = training_dataset.repeat(count=int(max_epoch_size))
training_dataset = training_dataset.map(tfrecord_reader.
validation_data_parser)
padded_training_data_batches = training_dataset.padded_batch(batch_size
=int(minibatch_size), padded_shapes=([], [tf.Dimension(None),
self.__input_size], [tf.Dimension(None), self.__output_size], [
tf.Dimension(None), self.__output_size + 2]))
training_data_batch_iterator = (padded_training_data_batches.
make_initializable_iterator())
next_training_data_batch = training_data_batch_iterator.get_next()
test_dataset = test_dataset.map(tfrecord_reader.test_data_parser)
padded_test_input_data = test_dataset.padded_batch(batch_size=int(
minibatch_size), padded_shapes=([], [tf.Dimension(None), self.
__input_size], [tf.Dimension(None), self.__output_size + 2]))
test_input_iterator = padded_test_input_data.make_one_shot_iterator()
test_input_data_batch = test_input_iterator.get_next()
init_op = tf.global_variables_initializer()
with tf.Session() as session:
session.run(init_op)
for epoch in range(int(max_num_epochs)):
print('Epoch->', epoch)
session.run(training_data_batch_iterator.initializer,
feed_dict={shuffle_seed: epoch})
while True:
try:
training_data_batch_value = session.run(
next_training_data_batch, feed_dict={
shuffle_seed: epoch})
session.run(optimizer, feed_dict={input:
training_data_batch_value[1], true_output:
training_data_batch_value[2], sequence_lengths:
training_data_batch_value[0]})
except tf.errors.OutOfRangeError:
break
list_of_forecasts = []
while True:
try:
test_input_batch_value = session.run(test_input_data_batch)
test_output = session.run(inference_prediction_output,
feed_dict={input: test_input_batch_value[1],
sequence_lengths: test_input_batch_value[0]})
last_output_index = test_input_batch_value[0] - 1
array_first_dimension = np.array(range(0,
test_input_batch_value[0].shape[0]))
forecasts = test_output[array_first_dimension,
last_output_index]
list_of_forecasts.extend(forecasts.tolist())
except tf.errors.OutOfRangeError:
break
session.close()
return list_of_forecasts
<|reserved_special_token_1|>
import numpy as np
import tensorflow as tf
from tfrecords_handler.moving_window.tfrecord_mean_reader import TFRecordReader
from configs.global_configs import training_data_configs
class StackingModelTester:
def __init__(self, **kwargs):
self.__use_bias = kwargs["use_bias"]
self.__use_peepholes = kwargs["use_peepholes"]
self.__input_size = kwargs["input_size"]
self.__output_size = kwargs["output_size"]
self.__binary_train_file_path = kwargs["binary_train_file_path"]
self.__binary_test_file_path = kwargs["binary_test_file_path"]
self.__seed = kwargs["seed"]
self.__cell_type = kwargs["cell_type"]
def __l1_loss(self, z, t):
loss = tf.reduce_mean(tf.abs(t - z))
return loss
def __l2_loss(selfself, z, t):
loss = tf.losses.mean_squared_error(labels=t, predictions=z)
return loss
# Training the time series
def test_model(self, **kwargs):
# extract the parameters from the kwargs
num_hidden_layers = kwargs['num_hidden_layers']
cell_dimension = kwargs['cell_dimension']
minibatch_size = kwargs['minibatch_size']
max_epoch_size = kwargs['max_epoch_size']
max_num_epochs = kwargs['max_num_epochs']
l2_regularization = kwargs['l2_regularization']
gaussian_noise_stdev = kwargs['gaussian_noise_stdev']
optimizer_fn = kwargs['optimizer_fn']
random_normal_initializer_stdev = kwargs['random_normal_initializer_stdev']
# reset the tensorflow graph
tf.reset_default_graph()
tf.set_random_seed(self.__seed)
# declare the input and output placeholders
input = tf.placeholder(dtype=tf.float32, shape=[None, None, self.__input_size])
noise = tf.random_normal(shape=tf.shape(input), mean=0.0, stddev=gaussian_noise_stdev, dtype=tf.float32)
training_input = input + noise
testing_input = input
# output format [batch_size, sequence_length, dimension]
true_output = tf.placeholder(dtype=tf.float32, shape=[None, None, self.__output_size])
sequence_lengths = tf.placeholder(dtype=tf.int64, shape=[None])
weight_initializer = tf.truncated_normal_initializer(stddev=random_normal_initializer_stdev)
# RNN with the layer of cells
def cell():
if self.__cell_type == "LSTM":
cell = tf.nn.rnn_cell.LSTMCell(num_units=int(cell_dimension), use_peepholes=self.__use_peepholes,
initializer=weight_initializer)
elif self.__cell_type == "GRU":
cell = tf.nn.rnn_cell.GRUCell(num_units=int(cell_dimension), kernel_initializer=weight_initializer)
elif self.__cell_type == "RNN":
cell = tf.nn.rnn_cell.BasicRNNCell(num_units=int(cell_dimension))
return cell
multi_layered_cell = tf.nn.rnn_cell.MultiRNNCell(cells=[cell() for _ in range(int(num_hidden_layers))])
with tf.variable_scope('train_scope') as train_scope:
training_rnn_outputs, training_rnn_states = tf.nn.dynamic_rnn(cell=multi_layered_cell,
inputs=training_input,
sequence_length=sequence_lengths,
dtype=tf.float32)
# connect the dense layer to the RNN
training_prediction_output = tf.layers.dense(
inputs=tf.convert_to_tensor(value=training_rnn_outputs, dtype=tf.float32),
units=self.__output_size,
use_bias=self.__use_bias, kernel_initializer=weight_initializer, name='dense_layer')
with tf.variable_scope(train_scope, reuse=tf.AUTO_REUSE) as inference_scope:
inference_rnn_outputs, inference_rnn_states = tf.nn.dynamic_rnn(cell=multi_layered_cell,
inputs=testing_input,
sequence_length=sequence_lengths,
dtype=tf.float32)
# connect the dense layer to the RNN
inference_prediction_output = tf.layers.dense(
inputs=tf.convert_to_tensor(value=inference_rnn_outputs, dtype=tf.float32),
units=self.__output_size,
use_bias=self.__use_bias, kernel_initializer=weight_initializer, name='dense_layer', reuse=True)
# error that should be minimized in the training process
error = self.__l1_loss(training_prediction_output, true_output)
# l2 regularization of the trainable model parameters
l2_loss = 0.0
for var in tf.trainable_variables():
l2_loss += tf.nn.l2_loss(var)
l2_loss = tf.multiply(tf.cast(l2_regularization, dtype=tf.float64), tf.cast(l2_loss, dtype=tf.float64))
total_loss = tf.cast(error, dtype=tf.float64) + l2_loss
# create the adagrad optimizer
optimizer = optimizer_fn(total_loss)
# create the Dataset objects for the training and test data
training_dataset = tf.data.TFRecordDataset(filenames=[self.__binary_train_file_path], compression_type="ZLIB")
test_dataset = tf.data.TFRecordDataset([self.__binary_test_file_path], compression_type="ZLIB")
# parse the records
tfrecord_reader = TFRecordReader(self.__input_size, self.__output_size)
# prepare the training data into batches
# randomly shuffle the time series within the dataset
shuffle_seed = tf.placeholder(dtype=tf.int64, shape=[])
# training_dataset = training_dataset.apply(
# tf.data.experimental.shuffle_and_repeat(buffer_size=training_data_configs.SHUFFLE_BUFFER_SIZE,
# count=int(max_epoch_size), seed=shuffle_seed))
training_dataset = training_dataset.repeat(count=int(max_epoch_size))
training_dataset = training_dataset.map(tfrecord_reader.validation_data_parser)
# create the batches by padding the datasets to make the variable sequence lengths fixed within the individual batches
padded_training_data_batches = training_dataset.padded_batch(batch_size=int(minibatch_size),
padded_shapes=(
[], [tf.Dimension(None), self.__input_size],
[tf.Dimension(None), self.__output_size],
[tf.Dimension(None), self.__output_size + 2]))
# get an iterator to the batches
training_data_batch_iterator = padded_training_data_batches.make_initializable_iterator()
# access each batch using the iterator
next_training_data_batch = training_data_batch_iterator.get_next()
# preparing the test data
test_dataset = test_dataset.map(tfrecord_reader.test_data_parser)
# create a single batch from all the test time series by padding the datasets to make the variable sequence lengths fixed
padded_test_input_data = test_dataset.padded_batch(batch_size=int(minibatch_size),
padded_shapes=([], [tf.Dimension(None), self.__input_size],
[tf.Dimension(None), self.__output_size + 2]))
# get an iterator to the test input data batch
test_input_iterator = padded_test_input_data.make_one_shot_iterator()
# access the test input batch using the iterator
test_input_data_batch = test_input_iterator.get_next()
# setup variable initialization
init_op = tf.global_variables_initializer()
with tf.Session() as session:
session.run(init_op)
for epoch in range(int(max_num_epochs)):
print("Epoch->", epoch)
session.run(training_data_batch_iterator.initializer, feed_dict={shuffle_seed: epoch})
while True:
try:
training_data_batch_value = session.run(next_training_data_batch,
feed_dict={shuffle_seed: epoch})
session.run(optimizer,
feed_dict={input: training_data_batch_value[1],
true_output: training_data_batch_value[2],
sequence_lengths: training_data_batch_value[0]})
except tf.errors.OutOfRangeError:
break
# applying the model to the test data
list_of_forecasts = []
while True:
try:
# get the batch of test inputs
test_input_batch_value = session.run(test_input_data_batch)
# get the output of the network for the test input data batch
test_output = session.run(inference_prediction_output,
feed_dict={input: test_input_batch_value[1],
sequence_lengths: test_input_batch_value[0]})
last_output_index = test_input_batch_value[0] - 1
array_first_dimension = np.array(range(0, test_input_batch_value[0].shape[0]))
forecasts = test_output[array_first_dimension, last_output_index]
list_of_forecasts.extend(forecasts.tolist())
except tf.errors.OutOfRangeError:
break
session.close()
return list_of_forecasts
|
flexible
|
{
"blob_id": "3b7839347f24d39904d29d40e688a5dfd63534d7",
"index": 3560,
"step-1": "<mask token>\n\n\nclass StackingModelTester:\n\n def __init__(self, **kwargs):\n self.__use_bias = kwargs['use_bias']\n self.__use_peepholes = kwargs['use_peepholes']\n self.__input_size = kwargs['input_size']\n self.__output_size = kwargs['output_size']\n self.__binary_train_file_path = kwargs['binary_train_file_path']\n self.__binary_test_file_path = kwargs['binary_test_file_path']\n self.__seed = kwargs['seed']\n self.__cell_type = kwargs['cell_type']\n <mask token>\n\n def __l2_loss(selfself, z, t):\n loss = tf.losses.mean_squared_error(labels=t, predictions=z)\n return loss\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass StackingModelTester:\n\n def __init__(self, **kwargs):\n self.__use_bias = kwargs['use_bias']\n self.__use_peepholes = kwargs['use_peepholes']\n self.__input_size = kwargs['input_size']\n self.__output_size = kwargs['output_size']\n self.__binary_train_file_path = kwargs['binary_train_file_path']\n self.__binary_test_file_path = kwargs['binary_test_file_path']\n self.__seed = kwargs['seed']\n self.__cell_type = kwargs['cell_type']\n\n def __l1_loss(self, z, t):\n loss = tf.reduce_mean(tf.abs(t - z))\n return loss\n\n def __l2_loss(selfself, z, t):\n loss = tf.losses.mean_squared_error(labels=t, predictions=z)\n return loss\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass StackingModelTester:\n\n def __init__(self, **kwargs):\n self.__use_bias = kwargs['use_bias']\n self.__use_peepholes = kwargs['use_peepholes']\n self.__input_size = kwargs['input_size']\n self.__output_size = kwargs['output_size']\n self.__binary_train_file_path = kwargs['binary_train_file_path']\n self.__binary_test_file_path = kwargs['binary_test_file_path']\n self.__seed = kwargs['seed']\n self.__cell_type = kwargs['cell_type']\n\n def __l1_loss(self, z, t):\n loss = tf.reduce_mean(tf.abs(t - z))\n return loss\n\n def __l2_loss(selfself, z, t):\n loss = tf.losses.mean_squared_error(labels=t, predictions=z)\n return loss\n\n def test_model(self, **kwargs):\n num_hidden_layers = kwargs['num_hidden_layers']\n cell_dimension = kwargs['cell_dimension']\n minibatch_size = kwargs['minibatch_size']\n max_epoch_size = kwargs['max_epoch_size']\n max_num_epochs = kwargs['max_num_epochs']\n l2_regularization = kwargs['l2_regularization']\n gaussian_noise_stdev = kwargs['gaussian_noise_stdev']\n optimizer_fn = kwargs['optimizer_fn']\n random_normal_initializer_stdev = kwargs[\n 'random_normal_initializer_stdev']\n tf.reset_default_graph()\n tf.set_random_seed(self.__seed)\n input = tf.placeholder(dtype=tf.float32, shape=[None, None, self.\n __input_size])\n noise = tf.random_normal(shape=tf.shape(input), mean=0.0, stddev=\n gaussian_noise_stdev, dtype=tf.float32)\n training_input = input + noise\n testing_input = input\n true_output = tf.placeholder(dtype=tf.float32, shape=[None, None,\n self.__output_size])\n sequence_lengths = tf.placeholder(dtype=tf.int64, shape=[None])\n weight_initializer = tf.truncated_normal_initializer(stddev=\n random_normal_initializer_stdev)\n\n def cell():\n if self.__cell_type == 'LSTM':\n cell = tf.nn.rnn_cell.LSTMCell(num_units=int(cell_dimension\n ), use_peepholes=self.__use_peepholes, initializer=\n weight_initializer)\n elif self.__cell_type == 'GRU':\n cell = 
tf.nn.rnn_cell.GRUCell(num_units=int(cell_dimension),\n kernel_initializer=weight_initializer)\n elif self.__cell_type == 'RNN':\n cell = tf.nn.rnn_cell.BasicRNNCell(num_units=int(\n cell_dimension))\n return cell\n multi_layered_cell = tf.nn.rnn_cell.MultiRNNCell(cells=[cell() for\n _ in range(int(num_hidden_layers))])\n with tf.variable_scope('train_scope') as train_scope:\n training_rnn_outputs, training_rnn_states = tf.nn.dynamic_rnn(cell\n =multi_layered_cell, inputs=training_input, sequence_length\n =sequence_lengths, dtype=tf.float32)\n training_prediction_output = tf.layers.dense(inputs=tf.\n convert_to_tensor(value=training_rnn_outputs, dtype=tf.\n float32), units=self.__output_size, use_bias=self.\n __use_bias, kernel_initializer=weight_initializer, name=\n 'dense_layer')\n with tf.variable_scope(train_scope, reuse=tf.AUTO_REUSE\n ) as inference_scope:\n inference_rnn_outputs, inference_rnn_states = tf.nn.dynamic_rnn(\n cell=multi_layered_cell, inputs=testing_input,\n sequence_length=sequence_lengths, dtype=tf.float32)\n inference_prediction_output = tf.layers.dense(inputs=tf.\n convert_to_tensor(value=inference_rnn_outputs, dtype=tf.\n float32), units=self.__output_size, use_bias=self.\n __use_bias, kernel_initializer=weight_initializer, name=\n 'dense_layer', reuse=True)\n error = self.__l1_loss(training_prediction_output, true_output)\n l2_loss = 0.0\n for var in tf.trainable_variables():\n l2_loss += tf.nn.l2_loss(var)\n l2_loss = tf.multiply(tf.cast(l2_regularization, dtype=tf.float64),\n tf.cast(l2_loss, dtype=tf.float64))\n total_loss = tf.cast(error, dtype=tf.float64) + l2_loss\n optimizer = optimizer_fn(total_loss)\n training_dataset = tf.data.TFRecordDataset(filenames=[self.\n __binary_train_file_path], compression_type='ZLIB')\n test_dataset = tf.data.TFRecordDataset([self.\n __binary_test_file_path], compression_type='ZLIB')\n tfrecord_reader = TFRecordReader(self.__input_size, self.__output_size)\n shuffle_seed = 
tf.placeholder(dtype=tf.int64, shape=[])\n training_dataset = training_dataset.repeat(count=int(max_epoch_size))\n training_dataset = training_dataset.map(tfrecord_reader.\n validation_data_parser)\n padded_training_data_batches = training_dataset.padded_batch(batch_size\n =int(minibatch_size), padded_shapes=([], [tf.Dimension(None),\n self.__input_size], [tf.Dimension(None), self.__output_size], [\n tf.Dimension(None), self.__output_size + 2]))\n training_data_batch_iterator = (padded_training_data_batches.\n make_initializable_iterator())\n next_training_data_batch = training_data_batch_iterator.get_next()\n test_dataset = test_dataset.map(tfrecord_reader.test_data_parser)\n padded_test_input_data = test_dataset.padded_batch(batch_size=int(\n minibatch_size), padded_shapes=([], [tf.Dimension(None), self.\n __input_size], [tf.Dimension(None), self.__output_size + 2]))\n test_input_iterator = padded_test_input_data.make_one_shot_iterator()\n test_input_data_batch = test_input_iterator.get_next()\n init_op = tf.global_variables_initializer()\n with tf.Session() as session:\n session.run(init_op)\n for epoch in range(int(max_num_epochs)):\n print('Epoch->', epoch)\n session.run(training_data_batch_iterator.initializer,\n feed_dict={shuffle_seed: epoch})\n while True:\n try:\n training_data_batch_value = session.run(\n next_training_data_batch, feed_dict={\n shuffle_seed: epoch})\n session.run(optimizer, feed_dict={input:\n training_data_batch_value[1], true_output:\n training_data_batch_value[2], sequence_lengths:\n training_data_batch_value[0]})\n except tf.errors.OutOfRangeError:\n break\n list_of_forecasts = []\n while True:\n try:\n test_input_batch_value = session.run(test_input_data_batch)\n test_output = session.run(inference_prediction_output,\n feed_dict={input: test_input_batch_value[1],\n sequence_lengths: test_input_batch_value[0]})\n last_output_index = test_input_batch_value[0] - 1\n array_first_dimension = np.array(range(0,\n 
test_input_batch_value[0].shape[0]))\n forecasts = test_output[array_first_dimension,\n last_output_index]\n list_of_forecasts.extend(forecasts.tolist())\n except tf.errors.OutOfRangeError:\n break\n session.close()\n return list_of_forecasts\n",
"step-4": "import numpy as np\nimport tensorflow as tf\nfrom tfrecords_handler.moving_window.tfrecord_mean_reader import TFRecordReader\nfrom configs.global_configs import training_data_configs\n\n\nclass StackingModelTester:\n\n def __init__(self, **kwargs):\n self.__use_bias = kwargs['use_bias']\n self.__use_peepholes = kwargs['use_peepholes']\n self.__input_size = kwargs['input_size']\n self.__output_size = kwargs['output_size']\n self.__binary_train_file_path = kwargs['binary_train_file_path']\n self.__binary_test_file_path = kwargs['binary_test_file_path']\n self.__seed = kwargs['seed']\n self.__cell_type = kwargs['cell_type']\n\n def __l1_loss(self, z, t):\n loss = tf.reduce_mean(tf.abs(t - z))\n return loss\n\n def __l2_loss(selfself, z, t):\n loss = tf.losses.mean_squared_error(labels=t, predictions=z)\n return loss\n\n def test_model(self, **kwargs):\n num_hidden_layers = kwargs['num_hidden_layers']\n cell_dimension = kwargs['cell_dimension']\n minibatch_size = kwargs['minibatch_size']\n max_epoch_size = kwargs['max_epoch_size']\n max_num_epochs = kwargs['max_num_epochs']\n l2_regularization = kwargs['l2_regularization']\n gaussian_noise_stdev = kwargs['gaussian_noise_stdev']\n optimizer_fn = kwargs['optimizer_fn']\n random_normal_initializer_stdev = kwargs[\n 'random_normal_initializer_stdev']\n tf.reset_default_graph()\n tf.set_random_seed(self.__seed)\n input = tf.placeholder(dtype=tf.float32, shape=[None, None, self.\n __input_size])\n noise = tf.random_normal(shape=tf.shape(input), mean=0.0, stddev=\n gaussian_noise_stdev, dtype=tf.float32)\n training_input = input + noise\n testing_input = input\n true_output = tf.placeholder(dtype=tf.float32, shape=[None, None,\n self.__output_size])\n sequence_lengths = tf.placeholder(dtype=tf.int64, shape=[None])\n weight_initializer = tf.truncated_normal_initializer(stddev=\n random_normal_initializer_stdev)\n\n def cell():\n if self.__cell_type == 'LSTM':\n cell = 
tf.nn.rnn_cell.LSTMCell(num_units=int(cell_dimension\n ), use_peepholes=self.__use_peepholes, initializer=\n weight_initializer)\n elif self.__cell_type == 'GRU':\n cell = tf.nn.rnn_cell.GRUCell(num_units=int(cell_dimension),\n kernel_initializer=weight_initializer)\n elif self.__cell_type == 'RNN':\n cell = tf.nn.rnn_cell.BasicRNNCell(num_units=int(\n cell_dimension))\n return cell\n multi_layered_cell = tf.nn.rnn_cell.MultiRNNCell(cells=[cell() for\n _ in range(int(num_hidden_layers))])\n with tf.variable_scope('train_scope') as train_scope:\n training_rnn_outputs, training_rnn_states = tf.nn.dynamic_rnn(cell\n =multi_layered_cell, inputs=training_input, sequence_length\n =sequence_lengths, dtype=tf.float32)\n training_prediction_output = tf.layers.dense(inputs=tf.\n convert_to_tensor(value=training_rnn_outputs, dtype=tf.\n float32), units=self.__output_size, use_bias=self.\n __use_bias, kernel_initializer=weight_initializer, name=\n 'dense_layer')\n with tf.variable_scope(train_scope, reuse=tf.AUTO_REUSE\n ) as inference_scope:\n inference_rnn_outputs, inference_rnn_states = tf.nn.dynamic_rnn(\n cell=multi_layered_cell, inputs=testing_input,\n sequence_length=sequence_lengths, dtype=tf.float32)\n inference_prediction_output = tf.layers.dense(inputs=tf.\n convert_to_tensor(value=inference_rnn_outputs, dtype=tf.\n float32), units=self.__output_size, use_bias=self.\n __use_bias, kernel_initializer=weight_initializer, name=\n 'dense_layer', reuse=True)\n error = self.__l1_loss(training_prediction_output, true_output)\n l2_loss = 0.0\n for var in tf.trainable_variables():\n l2_loss += tf.nn.l2_loss(var)\n l2_loss = tf.multiply(tf.cast(l2_regularization, dtype=tf.float64),\n tf.cast(l2_loss, dtype=tf.float64))\n total_loss = tf.cast(error, dtype=tf.float64) + l2_loss\n optimizer = optimizer_fn(total_loss)\n training_dataset = tf.data.TFRecordDataset(filenames=[self.\n __binary_train_file_path], compression_type='ZLIB')\n test_dataset = tf.data.TFRecordDataset([self.\n 
__binary_test_file_path], compression_type='ZLIB')\n tfrecord_reader = TFRecordReader(self.__input_size, self.__output_size)\n shuffle_seed = tf.placeholder(dtype=tf.int64, shape=[])\n training_dataset = training_dataset.repeat(count=int(max_epoch_size))\n training_dataset = training_dataset.map(tfrecord_reader.\n validation_data_parser)\n padded_training_data_batches = training_dataset.padded_batch(batch_size\n =int(minibatch_size), padded_shapes=([], [tf.Dimension(None),\n self.__input_size], [tf.Dimension(None), self.__output_size], [\n tf.Dimension(None), self.__output_size + 2]))\n training_data_batch_iterator = (padded_training_data_batches.\n make_initializable_iterator())\n next_training_data_batch = training_data_batch_iterator.get_next()\n test_dataset = test_dataset.map(tfrecord_reader.test_data_parser)\n padded_test_input_data = test_dataset.padded_batch(batch_size=int(\n minibatch_size), padded_shapes=([], [tf.Dimension(None), self.\n __input_size], [tf.Dimension(None), self.__output_size + 2]))\n test_input_iterator = padded_test_input_data.make_one_shot_iterator()\n test_input_data_batch = test_input_iterator.get_next()\n init_op = tf.global_variables_initializer()\n with tf.Session() as session:\n session.run(init_op)\n for epoch in range(int(max_num_epochs)):\n print('Epoch->', epoch)\n session.run(training_data_batch_iterator.initializer,\n feed_dict={shuffle_seed: epoch})\n while True:\n try:\n training_data_batch_value = session.run(\n next_training_data_batch, feed_dict={\n shuffle_seed: epoch})\n session.run(optimizer, feed_dict={input:\n training_data_batch_value[1], true_output:\n training_data_batch_value[2], sequence_lengths:\n training_data_batch_value[0]})\n except tf.errors.OutOfRangeError:\n break\n list_of_forecasts = []\n while True:\n try:\n test_input_batch_value = session.run(test_input_data_batch)\n test_output = session.run(inference_prediction_output,\n feed_dict={input: test_input_batch_value[1],\n sequence_lengths: 
test_input_batch_value[0]})\n last_output_index = test_input_batch_value[0] - 1\n array_first_dimension = np.array(range(0,\n test_input_batch_value[0].shape[0]))\n forecasts = test_output[array_first_dimension,\n last_output_index]\n list_of_forecasts.extend(forecasts.tolist())\n except tf.errors.OutOfRangeError:\n break\n session.close()\n return list_of_forecasts\n",
"step-5": "import numpy as np\nimport tensorflow as tf\nfrom tfrecords_handler.moving_window.tfrecord_mean_reader import TFRecordReader\nfrom configs.global_configs import training_data_configs\n\n\nclass StackingModelTester:\n\n def __init__(self, **kwargs):\n self.__use_bias = kwargs[\"use_bias\"]\n self.__use_peepholes = kwargs[\"use_peepholes\"]\n self.__input_size = kwargs[\"input_size\"]\n self.__output_size = kwargs[\"output_size\"]\n self.__binary_train_file_path = kwargs[\"binary_train_file_path\"]\n self.__binary_test_file_path = kwargs[\"binary_test_file_path\"]\n self.__seed = kwargs[\"seed\"]\n self.__cell_type = kwargs[\"cell_type\"]\n\n def __l1_loss(self, z, t):\n loss = tf.reduce_mean(tf.abs(t - z))\n return loss\n\n def __l2_loss(selfself, z, t):\n loss = tf.losses.mean_squared_error(labels=t, predictions=z)\n return loss\n\n # Training the time series\n def test_model(self, **kwargs):\n\n # extract the parameters from the kwargs\n num_hidden_layers = kwargs['num_hidden_layers']\n cell_dimension = kwargs['cell_dimension']\n minibatch_size = kwargs['minibatch_size']\n max_epoch_size = kwargs['max_epoch_size']\n max_num_epochs = kwargs['max_num_epochs']\n l2_regularization = kwargs['l2_regularization']\n gaussian_noise_stdev = kwargs['gaussian_noise_stdev']\n optimizer_fn = kwargs['optimizer_fn']\n random_normal_initializer_stdev = kwargs['random_normal_initializer_stdev']\n\n # reset the tensorflow graph\n tf.reset_default_graph()\n\n tf.set_random_seed(self.__seed)\n\n # declare the input and output placeholders\n input = tf.placeholder(dtype=tf.float32, shape=[None, None, self.__input_size])\n noise = tf.random_normal(shape=tf.shape(input), mean=0.0, stddev=gaussian_noise_stdev, dtype=tf.float32)\n training_input = input + noise\n\n testing_input = input\n\n # output format [batch_size, sequence_length, dimension]\n true_output = tf.placeholder(dtype=tf.float32, shape=[None, None, self.__output_size])\n sequence_lengths = 
tf.placeholder(dtype=tf.int64, shape=[None])\n\n weight_initializer = tf.truncated_normal_initializer(stddev=random_normal_initializer_stdev)\n\n # RNN with the layer of cells\n def cell():\n if self.__cell_type == \"LSTM\":\n cell = tf.nn.rnn_cell.LSTMCell(num_units=int(cell_dimension), use_peepholes=self.__use_peepholes,\n initializer=weight_initializer)\n elif self.__cell_type == \"GRU\":\n cell = tf.nn.rnn_cell.GRUCell(num_units=int(cell_dimension), kernel_initializer=weight_initializer)\n elif self.__cell_type == \"RNN\":\n cell = tf.nn.rnn_cell.BasicRNNCell(num_units=int(cell_dimension))\n return cell\n\n multi_layered_cell = tf.nn.rnn_cell.MultiRNNCell(cells=[cell() for _ in range(int(num_hidden_layers))])\n\n with tf.variable_scope('train_scope') as train_scope:\n training_rnn_outputs, training_rnn_states = tf.nn.dynamic_rnn(cell=multi_layered_cell,\n inputs=training_input,\n sequence_length=sequence_lengths,\n dtype=tf.float32)\n\n # connect the dense layer to the RNN\n training_prediction_output = tf.layers.dense(\n inputs=tf.convert_to_tensor(value=training_rnn_outputs, dtype=tf.float32),\n units=self.__output_size,\n use_bias=self.__use_bias, kernel_initializer=weight_initializer, name='dense_layer')\n\n with tf.variable_scope(train_scope, reuse=tf.AUTO_REUSE) as inference_scope:\n inference_rnn_outputs, inference_rnn_states = tf.nn.dynamic_rnn(cell=multi_layered_cell,\n inputs=testing_input,\n sequence_length=sequence_lengths,\n dtype=tf.float32)\n # connect the dense layer to the RNN\n inference_prediction_output = tf.layers.dense(\n inputs=tf.convert_to_tensor(value=inference_rnn_outputs, dtype=tf.float32),\n units=self.__output_size,\n use_bias=self.__use_bias, kernel_initializer=weight_initializer, name='dense_layer', reuse=True)\n\n # error that should be minimized in the training process\n error = self.__l1_loss(training_prediction_output, true_output)\n\n # l2 regularization of the trainable model parameters\n l2_loss = 0.0\n for var in 
tf.trainable_variables():\n l2_loss += tf.nn.l2_loss(var)\n\n l2_loss = tf.multiply(tf.cast(l2_regularization, dtype=tf.float64), tf.cast(l2_loss, dtype=tf.float64))\n\n total_loss = tf.cast(error, dtype=tf.float64) + l2_loss\n\n # create the adagrad optimizer\n optimizer = optimizer_fn(total_loss)\n\n # create the Dataset objects for the training and test data\n training_dataset = tf.data.TFRecordDataset(filenames=[self.__binary_train_file_path], compression_type=\"ZLIB\")\n test_dataset = tf.data.TFRecordDataset([self.__binary_test_file_path], compression_type=\"ZLIB\")\n\n # parse the records\n tfrecord_reader = TFRecordReader(self.__input_size, self.__output_size)\n\n # prepare the training data into batches\n # randomly shuffle the time series within the dataset\n shuffle_seed = tf.placeholder(dtype=tf.int64, shape=[])\n # training_dataset = training_dataset.apply(\n # tf.data.experimental.shuffle_and_repeat(buffer_size=training_data_configs.SHUFFLE_BUFFER_SIZE,\n # count=int(max_epoch_size), seed=shuffle_seed))\n training_dataset = training_dataset.repeat(count=int(max_epoch_size))\n training_dataset = training_dataset.map(tfrecord_reader.validation_data_parser)\n\n # create the batches by padding the datasets to make the variable sequence lengths fixed within the individual batches\n padded_training_data_batches = training_dataset.padded_batch(batch_size=int(minibatch_size),\n padded_shapes=(\n [], [tf.Dimension(None), self.__input_size],\n [tf.Dimension(None), self.__output_size],\n [tf.Dimension(None), self.__output_size + 2]))\n\n # get an iterator to the batches\n training_data_batch_iterator = padded_training_data_batches.make_initializable_iterator()\n\n # access each batch using the iterator\n next_training_data_batch = training_data_batch_iterator.get_next()\n\n # preparing the test data\n test_dataset = test_dataset.map(tfrecord_reader.test_data_parser)\n\n # create a single batch from all the test time series by padding the datasets to make the 
variable sequence lengths fixed\n padded_test_input_data = test_dataset.padded_batch(batch_size=int(minibatch_size),\n padded_shapes=([], [tf.Dimension(None), self.__input_size],\n [tf.Dimension(None), self.__output_size + 2]))\n\n # get an iterator to the test input data batch\n test_input_iterator = padded_test_input_data.make_one_shot_iterator()\n\n # access the test input batch using the iterator\n test_input_data_batch = test_input_iterator.get_next()\n\n # setup variable initialization\n init_op = tf.global_variables_initializer()\n\n with tf.Session() as session:\n session.run(init_op)\n\n for epoch in range(int(max_num_epochs)):\n print(\"Epoch->\", epoch)\n session.run(training_data_batch_iterator.initializer, feed_dict={shuffle_seed: epoch})\n while True:\n try:\n training_data_batch_value = session.run(next_training_data_batch,\n feed_dict={shuffle_seed: epoch})\n\n session.run(optimizer,\n feed_dict={input: training_data_batch_value[1],\n true_output: training_data_batch_value[2],\n sequence_lengths: training_data_batch_value[0]})\n\n except tf.errors.OutOfRangeError:\n break\n\n # applying the model to the test data\n\n list_of_forecasts = []\n while True:\n try:\n\n # get the batch of test inputs\n test_input_batch_value = session.run(test_input_data_batch)\n\n # get the output of the network for the test input data batch\n test_output = session.run(inference_prediction_output,\n feed_dict={input: test_input_batch_value[1],\n sequence_lengths: test_input_batch_value[0]})\n\n last_output_index = test_input_batch_value[0] - 1\n array_first_dimension = np.array(range(0, test_input_batch_value[0].shape[0]))\n forecasts = test_output[array_first_dimension, last_output_index]\n list_of_forecasts.extend(forecasts.tolist())\n\n except tf.errors.OutOfRangeError:\n break\n\n session.close()\n return list_of_forecasts\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for line in open('9.in'):
if line:
processing_pattern = False
new_line = ''
for idx, char in enumerate(line):
pattern_found = False
if line[idx] == '(' and line[idx + 1].isnumeric() and line[idx + 2
] == 'x' and line[idx + 3].isnumeric() and line[idx + 4
] == ')':
pattern_found = True
num_chars = int(line[idx + 1])
repeat_times = int(line[idx + 3])
else:
new_line += char
processed_lines.append(new_line)
<|reserved_special_token_1|>
processed_lines = []
for line in open('9.in'):
if line:
processing_pattern = False
new_line = ''
for idx, char in enumerate(line):
pattern_found = False
if line[idx] == '(' and line[idx + 1].isnumeric() and line[idx + 2
] == 'x' and line[idx + 3].isnumeric() and line[idx + 4
] == ')':
pattern_found = True
num_chars = int(line[idx + 1])
repeat_times = int(line[idx + 3])
else:
new_line += char
processed_lines.append(new_line)
|
flexible
|
{
"blob_id": "3605f46da25eb98767ca8d7248beaa07572d3171",
"index": 644,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor line in open('9.in'):\n if line:\n processing_pattern = False\n new_line = ''\n for idx, char in enumerate(line):\n pattern_found = False\n if line[idx] == '(' and line[idx + 1].isnumeric() and line[idx + 2\n ] == 'x' and line[idx + 3].isnumeric() and line[idx + 4\n ] == ')':\n pattern_found = True\n num_chars = int(line[idx + 1])\n repeat_times = int(line[idx + 3])\n else:\n new_line += char\n processed_lines.append(new_line)\n",
"step-3": "processed_lines = []\nfor line in open('9.in'):\n if line:\n processing_pattern = False\n new_line = ''\n for idx, char in enumerate(line):\n pattern_found = False\n if line[idx] == '(' and line[idx + 1].isnumeric() and line[idx + 2\n ] == 'x' and line[idx + 3].isnumeric() and line[idx + 4\n ] == ')':\n pattern_found = True\n num_chars = int(line[idx + 1])\n repeat_times = int(line[idx + 3])\n else:\n new_line += char\n processed_lines.append(new_line)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
def calcula_norma(x):
lista=[]
for e in x:
lista.append(e**2)
v=(sum(lista)**(1/2))
return v
|
normal
|
{
"blob_id": "7346992d69250240207a0fc981d0adc245e69f87",
"index": 5206,
"step-1": "<mask token>\n",
"step-2": "def calcula_norma(x):\n lista = []\n for e in x:\n lista.append(e ** 2)\n v = sum(lista) ** (1 / 2)\n return v\n",
"step-3": "def calcula_norma(x):\n lista=[]\n for e in x:\n lista.append(e**2)\n v=(sum(lista)**(1/2))\n return v ",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
class Post(models.Model):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class SurveyHistory(models.Model):
post = models.ForeignKey(to=Post, on_delete=models.CASCADE)
record = models.BooleanField()
recorded_date = models.DateTimeField(auto_now_add=timezone.now)
def __str__(self):
return self.post.title
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Post(models.Model):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def get_absolute_url(self):
return reverse('post-detail', kwargs={'pk': self.pk})
class SurveyHistory(models.Model):
post = models.ForeignKey(to=Post, on_delete=models.CASCADE)
record = models.BooleanField()
recorded_date = models.DateTimeField(auto_now_add=timezone.now)
def __str__(self):
return self.post.title
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Post(models.Model):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __str__(self):
return self.title
def get_absolute_url(self):
return reverse('post-detail', kwargs={'pk': self.pk})
class SurveyHistory(models.Model):
post = models.ForeignKey(to=Post, on_delete=models.CASCADE)
record = models.BooleanField()
recorded_date = models.DateTimeField(auto_now_add=timezone.now)
def __str__(self):
return self.post.title
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Post(models.Model):
title = models.CharField(max_length=100)
content = models.TextField()
date_posted = models.DateTimeField(auto_now_add=timezone.now)
author = models.ForeignKey(to=User, on_delete=models.CASCADE)
def __str__(self):
return self.title
def get_absolute_url(self):
return reverse('post-detail', kwargs={'pk': self.pk})
class SurveyHistory(models.Model):
post = models.ForeignKey(to=Post, on_delete=models.CASCADE)
record = models.BooleanField()
recorded_date = models.DateTimeField(auto_now_add=timezone.now)
def __str__(self):
return self.post.title
<|reserved_special_token_1|>
from django.db import models
from django.utils import timezone
from django.contrib.auth.models import User
from django.urls import reverse
class Post(models.Model):
title = models.CharField(max_length=100)
content = models.TextField()
date_posted = models.DateTimeField(auto_now_add=timezone.now)
author = models.ForeignKey(to=User, on_delete=models.CASCADE)
def __str__(self):
return self.title
def get_absolute_url(self):
return reverse('post-detail', kwargs={'pk': self.pk})
class SurveyHistory(models.Model):
post = models.ForeignKey(to=Post, on_delete=models.CASCADE)
record = models.BooleanField()
recorded_date = models.DateTimeField(auto_now_add=timezone.now)
def __str__(self):
return self.post.title
|
flexible
|
{
"blob_id": "25ce31aee44c80ce4a5c1af7d1ca12c73c14df47",
"index": 5530,
"step-1": "<mask token>\n\n\nclass Post(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass SurveyHistory(models.Model):\n post = models.ForeignKey(to=Post, on_delete=models.CASCADE)\n record = models.BooleanField()\n recorded_date = models.DateTimeField(auto_now_add=timezone.now)\n\n def __str__(self):\n return self.post.title\n",
"step-2": "<mask token>\n\n\nclass Post(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def get_absolute_url(self):\n return reverse('post-detail', kwargs={'pk': self.pk})\n\n\nclass SurveyHistory(models.Model):\n post = models.ForeignKey(to=Post, on_delete=models.CASCADE)\n record = models.BooleanField()\n recorded_date = models.DateTimeField(auto_now_add=timezone.now)\n\n def __str__(self):\n return self.post.title\n",
"step-3": "<mask token>\n\n\nclass Post(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __str__(self):\n return self.title\n\n def get_absolute_url(self):\n return reverse('post-detail', kwargs={'pk': self.pk})\n\n\nclass SurveyHistory(models.Model):\n post = models.ForeignKey(to=Post, on_delete=models.CASCADE)\n record = models.BooleanField()\n recorded_date = models.DateTimeField(auto_now_add=timezone.now)\n\n def __str__(self):\n return self.post.title\n",
"step-4": "<mask token>\n\n\nclass Post(models.Model):\n title = models.CharField(max_length=100)\n content = models.TextField()\n date_posted = models.DateTimeField(auto_now_add=timezone.now)\n author = models.ForeignKey(to=User, on_delete=models.CASCADE)\n\n def __str__(self):\n return self.title\n\n def get_absolute_url(self):\n return reverse('post-detail', kwargs={'pk': self.pk})\n\n\nclass SurveyHistory(models.Model):\n post = models.ForeignKey(to=Post, on_delete=models.CASCADE)\n record = models.BooleanField()\n recorded_date = models.DateTimeField(auto_now_add=timezone.now)\n\n def __str__(self):\n return self.post.title\n",
"step-5": "from django.db import models\nfrom django.utils import timezone\nfrom django.contrib.auth.models import User\nfrom django.urls import reverse\n\n\nclass Post(models.Model):\n title = models.CharField(max_length=100)\n content = models.TextField()\n date_posted = models.DateTimeField(auto_now_add=timezone.now)\n author = models.ForeignKey(to=User, on_delete=models.CASCADE)\n\n def __str__(self):\n return self.title\n\n def get_absolute_url(self):\n return reverse('post-detail', kwargs={'pk': self.pk})\n\n\nclass SurveyHistory(models.Model):\n post = models.ForeignKey(to=Post, on_delete=models.CASCADE)\n record = models.BooleanField()\n recorded_date = models.DateTimeField(auto_now_add=timezone.now)\n\n def __str__(self):\n return self.post.title\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
"""
Tests for `sqlalchemy-cql` module.
"""
import pytest
from sqlalchemy import create_engine
from sqlalchemy import Table, Column, Integer, String, MetaData, ForeignKey
metadata = MetaData()
users = Table('users', metadata,
Column('id', Integer, primary_key=True),
Column('name', String),
Column('fullname', String),
)
def test_create_engine():
eng = create_engine("cql://user:password@localhost:49154/system")
assert eng.execute("select * from system.schema_keyspaces")
def test_table_names():
eng = create_engine("cql://user:password@localhost:49154/system")
eng.table_names()
def test_create_all():
eng = create_engine("cql://user:password@localhost:49154/system")
metadata.create_all(eng)
|
normal
|
{
"blob_id": "f5b18673dd5a3ba3070c07e88ae83a531669311a",
"index": 2139,
"step-1": "<mask token>\n\n\ndef test_create_all():\n eng = create_engine('cql://user:password@localhost:49154/system')\n metadata.create_all(eng)\n",
"step-2": "<mask token>\n\n\ndef test_create_engine():\n eng = create_engine('cql://user:password@localhost:49154/system')\n assert eng.execute('select * from system.schema_keyspaces')\n\n\ndef test_table_names():\n eng = create_engine('cql://user:password@localhost:49154/system')\n eng.table_names()\n\n\ndef test_create_all():\n eng = create_engine('cql://user:password@localhost:49154/system')\n metadata.create_all(eng)\n",
"step-3": "<mask token>\nmetadata = MetaData()\nusers = Table('users', metadata, Column('id', Integer, primary_key=True),\n Column('name', String), Column('fullname', String))\n\n\ndef test_create_engine():\n eng = create_engine('cql://user:password@localhost:49154/system')\n assert eng.execute('select * from system.schema_keyspaces')\n\n\ndef test_table_names():\n eng = create_engine('cql://user:password@localhost:49154/system')\n eng.table_names()\n\n\ndef test_create_all():\n eng = create_engine('cql://user:password@localhost:49154/system')\n metadata.create_all(eng)\n",
"step-4": "<mask token>\nimport pytest\nfrom sqlalchemy import create_engine\nfrom sqlalchemy import Table, Column, Integer, String, MetaData, ForeignKey\nmetadata = MetaData()\nusers = Table('users', metadata, Column('id', Integer, primary_key=True),\n Column('name', String), Column('fullname', String))\n\n\ndef test_create_engine():\n eng = create_engine('cql://user:password@localhost:49154/system')\n assert eng.execute('select * from system.schema_keyspaces')\n\n\ndef test_table_names():\n eng = create_engine('cql://user:password@localhost:49154/system')\n eng.table_names()\n\n\ndef test_create_all():\n eng = create_engine('cql://user:password@localhost:49154/system')\n metadata.create_all(eng)\n",
"step-5": "\"\"\"\nTests for `sqlalchemy-cql` module.\n\"\"\"\nimport pytest\n\nfrom sqlalchemy import create_engine\nfrom sqlalchemy import Table, Column, Integer, String, MetaData, ForeignKey\n\nmetadata = MetaData()\nusers = Table('users', metadata,\n Column('id', Integer, primary_key=True),\n Column('name', String),\n Column('fullname', String),\n)\n\ndef test_create_engine():\n eng = create_engine(\"cql://user:password@localhost:49154/system\")\n assert eng.execute(\"select * from system.schema_keyspaces\")\n\n\ndef test_table_names():\n eng = create_engine(\"cql://user:password@localhost:49154/system\")\n eng.table_names()\n\n\ndef test_create_all():\n eng = create_engine(\"cql://user:password@localhost:49154/system\")\n metadata.create_all(eng)",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Iapp1Config(AppConfig):
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Iapp1Config(AppConfig):
name = 'iapp1'
<|reserved_special_token_1|>
from django.apps import AppConfig
class Iapp1Config(AppConfig):
name = 'iapp1'
|
flexible
|
{
"blob_id": "c27ca6a8c38f2b96011e3a09da073ccc0e5a1467",
"index": 3386,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Iapp1Config(AppConfig):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Iapp1Config(AppConfig):\n name = 'iapp1'\n",
"step-4": "from django.apps import AppConfig\n\n\nclass Iapp1Config(AppConfig):\n name = 'iapp1'\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#!/usr/bin/python
# pymd2mc.xyzfile
"""
"""
__author__ = 'Mateusz Lis'
__version__= '0.1'
from optparse import OptionParser
import sys
from time import time
from constants import R, T
from energyCalc import EnergyCalculator
from latticeProjector import LatticeProjectorSimple
from lattices import HexLattice
from structures.xyzfile import XYZFile
from utils import delLine, clearFile
def main():
options = parseCommandLine()
inFile = XYZFile(options.inXyzFilename)
clearFile(options.outDatFilename)
outFile = open(options.outDatFilename, 'w')
i = 0
startTime = time()
omegas = []
sumOmegas = 0L
calc = EnergyCalculator(inFile, R, T)
while True:
i += 1
if options.verbose:
delLine()
print i,
omega = calc.getNextEnergy(options.symbol)
if omega is None:
break
omega , sim, diff = omega
if omega > -10**4 and omega < 10**10:
omegas.append(omega)
sumOmegas += omega
outFile.write("%d %f %f %f \n" % (i, omega, sim, diff))
outFile.close()
if options.verbose:
print "Done. Execution time=%f" % (time() - startTime)
print "omegas" ,sumOmegas, (sum(omegas))
lenOmegas = len(omegas)
midOmega = (sum(omegas)/len(omegas))
print "Result omegaAB = %f" % midOmega
sd = 0
for omega in omegas:
sd += (midOmega - omega)**2
sd /= len(omegas)
sd **= (1./2.)
print "Standard deviation = %f" % sd
def parseCommandLine():
"""
Sets up command line arguments and parses them
"""
parser = OptionParser(usage="%prog ", version="%prog " + __version__,
description='''
This program calculates omegaAB value from a hexagonal lattice trajectory
stored in xyz file (see for more details)''')
parser.add_option("-f", "--traj", dest="inXyzFilename",default = "hexTraj.xyz",
help="xyz input trajectory file (default traj.xyz)", metavar="INXYZFILE")
parser.add_option("-r", "--reference", dest="symbol",default = "P11",
help="reference particle name", metavar="ADATOM")
parser.add_option("-o", "--output", dest="outDatFilename", default="omega.dat",
help="output dat file with omega values for each frame. WARNING: it will be overriden", metavar="OUTXYZFILE")
parser.add_option("-q", "--quiet",
action="store_false", dest="verbose", default=True,
help="don't print status messages to stdout")
(options, _) = parser.parse_args()
return options
if __name__ == '__main__':
sys.exit(main())
|
normal
|
{
"blob_id": "a325feba1c2bb588321429a045133d6eede9e8cf",
"index": 9350,
"step-1": "#!/usr/bin/python\n# pymd2mc.xyzfile\n\"\"\"\n\n\"\"\"\n\n__author__ = 'Mateusz Lis'\n__version__= '0.1'\n\n\nfrom optparse import OptionParser\nimport sys\nfrom time import time\n\nfrom constants import R, T\nfrom energyCalc import EnergyCalculator\nfrom latticeProjector import LatticeProjectorSimple\nfrom lattices import HexLattice\nfrom structures.xyzfile import XYZFile\nfrom utils import delLine, clearFile\n \n\n \ndef main():\n \n options = parseCommandLine()\n inFile = XYZFile(options.inXyzFilename)\n \n clearFile(options.outDatFilename)\n outFile = open(options.outDatFilename, 'w')\n i = 0\n startTime = time()\n omegas = []\n\n sumOmegas = 0L\n calc = EnergyCalculator(inFile, R, T)\n \n while True:\n i += 1\n if options.verbose:\n delLine()\n print i, \n omega = calc.getNextEnergy(options.symbol)\n if omega is None:\n break\n omega , sim, diff = omega\n if omega > -10**4 and omega < 10**10: \n omegas.append(omega)\n sumOmegas += omega\n outFile.write(\"%d %f %f %f \\n\" % (i, omega, sim, diff))\n\n \n outFile.close()\n if options.verbose: \n print \"Done. 
Execution time=%f\" % (time() - startTime) \n print \"omegas\" ,sumOmegas, (sum(omegas))\n lenOmegas = len(omegas)\n midOmega = (sum(omegas)/len(omegas))\n print \"Result omegaAB = %f\" % midOmega\n sd = 0\n for omega in omegas:\n sd += (midOmega - omega)**2\n sd /= len(omegas)\n sd **= (1./2.)\n print \"Standard deviation = %f\" % sd\ndef parseCommandLine():\n \"\"\"\n Sets up command line arguments and parses them\n \"\"\"\n parser = OptionParser(usage=\"%prog \", version=\"%prog \" + __version__,\n description='''\n This program calculates omegaAB value from a hexagonal lattice trajectory\n stored in xyz file (see for more details)''')\n parser.add_option(\"-f\", \"--traj\", dest=\"inXyzFilename\",default = \"hexTraj.xyz\",\n help=\"xyz input trajectory file (default traj.xyz)\", metavar=\"INXYZFILE\")\n parser.add_option(\"-r\", \"--reference\", dest=\"symbol\",default = \"P11\",\n help=\"reference particle name\", metavar=\"ADATOM\")\n parser.add_option(\"-o\", \"--output\", dest=\"outDatFilename\", default=\"omega.dat\",\n help=\"output dat file with omega values for each frame. WARNING: it will be overriden\", metavar=\"OUTXYZFILE\")\n \n parser.add_option(\"-q\", \"--quiet\",\n action=\"store_false\", dest=\"verbose\", default=True,\n help=\"don't print status messages to stdout\")\n\n (options, _) = parser.parse_args()\n\n return options \n \n\n\nif __name__ == '__main__':\n sys.exit(main())\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Neverland2Style(Style):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Neverland2Style(Style):
background_color = '#121212'
styles = {Token: '#ffffff', Name.Function: '#ff005f', Operator.Word:
'#00ff00', Name.Label: 'noinherit #ffffaf', Generic.Subheading:
'#0000ff', Generic.Traceback: '#ff00af bg:#121212 bold', Generic.
Error: '#ffafff bg:#121212', Comment: '#87875f', Name.Attribute:
'#ff005f', Name.Constant: '#af5fff bold', Number.Float: '#af5fff',
Generic.Inserted: 'bg:#121212', Keyword.Type: 'noinherit #5fd7ff',
String: '#d7af5f', Generic.Deleted: '#d70087 bg:#080808', Comment.
Preproc: '#ffafd7', Keyword: '#ffff87 bold', Name.Exception:
'#87ff00 bold', Name.Variable: '#d75f00', Generic.Heading:
'#0000ff', Name.Tag: '#ffff87 bold', Number: '#0087ff', Generic.
Output: '#121212 bg:#121212', Name.Entity: '#5fd7ff bg:#080808',
Generic.Emph: '#808080 underline'}
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from pygments.style import Style
from pygments.token import Token, Keyword, Comment, Number, Generic, Operator, Name, String
class Neverland2Style(Style):
background_color = '#121212'
styles = {Token: '#ffffff', Name.Function: '#ff005f', Operator.Word:
'#00ff00', Name.Label: 'noinherit #ffffaf', Generic.Subheading:
'#0000ff', Generic.Traceback: '#ff00af bg:#121212 bold', Generic.
Error: '#ffafff bg:#121212', Comment: '#87875f', Name.Attribute:
'#ff005f', Name.Constant: '#af5fff bold', Number.Float: '#af5fff',
Generic.Inserted: 'bg:#121212', Keyword.Type: 'noinherit #5fd7ff',
String: '#d7af5f', Generic.Deleted: '#d70087 bg:#080808', Comment.
Preproc: '#ffafd7', Keyword: '#ffff87 bold', Name.Exception:
'#87ff00 bold', Name.Variable: '#d75f00', Generic.Heading:
'#0000ff', Name.Tag: '#ffff87 bold', Number: '#0087ff', Generic.
Output: '#121212 bg:#121212', Name.Entity: '#5fd7ff bg:#080808',
Generic.Emph: '#808080 underline'}
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
"""
Neverland2 Colorscheme
~~~~~~~~~~~~~~~~~~~~~~
Converted by Vim Colorscheme Converter
"""
from pygments.style import Style
from pygments.token import Token, Keyword, Comment, Number, Generic, Operator, Name, String
class Neverland2Style(Style):
background_color = '#121212'
styles = {
Token: '#ffffff',
Name.Function: '#ff005f',
Operator.Word: '#00ff00',
Name.Label: 'noinherit #ffffaf',
Generic.Subheading: '#0000ff',
Generic.Traceback: '#ff00af bg:#121212 bold',
Generic.Error: '#ffafff bg:#121212',
Comment: '#87875f',
Name.Attribute: '#ff005f',
Name.Constant: '#af5fff bold',
Number.Float: '#af5fff',
Generic.Inserted: 'bg:#121212',
Keyword.Type: 'noinherit #5fd7ff',
String: '#d7af5f',
Generic.Deleted: '#d70087 bg:#080808',
Comment.Preproc: '#ffafd7',
Keyword: '#ffff87 bold',
Name.Exception: '#87ff00 bold',
Name.Variable: '#d75f00',
Generic.Heading: '#0000ff',
Name.Tag: '#ffff87 bold',
Number: '#0087ff',
Generic.Output: '#121212 bg:#121212',
Name.Entity: '#5fd7ff bg:#080808',
Generic.Emph: '#808080 underline',
}
|
flexible
|
{
"blob_id": "9dccc19abb6dac9e9606dc1fd83a227b4da9bf1f",
"index": 4047,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Neverland2Style(Style):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Neverland2Style(Style):\n background_color = '#121212'\n styles = {Token: '#ffffff', Name.Function: '#ff005f', Operator.Word:\n '#00ff00', Name.Label: 'noinherit #ffffaf', Generic.Subheading:\n '#0000ff', Generic.Traceback: '#ff00af bg:#121212 bold', Generic.\n Error: '#ffafff bg:#121212', Comment: '#87875f', Name.Attribute:\n '#ff005f', Name.Constant: '#af5fff bold', Number.Float: '#af5fff',\n Generic.Inserted: 'bg:#121212', Keyword.Type: 'noinherit #5fd7ff',\n String: '#d7af5f', Generic.Deleted: '#d70087 bg:#080808', Comment.\n Preproc: '#ffafd7', Keyword: '#ffff87 bold', Name.Exception:\n '#87ff00 bold', Name.Variable: '#d75f00', Generic.Heading:\n '#0000ff', Name.Tag: '#ffff87 bold', Number: '#0087ff', Generic.\n Output: '#121212 bg:#121212', Name.Entity: '#5fd7ff bg:#080808',\n Generic.Emph: '#808080 underline'}\n",
"step-4": "<mask token>\nfrom pygments.style import Style\nfrom pygments.token import Token, Keyword, Comment, Number, Generic, Operator, Name, String\n\n\nclass Neverland2Style(Style):\n background_color = '#121212'\n styles = {Token: '#ffffff', Name.Function: '#ff005f', Operator.Word:\n '#00ff00', Name.Label: 'noinherit #ffffaf', Generic.Subheading:\n '#0000ff', Generic.Traceback: '#ff00af bg:#121212 bold', Generic.\n Error: '#ffafff bg:#121212', Comment: '#87875f', Name.Attribute:\n '#ff005f', Name.Constant: '#af5fff bold', Number.Float: '#af5fff',\n Generic.Inserted: 'bg:#121212', Keyword.Type: 'noinherit #5fd7ff',\n String: '#d7af5f', Generic.Deleted: '#d70087 bg:#080808', Comment.\n Preproc: '#ffafd7', Keyword: '#ffff87 bold', Name.Exception:\n '#87ff00 bold', Name.Variable: '#d75f00', Generic.Heading:\n '#0000ff', Name.Tag: '#ffff87 bold', Number: '#0087ff', Generic.\n Output: '#121212 bg:#121212', Name.Entity: '#5fd7ff bg:#080808',\n Generic.Emph: '#808080 underline'}\n",
"step-5": "# -*- coding: utf-8 -*-\n\"\"\"\n Neverland2 Colorscheme\n ~~~~~~~~~~~~~~~~~~~~~~\n\n Converted by Vim Colorscheme Converter\n\"\"\"\nfrom pygments.style import Style\nfrom pygments.token import Token, Keyword, Comment, Number, Generic, Operator, Name, String\n\nclass Neverland2Style(Style):\n\n background_color = '#121212'\n styles = {\n Token: '#ffffff',\n Name.Function: '#ff005f',\n Operator.Word: '#00ff00',\n Name.Label: 'noinherit #ffffaf',\n Generic.Subheading: '#0000ff',\n Generic.Traceback: '#ff00af bg:#121212 bold',\n Generic.Error: '#ffafff bg:#121212',\n Comment: '#87875f',\n Name.Attribute: '#ff005f',\n Name.Constant: '#af5fff bold',\n Number.Float: '#af5fff',\n Generic.Inserted: 'bg:#121212',\n Keyword.Type: 'noinherit #5fd7ff',\n String: '#d7af5f',\n Generic.Deleted: '#d70087 bg:#080808',\n Comment.Preproc: '#ffafd7',\n Keyword: '#ffff87 bold',\n Name.Exception: '#87ff00 bold',\n Name.Variable: '#d75f00',\n Generic.Heading: '#0000ff',\n Name.Tag: '#ffff87 bold',\n Number: '#0087ff',\n Generic.Output: '#121212 bg:#121212',\n Name.Entity: '#5fd7ff bg:#080808',\n Generic.Emph: '#808080 underline',\n }\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def itemexists(name):
lx.eval('select.item {%s} set' % name)
selected = lx.evalN('item.name ?')
return name in selected
def lockcamera():
if not itemexists('HDRECam_Grp'):
lx.eval('select.drop item')
lx.eval('group.create')
lx.eval('item.name HDRECam_Grp')
lx.eval('select.subItem HDRECam set camera')
lx.eval('!!group.edit add item')
lx.eval('select.item HDRECam_Grp set')
lx.eval('item.channel lock on item:HDRECam_Grp')
<|reserved_special_token_0|>
def hastag(item):
lx.eval('select.drop item')
lx.eval('select.item {%s} set' % item)
if lx.eval('item.tag HDRE ?') == 'set':
return true
def clearold():
try:
numenvs = lx.eval('query sceneservice environment.N ? all')
envs = []
oldclips = []
for x in xrange(numenvs):
envs.append(lx.eval('query sceneservice environment.ID ? %s' % x))
for env in envs:
lx.eval('select.item %s set' % env)
if lx.eval('item.tag string HDRE ?') == 'set':
layer, process = lx.eval(
'query sceneservice mask.children ? {%s}' % env)
lx.eval('select.item {%s} set' % layer)
oldclips.append(lx.eval('texture.setIMap ?'))
for env in envs:
lx.eval('select.item %s set' % env)
if lx.eval('item.tag string HDRE ?') == 'set':
lx.eval('!!item.delete')
numgrplocs = lx.eval('query sceneservice groupLocator.N ? all')
grplocs = []
for x in xrange(numgrplocs):
grplocs.append(lx.eval(
'query sceneservice groupLocator.ID ? %s' % x))
for loc in grplocs:
lx.eval('select.item %s set' % loc)
if lx.eval('item.tag string HDRE ?') == 'set':
lx.eval('!!item.delete')
break
lx.eval('select.itemPattern HDREGroup')
id = lx.eval1('query sceneservice selection ? mask')
parent = lx.eval('query sceneservice mask.parent ? %s' % id)
lx.eval('select.item %s set' % parent)
lx.eval('texture.delete')
for clip in oldclips:
lx.eval('select.drop item')
lx.eval('select.item {%s} set' % clip)
lx.eval('clip.delete')
except:
lx.out('Exception "%s" on line: %d' % (sys.exc_value, sys.
exc_traceback.tb_lineno))
def renamenew(incr):
try:
lx.eval('item.name HDRECam item:{HDRECam%s}' % incr)
lx.eval('item.name HDRECamAnimate item:{HDRECamAnimate%s}' % incr)
lx.eval('item.name HDRESun item:{HDRESun%s}' % incr)
lx.eval('item.name HDRERefl item:{HDRERefl%s}' % incr)
lx.eval('item.name HDREBackplate item:{HDREBackplate%s}' % incr)
lx.eval('item.name HDREEnv item:{HDREEnv%s}' % incr)
lx.eval('item.name {HDREActivate} item:{HDREActivate%s}' % incr)
lx.eval('item.name {HDREWater} item:{HDREWater%s}' % incr)
lx.eval('item.name {HDREShadowGround} item:{HDREShadowGround%s}' % incr
)
lx.eval('item.name {HDREControls} item:{HDREControls%s}' % incr)
lx.eval('item.name {BackdropBrowser} item:{BackdropBrowser%s}' % incr)
lx.eval('item.name {Texture Group} item:{Texture Group%s}' % incr)
lx.eval('item.name {HDREGroup} item:{HDREGroup%s}' % incr)
root = lx.eval('query sceneservice item.parent ? HDRECam')
rootname = lx.eval('query sceneservice item.name ? %s' % root)
newname = rootname.split(incr)[0]
lx.eval('item.name {%s} item:{%s}' % (newname, rootname))
except:
lx.out('Exception "%s" on line: %d' % (sys.exc_value, sys.
exc_traceback.tb_lineno))
<|reserved_special_token_0|>
def setframesize():
try:
backplate = None
envchildren = lx.eval(
'query sceneservice item.children ? HDREBackplate')
for child in envchildren:
if lx.eval('query sceneservice item.type ? {%s}' % child
) == 'imageMap':
lx.eval('select.item %s set' % child)
backplate = lx.eval('texture.setIMap ?')
break
if backplate:
clip_width = None
clip_height = None
clips = lx.evalN('query layerservice clips ? all')
for clip in clips:
if lx.eval('query layerservice clip.name ? {%s}' % clip
) == backplate:
info = lx.eval('query layerservice clip.info ? {%s}' % clip
).split()
clip_width = float(info[1].split(':')[1])
clip_height = float(info[2].split(':')[1])
if clip_width != None and clip_height != None:
if clip_width > clip_height:
frame_width = 1024
frame_height = int(clip_height / clip_width * 1024)
else:
frame_height = 1024
frame_width = int(clip_width / clip_height * 1024)
lx.eval('render.res 0 %s' % frame_width)
lx.eval('render.res 1 %s' % frame_height)
except:
lx.out('Exception "%s" on line: %d' % (sys.exc_value, sys.
exc_traceback.tb_lineno))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def itemexists(name):
lx.eval('select.item {%s} set' % name)
selected = lx.evalN('item.name ?')
return name in selected
def lockcamera():
if not itemexists('HDRECam_Grp'):
lx.eval('select.drop item')
lx.eval('group.create')
lx.eval('item.name HDRECam_Grp')
lx.eval('select.subItem HDRECam set camera')
lx.eval('!!group.edit add item')
lx.eval('select.item HDRECam_Grp set')
lx.eval('item.channel lock on item:HDRECam_Grp')
<|reserved_special_token_0|>
def hastag(item):
lx.eval('select.drop item')
lx.eval('select.item {%s} set' % item)
if lx.eval('item.tag HDRE ?') == 'set':
return true
def clearold():
try:
numenvs = lx.eval('query sceneservice environment.N ? all')
envs = []
oldclips = []
for x in xrange(numenvs):
envs.append(lx.eval('query sceneservice environment.ID ? %s' % x))
for env in envs:
lx.eval('select.item %s set' % env)
if lx.eval('item.tag string HDRE ?') == 'set':
layer, process = lx.eval(
'query sceneservice mask.children ? {%s}' % env)
lx.eval('select.item {%s} set' % layer)
oldclips.append(lx.eval('texture.setIMap ?'))
for env in envs:
lx.eval('select.item %s set' % env)
if lx.eval('item.tag string HDRE ?') == 'set':
lx.eval('!!item.delete')
numgrplocs = lx.eval('query sceneservice groupLocator.N ? all')
grplocs = []
for x in xrange(numgrplocs):
grplocs.append(lx.eval(
'query sceneservice groupLocator.ID ? %s' % x))
for loc in grplocs:
lx.eval('select.item %s set' % loc)
if lx.eval('item.tag string HDRE ?') == 'set':
lx.eval('!!item.delete')
break
lx.eval('select.itemPattern HDREGroup')
id = lx.eval1('query sceneservice selection ? mask')
parent = lx.eval('query sceneservice mask.parent ? %s' % id)
lx.eval('select.item %s set' % parent)
lx.eval('texture.delete')
for clip in oldclips:
lx.eval('select.drop item')
lx.eval('select.item {%s} set' % clip)
lx.eval('clip.delete')
except:
lx.out('Exception "%s" on line: %d' % (sys.exc_value, sys.
exc_traceback.tb_lineno))
def renamenew(incr):
try:
lx.eval('item.name HDRECam item:{HDRECam%s}' % incr)
lx.eval('item.name HDRECamAnimate item:{HDRECamAnimate%s}' % incr)
lx.eval('item.name HDRESun item:{HDRESun%s}' % incr)
lx.eval('item.name HDRERefl item:{HDRERefl%s}' % incr)
lx.eval('item.name HDREBackplate item:{HDREBackplate%s}' % incr)
lx.eval('item.name HDREEnv item:{HDREEnv%s}' % incr)
lx.eval('item.name {HDREActivate} item:{HDREActivate%s}' % incr)
lx.eval('item.name {HDREWater} item:{HDREWater%s}' % incr)
lx.eval('item.name {HDREShadowGround} item:{HDREShadowGround%s}' % incr
)
lx.eval('item.name {HDREControls} item:{HDREControls%s}' % incr)
lx.eval('item.name {BackdropBrowser} item:{BackdropBrowser%s}' % incr)
lx.eval('item.name {Texture Group} item:{Texture Group%s}' % incr)
lx.eval('item.name {HDREGroup} item:{HDREGroup%s}' % incr)
root = lx.eval('query sceneservice item.parent ? HDRECam')
rootname = lx.eval('query sceneservice item.name ? %s' % root)
newname = rootname.split(incr)[0]
lx.eval('item.name {%s} item:{%s}' % (newname, rootname))
except:
lx.out('Exception "%s" on line: %d' % (sys.exc_value, sys.
exc_traceback.tb_lineno))
def tagitems():
try:
lx.eval('select.drop item')
for item in HDREEnvs:
lx.eval('select.item {%s} set' % item)
lx.eval('item.tag string {HDRE} {set}')
lx.eval('select.item {%s} set' % rootID)
lx.eval('item.tag string {HDRE} {set}')
except:
lx.out('Exception "%s" on line: %d' % (sys.exc_value, sys.
exc_traceback.tb_lineno))
def setframesize():
try:
backplate = None
envchildren = lx.eval(
'query sceneservice item.children ? HDREBackplate')
for child in envchildren:
if lx.eval('query sceneservice item.type ? {%s}' % child
) == 'imageMap':
lx.eval('select.item %s set' % child)
backplate = lx.eval('texture.setIMap ?')
break
if backplate:
clip_width = None
clip_height = None
clips = lx.evalN('query layerservice clips ? all')
for clip in clips:
if lx.eval('query layerservice clip.name ? {%s}' % clip
) == backplate:
info = lx.eval('query layerservice clip.info ? {%s}' % clip
).split()
clip_width = float(info[1].split(':')[1])
clip_height = float(info[2].split(':')[1])
if clip_width != None and clip_height != None:
if clip_width > clip_height:
frame_width = 1024
frame_height = int(clip_height / clip_width * 1024)
else:
frame_height = 1024
frame_width = int(clip_width / clip_height * 1024)
lx.eval('render.res 0 %s' % frame_width)
lx.eval('render.res 1 %s' % frame_height)
except:
lx.out('Exception "%s" on line: %d' % (sys.exc_value, sys.
exc_traceback.tb_lineno))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def itemexists(name):
lx.eval('select.item {%s} set' % name)
selected = lx.evalN('item.name ?')
return name in selected
def lockcamera():
if not itemexists('HDRECam_Grp'):
lx.eval('select.drop item')
lx.eval('group.create')
lx.eval('item.name HDRECam_Grp')
lx.eval('select.subItem HDRECam set camera')
lx.eval('!!group.edit add item')
lx.eval('select.item HDRECam_Grp set')
lx.eval('item.channel lock on item:HDRECam_Grp')
def lockanimcamera():
if not itemexists('HDRECamAnimate_Grp'):
lx.eval('select.drop item')
lx.eval('group.create')
lx.eval('item.name HDRECamAnimate_Grp')
xfrmitem = lx.eval('query sceneservice item.xfrmPos ? HDRECamAnimate')
lx.eval('select.channel {%s:pos.X} set' % xfrmitem)
lx.eval('select.channel {%s:pos.Y} add' % xfrmitem)
lx.eval('select.channel {%s:pos.Z} add' % xfrmitem)
lx.eval('!!group.edit add chan')
lx.eval('item.channel lock on item:HDRECamAnimate_Grp')
def hastag(item):
lx.eval('select.drop item')
lx.eval('select.item {%s} set' % item)
if lx.eval('item.tag HDRE ?') == 'set':
return true
def clearold():
try:
numenvs = lx.eval('query sceneservice environment.N ? all')
envs = []
oldclips = []
for x in xrange(numenvs):
envs.append(lx.eval('query sceneservice environment.ID ? %s' % x))
for env in envs:
lx.eval('select.item %s set' % env)
if lx.eval('item.tag string HDRE ?') == 'set':
layer, process = lx.eval(
'query sceneservice mask.children ? {%s}' % env)
lx.eval('select.item {%s} set' % layer)
oldclips.append(lx.eval('texture.setIMap ?'))
for env in envs:
lx.eval('select.item %s set' % env)
if lx.eval('item.tag string HDRE ?') == 'set':
lx.eval('!!item.delete')
numgrplocs = lx.eval('query sceneservice groupLocator.N ? all')
grplocs = []
for x in xrange(numgrplocs):
grplocs.append(lx.eval(
'query sceneservice groupLocator.ID ? %s' % x))
for loc in grplocs:
lx.eval('select.item %s set' % loc)
if lx.eval('item.tag string HDRE ?') == 'set':
lx.eval('!!item.delete')
break
lx.eval('select.itemPattern HDREGroup')
id = lx.eval1('query sceneservice selection ? mask')
parent = lx.eval('query sceneservice mask.parent ? %s' % id)
lx.eval('select.item %s set' % parent)
lx.eval('texture.delete')
for clip in oldclips:
lx.eval('select.drop item')
lx.eval('select.item {%s} set' % clip)
lx.eval('clip.delete')
except:
lx.out('Exception "%s" on line: %d' % (sys.exc_value, sys.
exc_traceback.tb_lineno))
def renamenew(incr):
try:
lx.eval('item.name HDRECam item:{HDRECam%s}' % incr)
lx.eval('item.name HDRECamAnimate item:{HDRECamAnimate%s}' % incr)
lx.eval('item.name HDRESun item:{HDRESun%s}' % incr)
lx.eval('item.name HDRERefl item:{HDRERefl%s}' % incr)
lx.eval('item.name HDREBackplate item:{HDREBackplate%s}' % incr)
lx.eval('item.name HDREEnv item:{HDREEnv%s}' % incr)
lx.eval('item.name {HDREActivate} item:{HDREActivate%s}' % incr)
lx.eval('item.name {HDREWater} item:{HDREWater%s}' % incr)
lx.eval('item.name {HDREShadowGround} item:{HDREShadowGround%s}' % incr
)
lx.eval('item.name {HDREControls} item:{HDREControls%s}' % incr)
lx.eval('item.name {BackdropBrowser} item:{BackdropBrowser%s}' % incr)
lx.eval('item.name {Texture Group} item:{Texture Group%s}' % incr)
lx.eval('item.name {HDREGroup} item:{HDREGroup%s}' % incr)
root = lx.eval('query sceneservice item.parent ? HDRECam')
rootname = lx.eval('query sceneservice item.name ? %s' % root)
newname = rootname.split(incr)[0]
lx.eval('item.name {%s} item:{%s}' % (newname, rootname))
except:
lx.out('Exception "%s" on line: %d' % (sys.exc_value, sys.
exc_traceback.tb_lineno))
def tagitems():
try:
lx.eval('select.drop item')
for item in HDREEnvs:
lx.eval('select.item {%s} set' % item)
lx.eval('item.tag string {HDRE} {set}')
lx.eval('select.item {%s} set' % rootID)
lx.eval('item.tag string {HDRE} {set}')
except:
lx.out('Exception "%s" on line: %d' % (sys.exc_value, sys.
exc_traceback.tb_lineno))
def setframesize():
try:
backplate = None
envchildren = lx.eval(
'query sceneservice item.children ? HDREBackplate')
for child in envchildren:
if lx.eval('query sceneservice item.type ? {%s}' % child
) == 'imageMap':
lx.eval('select.item %s set' % child)
backplate = lx.eval('texture.setIMap ?')
break
if backplate:
clip_width = None
clip_height = None
clips = lx.evalN('query layerservice clips ? all')
for clip in clips:
if lx.eval('query layerservice clip.name ? {%s}' % clip
) == backplate:
info = lx.eval('query layerservice clip.info ? {%s}' % clip
).split()
clip_width = float(info[1].split(':')[1])
clip_height = float(info[2].split(':')[1])
if clip_width != None and clip_height != None:
if clip_width > clip_height:
frame_width = 1024
frame_height = int(clip_height / clip_width * 1024)
else:
frame_height = 1024
frame_width = int(clip_width / clip_height * 1024)
lx.eval('render.res 0 %s' % frame_width)
lx.eval('render.res 1 %s' % frame_height)
except:
lx.out('Exception "%s" on line: %d' % (sys.exc_value, sys.
exc_traceback.tb_lineno))
try:
if lx.eval('query scriptsysservice userValue.isDefined ? HDRE_Card'):
cookie = lx.eval('user.value HDRE_Card ?')
lx.eval('layout.createOrClose {%s} open:0' % cookie)
selectedItem = lx.eval1('query sceneservice selection ? locator')
rootID = lx.eval('query sceneservice item.parent ? %s' % selectedItem)
numcams = lx.eval('query sceneservice camera.N ? all')
for x in xrange(numcams):
camname = lx.eval('query sceneservice camera.name ? %s' % x)
if camname in camnames.keys():
incr = camnames[camname]
clearold()
renamenew(incr)
break
if itemexists('HDRECam'):
if itemexists('HDRECamAnimate'):
flength = round(lx.eval('item.channel focalLen ? item:HDRECam'), 3
) * 1000
if flength >= 101 and flength <= 200:
flength = flength + 100
elif flength >= 51 and flength <= 100:
flength = flength + 50
elif flength >= 18 and flength <= 50:
flength = flength + 10
lx.eval('item.channel focalLen [%s mm] item:HDRECamAnimate' %
flength)
lx.eval('render.camera HDRECamAnimate')
lockanimcamera()
lx.eval('render.camera HDRECam')
lockcamera()
renID = lx.eval('query sceneservice polyRender.ID ? 0')
lx.eval('item.channel globEnable true item:%s' % renID)
lx.eval('item.channel dispRate 3 item:%s' % renID)
lx.eval('item.channel dispRatio 8 item:%s' % renID)
numouts = lx.eval('query sceneservice renderOutput.N ? all')
for x in xrange(numouts):
id = lx.eval('query sceneservice renderOutput.ID ? %s' % x)
lx.eval('select.item %s set' % id)
if lx.eval('shader.setEffect ?') == 'shade.color':
lx.eval('item.channel gamma 2.2 item:%s' % id)
num_envs = lx.eval('query sceneservice environment.N ? all')
environments = []
for x in xrange(num_envs):
environments.append(lx.eval(
'query sceneservice environment.name ? %s' % x))
for env in environments:
if env not in HDREEnvs:
lx.eval('item.channel visCam false item:{%s}' % env)
lx.eval('item.channel visInd false item:{%s}' % env)
lx.eval('item.channel visRefl false item:{%s}' % env)
lx.eval('item.channel visRefr false item:{%s}' % env)
numlights = lx.eval('query sceneservice light.N ? all')
for x in xrange(numlights):
if lx.eval('query sceneservice light.name ? %s' % x) != 'HDRESun':
id = lx.eval('query sceneservice light.ID ? %s' % x)
lx.eval('layer.setVisibility {%s} 0' % id)
if itemexists('HDREActivate'):
lx.eval('layer.setVisibility {HDREActivate} 0')
controlsID = lx.eval('query sceneservice item.ID ? HDREControls')
if controlsID:
lx.eval('layer.setVisibility {%s} 1' % controlsID)
setframesize()
tagitems()
except:
lx.out('Exception "%s" on line: %d' % (sys.exc_value, sys.exc_traceback
.tb_lineno))
<|reserved_special_token_1|>
camnames = {'HDRECam (2)': ' (2)', 'HDRECam(2)': '(2)', 'HDRECam 2': ' 2',
'HDRECam_2': '_2', 'HDRECam2': '2'}
HDREEnvs = ['HDRERefl', 'HDREBackplate', 'HDREEnv']
def itemexists(name):
lx.eval('select.item {%s} set' % name)
selected = lx.evalN('item.name ?')
return name in selected
def lockcamera():
if not itemexists('HDRECam_Grp'):
lx.eval('select.drop item')
lx.eval('group.create')
lx.eval('item.name HDRECam_Grp')
lx.eval('select.subItem HDRECam set camera')
lx.eval('!!group.edit add item')
lx.eval('select.item HDRECam_Grp set')
lx.eval('item.channel lock on item:HDRECam_Grp')
def lockanimcamera():
if not itemexists('HDRECamAnimate_Grp'):
lx.eval('select.drop item')
lx.eval('group.create')
lx.eval('item.name HDRECamAnimate_Grp')
xfrmitem = lx.eval('query sceneservice item.xfrmPos ? HDRECamAnimate')
lx.eval('select.channel {%s:pos.X} set' % xfrmitem)
lx.eval('select.channel {%s:pos.Y} add' % xfrmitem)
lx.eval('select.channel {%s:pos.Z} add' % xfrmitem)
lx.eval('!!group.edit add chan')
lx.eval('item.channel lock on item:HDRECamAnimate_Grp')
def hastag(item):
lx.eval('select.drop item')
lx.eval('select.item {%s} set' % item)
if lx.eval('item.tag HDRE ?') == 'set':
return true
def clearold():
try:
numenvs = lx.eval('query sceneservice environment.N ? all')
envs = []
oldclips = []
for x in xrange(numenvs):
envs.append(lx.eval('query sceneservice environment.ID ? %s' % x))
for env in envs:
lx.eval('select.item %s set' % env)
if lx.eval('item.tag string HDRE ?') == 'set':
layer, process = lx.eval(
'query sceneservice mask.children ? {%s}' % env)
lx.eval('select.item {%s} set' % layer)
oldclips.append(lx.eval('texture.setIMap ?'))
for env in envs:
lx.eval('select.item %s set' % env)
if lx.eval('item.tag string HDRE ?') == 'set':
lx.eval('!!item.delete')
numgrplocs = lx.eval('query sceneservice groupLocator.N ? all')
grplocs = []
for x in xrange(numgrplocs):
grplocs.append(lx.eval(
'query sceneservice groupLocator.ID ? %s' % x))
for loc in grplocs:
lx.eval('select.item %s set' % loc)
if lx.eval('item.tag string HDRE ?') == 'set':
lx.eval('!!item.delete')
break
lx.eval('select.itemPattern HDREGroup')
id = lx.eval1('query sceneservice selection ? mask')
parent = lx.eval('query sceneservice mask.parent ? %s' % id)
lx.eval('select.item %s set' % parent)
lx.eval('texture.delete')
for clip in oldclips:
lx.eval('select.drop item')
lx.eval('select.item {%s} set' % clip)
lx.eval('clip.delete')
except:
lx.out('Exception "%s" on line: %d' % (sys.exc_value, sys.
exc_traceback.tb_lineno))
def renamenew(incr):
try:
lx.eval('item.name HDRECam item:{HDRECam%s}' % incr)
lx.eval('item.name HDRECamAnimate item:{HDRECamAnimate%s}' % incr)
lx.eval('item.name HDRESun item:{HDRESun%s}' % incr)
lx.eval('item.name HDRERefl item:{HDRERefl%s}' % incr)
lx.eval('item.name HDREBackplate item:{HDREBackplate%s}' % incr)
lx.eval('item.name HDREEnv item:{HDREEnv%s}' % incr)
lx.eval('item.name {HDREActivate} item:{HDREActivate%s}' % incr)
lx.eval('item.name {HDREWater} item:{HDREWater%s}' % incr)
lx.eval('item.name {HDREShadowGround} item:{HDREShadowGround%s}' % incr
)
lx.eval('item.name {HDREControls} item:{HDREControls%s}' % incr)
lx.eval('item.name {BackdropBrowser} item:{BackdropBrowser%s}' % incr)
lx.eval('item.name {Texture Group} item:{Texture Group%s}' % incr)
lx.eval('item.name {HDREGroup} item:{HDREGroup%s}' % incr)
root = lx.eval('query sceneservice item.parent ? HDRECam')
rootname = lx.eval('query sceneservice item.name ? %s' % root)
newname = rootname.split(incr)[0]
lx.eval('item.name {%s} item:{%s}' % (newname, rootname))
except:
lx.out('Exception "%s" on line: %d' % (sys.exc_value, sys.
exc_traceback.tb_lineno))
def tagitems():
try:
lx.eval('select.drop item')
for item in HDREEnvs:
lx.eval('select.item {%s} set' % item)
lx.eval('item.tag string {HDRE} {set}')
lx.eval('select.item {%s} set' % rootID)
lx.eval('item.tag string {HDRE} {set}')
except:
lx.out('Exception "%s" on line: %d' % (sys.exc_value, sys.
exc_traceback.tb_lineno))
def setframesize():
try:
backplate = None
envchildren = lx.eval(
'query sceneservice item.children ? HDREBackplate')
for child in envchildren:
if lx.eval('query sceneservice item.type ? {%s}' % child
) == 'imageMap':
lx.eval('select.item %s set' % child)
backplate = lx.eval('texture.setIMap ?')
break
if backplate:
clip_width = None
clip_height = None
clips = lx.evalN('query layerservice clips ? all')
for clip in clips:
if lx.eval('query layerservice clip.name ? {%s}' % clip
) == backplate:
info = lx.eval('query layerservice clip.info ? {%s}' % clip
).split()
clip_width = float(info[1].split(':')[1])
clip_height = float(info[2].split(':')[1])
if clip_width != None and clip_height != None:
if clip_width > clip_height:
frame_width = 1024
frame_height = int(clip_height / clip_width * 1024)
else:
frame_height = 1024
frame_width = int(clip_width / clip_height * 1024)
lx.eval('render.res 0 %s' % frame_width)
lx.eval('render.res 1 %s' % frame_height)
except:
lx.out('Exception "%s" on line: %d' % (sys.exc_value, sys.
exc_traceback.tb_lineno))
try:
if lx.eval('query scriptsysservice userValue.isDefined ? HDRE_Card'):
cookie = lx.eval('user.value HDRE_Card ?')
lx.eval('layout.createOrClose {%s} open:0' % cookie)
selectedItem = lx.eval1('query sceneservice selection ? locator')
rootID = lx.eval('query sceneservice item.parent ? %s' % selectedItem)
numcams = lx.eval('query sceneservice camera.N ? all')
for x in xrange(numcams):
camname = lx.eval('query sceneservice camera.name ? %s' % x)
if camname in camnames.keys():
incr = camnames[camname]
clearold()
renamenew(incr)
break
if itemexists('HDRECam'):
if itemexists('HDRECamAnimate'):
flength = round(lx.eval('item.channel focalLen ? item:HDRECam'), 3
) * 1000
if flength >= 101 and flength <= 200:
flength = flength + 100
elif flength >= 51 and flength <= 100:
flength = flength + 50
elif flength >= 18 and flength <= 50:
flength = flength + 10
lx.eval('item.channel focalLen [%s mm] item:HDRECamAnimate' %
flength)
lx.eval('render.camera HDRECamAnimate')
lockanimcamera()
lx.eval('render.camera HDRECam')
lockcamera()
renID = lx.eval('query sceneservice polyRender.ID ? 0')
lx.eval('item.channel globEnable true item:%s' % renID)
lx.eval('item.channel dispRate 3 item:%s' % renID)
lx.eval('item.channel dispRatio 8 item:%s' % renID)
numouts = lx.eval('query sceneservice renderOutput.N ? all')
for x in xrange(numouts):
id = lx.eval('query sceneservice renderOutput.ID ? %s' % x)
lx.eval('select.item %s set' % id)
if lx.eval('shader.setEffect ?') == 'shade.color':
lx.eval('item.channel gamma 2.2 item:%s' % id)
num_envs = lx.eval('query sceneservice environment.N ? all')
environments = []
for x in xrange(num_envs):
environments.append(lx.eval(
'query sceneservice environment.name ? %s' % x))
for env in environments:
if env not in HDREEnvs:
lx.eval('item.channel visCam false item:{%s}' % env)
lx.eval('item.channel visInd false item:{%s}' % env)
lx.eval('item.channel visRefl false item:{%s}' % env)
lx.eval('item.channel visRefr false item:{%s}' % env)
numlights = lx.eval('query sceneservice light.N ? all')
for x in xrange(numlights):
if lx.eval('query sceneservice light.name ? %s' % x) != 'HDRESun':
id = lx.eval('query sceneservice light.ID ? %s' % x)
lx.eval('layer.setVisibility {%s} 0' % id)
if itemexists('HDREActivate'):
lx.eval('layer.setVisibility {HDREActivate} 0')
controlsID = lx.eval('query sceneservice item.ID ? HDREControls')
if controlsID:
lx.eval('layer.setVisibility {%s} 1' % controlsID)
setframesize()
tagitems()
except:
lx.out('Exception "%s" on line: %d' % (sys.exc_value, sys.exc_traceback
.tb_lineno))
<|reserved_special_token_1|>
#!/usr/bin/env python
################################################################################
#
# HDREEnable.py
#
# Version: 1.000
#
# Author: Gwynne Reddick
#
# Description:
#
#
# Usage:
#
# Last Update 16:49 08/12/10
#
################################################################################
# part of a hack for later on so we can identify if a second HDRE assembly has been applied
camnames = {'HDRECam (2)':' (2)',
'HDRECam(2)':'(2)',
'HDRECam 2':' 2',
'HDRECam_2':'_2',
'HDRECam2':'2'}
HDREEnvs = ['HDRERefl', 'HDREBackplate', 'HDREEnv']
def itemexists(name):
lx.eval('select.item {%s} set' % name)
selected = lx.evalN('item.name ?')
return name in selected
def lockcamera():
if not itemexists('HDRECam_Grp'):
lx.eval('select.drop item')
lx.eval('group.create')
lx.eval('item.name HDRECam_Grp')
lx.eval('select.subItem HDRECam set camera')
lx.eval('!!group.edit add item')
lx.eval('select.item HDRECam_Grp set')
lx.eval('item.channel lock on item:HDRECam_Grp')
def lockanimcamera():
if not itemexists('HDRECamAnimate_Grp'):
lx.eval('select.drop item')
lx.eval('group.create')
lx.eval('item.name HDRECamAnimate_Grp')
xfrmitem = lx.eval('query sceneservice item.xfrmPos ? HDRECamAnimate')
lx.eval('select.channel {%s:pos.X} set' % xfrmitem)
lx.eval('select.channel {%s:pos.Y} add' % xfrmitem)
lx.eval('select.channel {%s:pos.Z} add' % xfrmitem)
lx.eval('!!group.edit add chan')
lx.eval('item.channel lock on item:HDRECamAnimate_Grp')
def hastag(item):
lx.eval('select.drop item')
lx.eval('select.item {%s} set' % item)
if lx.eval('item.tag HDRE ?') == 'set':
return true
def clearold():
try:
numenvs = lx.eval('query sceneservice environment.N ? all')
envs = []
oldclips = []
for x in xrange(numenvs):
envs.append(lx.eval('query sceneservice environment.ID ? %s' % x))
# need a hack here to work round what appears to be a bug. We need to collect a
# list of clips to delete after deleting the env items. For some reason we have
# to collect the list in one loop, then delete the env items in a second loop
# otherwise querying the env refl image returns None. I think this is because the
# env image layer is originally an instance
for env in envs:
lx.eval('select.item %s set' % env)
if lx.eval('item.tag string HDRE ?') == 'set':
layer, process = lx.eval('query sceneservice mask.children ? {%s}' % env)
lx.eval('select.item {%s} set' % layer)
oldclips.append(lx.eval('texture.setIMap ?'))
# now delete the env items
for env in envs:
lx.eval('select.item %s set' % env)
if lx.eval('item.tag string HDRE ?') == 'set':
lx.eval('!!item.delete')
numgrplocs = lx.eval('query sceneservice groupLocator.N ? all')
grplocs = []
for x in xrange(numgrplocs):
grplocs.append(lx.eval('query sceneservice groupLocator.ID ? %s' % x))
for loc in grplocs:
lx.eval('select.item %s set' % loc)
if lx.eval('item.tag string HDRE ?') == 'set':
lx.eval('!!item.delete')
break
# clear old ground and water material groups
lx.eval('select.itemPattern HDREGroup')
id = lx.eval1('query sceneservice selection ? mask')
parent = lx.eval('query sceneservice mask.parent ? %s' % id)
lx.eval('select.item %s set' % parent)
lx.eval('texture.delete')
# clear old clips
for clip in oldclips:
lx.eval('select.drop item')
lx.eval('select.item {%s} set' % clip)
lx.eval('clip.delete')
except:
lx.out('Exception "%s" on line: %d' % (sys.exc_value, sys.exc_traceback.tb_lineno))
def renamenew(incr):
try:
lx.eval('item.name HDRECam item:{HDRECam%s}' % incr)
lx.eval('item.name HDRECamAnimate item:{HDRECamAnimate%s}' % incr)
lx.eval('item.name HDRESun item:{HDRESun%s}' % incr)
lx.eval('item.name HDRERefl item:{HDRERefl%s}' % incr)
lx.eval('item.name HDREBackplate item:{HDREBackplate%s}' % incr)
lx.eval('item.name HDREEnv item:{HDREEnv%s}' % incr)
lx.eval('item.name {HDREActivate} item:{HDREActivate%s}' % incr)
lx.eval('item.name {HDREWater} item:{HDREWater%s}' % incr)
lx.eval('item.name {HDREShadowGround} item:{HDREShadowGround%s}' % incr)
lx.eval('item.name {HDREControls} item:{HDREControls%s}' % incr)
lx.eval('item.name {BackdropBrowser} item:{BackdropBrowser%s}' % incr)
lx.eval('item.name {Texture Group} item:{Texture Group%s}' % incr)
lx.eval('item.name {HDREGroup} item:{HDREGroup%s}' % incr)
# rename the parent group
root = lx.eval('query sceneservice item.parent ? HDRECam')
rootname = lx.eval('query sceneservice item.name ? %s' % root)
newname = rootname.split(incr)[0]
lx.eval('item.name {%s} item:{%s}' % (newname, rootname))
except:
lx.out('Exception "%s" on line: %d' % (sys.exc_value, sys.exc_traceback.tb_lineno))
def tagitems():
try:
lx.eval('select.drop item')
for item in HDREEnvs:
lx.eval('select.item {%s} set' % item)
lx.eval('item.tag string {HDRE} {set}')
lx.eval('select.item {%s} set' % rootID)
lx.eval('item.tag string {HDRE} {set}')
except:
lx.out('Exception "%s" on line: %d' % (sys.exc_value, sys.exc_traceback.tb_lineno))
def setframesize():
try:
backplate = None
# find the backplate
envchildren = lx.eval('query sceneservice item.children ? HDREBackplate')
for child in envchildren:
if lx.eval('query sceneservice item.type ? {%s}' % child) == 'imageMap':
lx.eval('select.item %s set' % child)
backplate = lx.eval('texture.setIMap ?')
break
if backplate:
clip_width = None
clip_height = None
# set render frame size and film back aspect aspect
clips = lx.evalN('query layerservice clips ? all')
for clip in clips:
if lx.eval('query layerservice clip.name ? {%s}' % clip) == backplate:
info = lx.eval('query layerservice clip.info ? {%s}' % clip).split()
clip_width = float(info[1].split(':')[1])
clip_height = float(info[2].split(':')[1])
if clip_width != None and clip_height != None:
if clip_width > clip_height:
frame_width = 1024
frame_height = int((clip_height/clip_width) * 1024)
else:
frame_height = 1024
frame_width = int((clip_width/clip_height) * 1024)
lx.eval('render.res 0 %s' % frame_width)
lx.eval('render.res 1 %s' % frame_height)
except:
lx.out('Exception "%s" on line: %d' % (sys.exc_value, sys.exc_traceback.tb_lineno))
try:
# close previously open backdrop browser if there is one
if lx.eval('query scriptsysservice userValue.isDefined ? HDRE_Card'):
cookie = lx.eval('user.value HDRE_Card ?')
lx.eval('layout.createOrClose {%s} open:0' % cookie)
selectedItem = lx.eval1('query sceneservice selection ? locator')
rootID = lx.eval('query sceneservice item.parent ? %s' % selectedItem)
# check to see if an HDRE environment already exists and clear it out if it does.
# this is a bit of a hack, we have to test to see if one of our known items exists
# with an incremented name. If it does we delete all HDRE items with names that
# are not incremented and then rename all the ones that are - YUK!!!!
numcams = lx.eval('query sceneservice camera.N ? all')
for x in xrange(numcams):
camname = lx.eval('query sceneservice camera.name ? %s' % x)
if camname in camnames.keys():
incr = camnames[camname]
clearold()
renamenew(incr)
break
if itemexists('HDRECam'):
# set animate camera focal length
if itemexists('HDRECamAnimate'):
flength = round(lx.eval('item.channel focalLen ? item:HDRECam'), 3) * 1000
if flength >= 101 and flength <= 200:
flength = flength + 100
elif flength >= 51 and flength <= 100:
flength = flength + 50
elif flength >= 18 and flength <= 50:
flength = flength + 10
lx.eval('item.channel focalLen [%s mm] item:HDRECamAnimate' % flength)
lx.eval('render.camera HDRECamAnimate')
lockanimcamera()
lx.eval('render.camera HDRECam')
# group and lock the camera
lockcamera()
renID = lx.eval('query sceneservice polyRender.ID ? 0')
lx.eval('item.channel globEnable true item:%s' % renID)
lx.eval('item.channel dispRate 3 item:%s' % renID)
lx.eval('item.channel dispRatio 8 item:%s' % renID)
# set the scene gamma
numouts = lx.eval('query sceneservice renderOutput.N ? all')
for x in xrange(numouts):
id = lx.eval('query sceneservice renderOutput.ID ? %s' % x)
lx.eval('select.item %s set' % id)
if lx.eval('shader.setEffect ?') == 'shade.color':
lx.eval('item.channel gamma 2.2 item:%s' % id)
num_envs = lx.eval('query sceneservice environment.N ? all')
environments = []
for x in xrange(num_envs):
environments.append(lx.eval('query sceneservice environment.name ? %s' % x))
for env in environments:
if env not in HDREEnvs:
lx.eval('item.channel visCam false item:{%s}' % env)
lx.eval('item.channel visInd false item:{%s}' % env)
lx.eval('item.channel visRefl false item:{%s}' % env)
lx.eval('item.channel visRefr false item:{%s}' % env)
numlights = lx.eval('query sceneservice light.N ? all')
for x in xrange(numlights):
if lx.eval('query sceneservice light.name ? %s' % x) != 'HDRESun':
id = lx.eval('query sceneservice light.ID ? %s' % x)
lx.eval('layer.setVisibility {%s} 0' % id)
if itemexists('HDREActivate'):
lx.eval('layer.setVisibility {HDREActivate} 0')
controlsID = lx.eval('query sceneservice item.ID ? HDREControls')
if controlsID:
lx.eval('layer.setVisibility {%s} 1' % controlsID)
# set render frame size
setframesize()
tagitems()
except:
lx.out('Exception "%s" on line: %d' % (sys.exc_value, sys.exc_traceback.tb_lineno))
|
flexible
|
{
"blob_id": "78a96020abfd393438c2fce1dfd5fd159a23ca5a",
"index": 9666,
"step-1": "<mask token>\n\n\ndef itemexists(name):\n lx.eval('select.item {%s} set' % name)\n selected = lx.evalN('item.name ?')\n return name in selected\n\n\ndef lockcamera():\n if not itemexists('HDRECam_Grp'):\n lx.eval('select.drop item')\n lx.eval('group.create')\n lx.eval('item.name HDRECam_Grp')\n lx.eval('select.subItem HDRECam set camera')\n lx.eval('!!group.edit add item')\n lx.eval('select.item HDRECam_Grp set')\n lx.eval('item.channel lock on item:HDRECam_Grp')\n\n\n<mask token>\n\n\ndef hastag(item):\n lx.eval('select.drop item')\n lx.eval('select.item {%s} set' % item)\n if lx.eval('item.tag HDRE ?') == 'set':\n return true\n\n\ndef clearold():\n try:\n numenvs = lx.eval('query sceneservice environment.N ? all')\n envs = []\n oldclips = []\n for x in xrange(numenvs):\n envs.append(lx.eval('query sceneservice environment.ID ? %s' % x))\n for env in envs:\n lx.eval('select.item %s set' % env)\n if lx.eval('item.tag string HDRE ?') == 'set':\n layer, process = lx.eval(\n 'query sceneservice mask.children ? {%s}' % env)\n lx.eval('select.item {%s} set' % layer)\n oldclips.append(lx.eval('texture.setIMap ?'))\n for env in envs:\n lx.eval('select.item %s set' % env)\n if lx.eval('item.tag string HDRE ?') == 'set':\n lx.eval('!!item.delete')\n numgrplocs = lx.eval('query sceneservice groupLocator.N ? all')\n grplocs = []\n for x in xrange(numgrplocs):\n grplocs.append(lx.eval(\n 'query sceneservice groupLocator.ID ? %s' % x))\n for loc in grplocs:\n lx.eval('select.item %s set' % loc)\n if lx.eval('item.tag string HDRE ?') == 'set':\n lx.eval('!!item.delete')\n break\n lx.eval('select.itemPattern HDREGroup')\n id = lx.eval1('query sceneservice selection ? mask')\n parent = lx.eval('query sceneservice mask.parent ? 
%s' % id)\n lx.eval('select.item %s set' % parent)\n lx.eval('texture.delete')\n for clip in oldclips:\n lx.eval('select.drop item')\n lx.eval('select.item {%s} set' % clip)\n lx.eval('clip.delete')\n except:\n lx.out('Exception \"%s\" on line: %d' % (sys.exc_value, sys.\n exc_traceback.tb_lineno))\n\n\ndef renamenew(incr):\n try:\n lx.eval('item.name HDRECam item:{HDRECam%s}' % incr)\n lx.eval('item.name HDRECamAnimate item:{HDRECamAnimate%s}' % incr)\n lx.eval('item.name HDRESun item:{HDRESun%s}' % incr)\n lx.eval('item.name HDRERefl item:{HDRERefl%s}' % incr)\n lx.eval('item.name HDREBackplate item:{HDREBackplate%s}' % incr)\n lx.eval('item.name HDREEnv item:{HDREEnv%s}' % incr)\n lx.eval('item.name {HDREActivate} item:{HDREActivate%s}' % incr)\n lx.eval('item.name {HDREWater} item:{HDREWater%s}' % incr)\n lx.eval('item.name {HDREShadowGround} item:{HDREShadowGround%s}' % incr\n )\n lx.eval('item.name {HDREControls} item:{HDREControls%s}' % incr)\n lx.eval('item.name {BackdropBrowser} item:{BackdropBrowser%s}' % incr)\n lx.eval('item.name {Texture Group} item:{Texture Group%s}' % incr)\n lx.eval('item.name {HDREGroup} item:{HDREGroup%s}' % incr)\n root = lx.eval('query sceneservice item.parent ? HDRECam')\n rootname = lx.eval('query sceneservice item.name ? %s' % root)\n newname = rootname.split(incr)[0]\n lx.eval('item.name {%s} item:{%s}' % (newname, rootname))\n except:\n lx.out('Exception \"%s\" on line: %d' % (sys.exc_value, sys.\n exc_traceback.tb_lineno))\n\n\n<mask token>\n\n\ndef setframesize():\n try:\n backplate = None\n envchildren = lx.eval(\n 'query sceneservice item.children ? HDREBackplate')\n for child in envchildren:\n if lx.eval('query sceneservice item.type ? {%s}' % child\n ) == 'imageMap':\n lx.eval('select.item %s set' % child)\n backplate = lx.eval('texture.setIMap ?')\n break\n if backplate:\n clip_width = None\n clip_height = None\n clips = lx.evalN('query layerservice clips ? 
all')\n for clip in clips:\n if lx.eval('query layerservice clip.name ? {%s}' % clip\n ) == backplate:\n info = lx.eval('query layerservice clip.info ? {%s}' % clip\n ).split()\n clip_width = float(info[1].split(':')[1])\n clip_height = float(info[2].split(':')[1])\n if clip_width != None and clip_height != None:\n if clip_width > clip_height:\n frame_width = 1024\n frame_height = int(clip_height / clip_width * 1024)\n else:\n frame_height = 1024\n frame_width = int(clip_width / clip_height * 1024)\n lx.eval('render.res 0 %s' % frame_width)\n lx.eval('render.res 1 %s' % frame_height)\n except:\n lx.out('Exception \"%s\" on line: %d' % (sys.exc_value, sys.\n exc_traceback.tb_lineno))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef itemexists(name):\n lx.eval('select.item {%s} set' % name)\n selected = lx.evalN('item.name ?')\n return name in selected\n\n\ndef lockcamera():\n if not itemexists('HDRECam_Grp'):\n lx.eval('select.drop item')\n lx.eval('group.create')\n lx.eval('item.name HDRECam_Grp')\n lx.eval('select.subItem HDRECam set camera')\n lx.eval('!!group.edit add item')\n lx.eval('select.item HDRECam_Grp set')\n lx.eval('item.channel lock on item:HDRECam_Grp')\n\n\n<mask token>\n\n\ndef hastag(item):\n lx.eval('select.drop item')\n lx.eval('select.item {%s} set' % item)\n if lx.eval('item.tag HDRE ?') == 'set':\n return true\n\n\ndef clearold():\n try:\n numenvs = lx.eval('query sceneservice environment.N ? all')\n envs = []\n oldclips = []\n for x in xrange(numenvs):\n envs.append(lx.eval('query sceneservice environment.ID ? %s' % x))\n for env in envs:\n lx.eval('select.item %s set' % env)\n if lx.eval('item.tag string HDRE ?') == 'set':\n layer, process = lx.eval(\n 'query sceneservice mask.children ? {%s}' % env)\n lx.eval('select.item {%s} set' % layer)\n oldclips.append(lx.eval('texture.setIMap ?'))\n for env in envs:\n lx.eval('select.item %s set' % env)\n if lx.eval('item.tag string HDRE ?') == 'set':\n lx.eval('!!item.delete')\n numgrplocs = lx.eval('query sceneservice groupLocator.N ? all')\n grplocs = []\n for x in xrange(numgrplocs):\n grplocs.append(lx.eval(\n 'query sceneservice groupLocator.ID ? %s' % x))\n for loc in grplocs:\n lx.eval('select.item %s set' % loc)\n if lx.eval('item.tag string HDRE ?') == 'set':\n lx.eval('!!item.delete')\n break\n lx.eval('select.itemPattern HDREGroup')\n id = lx.eval1('query sceneservice selection ? mask')\n parent = lx.eval('query sceneservice mask.parent ? 
%s' % id)\n lx.eval('select.item %s set' % parent)\n lx.eval('texture.delete')\n for clip in oldclips:\n lx.eval('select.drop item')\n lx.eval('select.item {%s} set' % clip)\n lx.eval('clip.delete')\n except:\n lx.out('Exception \"%s\" on line: %d' % (sys.exc_value, sys.\n exc_traceback.tb_lineno))\n\n\ndef renamenew(incr):\n try:\n lx.eval('item.name HDRECam item:{HDRECam%s}' % incr)\n lx.eval('item.name HDRECamAnimate item:{HDRECamAnimate%s}' % incr)\n lx.eval('item.name HDRESun item:{HDRESun%s}' % incr)\n lx.eval('item.name HDRERefl item:{HDRERefl%s}' % incr)\n lx.eval('item.name HDREBackplate item:{HDREBackplate%s}' % incr)\n lx.eval('item.name HDREEnv item:{HDREEnv%s}' % incr)\n lx.eval('item.name {HDREActivate} item:{HDREActivate%s}' % incr)\n lx.eval('item.name {HDREWater} item:{HDREWater%s}' % incr)\n lx.eval('item.name {HDREShadowGround} item:{HDREShadowGround%s}' % incr\n )\n lx.eval('item.name {HDREControls} item:{HDREControls%s}' % incr)\n lx.eval('item.name {BackdropBrowser} item:{BackdropBrowser%s}' % incr)\n lx.eval('item.name {Texture Group} item:{Texture Group%s}' % incr)\n lx.eval('item.name {HDREGroup} item:{HDREGroup%s}' % incr)\n root = lx.eval('query sceneservice item.parent ? HDRECam')\n rootname = lx.eval('query sceneservice item.name ? %s' % root)\n newname = rootname.split(incr)[0]\n lx.eval('item.name {%s} item:{%s}' % (newname, rootname))\n except:\n lx.out('Exception \"%s\" on line: %d' % (sys.exc_value, sys.\n exc_traceback.tb_lineno))\n\n\ndef tagitems():\n try:\n lx.eval('select.drop item')\n for item in HDREEnvs:\n lx.eval('select.item {%s} set' % item)\n lx.eval('item.tag string {HDRE} {set}')\n lx.eval('select.item {%s} set' % rootID)\n lx.eval('item.tag string {HDRE} {set}')\n except:\n lx.out('Exception \"%s\" on line: %d' % (sys.exc_value, sys.\n exc_traceback.tb_lineno))\n\n\ndef setframesize():\n try:\n backplate = None\n envchildren = lx.eval(\n 'query sceneservice item.children ? 
HDREBackplate')\n for child in envchildren:\n if lx.eval('query sceneservice item.type ? {%s}' % child\n ) == 'imageMap':\n lx.eval('select.item %s set' % child)\n backplate = lx.eval('texture.setIMap ?')\n break\n if backplate:\n clip_width = None\n clip_height = None\n clips = lx.evalN('query layerservice clips ? all')\n for clip in clips:\n if lx.eval('query layerservice clip.name ? {%s}' % clip\n ) == backplate:\n info = lx.eval('query layerservice clip.info ? {%s}' % clip\n ).split()\n clip_width = float(info[1].split(':')[1])\n clip_height = float(info[2].split(':')[1])\n if clip_width != None and clip_height != None:\n if clip_width > clip_height:\n frame_width = 1024\n frame_height = int(clip_height / clip_width * 1024)\n else:\n frame_height = 1024\n frame_width = int(clip_width / clip_height * 1024)\n lx.eval('render.res 0 %s' % frame_width)\n lx.eval('render.res 1 %s' % frame_height)\n except:\n lx.out('Exception \"%s\" on line: %d' % (sys.exc_value, sys.\n exc_traceback.tb_lineno))\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef itemexists(name):\n lx.eval('select.item {%s} set' % name)\n selected = lx.evalN('item.name ?')\n return name in selected\n\n\ndef lockcamera():\n if not itemexists('HDRECam_Grp'):\n lx.eval('select.drop item')\n lx.eval('group.create')\n lx.eval('item.name HDRECam_Grp')\n lx.eval('select.subItem HDRECam set camera')\n lx.eval('!!group.edit add item')\n lx.eval('select.item HDRECam_Grp set')\n lx.eval('item.channel lock on item:HDRECam_Grp')\n\n\ndef lockanimcamera():\n if not itemexists('HDRECamAnimate_Grp'):\n lx.eval('select.drop item')\n lx.eval('group.create')\n lx.eval('item.name HDRECamAnimate_Grp')\n xfrmitem = lx.eval('query sceneservice item.xfrmPos ? HDRECamAnimate')\n lx.eval('select.channel {%s:pos.X} set' % xfrmitem)\n lx.eval('select.channel {%s:pos.Y} add' % xfrmitem)\n lx.eval('select.channel {%s:pos.Z} add' % xfrmitem)\n lx.eval('!!group.edit add chan')\n lx.eval('item.channel lock on item:HDRECamAnimate_Grp')\n\n\ndef hastag(item):\n lx.eval('select.drop item')\n lx.eval('select.item {%s} set' % item)\n if lx.eval('item.tag HDRE ?') == 'set':\n return true\n\n\ndef clearold():\n try:\n numenvs = lx.eval('query sceneservice environment.N ? all')\n envs = []\n oldclips = []\n for x in xrange(numenvs):\n envs.append(lx.eval('query sceneservice environment.ID ? %s' % x))\n for env in envs:\n lx.eval('select.item %s set' % env)\n if lx.eval('item.tag string HDRE ?') == 'set':\n layer, process = lx.eval(\n 'query sceneservice mask.children ? {%s}' % env)\n lx.eval('select.item {%s} set' % layer)\n oldclips.append(lx.eval('texture.setIMap ?'))\n for env in envs:\n lx.eval('select.item %s set' % env)\n if lx.eval('item.tag string HDRE ?') == 'set':\n lx.eval('!!item.delete')\n numgrplocs = lx.eval('query sceneservice groupLocator.N ? all')\n grplocs = []\n for x in xrange(numgrplocs):\n grplocs.append(lx.eval(\n 'query sceneservice groupLocator.ID ? 
%s' % x))\n for loc in grplocs:\n lx.eval('select.item %s set' % loc)\n if lx.eval('item.tag string HDRE ?') == 'set':\n lx.eval('!!item.delete')\n break\n lx.eval('select.itemPattern HDREGroup')\n id = lx.eval1('query sceneservice selection ? mask')\n parent = lx.eval('query sceneservice mask.parent ? %s' % id)\n lx.eval('select.item %s set' % parent)\n lx.eval('texture.delete')\n for clip in oldclips:\n lx.eval('select.drop item')\n lx.eval('select.item {%s} set' % clip)\n lx.eval('clip.delete')\n except:\n lx.out('Exception \"%s\" on line: %d' % (sys.exc_value, sys.\n exc_traceback.tb_lineno))\n\n\ndef renamenew(incr):\n try:\n lx.eval('item.name HDRECam item:{HDRECam%s}' % incr)\n lx.eval('item.name HDRECamAnimate item:{HDRECamAnimate%s}' % incr)\n lx.eval('item.name HDRESun item:{HDRESun%s}' % incr)\n lx.eval('item.name HDRERefl item:{HDRERefl%s}' % incr)\n lx.eval('item.name HDREBackplate item:{HDREBackplate%s}' % incr)\n lx.eval('item.name HDREEnv item:{HDREEnv%s}' % incr)\n lx.eval('item.name {HDREActivate} item:{HDREActivate%s}' % incr)\n lx.eval('item.name {HDREWater} item:{HDREWater%s}' % incr)\n lx.eval('item.name {HDREShadowGround} item:{HDREShadowGround%s}' % incr\n )\n lx.eval('item.name {HDREControls} item:{HDREControls%s}' % incr)\n lx.eval('item.name {BackdropBrowser} item:{BackdropBrowser%s}' % incr)\n lx.eval('item.name {Texture Group} item:{Texture Group%s}' % incr)\n lx.eval('item.name {HDREGroup} item:{HDREGroup%s}' % incr)\n root = lx.eval('query sceneservice item.parent ? HDRECam')\n rootname = lx.eval('query sceneservice item.name ? 
%s' % root)\n newname = rootname.split(incr)[0]\n lx.eval('item.name {%s} item:{%s}' % (newname, rootname))\n except:\n lx.out('Exception \"%s\" on line: %d' % (sys.exc_value, sys.\n exc_traceback.tb_lineno))\n\n\ndef tagitems():\n try:\n lx.eval('select.drop item')\n for item in HDREEnvs:\n lx.eval('select.item {%s} set' % item)\n lx.eval('item.tag string {HDRE} {set}')\n lx.eval('select.item {%s} set' % rootID)\n lx.eval('item.tag string {HDRE} {set}')\n except:\n lx.out('Exception \"%s\" on line: %d' % (sys.exc_value, sys.\n exc_traceback.tb_lineno))\n\n\ndef setframesize():\n try:\n backplate = None\n envchildren = lx.eval(\n 'query sceneservice item.children ? HDREBackplate')\n for child in envchildren:\n if lx.eval('query sceneservice item.type ? {%s}' % child\n ) == 'imageMap':\n lx.eval('select.item %s set' % child)\n backplate = lx.eval('texture.setIMap ?')\n break\n if backplate:\n clip_width = None\n clip_height = None\n clips = lx.evalN('query layerservice clips ? all')\n for clip in clips:\n if lx.eval('query layerservice clip.name ? {%s}' % clip\n ) == backplate:\n info = lx.eval('query layerservice clip.info ? {%s}' % clip\n ).split()\n clip_width = float(info[1].split(':')[1])\n clip_height = float(info[2].split(':')[1])\n if clip_width != None and clip_height != None:\n if clip_width > clip_height:\n frame_width = 1024\n frame_height = int(clip_height / clip_width * 1024)\n else:\n frame_height = 1024\n frame_width = int(clip_width / clip_height * 1024)\n lx.eval('render.res 0 %s' % frame_width)\n lx.eval('render.res 1 %s' % frame_height)\n except:\n lx.out('Exception \"%s\" on line: %d' % (sys.exc_value, sys.\n exc_traceback.tb_lineno))\n\n\ntry:\n if lx.eval('query scriptsysservice userValue.isDefined ? HDRE_Card'):\n cookie = lx.eval('user.value HDRE_Card ?')\n lx.eval('layout.createOrClose {%s} open:0' % cookie)\n selectedItem = lx.eval1('query sceneservice selection ? locator')\n rootID = lx.eval('query sceneservice item.parent ? 
%s' % selectedItem)\n numcams = lx.eval('query sceneservice camera.N ? all')\n for x in xrange(numcams):\n camname = lx.eval('query sceneservice camera.name ? %s' % x)\n if camname in camnames.keys():\n incr = camnames[camname]\n clearold()\n renamenew(incr)\n break\n if itemexists('HDRECam'):\n if itemexists('HDRECamAnimate'):\n flength = round(lx.eval('item.channel focalLen ? item:HDRECam'), 3\n ) * 1000\n if flength >= 101 and flength <= 200:\n flength = flength + 100\n elif flength >= 51 and flength <= 100:\n flength = flength + 50\n elif flength >= 18 and flength <= 50:\n flength = flength + 10\n lx.eval('item.channel focalLen [%s mm] item:HDRECamAnimate' %\n flength)\n lx.eval('render.camera HDRECamAnimate')\n lockanimcamera()\n lx.eval('render.camera HDRECam')\n lockcamera()\n renID = lx.eval('query sceneservice polyRender.ID ? 0')\n lx.eval('item.channel globEnable true item:%s' % renID)\n lx.eval('item.channel dispRate 3 item:%s' % renID)\n lx.eval('item.channel dispRatio 8 item:%s' % renID)\n numouts = lx.eval('query sceneservice renderOutput.N ? all')\n for x in xrange(numouts):\n id = lx.eval('query sceneservice renderOutput.ID ? %s' % x)\n lx.eval('select.item %s set' % id)\n if lx.eval('shader.setEffect ?') == 'shade.color':\n lx.eval('item.channel gamma 2.2 item:%s' % id)\n num_envs = lx.eval('query sceneservice environment.N ? all')\n environments = []\n for x in xrange(num_envs):\n environments.append(lx.eval(\n 'query sceneservice environment.name ? %s' % x))\n for env in environments:\n if env not in HDREEnvs:\n lx.eval('item.channel visCam false item:{%s}' % env)\n lx.eval('item.channel visInd false item:{%s}' % env)\n lx.eval('item.channel visRefl false item:{%s}' % env)\n lx.eval('item.channel visRefr false item:{%s}' % env)\n numlights = lx.eval('query sceneservice light.N ? all')\n for x in xrange(numlights):\n if lx.eval('query sceneservice light.name ? %s' % x) != 'HDRESun':\n id = lx.eval('query sceneservice light.ID ? 
%s' % x)\n lx.eval('layer.setVisibility {%s} 0' % id)\n if itemexists('HDREActivate'):\n lx.eval('layer.setVisibility {HDREActivate} 0')\n controlsID = lx.eval('query sceneservice item.ID ? HDREControls')\n if controlsID:\n lx.eval('layer.setVisibility {%s} 1' % controlsID)\n setframesize()\n tagitems()\nexcept:\n lx.out('Exception \"%s\" on line: %d' % (sys.exc_value, sys.exc_traceback\n .tb_lineno))\n",
"step-4": "camnames = {'HDRECam (2)': ' (2)', 'HDRECam(2)': '(2)', 'HDRECam 2': ' 2',\n 'HDRECam_2': '_2', 'HDRECam2': '2'}\nHDREEnvs = ['HDRERefl', 'HDREBackplate', 'HDREEnv']\n\n\ndef itemexists(name):\n lx.eval('select.item {%s} set' % name)\n selected = lx.evalN('item.name ?')\n return name in selected\n\n\ndef lockcamera():\n if not itemexists('HDRECam_Grp'):\n lx.eval('select.drop item')\n lx.eval('group.create')\n lx.eval('item.name HDRECam_Grp')\n lx.eval('select.subItem HDRECam set camera')\n lx.eval('!!group.edit add item')\n lx.eval('select.item HDRECam_Grp set')\n lx.eval('item.channel lock on item:HDRECam_Grp')\n\n\ndef lockanimcamera():\n if not itemexists('HDRECamAnimate_Grp'):\n lx.eval('select.drop item')\n lx.eval('group.create')\n lx.eval('item.name HDRECamAnimate_Grp')\n xfrmitem = lx.eval('query sceneservice item.xfrmPos ? HDRECamAnimate')\n lx.eval('select.channel {%s:pos.X} set' % xfrmitem)\n lx.eval('select.channel {%s:pos.Y} add' % xfrmitem)\n lx.eval('select.channel {%s:pos.Z} add' % xfrmitem)\n lx.eval('!!group.edit add chan')\n lx.eval('item.channel lock on item:HDRECamAnimate_Grp')\n\n\ndef hastag(item):\n lx.eval('select.drop item')\n lx.eval('select.item {%s} set' % item)\n if lx.eval('item.tag HDRE ?') == 'set':\n return true\n\n\ndef clearold():\n try:\n numenvs = lx.eval('query sceneservice environment.N ? all')\n envs = []\n oldclips = []\n for x in xrange(numenvs):\n envs.append(lx.eval('query sceneservice environment.ID ? %s' % x))\n for env in envs:\n lx.eval('select.item %s set' % env)\n if lx.eval('item.tag string HDRE ?') == 'set':\n layer, process = lx.eval(\n 'query sceneservice mask.children ? {%s}' % env)\n lx.eval('select.item {%s} set' % layer)\n oldclips.append(lx.eval('texture.setIMap ?'))\n for env in envs:\n lx.eval('select.item %s set' % env)\n if lx.eval('item.tag string HDRE ?') == 'set':\n lx.eval('!!item.delete')\n numgrplocs = lx.eval('query sceneservice groupLocator.N ? 
all')\n grplocs = []\n for x in xrange(numgrplocs):\n grplocs.append(lx.eval(\n 'query sceneservice groupLocator.ID ? %s' % x))\n for loc in grplocs:\n lx.eval('select.item %s set' % loc)\n if lx.eval('item.tag string HDRE ?') == 'set':\n lx.eval('!!item.delete')\n break\n lx.eval('select.itemPattern HDREGroup')\n id = lx.eval1('query sceneservice selection ? mask')\n parent = lx.eval('query sceneservice mask.parent ? %s' % id)\n lx.eval('select.item %s set' % parent)\n lx.eval('texture.delete')\n for clip in oldclips:\n lx.eval('select.drop item')\n lx.eval('select.item {%s} set' % clip)\n lx.eval('clip.delete')\n except:\n lx.out('Exception \"%s\" on line: %d' % (sys.exc_value, sys.\n exc_traceback.tb_lineno))\n\n\ndef renamenew(incr):\n try:\n lx.eval('item.name HDRECam item:{HDRECam%s}' % incr)\n lx.eval('item.name HDRECamAnimate item:{HDRECamAnimate%s}' % incr)\n lx.eval('item.name HDRESun item:{HDRESun%s}' % incr)\n lx.eval('item.name HDRERefl item:{HDRERefl%s}' % incr)\n lx.eval('item.name HDREBackplate item:{HDREBackplate%s}' % incr)\n lx.eval('item.name HDREEnv item:{HDREEnv%s}' % incr)\n lx.eval('item.name {HDREActivate} item:{HDREActivate%s}' % incr)\n lx.eval('item.name {HDREWater} item:{HDREWater%s}' % incr)\n lx.eval('item.name {HDREShadowGround} item:{HDREShadowGround%s}' % incr\n )\n lx.eval('item.name {HDREControls} item:{HDREControls%s}' % incr)\n lx.eval('item.name {BackdropBrowser} item:{BackdropBrowser%s}' % incr)\n lx.eval('item.name {Texture Group} item:{Texture Group%s}' % incr)\n lx.eval('item.name {HDREGroup} item:{HDREGroup%s}' % incr)\n root = lx.eval('query sceneservice item.parent ? HDRECam')\n rootname = lx.eval('query sceneservice item.name ? 
%s' % root)\n newname = rootname.split(incr)[0]\n lx.eval('item.name {%s} item:{%s}' % (newname, rootname))\n except:\n lx.out('Exception \"%s\" on line: %d' % (sys.exc_value, sys.\n exc_traceback.tb_lineno))\n\n\ndef tagitems():\n try:\n lx.eval('select.drop item')\n for item in HDREEnvs:\n lx.eval('select.item {%s} set' % item)\n lx.eval('item.tag string {HDRE} {set}')\n lx.eval('select.item {%s} set' % rootID)\n lx.eval('item.tag string {HDRE} {set}')\n except:\n lx.out('Exception \"%s\" on line: %d' % (sys.exc_value, sys.\n exc_traceback.tb_lineno))\n\n\ndef setframesize():\n try:\n backplate = None\n envchildren = lx.eval(\n 'query sceneservice item.children ? HDREBackplate')\n for child in envchildren:\n if lx.eval('query sceneservice item.type ? {%s}' % child\n ) == 'imageMap':\n lx.eval('select.item %s set' % child)\n backplate = lx.eval('texture.setIMap ?')\n break\n if backplate:\n clip_width = None\n clip_height = None\n clips = lx.evalN('query layerservice clips ? all')\n for clip in clips:\n if lx.eval('query layerservice clip.name ? {%s}' % clip\n ) == backplate:\n info = lx.eval('query layerservice clip.info ? {%s}' % clip\n ).split()\n clip_width = float(info[1].split(':')[1])\n clip_height = float(info[2].split(':')[1])\n if clip_width != None and clip_height != None:\n if clip_width > clip_height:\n frame_width = 1024\n frame_height = int(clip_height / clip_width * 1024)\n else:\n frame_height = 1024\n frame_width = int(clip_width / clip_height * 1024)\n lx.eval('render.res 0 %s' % frame_width)\n lx.eval('render.res 1 %s' % frame_height)\n except:\n lx.out('Exception \"%s\" on line: %d' % (sys.exc_value, sys.\n exc_traceback.tb_lineno))\n\n\ntry:\n if lx.eval('query scriptsysservice userValue.isDefined ? HDRE_Card'):\n cookie = lx.eval('user.value HDRE_Card ?')\n lx.eval('layout.createOrClose {%s} open:0' % cookie)\n selectedItem = lx.eval1('query sceneservice selection ? locator')\n rootID = lx.eval('query sceneservice item.parent ? 
%s' % selectedItem)\n numcams = lx.eval('query sceneservice camera.N ? all')\n for x in xrange(numcams):\n camname = lx.eval('query sceneservice camera.name ? %s' % x)\n if camname in camnames.keys():\n incr = camnames[camname]\n clearold()\n renamenew(incr)\n break\n if itemexists('HDRECam'):\n if itemexists('HDRECamAnimate'):\n flength = round(lx.eval('item.channel focalLen ? item:HDRECam'), 3\n ) * 1000\n if flength >= 101 and flength <= 200:\n flength = flength + 100\n elif flength >= 51 and flength <= 100:\n flength = flength + 50\n elif flength >= 18 and flength <= 50:\n flength = flength + 10\n lx.eval('item.channel focalLen [%s mm] item:HDRECamAnimate' %\n flength)\n lx.eval('render.camera HDRECamAnimate')\n lockanimcamera()\n lx.eval('render.camera HDRECam')\n lockcamera()\n renID = lx.eval('query sceneservice polyRender.ID ? 0')\n lx.eval('item.channel globEnable true item:%s' % renID)\n lx.eval('item.channel dispRate 3 item:%s' % renID)\n lx.eval('item.channel dispRatio 8 item:%s' % renID)\n numouts = lx.eval('query sceneservice renderOutput.N ? all')\n for x in xrange(numouts):\n id = lx.eval('query sceneservice renderOutput.ID ? %s' % x)\n lx.eval('select.item %s set' % id)\n if lx.eval('shader.setEffect ?') == 'shade.color':\n lx.eval('item.channel gamma 2.2 item:%s' % id)\n num_envs = lx.eval('query sceneservice environment.N ? all')\n environments = []\n for x in xrange(num_envs):\n environments.append(lx.eval(\n 'query sceneservice environment.name ? %s' % x))\n for env in environments:\n if env not in HDREEnvs:\n lx.eval('item.channel visCam false item:{%s}' % env)\n lx.eval('item.channel visInd false item:{%s}' % env)\n lx.eval('item.channel visRefl false item:{%s}' % env)\n lx.eval('item.channel visRefr false item:{%s}' % env)\n numlights = lx.eval('query sceneservice light.N ? all')\n for x in xrange(numlights):\n if lx.eval('query sceneservice light.name ? %s' % x) != 'HDRESun':\n id = lx.eval('query sceneservice light.ID ? 
%s' % x)\n lx.eval('layer.setVisibility {%s} 0' % id)\n if itemexists('HDREActivate'):\n lx.eval('layer.setVisibility {HDREActivate} 0')\n controlsID = lx.eval('query sceneservice item.ID ? HDREControls')\n if controlsID:\n lx.eval('layer.setVisibility {%s} 1' % controlsID)\n setframesize()\n tagitems()\nexcept:\n lx.out('Exception \"%s\" on line: %d' % (sys.exc_value, sys.exc_traceback\n .tb_lineno))\n",
"step-5": "#!/usr/bin/env python\n\n################################################################################\n#\n# HDREEnable.py\n#\n# Version: 1.000\n#\n# Author: Gwynne Reddick\n#\n# Description:\n# \n#\n# Usage: \n#\n# Last Update 16:49 08/12/10 \n#\n################################################################################\n\n# part of a hack for later on so we can identify if a second HDRE assembly has been applied\ncamnames = {'HDRECam (2)':' (2)',\n 'HDRECam(2)':'(2)',\n 'HDRECam 2':' 2',\n 'HDRECam_2':'_2',\n 'HDRECam2':'2'}\n\nHDREEnvs = ['HDRERefl', 'HDREBackplate', 'HDREEnv']\n\ndef itemexists(name):\n lx.eval('select.item {%s} set' % name)\n selected = lx.evalN('item.name ?')\n return name in selected\n\ndef lockcamera():\n if not itemexists('HDRECam_Grp'):\n lx.eval('select.drop item')\n lx.eval('group.create')\n lx.eval('item.name HDRECam_Grp')\n lx.eval('select.subItem HDRECam set camera')\n lx.eval('!!group.edit add item')\n lx.eval('select.item HDRECam_Grp set')\n lx.eval('item.channel lock on item:HDRECam_Grp')\n\ndef lockanimcamera():\n if not itemexists('HDRECamAnimate_Grp'):\n lx.eval('select.drop item')\n lx.eval('group.create')\n lx.eval('item.name HDRECamAnimate_Grp')\n xfrmitem = lx.eval('query sceneservice item.xfrmPos ? HDRECamAnimate')\n lx.eval('select.channel {%s:pos.X} set' % xfrmitem)\n lx.eval('select.channel {%s:pos.Y} add' % xfrmitem)\n lx.eval('select.channel {%s:pos.Z} add' % xfrmitem)\n lx.eval('!!group.edit add chan')\n lx.eval('item.channel lock on item:HDRECamAnimate_Grp')\n \n\ndef hastag(item):\n lx.eval('select.drop item')\n lx.eval('select.item {%s} set' % item)\n if lx.eval('item.tag HDRE ?') == 'set':\n return true\n\ndef clearold():\n try:\n numenvs = lx.eval('query sceneservice environment.N ? all')\n envs = []\n oldclips = []\n for x in xrange(numenvs):\n envs.append(lx.eval('query sceneservice environment.ID ? %s' % x))\n # need a hack here to work round what appears to be a bug. 
We need to collect a\n # list of clips to delete after deleting the env items. For some reason we have\n # to collect the list in one loop, then delete the env items in a second loop\n # otherwise querying the env refl image returns None. I think this is because the\n # env image layer is originally an instance\n for env in envs:\n lx.eval('select.item %s set' % env)\n if lx.eval('item.tag string HDRE ?') == 'set':\n layer, process = lx.eval('query sceneservice mask.children ? {%s}' % env)\n lx.eval('select.item {%s} set' % layer)\n oldclips.append(lx.eval('texture.setIMap ?'))\n # now delete the env items\n for env in envs:\n lx.eval('select.item %s set' % env)\n if lx.eval('item.tag string HDRE ?') == 'set':\n lx.eval('!!item.delete')\n numgrplocs = lx.eval('query sceneservice groupLocator.N ? all')\n grplocs = []\n for x in xrange(numgrplocs):\n grplocs.append(lx.eval('query sceneservice groupLocator.ID ? %s' % x))\n for loc in grplocs:\n lx.eval('select.item %s set' % loc)\n if lx.eval('item.tag string HDRE ?') == 'set':\n lx.eval('!!item.delete')\n break\n \n # clear old ground and water material groups\n lx.eval('select.itemPattern HDREGroup')\n id = lx.eval1('query sceneservice selection ? mask')\n parent = lx.eval('query sceneservice mask.parent ? 
%s' % id)\n lx.eval('select.item %s set' % parent)\n lx.eval('texture.delete')\n \n # clear old clips\n for clip in oldclips:\n lx.eval('select.drop item')\n lx.eval('select.item {%s} set' % clip)\n lx.eval('clip.delete')\n except:\n lx.out('Exception \"%s\" on line: %d' % (sys.exc_value, sys.exc_traceback.tb_lineno))\n\ndef renamenew(incr):\n try:\n lx.eval('item.name HDRECam item:{HDRECam%s}' % incr)\n lx.eval('item.name HDRECamAnimate item:{HDRECamAnimate%s}' % incr)\n lx.eval('item.name HDRESun item:{HDRESun%s}' % incr)\n lx.eval('item.name HDRERefl item:{HDRERefl%s}' % incr)\n lx.eval('item.name HDREBackplate item:{HDREBackplate%s}' % incr)\n lx.eval('item.name HDREEnv item:{HDREEnv%s}' % incr)\n lx.eval('item.name {HDREActivate} item:{HDREActivate%s}' % incr)\n lx.eval('item.name {HDREWater} item:{HDREWater%s}' % incr)\n lx.eval('item.name {HDREShadowGround} item:{HDREShadowGround%s}' % incr)\n lx.eval('item.name {HDREControls} item:{HDREControls%s}' % incr)\n lx.eval('item.name {BackdropBrowser} item:{BackdropBrowser%s}' % incr)\n lx.eval('item.name {Texture Group} item:{Texture Group%s}' % incr)\n lx.eval('item.name {HDREGroup} item:{HDREGroup%s}' % incr)\n # rename the parent group\n root = lx.eval('query sceneservice item.parent ? HDRECam')\n rootname = lx.eval('query sceneservice item.name ? 
%s' % root)\n newname = rootname.split(incr)[0]\n lx.eval('item.name {%s} item:{%s}' % (newname, rootname))\n\n except:\n lx.out('Exception \"%s\" on line: %d' % (sys.exc_value, sys.exc_traceback.tb_lineno))\n\ndef tagitems():\n try:\n lx.eval('select.drop item')\n for item in HDREEnvs:\n lx.eval('select.item {%s} set' % item)\n lx.eval('item.tag string {HDRE} {set}')\n lx.eval('select.item {%s} set' % rootID)\n lx.eval('item.tag string {HDRE} {set}')\n except:\n lx.out('Exception \"%s\" on line: %d' % (sys.exc_value, sys.exc_traceback.tb_lineno))\n \n\ndef setframesize():\n try:\n backplate = None\n # find the backplate\n envchildren = lx.eval('query sceneservice item.children ? HDREBackplate')\n for child in envchildren:\n if lx.eval('query sceneservice item.type ? {%s}' % child) == 'imageMap':\n lx.eval('select.item %s set' % child)\n backplate = lx.eval('texture.setIMap ?')\n break\n if backplate:\n clip_width = None\n clip_height = None\n # set render frame size and film back aspect aspect\n clips = lx.evalN('query layerservice clips ? all')\n for clip in clips:\n if lx.eval('query layerservice clip.name ? {%s}' % clip) == backplate:\n info = lx.eval('query layerservice clip.info ? {%s}' % clip).split()\n clip_width = float(info[1].split(':')[1])\n clip_height = float(info[2].split(':')[1])\n \n if clip_width != None and clip_height != None:\n if clip_width > clip_height:\n frame_width = 1024\n frame_height = int((clip_height/clip_width) * 1024)\n else:\n frame_height = 1024\n frame_width = int((clip_width/clip_height) * 1024)\n lx.eval('render.res 0 %s' % frame_width)\n lx.eval('render.res 1 %s' % frame_height)\n except:\n lx.out('Exception \"%s\" on line: %d' % (sys.exc_value, sys.exc_traceback.tb_lineno))\n\ntry:\n # close previously open backdrop browser if there is one\n if lx.eval('query scriptsysservice userValue.isDefined ? 
HDRE_Card'):\n cookie = lx.eval('user.value HDRE_Card ?')\n lx.eval('layout.createOrClose {%s} open:0' % cookie)\n \n selectedItem = lx.eval1('query sceneservice selection ? locator')\n rootID = lx.eval('query sceneservice item.parent ? %s' % selectedItem)\n \n # check to see if an HDRE environment already exists and clear it out if it does.\n # this is a bit of a hack, we have to test to see if one of our known items exists\n # with an incremented name. If it does we delete all HDRE items with names that\n # are not incremented and then rename all the ones that are - YUK!!!!\n \n numcams = lx.eval('query sceneservice camera.N ? all')\n for x in xrange(numcams):\n camname = lx.eval('query sceneservice camera.name ? %s' % x)\n if camname in camnames.keys():\n incr = camnames[camname]\n clearold()\n renamenew(incr)\n break\n \n if itemexists('HDRECam'):\n # set animate camera focal length\n if itemexists('HDRECamAnimate'):\n flength = round(lx.eval('item.channel focalLen ? item:HDRECam'), 3) * 1000\n if flength >= 101 and flength <= 200:\n flength = flength + 100\n elif flength >= 51 and flength <= 100:\n flength = flength + 50\n elif flength >= 18 and flength <= 50:\n flength = flength + 10\n lx.eval('item.channel focalLen [%s mm] item:HDRECamAnimate' % flength)\n lx.eval('render.camera HDRECamAnimate')\n lockanimcamera()\n lx.eval('render.camera HDRECam')\n # group and lock the camera\n lockcamera()\n \n renID = lx.eval('query sceneservice polyRender.ID ? 0')\n lx.eval('item.channel globEnable true item:%s' % renID)\n lx.eval('item.channel dispRate 3 item:%s' % renID)\n lx.eval('item.channel dispRatio 8 item:%s' % renID)\n # set the scene gamma\n numouts = lx.eval('query sceneservice renderOutput.N ? all')\n for x in xrange(numouts):\n id = lx.eval('query sceneservice renderOutput.ID ? 
%s' % x)\n lx.eval('select.item %s set' % id)\n if lx.eval('shader.setEffect ?') == 'shade.color':\n lx.eval('item.channel gamma 2.2 item:%s' % id)\n \n num_envs = lx.eval('query sceneservice environment.N ? all')\n environments = []\n for x in xrange(num_envs):\n environments.append(lx.eval('query sceneservice environment.name ? %s' % x))\n for env in environments:\n if env not in HDREEnvs:\n lx.eval('item.channel visCam false item:{%s}' % env)\n lx.eval('item.channel visInd false item:{%s}' % env)\n lx.eval('item.channel visRefl false item:{%s}' % env)\n lx.eval('item.channel visRefr false item:{%s}' % env)\n \n numlights = lx.eval('query sceneservice light.N ? all')\n for x in xrange(numlights):\n if lx.eval('query sceneservice light.name ? %s' % x) != 'HDRESun':\n id = lx.eval('query sceneservice light.ID ? %s' % x)\n lx.eval('layer.setVisibility {%s} 0' % id)\n \n if itemexists('HDREActivate'):\n lx.eval('layer.setVisibility {HDREActivate} 0')\n \n controlsID = lx.eval('query sceneservice item.ID ? HDREControls')\n if controlsID:\n lx.eval('layer.setVisibility {%s} 1' % controlsID)\n \n # set render frame size\n setframesize()\n tagitems()\n \nexcept:\n lx.out('Exception \"%s\" on line: %d' % (sys.exc_value, sys.exc_traceback.tb_lineno))\n",
"step-ids": [
6,
7,
9,
10,
11
]
}
|
[
6,
7,
9,
10,
11
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
os.system('psfex -dd > config.psfex')
if ic.use_backsub:
prefix = 'b'
else:
prefix = ''
<|reserved_special_token_0|>
f.write('\n')
f.write('#############################' + '\n')
f.write('##### Scripts for PSFEx #####' + '\n')
f.write('#############################' + '\n')
f.write('\n')
for i in np.arange(len(ic.fields)):
f.write('# ----- HSC field : ' + ic.fields[i] + ' ----- #' + '\n')
f.write('\n')
for j in np.arange(len(ic.filters)):
flt = ic.filters[j].split('-')[1]
f.write('rm -rfv prepsfex_' + flt + '.cat\n')
f.write('sex Images/' + prefix + ic.fields[i] + '-' + flt +
'.fits -c prepsfex.sex -CATALOG_NAME prepsfex_' + flt + '.cat ')
f.write('-DETECT_THRESH {0:.1f} -ANALYSIS_THRESH {0:.1f} '.format(
ic.THRES_psf))
f.write(
f"""-MAG_ZEROPOINT {ic.MAG0:.1f} -GAIN {ic.GAIN0[i][j]:.1f} -SEEING_FWHM {ic.SEEING0:.2f}
"""
)
f.write('sex Images/' + prefix + ic.fields[i] + '-' + flt +
'.fits -c prepsfex.sex -CATALOG_NAME prepsfex_' + ic.fields[i] +
'-' + flt + '.cat -CATALOG_TYPE ASCII_HEAD ')
f.write('-DETECT_THRESH {0:.1f} -ANALYSIS_THRESH {0:.1f} '.format(
ic.THRES_psf))
f.write(
f"""-MAG_ZEROPOINT {ic.MAG0:.1f} -GAIN {ic.GAIN0[i][j]:.1f} -SEEING_FWHM {ic.SEEING0:.2f}
"""
)
f.write('psfex prepsfex_' + flt + '.cat -c config.psfex ')
f.write(
f'-SAMPLE_FWHMRANGE {ic.FWHMR_psf[0]:.1f},{ic.FWHMR_psf[1]:.1f} ')
f.write(
f'-SAMPLE_MINSN {ic.MINSN_psf:.1f} -SAMPLE_MAXELLIP {ic.MAXEL_psf:.2f} '
)
f.write('-OUTCAT_TYPE ASCII_HEAD -OUTCAT_NAME psf_' + ic.fields[i] +
'-' + flt + '.cat ')
f.write('-CHECKPLOT_TYPE NONE -XML_NAME psf_' + ic.fields[i] + '-' +
flt + '.xml\n')
f.write('mv -v prepsfex_' + flt + '.psf psf_' + ic.fields[i] + '-' +
flt + '.psf\n')
f.write('\n')
f.write('\n\n')
f.close()
if glob.glob('PSFEx/') == []:
os.system('mkdir PSFEx')
else:
os.system('rm -rfv PSFEx/*')
os.system('sh psfex_all.sh')
os.system('mv -v psf_*.cat psf_*.xml psf_*.psf PSFEx/')
os.system('mv -v prepsfex_*-*.cat PSFEx/')
os.system('rm -rfv ./*.fits prepsfex_*.cat')
print('--- %s seconds ---' % (time.time() - start_time))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
start_time = time.time()
<|reserved_special_token_0|>
os.system('psfex -dd > config.psfex')
if ic.use_backsub:
prefix = 'b'
else:
prefix = ''
f = open('psfex_all.sh', 'w')
f.write('\n')
f.write('#############################' + '\n')
f.write('##### Scripts for PSFEx #####' + '\n')
f.write('#############################' + '\n')
f.write('\n')
for i in np.arange(len(ic.fields)):
f.write('# ----- HSC field : ' + ic.fields[i] + ' ----- #' + '\n')
f.write('\n')
for j in np.arange(len(ic.filters)):
flt = ic.filters[j].split('-')[1]
f.write('rm -rfv prepsfex_' + flt + '.cat\n')
f.write('sex Images/' + prefix + ic.fields[i] + '-' + flt +
'.fits -c prepsfex.sex -CATALOG_NAME prepsfex_' + flt + '.cat ')
f.write('-DETECT_THRESH {0:.1f} -ANALYSIS_THRESH {0:.1f} '.format(
ic.THRES_psf))
f.write(
f"""-MAG_ZEROPOINT {ic.MAG0:.1f} -GAIN {ic.GAIN0[i][j]:.1f} -SEEING_FWHM {ic.SEEING0:.2f}
"""
)
f.write('sex Images/' + prefix + ic.fields[i] + '-' + flt +
'.fits -c prepsfex.sex -CATALOG_NAME prepsfex_' + ic.fields[i] +
'-' + flt + '.cat -CATALOG_TYPE ASCII_HEAD ')
f.write('-DETECT_THRESH {0:.1f} -ANALYSIS_THRESH {0:.1f} '.format(
ic.THRES_psf))
f.write(
f"""-MAG_ZEROPOINT {ic.MAG0:.1f} -GAIN {ic.GAIN0[i][j]:.1f} -SEEING_FWHM {ic.SEEING0:.2f}
"""
)
f.write('psfex prepsfex_' + flt + '.cat -c config.psfex ')
f.write(
f'-SAMPLE_FWHMRANGE {ic.FWHMR_psf[0]:.1f},{ic.FWHMR_psf[1]:.1f} ')
f.write(
f'-SAMPLE_MINSN {ic.MINSN_psf:.1f} -SAMPLE_MAXELLIP {ic.MAXEL_psf:.2f} '
)
f.write('-OUTCAT_TYPE ASCII_HEAD -OUTCAT_NAME psf_' + ic.fields[i] +
'-' + flt + '.cat ')
f.write('-CHECKPLOT_TYPE NONE -XML_NAME psf_' + ic.fields[i] + '-' +
flt + '.xml\n')
f.write('mv -v prepsfex_' + flt + '.psf psf_' + ic.fields[i] + '-' +
flt + '.psf\n')
f.write('\n')
f.write('\n\n')
f.close()
if glob.glob('PSFEx/') == []:
os.system('mkdir PSFEx')
else:
os.system('rm -rfv PSFEx/*')
os.system('sh psfex_all.sh')
os.system('mv -v psf_*.cat psf_*.xml psf_*.psf PSFEx/')
os.system('mv -v prepsfex_*-*.cat PSFEx/')
os.system('rm -rfv ./*.fits prepsfex_*.cat')
print('--- %s seconds ---' % (time.time() - start_time))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import time
start_time = time.time()
import numpy as np
import glob, os
from astropy.io import fits
import init_cfg as ic
os.system('psfex -dd > config.psfex')
if ic.use_backsub:
prefix = 'b'
else:
prefix = ''
f = open('psfex_all.sh', 'w')
f.write('\n')
f.write('#############################' + '\n')
f.write('##### Scripts for PSFEx #####' + '\n')
f.write('#############################' + '\n')
f.write('\n')
for i in np.arange(len(ic.fields)):
f.write('# ----- HSC field : ' + ic.fields[i] + ' ----- #' + '\n')
f.write('\n')
for j in np.arange(len(ic.filters)):
flt = ic.filters[j].split('-')[1]
f.write('rm -rfv prepsfex_' + flt + '.cat\n')
f.write('sex Images/' + prefix + ic.fields[i] + '-' + flt +
'.fits -c prepsfex.sex -CATALOG_NAME prepsfex_' + flt + '.cat ')
f.write('-DETECT_THRESH {0:.1f} -ANALYSIS_THRESH {0:.1f} '.format(
ic.THRES_psf))
f.write(
f"""-MAG_ZEROPOINT {ic.MAG0:.1f} -GAIN {ic.GAIN0[i][j]:.1f} -SEEING_FWHM {ic.SEEING0:.2f}
"""
)
f.write('sex Images/' + prefix + ic.fields[i] + '-' + flt +
'.fits -c prepsfex.sex -CATALOG_NAME prepsfex_' + ic.fields[i] +
'-' + flt + '.cat -CATALOG_TYPE ASCII_HEAD ')
f.write('-DETECT_THRESH {0:.1f} -ANALYSIS_THRESH {0:.1f} '.format(
ic.THRES_psf))
f.write(
f"""-MAG_ZEROPOINT {ic.MAG0:.1f} -GAIN {ic.GAIN0[i][j]:.1f} -SEEING_FWHM {ic.SEEING0:.2f}
"""
)
f.write('psfex prepsfex_' + flt + '.cat -c config.psfex ')
f.write(
f'-SAMPLE_FWHMRANGE {ic.FWHMR_psf[0]:.1f},{ic.FWHMR_psf[1]:.1f} ')
f.write(
f'-SAMPLE_MINSN {ic.MINSN_psf:.1f} -SAMPLE_MAXELLIP {ic.MAXEL_psf:.2f} '
)
f.write('-OUTCAT_TYPE ASCII_HEAD -OUTCAT_NAME psf_' + ic.fields[i] +
'-' + flt + '.cat ')
f.write('-CHECKPLOT_TYPE NONE -XML_NAME psf_' + ic.fields[i] + '-' +
flt + '.xml\n')
f.write('mv -v prepsfex_' + flt + '.psf psf_' + ic.fields[i] + '-' +
flt + '.psf\n')
f.write('\n')
f.write('\n\n')
f.close()
if glob.glob('PSFEx/') == []:
os.system('mkdir PSFEx')
else:
os.system('rm -rfv PSFEx/*')
os.system('sh psfex_all.sh')
os.system('mv -v psf_*.cat psf_*.xml psf_*.psf PSFEx/')
os.system('mv -v prepsfex_*-*.cat PSFEx/')
os.system('rm -rfv ./*.fits prepsfex_*.cat')
print('--- %s seconds ---' % (time.time() - start_time))
<|reserved_special_token_1|>
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu May 21 11:40:26 2020
@author: jlee
"""
import time
start_time = time.time()
import numpy as np
import glob, os
from astropy.io import fits
import init_cfg as ic
# ----- Making scripts for PSFEx ----- #
os.system("psfex -dd > config.psfex")
if ic.use_backsub:
prefix = 'b'
else:
prefix = ''
f = open('psfex_all.sh','w')
f.write('\n')
f.write('#############################'+'\n')
f.write('##### Scripts for PSFEx #####'+'\n')
f.write('#############################'+'\n')
f.write('\n')
for i in np.arange(len(ic.fields)):
f.write('# ----- HSC field : '+ic.fields[i]+' ----- #'+'\n')
f.write('\n')
for j in np.arange(len(ic.filters)):
flt = ic.filters[j].split('-')[1]
f.write('rm -rfv prepsfex_'+flt+'.cat\n')
f.write('sex Images/'+prefix+ic.fields[i]+'-'+flt+'.fits -c prepsfex.sex -CATALOG_NAME prepsfex_'+flt+'.cat ')
f.write('-DETECT_THRESH {0:.1f} -ANALYSIS_THRESH {0:.1f} '.format(ic.THRES_psf))
f.write(f"-MAG_ZEROPOINT {ic.MAG0:.1f} -GAIN {ic.GAIN0[i][j]:.1f} -SEEING_FWHM {ic.SEEING0:.2f}\n")
f.write('sex Images/'+prefix+ic.fields[i]+'-'+flt+'.fits -c prepsfex.sex -CATALOG_NAME prepsfex_'+ic.fields[i]+'-'+flt+'.cat -CATALOG_TYPE ASCII_HEAD ')
f.write('-DETECT_THRESH {0:.1f} -ANALYSIS_THRESH {0:.1f} '.format(ic.THRES_psf))
f.write(f"-MAG_ZEROPOINT {ic.MAG0:.1f} -GAIN {ic.GAIN0[i][j]:.1f} -SEEING_FWHM {ic.SEEING0:.2f}\n")
f.write('psfex prepsfex_'+flt+'.cat -c config.psfex ')
f.write(f"-SAMPLE_FWHMRANGE {ic.FWHMR_psf[0]:.1f},{ic.FWHMR_psf[1]:.1f} ")
f.write(f"-SAMPLE_MINSN {ic.MINSN_psf:.1f} -SAMPLE_MAXELLIP {ic.MAXEL_psf:.2f} ")
f.write('-OUTCAT_TYPE ASCII_HEAD -OUTCAT_NAME psf_'+ic.fields[i]+'-'+flt+'.cat ')
f.write('-CHECKPLOT_TYPE NONE -XML_NAME psf_'+ic.fields[i]+'-'+flt+'.xml\n')
f.write('mv -v prepsfex_'+flt+'.psf psf_'+ic.fields[i]+'-'+flt+'.psf\n')
f.write('\n')
f.write('\n\n')
f.close()
# ----- Running scripts for PSFEx ----- #
if (glob.glob("PSFEx/") == []):
os.system("mkdir PSFEx")
else:
os.system("rm -rfv PSFEx/*")
os.system("sh psfex_all.sh")
os.system("mv -v psf_*.cat psf_*.xml psf_*.psf PSFEx/")
os.system("mv -v prepsfex_*-*.cat PSFEx/")
os.system("rm -rfv ./*.fits prepsfex_*.cat")
# Printing the running time
print("--- %s seconds ---" % (time.time() - start_time))
|
flexible
|
{
"blob_id": "c23125018a77508dad6fd2cb86ec6d556fbd1019",
"index": 90,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nos.system('psfex -dd > config.psfex')\nif ic.use_backsub:\n prefix = 'b'\nelse:\n prefix = ''\n<mask token>\nf.write('\\n')\nf.write('#############################' + '\\n')\nf.write('##### Scripts for PSFEx #####' + '\\n')\nf.write('#############################' + '\\n')\nf.write('\\n')\nfor i in np.arange(len(ic.fields)):\n f.write('# ----- HSC field : ' + ic.fields[i] + ' ----- #' + '\\n')\n f.write('\\n')\n for j in np.arange(len(ic.filters)):\n flt = ic.filters[j].split('-')[1]\n f.write('rm -rfv prepsfex_' + flt + '.cat\\n')\n f.write('sex Images/' + prefix + ic.fields[i] + '-' + flt +\n '.fits -c prepsfex.sex -CATALOG_NAME prepsfex_' + flt + '.cat ')\n f.write('-DETECT_THRESH {0:.1f} -ANALYSIS_THRESH {0:.1f} '.format(\n ic.THRES_psf))\n f.write(\n f\"\"\"-MAG_ZEROPOINT {ic.MAG0:.1f} -GAIN {ic.GAIN0[i][j]:.1f} -SEEING_FWHM {ic.SEEING0:.2f}\n\"\"\"\n )\n f.write('sex Images/' + prefix + ic.fields[i] + '-' + flt +\n '.fits -c prepsfex.sex -CATALOG_NAME prepsfex_' + ic.fields[i] +\n '-' + flt + '.cat -CATALOG_TYPE ASCII_HEAD ')\n f.write('-DETECT_THRESH {0:.1f} -ANALYSIS_THRESH {0:.1f} '.format(\n ic.THRES_psf))\n f.write(\n f\"\"\"-MAG_ZEROPOINT {ic.MAG0:.1f} -GAIN {ic.GAIN0[i][j]:.1f} -SEEING_FWHM {ic.SEEING0:.2f}\n\"\"\"\n )\n f.write('psfex prepsfex_' + flt + '.cat -c config.psfex ')\n f.write(\n f'-SAMPLE_FWHMRANGE {ic.FWHMR_psf[0]:.1f},{ic.FWHMR_psf[1]:.1f} ')\n f.write(\n f'-SAMPLE_MINSN {ic.MINSN_psf:.1f} -SAMPLE_MAXELLIP {ic.MAXEL_psf:.2f} '\n )\n f.write('-OUTCAT_TYPE ASCII_HEAD -OUTCAT_NAME psf_' + ic.fields[i] +\n '-' + flt + '.cat ')\n f.write('-CHECKPLOT_TYPE NONE -XML_NAME psf_' + ic.fields[i] + '-' +\n flt + '.xml\\n')\n f.write('mv -v prepsfex_' + flt + '.psf psf_' + ic.fields[i] + '-' +\n flt + '.psf\\n')\n f.write('\\n')\n f.write('\\n\\n')\nf.close()\nif glob.glob('PSFEx/') == []:\n os.system('mkdir PSFEx')\nelse:\n os.system('rm -rfv PSFEx/*')\nos.system('sh psfex_all.sh')\nos.system('mv -v psf_*.cat psf_*.xml 
psf_*.psf PSFEx/')\nos.system('mv -v prepsfex_*-*.cat PSFEx/')\nos.system('rm -rfv ./*.fits prepsfex_*.cat')\nprint('--- %s seconds ---' % (time.time() - start_time))\n",
"step-3": "<mask token>\nstart_time = time.time()\n<mask token>\nos.system('psfex -dd > config.psfex')\nif ic.use_backsub:\n prefix = 'b'\nelse:\n prefix = ''\nf = open('psfex_all.sh', 'w')\nf.write('\\n')\nf.write('#############################' + '\\n')\nf.write('##### Scripts for PSFEx #####' + '\\n')\nf.write('#############################' + '\\n')\nf.write('\\n')\nfor i in np.arange(len(ic.fields)):\n f.write('# ----- HSC field : ' + ic.fields[i] + ' ----- #' + '\\n')\n f.write('\\n')\n for j in np.arange(len(ic.filters)):\n flt = ic.filters[j].split('-')[1]\n f.write('rm -rfv prepsfex_' + flt + '.cat\\n')\n f.write('sex Images/' + prefix + ic.fields[i] + '-' + flt +\n '.fits -c prepsfex.sex -CATALOG_NAME prepsfex_' + flt + '.cat ')\n f.write('-DETECT_THRESH {0:.1f} -ANALYSIS_THRESH {0:.1f} '.format(\n ic.THRES_psf))\n f.write(\n f\"\"\"-MAG_ZEROPOINT {ic.MAG0:.1f} -GAIN {ic.GAIN0[i][j]:.1f} -SEEING_FWHM {ic.SEEING0:.2f}\n\"\"\"\n )\n f.write('sex Images/' + prefix + ic.fields[i] + '-' + flt +\n '.fits -c prepsfex.sex -CATALOG_NAME prepsfex_' + ic.fields[i] +\n '-' + flt + '.cat -CATALOG_TYPE ASCII_HEAD ')\n f.write('-DETECT_THRESH {0:.1f} -ANALYSIS_THRESH {0:.1f} '.format(\n ic.THRES_psf))\n f.write(\n f\"\"\"-MAG_ZEROPOINT {ic.MAG0:.1f} -GAIN {ic.GAIN0[i][j]:.1f} -SEEING_FWHM {ic.SEEING0:.2f}\n\"\"\"\n )\n f.write('psfex prepsfex_' + flt + '.cat -c config.psfex ')\n f.write(\n f'-SAMPLE_FWHMRANGE {ic.FWHMR_psf[0]:.1f},{ic.FWHMR_psf[1]:.1f} ')\n f.write(\n f'-SAMPLE_MINSN {ic.MINSN_psf:.1f} -SAMPLE_MAXELLIP {ic.MAXEL_psf:.2f} '\n )\n f.write('-OUTCAT_TYPE ASCII_HEAD -OUTCAT_NAME psf_' + ic.fields[i] +\n '-' + flt + '.cat ')\n f.write('-CHECKPLOT_TYPE NONE -XML_NAME psf_' + ic.fields[i] + '-' +\n flt + '.xml\\n')\n f.write('mv -v prepsfex_' + flt + '.psf psf_' + ic.fields[i] + '-' +\n flt + '.psf\\n')\n f.write('\\n')\n f.write('\\n\\n')\nf.close()\nif glob.glob('PSFEx/') == []:\n os.system('mkdir PSFEx')\nelse:\n os.system('rm -rfv PSFEx/*')\nos.system('sh 
psfex_all.sh')\nos.system('mv -v psf_*.cat psf_*.xml psf_*.psf PSFEx/')\nos.system('mv -v prepsfex_*-*.cat PSFEx/')\nos.system('rm -rfv ./*.fits prepsfex_*.cat')\nprint('--- %s seconds ---' % (time.time() - start_time))\n",
"step-4": "<mask token>\nimport time\nstart_time = time.time()\nimport numpy as np\nimport glob, os\nfrom astropy.io import fits\nimport init_cfg as ic\nos.system('psfex -dd > config.psfex')\nif ic.use_backsub:\n prefix = 'b'\nelse:\n prefix = ''\nf = open('psfex_all.sh', 'w')\nf.write('\\n')\nf.write('#############################' + '\\n')\nf.write('##### Scripts for PSFEx #####' + '\\n')\nf.write('#############################' + '\\n')\nf.write('\\n')\nfor i in np.arange(len(ic.fields)):\n f.write('# ----- HSC field : ' + ic.fields[i] + ' ----- #' + '\\n')\n f.write('\\n')\n for j in np.arange(len(ic.filters)):\n flt = ic.filters[j].split('-')[1]\n f.write('rm -rfv prepsfex_' + flt + '.cat\\n')\n f.write('sex Images/' + prefix + ic.fields[i] + '-' + flt +\n '.fits -c prepsfex.sex -CATALOG_NAME prepsfex_' + flt + '.cat ')\n f.write('-DETECT_THRESH {0:.1f} -ANALYSIS_THRESH {0:.1f} '.format(\n ic.THRES_psf))\n f.write(\n f\"\"\"-MAG_ZEROPOINT {ic.MAG0:.1f} -GAIN {ic.GAIN0[i][j]:.1f} -SEEING_FWHM {ic.SEEING0:.2f}\n\"\"\"\n )\n f.write('sex Images/' + prefix + ic.fields[i] + '-' + flt +\n '.fits -c prepsfex.sex -CATALOG_NAME prepsfex_' + ic.fields[i] +\n '-' + flt + '.cat -CATALOG_TYPE ASCII_HEAD ')\n f.write('-DETECT_THRESH {0:.1f} -ANALYSIS_THRESH {0:.1f} '.format(\n ic.THRES_psf))\n f.write(\n f\"\"\"-MAG_ZEROPOINT {ic.MAG0:.1f} -GAIN {ic.GAIN0[i][j]:.1f} -SEEING_FWHM {ic.SEEING0:.2f}\n\"\"\"\n )\n f.write('psfex prepsfex_' + flt + '.cat -c config.psfex ')\n f.write(\n f'-SAMPLE_FWHMRANGE {ic.FWHMR_psf[0]:.1f},{ic.FWHMR_psf[1]:.1f} ')\n f.write(\n f'-SAMPLE_MINSN {ic.MINSN_psf:.1f} -SAMPLE_MAXELLIP {ic.MAXEL_psf:.2f} '\n )\n f.write('-OUTCAT_TYPE ASCII_HEAD -OUTCAT_NAME psf_' + ic.fields[i] +\n '-' + flt + '.cat ')\n f.write('-CHECKPLOT_TYPE NONE -XML_NAME psf_' + ic.fields[i] + '-' +\n flt + '.xml\\n')\n f.write('mv -v prepsfex_' + flt + '.psf psf_' + ic.fields[i] + '-' +\n flt + '.psf\\n')\n f.write('\\n')\n f.write('\\n\\n')\nf.close()\nif glob.glob('PSFEx/') 
== []:\n os.system('mkdir PSFEx')\nelse:\n os.system('rm -rfv PSFEx/*')\nos.system('sh psfex_all.sh')\nos.system('mv -v psf_*.cat psf_*.xml psf_*.psf PSFEx/')\nos.system('mv -v prepsfex_*-*.cat PSFEx/')\nos.system('rm -rfv ./*.fits prepsfex_*.cat')\nprint('--- %s seconds ---' % (time.time() - start_time))\n",
"step-5": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu May 21 11:40:26 2020\n\n@author: jlee\n\"\"\"\n\n\nimport time\nstart_time = time.time()\n\nimport numpy as np\nimport glob, os\nfrom astropy.io import fits\n\nimport init_cfg as ic\n\n\n# ----- Making scripts for PSFEx ----- #\nos.system(\"psfex -dd > config.psfex\")\n\nif ic.use_backsub:\n\tprefix = 'b'\nelse:\n\tprefix = ''\n\nf = open('psfex_all.sh','w')\nf.write('\\n')\nf.write('#############################'+'\\n')\nf.write('##### Scripts for PSFEx #####'+'\\n')\nf.write('#############################'+'\\n')\nf.write('\\n')\nfor i in np.arange(len(ic.fields)):\n\tf.write('# ----- HSC field : '+ic.fields[i]+' ----- #'+'\\n')\n\tf.write('\\n')\n\tfor j in np.arange(len(ic.filters)):\n\t\tflt = ic.filters[j].split('-')[1]\n\t\tf.write('rm -rfv prepsfex_'+flt+'.cat\\n')\n\t\tf.write('sex Images/'+prefix+ic.fields[i]+'-'+flt+'.fits -c prepsfex.sex -CATALOG_NAME prepsfex_'+flt+'.cat ')\n\t\tf.write('-DETECT_THRESH {0:.1f} -ANALYSIS_THRESH {0:.1f} '.format(ic.THRES_psf))\n\t\tf.write(f\"-MAG_ZEROPOINT {ic.MAG0:.1f} -GAIN {ic.GAIN0[i][j]:.1f} -SEEING_FWHM {ic.SEEING0:.2f}\\n\")\n\t\tf.write('sex Images/'+prefix+ic.fields[i]+'-'+flt+'.fits -c prepsfex.sex -CATALOG_NAME prepsfex_'+ic.fields[i]+'-'+flt+'.cat -CATALOG_TYPE ASCII_HEAD ')\n\t\tf.write('-DETECT_THRESH {0:.1f} -ANALYSIS_THRESH {0:.1f} '.format(ic.THRES_psf))\n\t\tf.write(f\"-MAG_ZEROPOINT {ic.MAG0:.1f} -GAIN {ic.GAIN0[i][j]:.1f} -SEEING_FWHM {ic.SEEING0:.2f}\\n\")\n\t\tf.write('psfex prepsfex_'+flt+'.cat -c config.psfex ')\n\t\tf.write(f\"-SAMPLE_FWHMRANGE {ic.FWHMR_psf[0]:.1f},{ic.FWHMR_psf[1]:.1f} \")\n\t\tf.write(f\"-SAMPLE_MINSN {ic.MINSN_psf:.1f} -SAMPLE_MAXELLIP {ic.MAXEL_psf:.2f} \")\n\t\tf.write('-OUTCAT_TYPE ASCII_HEAD -OUTCAT_NAME psf_'+ic.fields[i]+'-'+flt+'.cat ')\n\t\tf.write('-CHECKPLOT_TYPE NONE -XML_NAME psf_'+ic.fields[i]+'-'+flt+'.xml\\n')\n\t\tf.write('mv -v prepsfex_'+flt+'.psf 
psf_'+ic.fields[i]+'-'+flt+'.psf\\n')\n\t\tf.write('\\n')\n\tf.write('\\n\\n')\nf.close()\n\n\n# ----- Running scripts for PSFEx ----- #\nif (glob.glob(\"PSFEx/\") == []):\n\tos.system(\"mkdir PSFEx\")\nelse:\n\tos.system(\"rm -rfv PSFEx/*\")\n\nos.system(\"sh psfex_all.sh\")\n\nos.system(\"mv -v psf_*.cat psf_*.xml psf_*.psf PSFEx/\")\nos.system(\"mv -v prepsfex_*-*.cat PSFEx/\")\nos.system(\"rm -rfv ./*.fits prepsfex_*.cat\")\n\n\n# Printing the running time \nprint(\"--- %s seconds ---\" % (time.time() - start_time))\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# Name: BoardingPass.py
# Description: Class to create and output a boarding pass
# Ver. Writer Date Notes
# 1.0 Shuvam Chatterjee 05/22/20 Original
from random import randint
class BoardingPass:
def __init__(self, reservation):
self.reservation = reservation
self.export()
def export(self):
fileName = "reservations/data_reservation/boarding_passes"
file = open(fileName, "a")
flights = self.reservation.getFlights()
string = ""
for i in range(len(flights)):
flight = flights[i]
gate = randint(1, 8)
for passenger in self.reservation.getPassengers():
string += "BOARDING PASS"
string += "NAME OF PASSENGER:\n"
string += passenger.getLastName() + " / " + passenger.getFirstName() + "\n"
string += "FROM: " + flight.getOrigin() + "\n"
string += "TO: " + flight.getDestination() + "\n"
string += "SEAT: " + passenger.getSeats()[i]
string += "GATE: " + str(gate) + "\n"
string += "\n\n"
print(string, file=file)
file.close()
return fileName
|
normal
|
{
"blob_id": "a3662b4b9569046e67c39c1002234c1fbd85c650",
"index": 8102,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass BoardingPass:\n <mask token>\n\n def export(self):\n fileName = 'reservations/data_reservation/boarding_passes'\n file = open(fileName, 'a')\n flights = self.reservation.getFlights()\n string = ''\n for i in range(len(flights)):\n flight = flights[i]\n gate = randint(1, 8)\n for passenger in self.reservation.getPassengers():\n string += 'BOARDING PASS'\n string += 'NAME OF PASSENGER:\\n'\n string += passenger.getLastName(\n ) + ' / ' + passenger.getFirstName() + '\\n'\n string += 'FROM: ' + flight.getOrigin() + '\\n'\n string += 'TO: ' + flight.getDestination() + '\\n'\n string += 'SEAT: ' + passenger.getSeats()[i]\n string += 'GATE: ' + str(gate) + '\\n'\n string += '\\n\\n'\n print(string, file=file)\n file.close()\n return fileName\n",
"step-3": "<mask token>\n\n\nclass BoardingPass:\n\n def __init__(self, reservation):\n self.reservation = reservation\n self.export()\n\n def export(self):\n fileName = 'reservations/data_reservation/boarding_passes'\n file = open(fileName, 'a')\n flights = self.reservation.getFlights()\n string = ''\n for i in range(len(flights)):\n flight = flights[i]\n gate = randint(1, 8)\n for passenger in self.reservation.getPassengers():\n string += 'BOARDING PASS'\n string += 'NAME OF PASSENGER:\\n'\n string += passenger.getLastName(\n ) + ' / ' + passenger.getFirstName() + '\\n'\n string += 'FROM: ' + flight.getOrigin() + '\\n'\n string += 'TO: ' + flight.getDestination() + '\\n'\n string += 'SEAT: ' + passenger.getSeats()[i]\n string += 'GATE: ' + str(gate) + '\\n'\n string += '\\n\\n'\n print(string, file=file)\n file.close()\n return fileName\n",
"step-4": "from random import randint\n\n\nclass BoardingPass:\n\n def __init__(self, reservation):\n self.reservation = reservation\n self.export()\n\n def export(self):\n fileName = 'reservations/data_reservation/boarding_passes'\n file = open(fileName, 'a')\n flights = self.reservation.getFlights()\n string = ''\n for i in range(len(flights)):\n flight = flights[i]\n gate = randint(1, 8)\n for passenger in self.reservation.getPassengers():\n string += 'BOARDING PASS'\n string += 'NAME OF PASSENGER:\\n'\n string += passenger.getLastName(\n ) + ' / ' + passenger.getFirstName() + '\\n'\n string += 'FROM: ' + flight.getOrigin() + '\\n'\n string += 'TO: ' + flight.getDestination() + '\\n'\n string += 'SEAT: ' + passenger.getSeats()[i]\n string += 'GATE: ' + str(gate) + '\\n'\n string += '\\n\\n'\n print(string, file=file)\n file.close()\n return fileName\n",
"step-5": "# Name: BoardingPass.py\n# Description: Class to create and output a boarding pass\n\n# Ver. Writer Date Notes\n# 1.0 Shuvam Chatterjee 05/22/20 Original\n\nfrom random import randint\n\nclass BoardingPass:\n def __init__(self, reservation):\n self.reservation = reservation\n self.export()\n\n def export(self):\n fileName = \"reservations/data_reservation/boarding_passes\"\n file = open(fileName, \"a\")\n\n flights = self.reservation.getFlights()\n string = \"\"\n for i in range(len(flights)):\n flight = flights[i]\n gate = randint(1, 8)\n\n for passenger in self.reservation.getPassengers():\n string += \"BOARDING PASS\"\n string += \"NAME OF PASSENGER:\\n\"\n string += passenger.getLastName() + \" / \" + passenger.getFirstName() + \"\\n\"\n string += \"FROM: \" + flight.getOrigin() + \"\\n\"\n string += \"TO: \" + flight.getDestination() + \"\\n\"\n string += \"SEAT: \" + passenger.getSeats()[i]\n string += \"GATE: \" + str(gate) + \"\\n\"\n string += \"\\n\\n\"\n\n print(string, file=file)\n\n file.close()\n\n return fileName",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
@app.route('/')
def home_page():
with open('/Users/samfunk/ds/metis/project_mcnulty/stock_page.html', 'r'
) as viz_file:
return viz_file.read()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@app.route('/')
def home_page():
with open('/Users/samfunk/ds/metis/project_mcnulty/stock_page.html', 'r'
) as viz_file:
return viz_file.read()
@app.route('/stock', methods=['POST'])
def stock(ok_tickers=ok_tickers()):
data = flask.request.json
ticker = str(data['ticker']).upper()
if ticker in ok_tickers:
earnings_soup = BeautifulSoup(requests.get(
'https://finance.yahoo.com/quote/%s/analysts?p=%s' % (ticker,
ticker)).text, 'html.parser')
surprise_string = earnings_soup.find_all('table')[2].tbody.find_all(
'tr')[3].find_all('td')[4].text
surprise = float(re.search('(.*)%', surprise_string)[1])
if abs(surprise) < 5.0:
score = 0
else:
score = 1
else:
surprise_string = 'null'
score = 'null'
results = {'surprise': surprise_string, 'score': score}
print(ticker, results)
return flask.jsonify(results)
if __name__ == '__main__':
app.run()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
app = flask.Flask(__name__)
@app.route('/')
def home_page():
with open('/Users/samfunk/ds/metis/project_mcnulty/stock_page.html', 'r'
) as viz_file:
return viz_file.read()
@app.route('/stock', methods=['POST'])
def stock(ok_tickers=ok_tickers()):
data = flask.request.json
ticker = str(data['ticker']).upper()
if ticker in ok_tickers:
earnings_soup = BeautifulSoup(requests.get(
'https://finance.yahoo.com/quote/%s/analysts?p=%s' % (ticker,
ticker)).text, 'html.parser')
surprise_string = earnings_soup.find_all('table')[2].tbody.find_all(
'tr')[3].find_all('td')[4].text
surprise = float(re.search('(.*)%', surprise_string)[1])
if abs(surprise) < 5.0:
score = 0
else:
score = 1
else:
surprise_string = 'null'
score = 'null'
results = {'surprise': surprise_string, 'score': score}
print(ticker, results)
return flask.jsonify(results)
if __name__ == '__main__':
app.run()
<|reserved_special_token_1|>
import flask
import numpy as np
import pandas as pd
import requests
from bs4 import BeautifulSoup
import pickle
from recent_earnings_tickers import ok_tickers
import re
<|reserved_special_token_0|>
app = flask.Flask(__name__)
@app.route('/')
def home_page():
with open('/Users/samfunk/ds/metis/project_mcnulty/stock_page.html', 'r'
) as viz_file:
return viz_file.read()
@app.route('/stock', methods=['POST'])
def stock(ok_tickers=ok_tickers()):
data = flask.request.json
ticker = str(data['ticker']).upper()
if ticker in ok_tickers:
earnings_soup = BeautifulSoup(requests.get(
'https://finance.yahoo.com/quote/%s/analysts?p=%s' % (ticker,
ticker)).text, 'html.parser')
surprise_string = earnings_soup.find_all('table')[2].tbody.find_all(
'tr')[3].find_all('td')[4].text
surprise = float(re.search('(.*)%', surprise_string)[1])
if abs(surprise) < 5.0:
score = 0
else:
score = 1
else:
surprise_string = 'null'
score = 'null'
results = {'surprise': surprise_string, 'score': score}
print(ticker, results)
return flask.jsonify(results)
if __name__ == '__main__':
app.run()
<|reserved_special_token_1|>
import flask
import numpy as np
import pandas as pd
import requests
from bs4 import BeautifulSoup
import pickle
from recent_earnings_tickers import ok_tickers
import re
#---------- Model ----------------#
#with open('/Users/samfunk/ds/metis/project_mcnulty/code/REPLACE_WITH_MODEL_PICKLE', 'rb') as f:
#PREDICTOR = pickle.load(f)
'''Have final model in the pickle file
Should be prefit to main data
Simply ask for a company/list of companies
Input the ticker into model (which will scrape web for current features)
Pray some of them are right'''
#---------- URLS AND WEB PAGES -------------#
app = flask.Flask(__name__)
@app.route('/')
def home_page():
with open("/Users/samfunk/ds/metis/project_mcnulty/stock_page.html",'r') as viz_file:
return viz_file.read()
@app.route("/stock", methods=["POST"])
def stock(ok_tickers=ok_tickers()):
data = flask.request.json
ticker = str(data["ticker"]).upper()
if ticker in ok_tickers:
earnings_soup = BeautifulSoup(requests.get("https://finance.yahoo.com/quote/%s/analysts?p=%s" % (ticker, ticker)).text, 'html.parser')
surprise_string = earnings_soup.find_all('table')[2].tbody.find_all('tr')[3].find_all('td')[4].text
surprise = float(re.search(r'(.*)%', surprise_string)[1])
#score = PREDICTOR.predict_proba(x)
if abs(surprise) < 5.0:
score = 0
else:
score = 1
else:
surprise_string = 'null'
score = 'null'
#score = PREDICTOR.predict_proba(x)
results = {"surprise": surprise_string, "score": score}
print(ticker, results)
return flask.jsonify(results)
if __name__ == '__main__':
app.run()
|
flexible
|
{
"blob_id": "3be1947ead65f8e8a9bf73cc8cae2c7d69d8b756",
"index": 1641,
"step-1": "<mask token>\n\n\n@app.route('/')\ndef home_page():\n with open('/Users/samfunk/ds/metis/project_mcnulty/stock_page.html', 'r'\n ) as viz_file:\n return viz_file.read()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\n@app.route('/')\ndef home_page():\n with open('/Users/samfunk/ds/metis/project_mcnulty/stock_page.html', 'r'\n ) as viz_file:\n return viz_file.read()\n\n\n@app.route('/stock', methods=['POST'])\ndef stock(ok_tickers=ok_tickers()):\n data = flask.request.json\n ticker = str(data['ticker']).upper()\n if ticker in ok_tickers:\n earnings_soup = BeautifulSoup(requests.get(\n 'https://finance.yahoo.com/quote/%s/analysts?p=%s' % (ticker,\n ticker)).text, 'html.parser')\n surprise_string = earnings_soup.find_all('table')[2].tbody.find_all(\n 'tr')[3].find_all('td')[4].text\n surprise = float(re.search('(.*)%', surprise_string)[1])\n if abs(surprise) < 5.0:\n score = 0\n else:\n score = 1\n else:\n surprise_string = 'null'\n score = 'null'\n results = {'surprise': surprise_string, 'score': score}\n print(ticker, results)\n return flask.jsonify(results)\n\n\nif __name__ == '__main__':\n app.run()\n",
"step-3": "<mask token>\napp = flask.Flask(__name__)\n\n\n@app.route('/')\ndef home_page():\n with open('/Users/samfunk/ds/metis/project_mcnulty/stock_page.html', 'r'\n ) as viz_file:\n return viz_file.read()\n\n\n@app.route('/stock', methods=['POST'])\ndef stock(ok_tickers=ok_tickers()):\n data = flask.request.json\n ticker = str(data['ticker']).upper()\n if ticker in ok_tickers:\n earnings_soup = BeautifulSoup(requests.get(\n 'https://finance.yahoo.com/quote/%s/analysts?p=%s' % (ticker,\n ticker)).text, 'html.parser')\n surprise_string = earnings_soup.find_all('table')[2].tbody.find_all(\n 'tr')[3].find_all('td')[4].text\n surprise = float(re.search('(.*)%', surprise_string)[1])\n if abs(surprise) < 5.0:\n score = 0\n else:\n score = 1\n else:\n surprise_string = 'null'\n score = 'null'\n results = {'surprise': surprise_string, 'score': score}\n print(ticker, results)\n return flask.jsonify(results)\n\n\nif __name__ == '__main__':\n app.run()\n",
"step-4": "import flask\nimport numpy as np\nimport pandas as pd\nimport requests\nfrom bs4 import BeautifulSoup\nimport pickle\nfrom recent_earnings_tickers import ok_tickers\nimport re\n<mask token>\napp = flask.Flask(__name__)\n\n\n@app.route('/')\ndef home_page():\n with open('/Users/samfunk/ds/metis/project_mcnulty/stock_page.html', 'r'\n ) as viz_file:\n return viz_file.read()\n\n\n@app.route('/stock', methods=['POST'])\ndef stock(ok_tickers=ok_tickers()):\n data = flask.request.json\n ticker = str(data['ticker']).upper()\n if ticker in ok_tickers:\n earnings_soup = BeautifulSoup(requests.get(\n 'https://finance.yahoo.com/quote/%s/analysts?p=%s' % (ticker,\n ticker)).text, 'html.parser')\n surprise_string = earnings_soup.find_all('table')[2].tbody.find_all(\n 'tr')[3].find_all('td')[4].text\n surprise = float(re.search('(.*)%', surprise_string)[1])\n if abs(surprise) < 5.0:\n score = 0\n else:\n score = 1\n else:\n surprise_string = 'null'\n score = 'null'\n results = {'surprise': surprise_string, 'score': score}\n print(ticker, results)\n return flask.jsonify(results)\n\n\nif __name__ == '__main__':\n app.run()\n",
"step-5": "import flask\nimport numpy as np\nimport pandas as pd\nimport requests\nfrom bs4 import BeautifulSoup\nimport pickle\nfrom recent_earnings_tickers import ok_tickers\nimport re\n\n#---------- Model ----------------#\n\n#with open('/Users/samfunk/ds/metis/project_mcnulty/code/REPLACE_WITH_MODEL_PICKLE', 'rb') as f:\n #PREDICTOR = pickle.load(f)\n\n\n'''Have final model in the pickle file\nShould be prefit to main data\nSimply ask for a company/list of companies\nInput the ticker into model (which will scrape web for current features)\nPray some of them are right'''\n\n\n\n#---------- URLS AND WEB PAGES -------------#\napp = flask.Flask(__name__)\n\n@app.route('/')\ndef home_page():\n with open(\"/Users/samfunk/ds/metis/project_mcnulty/stock_page.html\",'r') as viz_file:\n return viz_file.read()\n\n\n@app.route(\"/stock\", methods=[\"POST\"])\ndef stock(ok_tickers=ok_tickers()):\n\n data = flask.request.json\n ticker = str(data[\"ticker\"]).upper()\n if ticker in ok_tickers:\n earnings_soup = BeautifulSoup(requests.get(\"https://finance.yahoo.com/quote/%s/analysts?p=%s\" % (ticker, ticker)).text, 'html.parser')\n surprise_string = earnings_soup.find_all('table')[2].tbody.find_all('tr')[3].find_all('td')[4].text\n surprise = float(re.search(r'(.*)%', surprise_string)[1])\n\n\n #score = PREDICTOR.predict_proba(x)\n\n if abs(surprise) < 5.0:\n score = 0\n else:\n score = 1\n else:\n surprise_string = 'null'\n score = 'null'\n #score = PREDICTOR.predict_proba(x)\n results = {\"surprise\": surprise_string, \"score\": score}\n\n print(ticker, results)\n return flask.jsonify(results)\n\nif __name__ == '__main__':\n app.run()\n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
class Centering(object):
def __init__(self, letter, additional_lattice_points):
self.letter = letter
self.additional_lattice_points = additional_lattice_points
def transform(self, op):
additional_ops = []
for point in self.additional_lattice_points:
func = lambda x: '{0}+{3},{1}+{4},{2}+{5}'.format(*(x.split(','
) + list(point)))
additional_ops.append(op.__class__(func(op.operation_string)))
return additional_ops
@classmethod
def primitive(cls):
return cls('P', [])
<|reserved_special_token_0|>
@classmethod
def hexagonal(cls):
return cls('H', [(Fraction(2, 3), Fraction(1, 3), 0.0), (Fraction(1,
3), Fraction(2, 3), 0.0)])
@classmethod
def rhombohedral(cls):
return cls('R', [(Fraction(2, 3), Fraction(1, 3), Fraction(1, 3)),
(Fraction(1, 3), Fraction(2, 3), Fraction(2, 3))])
@classmethod
def face_centered(cls):
return cls('F', [(0.0, 0.5, 0.5), (0.5, 0.0, 0.5), (0.5, 0.5, 0.0)])
@classmethod
def base_centered_A(cls):
return cls('A', [(0.0, 0.5, 0.5)])
@classmethod
def base_centered_B(cls):
return cls('B', [(0.5, 0.0, 0.5)])
@classmethod
def base_centered_C(cls):
return cls('C', [(0.5, 0.5, 0.0)])
<|reserved_special_token_0|>
def get_LATT_code(self):
if self.letter == 'P':
return 1
elif self.letter == 'I':
return 2
elif self.letter == 'R':
return 3
elif self.letter == 'F':
return 4
elif self.letter == 'A':
return 5
elif self.letter == 'B':
return 6
elif self.letter == 'C':
return 7
class AsymmetricUnit(object):
UNKNOWN = [[0, 1.0], [0, 1.0], [0, 1.0]]
FULL = [[0, 1.0], [0, 1.0], [0, 1.0]]
HALF_X = [[0, 0.5], [0, 1.0], [0, 1.0]]
HALF_Y = [[0, 1.0], [0, 0.5], [0, 1.0]]
HALF_Z = [[0, 1.0], [0, 1.0], [0, 0.5]]
QUART_Y = [[0, 1.0], [0, 0.25], [0, 1.0]]
HALF_X_QUART_Y = [[0, 0.5], [0, 0.25], [0, 1.0]]
HALF_XZ = [[0, 0.5], [0, 1.0], [0, 0.5]]
HALF_XY = [[0, 0.5], [0, 0.5], [0, 1.0]]
EIGHT_Z = [[0, 1.0], [0, 1.0], [0, 0.125]]
class UniqueAxis(object):
UNKNOWN = -1
NA = -1
X = 0
Y = 1
Z = 2
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class SymmetryOperation(object):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class Symmetry(object):
@staticmethod
def get(value):
if value is '' or value == 'UNKNOWN':
return []
return [SymmetryOperation(v) for v in value.split(';')]
class Inversion(object):
YES = True
NO = False
UNKNOWN = None
class InversionFactory(object):
@staticmethod
def construct(latt):
if int(latt) > 0:
return CentroSymmetric()
else:
return NonCentroSymmetric()
class CentroSymmetric(Inversion):
@staticmethod
def transform(op):
func = lambda x: '-1*(%s),-1*(%s),-1*(%s)' % tuple(x.split(','))
return func(op)
class NonCentroSymmetric(Inversion):
@staticmethod
def transform(op):
return op
class Centering(object):
def __init__(self, letter, additional_lattice_points):
self.letter = letter
self.additional_lattice_points = additional_lattice_points
def transform(self, op):
additional_ops = []
for point in self.additional_lattice_points:
func = lambda x: '{0}+{3},{1}+{4},{2}+{5}'.format(*(x.split(','
) + list(point)))
additional_ops.append(op.__class__(func(op.operation_string)))
return additional_ops
@classmethod
def primitive(cls):
return cls('P', [])
@classmethod
def body_centered(cls):
return cls('I', [(0.5, 0.5, 0.5)])
@classmethod
def hexagonal(cls):
return cls('H', [(Fraction(2, 3), Fraction(1, 3), 0.0), (Fraction(1,
3), Fraction(2, 3), 0.0)])
@classmethod
def rhombohedral(cls):
return cls('R', [(Fraction(2, 3), Fraction(1, 3), Fraction(1, 3)),
(Fraction(1, 3), Fraction(2, 3), Fraction(2, 3))])
@classmethod
def face_centered(cls):
return cls('F', [(0.0, 0.5, 0.5), (0.5, 0.0, 0.5), (0.5, 0.5, 0.0)])
@classmethod
def base_centered_A(cls):
return cls('A', [(0.0, 0.5, 0.5)])
@classmethod
def base_centered_B(cls):
return cls('B', [(0.5, 0.0, 0.5)])
@classmethod
def base_centered_C(cls):
return cls('C', [(0.5, 0.5, 0.0)])
@classmethod
def construct(cls, latt):
"""
Given the LATT directive in a res file, return the corresponding centered lattice type.
:param latt: the absolute integer value specified in LATT
:return: corrected centered lattice type
"""
latt = abs(latt)
if latt == 1:
return cls.primitive()
elif latt == 2:
return cls.body_centered()
elif latt == 3:
return cls.rhombohedral()
elif latt == 4:
return cls.face_centered()
elif latt == 5:
return cls.base_centered_A()
elif latt == 6:
return cls.base_centered_B()
elif latt == 7:
return cls.base_centered_C()
def get_LATT_code(self):
if self.letter == 'P':
return 1
elif self.letter == 'I':
return 2
elif self.letter == 'R':
return 3
elif self.letter == 'F':
return 4
elif self.letter == 'A':
return 5
elif self.letter == 'B':
return 6
elif self.letter == 'C':
return 7
class AsymmetricUnit(object):
UNKNOWN = [[0, 1.0], [0, 1.0], [0, 1.0]]
FULL = [[0, 1.0], [0, 1.0], [0, 1.0]]
HALF_X = [[0, 0.5], [0, 1.0], [0, 1.0]]
HALF_Y = [[0, 1.0], [0, 0.5], [0, 1.0]]
HALF_Z = [[0, 1.0], [0, 1.0], [0, 0.5]]
QUART_Y = [[0, 1.0], [0, 0.25], [0, 1.0]]
HALF_X_QUART_Y = [[0, 0.5], [0, 0.25], [0, 1.0]]
HALF_XZ = [[0, 0.5], [0, 1.0], [0, 0.5]]
HALF_XY = [[0, 0.5], [0, 0.5], [0, 1.0]]
EIGHT_Z = [[0, 1.0], [0, 1.0], [0, 0.125]]
class UniqueAxis(object):
UNKNOWN = -1
NA = -1
X = 0
Y = 1
Z = 2
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class SymmetryOperation(object):
def __init__(self, operation_string):
"""
Initialize a symmetry operation object from a string representation of symmetry operation.
:param operation_string: A string (as read in from res/cif file) representing a symmetry operation
for a space group.
"""
self.operation_string = operation_string.lower()
self.operation_function = None
self.__set_operation_function()
def __set_operation_function(self):
"""
Convert the string form of the symmetry operation into the form of a mathematical
function that can be directly applied to a vector to transform a point to a
symmetry related point.
"""
if self.operation_function is not None:
return self.operation_function
else:
self.operation_function = symm_eval
def transform_scaled_position(self, data):
"""
Applying this symmetry operation on a 3D coordinate to transform it to a symmetry-related
position in the crystal.
:param data: A vector (:class:`entdecker.core.models.vector3d.cVector3D`)
representing the fractional coordinates on which the symmetry
operation will be applied upon.
:return: Symmetry transformed vector.
"""
return self.operation_function(prepare_operation(self.
operation_string), data)
def transform_atom(self, atom):
return Atom(label=atom.label, scaled_position=self.
transform_scaled_position(atom.scaled_position))
def inversion(self):
func = lambda x: '-1*(%s),-1*(%s),-1*(%s)' % tuple(x.split(','))
return self.__class__(func(self.operation_string))
class Symmetry(object):
@staticmethod
def get(value):
if value is '' or value == 'UNKNOWN':
return []
return [SymmetryOperation(v) for v in value.split(';')]
class Inversion(object):
YES = True
NO = False
UNKNOWN = None
class InversionFactory(object):
@staticmethod
def construct(latt):
if int(latt) > 0:
return CentroSymmetric()
else:
return NonCentroSymmetric()
class CentroSymmetric(Inversion):
@staticmethod
def transform(op):
func = lambda x: '-1*(%s),-1*(%s),-1*(%s)' % tuple(x.split(','))
return func(op)
class NonCentroSymmetric(Inversion):
@staticmethod
def transform(op):
return op
class Centering(object):
def __init__(self, letter, additional_lattice_points):
self.letter = letter
self.additional_lattice_points = additional_lattice_points
def transform(self, op):
additional_ops = []
for point in self.additional_lattice_points:
func = lambda x: '{0}+{3},{1}+{4},{2}+{5}'.format(*(x.split(','
) + list(point)))
additional_ops.append(op.__class__(func(op.operation_string)))
return additional_ops
@classmethod
def primitive(cls):
return cls('P', [])
@classmethod
def body_centered(cls):
return cls('I', [(0.5, 0.5, 0.5)])
@classmethod
def hexagonal(cls):
return cls('H', [(Fraction(2, 3), Fraction(1, 3), 0.0), (Fraction(1,
3), Fraction(2, 3), 0.0)])
@classmethod
def rhombohedral(cls):
return cls('R', [(Fraction(2, 3), Fraction(1, 3), Fraction(1, 3)),
(Fraction(1, 3), Fraction(2, 3), Fraction(2, 3))])
@classmethod
def face_centered(cls):
return cls('F', [(0.0, 0.5, 0.5), (0.5, 0.0, 0.5), (0.5, 0.5, 0.0)])
@classmethod
def base_centered_A(cls):
return cls('A', [(0.0, 0.5, 0.5)])
@classmethod
def base_centered_B(cls):
return cls('B', [(0.5, 0.0, 0.5)])
@classmethod
def base_centered_C(cls):
return cls('C', [(0.5, 0.5, 0.0)])
@classmethod
def construct(cls, latt):
"""
Given the LATT directive in a res file, return the corresponding centered lattice type.
:param latt: the absolute integer value specified in LATT
:return: corrected centered lattice type
"""
latt = abs(latt)
if latt == 1:
return cls.primitive()
elif latt == 2:
return cls.body_centered()
elif latt == 3:
return cls.rhombohedral()
elif latt == 4:
return cls.face_centered()
elif latt == 5:
return cls.base_centered_A()
elif latt == 6:
return cls.base_centered_B()
elif latt == 7:
return cls.base_centered_C()
def get_LATT_code(self):
if self.letter == 'P':
return 1
elif self.letter == 'I':
return 2
elif self.letter == 'R':
return 3
elif self.letter == 'F':
return 4
elif self.letter == 'A':
return 5
elif self.letter == 'B':
return 6
elif self.letter == 'C':
return 7
class AsymmetricUnit(object):
UNKNOWN = [[0, 1.0], [0, 1.0], [0, 1.0]]
FULL = [[0, 1.0], [0, 1.0], [0, 1.0]]
HALF_X = [[0, 0.5], [0, 1.0], [0, 1.0]]
HALF_Y = [[0, 1.0], [0, 0.5], [0, 1.0]]
HALF_Z = [[0, 1.0], [0, 1.0], [0, 0.5]]
QUART_Y = [[0, 1.0], [0, 0.25], [0, 1.0]]
HALF_X_QUART_Y = [[0, 0.5], [0, 0.25], [0, 1.0]]
HALF_XZ = [[0, 0.5], [0, 1.0], [0, 0.5]]
HALF_XY = [[0, 0.5], [0, 0.5], [0, 1.0]]
EIGHT_Z = [[0, 1.0], [0, 1.0], [0, 0.125]]
class UniqueAxis(object):
UNKNOWN = -1
NA = -1
X = 0
Y = 1
Z = 2
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class SpaceGroup(object):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class SymmetryOperation(object):
def __init__(self, operation_string):
"""
Initialize a symmetry operation object from a string representation of symmetry operation.
:param operation_string: A string (as read in from res/cif file) representing a symmetry operation
for a space group.
"""
self.operation_string = operation_string.lower()
self.operation_function = None
self.__set_operation_function()
def __set_operation_function(self):
"""
Convert the string form of the symmetry operation into the form of a mathematical
function that can be directly applied to a vector to transform a point to a
symmetry related point.
"""
if self.operation_function is not None:
return self.operation_function
else:
self.operation_function = symm_eval
def transform_scaled_position(self, data):
"""
Applying this symmetry operation on a 3D coordinate to transform it to a symmetry-related
position in the crystal.
:param data: A vector (:class:`entdecker.core.models.vector3d.cVector3D`)
representing the fractional coordinates on which the symmetry
operation will be applied upon.
:return: Symmetry transformed vector.
"""
return self.operation_function(prepare_operation(self.
operation_string), data)
def transform_atom(self, atom):
return Atom(label=atom.label, scaled_position=self.
transform_scaled_position(atom.scaled_position))
def inversion(self):
func = lambda x: '-1*(%s),-1*(%s),-1*(%s)' % tuple(x.split(','))
return self.__class__(func(self.operation_string))
class Symmetry(object):
@staticmethod
def get(value):
if value is '' or value == 'UNKNOWN':
return []
return [SymmetryOperation(v) for v in value.split(';')]
class Inversion(object):
YES = True
NO = False
UNKNOWN = None
class InversionFactory(object):
@staticmethod
def construct(latt):
if int(latt) > 0:
return CentroSymmetric()
else:
return NonCentroSymmetric()
class CentroSymmetric(Inversion):
@staticmethod
def transform(op):
func = lambda x: '-1*(%s),-1*(%s),-1*(%s)' % tuple(x.split(','))
return func(op)
class NonCentroSymmetric(Inversion):
@staticmethod
def transform(op):
return op
class Centering(object):
def __init__(self, letter, additional_lattice_points):
self.letter = letter
self.additional_lattice_points = additional_lattice_points
def transform(self, op):
additional_ops = []
for point in self.additional_lattice_points:
func = lambda x: '{0}+{3},{1}+{4},{2}+{5}'.format(*(x.split(','
) + list(point)))
additional_ops.append(op.__class__(func(op.operation_string)))
return additional_ops
@classmethod
def primitive(cls):
return cls('P', [])
@classmethod
def body_centered(cls):
return cls('I', [(0.5, 0.5, 0.5)])
@classmethod
def hexagonal(cls):
return cls('H', [(Fraction(2, 3), Fraction(1, 3), 0.0), (Fraction(1,
3), Fraction(2, 3), 0.0)])
@classmethod
def rhombohedral(cls):
return cls('R', [(Fraction(2, 3), Fraction(1, 3), Fraction(1, 3)),
(Fraction(1, 3), Fraction(2, 3), Fraction(2, 3))])
@classmethod
def face_centered(cls):
return cls('F', [(0.0, 0.5, 0.5), (0.5, 0.0, 0.5), (0.5, 0.5, 0.0)])
@classmethod
def base_centered_A(cls):
return cls('A', [(0.0, 0.5, 0.5)])
@classmethod
def base_centered_B(cls):
return cls('B', [(0.5, 0.0, 0.5)])
@classmethod
def base_centered_C(cls):
return cls('C', [(0.5, 0.5, 0.0)])
@classmethod
def construct(cls, latt):
"""
Given the LATT directive in a res file, return the corresponding centered lattice type.
:param latt: the absolute integer value specified in LATT
:return: corrected centered lattice type
"""
latt = abs(latt)
if latt == 1:
return cls.primitive()
elif latt == 2:
return cls.body_centered()
elif latt == 3:
return cls.rhombohedral()
elif latt == 4:
return cls.face_centered()
elif latt == 5:
return cls.base_centered_A()
elif latt == 6:
return cls.base_centered_B()
elif latt == 7:
return cls.base_centered_C()
def get_LATT_code(self):
if self.letter == 'P':
return 1
elif self.letter == 'I':
return 2
elif self.letter == 'R':
return 3
elif self.letter == 'F':
return 4
elif self.letter == 'A':
return 5
elif self.letter == 'B':
return 6
elif self.letter == 'C':
return 7
class AsymmetricUnit(object):
UNKNOWN = [[0, 1.0], [0, 1.0], [0, 1.0]]
FULL = [[0, 1.0], [0, 1.0], [0, 1.0]]
HALF_X = [[0, 0.5], [0, 1.0], [0, 1.0]]
HALF_Y = [[0, 1.0], [0, 0.5], [0, 1.0]]
HALF_Z = [[0, 1.0], [0, 1.0], [0, 0.5]]
QUART_Y = [[0, 1.0], [0, 0.25], [0, 1.0]]
HALF_X_QUART_Y = [[0, 0.5], [0, 0.25], [0, 1.0]]
HALF_XZ = [[0, 0.5], [0, 1.0], [0, 0.5]]
HALF_XY = [[0, 0.5], [0, 0.5], [0, 1.0]]
EIGHT_Z = [[0, 1.0], [0, 1.0], [0, 0.125]]
class UniqueAxis(object):
UNKNOWN = -1
NA = -1
X = 0
Y = 1
Z = 2
<|reserved_special_token_0|>
<|reserved_special_token_1|>
from core.models import Atom
from core.models.vector3d import cVector3D
from fractions import Fraction
class SpaceGroup(object):
    """A space group: lattice metadata plus the fully expanded operation list.

    On construction the generator operations in *symmetry* are expanded
    into ``full_symmetry`` in three ordered steps: prepend the identity,
    append inversion images for centrosymmetric groups, then append the
    translations contributed by the lattice centering.  The order matters:
    centering is applied to the already inversion-expanded list.
    """

    def __init__(self,
                 index=None,
                 name=None,
                 lattice_system=None,
                 lattice_centering=None,
                 inversion=None,
                 symmetry=None,
                 asymmetric_unit=None,
                 unique_axis=None):
        self.index = index
        self.name = name
        self.lattice_system = lattice_system
        self.lattice_centering = lattice_centering
        self.inversion = inversion
        self.symmetry = symmetry
        self.asymmetric_unit = asymmetric_unit
        self.unique_axis = unique_axis
        # Generators + identity + inversion images (no centering applied yet).
        self.non_centering_symmetry = []
        # Complete operation list, including centering translations.
        self.full_symmetry = []
        self.__compute_full_symmetry()

    @property
    def identity(self):
        # The trivial operation; every space group contains it.
        return SymmetryOperation('x,y,z')

    def __append_identity(self):
        # Prepend the identity unless the generator list already contains it.
        if 'x,y,z' not in [i.operation_string for i in self.symmetry]:
            self.symmetry = [self.identity] + self.symmetry

    def __add_inversion_symmetry(self):
        # Copy all generators, then (for centrosymmetric groups) append the
        # inverted image of each one.  range() snapshots the length first, so
        # the appended images are not themselves re-inverted.
        for op in self.symmetry:
            self.non_centering_symmetry.append(op)
        if isinstance(self.inversion, CentroSymmetric):
            for i in range(len(self.non_centering_symmetry)):
                op = self.non_centering_symmetry[i]
                self.non_centering_symmetry.append(op.inversion())

    def __add_centering_symmetry(self):
        # Append the centering translations of every operation gathered so
        # far.  As above, range() snapshots the length, so freshly appended
        # operations are not expanded again.
        if self.lattice_centering:
            for i in range(len(self.full_symmetry)):
                op = self.full_symmetry[i]
                centering_ops = self.lattice_centering.transform(op)
                self.full_symmetry += centering_ops

    def __compute_full_symmetry(self):
        # Order matters: identity first, inversion second, centering last.
        self.__append_identity()
        self.__add_inversion_symmetry()
        self.full_symmetry = [op for op in self.non_centering_symmetry]
        self.__add_centering_symmetry()
        return self.full_symmetry
class SymmetryOperation(object):
    """One symmetry operation of a space group, e.g. ``'-x,y+1/2,-z'``.

    Keeps the (lower-cased) raw string alongside a callable that applies
    the operation to fractional coordinates.
    """

    def __init__(self, operation_string):
        self.operation_string = operation_string.lower()
        self.operation_function = None
        self.__set_operation_function()

    def __set_operation_function(self):
        # Bind the evaluator once; if already bound, hand back the existing
        # callable untouched.
        if self.operation_function is not None:
            return self.operation_function
        self.operation_function = symm_eval

    def transform_scaled_position(self, data):
        """Apply this operation to *data* (fractional-coordinate vector)
        and return the symmetry-related position."""
        expression = prepare_operation(self.operation_string)
        return self.operation_function(expression, data)

    def transform_atom(self, atom):
        """Return a copy of *atom* moved to the symmetry-equivalent position."""
        new_position = self.transform_scaled_position(atom.scaled_position)
        return Atom(label=atom.label, scaled_position=new_position)

    def inversion(self):
        """Return this operation composed with the inversion centre."""
        parts = tuple(self.operation_string.split(','))
        return self.__class__('-1*(%s),-1*(%s),-1*(%s)' % parts)
class Symmetry(object):
    """Factory that parses a ';'-separated list of symmetry-operation strings."""

    @staticmethod
    def get(value):
        """Return a list of SymmetryOperation objects parsed from *value*.

        An empty string or the literal 'UNKNOWN' yields an empty list.
        """
        # BUG FIX: the original used ``value is ''`` -- identity comparison
        # against a string literal only works by CPython interning accident
        # (and is a SyntaxWarning on modern Python); compare by equality.
        if value == '' or value == 'UNKNOWN':
            return []
        return [SymmetryOperation(v) for v in value.split(';')]
class Inversion(object):
    # TODO - this needs to be fixed later to make it consistent with the Centering class!
    # Tri-state flag describing whether a space group is centrosymmetric;
    # also the base class for CentroSymmetric / NonCentroSymmetric below.
    YES = True
    NO = False
    UNKNOWN = None  # inversion status not (yet) determined
class InversionFactory(object):
    """Maps a res-file LATT value onto the matching inversion flavour."""

    @staticmethod
    def construct(latt):
        # SHELX convention: a positive LATT value means centrosymmetric.
        return CentroSymmetric() if int(latt) > 0 else NonCentroSymmetric()
class CentroSymmetric(Inversion):
    """Centrosymmetric setting: each operation gains an inverted image."""

    @staticmethod
    def transform(op):
        # Negate each of the three comma-separated components of *op*.
        components = tuple(op.split(','))
        return '-1*(%s),-1*(%s),-1*(%s)' % components
class NonCentroSymmetric(Inversion):
    """Non-centrosymmetric setting: operations pass through unchanged."""

    @staticmethod
    def transform(op):
        return op
class Centering(object):
    """Lattice centering: a letter code plus the extra lattice points it adds.

    The extra points are fractional translations that every symmetry
    operation of a centered lattice must be combined with.
    """

    def __init__(self, letter, additional_lattice_points):
        self.letter = letter
        self.additional_lattice_points = additional_lattice_points

    def transform(self, op):
        """Return new operations: *op* shifted by each extra lattice point."""
        shifted = []
        for point in self.additional_lattice_points:
            parts = op.operation_string.split(',') + list(point)
            new_string = '{0}+{3},{1}+{4},{2}+{5}'.format(*parts)
            shifted.append(op.__class__(new_string))
        return shifted

    @classmethod
    def primitive(cls):
        """'P': no extra lattice points."""
        return cls('P', [])

    @classmethod
    def body_centered(cls):
        """'I': one extra point at the cell centre."""
        return cls('I', [(0.5, 0.5, 0.5)])

    @classmethod
    def hexagonal(cls):
        """'H': obverse hexagonal points at thirds (exact Fractions)."""
        return cls('H', [(Fraction(2, 3), Fraction(1, 3), 0.0),
                         (Fraction(1, 3), Fraction(2, 3), 0.0)])

    @classmethod
    def rhombohedral(cls):
        """'R': rhombohedral centering in the hexagonal setting."""
        return cls('R', [(Fraction(2, 3), Fraction(1, 3), Fraction(1, 3)),
                         (Fraction(1, 3), Fraction(2, 3), Fraction(2, 3))])

    @classmethod
    def face_centered(cls):
        """'F': one extra point on each cell face."""
        return cls('F', [(0.0, 0.5, 0.5), (0.5, 0.0, 0.5), (0.5, 0.5, 0.0)])

    @classmethod
    def base_centered_A(cls):
        """'A': extra point on the bc face."""
        return cls('A', [(0.0, 0.5, 0.5)])

    @classmethod
    def base_centered_B(cls):
        """'B': extra point on the ac face."""
        return cls('B', [(0.5, 0.0, 0.5)])

    @classmethod
    def base_centered_C(cls):
        """'C': extra point on the ab face."""
        return cls('C', [(0.5, 0.5, 0.0)])

    @classmethod
    def construct(cls, latt):
        """
        Given the LATT directive in a res file, return the corresponding
        centered lattice type.

        :param latt: the (possibly signed) integer specified in LATT
        :return: the matching Centering, or None when |latt| is not 1-7
        """
        factories = {
            1: cls.primitive,
            2: cls.body_centered,
            3: cls.rhombohedral,  # res files default LATT 3 to rhombohedral
            4: cls.face_centered,
            5: cls.base_centered_A,
            6: cls.base_centered_B,
            7: cls.base_centered_C,
        }
        factory = factories.get(abs(latt))
        return factory() if factory is not None else None

    def get_LATT_code(self):
        """Inverse of construct(): the LATT integer for this letter
        (None for letters such as 'H' that have no LATT code)."""
        codes = {'P': 1, 'I': 2, 'R': 3, 'F': 4, 'A': 5, 'B': 6, 'C': 7}
        return codes.get(self.letter)
class AsymmetricUnit(object):
    """[min, max] fractional-coordinate bounds per axis for common
    asymmetric-unit choices."""
    # NOTE(review): UNKNOWN and FULL share the full-cell box -- confirm
    # callers never need to tell them apart.
    UNKNOWN = [[0, 1.00], [0, 1.00], [0, 1.00]]
    FULL = [[0, 1.00], [0, 1.00], [0, 1.00]]
    HALF_X = [[0, 0.50], [0, 1.00], [0, 1.00]]
    HALF_Y = [[0, 1.00], [0, 0.50], [0, 1.00]]
    HALF_Z = [[0, 1.00], [0, 1.00], [0, 0.50]]
    QUART_Y = [[0, 1.00], [0, 0.25], [0, 1.00]]
    HALF_X_QUART_Y = [[0, 0.50], [0, 0.25], [0, 1.00]]
    HALF_XZ = [[0, 0.50], [0, 1.00], [0, 0.50]]
    HALF_XY = [[0, 0.50], [0, 0.50], [0, 1.00]]
    EIGHT_Z = [[0, 1.00], [0, 1.00], [0, 0.125]]
class UniqueAxis(object):
    """Index of the unique (e.g. monoclinic) axis."""
    # -1 deliberately doubles as both "unknown" and "not applicable".
    UNKNOWN = -1
    NA = -1
    X = 0
    Y = 1
    Z = 2
def symm_eval(s, data):
    """Evaluate the prepared operation string *s* at the point *data* and
    return the transformed fractional coordinates as a cVector3D."""
    # x, y and z must be local names here: eval() is called with no explicit
    # namespaces, so (in CPython) it resolves them from this frame -- fragile,
    # do not rename or move these bindings.
    x, y, z = data.x, data.y, data.z
    # SECURITY NOTE(review): eval on the operation string is only acceptable
    # because the strings come from res/cif files via prepare_operation();
    # never feed untrusted input through here.
    out = list(map(eval, s.split(",")))
    return cVector3D(out[0], out[1], out[2])
def prepare_operation(s):
    """Clean up a symmetry-operation string so it can be eval'd.

    Fractions are rewritten as explicit float divisions, whitespace is
    stripped and the result is lower-cased.

    :param s: input string, e.g. ``"x,y,Z+1/2"``
    :return: cleaned string, e.g. ``"x,y,z+1.0/2.0"``
    """
    # Replacement order matches the original: "1/4" before "1/2" etc.
    replacements = (
        ("1/4", "1.0/4.0"),
        ("1/2", "1.0/2.0"),
        ("3/4", "3.0/4.0"),
        ("1/3", "1.0/3.0"),
        ("2/3", "2.0/3.0"),
        ("1/6", "1.0/6.0"),
        ("5/6", "5.0/6.0"),
    )
    cleaned = s
    for fraction, decimal in replacements:
        cleaned = cleaned.replace(fraction, decimal)
    return cleaned.replace(" ", "").lower()
|
flexible
|
{
"blob_id": "88731049227629ed84ff56922d7ac11d4a137984",
"index": 5376,
"step-1": "<mask token>\n\n\nclass Centering(object):\n\n def __init__(self, letter, additional_lattice_points):\n self.letter = letter\n self.additional_lattice_points = additional_lattice_points\n\n def transform(self, op):\n additional_ops = []\n for point in self.additional_lattice_points:\n func = lambda x: '{0}+{3},{1}+{4},{2}+{5}'.format(*(x.split(','\n ) + list(point)))\n additional_ops.append(op.__class__(func(op.operation_string)))\n return additional_ops\n\n @classmethod\n def primitive(cls):\n return cls('P', [])\n <mask token>\n\n @classmethod\n def hexagonal(cls):\n return cls('H', [(Fraction(2, 3), Fraction(1, 3), 0.0), (Fraction(1,\n 3), Fraction(2, 3), 0.0)])\n\n @classmethod\n def rhombohedral(cls):\n return cls('R', [(Fraction(2, 3), Fraction(1, 3), Fraction(1, 3)),\n (Fraction(1, 3), Fraction(2, 3), Fraction(2, 3))])\n\n @classmethod\n def face_centered(cls):\n return cls('F', [(0.0, 0.5, 0.5), (0.5, 0.0, 0.5), (0.5, 0.5, 0.0)])\n\n @classmethod\n def base_centered_A(cls):\n return cls('A', [(0.0, 0.5, 0.5)])\n\n @classmethod\n def base_centered_B(cls):\n return cls('B', [(0.5, 0.0, 0.5)])\n\n @classmethod\n def base_centered_C(cls):\n return cls('C', [(0.5, 0.5, 0.0)])\n <mask token>\n\n def get_LATT_code(self):\n if self.letter == 'P':\n return 1\n elif self.letter == 'I':\n return 2\n elif self.letter == 'R':\n return 3\n elif self.letter == 'F':\n return 4\n elif self.letter == 'A':\n return 5\n elif self.letter == 'B':\n return 6\n elif self.letter == 'C':\n return 7\n\n\nclass AsymmetricUnit(object):\n UNKNOWN = [[0, 1.0], [0, 1.0], [0, 1.0]]\n FULL = [[0, 1.0], [0, 1.0], [0, 1.0]]\n HALF_X = [[0, 0.5], [0, 1.0], [0, 1.0]]\n HALF_Y = [[0, 1.0], [0, 0.5], [0, 1.0]]\n HALF_Z = [[0, 1.0], [0, 1.0], [0, 0.5]]\n QUART_Y = [[0, 1.0], [0, 0.25], [0, 1.0]]\n HALF_X_QUART_Y = [[0, 0.5], [0, 0.25], [0, 1.0]]\n HALF_XZ = [[0, 0.5], [0, 1.0], [0, 0.5]]\n HALF_XY = [[0, 0.5], [0, 0.5], [0, 1.0]]\n EIGHT_Z = [[0, 1.0], [0, 1.0], [0, 0.125]]\n\n\nclass 
UniqueAxis(object):\n UNKNOWN = -1\n NA = -1\n X = 0\n Y = 1\n Z = 2\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass SymmetryOperation(object):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass Symmetry(object):\n\n @staticmethod\n def get(value):\n if value is '' or value == 'UNKNOWN':\n return []\n return [SymmetryOperation(v) for v in value.split(';')]\n\n\nclass Inversion(object):\n YES = True\n NO = False\n UNKNOWN = None\n\n\nclass InversionFactory(object):\n\n @staticmethod\n def construct(latt):\n if int(latt) > 0:\n return CentroSymmetric()\n else:\n return NonCentroSymmetric()\n\n\nclass CentroSymmetric(Inversion):\n\n @staticmethod\n def transform(op):\n func = lambda x: '-1*(%s),-1*(%s),-1*(%s)' % tuple(x.split(','))\n return func(op)\n\n\nclass NonCentroSymmetric(Inversion):\n\n @staticmethod\n def transform(op):\n return op\n\n\nclass Centering(object):\n\n def __init__(self, letter, additional_lattice_points):\n self.letter = letter\n self.additional_lattice_points = additional_lattice_points\n\n def transform(self, op):\n additional_ops = []\n for point in self.additional_lattice_points:\n func = lambda x: '{0}+{3},{1}+{4},{2}+{5}'.format(*(x.split(','\n ) + list(point)))\n additional_ops.append(op.__class__(func(op.operation_string)))\n return additional_ops\n\n @classmethod\n def primitive(cls):\n return cls('P', [])\n\n @classmethod\n def body_centered(cls):\n return cls('I', [(0.5, 0.5, 0.5)])\n\n @classmethod\n def hexagonal(cls):\n return cls('H', [(Fraction(2, 3), Fraction(1, 3), 0.0), (Fraction(1,\n 3), Fraction(2, 3), 0.0)])\n\n @classmethod\n def rhombohedral(cls):\n return cls('R', [(Fraction(2, 3), Fraction(1, 3), Fraction(1, 3)),\n (Fraction(1, 3), Fraction(2, 3), Fraction(2, 3))])\n\n @classmethod\n def face_centered(cls):\n return cls('F', [(0.0, 0.5, 0.5), (0.5, 0.0, 0.5), (0.5, 0.5, 0.0)])\n\n @classmethod\n def base_centered_A(cls):\n return cls('A', [(0.0, 0.5, 0.5)])\n\n @classmethod\n def base_centered_B(cls):\n return cls('B', [(0.5, 0.0, 0.5)])\n\n @classmethod\n def 
base_centered_C(cls):\n return cls('C', [(0.5, 0.5, 0.0)])\n\n @classmethod\n def construct(cls, latt):\n \"\"\"\n Given the LATT directive in a res file, return the corresponding centered lattice type.\n\n :param latt: the absolute integer value specified in LATT\n :return: corrected centered lattice type\n \"\"\"\n latt = abs(latt)\n if latt == 1:\n return cls.primitive()\n elif latt == 2:\n return cls.body_centered()\n elif latt == 3:\n return cls.rhombohedral()\n elif latt == 4:\n return cls.face_centered()\n elif latt == 5:\n return cls.base_centered_A()\n elif latt == 6:\n return cls.base_centered_B()\n elif latt == 7:\n return cls.base_centered_C()\n\n def get_LATT_code(self):\n if self.letter == 'P':\n return 1\n elif self.letter == 'I':\n return 2\n elif self.letter == 'R':\n return 3\n elif self.letter == 'F':\n return 4\n elif self.letter == 'A':\n return 5\n elif self.letter == 'B':\n return 6\n elif self.letter == 'C':\n return 7\n\n\nclass AsymmetricUnit(object):\n UNKNOWN = [[0, 1.0], [0, 1.0], [0, 1.0]]\n FULL = [[0, 1.0], [0, 1.0], [0, 1.0]]\n HALF_X = [[0, 0.5], [0, 1.0], [0, 1.0]]\n HALF_Y = [[0, 1.0], [0, 0.5], [0, 1.0]]\n HALF_Z = [[0, 1.0], [0, 1.0], [0, 0.5]]\n QUART_Y = [[0, 1.0], [0, 0.25], [0, 1.0]]\n HALF_X_QUART_Y = [[0, 0.5], [0, 0.25], [0, 1.0]]\n HALF_XZ = [[0, 0.5], [0, 1.0], [0, 0.5]]\n HALF_XY = [[0, 0.5], [0, 0.5], [0, 1.0]]\n EIGHT_Z = [[0, 1.0], [0, 1.0], [0, 0.125]]\n\n\nclass UniqueAxis(object):\n UNKNOWN = -1\n NA = -1\n X = 0\n Y = 1\n Z = 2\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass SymmetryOperation(object):\n\n def __init__(self, operation_string):\n \"\"\"\n Initialize a symmetry operation object from a string representation of symmetry operation.\n\n :param operation_string: A string (as read in from res/cif file) representing a symmetry operation\n for a space group.\n \"\"\"\n self.operation_string = operation_string.lower()\n self.operation_function = None\n self.__set_operation_function()\n\n def __set_operation_function(self):\n \"\"\"\n Convert the string form of the symmetry operation into the form of a mathematical\n function that can be directly applied to a vector to transform a point to a\n symmetry related point.\n \"\"\"\n if self.operation_function is not None:\n return self.operation_function\n else:\n self.operation_function = symm_eval\n\n def transform_scaled_position(self, data):\n \"\"\"\n Applying this symmetry operation on a 3D coordinate to transform it to a symmetry-related\n position in the crystal.\n\n :param data: A vector (:class:`entdecker.core.models.vector3d.cVector3D`)\n representing the fractional coordinates on which the symmetry\n operation will be applied upon.\n :return: Symmetry transformed vector.\n \"\"\"\n return self.operation_function(prepare_operation(self.\n operation_string), data)\n\n def transform_atom(self, atom):\n return Atom(label=atom.label, scaled_position=self.\n transform_scaled_position(atom.scaled_position))\n\n def inversion(self):\n func = lambda x: '-1*(%s),-1*(%s),-1*(%s)' % tuple(x.split(','))\n return self.__class__(func(self.operation_string))\n\n\nclass Symmetry(object):\n\n @staticmethod\n def get(value):\n if value is '' or value == 'UNKNOWN':\n return []\n return [SymmetryOperation(v) for v in value.split(';')]\n\n\nclass Inversion(object):\n YES = True\n NO = False\n UNKNOWN = None\n\n\nclass InversionFactory(object):\n\n @staticmethod\n def construct(latt):\n if int(latt) > 0:\n return CentroSymmetric()\n else:\n return 
NonCentroSymmetric()\n\n\nclass CentroSymmetric(Inversion):\n\n @staticmethod\n def transform(op):\n func = lambda x: '-1*(%s),-1*(%s),-1*(%s)' % tuple(x.split(','))\n return func(op)\n\n\nclass NonCentroSymmetric(Inversion):\n\n @staticmethod\n def transform(op):\n return op\n\n\nclass Centering(object):\n\n def __init__(self, letter, additional_lattice_points):\n self.letter = letter\n self.additional_lattice_points = additional_lattice_points\n\n def transform(self, op):\n additional_ops = []\n for point in self.additional_lattice_points:\n func = lambda x: '{0}+{3},{1}+{4},{2}+{5}'.format(*(x.split(','\n ) + list(point)))\n additional_ops.append(op.__class__(func(op.operation_string)))\n return additional_ops\n\n @classmethod\n def primitive(cls):\n return cls('P', [])\n\n @classmethod\n def body_centered(cls):\n return cls('I', [(0.5, 0.5, 0.5)])\n\n @classmethod\n def hexagonal(cls):\n return cls('H', [(Fraction(2, 3), Fraction(1, 3), 0.0), (Fraction(1,\n 3), Fraction(2, 3), 0.0)])\n\n @classmethod\n def rhombohedral(cls):\n return cls('R', [(Fraction(2, 3), Fraction(1, 3), Fraction(1, 3)),\n (Fraction(1, 3), Fraction(2, 3), Fraction(2, 3))])\n\n @classmethod\n def face_centered(cls):\n return cls('F', [(0.0, 0.5, 0.5), (0.5, 0.0, 0.5), (0.5, 0.5, 0.0)])\n\n @classmethod\n def base_centered_A(cls):\n return cls('A', [(0.0, 0.5, 0.5)])\n\n @classmethod\n def base_centered_B(cls):\n return cls('B', [(0.5, 0.0, 0.5)])\n\n @classmethod\n def base_centered_C(cls):\n return cls('C', [(0.5, 0.5, 0.0)])\n\n @classmethod\n def construct(cls, latt):\n \"\"\"\n Given the LATT directive in a res file, return the corresponding centered lattice type.\n\n :param latt: the absolute integer value specified in LATT\n :return: corrected centered lattice type\n \"\"\"\n latt = abs(latt)\n if latt == 1:\n return cls.primitive()\n elif latt == 2:\n return cls.body_centered()\n elif latt == 3:\n return cls.rhombohedral()\n elif latt == 4:\n return cls.face_centered()\n elif latt == 
5:\n return cls.base_centered_A()\n elif latt == 6:\n return cls.base_centered_B()\n elif latt == 7:\n return cls.base_centered_C()\n\n def get_LATT_code(self):\n if self.letter == 'P':\n return 1\n elif self.letter == 'I':\n return 2\n elif self.letter == 'R':\n return 3\n elif self.letter == 'F':\n return 4\n elif self.letter == 'A':\n return 5\n elif self.letter == 'B':\n return 6\n elif self.letter == 'C':\n return 7\n\n\nclass AsymmetricUnit(object):\n UNKNOWN = [[0, 1.0], [0, 1.0], [0, 1.0]]\n FULL = [[0, 1.0], [0, 1.0], [0, 1.0]]\n HALF_X = [[0, 0.5], [0, 1.0], [0, 1.0]]\n HALF_Y = [[0, 1.0], [0, 0.5], [0, 1.0]]\n HALF_Z = [[0, 1.0], [0, 1.0], [0, 0.5]]\n QUART_Y = [[0, 1.0], [0, 0.25], [0, 1.0]]\n HALF_X_QUART_Y = [[0, 0.5], [0, 0.25], [0, 1.0]]\n HALF_XZ = [[0, 0.5], [0, 1.0], [0, 0.5]]\n HALF_XY = [[0, 0.5], [0, 0.5], [0, 1.0]]\n EIGHT_Z = [[0, 1.0], [0, 1.0], [0, 0.125]]\n\n\nclass UniqueAxis(object):\n UNKNOWN = -1\n NA = -1\n X = 0\n Y = 1\n Z = 2\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass SpaceGroup(object):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass SymmetryOperation(object):\n\n def __init__(self, operation_string):\n \"\"\"\n Initialize a symmetry operation object from a string representation of symmetry operation.\n\n :param operation_string: A string (as read in from res/cif file) representing a symmetry operation\n for a space group.\n \"\"\"\n self.operation_string = operation_string.lower()\n self.operation_function = None\n self.__set_operation_function()\n\n def __set_operation_function(self):\n \"\"\"\n Convert the string form of the symmetry operation into the form of a mathematical\n function that can be directly applied to a vector to transform a point to a\n symmetry related point.\n \"\"\"\n if self.operation_function is not None:\n return self.operation_function\n else:\n self.operation_function = symm_eval\n\n def transform_scaled_position(self, data):\n \"\"\"\n Applying this symmetry operation on a 3D coordinate to transform it to a symmetry-related\n position in the crystal.\n\n :param data: A vector (:class:`entdecker.core.models.vector3d.cVector3D`)\n representing the fractional coordinates on which the symmetry\n operation will be applied upon.\n :return: Symmetry transformed vector.\n \"\"\"\n return self.operation_function(prepare_operation(self.\n operation_string), data)\n\n def transform_atom(self, atom):\n return Atom(label=atom.label, scaled_position=self.\n transform_scaled_position(atom.scaled_position))\n\n def inversion(self):\n func = lambda x: '-1*(%s),-1*(%s),-1*(%s)' % tuple(x.split(','))\n return self.__class__(func(self.operation_string))\n\n\nclass Symmetry(object):\n\n @staticmethod\n def get(value):\n if value is '' or value == 'UNKNOWN':\n return []\n return [SymmetryOperation(v) for v in value.split(';')]\n\n\nclass Inversion(object):\n YES = True\n NO = False\n UNKNOWN = None\n\n\nclass 
InversionFactory(object):\n\n @staticmethod\n def construct(latt):\n if int(latt) > 0:\n return CentroSymmetric()\n else:\n return NonCentroSymmetric()\n\n\nclass CentroSymmetric(Inversion):\n\n @staticmethod\n def transform(op):\n func = lambda x: '-1*(%s),-1*(%s),-1*(%s)' % tuple(x.split(','))\n return func(op)\n\n\nclass NonCentroSymmetric(Inversion):\n\n @staticmethod\n def transform(op):\n return op\n\n\nclass Centering(object):\n\n def __init__(self, letter, additional_lattice_points):\n self.letter = letter\n self.additional_lattice_points = additional_lattice_points\n\n def transform(self, op):\n additional_ops = []\n for point in self.additional_lattice_points:\n func = lambda x: '{0}+{3},{1}+{4},{2}+{5}'.format(*(x.split(','\n ) + list(point)))\n additional_ops.append(op.__class__(func(op.operation_string)))\n return additional_ops\n\n @classmethod\n def primitive(cls):\n return cls('P', [])\n\n @classmethod\n def body_centered(cls):\n return cls('I', [(0.5, 0.5, 0.5)])\n\n @classmethod\n def hexagonal(cls):\n return cls('H', [(Fraction(2, 3), Fraction(1, 3), 0.0), (Fraction(1,\n 3), Fraction(2, 3), 0.0)])\n\n @classmethod\n def rhombohedral(cls):\n return cls('R', [(Fraction(2, 3), Fraction(1, 3), Fraction(1, 3)),\n (Fraction(1, 3), Fraction(2, 3), Fraction(2, 3))])\n\n @classmethod\n def face_centered(cls):\n return cls('F', [(0.0, 0.5, 0.5), (0.5, 0.0, 0.5), (0.5, 0.5, 0.0)])\n\n @classmethod\n def base_centered_A(cls):\n return cls('A', [(0.0, 0.5, 0.5)])\n\n @classmethod\n def base_centered_B(cls):\n return cls('B', [(0.5, 0.0, 0.5)])\n\n @classmethod\n def base_centered_C(cls):\n return cls('C', [(0.5, 0.5, 0.0)])\n\n @classmethod\n def construct(cls, latt):\n \"\"\"\n Given the LATT directive in a res file, return the corresponding centered lattice type.\n\n :param latt: the absolute integer value specified in LATT\n :return: corrected centered lattice type\n \"\"\"\n latt = abs(latt)\n if latt == 1:\n return cls.primitive()\n elif latt == 2:\n 
return cls.body_centered()\n elif latt == 3:\n return cls.rhombohedral()\n elif latt == 4:\n return cls.face_centered()\n elif latt == 5:\n return cls.base_centered_A()\n elif latt == 6:\n return cls.base_centered_B()\n elif latt == 7:\n return cls.base_centered_C()\n\n def get_LATT_code(self):\n if self.letter == 'P':\n return 1\n elif self.letter == 'I':\n return 2\n elif self.letter == 'R':\n return 3\n elif self.letter == 'F':\n return 4\n elif self.letter == 'A':\n return 5\n elif self.letter == 'B':\n return 6\n elif self.letter == 'C':\n return 7\n\n\nclass AsymmetricUnit(object):\n UNKNOWN = [[0, 1.0], [0, 1.0], [0, 1.0]]\n FULL = [[0, 1.0], [0, 1.0], [0, 1.0]]\n HALF_X = [[0, 0.5], [0, 1.0], [0, 1.0]]\n HALF_Y = [[0, 1.0], [0, 0.5], [0, 1.0]]\n HALF_Z = [[0, 1.0], [0, 1.0], [0, 0.5]]\n QUART_Y = [[0, 1.0], [0, 0.25], [0, 1.0]]\n HALF_X_QUART_Y = [[0, 0.5], [0, 0.25], [0, 1.0]]\n HALF_XZ = [[0, 0.5], [0, 1.0], [0, 0.5]]\n HALF_XY = [[0, 0.5], [0, 0.5], [0, 1.0]]\n EIGHT_Z = [[0, 1.0], [0, 1.0], [0, 0.125]]\n\n\nclass UniqueAxis(object):\n UNKNOWN = -1\n NA = -1\n X = 0\n Y = 1\n Z = 2\n\n\n<mask token>\n",
"step-5": "from core.models import Atom\nfrom core.models.vector3d import cVector3D\nfrom fractions import Fraction\n\n\nclass SpaceGroup(object):\n def __init__(self,\n index=None,\n name=None,\n lattice_system=None,\n lattice_centering=None,\n inversion=None,\n symmetry=None,\n asymmetric_unit=None,\n unique_axis=None):\n self.index = index\n self.name = name\n self.lattice_system = lattice_system\n self.lattice_centering = lattice_centering\n self.inversion = inversion\n self.symmetry = symmetry\n self.asymmetric_unit = asymmetric_unit\n self.unique_axis = unique_axis\n self.non_centering_symmetry = []\n\n self.full_symmetry = []\n self.__compute_full_symmetry()\n\n @property\n def identity(self):\n return SymmetryOperation('x,y,z')\n\n def __append_identity(self):\n if 'x,y,z' not in [i.operation_string for i in self.symmetry]:\n self.symmetry = [self.identity] + self.symmetry\n\n def __add_inversion_symmetry(self):\n for op in self.symmetry:\n self.non_centering_symmetry.append(op)\n\n if isinstance(self.inversion,CentroSymmetric):\n for i in range(len(self.non_centering_symmetry)):\n op = self.non_centering_symmetry[i]\n self.non_centering_symmetry.append(op.inversion())\n\n def __add_centering_symmetry(self):\n if self.lattice_centering:\n for i in range(len(self.full_symmetry)):\n op = self.full_symmetry[i]\n centering_ops = self.lattice_centering.transform(op)\n self.full_symmetry += centering_ops\n\n def __compute_full_symmetry(self):\n self.__append_identity()\n self.__add_inversion_symmetry()\n self.full_symmetry = [op for op in self.non_centering_symmetry]\n self.__add_centering_symmetry()\n return self.full_symmetry\n\n\nclass SymmetryOperation(object):\n def __init__(self, operation_string):\n \"\"\"\n Initialize a symmetry operation object from a string representation of symmetry operation.\n\n :param operation_string: A string (as read in from res/cif file) representing a symmetry operation\n for a space group.\n \"\"\"\n self.operation_string = 
operation_string.lower()\n self.operation_function = None\n self.__set_operation_function()\n\n def __set_operation_function(self):\n \"\"\"\n Convert the string form of the symmetry operation into the form of a mathematical\n function that can be directly applied to a vector to transform a point to a\n symmetry related point.\n \"\"\"\n if self.operation_function is not None:\n return self.operation_function\n else:\n self.operation_function = symm_eval\n\n def transform_scaled_position(self, data):\n \"\"\"\n Applying this symmetry operation on a 3D coordinate to transform it to a symmetry-related\n position in the crystal.\n\n :param data: A vector (:class:`entdecker.core.models.vector3d.cVector3D`)\n representing the fractional coordinates on which the symmetry\n operation will be applied upon.\n :return: Symmetry transformed vector.\n \"\"\"\n return self.operation_function(prepare_operation(self.operation_string), data)\n\n def transform_atom(self, atom):\n return Atom(label=atom.label, scaled_position=self.transform_scaled_position(atom.scaled_position))\n\n def inversion(self):\n func = lambda x: \"-1*(%s),-1*(%s),-1*(%s)\" % tuple(x.split(\",\"))\n return self.__class__(func(self.operation_string))\n\n\nclass Symmetry(object):\n @staticmethod\n def get(value):\n if value is '' or value == 'UNKNOWN':\n return []\n return [SymmetryOperation(v) for v in value.split(';')]\n\n\nclass Inversion(object):\n # TODO - this needs to be fixed later to make it consistent with the Centering class!\n YES = True\n NO = False\n UNKNOWN = None\n\n\nclass InversionFactory(object):\n @staticmethod\n def construct(latt):\n if int(latt) > 0:\n return CentroSymmetric()\n else:\n return NonCentroSymmetric()\n\n\nclass CentroSymmetric(Inversion):\n @staticmethod\n def transform(op):\n func = lambda x: \"-1*(%s),-1*(%s),-1*(%s)\" % tuple(x.split(\",\"))\n return func(op)\n\n\nclass NonCentroSymmetric(Inversion):\n @staticmethod\n def transform(op):\n return op\n\n\nclass 
Centering(object):\n def __init__(self, letter, additional_lattice_points):\n self.letter = letter\n self.additional_lattice_points = additional_lattice_points\n\n def transform(self, op):\n additional_ops = []\n for point in self.additional_lattice_points:\n func = lambda x: \"{0}+{3},{1}+{4},{2}+{5}\".format(*(x.split(\",\") + list(point)))\n additional_ops.append(op.__class__(func(op.operation_string)))\n return additional_ops\n\n @classmethod\n def primitive(cls):\n return cls('P', [])\n\n @classmethod\n def body_centered(cls):\n return cls('I', [(0.5, 0.5, 0.5)])\n\n @classmethod\n def hexagonal(cls):\n return cls('H', [(Fraction(2, 3), Fraction(1, 3), 0.0),\n (Fraction(1, 3), Fraction(2, 3), 0.0)])\n\n @classmethod\n def rhombohedral(cls):\n return cls('R', [(Fraction(2, 3), Fraction(1, 3), Fraction(1, 3)),\n (Fraction(1, 3), Fraction(2, 3), Fraction(2, 3))])\n\n @classmethod\n def face_centered(cls):\n return cls('F', [(0.0, 0.5, 0.5), (0.5, 0.0, 0.5), (0.5, 0.5, 0.0)])\n\n @classmethod\n def base_centered_A(cls):\n return cls('A', [(0.0, 0.5, 0.5)])\n\n @classmethod\n def base_centered_B(cls):\n return cls('B', [(0.5, 0.0, 0.5)])\n\n @classmethod\n def base_centered_C(cls):\n return cls('C', [(0.5, 0.5, 0.0)])\n\n @classmethod\n def construct(cls, latt):\n \"\"\"\n Given the LATT directive in a res file, return the corresponding centered lattice type.\n\n :param latt: the absolute integer value specified in LATT\n :return: corrected centered lattice type\n \"\"\"\n latt = abs(latt)\n if latt == 1:\n return cls.primitive()\n elif latt == 2:\n return cls.body_centered()\n elif latt == 3:\n # default setting from reading in a res file is Rhombohedral\n return cls.rhombohedral()\n elif latt == 4:\n return cls.face_centered()\n elif latt == 5:\n return cls.base_centered_A()\n elif latt == 6:\n return cls.base_centered_B()\n elif latt == 7:\n return cls.base_centered_C()\n\n def get_LATT_code(self):\n if self.letter == 'P':\n return 1\n elif self.letter == 'I':\n 
return 2\n elif self.letter == 'R':\n return 3\n elif self.letter == 'F':\n return 4\n elif self.letter == 'A':\n return 5\n elif self.letter == 'B':\n return 6\n elif self.letter == 'C':\n return 7\n\n\nclass AsymmetricUnit(object):\n UNKNOWN = [[0, 1.00], [0, 1.00], [0, 1.00]]\n FULL = [[0, 1.00], [0, 1.00], [0, 1.00]]\n HALF_X = [[0, 0.50], [0, 1.00], [0, 1.00]]\n HALF_Y = [[0, 1.00], [0, 0.50], [0, 1.00]]\n HALF_Z = [[0, 1.00], [0, 1.00], [0, 0.50]]\n QUART_Y = [[0, 1.00], [0, 0.25], [0, 1.00]]\n HALF_X_QUART_Y = [[0, 0.50], [0, 0.25], [0, 1.00]]\n HALF_XZ = [[0, 0.50], [0, 1.00], [0, 0.50]]\n HALF_XY = [[0, 0.50], [0, 0.50], [0, 1.00]]\n EIGHT_Z = [[0, 1.00], [0, 1.00], [0, 0.125]]\n\n\nclass UniqueAxis(object):\n UNKNOWN = -1\n NA = -1\n X = 0\n Y = 1\n Z = 2\n\n\ndef symm_eval(s, data):\n x, y, z = data.x, data.y, data.z\n out = list(map(eval, s.split(\",\")))\n return cVector3D(out[0], out[1], out[2])\n\n\ndef prepare_operation(s):\n ''' Cleans up a string of a symmetry operation to be used in eval or exec\n\n :param s: Input string e.g. \"x,y,z+1/2\"\n :type s: string\n\n :rtype: string\n '''\n tmp = s.replace(\"1/4\", \"1.0/4.0\")\n tmp = tmp.replace(\"1/2\", \"1.0/2.0\")\n tmp = tmp.replace(\"3/4\", \"3.0/4.0\")\n tmp = tmp.replace(\"1/3\", \"1.0/3.0\")\n tmp = tmp.replace(\"2/3\", \"2.0/3.0\")\n tmp = tmp.replace(\"1/6\", \"1.0/6.0\")\n tmp = tmp.replace(\"5/6\", \"5.0/6.0\")\n return tmp.replace(\" \", \"\").lower()\n",
"step-ids": [
15,
28,
33,
34,
44
]
}
|
[
15,
28,
33,
34,
44
] |
<|reserved_special_token_0|>
class ResizableConv2d(torch.nn.Module):
<|reserved_special_token_0|>
def forward(self, x):
    # Main path: two 3x3 convs, then resample back to the target size.
    y = self.conv(x)
    y = self.conv2(y)
    y = self.resize(y)
    # Residual path: a single conv on the raw input, resized to match.
    y = y + self.resize(self.residual_conv(x))
    y = self.activation(y)
    return y
class ActorNet(torch.nn.Module):
    """Policy network: maps (state, goal, time) images to one sigmoid
    output vector per action head.

    NOTE(review): forward() injects Gaussian noise three times, so its
    output is stochastic by design -- presumably exploration noise; confirm.
    """

    def __init__(self, state_size, action_size):
        super().__init__()
        # 6 input channels: state and goal are concatenated on dim 1.
        self.conv = ResizableConv2d(state_size, 6, 3)
        self.conv_backwards = ResizableConv2d(state_size, 3, 6)
        self.conv2 = ResizableConv2d(state_size, 3, 3)
        self.conv3 = ResizableConv2d(state_size, 3, 3)
        self.conv4 = ResizableConv2d(state_size, 3, 3)
        # Collapse to a fixed 8x8 map so the linear heads have a known fan-in.
        self.conv_resize = ResizableConv2d((8, 8), 3, 3)
        # One output head per entry of action_size (iterable of head widths).
        self.linears = torch.nn.ModuleList([])
        for i in action_size:
            self.linears.append(torch.nn.Linear(8 * 8 * 3, i))
        self.optim = torch.optim.AdamW(self.parameters(), lr=0.0001)

    def forward(self, x, goal, time):
        # Fuse state and goal along channels; `time` is added (broadcast).
        x = torch.cat([x, goal], dim=1) + time
        x = self.conv(x)
        # Widen back to 6 channels and re-run self.conv (shared weights).
        x_ = self.conv_backwards(x)
        x = self.conv(x_) + goal
        x = x + torch.randn_like(x)  # stochastic noise injection
        x = self.conv2(x) + time
        x = x + torch.randn_like(x)
        x = self.conv3(x) + goal
        x = x + torch.randn_like(x)
        x = self.conv4(x) + goal
        x = self.conv_resize(x)
        y = x
        y = torch.flatten(y, start_dim=1)
        # One sigmoid head per action dimension.
        y_list = []
        for i in self.linears:
            iy = i(y)
            iy = torch.sigmoid(iy)
            y_list.append(iy)
        return y_list

    def optimize(self, loss):
        # Single AdamW step on *loss*; logs the scalar value.
        self.optim.zero_grad()
        loss.backward()
        self.optim.step()
        print('Actor Loss:', loss.item())
class GoalkeeperNet(torch.nn.Module):
    """Proposes a goal image for the actor from the current state image."""

    def __init__(self, state_size):
        super().__init__()
        self.conv = ResizableConv2d(state_size, 3, 3)
        self.conv2 = ResizableConv2d(state_size, 3, 3)
        self.conv3 = ResizableConv2d(state_size, 3, 3)
        # NOTE(review): conv4 and flatten are registered but never used by
        # forward(); kept so checkpoints and optimizer state stay compatible.
        self.conv4 = ResizableConv2d(state_size, 3, 3)
        self.flatten = torch.nn.Flatten()
        self.optim = torch.optim.AdamW(self.parameters(), lr=0.0001)

    def forward(self, state):
        hidden = self.conv(state)
        return self.conv3(self.conv2(hidden))

    def optimize(self, loss):
        """Apply one AdamW step on *loss* and log its value."""
        self.optim.zero_grad()
        loss.backward()
        self.optim.step()
        print('Goalkeeper Loss:', loss.item())
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ResizableConv2d(torch.nn.Module):
def __init__(self, state_size, inchan, outchan):
super().__init__()
self.conv = torch.nn.Conv2d(inchan, outchan, 3)
self.conv2 = torch.nn.Conv2d(outchan, outchan, 3)
self.residual_conv = torch.nn.Conv2d(inchan, outchan, 3)
self.resize = lambda x: torch.nn.functional.interpolate(x, size=
state_size, mode='bicubic', align_corners=True)
self.activation = Activation()
def forward(self, x):
y = self.conv(x)
y = self.conv2(y)
y = self.resize(y)
y = y + self.resize(self.residual_conv(x))
y = self.activation(y)
return y
class ActorNet(torch.nn.Module):
def __init__(self, state_size, action_size):
super().__init__()
self.conv = ResizableConv2d(state_size, 6, 3)
self.conv_backwards = ResizableConv2d(state_size, 3, 6)
self.conv2 = ResizableConv2d(state_size, 3, 3)
self.conv3 = ResizableConv2d(state_size, 3, 3)
self.conv4 = ResizableConv2d(state_size, 3, 3)
self.conv_resize = ResizableConv2d((8, 8), 3, 3)
self.linears = torch.nn.ModuleList([])
for i in action_size:
self.linears.append(torch.nn.Linear(8 * 8 * 3, i))
self.optim = torch.optim.AdamW(self.parameters(), lr=0.0001)
def forward(self, x, goal, time):
x = torch.cat([x, goal], dim=1) + time
x = self.conv(x)
x_ = self.conv_backwards(x)
x = self.conv(x_) + goal
x = x + torch.randn_like(x)
x = self.conv2(x) + time
x = x + torch.randn_like(x)
x = self.conv3(x) + goal
x = x + torch.randn_like(x)
x = self.conv4(x) + goal
x = self.conv_resize(x)
y = x
y = torch.flatten(y, start_dim=1)
y_list = []
for i in self.linears:
iy = i(y)
iy = torch.sigmoid(iy)
y_list.append(iy)
return y_list
def optimize(self, loss):
self.optim.zero_grad()
loss.backward()
self.optim.step()
print('Actor Loss:', loss.item())
class GoalkeeperNet(torch.nn.Module):
def __init__(self, state_size):
super().__init__()
self.conv = ResizableConv2d(state_size, 3, 3)
self.conv2 = ResizableConv2d(state_size, 3, 3)
self.conv3 = ResizableConv2d(state_size, 3, 3)
self.conv4 = ResizableConv2d(state_size, 3, 3)
self.flatten = torch.nn.Flatten()
self.optim = torch.optim.AdamW(self.parameters(), lr=0.0001)
def forward(self, state):
y = self.conv(state)
goal = self.conv2(y)
goal = self.conv3(goal)
return goal
def optimize(self, loss):
self.optim.zero_grad()
loss.backward()
self.optim.step()
print('Goalkeeper Loss:', loss.item())
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Activation(torch.nn.Module):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class ResizableConv2d(torch.nn.Module):
def __init__(self, state_size, inchan, outchan):
super().__init__()
self.conv = torch.nn.Conv2d(inchan, outchan, 3)
self.conv2 = torch.nn.Conv2d(outchan, outchan, 3)
self.residual_conv = torch.nn.Conv2d(inchan, outchan, 3)
self.resize = lambda x: torch.nn.functional.interpolate(x, size=
state_size, mode='bicubic', align_corners=True)
self.activation = Activation()
def forward(self, x):
y = self.conv(x)
y = self.conv2(y)
y = self.resize(y)
y = y + self.resize(self.residual_conv(x))
y = self.activation(y)
return y
class ActorNet(torch.nn.Module):
def __init__(self, state_size, action_size):
super().__init__()
self.conv = ResizableConv2d(state_size, 6, 3)
self.conv_backwards = ResizableConv2d(state_size, 3, 6)
self.conv2 = ResizableConv2d(state_size, 3, 3)
self.conv3 = ResizableConv2d(state_size, 3, 3)
self.conv4 = ResizableConv2d(state_size, 3, 3)
self.conv_resize = ResizableConv2d((8, 8), 3, 3)
self.linears = torch.nn.ModuleList([])
for i in action_size:
self.linears.append(torch.nn.Linear(8 * 8 * 3, i))
self.optim = torch.optim.AdamW(self.parameters(), lr=0.0001)
def forward(self, x, goal, time):
x = torch.cat([x, goal], dim=1) + time
x = self.conv(x)
x_ = self.conv_backwards(x)
x = self.conv(x_) + goal
x = x + torch.randn_like(x)
x = self.conv2(x) + time
x = x + torch.randn_like(x)
x = self.conv3(x) + goal
x = x + torch.randn_like(x)
x = self.conv4(x) + goal
x = self.conv_resize(x)
y = x
y = torch.flatten(y, start_dim=1)
y_list = []
for i in self.linears:
iy = i(y)
iy = torch.sigmoid(iy)
y_list.append(iy)
return y_list
def optimize(self, loss):
self.optim.zero_grad()
loss.backward()
self.optim.step()
print('Actor Loss:', loss.item())
class GoalkeeperNet(torch.nn.Module):
def __init__(self, state_size):
super().__init__()
self.conv = ResizableConv2d(state_size, 3, 3)
self.conv2 = ResizableConv2d(state_size, 3, 3)
self.conv3 = ResizableConv2d(state_size, 3, 3)
self.conv4 = ResizableConv2d(state_size, 3, 3)
self.flatten = torch.nn.Flatten()
self.optim = torch.optim.AdamW(self.parameters(), lr=0.0001)
def forward(self, state):
y = self.conv(state)
goal = self.conv2(y)
goal = self.conv3(goal)
return goal
def optimize(self, loss):
self.optim.zero_grad()
loss.backward()
self.optim.step()
print('Goalkeeper Loss:', loss.item())
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Activation(torch.nn.Module):
def __init__(self):
super().__init__()
self.swish = lambda x: x * torch.sigmoid(x)
self.linear = lambda x: x
self.sigmoid = lambda x: torch.sigmoid(x)
self.neg = lambda x: -x
self.sine = lambda x: torch.sin(x)
self.params = torch.nn.Parameter(torch.zeros(10))
def forward(self, x):
params = torch.sigmoid(self.params)
linear_x = self.linear(x) * params[0]
swish_x = self.swish(x) * params[1]
sigmoid_x = self.sigmoid(x) * params[2]
neg_x = self.neg(x) * params[3]
sine_x = self.sine(x) * params[4]
x = swish_x + linear_x + sigmoid_x + neg_x + sine_x
return x
class ResizableConv2d(torch.nn.Module):
def __init__(self, state_size, inchan, outchan):
super().__init__()
self.conv = torch.nn.Conv2d(inchan, outchan, 3)
self.conv2 = torch.nn.Conv2d(outchan, outchan, 3)
self.residual_conv = torch.nn.Conv2d(inchan, outchan, 3)
self.resize = lambda x: torch.nn.functional.interpolate(x, size=
state_size, mode='bicubic', align_corners=True)
self.activation = Activation()
def forward(self, x):
y = self.conv(x)
y = self.conv2(y)
y = self.resize(y)
y = y + self.resize(self.residual_conv(x))
y = self.activation(y)
return y
class ActorNet(torch.nn.Module):
def __init__(self, state_size, action_size):
super().__init__()
self.conv = ResizableConv2d(state_size, 6, 3)
self.conv_backwards = ResizableConv2d(state_size, 3, 6)
self.conv2 = ResizableConv2d(state_size, 3, 3)
self.conv3 = ResizableConv2d(state_size, 3, 3)
self.conv4 = ResizableConv2d(state_size, 3, 3)
self.conv_resize = ResizableConv2d((8, 8), 3, 3)
self.linears = torch.nn.ModuleList([])
for i in action_size:
self.linears.append(torch.nn.Linear(8 * 8 * 3, i))
self.optim = torch.optim.AdamW(self.parameters(), lr=0.0001)
def forward(self, x, goal, time):
x = torch.cat([x, goal], dim=1) + time
x = self.conv(x)
x_ = self.conv_backwards(x)
x = self.conv(x_) + goal
x = x + torch.randn_like(x)
x = self.conv2(x) + time
x = x + torch.randn_like(x)
x = self.conv3(x) + goal
x = x + torch.randn_like(x)
x = self.conv4(x) + goal
x = self.conv_resize(x)
y = x
y = torch.flatten(y, start_dim=1)
y_list = []
for i in self.linears:
iy = i(y)
iy = torch.sigmoid(iy)
y_list.append(iy)
return y_list
def optimize(self, loss):
self.optim.zero_grad()
loss.backward()
self.optim.step()
print('Actor Loss:', loss.item())
class GoalkeeperNet(torch.nn.Module):
def __init__(self, state_size):
super().__init__()
self.conv = ResizableConv2d(state_size, 3, 3)
self.conv2 = ResizableConv2d(state_size, 3, 3)
self.conv3 = ResizableConv2d(state_size, 3, 3)
self.conv4 = ResizableConv2d(state_size, 3, 3)
self.flatten = torch.nn.Flatten()
self.optim = torch.optim.AdamW(self.parameters(), lr=0.0001)
def forward(self, state):
y = self.conv(state)
goal = self.conv2(y)
goal = self.conv3(goal)
return goal
def optimize(self, loss):
self.optim.zero_grad()
loss.backward()
self.optim.step()
print('Goalkeeper Loss:', loss.item())
<|reserved_special_token_1|>
import torch
class Activation(torch.nn.Module):
def __init__(self):
super().__init__()
self.swish = lambda x: x * torch.sigmoid(x)
self.linear = lambda x: x
self.sigmoid = lambda x: torch.sigmoid(x)
self.neg = lambda x: -x
self.sine = lambda x: torch.sin(x)
self.params = torch.nn.Parameter(torch.zeros(10))
def forward(self, x):
params = torch.sigmoid(self.params)
linear_x = self.linear(x) * params[0]
swish_x = self.swish(x) * params[1]
sigmoid_x = self.sigmoid(x) * params[2]
neg_x = self.neg(x) * params[3]
sine_x = self.sine(x) * params[4]
x = swish_x + linear_x + sigmoid_x + neg_x + sine_x
return x
class ResizableConv2d(torch.nn.Module):
def __init__(self, state_size, inchan, outchan):
super().__init__()
self.conv = torch.nn.Conv2d(inchan, outchan, 3)
self.conv2 = torch.nn.Conv2d(outchan, outchan, 3)
self.residual_conv = torch.nn.Conv2d(inchan, outchan, 3)
self.resize = lambda x: torch.nn.functional.interpolate(x, size=state_size, mode='bicubic', align_corners=True)
self.activation = Activation()
def forward(self, x):
y = self.conv(x)
y = self.conv2(y)
y = self.resize(y)
y = y + self.resize(self.residual_conv(x))
y = self.activation(y)
return y
class ActorNet(torch.nn.Module):
def __init__(self, state_size, action_size):
super().__init__()
self.conv = ResizableConv2d(state_size, 6, 3)
self.conv_backwards = ResizableConv2d(state_size, 3, 6)
self.conv2 = ResizableConv2d(state_size, 3, 3)
self.conv3 = ResizableConv2d(state_size, 3, 3)
self.conv4 = ResizableConv2d(state_size, 3, 3)
self.conv_resize = ResizableConv2d((8, 8), 3, 3)
self.linears = torch.nn.ModuleList([])
for i in action_size:
self.linears.append(torch.nn.Linear(8*8*3, i))
self.optim = torch.optim.AdamW(self.parameters(), lr=1e-4)
def forward(self, x, goal, time):
x = torch.cat([x, goal], dim=1) + time
x = self.conv(x)
x_ = self.conv_backwards(x)
x = self.conv(x_) + goal
x = x + torch.randn_like(x)
x = self.conv2(x) + time
x = x + torch.randn_like(x)
x = self.conv3(x) + goal
x = x + torch.randn_like(x)
x = self.conv4(x) + goal
x = self.conv_resize(x)
y = x
y = torch.flatten(y, start_dim=1)
y_list = []
for i in self.linears:
iy = i(y)
iy = torch.sigmoid(iy)
y_list.append(iy)
return y_list
def optimize(self, loss):
self.optim.zero_grad()
loss.backward()
self.optim.step()
print("Actor Loss:", loss.item())
class GoalkeeperNet(torch.nn.Module):
def __init__(self, state_size):
super().__init__()
self.conv = ResizableConv2d(state_size, 3, 3)
self.conv2 = ResizableConv2d(state_size, 3, 3)
self.conv3 = ResizableConv2d(state_size, 3, 3)
self.conv4 = ResizableConv2d(state_size, 3, 3)
self.flatten = torch.nn.Flatten()
self.optim = torch.optim.AdamW(self.parameters(), lr=1e-4)
def forward(self, state):
y = self.conv(state)
goal = self.conv2(y)
goal = self.conv3(goal)
return goal
def optimize(self, loss):
self.optim.zero_grad()
loss.backward()
self.optim.step()
print("Goalkeeper Loss:", loss.item())
|
flexible
|
{
"blob_id": "850310b6c431981a246832e8a6f5417a88587b99",
"index": 3151,
"step-1": "<mask token>\n\n\nclass ResizableConv2d(torch.nn.Module):\n <mask token>\n\n def forward(self, x):\n y = self.conv(x)\n y = self.conv2(y)\n y = self.resize(y)\n y = y + self.resize(self.residual_conv(x))\n y = self.activation(y)\n return y\n\n\nclass ActorNet(torch.nn.Module):\n\n def __init__(self, state_size, action_size):\n super().__init__()\n self.conv = ResizableConv2d(state_size, 6, 3)\n self.conv_backwards = ResizableConv2d(state_size, 3, 6)\n self.conv2 = ResizableConv2d(state_size, 3, 3)\n self.conv3 = ResizableConv2d(state_size, 3, 3)\n self.conv4 = ResizableConv2d(state_size, 3, 3)\n self.conv_resize = ResizableConv2d((8, 8), 3, 3)\n self.linears = torch.nn.ModuleList([])\n for i in action_size:\n self.linears.append(torch.nn.Linear(8 * 8 * 3, i))\n self.optim = torch.optim.AdamW(self.parameters(), lr=0.0001)\n\n def forward(self, x, goal, time):\n x = torch.cat([x, goal], dim=1) + time\n x = self.conv(x)\n x_ = self.conv_backwards(x)\n x = self.conv(x_) + goal\n x = x + torch.randn_like(x)\n x = self.conv2(x) + time\n x = x + torch.randn_like(x)\n x = self.conv3(x) + goal\n x = x + torch.randn_like(x)\n x = self.conv4(x) + goal\n x = self.conv_resize(x)\n y = x\n y = torch.flatten(y, start_dim=1)\n y_list = []\n for i in self.linears:\n iy = i(y)\n iy = torch.sigmoid(iy)\n y_list.append(iy)\n return y_list\n\n def optimize(self, loss):\n self.optim.zero_grad()\n loss.backward()\n self.optim.step()\n print('Actor Loss:', loss.item())\n\n\nclass GoalkeeperNet(torch.nn.Module):\n\n def __init__(self, state_size):\n super().__init__()\n self.conv = ResizableConv2d(state_size, 3, 3)\n self.conv2 = ResizableConv2d(state_size, 3, 3)\n self.conv3 = ResizableConv2d(state_size, 3, 3)\n self.conv4 = ResizableConv2d(state_size, 3, 3)\n self.flatten = torch.nn.Flatten()\n self.optim = torch.optim.AdamW(self.parameters(), lr=0.0001)\n\n def forward(self, state):\n y = self.conv(state)\n goal = self.conv2(y)\n goal = self.conv3(goal)\n return goal\n\n def 
optimize(self, loss):\n self.optim.zero_grad()\n loss.backward()\n self.optim.step()\n print('Goalkeeper Loss:', loss.item())\n",
"step-2": "<mask token>\n\n\nclass ResizableConv2d(torch.nn.Module):\n\n def __init__(self, state_size, inchan, outchan):\n super().__init__()\n self.conv = torch.nn.Conv2d(inchan, outchan, 3)\n self.conv2 = torch.nn.Conv2d(outchan, outchan, 3)\n self.residual_conv = torch.nn.Conv2d(inchan, outchan, 3)\n self.resize = lambda x: torch.nn.functional.interpolate(x, size=\n state_size, mode='bicubic', align_corners=True)\n self.activation = Activation()\n\n def forward(self, x):\n y = self.conv(x)\n y = self.conv2(y)\n y = self.resize(y)\n y = y + self.resize(self.residual_conv(x))\n y = self.activation(y)\n return y\n\n\nclass ActorNet(torch.nn.Module):\n\n def __init__(self, state_size, action_size):\n super().__init__()\n self.conv = ResizableConv2d(state_size, 6, 3)\n self.conv_backwards = ResizableConv2d(state_size, 3, 6)\n self.conv2 = ResizableConv2d(state_size, 3, 3)\n self.conv3 = ResizableConv2d(state_size, 3, 3)\n self.conv4 = ResizableConv2d(state_size, 3, 3)\n self.conv_resize = ResizableConv2d((8, 8), 3, 3)\n self.linears = torch.nn.ModuleList([])\n for i in action_size:\n self.linears.append(torch.nn.Linear(8 * 8 * 3, i))\n self.optim = torch.optim.AdamW(self.parameters(), lr=0.0001)\n\n def forward(self, x, goal, time):\n x = torch.cat([x, goal], dim=1) + time\n x = self.conv(x)\n x_ = self.conv_backwards(x)\n x = self.conv(x_) + goal\n x = x + torch.randn_like(x)\n x = self.conv2(x) + time\n x = x + torch.randn_like(x)\n x = self.conv3(x) + goal\n x = x + torch.randn_like(x)\n x = self.conv4(x) + goal\n x = self.conv_resize(x)\n y = x\n y = torch.flatten(y, start_dim=1)\n y_list = []\n for i in self.linears:\n iy = i(y)\n iy = torch.sigmoid(iy)\n y_list.append(iy)\n return y_list\n\n def optimize(self, loss):\n self.optim.zero_grad()\n loss.backward()\n self.optim.step()\n print('Actor Loss:', loss.item())\n\n\nclass GoalkeeperNet(torch.nn.Module):\n\n def __init__(self, state_size):\n super().__init__()\n self.conv = ResizableConv2d(state_size, 3, 
3)\n self.conv2 = ResizableConv2d(state_size, 3, 3)\n self.conv3 = ResizableConv2d(state_size, 3, 3)\n self.conv4 = ResizableConv2d(state_size, 3, 3)\n self.flatten = torch.nn.Flatten()\n self.optim = torch.optim.AdamW(self.parameters(), lr=0.0001)\n\n def forward(self, state):\n y = self.conv(state)\n goal = self.conv2(y)\n goal = self.conv3(goal)\n return goal\n\n def optimize(self, loss):\n self.optim.zero_grad()\n loss.backward()\n self.optim.step()\n print('Goalkeeper Loss:', loss.item())\n",
"step-3": "<mask token>\n\n\nclass Activation(torch.nn.Module):\n <mask token>\n <mask token>\n\n\nclass ResizableConv2d(torch.nn.Module):\n\n def __init__(self, state_size, inchan, outchan):\n super().__init__()\n self.conv = torch.nn.Conv2d(inchan, outchan, 3)\n self.conv2 = torch.nn.Conv2d(outchan, outchan, 3)\n self.residual_conv = torch.nn.Conv2d(inchan, outchan, 3)\n self.resize = lambda x: torch.nn.functional.interpolate(x, size=\n state_size, mode='bicubic', align_corners=True)\n self.activation = Activation()\n\n def forward(self, x):\n y = self.conv(x)\n y = self.conv2(y)\n y = self.resize(y)\n y = y + self.resize(self.residual_conv(x))\n y = self.activation(y)\n return y\n\n\nclass ActorNet(torch.nn.Module):\n\n def __init__(self, state_size, action_size):\n super().__init__()\n self.conv = ResizableConv2d(state_size, 6, 3)\n self.conv_backwards = ResizableConv2d(state_size, 3, 6)\n self.conv2 = ResizableConv2d(state_size, 3, 3)\n self.conv3 = ResizableConv2d(state_size, 3, 3)\n self.conv4 = ResizableConv2d(state_size, 3, 3)\n self.conv_resize = ResizableConv2d((8, 8), 3, 3)\n self.linears = torch.nn.ModuleList([])\n for i in action_size:\n self.linears.append(torch.nn.Linear(8 * 8 * 3, i))\n self.optim = torch.optim.AdamW(self.parameters(), lr=0.0001)\n\n def forward(self, x, goal, time):\n x = torch.cat([x, goal], dim=1) + time\n x = self.conv(x)\n x_ = self.conv_backwards(x)\n x = self.conv(x_) + goal\n x = x + torch.randn_like(x)\n x = self.conv2(x) + time\n x = x + torch.randn_like(x)\n x = self.conv3(x) + goal\n x = x + torch.randn_like(x)\n x = self.conv4(x) + goal\n x = self.conv_resize(x)\n y = x\n y = torch.flatten(y, start_dim=1)\n y_list = []\n for i in self.linears:\n iy = i(y)\n iy = torch.sigmoid(iy)\n y_list.append(iy)\n return y_list\n\n def optimize(self, loss):\n self.optim.zero_grad()\n loss.backward()\n self.optim.step()\n print('Actor Loss:', loss.item())\n\n\nclass GoalkeeperNet(torch.nn.Module):\n\n def __init__(self, 
state_size):\n super().__init__()\n self.conv = ResizableConv2d(state_size, 3, 3)\n self.conv2 = ResizableConv2d(state_size, 3, 3)\n self.conv3 = ResizableConv2d(state_size, 3, 3)\n self.conv4 = ResizableConv2d(state_size, 3, 3)\n self.flatten = torch.nn.Flatten()\n self.optim = torch.optim.AdamW(self.parameters(), lr=0.0001)\n\n def forward(self, state):\n y = self.conv(state)\n goal = self.conv2(y)\n goal = self.conv3(goal)\n return goal\n\n def optimize(self, loss):\n self.optim.zero_grad()\n loss.backward()\n self.optim.step()\n print('Goalkeeper Loss:', loss.item())\n",
"step-4": "<mask token>\n\n\nclass Activation(torch.nn.Module):\n\n def __init__(self):\n super().__init__()\n self.swish = lambda x: x * torch.sigmoid(x)\n self.linear = lambda x: x\n self.sigmoid = lambda x: torch.sigmoid(x)\n self.neg = lambda x: -x\n self.sine = lambda x: torch.sin(x)\n self.params = torch.nn.Parameter(torch.zeros(10))\n\n def forward(self, x):\n params = torch.sigmoid(self.params)\n linear_x = self.linear(x) * params[0]\n swish_x = self.swish(x) * params[1]\n sigmoid_x = self.sigmoid(x) * params[2]\n neg_x = self.neg(x) * params[3]\n sine_x = self.sine(x) * params[4]\n x = swish_x + linear_x + sigmoid_x + neg_x + sine_x\n return x\n\n\nclass ResizableConv2d(torch.nn.Module):\n\n def __init__(self, state_size, inchan, outchan):\n super().__init__()\n self.conv = torch.nn.Conv2d(inchan, outchan, 3)\n self.conv2 = torch.nn.Conv2d(outchan, outchan, 3)\n self.residual_conv = torch.nn.Conv2d(inchan, outchan, 3)\n self.resize = lambda x: torch.nn.functional.interpolate(x, size=\n state_size, mode='bicubic', align_corners=True)\n self.activation = Activation()\n\n def forward(self, x):\n y = self.conv(x)\n y = self.conv2(y)\n y = self.resize(y)\n y = y + self.resize(self.residual_conv(x))\n y = self.activation(y)\n return y\n\n\nclass ActorNet(torch.nn.Module):\n\n def __init__(self, state_size, action_size):\n super().__init__()\n self.conv = ResizableConv2d(state_size, 6, 3)\n self.conv_backwards = ResizableConv2d(state_size, 3, 6)\n self.conv2 = ResizableConv2d(state_size, 3, 3)\n self.conv3 = ResizableConv2d(state_size, 3, 3)\n self.conv4 = ResizableConv2d(state_size, 3, 3)\n self.conv_resize = ResizableConv2d((8, 8), 3, 3)\n self.linears = torch.nn.ModuleList([])\n for i in action_size:\n self.linears.append(torch.nn.Linear(8 * 8 * 3, i))\n self.optim = torch.optim.AdamW(self.parameters(), lr=0.0001)\n\n def forward(self, x, goal, time):\n x = torch.cat([x, goal], dim=1) + time\n x = self.conv(x)\n x_ = self.conv_backwards(x)\n x = self.conv(x_) 
+ goal\n x = x + torch.randn_like(x)\n x = self.conv2(x) + time\n x = x + torch.randn_like(x)\n x = self.conv3(x) + goal\n x = x + torch.randn_like(x)\n x = self.conv4(x) + goal\n x = self.conv_resize(x)\n y = x\n y = torch.flatten(y, start_dim=1)\n y_list = []\n for i in self.linears:\n iy = i(y)\n iy = torch.sigmoid(iy)\n y_list.append(iy)\n return y_list\n\n def optimize(self, loss):\n self.optim.zero_grad()\n loss.backward()\n self.optim.step()\n print('Actor Loss:', loss.item())\n\n\nclass GoalkeeperNet(torch.nn.Module):\n\n def __init__(self, state_size):\n super().__init__()\n self.conv = ResizableConv2d(state_size, 3, 3)\n self.conv2 = ResizableConv2d(state_size, 3, 3)\n self.conv3 = ResizableConv2d(state_size, 3, 3)\n self.conv4 = ResizableConv2d(state_size, 3, 3)\n self.flatten = torch.nn.Flatten()\n self.optim = torch.optim.AdamW(self.parameters(), lr=0.0001)\n\n def forward(self, state):\n y = self.conv(state)\n goal = self.conv2(y)\n goal = self.conv3(goal)\n return goal\n\n def optimize(self, loss):\n self.optim.zero_grad()\n loss.backward()\n self.optim.step()\n print('Goalkeeper Loss:', loss.item())\n",
"step-5": "import torch\n\nclass Activation(torch.nn.Module):\n\tdef __init__(self):\n\t\tsuper().__init__()\n\t\tself.swish = lambda x: x * torch.sigmoid(x)\n\t\tself.linear = lambda x: x\n\t\tself.sigmoid = lambda x: torch.sigmoid(x)\n\t\tself.neg = lambda x: -x\n\t\tself.sine = lambda x: torch.sin(x)\n\t\t\n\t\tself.params = torch.nn.Parameter(torch.zeros(10))\n\n\tdef forward(self, x):\n\t\tparams = torch.sigmoid(self.params)\n\t\t\n\t\tlinear_x = self.linear(x) * params[0]\n\t\tswish_x = self.swish(x) * params[1]\n\t\tsigmoid_x = self.sigmoid(x) * params[2]\n\t\tneg_x = self.neg(x) * params[3]\n\t\tsine_x = self.sine(x) * params[4]\n\n\t\tx = swish_x + linear_x + sigmoid_x + neg_x + sine_x\n\t\t\n\t\treturn x\n\nclass ResizableConv2d(torch.nn.Module):\n\tdef __init__(self, state_size, inchan, outchan):\n\t\tsuper().__init__()\n\t\tself.conv = torch.nn.Conv2d(inchan, outchan, 3)\n\t\tself.conv2 = torch.nn.Conv2d(outchan, outchan, 3)\n\t\tself.residual_conv = torch.nn.Conv2d(inchan, outchan, 3)\n\t\tself.resize = lambda x: torch.nn.functional.interpolate(x, size=state_size, mode='bicubic', align_corners=True)\n\t\tself.activation = Activation()\n\tdef forward(self, x):\n\t\ty = self.conv(x)\n\t\ty = self.conv2(y)\n\t\ty = self.resize(y)\n\n\t\ty = y + self.resize(self.residual_conv(x))\n\t\ty = self.activation(y)\n\t\treturn y\n\nclass ActorNet(torch.nn.Module):\n\tdef __init__(self, state_size, action_size):\n\t\tsuper().__init__()\n\t\tself.conv = ResizableConv2d(state_size, 6, 3)\n\t\tself.conv_backwards = ResizableConv2d(state_size, 3, 6)\n\n\t\tself.conv2 = ResizableConv2d(state_size, 3, 3)\n\t\tself.conv3 = ResizableConv2d(state_size, 3, 3)\n\t\tself.conv4 = ResizableConv2d(state_size, 3, 3)\n\t\tself.conv_resize = ResizableConv2d((8, 8), 3, 3)\n\n\t\tself.linears = torch.nn.ModuleList([])\n\n\t\tfor i in action_size:\n\t\t\tself.linears.append(torch.nn.Linear(8*8*3, i))\n\n\t\tself.optim = torch.optim.AdamW(self.parameters(), lr=1e-4)\n\n\tdef 
forward(self, x, goal, time):\n\t\tx = torch.cat([x, goal], dim=1) + time\n\n\t\tx = self.conv(x)\n\t\tx_ = self.conv_backwards(x)\n\n\t\tx = self.conv(x_) + goal\n\t\tx = x + torch.randn_like(x)\n\t\tx = self.conv2(x) + time\n\t\tx = x + torch.randn_like(x)\n\t\tx = self.conv3(x) + goal\n\t\tx = x + torch.randn_like(x)\n\t\tx = self.conv4(x) + goal\n\n\t\tx = self.conv_resize(x)\n\n\t\ty = x\n\n\t\ty = torch.flatten(y, start_dim=1)\n\n\t\ty_list = []\n\t\tfor i in self.linears:\n\t\t\tiy = i(y)\n\t\t\tiy = torch.sigmoid(iy)\t\n\t\t\ty_list.append(iy)\n\n\t\treturn y_list\n\t\n\tdef optimize(self, loss):\n\t\tself.optim.zero_grad()\n\t\tloss.backward()\n\t\tself.optim.step()\n\t\tprint(\"Actor Loss:\", loss.item())\n\nclass GoalkeeperNet(torch.nn.Module):\n\tdef __init__(self, state_size):\n\t\tsuper().__init__()\n\t\tself.conv = ResizableConv2d(state_size, 3, 3)\n\t\tself.conv2 = ResizableConv2d(state_size, 3, 3)\n\t\tself.conv3 = ResizableConv2d(state_size, 3, 3)\n\t\tself.conv4 = ResizableConv2d(state_size, 3, 3)\n\t\tself.flatten = torch.nn.Flatten()\n\t\tself.optim = torch.optim.AdamW(self.parameters(), lr=1e-4)\n\n\tdef forward(self, state):\n\t\ty = self.conv(state)\n\t\tgoal = self.conv2(y)\n\t\tgoal = self.conv3(goal)\n\n\t\treturn goal\n\n\n\tdef optimize(self, loss):\n\t\tself.optim.zero_grad()\n\t\tloss.backward()\n\t\tself.optim.step()\n\t\tprint(\"Goalkeeper Loss:\", loss.item())",
"step-ids": [
10,
11,
12,
14,
16
]
}
|
[
10,
11,
12,
14,
16
] |
<|reserved_special_token_0|>
class GunicornLogger(Logger):
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
logzero.setup_logger(**logger_args)
logzero.setup_default_logger(**logger_args)
<|reserved_special_token_0|>
class GunicornLogger(Logger):
def __init__(self, cfg):
super().__init__(cfg)
self.error_log = logzero.setup_logger('gunicorn', **logger_args)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
_log_level = os.environ.get('LOG_LEVEL', 'info').upper()
log_level = getattr(logging, _log_level)
log_format = (
'%(color)s[%(levelname)1.1s %(asctime)s %(name)s]%(end_color)s %(message)s'
)
formatter = logzero.LogFormatter(fmt=log_format)
logger_args = dict(level=log_level, formatter=formatter)
logzero.__name__ = ''
logzero.setup_logger(**logger_args)
logzero.setup_default_logger(**logger_args)
logger = logzero.setup_logger('alertmanager_telegram', **logger_args)
class GunicornLogger(Logger):
def __init__(self, cfg):
super().__init__(cfg)
self.error_log = logzero.setup_logger('gunicorn', **logger_args)
<|reserved_special_token_1|>
import logging
import os
import logzero
from gunicorn.glogging import Logger
_log_level = os.environ.get('LOG_LEVEL', 'info').upper()
log_level = getattr(logging, _log_level)
log_format = (
'%(color)s[%(levelname)1.1s %(asctime)s %(name)s]%(end_color)s %(message)s'
)
formatter = logzero.LogFormatter(fmt=log_format)
logger_args = dict(level=log_level, formatter=formatter)
logzero.__name__ = ''
logzero.setup_logger(**logger_args)
logzero.setup_default_logger(**logger_args)
logger = logzero.setup_logger('alertmanager_telegram', **logger_args)
class GunicornLogger(Logger):
def __init__(self, cfg):
super().__init__(cfg)
self.error_log = logzero.setup_logger('gunicorn', **logger_args)
<|reserved_special_token_1|>
import logging
import os
import logzero
from gunicorn.glogging import Logger
_log_level = os.environ.get("LOG_LEVEL", "info").upper()
log_level = getattr(logging, _log_level)
log_format = "%(color)s[%(levelname)1.1s %(asctime)s %(name)s]%(end_color)s %(message)s"
formatter = logzero.LogFormatter(fmt=log_format)
logger_args = dict(level=log_level, formatter=formatter)
logzero.__name__ = ""
logzero.setup_logger(**logger_args)
logzero.setup_default_logger(**logger_args)
logger = logzero.setup_logger("alertmanager_telegram", **logger_args)
class GunicornLogger(Logger):
def __init__(self, cfg):
super().__init__(cfg)
self.error_log = logzero.setup_logger("gunicorn", **logger_args)
|
flexible
|
{
"blob_id": "b8b50ef021c4b25edbab355e1db5d62d3c5a28ad",
"index": 7257,
"step-1": "<mask token>\n\n\nclass GunicornLogger(Logger):\n <mask token>\n",
"step-2": "<mask token>\nlogzero.setup_logger(**logger_args)\nlogzero.setup_default_logger(**logger_args)\n<mask token>\n\n\nclass GunicornLogger(Logger):\n\n def __init__(self, cfg):\n super().__init__(cfg)\n self.error_log = logzero.setup_logger('gunicorn', **logger_args)\n",
"step-3": "<mask token>\n_log_level = os.environ.get('LOG_LEVEL', 'info').upper()\nlog_level = getattr(logging, _log_level)\nlog_format = (\n '%(color)s[%(levelname)1.1s %(asctime)s %(name)s]%(end_color)s %(message)s'\n )\nformatter = logzero.LogFormatter(fmt=log_format)\nlogger_args = dict(level=log_level, formatter=formatter)\nlogzero.__name__ = ''\nlogzero.setup_logger(**logger_args)\nlogzero.setup_default_logger(**logger_args)\nlogger = logzero.setup_logger('alertmanager_telegram', **logger_args)\n\n\nclass GunicornLogger(Logger):\n\n def __init__(self, cfg):\n super().__init__(cfg)\n self.error_log = logzero.setup_logger('gunicorn', **logger_args)\n",
"step-4": "import logging\nimport os\nimport logzero\nfrom gunicorn.glogging import Logger\n_log_level = os.environ.get('LOG_LEVEL', 'info').upper()\nlog_level = getattr(logging, _log_level)\nlog_format = (\n '%(color)s[%(levelname)1.1s %(asctime)s %(name)s]%(end_color)s %(message)s'\n )\nformatter = logzero.LogFormatter(fmt=log_format)\nlogger_args = dict(level=log_level, formatter=formatter)\nlogzero.__name__ = ''\nlogzero.setup_logger(**logger_args)\nlogzero.setup_default_logger(**logger_args)\nlogger = logzero.setup_logger('alertmanager_telegram', **logger_args)\n\n\nclass GunicornLogger(Logger):\n\n def __init__(self, cfg):\n super().__init__(cfg)\n self.error_log = logzero.setup_logger('gunicorn', **logger_args)\n",
"step-5": "import logging\nimport os\n\nimport logzero\nfrom gunicorn.glogging import Logger\n\n_log_level = os.environ.get(\"LOG_LEVEL\", \"info\").upper()\nlog_level = getattr(logging, _log_level)\nlog_format = \"%(color)s[%(levelname)1.1s %(asctime)s %(name)s]%(end_color)s %(message)s\"\n\nformatter = logzero.LogFormatter(fmt=log_format)\nlogger_args = dict(level=log_level, formatter=formatter)\n\nlogzero.__name__ = \"\"\nlogzero.setup_logger(**logger_args)\nlogzero.setup_default_logger(**logger_args)\nlogger = logzero.setup_logger(\"alertmanager_telegram\", **logger_args)\n\n\nclass GunicornLogger(Logger):\n def __init__(self, cfg):\n super().__init__(cfg)\n\n self.error_log = logzero.setup_logger(\"gunicorn\", **logger_args)\n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
import pandas
class _RegressionModelTable(object):
def __init__(self, regression_models, function_to_evaluate_model=None, function_to_select_model=None):
if not isinstance(regression_models, list):
regression_models = [regression_models]
self._check_model_inputs(regression_models, function_to_evaluate_model, function_to_select_model)
self._function_to_evaluate_model = function_to_evaluate_model
self._function_to_select_model = function_to_select_model
self._regression_model_list = regression_models
self._table_evaluation_dict = {}
self._all_fit_models_table_dict = {}
self._fit_model_table_dict = {}
@property
def pandas_table(self):
model_names = [model.__str__() for model in self._regression_model_list]
df = pandas.DataFrame(self._table_evaluation_dict, index=model_names)
df = df.transpose()
return df
@classmethod
def _check_model_inputs(cls, regression_models, function_to_evaluate_model, function_to_select_model):
if len(regression_models) > 1:
if function_to_select_model is None or function_to_evaluate_model is None:
raise ValueError("Functions to evaluate and select regression models must be specified "
"in case of regression model list.")
def initialize_tables(self, label_names):
n_models = len(self._regression_model_list)
self._table_evaluation_dict = {LABEL_NAME: [None]*n_models for LABEL_NAME in label_names}
self._fit_model_table_dict = {LABEL_NAME: None for LABEL_NAME in label_names}
def evaluate_label_models(self, x, y, label_name):
label_tuple_list = list(map(lambda model: self.evaluate_model(model, x, y), self._regression_model_list))
# print("TUPLES! Window, Model", label_tuple_list[0][0], label_tuple_list[0][0]._model)
self._all_fit_models_table_dict[label_name] = [T[0] for T in label_tuple_list]
self._table_evaluation_dict[label_name] = [T[1] for T in label_tuple_list]
def evaluate_model(self, model, x, y):
model, value = self._function_to_evaluate_model(model, x, y)
return model, value
def return_selected_label_model(self, label_name):
if len(self._regression_model_list) == 1:
# print("unique model")
return self._all_fit_models_table_dict[label_name][0]
if self._is_any_none_in_list(self._table_evaluation_dict[label_name]):
raise ValueError("Some models were not evaluated")
return self._function_to_select_model(self._all_fit_models_table_dict[label_name], self._table_evaluation_dict[label_name])
@staticmethod
def _is_any_none_in_list(list_):
return any(list(map(lambda x: x is None, list_)))
def set_label_regression_model(self, model, label_name):
self._fit_model_table_dict[label_name] = model
def return_label_regression_model(self, label_name):
return self._fit_model_table_dict[label_name]
@classmethod
def _predict_func(cls, model, x_instance, n_samples):
return model.predict(x_instance, n_samples)
|
normal
|
{
"blob_id": "94264e121bb31a08cbd9766be1ff16173d2838ed",
"index": 5331,
"step-1": "<mask token>\n\n\nclass _RegressionModelTable(object):\n\n def __init__(self, regression_models, function_to_evaluate_model=None,\n function_to_select_model=None):\n if not isinstance(regression_models, list):\n regression_models = [regression_models]\n self._check_model_inputs(regression_models,\n function_to_evaluate_model, function_to_select_model)\n self._function_to_evaluate_model = function_to_evaluate_model\n self._function_to_select_model = function_to_select_model\n self._regression_model_list = regression_models\n self._table_evaluation_dict = {}\n self._all_fit_models_table_dict = {}\n self._fit_model_table_dict = {}\n <mask token>\n\n @classmethod\n def _check_model_inputs(cls, regression_models,\n function_to_evaluate_model, function_to_select_model):\n if len(regression_models) > 1:\n if (function_to_select_model is None or \n function_to_evaluate_model is None):\n raise ValueError(\n 'Functions to evaluate and select regression models must be specified in case of regression model list.'\n )\n\n def initialize_tables(self, label_names):\n n_models = len(self._regression_model_list)\n self._table_evaluation_dict = {LABEL_NAME: ([None] * n_models) for\n LABEL_NAME in label_names}\n self._fit_model_table_dict = {LABEL_NAME: None for LABEL_NAME in\n label_names}\n\n def evaluate_label_models(self, x, y, label_name):\n label_tuple_list = list(map(lambda model: self.evaluate_model(model,\n x, y), self._regression_model_list))\n self._all_fit_models_table_dict[label_name] = [T[0] for T in\n label_tuple_list]\n self._table_evaluation_dict[label_name] = [T[1] for T in\n label_tuple_list]\n\n def evaluate_model(self, model, x, y):\n model, value = self._function_to_evaluate_model(model, x, y)\n return model, value\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass _RegressionModelTable(object):\n\n def __init__(self, regression_models, function_to_evaluate_model=None,\n function_to_select_model=None):\n if not isinstance(regression_models, list):\n regression_models = [regression_models]\n self._check_model_inputs(regression_models,\n function_to_evaluate_model, function_to_select_model)\n self._function_to_evaluate_model = function_to_evaluate_model\n self._function_to_select_model = function_to_select_model\n self._regression_model_list = regression_models\n self._table_evaluation_dict = {}\n self._all_fit_models_table_dict = {}\n self._fit_model_table_dict = {}\n <mask token>\n\n @classmethod\n def _check_model_inputs(cls, regression_models,\n function_to_evaluate_model, function_to_select_model):\n if len(regression_models) > 1:\n if (function_to_select_model is None or \n function_to_evaluate_model is None):\n raise ValueError(\n 'Functions to evaluate and select regression models must be specified in case of regression model list.'\n )\n\n def initialize_tables(self, label_names):\n n_models = len(self._regression_model_list)\n self._table_evaluation_dict = {LABEL_NAME: ([None] * n_models) for\n LABEL_NAME in label_names}\n self._fit_model_table_dict = {LABEL_NAME: None for LABEL_NAME in\n label_names}\n\n def evaluate_label_models(self, x, y, label_name):\n label_tuple_list = list(map(lambda model: self.evaluate_model(model,\n x, y), self._regression_model_list))\n self._all_fit_models_table_dict[label_name] = [T[0] for T in\n label_tuple_list]\n self._table_evaluation_dict[label_name] = [T[1] for T in\n label_tuple_list]\n\n def evaluate_model(self, model, x, y):\n model, value = self._function_to_evaluate_model(model, x, y)\n return model, value\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @classmethod\n def _predict_func(cls, model, x_instance, n_samples):\n return model.predict(x_instance, n_samples)\n",
"step-3": "<mask token>\n\n\nclass _RegressionModelTable(object):\n\n def __init__(self, regression_models, function_to_evaluate_model=None,\n function_to_select_model=None):\n if not isinstance(regression_models, list):\n regression_models = [regression_models]\n self._check_model_inputs(regression_models,\n function_to_evaluate_model, function_to_select_model)\n self._function_to_evaluate_model = function_to_evaluate_model\n self._function_to_select_model = function_to_select_model\n self._regression_model_list = regression_models\n self._table_evaluation_dict = {}\n self._all_fit_models_table_dict = {}\n self._fit_model_table_dict = {}\n\n @property\n def pandas_table(self):\n model_names = [model.__str__() for model in self._regression_model_list\n ]\n df = pandas.DataFrame(self._table_evaluation_dict, index=model_names)\n df = df.transpose()\n return df\n\n @classmethod\n def _check_model_inputs(cls, regression_models,\n function_to_evaluate_model, function_to_select_model):\n if len(regression_models) > 1:\n if (function_to_select_model is None or \n function_to_evaluate_model is None):\n raise ValueError(\n 'Functions to evaluate and select regression models must be specified in case of regression model list.'\n )\n\n def initialize_tables(self, label_names):\n n_models = len(self._regression_model_list)\n self._table_evaluation_dict = {LABEL_NAME: ([None] * n_models) for\n LABEL_NAME in label_names}\n self._fit_model_table_dict = {LABEL_NAME: None for LABEL_NAME in\n label_names}\n\n def evaluate_label_models(self, x, y, label_name):\n label_tuple_list = list(map(lambda model: self.evaluate_model(model,\n x, y), self._regression_model_list))\n self._all_fit_models_table_dict[label_name] = [T[0] for T in\n label_tuple_list]\n self._table_evaluation_dict[label_name] = [T[1] for T in\n label_tuple_list]\n\n def evaluate_model(self, model, x, y):\n model, value = self._function_to_evaluate_model(model, x, y)\n return model, value\n <mask token>\n <mask token>\n 
<mask token>\n <mask token>\n\n @classmethod\n def _predict_func(cls, model, x_instance, n_samples):\n return model.predict(x_instance, n_samples)\n",
"step-4": "import pandas\n\n\nclass _RegressionModelTable(object):\n\n def __init__(self, regression_models, function_to_evaluate_model=None,\n function_to_select_model=None):\n if not isinstance(regression_models, list):\n regression_models = [regression_models]\n self._check_model_inputs(regression_models,\n function_to_evaluate_model, function_to_select_model)\n self._function_to_evaluate_model = function_to_evaluate_model\n self._function_to_select_model = function_to_select_model\n self._regression_model_list = regression_models\n self._table_evaluation_dict = {}\n self._all_fit_models_table_dict = {}\n self._fit_model_table_dict = {}\n\n @property\n def pandas_table(self):\n model_names = [model.__str__() for model in self._regression_model_list\n ]\n df = pandas.DataFrame(self._table_evaluation_dict, index=model_names)\n df = df.transpose()\n return df\n\n @classmethod\n def _check_model_inputs(cls, regression_models,\n function_to_evaluate_model, function_to_select_model):\n if len(regression_models) > 1:\n if (function_to_select_model is None or \n function_to_evaluate_model is None):\n raise ValueError(\n 'Functions to evaluate and select regression models must be specified in case of regression model list.'\n )\n\n def initialize_tables(self, label_names):\n n_models = len(self._regression_model_list)\n self._table_evaluation_dict = {LABEL_NAME: ([None] * n_models) for\n LABEL_NAME in label_names}\n self._fit_model_table_dict = {LABEL_NAME: None for LABEL_NAME in\n label_names}\n\n def evaluate_label_models(self, x, y, label_name):\n label_tuple_list = list(map(lambda model: self.evaluate_model(model,\n x, y), self._regression_model_list))\n self._all_fit_models_table_dict[label_name] = [T[0] for T in\n label_tuple_list]\n self._table_evaluation_dict[label_name] = [T[1] for T in\n label_tuple_list]\n\n def evaluate_model(self, model, x, y):\n model, value = self._function_to_evaluate_model(model, x, y)\n return model, value\n\n def 
return_selected_label_model(self, label_name):\n if len(self._regression_model_list) == 1:\n return self._all_fit_models_table_dict[label_name][0]\n if self._is_any_none_in_list(self._table_evaluation_dict[label_name]):\n raise ValueError('Some models were not evaluated')\n return self._function_to_select_model(self.\n _all_fit_models_table_dict[label_name], self.\n _table_evaluation_dict[label_name])\n\n @staticmethod\n def _is_any_none_in_list(list_):\n return any(list(map(lambda x: x is None, list_)))\n\n def set_label_regression_model(self, model, label_name):\n self._fit_model_table_dict[label_name] = model\n\n def return_label_regression_model(self, label_name):\n return self._fit_model_table_dict[label_name]\n\n @classmethod\n def _predict_func(cls, model, x_instance, n_samples):\n return model.predict(x_instance, n_samples)\n",
"step-5": "import pandas\n\n\nclass _RegressionModelTable(object):\n def __init__(self, regression_models, function_to_evaluate_model=None, function_to_select_model=None):\n\n if not isinstance(regression_models, list):\n regression_models = [regression_models]\n\n self._check_model_inputs(regression_models, function_to_evaluate_model, function_to_select_model)\n\n self._function_to_evaluate_model = function_to_evaluate_model\n self._function_to_select_model = function_to_select_model\n\n self._regression_model_list = regression_models\n self._table_evaluation_dict = {}\n self._all_fit_models_table_dict = {}\n self._fit_model_table_dict = {}\n\n @property\n def pandas_table(self):\n model_names = [model.__str__() for model in self._regression_model_list]\n df = pandas.DataFrame(self._table_evaluation_dict, index=model_names)\n df = df.transpose()\n return df\n\n @classmethod\n def _check_model_inputs(cls, regression_models, function_to_evaluate_model, function_to_select_model):\n if len(regression_models) > 1:\n if function_to_select_model is None or function_to_evaluate_model is None:\n raise ValueError(\"Functions to evaluate and select regression models must be specified \"\n \"in case of regression model list.\")\n\n def initialize_tables(self, label_names):\n n_models = len(self._regression_model_list)\n self._table_evaluation_dict = {LABEL_NAME: [None]*n_models for LABEL_NAME in label_names}\n self._fit_model_table_dict = {LABEL_NAME: None for LABEL_NAME in label_names}\n\n def evaluate_label_models(self, x, y, label_name):\n label_tuple_list = list(map(lambda model: self.evaluate_model(model, x, y), self._regression_model_list))\n # print(\"TUPLES! 
Window, Model\", label_tuple_list[0][0], label_tuple_list[0][0]._model)\n self._all_fit_models_table_dict[label_name] = [T[0] for T in label_tuple_list]\n self._table_evaluation_dict[label_name] = [T[1] for T in label_tuple_list]\n\n def evaluate_model(self, model, x, y):\n model, value = self._function_to_evaluate_model(model, x, y)\n return model, value\n\n def return_selected_label_model(self, label_name):\n if len(self._regression_model_list) == 1:\n # print(\"unique model\")\n return self._all_fit_models_table_dict[label_name][0]\n if self._is_any_none_in_list(self._table_evaluation_dict[label_name]):\n raise ValueError(\"Some models were not evaluated\")\n return self._function_to_select_model(self._all_fit_models_table_dict[label_name], self._table_evaluation_dict[label_name])\n\n @staticmethod\n def _is_any_none_in_list(list_):\n return any(list(map(lambda x: x is None, list_)))\n\n def set_label_regression_model(self, model, label_name):\n self._fit_model_table_dict[label_name] = model\n\n def return_label_regression_model(self, label_name):\n return self._fit_model_table_dict[label_name]\n\n @classmethod\n def _predict_func(cls, model, x_instance, n_samples):\n return model.predict(x_instance, n_samples)\n",
"step-ids": [
6,
7,
8,
13,
14
]
}
|
[
6,
7,
8,
13,
14
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
app.include_router(default.router)
app.include_router(mane.router)
app.include_router(mappings.router)
def custom_openapi() ->Dict:
"""Generate custom fields for OpenAPI response."""
if app.openapi_schema:
return app.openapi_schema
openapi_schema = get_openapi(title='The GenomicMedLab Cool Seq Tool',
version=__version__, description=
'Common Operations On Lots-of Sequences Tool.', routes=app.routes)
openapi_schema['info']['contact'] = {'name': 'Alex H. Wagner', 'email':
'Alex.Wagner@nationwidechildrens.org', 'url':
'https://www.nationwidechildrens.org/specialties/institute-for-genomic-medicine/research-labs/wagner-lab'
}
app.openapi_schema = openapi_schema
return app.openapi_schema
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
app = FastAPI(docs_url=f'/{SERVICE_NAME}', openapi_url=
f'/{SERVICE_NAME}/openapi.json', swagger_ui_parameters={
'tryItOutEnabled': True})
app.include_router(default.router)
app.include_router(mane.router)
app.include_router(mappings.router)
def custom_openapi() ->Dict:
"""Generate custom fields for OpenAPI response."""
if app.openapi_schema:
return app.openapi_schema
openapi_schema = get_openapi(title='The GenomicMedLab Cool Seq Tool',
version=__version__, description=
'Common Operations On Lots-of Sequences Tool.', routes=app.routes)
openapi_schema['info']['contact'] = {'name': 'Alex H. Wagner', 'email':
'Alex.Wagner@nationwidechildrens.org', 'url':
'https://www.nationwidechildrens.org/specialties/institute-for-genomic-medicine/research-labs/wagner-lab'
}
app.openapi_schema = openapi_schema
return app.openapi_schema
app.openapi = custom_openapi
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from typing import Dict
from fastapi import FastAPI
from fastapi.openapi.utils import get_openapi
from cool_seq_tool.routers import default, mane, mappings, SERVICE_NAME
from cool_seq_tool.version import __version__
app = FastAPI(docs_url=f'/{SERVICE_NAME}', openapi_url=
f'/{SERVICE_NAME}/openapi.json', swagger_ui_parameters={
'tryItOutEnabled': True})
app.include_router(default.router)
app.include_router(mane.router)
app.include_router(mappings.router)
def custom_openapi() ->Dict:
"""Generate custom fields for OpenAPI response."""
if app.openapi_schema:
return app.openapi_schema
openapi_schema = get_openapi(title='The GenomicMedLab Cool Seq Tool',
version=__version__, description=
'Common Operations On Lots-of Sequences Tool.', routes=app.routes)
openapi_schema['info']['contact'] = {'name': 'Alex H. Wagner', 'email':
'Alex.Wagner@nationwidechildrens.org', 'url':
'https://www.nationwidechildrens.org/specialties/institute-for-genomic-medicine/research-labs/wagner-lab'
}
app.openapi_schema = openapi_schema
return app.openapi_schema
app.openapi = custom_openapi
<|reserved_special_token_1|>
"""Main application for FastAPI"""
from typing import Dict
from fastapi import FastAPI
from fastapi.openapi.utils import get_openapi
from cool_seq_tool.routers import default, mane, mappings, SERVICE_NAME
from cool_seq_tool.version import __version__
app = FastAPI(
docs_url=f"/{SERVICE_NAME}",
openapi_url=f"/{SERVICE_NAME}/openapi.json",
swagger_ui_parameters={"tryItOutEnabled": True}
)
app.include_router(default.router)
app.include_router(mane.router)
app.include_router(mappings.router)
def custom_openapi() -> Dict:
"""Generate custom fields for OpenAPI response."""
if app.openapi_schema:
return app.openapi_schema
openapi_schema = get_openapi(
title="The GenomicMedLab Cool Seq Tool",
version=__version__,
description="Common Operations On Lots-of Sequences Tool.",
routes=app.routes
)
openapi_schema["info"]["contact"] = {
"name": "Alex H. Wagner",
"email": "Alex.Wagner@nationwidechildrens.org",
"url": "https://www.nationwidechildrens.org/specialties/institute-for-genomic-medicine/research-labs/wagner-lab" # noqa: E501
}
app.openapi_schema = openapi_schema
return app.openapi_schema
app.openapi = custom_openapi
|
flexible
|
{
"blob_id": "c6fa8c33630fc2f7ffb08aace1a260e6805ddfa2",
"index": 7670,
"step-1": "<mask token>\n",
"step-2": "<mask token>\napp.include_router(default.router)\napp.include_router(mane.router)\napp.include_router(mappings.router)\n\n\ndef custom_openapi() ->Dict:\n \"\"\"Generate custom fields for OpenAPI response.\"\"\"\n if app.openapi_schema:\n return app.openapi_schema\n openapi_schema = get_openapi(title='The GenomicMedLab Cool Seq Tool',\n version=__version__, description=\n 'Common Operations On Lots-of Sequences Tool.', routes=app.routes)\n openapi_schema['info']['contact'] = {'name': 'Alex H. Wagner', 'email':\n 'Alex.Wagner@nationwidechildrens.org', 'url':\n 'https://www.nationwidechildrens.org/specialties/institute-for-genomic-medicine/research-labs/wagner-lab'\n }\n app.openapi_schema = openapi_schema\n return app.openapi_schema\n\n\n<mask token>\n",
"step-3": "<mask token>\napp = FastAPI(docs_url=f'/{SERVICE_NAME}', openapi_url=\n f'/{SERVICE_NAME}/openapi.json', swagger_ui_parameters={\n 'tryItOutEnabled': True})\napp.include_router(default.router)\napp.include_router(mane.router)\napp.include_router(mappings.router)\n\n\ndef custom_openapi() ->Dict:\n \"\"\"Generate custom fields for OpenAPI response.\"\"\"\n if app.openapi_schema:\n return app.openapi_schema\n openapi_schema = get_openapi(title='The GenomicMedLab Cool Seq Tool',\n version=__version__, description=\n 'Common Operations On Lots-of Sequences Tool.', routes=app.routes)\n openapi_schema['info']['contact'] = {'name': 'Alex H. Wagner', 'email':\n 'Alex.Wagner@nationwidechildrens.org', 'url':\n 'https://www.nationwidechildrens.org/specialties/institute-for-genomic-medicine/research-labs/wagner-lab'\n }\n app.openapi_schema = openapi_schema\n return app.openapi_schema\n\n\napp.openapi = custom_openapi\n",
"step-4": "<mask token>\nfrom typing import Dict\nfrom fastapi import FastAPI\nfrom fastapi.openapi.utils import get_openapi\nfrom cool_seq_tool.routers import default, mane, mappings, SERVICE_NAME\nfrom cool_seq_tool.version import __version__\napp = FastAPI(docs_url=f'/{SERVICE_NAME}', openapi_url=\n f'/{SERVICE_NAME}/openapi.json', swagger_ui_parameters={\n 'tryItOutEnabled': True})\napp.include_router(default.router)\napp.include_router(mane.router)\napp.include_router(mappings.router)\n\n\ndef custom_openapi() ->Dict:\n \"\"\"Generate custom fields for OpenAPI response.\"\"\"\n if app.openapi_schema:\n return app.openapi_schema\n openapi_schema = get_openapi(title='The GenomicMedLab Cool Seq Tool',\n version=__version__, description=\n 'Common Operations On Lots-of Sequences Tool.', routes=app.routes)\n openapi_schema['info']['contact'] = {'name': 'Alex H. Wagner', 'email':\n 'Alex.Wagner@nationwidechildrens.org', 'url':\n 'https://www.nationwidechildrens.org/specialties/institute-for-genomic-medicine/research-labs/wagner-lab'\n }\n app.openapi_schema = openapi_schema\n return app.openapi_schema\n\n\napp.openapi = custom_openapi\n",
"step-5": "\"\"\"Main application for FastAPI\"\"\"\nfrom typing import Dict\n\nfrom fastapi import FastAPI\nfrom fastapi.openapi.utils import get_openapi\n\n\nfrom cool_seq_tool.routers import default, mane, mappings, SERVICE_NAME\nfrom cool_seq_tool.version import __version__\n\n\napp = FastAPI(\n docs_url=f\"/{SERVICE_NAME}\",\n openapi_url=f\"/{SERVICE_NAME}/openapi.json\",\n swagger_ui_parameters={\"tryItOutEnabled\": True}\n)\n\n\napp.include_router(default.router)\napp.include_router(mane.router)\napp.include_router(mappings.router)\n\n\ndef custom_openapi() -> Dict:\n \"\"\"Generate custom fields for OpenAPI response.\"\"\"\n if app.openapi_schema:\n return app.openapi_schema\n openapi_schema = get_openapi(\n title=\"The GenomicMedLab Cool Seq Tool\",\n version=__version__,\n description=\"Common Operations On Lots-of Sequences Tool.\",\n routes=app.routes\n )\n\n openapi_schema[\"info\"][\"contact\"] = {\n \"name\": \"Alex H. Wagner\",\n \"email\": \"Alex.Wagner@nationwidechildrens.org\",\n \"url\": \"https://www.nationwidechildrens.org/specialties/institute-for-genomic-medicine/research-labs/wagner-lab\" # noqa: E501\n }\n app.openapi_schema = openapi_schema\n return app.openapi_schema\n\n\napp.openapi = custom_openapi\n",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
"""
Every block element test will be automatically
wrapped inside `<p></p>\n`. Thats why every block
test should include this wrapper tag.
"""
from io import BytesIO
from unittest import TestCase
from unittest.mock import patch, Mock
import pytest
from django.core.files import File
from django_dynamic_fixture import G
from magplan.models import Attachment
from magplan.xmd.renderer import XMDRenderer
from magplan.xmd.mappers import plan_internal_mapper
@pytest.mark.django_db
class TestImage(TestCase):
MOCK_SRC = 'dummy.jpg'
MOCK_TITLE = 'title'
MOCK_ALT_TEXT = 'alt_text'
def setUp(self):
file1 = File(name='file1.jpg', file=BytesIO(b'abcdef'))
attachment1 = G(Attachment, original_filename='user_friendly_filename1.jpg', file=file1)
self.mock_image_mapper = Mock()
self.renderer = XMDRenderer(image_mapper=self.mock_image_mapper, attachments=[attachment1])
self.expected_html = (
'<figure>'
'<img conf="dummy.jpg" alt="alt_text" /><figcaption>alt_text</figcaption>'
'</figure>'
)
def test_render_image(self):
self.mock_image_mapper.return_value = self.MOCK_SRC
html = self.renderer.image(self.MOCK_SRC, self.MOCK_TITLE, self.MOCK_ALT_TEXT)
self.mock_image_mapper.assert_called_with(self.MOCK_SRC, self.renderer.attachments)
assert html == self.expected_html
|
normal
|
{
"blob_id": "e5bf57e7a171f7e42928b78d09dda7593a231cf9",
"index": 9841,
"step-1": "<mask token>\n\n\n@pytest.mark.django_db\nclass TestImage(TestCase):\n <mask token>\n <mask token>\n <mask token>\n\n def setUp(self):\n file1 = File(name='file1.jpg', file=BytesIO(b'abcdef'))\n attachment1 = G(Attachment, original_filename=\n 'user_friendly_filename1.jpg', file=file1)\n self.mock_image_mapper = Mock()\n self.renderer = XMDRenderer(image_mapper=self.mock_image_mapper,\n attachments=[attachment1])\n self.expected_html = (\n '<figure><img conf=\"dummy.jpg\" alt=\"alt_text\" /><figcaption>alt_text</figcaption></figure>'\n )\n <mask token>\n",
"step-2": "<mask token>\n\n\n@pytest.mark.django_db\nclass TestImage(TestCase):\n <mask token>\n <mask token>\n <mask token>\n\n def setUp(self):\n file1 = File(name='file1.jpg', file=BytesIO(b'abcdef'))\n attachment1 = G(Attachment, original_filename=\n 'user_friendly_filename1.jpg', file=file1)\n self.mock_image_mapper = Mock()\n self.renderer = XMDRenderer(image_mapper=self.mock_image_mapper,\n attachments=[attachment1])\n self.expected_html = (\n '<figure><img conf=\"dummy.jpg\" alt=\"alt_text\" /><figcaption>alt_text</figcaption></figure>'\n )\n\n def test_render_image(self):\n self.mock_image_mapper.return_value = self.MOCK_SRC\n html = self.renderer.image(self.MOCK_SRC, self.MOCK_TITLE, self.\n MOCK_ALT_TEXT)\n self.mock_image_mapper.assert_called_with(self.MOCK_SRC, self.\n renderer.attachments)\n assert html == self.expected_html\n",
"step-3": "<mask token>\n\n\n@pytest.mark.django_db\nclass TestImage(TestCase):\n MOCK_SRC = 'dummy.jpg'\n MOCK_TITLE = 'title'\n MOCK_ALT_TEXT = 'alt_text'\n\n def setUp(self):\n file1 = File(name='file1.jpg', file=BytesIO(b'abcdef'))\n attachment1 = G(Attachment, original_filename=\n 'user_friendly_filename1.jpg', file=file1)\n self.mock_image_mapper = Mock()\n self.renderer = XMDRenderer(image_mapper=self.mock_image_mapper,\n attachments=[attachment1])\n self.expected_html = (\n '<figure><img conf=\"dummy.jpg\" alt=\"alt_text\" /><figcaption>alt_text</figcaption></figure>'\n )\n\n def test_render_image(self):\n self.mock_image_mapper.return_value = self.MOCK_SRC\n html = self.renderer.image(self.MOCK_SRC, self.MOCK_TITLE, self.\n MOCK_ALT_TEXT)\n self.mock_image_mapper.assert_called_with(self.MOCK_SRC, self.\n renderer.attachments)\n assert html == self.expected_html\n",
"step-4": "<mask token>\nfrom io import BytesIO\nfrom unittest import TestCase\nfrom unittest.mock import patch, Mock\nimport pytest\nfrom django.core.files import File\nfrom django_dynamic_fixture import G\nfrom magplan.models import Attachment\nfrom magplan.xmd.renderer import XMDRenderer\nfrom magplan.xmd.mappers import plan_internal_mapper\n\n\n@pytest.mark.django_db\nclass TestImage(TestCase):\n MOCK_SRC = 'dummy.jpg'\n MOCK_TITLE = 'title'\n MOCK_ALT_TEXT = 'alt_text'\n\n def setUp(self):\n file1 = File(name='file1.jpg', file=BytesIO(b'abcdef'))\n attachment1 = G(Attachment, original_filename=\n 'user_friendly_filename1.jpg', file=file1)\n self.mock_image_mapper = Mock()\n self.renderer = XMDRenderer(image_mapper=self.mock_image_mapper,\n attachments=[attachment1])\n self.expected_html = (\n '<figure><img conf=\"dummy.jpg\" alt=\"alt_text\" /><figcaption>alt_text</figcaption></figure>'\n )\n\n def test_render_image(self):\n self.mock_image_mapper.return_value = self.MOCK_SRC\n html = self.renderer.image(self.MOCK_SRC, self.MOCK_TITLE, self.\n MOCK_ALT_TEXT)\n self.mock_image_mapper.assert_called_with(self.MOCK_SRC, self.\n renderer.attachments)\n assert html == self.expected_html\n",
"step-5": "\"\"\"\nEvery block element test will be automatically\nwrapped inside `<p></p>\\n`. Thats why every block\n\ntest should include this wrapper tag.\n\"\"\"\nfrom io import BytesIO\nfrom unittest import TestCase\nfrom unittest.mock import patch, Mock\n\nimport pytest\nfrom django.core.files import File\nfrom django_dynamic_fixture import G\n\nfrom magplan.models import Attachment\nfrom magplan.xmd.renderer import XMDRenderer\nfrom magplan.xmd.mappers import plan_internal_mapper\n\n\n@pytest.mark.django_db\nclass TestImage(TestCase):\n MOCK_SRC = 'dummy.jpg'\n MOCK_TITLE = 'title'\n MOCK_ALT_TEXT = 'alt_text'\n\n def setUp(self):\n file1 = File(name='file1.jpg', file=BytesIO(b'abcdef'))\n attachment1 = G(Attachment, original_filename='user_friendly_filename1.jpg', file=file1)\n\n self.mock_image_mapper = Mock()\n\n self.renderer = XMDRenderer(image_mapper=self.mock_image_mapper, attachments=[attachment1])\n\n self.expected_html = (\n '<figure>'\n '<img conf=\"dummy.jpg\" alt=\"alt_text\" /><figcaption>alt_text</figcaption>'\n '</figure>'\n )\n\n def test_render_image(self):\n self.mock_image_mapper.return_value = self.MOCK_SRC\n\n html = self.renderer.image(self.MOCK_SRC, self.MOCK_TITLE, self.MOCK_ALT_TEXT)\n\n self.mock_image_mapper.assert_called_with(self.MOCK_SRC, self.renderer.attachments)\n assert html == self.expected_html\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
SPACE = 0
MARK = 1
def frame_to_bit_chunks(frame_values, baud_rate=45.45, start_bit=SPACE, stop_bit=MARK):
"""フレームごとの信号強度からデータビットのまとまりに変換する"""
binary_values = frame_to_binary_values(frame_values)
bit_duration_values = binary_values_to_bit_duration(binary_values)
bit_values = bit_duration_to_bit_values(bit_duration_values, baud_rate)
bit_chunks = bit_values_to_bit_chunks(bit_values, start_bit, stop_bit)
return bit_chunks
def frame_to_binary_values(frame_values, threshold=1.0):
"""フレームごとの信号強度から0/1を判定する"""
# ヒステリシスを持たせるときの前の状態
current_binary_value = SPACE
for mark_value, space_value, time in frame_values:
# mark の強度が space の強度の threshold 倍を越えていれば mark と判断する
if mark_value > space_value * threshold:
current_binary_value = MARK
# space の強度が mark の強度の threshold 倍を越えていれば space と判断する
if space_value > mark_value * threshold:
current_binary_value = SPACE
yield (current_binary_value, time)
def binary_values_to_bit_duration(binary_values):
"""連続する0/1の長さを測る"""
# 前の値
previous_binary_value = SPACE
# 前の値に変化した経過時間
previous_time = 0
# 今の値
current_binary_value = SPACE
# 今の値に変化した経過時間
current_time = 0
for binary_value, time in binary_values:
# 今の値を代入する
current_binary_value = binary_value
current_time = time
# 前と値が変わっていれば、前の値とその長さを出力する
if current_binary_value != previous_binary_value:
yield (previous_binary_value, current_time - previous_time)
# 今の値を前の値に代入する
previous_binary_value = current_binary_value
previous_time = current_time
# ループ内では最後の値は出力されないので、ここで出力する
yield (current_binary_value, current_time - previous_time)
def bit_duration_to_bit_values(bit_duration_values, baud_rate=45.45, minimum_bit_width=0.25):
"""短すぎる値を無視したり長い値を1bitごとに分割したりする"""
# 1bit あたりの時間(秒)
bit_duration = 1 / baud_rate
# 基準(minimum_bit_width) bit あたりの時間(秒)
minimum_duration = bit_duration * minimum_bit_width
# 最後に出力してからの経過時間
duration = 0
for bit_value, original_duration in bit_duration_values:
# 次の値を読んで、経過時間を足す
duration += original_duration
while duration > minimum_duration:
# 今の値の経過時間が基準を超えている間繰り返す
handle_duration = min(bit_duration, duration)
width = handle_duration / bit_duration
yield (bit_value, width)
# 出力した分だけ経過時間を減らす
duration -= handle_duration
def bit_values_to_bit_chunks(bit_values, start_bit=SPACE, stop_bit=MARK, lsb_on_left=True):
"""1bit ごとの値からデータビットを抽出する
bit_index|ビットの役割
---------|----------
0 |スタートビット
1 |データビット
2 |データビット
3 |データビット
4 |データビット
5 |データビット
6 |ストップビット
bit_index が 1-5の範囲のみを出力する
"""
# 前のデータ とりあえずスタートビットとしておく
previous_bit_value = start_bit
# データビットの何番目を処理しているかを数えておく
# はじめはどのタイミングか分からないので None にしておく
bit_index = None
# データビットを貯める
chunk = []
for current_bit_value, _ in bit_values:
if bit_index is None:
# 初期状態、まだデータのタイミングが分かっていない
if previous_bit_value == stop_bit and current_bit_value == start_bit:
# 1つ目のストップビット→スタートビットの遷移を検出
# タイミングが決まる
bit_index = 0
else:
# データのタイミングが分かっている
# 次のビットを読む
bit_index += 1
if bit_index <= 5:
# 5個目まではデータビットなので読む
# この if はデータビットの順番が 12345 か 54321 のどちらにも対応するためのもの
if lsb_on_left:
# list への append は最後に追加する
chunk.append(current_bit_value)
else:
# list への insert(0) は最初に追加する
chunk.insert(0, current_bit_value)
else:
# データビットが終わった
if bit_index == 6:
# ストップビットが来るはず あんまり気にしないで貯めたデータを出力する
yield ''.join(str(bit) for bit in chunk)
# データを空にしておく
chunk.clear()
if previous_bit_value == stop_bit and current_bit_value == start_bit:
# スタートビットが来たので状態をリセットする
bit_index = 0
previous_bit_value = current_bit_value
|
normal
|
{
"blob_id": "ff67ef77958e78335dc1dc2c7e08bf42998387c6",
"index": 2374,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef binary_values_to_bit_duration(binary_values):\n \"\"\"連続する0/1の長さを測る\"\"\"\n previous_binary_value = SPACE\n previous_time = 0\n current_binary_value = SPACE\n current_time = 0\n for binary_value, time in binary_values:\n current_binary_value = binary_value\n current_time = time\n if current_binary_value != previous_binary_value:\n yield previous_binary_value, current_time - previous_time\n previous_binary_value = current_binary_value\n previous_time = current_time\n yield current_binary_value, current_time - previous_time\n\n\ndef bit_duration_to_bit_values(bit_duration_values, baud_rate=45.45,\n minimum_bit_width=0.25):\n \"\"\"短すぎる値を無視したり長い値を1bitごとに分割したりする\"\"\"\n bit_duration = 1 / baud_rate\n minimum_duration = bit_duration * minimum_bit_width\n duration = 0\n for bit_value, original_duration in bit_duration_values:\n duration += original_duration\n while duration > minimum_duration:\n handle_duration = min(bit_duration, duration)\n width = handle_duration / bit_duration\n yield bit_value, width\n duration -= handle_duration\n\n\ndef bit_values_to_bit_chunks(bit_values, start_bit=SPACE, stop_bit=MARK,\n lsb_on_left=True):\n \"\"\"1bit ごとの値からデータビットを抽出する\n\n bit_index|ビットの役割\n ---------|----------\n 0 |スタートビット\n 1 |データビット\n 2 |データビット\n 3 |データビット\n 4 |データビット\n 5 |データビット\n 6 |ストップビット\n\n bit_index が 1-5の範囲のみを出力する\n \"\"\"\n previous_bit_value = start_bit\n bit_index = None\n chunk = []\n for current_bit_value, _ in bit_values:\n if bit_index is None:\n if (previous_bit_value == stop_bit and current_bit_value ==\n start_bit):\n bit_index = 0\n else:\n bit_index += 1\n if bit_index <= 5:\n if lsb_on_left:\n chunk.append(current_bit_value)\n else:\n chunk.insert(0, current_bit_value)\n else:\n if bit_index == 6:\n yield ''.join(str(bit) for bit in chunk)\n chunk.clear()\n if (previous_bit_value == stop_bit and current_bit_value ==\n start_bit):\n bit_index = 0\n previous_bit_value = current_bit_value\n",
"step-3": "<mask token>\n\n\ndef frame_to_bit_chunks(frame_values, baud_rate=45.45, start_bit=SPACE,\n stop_bit=MARK):\n \"\"\"フレームごとの信号強度からデータビットのまとまりに変換する\"\"\"\n binary_values = frame_to_binary_values(frame_values)\n bit_duration_values = binary_values_to_bit_duration(binary_values)\n bit_values = bit_duration_to_bit_values(bit_duration_values, baud_rate)\n bit_chunks = bit_values_to_bit_chunks(bit_values, start_bit, stop_bit)\n return bit_chunks\n\n\n<mask token>\n\n\ndef binary_values_to_bit_duration(binary_values):\n \"\"\"連続する0/1の長さを測る\"\"\"\n previous_binary_value = SPACE\n previous_time = 0\n current_binary_value = SPACE\n current_time = 0\n for binary_value, time in binary_values:\n current_binary_value = binary_value\n current_time = time\n if current_binary_value != previous_binary_value:\n yield previous_binary_value, current_time - previous_time\n previous_binary_value = current_binary_value\n previous_time = current_time\n yield current_binary_value, current_time - previous_time\n\n\ndef bit_duration_to_bit_values(bit_duration_values, baud_rate=45.45,\n minimum_bit_width=0.25):\n \"\"\"短すぎる値を無視したり長い値を1bitごとに分割したりする\"\"\"\n bit_duration = 1 / baud_rate\n minimum_duration = bit_duration * minimum_bit_width\n duration = 0\n for bit_value, original_duration in bit_duration_values:\n duration += original_duration\n while duration > minimum_duration:\n handle_duration = min(bit_duration, duration)\n width = handle_duration / bit_duration\n yield bit_value, width\n duration -= handle_duration\n\n\ndef bit_values_to_bit_chunks(bit_values, start_bit=SPACE, stop_bit=MARK,\n lsb_on_left=True):\n \"\"\"1bit ごとの値からデータビットを抽出する\n\n bit_index|ビットの役割\n ---------|----------\n 0 |スタートビット\n 1 |データビット\n 2 |データビット\n 3 |データビット\n 4 |データビット\n 5 |データビット\n 6 |ストップビット\n\n bit_index が 1-5の範囲のみを出力する\n \"\"\"\n previous_bit_value = start_bit\n bit_index = None\n chunk = []\n for current_bit_value, _ in bit_values:\n if bit_index is None:\n if (previous_bit_value == stop_bit and 
current_bit_value ==\n start_bit):\n bit_index = 0\n else:\n bit_index += 1\n if bit_index <= 5:\n if lsb_on_left:\n chunk.append(current_bit_value)\n else:\n chunk.insert(0, current_bit_value)\n else:\n if bit_index == 6:\n yield ''.join(str(bit) for bit in chunk)\n chunk.clear()\n if (previous_bit_value == stop_bit and current_bit_value ==\n start_bit):\n bit_index = 0\n previous_bit_value = current_bit_value\n",
"step-4": "<mask token>\n\n\ndef frame_to_bit_chunks(frame_values, baud_rate=45.45, start_bit=SPACE,\n stop_bit=MARK):\n \"\"\"フレームごとの信号強度からデータビットのまとまりに変換する\"\"\"\n binary_values = frame_to_binary_values(frame_values)\n bit_duration_values = binary_values_to_bit_duration(binary_values)\n bit_values = bit_duration_to_bit_values(bit_duration_values, baud_rate)\n bit_chunks = bit_values_to_bit_chunks(bit_values, start_bit, stop_bit)\n return bit_chunks\n\n\ndef frame_to_binary_values(frame_values, threshold=1.0):\n \"\"\"フレームごとの信号強度から0/1を判定する\"\"\"\n current_binary_value = SPACE\n for mark_value, space_value, time in frame_values:\n if mark_value > space_value * threshold:\n current_binary_value = MARK\n if space_value > mark_value * threshold:\n current_binary_value = SPACE\n yield current_binary_value, time\n\n\ndef binary_values_to_bit_duration(binary_values):\n \"\"\"連続する0/1の長さを測る\"\"\"\n previous_binary_value = SPACE\n previous_time = 0\n current_binary_value = SPACE\n current_time = 0\n for binary_value, time in binary_values:\n current_binary_value = binary_value\n current_time = time\n if current_binary_value != previous_binary_value:\n yield previous_binary_value, current_time - previous_time\n previous_binary_value = current_binary_value\n previous_time = current_time\n yield current_binary_value, current_time - previous_time\n\n\ndef bit_duration_to_bit_values(bit_duration_values, baud_rate=45.45,\n minimum_bit_width=0.25):\n \"\"\"短すぎる値を無視したり長い値を1bitごとに分割したりする\"\"\"\n bit_duration = 1 / baud_rate\n minimum_duration = bit_duration * minimum_bit_width\n duration = 0\n for bit_value, original_duration in bit_duration_values:\n duration += original_duration\n while duration > minimum_duration:\n handle_duration = min(bit_duration, duration)\n width = handle_duration / bit_duration\n yield bit_value, width\n duration -= handle_duration\n\n\ndef bit_values_to_bit_chunks(bit_values, start_bit=SPACE, stop_bit=MARK,\n lsb_on_left=True):\n \"\"\"1bit 
ごとの値からデータビットを抽出する\n\n bit_index|ビットの役割\n ---------|----------\n 0 |スタートビット\n 1 |データビット\n 2 |データビット\n 3 |データビット\n 4 |データビット\n 5 |データビット\n 6 |ストップビット\n\n bit_index が 1-5の範囲のみを出力する\n \"\"\"\n previous_bit_value = start_bit\n bit_index = None\n chunk = []\n for current_bit_value, _ in bit_values:\n if bit_index is None:\n if (previous_bit_value == stop_bit and current_bit_value ==\n start_bit):\n bit_index = 0\n else:\n bit_index += 1\n if bit_index <= 5:\n if lsb_on_left:\n chunk.append(current_bit_value)\n else:\n chunk.insert(0, current_bit_value)\n else:\n if bit_index == 6:\n yield ''.join(str(bit) for bit in chunk)\n chunk.clear()\n if (previous_bit_value == stop_bit and current_bit_value ==\n start_bit):\n bit_index = 0\n previous_bit_value = current_bit_value\n",
"step-5": "\nSPACE = 0\nMARK = 1\n\ndef frame_to_bit_chunks(frame_values, baud_rate=45.45, start_bit=SPACE, stop_bit=MARK):\n \"\"\"フレームごとの信号強度からデータビットのまとまりに変換する\"\"\"\n\n binary_values = frame_to_binary_values(frame_values)\n bit_duration_values = binary_values_to_bit_duration(binary_values)\n bit_values = bit_duration_to_bit_values(bit_duration_values, baud_rate)\n bit_chunks = bit_values_to_bit_chunks(bit_values, start_bit, stop_bit)\n\n return bit_chunks\n\n\ndef frame_to_binary_values(frame_values, threshold=1.0):\n \"\"\"フレームごとの信号強度から0/1を判定する\"\"\"\n\n # ヒステリシスを持たせるときの前の状態\n current_binary_value = SPACE\n for mark_value, space_value, time in frame_values:\n # mark の強度が space の強度の threshold 倍を越えていれば mark と判断する\n if mark_value > space_value * threshold:\n current_binary_value = MARK\n # space の強度が mark の強度の threshold 倍を越えていれば space と判断する\n if space_value > mark_value * threshold:\n current_binary_value = SPACE\n yield (current_binary_value, time)\n\n\ndef binary_values_to_bit_duration(binary_values):\n \"\"\"連続する0/1の長さを測る\"\"\"\n\n # 前の値\n previous_binary_value = SPACE\n # 前の値に変化した経過時間\n previous_time = 0\n # 今の値\n current_binary_value = SPACE\n # 今の値に変化した経過時間\n current_time = 0\n for binary_value, time in binary_values:\n # 今の値を代入する\n current_binary_value = binary_value\n current_time = time\n # 前と値が変わっていれば、前の値とその長さを出力する\n if current_binary_value != previous_binary_value:\n yield (previous_binary_value, current_time - previous_time)\n # 今の値を前の値に代入する\n previous_binary_value = current_binary_value\n previous_time = current_time\n\n # ループ内では最後の値は出力されないので、ここで出力する\n yield (current_binary_value, current_time - previous_time)\n\n\ndef bit_duration_to_bit_values(bit_duration_values, baud_rate=45.45, minimum_bit_width=0.25):\n \"\"\"短すぎる値を無視したり長い値を1bitごとに分割したりする\"\"\"\n\n # 1bit あたりの時間(秒)\n bit_duration = 1 / baud_rate\n # 基準(minimum_bit_width) bit あたりの時間(秒)\n minimum_duration = bit_duration * minimum_bit_width\n # 最後に出力してからの経過時間\n duration = 0\n for bit_value, 
original_duration in bit_duration_values:\n # 次の値を読んで、経過時間を足す\n duration += original_duration\n while duration > minimum_duration:\n # 今の値の経過時間が基準を超えている間繰り返す\n handle_duration = min(bit_duration, duration)\n width = handle_duration / bit_duration\n yield (bit_value, width)\n # 出力した分だけ経過時間を減らす\n duration -= handle_duration\n\n\ndef bit_values_to_bit_chunks(bit_values, start_bit=SPACE, stop_bit=MARK, lsb_on_left=True):\n \"\"\"1bit ごとの値からデータビットを抽出する\n\n bit_index|ビットの役割\n ---------|----------\n 0 |スタートビット\n 1 |データビット\n 2 |データビット\n 3 |データビット\n 4 |データビット\n 5 |データビット\n 6 |ストップビット\n\n bit_index が 1-5の範囲のみを出力する\n \"\"\"\n # 前のデータ とりあえずスタートビットとしておく\n previous_bit_value = start_bit\n # データビットの何番目を処理しているかを数えておく\n # はじめはどのタイミングか分からないので None にしておく\n bit_index = None\n # データビットを貯める\n chunk = []\n\n for current_bit_value, _ in bit_values:\n if bit_index is None:\n # 初期状態、まだデータのタイミングが分かっていない\n if previous_bit_value == stop_bit and current_bit_value == start_bit:\n # 1つ目のストップビット→スタートビットの遷移を検出\n # タイミングが決まる\n bit_index = 0\n else:\n # データのタイミングが分かっている\n # 次のビットを読む\n bit_index += 1\n if bit_index <= 5:\n # 5個目まではデータビットなので読む\n # この if はデータビットの順番が 12345 か 54321 のどちらにも対応するためのもの\n if lsb_on_left:\n # list への append は最後に追加する\n chunk.append(current_bit_value)\n else:\n # list への insert(0) は最初に追加する\n chunk.insert(0, current_bit_value)\n else:\n # データビットが終わった\n if bit_index == 6:\n # ストップビットが来るはず あんまり気にしないで貯めたデータを出力する\n yield ''.join(str(bit) for bit in chunk)\n # データを空にしておく\n chunk.clear()\n if previous_bit_value == stop_bit and current_bit_value == start_bit:\n # スタートビットが来たので状態をリセットする\n bit_index = 0\n previous_bit_value = current_bit_value\n\n",
"step-ids": [
0,
3,
4,
5,
7
]
}
|
[
0,
3,
4,
5,
7
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Player:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
@property
def right(self):
return self.pos.sub(Vector(Player.RADIUS, 0))
@property
def left(self):
return self.pos.add(Vector(Player.RADIUS, 0))
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def set(self, v):
self.old_pos = self.pos
self.pos = v
self.paint()
def move(self, v: Vector):
self.set(self.pos.add(v))
<|reserved_special_token_0|>
def get_ball(self, ball):
if self.team == 1:
ball.set(self.right)
elif self.team == 2:
ball.set(self.left)
def paint(self):
if self.shape is None:
self.shape = self.canvas.create_rectangle(-Player.RADIUS, -
Player.RADIUS, Player.RADIUS, Player.RADIUS, outline=Player
.OUTLINE, fill=self.color)
delta = self.pos.sub(self.old_pos)
self.canvas.move(self.shape, delta.x, delta.y)
<|reserved_special_token_0|>
def ball_hit_test(self, ball: Ball) ->bool:
return self.rectangle().hit(ball.pos)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Player:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
@property
def right(self):
return self.pos.sub(Vector(Player.RADIUS, 0))
@property
def left(self):
return self.pos.add(Vector(Player.RADIUS, 0))
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def set(self, v):
self.old_pos = self.pos
self.pos = v
self.paint()
def move(self, v: Vector):
self.set(self.pos.add(v))
def move_to_point(self, point: Vector):
v = randint(1, 10) / 10
self.move(point.sub(self.pos).norm().mul(Vector(v, v)))
def get_ball(self, ball):
if self.team == 1:
ball.set(self.right)
elif self.team == 2:
ball.set(self.left)
def paint(self):
if self.shape is None:
self.shape = self.canvas.create_rectangle(-Player.RADIUS, -
Player.RADIUS, Player.RADIUS, Player.RADIUS, outline=Player
.OUTLINE, fill=self.color)
delta = self.pos.sub(self.old_pos)
self.canvas.move(self.shape, delta.x, delta.y)
<|reserved_special_token_0|>
def ball_hit_test(self, ball: Ball) ->bool:
return self.rectangle().hit(ball.pos)
<|reserved_special_token_1|>
from random import randint
from Ball import Ball
from Util import Vector, Rectangle
class Player:
RADIUS = 10
COLOR1 = '#80d6ff'
COLOR2 = '#ff867c'
OUTLINE = '#000000'
@property
def right(self):
return self.pos.sub(Vector(Player.RADIUS, 0))
@property
def left(self):
return self.pos.add(Vector(Player.RADIUS, 0))
@property
def color(self):
if self.team == 1:
return Player.COLOR1
elif self.team == 2:
return Player.COLOR2
def __init__(self, canvas, team):
self.canvas = canvas
self.team = team
self.pos = Vector(0, 0)
self.old_pos = Vector(0, 0)
self.shape = None
def set(self, v):
self.old_pos = self.pos
self.pos = v
self.paint()
def move(self, v: Vector):
self.set(self.pos.add(v))
def move_to_point(self, point: Vector):
v = randint(1, 10) / 10
self.move(point.sub(self.pos).norm().mul(Vector(v, v)))
def get_ball(self, ball):
if self.team == 1:
ball.set(self.right)
elif self.team == 2:
ball.set(self.left)
def paint(self):
if self.shape is None:
self.shape = self.canvas.create_rectangle(-Player.RADIUS, -
Player.RADIUS, Player.RADIUS, Player.RADIUS, outline=Player
.OUTLINE, fill=self.color)
delta = self.pos.sub(self.old_pos)
self.canvas.move(self.shape, delta.x, delta.y)
def rectangle(self) ->Rectangle:
return self.pos.rect(Player.RADIUS)
def ball_hit_test(self, ball: Ball) ->bool:
return self.rectangle().hit(ball.pos)
<|reserved_special_token_1|>
from random import randint
from Ball import Ball
from Util import Vector, Rectangle
class Player:
RADIUS = 10
COLOR1 = "#80d6ff"
COLOR2 = "#ff867c"
OUTLINE = "#000000"
@property
def right(self):
return self.pos.sub(Vector(Player.RADIUS, 0))
@property
def left(self):
return self.pos.add(Vector(Player.RADIUS, 0))
@property
def color(self):
if self.team == 1:
return Player.COLOR1
elif self.team == 2:
return Player.COLOR2
def __init__(self, canvas, team):
self.canvas = canvas
self.team = team
self.pos = Vector(0, 0)
self.old_pos = Vector(0, 0)
self.shape = None
def set(self, v):
self.old_pos = self.pos
self.pos = v
self.paint()
def move(self, v: Vector):
self.set(self.pos.add(v))
def move_to_point(self, point: Vector):
v = randint(1, 10) / 10
self.move(point.sub(self.pos).norm().mul(Vector(v, v)))
def get_ball(self, ball):
if self.team == 1:
ball.set(self.right)
elif self.team == 2:
ball.set(self.left)
def paint(self):
if self.shape is None:
self.shape = self.canvas.create_rectangle(-Player.RADIUS, -Player.RADIUS, Player.RADIUS, Player.RADIUS,
outline=Player.OUTLINE, fill=self.color)
delta = self.pos.sub(self.old_pos)
self.canvas.move(self.shape, delta.x, delta.y)
def rectangle(self) -> Rectangle:
return self.pos.rect(Player.RADIUS)
def ball_hit_test(self, ball: Ball) -> bool:
return self.rectangle().hit(ball.pos)
|
flexible
|
{
"blob_id": "04b02931b749ad06a512b78ca5661ae1f5cb8a9c",
"index": 5534,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Player:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @property\n def right(self):\n return self.pos.sub(Vector(Player.RADIUS, 0))\n\n @property\n def left(self):\n return self.pos.add(Vector(Player.RADIUS, 0))\n <mask token>\n <mask token>\n\n def set(self, v):\n self.old_pos = self.pos\n self.pos = v\n self.paint()\n\n def move(self, v: Vector):\n self.set(self.pos.add(v))\n <mask token>\n\n def get_ball(self, ball):\n if self.team == 1:\n ball.set(self.right)\n elif self.team == 2:\n ball.set(self.left)\n\n def paint(self):\n if self.shape is None:\n self.shape = self.canvas.create_rectangle(-Player.RADIUS, -\n Player.RADIUS, Player.RADIUS, Player.RADIUS, outline=Player\n .OUTLINE, fill=self.color)\n delta = self.pos.sub(self.old_pos)\n self.canvas.move(self.shape, delta.x, delta.y)\n <mask token>\n\n def ball_hit_test(self, ball: Ball) ->bool:\n return self.rectangle().hit(ball.pos)\n",
"step-3": "<mask token>\n\n\nclass Player:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @property\n def right(self):\n return self.pos.sub(Vector(Player.RADIUS, 0))\n\n @property\n def left(self):\n return self.pos.add(Vector(Player.RADIUS, 0))\n <mask token>\n <mask token>\n\n def set(self, v):\n self.old_pos = self.pos\n self.pos = v\n self.paint()\n\n def move(self, v: Vector):\n self.set(self.pos.add(v))\n\n def move_to_point(self, point: Vector):\n v = randint(1, 10) / 10\n self.move(point.sub(self.pos).norm().mul(Vector(v, v)))\n\n def get_ball(self, ball):\n if self.team == 1:\n ball.set(self.right)\n elif self.team == 2:\n ball.set(self.left)\n\n def paint(self):\n if self.shape is None:\n self.shape = self.canvas.create_rectangle(-Player.RADIUS, -\n Player.RADIUS, Player.RADIUS, Player.RADIUS, outline=Player\n .OUTLINE, fill=self.color)\n delta = self.pos.sub(self.old_pos)\n self.canvas.move(self.shape, delta.x, delta.y)\n <mask token>\n\n def ball_hit_test(self, ball: Ball) ->bool:\n return self.rectangle().hit(ball.pos)\n",
"step-4": "from random import randint\nfrom Ball import Ball\nfrom Util import Vector, Rectangle\n\n\nclass Player:\n RADIUS = 10\n COLOR1 = '#80d6ff'\n COLOR2 = '#ff867c'\n OUTLINE = '#000000'\n\n @property\n def right(self):\n return self.pos.sub(Vector(Player.RADIUS, 0))\n\n @property\n def left(self):\n return self.pos.add(Vector(Player.RADIUS, 0))\n\n @property\n def color(self):\n if self.team == 1:\n return Player.COLOR1\n elif self.team == 2:\n return Player.COLOR2\n\n def __init__(self, canvas, team):\n self.canvas = canvas\n self.team = team\n self.pos = Vector(0, 0)\n self.old_pos = Vector(0, 0)\n self.shape = None\n\n def set(self, v):\n self.old_pos = self.pos\n self.pos = v\n self.paint()\n\n def move(self, v: Vector):\n self.set(self.pos.add(v))\n\n def move_to_point(self, point: Vector):\n v = randint(1, 10) / 10\n self.move(point.sub(self.pos).norm().mul(Vector(v, v)))\n\n def get_ball(self, ball):\n if self.team == 1:\n ball.set(self.right)\n elif self.team == 2:\n ball.set(self.left)\n\n def paint(self):\n if self.shape is None:\n self.shape = self.canvas.create_rectangle(-Player.RADIUS, -\n Player.RADIUS, Player.RADIUS, Player.RADIUS, outline=Player\n .OUTLINE, fill=self.color)\n delta = self.pos.sub(self.old_pos)\n self.canvas.move(self.shape, delta.x, delta.y)\n\n def rectangle(self) ->Rectangle:\n return self.pos.rect(Player.RADIUS)\n\n def ball_hit_test(self, ball: Ball) ->bool:\n return self.rectangle().hit(ball.pos)\n",
"step-5": "from random import randint\n\nfrom Ball import Ball\nfrom Util import Vector, Rectangle\n\n\nclass Player:\n RADIUS = 10\n\n COLOR1 = \"#80d6ff\"\n COLOR2 = \"#ff867c\"\n OUTLINE = \"#000000\"\n\n @property\n def right(self):\n return self.pos.sub(Vector(Player.RADIUS, 0))\n\n @property\n def left(self):\n return self.pos.add(Vector(Player.RADIUS, 0))\n\n @property\n def color(self):\n if self.team == 1:\n return Player.COLOR1\n elif self.team == 2:\n return Player.COLOR2\n\n def __init__(self, canvas, team):\n self.canvas = canvas\n self.team = team\n self.pos = Vector(0, 0)\n self.old_pos = Vector(0, 0)\n self.shape = None\n\n def set(self, v):\n self.old_pos = self.pos\n self.pos = v\n self.paint()\n\n def move(self, v: Vector):\n self.set(self.pos.add(v))\n\n def move_to_point(self, point: Vector):\n v = randint(1, 10) / 10\n self.move(point.sub(self.pos).norm().mul(Vector(v, v)))\n\n def get_ball(self, ball):\n if self.team == 1:\n ball.set(self.right)\n elif self.team == 2:\n ball.set(self.left)\n\n def paint(self):\n if self.shape is None:\n self.shape = self.canvas.create_rectangle(-Player.RADIUS, -Player.RADIUS, Player.RADIUS, Player.RADIUS,\n outline=Player.OUTLINE, fill=self.color)\n delta = self.pos.sub(self.old_pos)\n self.canvas.move(self.shape, delta.x, delta.y)\n\n def rectangle(self) -> Rectangle:\n return self.pos.rect(Player.RADIUS)\n\n def ball_hit_test(self, ball: Ball) -> bool:\n return self.rectangle().hit(ball.pos)\n",
"step-ids": [
0,
8,
9,
14,
15
]
}
|
[
0,
8,
9,
14,
15
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
while i <= lowerlimit:
print(i, '*', tablenumber, '=', i * tablenumber)
i = i + 1
print('=======================================================')
<|reserved_special_token_0|>
for foreachnumber in range(upperlimit, lowerlimit + 1):
print(i, '*', tablenumber, '=', i * tablenumber)
print('=======================================================')
<|reserved_special_token_1|>
tablenumber = int(input('Enter a number: '))
upperlimit = int(input('Enter a upper limit: '))
lowerlimit = int(input('Enter a lower limit: '))
i = upperlimit
while i <= lowerlimit:
print(i, '*', tablenumber, '=', i * tablenumber)
i = i + 1
print('=======================================================')
tablenumber = int(input('Enter a number: '))
upperlimit = int(input('Enter a upper limit: '))
lowerlimit = int(input('Enter a lower limit: '))
for foreachnumber in range(upperlimit, lowerlimit + 1):
print(i, '*', tablenumber, '=', i * tablenumber)
print('=======================================================')
<|reserved_special_token_1|>
#Print table using while loop
tablenumber = int(input("Enter a number: "))
upperlimit = int(input("Enter a upper limit: "))
lowerlimit = int(input("Enter a lower limit: "))
i = upperlimit
while (i <= lowerlimit):
print (i,"*",tablenumber,"=",i*tablenumber)
i=i+1
print("=======================================================")
#Printing table using for loop
tablenumber = int(input("Enter a number: "))
upperlimit = int(input("Enter a upper limit: "))
lowerlimit = int(input("Enter a lower limit: "))
for foreachnumber in range(upperlimit, lowerlimit+1):
print (i,"*",tablenumber,"=",i*tablenumber)
print("=======================================================")
|
flexible
|
{
"blob_id": "e2c69191d81724cac44bebba3111a773e408b7c8",
"index": 639,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile i <= lowerlimit:\n print(i, '*', tablenumber, '=', i * tablenumber)\n i = i + 1\nprint('=======================================================')\n<mask token>\nfor foreachnumber in range(upperlimit, lowerlimit + 1):\n print(i, '*', tablenumber, '=', i * tablenumber)\nprint('=======================================================')\n",
"step-3": "tablenumber = int(input('Enter a number: '))\nupperlimit = int(input('Enter a upper limit: '))\nlowerlimit = int(input('Enter a lower limit: '))\ni = upperlimit\nwhile i <= lowerlimit:\n print(i, '*', tablenumber, '=', i * tablenumber)\n i = i + 1\nprint('=======================================================')\ntablenumber = int(input('Enter a number: '))\nupperlimit = int(input('Enter a upper limit: '))\nlowerlimit = int(input('Enter a lower limit: '))\nfor foreachnumber in range(upperlimit, lowerlimit + 1):\n print(i, '*', tablenumber, '=', i * tablenumber)\nprint('=======================================================')\n",
"step-4": "#Print table using while loop\n\n\ntablenumber = int(input(\"Enter a number: \"))\nupperlimit = int(input(\"Enter a upper limit: \"))\nlowerlimit = int(input(\"Enter a lower limit: \"))\ni = upperlimit\nwhile (i <= lowerlimit):\n print (i,\"*\",tablenumber,\"=\",i*tablenumber)\n i=i+1\nprint(\"=======================================================\")\n#Printing table using for loop\n\ntablenumber = int(input(\"Enter a number: \"))\nupperlimit = int(input(\"Enter a upper limit: \"))\nlowerlimit = int(input(\"Enter a lower limit: \"))\nfor foreachnumber in range(upperlimit, lowerlimit+1):\n print (i,\"*\",tablenumber,\"=\",i*tablenumber)\nprint(\"=======================================================\")\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import numpy as np
from sklearn.naive_bayes import BernoulliNB
X = np.array([[1, 2, 3, 3], [1, 3, 4, 4], [2, 4, 5, 5]])
y = np.array([1, 2, 3])
"""
alpha: 平滑系数
binarize: 将特征二值化的阈值
fit_prior: 使用数据拟合先验概率
"""
clf = BernoulliNB(alpha=2.0, binarize=3.0, fit_prior=True)
clf.fit(X, y)
print("class_prior:", clf.class_prior)
print("class_count_:", clf.class_count_) # 按类别顺序输出其对应个数
print("class_log_prior_:", clf.class_log_prior_) # 先验概率对数值
print("feature_count_:", clf.feature_count_) # 各类别个特征之和
print("n_features_:", clf.n_features_)
print("feature_log_prob_:", clf.feature_log_prob_) # 指定类的各特征的条件概率的对数
# 其他参数与方法与MultinomialNB类似
|
normal
|
{
"blob_id": "98a1fab8cee91f37ceee2cfd868d3a5756a055b0",
"index": 7628,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nclf.fit(X, y)\nprint('class_prior:', clf.class_prior)\nprint('class_count_:', clf.class_count_)\nprint('class_log_prior_:', clf.class_log_prior_)\nprint('feature_count_:', clf.feature_count_)\nprint('n_features_:', clf.n_features_)\nprint('feature_log_prob_:', clf.feature_log_prob_)\n",
"step-3": "<mask token>\nX = np.array([[1, 2, 3, 3], [1, 3, 4, 4], [2, 4, 5, 5]])\ny = np.array([1, 2, 3])\n<mask token>\nclf = BernoulliNB(alpha=2.0, binarize=3.0, fit_prior=True)\nclf.fit(X, y)\nprint('class_prior:', clf.class_prior)\nprint('class_count_:', clf.class_count_)\nprint('class_log_prior_:', clf.class_log_prior_)\nprint('feature_count_:', clf.feature_count_)\nprint('n_features_:', clf.n_features_)\nprint('feature_log_prob_:', clf.feature_log_prob_)\n",
"step-4": "import numpy as np\nfrom sklearn.naive_bayes import BernoulliNB\nX = np.array([[1, 2, 3, 3], [1, 3, 4, 4], [2, 4, 5, 5]])\ny = np.array([1, 2, 3])\n<mask token>\nclf = BernoulliNB(alpha=2.0, binarize=3.0, fit_prior=True)\nclf.fit(X, y)\nprint('class_prior:', clf.class_prior)\nprint('class_count_:', clf.class_count_)\nprint('class_log_prior_:', clf.class_log_prior_)\nprint('feature_count_:', clf.feature_count_)\nprint('n_features_:', clf.n_features_)\nprint('feature_log_prob_:', clf.feature_log_prob_)\n",
"step-5": "import numpy as np\n\nfrom sklearn.naive_bayes import BernoulliNB\n\nX = np.array([[1, 2, 3, 3], [1, 3, 4, 4], [2, 4, 5, 5]])\ny = np.array([1, 2, 3])\n\"\"\"\nalpha: 平滑系数\nbinarize: 将特征二值化的阈值\nfit_prior: 使用数据拟合先验概率\n\"\"\"\nclf = BernoulliNB(alpha=2.0, binarize=3.0, fit_prior=True)\nclf.fit(X, y)\nprint(\"class_prior:\", clf.class_prior)\nprint(\"class_count_:\", clf.class_count_) # 按类别顺序输出其对应个数\nprint(\"class_log_prior_:\", clf.class_log_prior_) # 先验概率对数值\nprint(\"feature_count_:\", clf.feature_count_) # 各类别个特征之和\nprint(\"n_features_:\", clf.n_features_)\nprint(\"feature_log_prob_:\", clf.feature_log_prob_) # 指定类的各特征的条件概率的对数\n# 其他参数与方法与MultinomialNB类似\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import cv2
import pdb
import skvideo
import numpy as np
import pandas as pd
from tqdm import tqdm
from harp import fdops
from word2number import w2n
from harp.vid import VidReader
class RegPropData:
"""
Processes region proposal data.
"""
_df = None
props = None
"""Dictionary containing region proposal data properties """
def __init__(self, csv_path):
"""
Initialize a region proposal data instance.
Parameters
----------
csv_path : str
Path to csv file containing proposal information.
Note
----
It is assumed that the directory containing the proposals
csv file has `properties_session.cv` file. This file should
contain information about current session.
"""
# Checking files
fdops.check_if_file_exists(csv_path)
# loading proposal data as a data frame
self._df = pd.read_csv(csv_path)
# Dictionary containing proposal properties
self.props = self._get_properties(csv_path)
def _get_properties(self, csv_path):
"""
Creates a dictionary containing properties of proposal
data.
Parameters
----------
csv_path : str
Path to csv file containing proposal information
"""
props = {}
# File properties
loc, fname, ext = fdops.get_loc_name_ext(csv_path)
props['loc'] = loc
props['name'] = fname
props['ext'] = ext
# Video properties
props['W'] = self._df['W'].unique().item()
props['H'] = self._df['H'].unique().item()
props['FPS'] = self._df['FPS'].unique().item()
props['dur'] = self._df['dur'].unique().item()
props['vname'] = self._get_video_name(fname)
# Proposal properties
props['num_props'] = self._get_num_proposals()
return props
def write_proposals_to_video(self, vdir, frms_per_sec=1.0):
""" Writes proposals to video.
Parameters
----------
vdir : str
Directory where we can find video.
frms_per_sec : float, default 1
A value of 0.5 means that we will skip
`FPS x 1/(frms_per_sec) = 60` frames
"""
# Input video
vid_name = self.props['vname']
vfpath = fdops.get_files_with_kws(vdir, [vid_name, ".mp4"])
if len(vfpath) > 1:
raise Exception(f"More than one video found\n\t{vfpath}")
vin = VidReader(vfpath[0])
# Output video
ovid_path = f"{self.props['loc']}/{self.props['name']}.mp4"
vw = skvideo.io.FFmpegWriter(
ovid_path,
outputdict={'-vcodec': 'libx264','-r':'30'}
)
# Calculate frame numbers(POC) that we will use.
f0_start = 0 # starting frame poc
f0_end = vin.props['num_frames'] - 1 # ending frame poc
f0_skip = vin.props['frame_rate']*(1/frms_per_sec)
f0s = list(range(f0_start, f0_end, int(f0_skip)))
# Loop over each frame number and draw proposal regions
# over them
for f0 in tqdm(f0s):
frm = vin.get_frame(f0, c='bgr')
# Get proposals for frame f0
props = self._get_proposals_for_frame(f0)
# Proposals looop
for p in props:
if len(p) > 0:
w0, h0, w, h = p
frame = cv2.rectangle(
frm, (w0, h0), (w0+w, h0+h), (0, 256, 0), 1
)
# Write frame to output
vw.writeFrame(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
vw.close()
vin.release()
import sys; sys.exit()
def _get_proposals_for_frame(self, fn):
"""
Returns a list of proposal regions
Parameters
----------
fn : int
Frame number
"""
# Get dataframe that contains f0. It should have only one row
tdf = self._df.copy() # lower bound
tdf['f1'] = (tdf['f0'] # creating column
+ tdf['f'] - 1) # with last frame
df = tdf[fn >= tdf['f0']]
df = df[fn <= df['f1']]
if len(df) == 0:
return []
if len(df) > 1:
pdb.set_trace()
raise Exception("USER_ERROR: proposals csv is fishy\n"
f"{df}")
# Proposal string to numpy array
prop_list = df['props'].item().split(":")
# Loop over bounding box list and create a numpy array
if len(prop_list) > 0:
props = []
for p in prop_list:
coords = p.split("-")
if len(coords) == 4:
props += [[int(x) for x in coords]]
return props
def _get_video_name(self, fname):
""" Returns video name by parsing csv file name
Parameters
----------
fname : str
Name of csv file having proposals
"""
csv_name_split = fname.split("_")
thirty_fps_loc = csv_name_split.index("30fps")
video_name = "_".join(csv_name_split[0:thirty_fps_loc+1])
return video_name
def _get_num_proposals(self):
""" Returns number of proposals.
"""
total_props = self._df['nprops'].sum()
return total_props
|
normal
|
{
"blob_id": "b10badc172be119be5b2ab8ccc32cc95a0ed1e7a",
"index": 2680,
"step-1": "<mask token>\n\n\nclass RegPropData:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, csv_path):\n \"\"\"\n Initialize a region proposal data instance.\n\n Parameters\n ----------\n csv_path : str\n Path to csv file containing proposal information.\n\n Note\n ----\n It is assumed that the directory containing the proposals\n csv file has `properties_session.cv` file. This file should\n contain information about current session.\n \"\"\"\n fdops.check_if_file_exists(csv_path)\n self._df = pd.read_csv(csv_path)\n self.props = self._get_properties(csv_path)\n\n def _get_properties(self, csv_path):\n \"\"\"\n Creates a dictionary containing properties of proposal\n data.\n\n Parameters\n ----------\n csv_path : str\n Path to csv file containing proposal information\n \"\"\"\n props = {}\n loc, fname, ext = fdops.get_loc_name_ext(csv_path)\n props['loc'] = loc\n props['name'] = fname\n props['ext'] = ext\n props['W'] = self._df['W'].unique().item()\n props['H'] = self._df['H'].unique().item()\n props['FPS'] = self._df['FPS'].unique().item()\n props['dur'] = self._df['dur'].unique().item()\n props['vname'] = self._get_video_name(fname)\n props['num_props'] = self._get_num_proposals()\n return props\n\n def write_proposals_to_video(self, vdir, frms_per_sec=1.0):\n \"\"\" Writes proposals to video.\n\n Parameters\n ----------\n vdir : str\n Directory where we can find video.\n frms_per_sec : float, default 1\n A value of 0.5 means that we will skip\n `FPS x 1/(frms_per_sec) = 60` frames\n \"\"\"\n vid_name = self.props['vname']\n vfpath = fdops.get_files_with_kws(vdir, [vid_name, '.mp4'])\n if len(vfpath) > 1:\n raise Exception(f'More than one video found\\n\\t{vfpath}')\n vin = VidReader(vfpath[0])\n ovid_path = f\"{self.props['loc']}/{self.props['name']}.mp4\"\n vw = skvideo.io.FFmpegWriter(ovid_path, outputdict={'-vcodec':\n 'libx264', '-r': '30'})\n f0_start = 0\n f0_end = vin.props['num_frames'] - 1\n f0_skip = 
vin.props['frame_rate'] * (1 / frms_per_sec)\n f0s = list(range(f0_start, f0_end, int(f0_skip)))\n for f0 in tqdm(f0s):\n frm = vin.get_frame(f0, c='bgr')\n props = self._get_proposals_for_frame(f0)\n for p in props:\n if len(p) > 0:\n w0, h0, w, h = p\n frame = cv2.rectangle(frm, (w0, h0), (w0 + w, h0 + h),\n (0, 256, 0), 1)\n vw.writeFrame(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))\n vw.close()\n vin.release()\n import sys\n sys.exit()\n <mask token>\n\n def _get_video_name(self, fname):\n \"\"\" Returns video name by parsing csv file name\n\n Parameters\n ----------\n fname : str\n Name of csv file having proposals\n \"\"\"\n csv_name_split = fname.split('_')\n thirty_fps_loc = csv_name_split.index('30fps')\n video_name = '_'.join(csv_name_split[0:thirty_fps_loc + 1])\n return video_name\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass RegPropData:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, csv_path):\n \"\"\"\n Initialize a region proposal data instance.\n\n Parameters\n ----------\n csv_path : str\n Path to csv file containing proposal information.\n\n Note\n ----\n It is assumed that the directory containing the proposals\n csv file has `properties_session.cv` file. This file should\n contain information about current session.\n \"\"\"\n fdops.check_if_file_exists(csv_path)\n self._df = pd.read_csv(csv_path)\n self.props = self._get_properties(csv_path)\n\n def _get_properties(self, csv_path):\n \"\"\"\n Creates a dictionary containing properties of proposal\n data.\n\n Parameters\n ----------\n csv_path : str\n Path to csv file containing proposal information\n \"\"\"\n props = {}\n loc, fname, ext = fdops.get_loc_name_ext(csv_path)\n props['loc'] = loc\n props['name'] = fname\n props['ext'] = ext\n props['W'] = self._df['W'].unique().item()\n props['H'] = self._df['H'].unique().item()\n props['FPS'] = self._df['FPS'].unique().item()\n props['dur'] = self._df['dur'].unique().item()\n props['vname'] = self._get_video_name(fname)\n props['num_props'] = self._get_num_proposals()\n return props\n\n def write_proposals_to_video(self, vdir, frms_per_sec=1.0):\n \"\"\" Writes proposals to video.\n\n Parameters\n ----------\n vdir : str\n Directory where we can find video.\n frms_per_sec : float, default 1\n A value of 0.5 means that we will skip\n `FPS x 1/(frms_per_sec) = 60` frames\n \"\"\"\n vid_name = self.props['vname']\n vfpath = fdops.get_files_with_kws(vdir, [vid_name, '.mp4'])\n if len(vfpath) > 1:\n raise Exception(f'More than one video found\\n\\t{vfpath}')\n vin = VidReader(vfpath[0])\n ovid_path = f\"{self.props['loc']}/{self.props['name']}.mp4\"\n vw = skvideo.io.FFmpegWriter(ovid_path, outputdict={'-vcodec':\n 'libx264', '-r': '30'})\n f0_start = 0\n f0_end = vin.props['num_frames'] - 1\n f0_skip = 
vin.props['frame_rate'] * (1 / frms_per_sec)\n f0s = list(range(f0_start, f0_end, int(f0_skip)))\n for f0 in tqdm(f0s):\n frm = vin.get_frame(f0, c='bgr')\n props = self._get_proposals_for_frame(f0)\n for p in props:\n if len(p) > 0:\n w0, h0, w, h = p\n frame = cv2.rectangle(frm, (w0, h0), (w0 + w, h0 + h),\n (0, 256, 0), 1)\n vw.writeFrame(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))\n vw.close()\n vin.release()\n import sys\n sys.exit()\n\n def _get_proposals_for_frame(self, fn):\n \"\"\"\n Returns a list of proposal regions\n\n Parameters\n ----------\n fn : int\n Frame number\n \"\"\"\n tdf = self._df.copy()\n tdf['f1'] = tdf['f0'] + tdf['f'] - 1\n df = tdf[fn >= tdf['f0']]\n df = df[fn <= df['f1']]\n if len(df) == 0:\n return []\n if len(df) > 1:\n pdb.set_trace()\n raise Exception(f'USER_ERROR: proposals csv is fishy\\n{df}')\n prop_list = df['props'].item().split(':')\n if len(prop_list) > 0:\n props = []\n for p in prop_list:\n coords = p.split('-')\n if len(coords) == 4:\n props += [[int(x) for x in coords]]\n return props\n\n def _get_video_name(self, fname):\n \"\"\" Returns video name by parsing csv file name\n\n Parameters\n ----------\n fname : str\n Name of csv file having proposals\n \"\"\"\n csv_name_split = fname.split('_')\n thirty_fps_loc = csv_name_split.index('30fps')\n video_name = '_'.join(csv_name_split[0:thirty_fps_loc + 1])\n return video_name\n\n def _get_num_proposals(self):\n \"\"\" Returns number of proposals.\n \"\"\"\n total_props = self._df['nprops'].sum()\n return total_props\n",
"step-3": "<mask token>\n\n\nclass RegPropData:\n <mask token>\n _df = None\n props = None\n <mask token>\n\n def __init__(self, csv_path):\n \"\"\"\n Initialize a region proposal data instance.\n\n Parameters\n ----------\n csv_path : str\n Path to csv file containing proposal information.\n\n Note\n ----\n It is assumed that the directory containing the proposals\n csv file has `properties_session.cv` file. This file should\n contain information about current session.\n \"\"\"\n fdops.check_if_file_exists(csv_path)\n self._df = pd.read_csv(csv_path)\n self.props = self._get_properties(csv_path)\n\n def _get_properties(self, csv_path):\n \"\"\"\n Creates a dictionary containing properties of proposal\n data.\n\n Parameters\n ----------\n csv_path : str\n Path to csv file containing proposal information\n \"\"\"\n props = {}\n loc, fname, ext = fdops.get_loc_name_ext(csv_path)\n props['loc'] = loc\n props['name'] = fname\n props['ext'] = ext\n props['W'] = self._df['W'].unique().item()\n props['H'] = self._df['H'].unique().item()\n props['FPS'] = self._df['FPS'].unique().item()\n props['dur'] = self._df['dur'].unique().item()\n props['vname'] = self._get_video_name(fname)\n props['num_props'] = self._get_num_proposals()\n return props\n\n def write_proposals_to_video(self, vdir, frms_per_sec=1.0):\n \"\"\" Writes proposals to video.\n\n Parameters\n ----------\n vdir : str\n Directory where we can find video.\n frms_per_sec : float, default 1\n A value of 0.5 means that we will skip\n `FPS x 1/(frms_per_sec) = 60` frames\n \"\"\"\n vid_name = self.props['vname']\n vfpath = fdops.get_files_with_kws(vdir, [vid_name, '.mp4'])\n if len(vfpath) > 1:\n raise Exception(f'More than one video found\\n\\t{vfpath}')\n vin = VidReader(vfpath[0])\n ovid_path = f\"{self.props['loc']}/{self.props['name']}.mp4\"\n vw = skvideo.io.FFmpegWriter(ovid_path, outputdict={'-vcodec':\n 'libx264', '-r': '30'})\n f0_start = 0\n f0_end = vin.props['num_frames'] - 1\n f0_skip = 
vin.props['frame_rate'] * (1 / frms_per_sec)\n f0s = list(range(f0_start, f0_end, int(f0_skip)))\n for f0 in tqdm(f0s):\n frm = vin.get_frame(f0, c='bgr')\n props = self._get_proposals_for_frame(f0)\n for p in props:\n if len(p) > 0:\n w0, h0, w, h = p\n frame = cv2.rectangle(frm, (w0, h0), (w0 + w, h0 + h),\n (0, 256, 0), 1)\n vw.writeFrame(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))\n vw.close()\n vin.release()\n import sys\n sys.exit()\n\n def _get_proposals_for_frame(self, fn):\n \"\"\"\n Returns a list of proposal regions\n\n Parameters\n ----------\n fn : int\n Frame number\n \"\"\"\n tdf = self._df.copy()\n tdf['f1'] = tdf['f0'] + tdf['f'] - 1\n df = tdf[fn >= tdf['f0']]\n df = df[fn <= df['f1']]\n if len(df) == 0:\n return []\n if len(df) > 1:\n pdb.set_trace()\n raise Exception(f'USER_ERROR: proposals csv is fishy\\n{df}')\n prop_list = df['props'].item().split(':')\n if len(prop_list) > 0:\n props = []\n for p in prop_list:\n coords = p.split('-')\n if len(coords) == 4:\n props += [[int(x) for x in coords]]\n return props\n\n def _get_video_name(self, fname):\n \"\"\" Returns video name by parsing csv file name\n\n Parameters\n ----------\n fname : str\n Name of csv file having proposals\n \"\"\"\n csv_name_split = fname.split('_')\n thirty_fps_loc = csv_name_split.index('30fps')\n video_name = '_'.join(csv_name_split[0:thirty_fps_loc + 1])\n return video_name\n\n def _get_num_proposals(self):\n \"\"\" Returns number of proposals.\n \"\"\"\n total_props = self._df['nprops'].sum()\n return total_props\n",
"step-4": "<mask token>\n\n\nclass RegPropData:\n \"\"\"\n Processes region proposal data.\n \"\"\"\n _df = None\n props = None\n \"\"\"Dictionary containing region proposal data properties \"\"\"\n\n def __init__(self, csv_path):\n \"\"\"\n Initialize a region proposal data instance.\n\n Parameters\n ----------\n csv_path : str\n Path to csv file containing proposal information.\n\n Note\n ----\n It is assumed that the directory containing the proposals\n csv file has `properties_session.cv` file. This file should\n contain information about current session.\n \"\"\"\n fdops.check_if_file_exists(csv_path)\n self._df = pd.read_csv(csv_path)\n self.props = self._get_properties(csv_path)\n\n def _get_properties(self, csv_path):\n \"\"\"\n Creates a dictionary containing properties of proposal\n data.\n\n Parameters\n ----------\n csv_path : str\n Path to csv file containing proposal information\n \"\"\"\n props = {}\n loc, fname, ext = fdops.get_loc_name_ext(csv_path)\n props['loc'] = loc\n props['name'] = fname\n props['ext'] = ext\n props['W'] = self._df['W'].unique().item()\n props['H'] = self._df['H'].unique().item()\n props['FPS'] = self._df['FPS'].unique().item()\n props['dur'] = self._df['dur'].unique().item()\n props['vname'] = self._get_video_name(fname)\n props['num_props'] = self._get_num_proposals()\n return props\n\n def write_proposals_to_video(self, vdir, frms_per_sec=1.0):\n \"\"\" Writes proposals to video.\n\n Parameters\n ----------\n vdir : str\n Directory where we can find video.\n frms_per_sec : float, default 1\n A value of 0.5 means that we will skip\n `FPS x 1/(frms_per_sec) = 60` frames\n \"\"\"\n vid_name = self.props['vname']\n vfpath = fdops.get_files_with_kws(vdir, [vid_name, '.mp4'])\n if len(vfpath) > 1:\n raise Exception(f'More than one video found\\n\\t{vfpath}')\n vin = VidReader(vfpath[0])\n ovid_path = f\"{self.props['loc']}/{self.props['name']}.mp4\"\n vw = skvideo.io.FFmpegWriter(ovid_path, outputdict={'-vcodec':\n 'libx264', 
'-r': '30'})\n f0_start = 0\n f0_end = vin.props['num_frames'] - 1\n f0_skip = vin.props['frame_rate'] * (1 / frms_per_sec)\n f0s = list(range(f0_start, f0_end, int(f0_skip)))\n for f0 in tqdm(f0s):\n frm = vin.get_frame(f0, c='bgr')\n props = self._get_proposals_for_frame(f0)\n for p in props:\n if len(p) > 0:\n w0, h0, w, h = p\n frame = cv2.rectangle(frm, (w0, h0), (w0 + w, h0 + h),\n (0, 256, 0), 1)\n vw.writeFrame(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))\n vw.close()\n vin.release()\n import sys\n sys.exit()\n\n def _get_proposals_for_frame(self, fn):\n \"\"\"\n Returns a list of proposal regions\n\n Parameters\n ----------\n fn : int\n Frame number\n \"\"\"\n tdf = self._df.copy()\n tdf['f1'] = tdf['f0'] + tdf['f'] - 1\n df = tdf[fn >= tdf['f0']]\n df = df[fn <= df['f1']]\n if len(df) == 0:\n return []\n if len(df) > 1:\n pdb.set_trace()\n raise Exception(f'USER_ERROR: proposals csv is fishy\\n{df}')\n prop_list = df['props'].item().split(':')\n if len(prop_list) > 0:\n props = []\n for p in prop_list:\n coords = p.split('-')\n if len(coords) == 4:\n props += [[int(x) for x in coords]]\n return props\n\n def _get_video_name(self, fname):\n \"\"\" Returns video name by parsing csv file name\n\n Parameters\n ----------\n fname : str\n Name of csv file having proposals\n \"\"\"\n csv_name_split = fname.split('_')\n thirty_fps_loc = csv_name_split.index('30fps')\n video_name = '_'.join(csv_name_split[0:thirty_fps_loc + 1])\n return video_name\n\n def _get_num_proposals(self):\n \"\"\" Returns number of proposals.\n \"\"\"\n total_props = self._df['nprops'].sum()\n return total_props\n",
"step-5": "import cv2\nimport pdb\nimport skvideo\nimport numpy as np\nimport pandas as pd\nfrom tqdm import tqdm\nfrom harp import fdops\nfrom word2number import w2n\nfrom harp.vid import VidReader\n\nclass RegPropData:\n \"\"\"\n Processes region proposal data.\n \"\"\"\n\n _df = None\n\n props = None\n \"\"\"Dictionary containing region proposal data properties \"\"\"\n\n def __init__(self, csv_path):\n \"\"\"\n Initialize a region proposal data instance.\n\n Parameters\n ----------\n csv_path : str\n Path to csv file containing proposal information.\n\n Note\n ----\n It is assumed that the directory containing the proposals\n csv file has `properties_session.cv` file. This file should\n contain information about current session.\n \"\"\"\n # Checking files\n fdops.check_if_file_exists(csv_path)\n\n # loading proposal data as a data frame\n self._df = pd.read_csv(csv_path)\n\n # Dictionary containing proposal properties\n self.props = self._get_properties(csv_path)\n\n def _get_properties(self, csv_path):\n \"\"\"\n Creates a dictionary containing properties of proposal\n data.\n\n Parameters\n ----------\n csv_path : str\n Path to csv file containing proposal information\n \"\"\"\n props = {}\n\n # File properties\n loc, fname, ext = fdops.get_loc_name_ext(csv_path)\n props['loc'] = loc\n props['name'] = fname\n props['ext'] = ext\n\n # Video properties\n props['W'] = self._df['W'].unique().item()\n props['H'] = self._df['H'].unique().item()\n props['FPS'] = self._df['FPS'].unique().item()\n props['dur'] = self._df['dur'].unique().item()\n props['vname'] = self._get_video_name(fname)\n\n # Proposal properties\n props['num_props'] = self._get_num_proposals()\n\n return props\n\n def write_proposals_to_video(self, vdir, frms_per_sec=1.0):\n \"\"\" Writes proposals to video.\n\n Parameters\n ----------\n vdir : str\n Directory where we can find video.\n frms_per_sec : float, default 1\n A value of 0.5 means that we will skip\n `FPS x 1/(frms_per_sec) = 60` 
frames\n \"\"\"\n # Input video\n vid_name = self.props['vname']\n vfpath = fdops.get_files_with_kws(vdir, [vid_name, \".mp4\"])\n if len(vfpath) > 1:\n raise Exception(f\"More than one video found\\n\\t{vfpath}\")\n vin = VidReader(vfpath[0])\n\n # Output video\n ovid_path = f\"{self.props['loc']}/{self.props['name']}.mp4\"\n vw = skvideo.io.FFmpegWriter(\n ovid_path,\n outputdict={'-vcodec': 'libx264','-r':'30'}\n )\n\n # Calculate frame numbers(POC) that we will use.\n f0_start = 0 # starting frame poc\n f0_end = vin.props['num_frames'] - 1 # ending frame poc\n f0_skip = vin.props['frame_rate']*(1/frms_per_sec)\n f0s = list(range(f0_start, f0_end, int(f0_skip)))\n\n # Loop over each frame number and draw proposal regions\n # over them\n for f0 in tqdm(f0s):\n frm = vin.get_frame(f0, c='bgr')\n\n # Get proposals for frame f0\n props = self._get_proposals_for_frame(f0)\n\n # Proposals looop\n for p in props:\n if len(p) > 0:\n w0, h0, w, h = p\n frame = cv2.rectangle(\n frm, (w0, h0), (w0+w, h0+h), (0, 256, 0), 1\n )\n # Write frame to output\n vw.writeFrame(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))\n\n vw.close()\n vin.release()\n import sys; sys.exit()\n\n def _get_proposals_for_frame(self, fn):\n \"\"\"\n Returns a list of proposal regions\n\n Parameters\n ----------\n fn : int\n Frame number\n \"\"\"\n\n # Get dataframe that contains f0. 
It should have only one row\n tdf = self._df.copy() # lower bound\n tdf['f1'] = (tdf['f0'] # creating column\n + tdf['f'] - 1) # with last frame\n df = tdf[fn >= tdf['f0']]\n df = df[fn <= df['f1']]\n if len(df) == 0:\n return []\n if len(df) > 1:\n pdb.set_trace()\n raise Exception(\"USER_ERROR: proposals csv is fishy\\n\"\n f\"{df}\")\n\n # Proposal string to numpy array\n prop_list = df['props'].item().split(\":\")\n\n # Loop over bounding box list and create a numpy array\n if len(prop_list) > 0:\n props = []\n for p in prop_list:\n coords = p.split(\"-\")\n if len(coords) == 4:\n props += [[int(x) for x in coords]]\n return props\n\n def _get_video_name(self, fname):\n \"\"\" Returns video name by parsing csv file name\n\n Parameters\n ----------\n fname : str\n Name of csv file having proposals\n \"\"\"\n csv_name_split = fname.split(\"_\")\n thirty_fps_loc = csv_name_split.index(\"30fps\")\n video_name = \"_\".join(csv_name_split[0:thirty_fps_loc+1])\n return video_name\n\n def _get_num_proposals(self):\n \"\"\" Returns number of proposals.\n \"\"\"\n total_props = self._df['nprops'].sum()\n return total_props\n",
"step-ids": [
5,
7,
8,
9,
11
]
}
|
[
5,
7,
8,
9,
11
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
# NOTE(review): Name and BMI are assumed to be defined earlier in the
# file (user prompts + BMI computation) — confirm against full source.
print('\n')
# Classify the BMI value into a weight category.
# NOTE(review): the 24.9/29.9 cutoffs leave small gaps (e.g. BMI 24.95
# falls through to "overweight"); conventional boundaries are 25 and 30.
if BMI < 18.5:
    print(Name, ', your BMI calculation is ', format(BMI, '.1f'),
        ', which indicates your weight category is underweight.', sep='')
elif BMI < 24.9:
    print(Name, ', your BMI calculation is ', format(BMI, '.1f'),
        ', which indicates your weight category is ideal.', sep='')
elif BMI < 29.9:
    print(Name, ', your BMI calculation is ', format(BMI, '.1f'),
        ', which indicates your weight category is overweight.', sep='')
else:
    print(Name, ', your BMI calculation is ', format(BMI, '.1f'),
        ', which indicates your weight category is obese.', sep='')
<|reserved_special_token_1|>
# Prompt the user for the inputs needed to compute BMI.
Name = input('Enter your full name: ')
Weight = float(input('Enter your weight in pounds: '))
Height = float(input('Enter your height in inches: '))
# Imperial BMI formula: weight(lb) * 703 / height(in)^2.
BMI = Weight * 703 / Height ** 2
print('\n')
# Classify the BMI value into a weight category.
# NOTE(review): the 24.9/29.9 cutoffs leave small gaps (e.g. BMI 24.95
# is reported overweight); conventional boundaries are 25 and 30.
if BMI < 18.5:
    print(Name, ', your BMI calculation is ', format(BMI, '.1f'),
        ', which indicates your weight category is underweight.', sep='')
elif BMI < 24.9:
    print(Name, ', your BMI calculation is ', format(BMI, '.1f'),
        ', which indicates your weight category is ideal.', sep='')
elif BMI < 29.9:
    print(Name, ', your BMI calculation is ', format(BMI, '.1f'),
        ', which indicates your weight category is overweight.', sep='')
else:
    print(Name, ', your BMI calculation is ', format(BMI, '.1f'),
        ', which indicates your weight category is obese.', sep='')
<|reserved_special_token_1|>
# aylat

# This program will calculate an individual's body mass index (BMI),
# based on their height and their weight

# Prompt user to input information
Name = input('Enter your full name: ')
Weight = float(input('Enter your weight in pounds: '))
Height = float(input('Enter your height in inches: '))

# Perform BMI calculation using the imperial formula:
# BMI = weight(lb) * 703 / height(in)^2
BMI = Weight * 703 / Height**2

# Determine the user's weight category from the standard BMI boundaries
# (18.5, 25, 30). The previous cutoffs of 24.9/29.9 left gaps: e.g. a
# BMI of 24.95 was misclassified as overweight instead of ideal.
print('\n')

if BMI < 18.5:
    print(Name, ", your BMI calculation is ", format(BMI, '.1f'),
          ", which indicates your weight category is underweight.", sep='')

elif BMI < 25:
    print(Name, ", your BMI calculation is ", format(BMI, '.1f'),
          ", which indicates your weight category is ideal.", sep='')

elif BMI < 30:
    print(Name, ", your BMI calculation is ", format(BMI, '.1f'),
          ", which indicates your weight category is overweight.", sep='')

else:
    print(Name, ", your BMI calculation is ", format(BMI, '.1f'),
          ", which indicates your weight category is obese.", sep='')
|
flexible
|
{
"blob_id": "8b009451e9f65ef12e5db1321a9d5347ef7fd756",
"index": 9593,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('\\n')\nif BMI < 18.5:\n print(Name, ', your BMI calculation is ', format(BMI, '.1f'),\n ', which indicates your weight category is underweight.', sep='')\nelif BMI < 24.9:\n print(Name, ', your BMI calculation is ', format(BMI, '.1f'),\n ', which indicates your weight category is ideal.', sep='')\nelif BMI < 29.9:\n print(Name, ', your BMI calculation is ', format(BMI, '.1f'),\n ', which indicates your weight category is overweight.', sep='')\nelse:\n print(Name, ', your BMI calculation is ', format(BMI, '.1f'),\n ', which indicates your weight category is obese.', sep='')\n",
"step-3": "Name = input('Enter your full name: ')\nWeight = float(input('Enter your weight in pounds: '))\nHeight = float(input('Enter your height in inches: '))\nBMI = Weight * 703 / Height ** 2\nprint('\\n')\nif BMI < 18.5:\n print(Name, ', your BMI calculation is ', format(BMI, '.1f'),\n ', which indicates your weight category is underweight.', sep='')\nelif BMI < 24.9:\n print(Name, ', your BMI calculation is ', format(BMI, '.1f'),\n ', which indicates your weight category is ideal.', sep='')\nelif BMI < 29.9:\n print(Name, ', your BMI calculation is ', format(BMI, '.1f'),\n ', which indicates your weight category is overweight.', sep='')\nelse:\n print(Name, ', your BMI calculation is ', format(BMI, '.1f'),\n ', which indicates your weight category is obese.', sep='')\n",
"step-4": "# aylat\n\n# This program will calculate an individual's body mass index (BMI), \n# based on their height and their weight\n\n# Prompt user to input information\nName = input('Enter your full name: ')\nWeight = float(input('Enter your weight in pounds: '))\nHeight = float(input('Enter your height in inches: '))\n\n# Perform BMI calculation, based on user input\nBMI = Weight * 703 / Height**2\n\n# Use an if/elif structure to determine the user's weight category, based on BMI\nprint('\\n')\n\nif BMI < 18.5: \n print(Name, \", your BMI calculation is \", format(BMI, '.1f'),\n \", which indicates your weight category is underweight.\", sep='')\n \nelif BMI < 24.9:\n print(Name, \", your BMI calculation is \", format(BMI, '.1f'),\n \", which indicates your weight category is ideal.\", sep='')\n \nelif BMI < 29.9:\n print(Name, \", your BMI calculation is \", format(BMI, '.1f'),\n \", which indicates your weight category is overweight.\", sep='')\n \nelse:\n print(Name, \", your BMI calculation is \", format(BMI, '.1f'),\n \", which indicates your weight category is obese.\", sep='')\n\n\n \n \n \n \n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import datetime
from threading import Thread
import cv2
class WebcamVideoStream:
    """Read frames from a camera on a background thread.

    Polling ``cv2.VideoCapture`` from a dedicated thread keeps the main
    loop from blocking on camera I/O, which improves effective FPS.

    NOTE(review): ``self.frame`` is shared between threads without a
    lock; this is the common OpenCV idiom, but callers must treat the
    returned frame as read-only.
    """

    def __init__(self, src=0):
        """Open the video source and grab an initial frame.

        Parameters
        ----------
        src : int or str, optional
            Camera index (or video path/URL) passed to ``cv2.VideoCapture``.
        """
        self.stream = cv2.VideoCapture(src)
        (self.grabbed, self.frame) = self.stream.read()
        # Flag polled by the worker loop; set by stop() to end the thread.
        self.stopped = False

    def start(self):
        """Start the background capture thread and return ``self``."""
        # daemon=True so a forgotten stop() cannot keep the process alive.
        Thread(target=self.update, args=(), daemon=True).start()
        return self

    def update(self):
        """Worker loop: keep grabbing frames until stop() is called.

        Debug prints and the misplaced ``cv2.destroyAllWindows()`` call
        (window teardown is the display code's responsibility, not the
        capture thread's) were removed from the original.
        """
        while not self.stopped:
            (self.grabbed, self.frame) = self.stream.read()
        # Release the camera handle once the loop ends; the original
        # leaked it.
        self.stream.release()

    def read(self):
        """Return the frame most recently captured by the worker thread."""
        return self.frame

    def stop(self):
        """Signal the worker loop to exit."""
        self.stopped = True
|
normal
|
{
"blob_id": "8a4fe88bfa39eeeda42198260a1b22621c33183e",
"index": 7894,
"step-1": "<mask token>\n\n\nclass WebcamVideoStream:\n <mask token>\n\n def start(self):\n Thread(target=self.update, args=()).start()\n return self\n\n def update(self):\n while True:\n if self.stopped:\n print('returning')\n cv2.destroyAllWindows()\n return\n print('before')\n self.grabbed, self.frame = self.stream.read()\n print('after')\n <mask token>\n\n def stop(self):\n print('Stop in thread!')\n self.stopped = True\n",
"step-2": "<mask token>\n\n\nclass WebcamVideoStream:\n\n def __init__(self, src=0):\n self.stream = cv2.VideoCapture(src)\n self.grabbed, self.frame = self.stream.read()\n self.stopped = False\n\n def start(self):\n Thread(target=self.update, args=()).start()\n return self\n\n def update(self):\n while True:\n if self.stopped:\n print('returning')\n cv2.destroyAllWindows()\n return\n print('before')\n self.grabbed, self.frame = self.stream.read()\n print('after')\n <mask token>\n\n def stop(self):\n print('Stop in thread!')\n self.stopped = True\n",
"step-3": "<mask token>\n\n\nclass WebcamVideoStream:\n\n def __init__(self, src=0):\n self.stream = cv2.VideoCapture(src)\n self.grabbed, self.frame = self.stream.read()\n self.stopped = False\n\n def start(self):\n Thread(target=self.update, args=()).start()\n return self\n\n def update(self):\n while True:\n if self.stopped:\n print('returning')\n cv2.destroyAllWindows()\n return\n print('before')\n self.grabbed, self.frame = self.stream.read()\n print('after')\n\n def read(self):\n print('in read func func')\n return self.frame\n\n def stop(self):\n print('Stop in thread!')\n self.stopped = True\n",
"step-4": "import datetime\nfrom threading import Thread\nimport cv2\n\n\nclass WebcamVideoStream:\n\n def __init__(self, src=0):\n self.stream = cv2.VideoCapture(src)\n self.grabbed, self.frame = self.stream.read()\n self.stopped = False\n\n def start(self):\n Thread(target=self.update, args=()).start()\n return self\n\n def update(self):\n while True:\n if self.stopped:\n print('returning')\n cv2.destroyAllWindows()\n return\n print('before')\n self.grabbed, self.frame = self.stream.read()\n print('after')\n\n def read(self):\n print('in read func func')\n return self.frame\n\n def stop(self):\n print('Stop in thread!')\n self.stopped = True\n",
"step-5": "import datetime\nfrom threading import Thread\nimport cv2\n\nclass WebcamVideoStream:\n\n #Constructor\n\tdef __init__(self, src=0):\n\t\t# initialize the video camera stream and read the first frame\n\t\t# from the stream\n\t\tself.stream = cv2.VideoCapture(src)\n\t\t(self.grabbed, self.frame) = self.stream.read()\n\t\t# initialize the variable used to indicate if the thread should be stopped\n\t\tself.stopped = False\n\n\tdef start(self):\n\t\t# start the thread to read frames from the video stream\n\t # calling update method causes the method to be placed in separate thread from main script - hence better FPS!\n\t\tThread(target=self.update, args=()).start()\n\t\treturn self\n\n\tdef update(self):\n\t\t# keep looping infinitely until the thread is stopped\n\t\twhile True:\n\t\t\t# if the thread indicator variable is set, stop the thread\n\t\t\tif self.stopped:\n\t\t\t\tprint(\"returning\")\n\t\t\t\tcv2.destroyAllWindows()\n\t\t\t\treturn\n\t\t\t# otherwise, read the next frame from the stream\n\t\t\tprint(\"before\")\n\t\t\t(self.grabbed, self.frame) = self.stream.read()\n\t\t\tprint(\"after\")\n\tdef read(self):\n\t\t# return the frame most recently read\n\t\tprint(\"in read func func\")\n\t\treturn self.frame\n\tdef stop(self):\n\t\t# indicate that the thread should be stopped\n\t\tprint(\"Stop in thread!\")\n\t\tself.stopped = True\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def test_colorization_net():
    """Smoke test for the ColorizationNet generator.

    Builds the net from its registry config, runs a forward pass on
    random 256x256 inputs, and checks the returned feature-map shapes
    on CPU (and on GPU when CUDA is available).
    """
    model_cfg = dict(type='ColorizationNet', input_nc=4, output_nc=2,
        norm_type='batch')
    # Build the model through the MODELS registry.
    model = MODELS.build(model_cfg)
    assert model.__class__.__name__ == 'ColorizationNet'
    # Inputs are (N, C, H, W); presumably input_A is the grayscale image,
    # input_B the 2-channel color hints and mask_B the hint mask — confirm
    # against the ColorizationNet forward() signature.
    input_A = torch.rand(1, 1, 256, 256)
    input_B = torch.rand(1, 2, 256, 256)
    mask_B = torch.rand(1, 1, 256, 256)
    target_shape = 1, 2, 256, 256
    # Forward pass on CPU.
    out_class, out_reg, feature_map = model(input_A, input_B, mask_B)
    assert isinstance(feature_map, dict)
    assert feature_map['conv1_2'].shape == (1, 64, 256, 256) and feature_map[
        'out_reg'].shape == target_shape
    # Repeat the forward pass on GPU, when one is present.
    if torch.cuda.is_available():
        model = model.cuda()
        input_A = input_A.cuda()
        input_B = input_B.cuda()
        mask_B = mask_B.cuda()
        out_class, out_reg, feature_map = model(input_A, input_B, mask_B)
        assert isinstance(feature_map, dict)
        for item in feature_map.keys():
            assert torch.is_tensor(feature_map[item])
<|reserved_special_token_1|>
import torch
from mmagic.registry import MODELS
def test_colorization_net():
    """Smoke-test ColorizationNet: registry build plus CPU/GPU forward."""
    cfg = dict(type='ColorizationNet', input_nc=4, output_nc=2,
               norm_type='batch')
    net = MODELS.build(cfg)
    assert net.__class__.__name__ == 'ColorizationNet'

    # Random 256x256 inputs: 1-channel image, 2-channel hints, 1-channel mask.
    img = torch.rand(1, 1, 256, 256)
    hints = torch.rand(1, 2, 256, 256)
    hint_mask = torch.rand(1, 1, 256, 256)
    expected = (1, 2, 256, 256)

    # Forward pass on CPU.
    out_class, out_reg, feature_map = net(img, hints, hint_mask)
    assert isinstance(feature_map, dict)
    assert feature_map['conv1_2'].shape == (1, 64, 256, 256)
    assert feature_map['out_reg'].shape == expected

    # Forward pass on GPU, when one is present.
    if torch.cuda.is_available():
        net = net.cuda()
        img, hints, hint_mask = img.cuda(), hints.cuda(), hint_mask.cuda()
        out_class, out_reg, feature_map = net(img, hints, hint_mask)
        assert isinstance(feature_map, dict)
        for key in feature_map:
            assert torch.is_tensor(feature_map[key])
<|reserved_special_token_1|>
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmagic.registry import MODELS
def test_colorization_net():
    """Smoke test for ColorizationNet.

    Builds the model through the MODELS registry, runs a forward pass
    on random 256x256 inputs, and verifies the feature-map shapes on
    CPU and (when CUDA is available) on GPU.
    """
    model_cfg = dict(
        type='ColorizationNet', input_nc=4, output_nc=2, norm_type='batch')

    # build model
    model = MODELS.build(model_cfg)

    # test attributes
    assert model.__class__.__name__ == 'ColorizationNet'

    # prepare data — inputs are (N, C, H, W); presumably input_A is the
    # grayscale image and input_B the ab color hints (confirm against
    # the network's forward() signature)
    input_A = torch.rand(1, 1, 256, 256)
    input_B = torch.rand(1, 2, 256, 256)
    mask_B = torch.rand(1, 1, 256, 256)

    target_shape = (1, 2, 256, 256)

    # test on cpu
    (out_class, out_reg, feature_map) = model(input_A, input_B, mask_B)
    assert isinstance(feature_map, dict)
    assert feature_map['conv1_2'].shape == (1, 64, 256, 256) \
        and feature_map['out_reg'].shape == target_shape

    # test on gpu
    if torch.cuda.is_available():
        model = model.cuda()
        input_A = input_A.cuda()
        input_B = input_B.cuda()
        mask_B = mask_B.cuda()
        (out_class, out_reg, feature_map) = \
            model(input_A, input_B, mask_B)

        assert isinstance(feature_map, dict)
        for item in feature_map.keys():
            assert torch.is_tensor(feature_map[item])
|
flexible
|
{
"blob_id": "94be205e516c1f1248b6028419c04c927236596e",
"index": 618,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef test_colorization_net():\n model_cfg = dict(type='ColorizationNet', input_nc=4, output_nc=2,\n norm_type='batch')\n model = MODELS.build(model_cfg)\n assert model.__class__.__name__ == 'ColorizationNet'\n input_A = torch.rand(1, 1, 256, 256)\n input_B = torch.rand(1, 2, 256, 256)\n mask_B = torch.rand(1, 1, 256, 256)\n target_shape = 1, 2, 256, 256\n out_class, out_reg, feature_map = model(input_A, input_B, mask_B)\n assert isinstance(feature_map, dict)\n assert feature_map['conv1_2'].shape == (1, 64, 256, 256) and feature_map[\n 'out_reg'].shape == target_shape\n if torch.cuda.is_available():\n model = model.cuda()\n input_A = input_A.cuda()\n input_B = input_B.cuda()\n mask_B = mask_B.cuda()\n out_class, out_reg, feature_map = model(input_A, input_B, mask_B)\n assert isinstance(feature_map, dict)\n for item in feature_map.keys():\n assert torch.is_tensor(feature_map[item])\n",
"step-3": "import torch\nfrom mmagic.registry import MODELS\n\n\ndef test_colorization_net():\n model_cfg = dict(type='ColorizationNet', input_nc=4, output_nc=2,\n norm_type='batch')\n model = MODELS.build(model_cfg)\n assert model.__class__.__name__ == 'ColorizationNet'\n input_A = torch.rand(1, 1, 256, 256)\n input_B = torch.rand(1, 2, 256, 256)\n mask_B = torch.rand(1, 1, 256, 256)\n target_shape = 1, 2, 256, 256\n out_class, out_reg, feature_map = model(input_A, input_B, mask_B)\n assert isinstance(feature_map, dict)\n assert feature_map['conv1_2'].shape == (1, 64, 256, 256) and feature_map[\n 'out_reg'].shape == target_shape\n if torch.cuda.is_available():\n model = model.cuda()\n input_A = input_A.cuda()\n input_B = input_B.cuda()\n mask_B = mask_B.cuda()\n out_class, out_reg, feature_map = model(input_A, input_B, mask_B)\n assert isinstance(feature_map, dict)\n for item in feature_map.keys():\n assert torch.is_tensor(feature_map[item])\n",
"step-4": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\n\nfrom mmagic.registry import MODELS\n\n\ndef test_colorization_net():\n\n model_cfg = dict(\n type='ColorizationNet', input_nc=4, output_nc=2, norm_type='batch')\n\n # build model\n model = MODELS.build(model_cfg)\n\n # test attributes\n assert model.__class__.__name__ == 'ColorizationNet'\n\n # prepare data\n input_A = torch.rand(1, 1, 256, 256)\n input_B = torch.rand(1, 2, 256, 256)\n mask_B = torch.rand(1, 1, 256, 256)\n\n target_shape = (1, 2, 256, 256)\n\n # test on cpu\n (out_class, out_reg, feature_map) = model(input_A, input_B, mask_B)\n assert isinstance(feature_map, dict)\n assert feature_map['conv1_2'].shape == (1, 64, 256, 256) \\\n and feature_map['out_reg'].shape == target_shape\n\n # test on gpu\n if torch.cuda.is_available():\n model = model.cuda()\n input_A = input_A.cuda()\n input_B = input_B.cuda()\n mask_B = mask_B.cuda()\n (out_class, out_reg, feature_map) = \\\n model(input_A, input_B, mask_B)\n\n assert isinstance(feature_map, dict)\n for item in feature_map.keys():\n assert torch.is_tensor(feature_map[item])\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def is_balanced(tree_root):
    """Return True if the binary tree rooted at *tree_root* is superbalanced.

    A tree is superbalanced when the difference between the depth of its
    shallowest leaf and its deepest leaf is at most 1.  An empty tree is
    considered balanced.

    Nodes are assumed to expose ``left`` and ``right`` attributes
    (``None`` for a missing child).
    """
    if tree_root is None:
        return True
    # Iterative DFS with an explicit stack of (node, depth) pairs.
    stack = [(tree_root, 0)]
    # Set instead of list: O(1) membership test per leaf instead of O(k).
    leaf_depths = set()
    while stack:
        node, depth = stack.pop()
        if not node.left and not node.right:
            # Leaf: record a newly seen depth and fail fast once the
            # spread between shallowest and deepest leaf exceeds 1.
            if depth not in leaf_depths:
                leaf_depths.add(depth)
                if len(leaf_depths) > 1 and max(leaf_depths) - min(leaf_depths) > 1:
                    return False
        else:
            if node.left:
                stack.append((node.left, depth + 1))
            if node.right:
                stack.append((node.right, depth + 1))
    return True
<|reserved_special_token_1|>
def is_balanced(tree_root):
    """Check whether the tree rooted at *tree_root* is superbalanced.

    Superbalanced: the shallowest and deepest leaves differ in depth by
    no more than 1.  An empty tree counts as balanced.
    """
    if tree_root is None:
        return True

    # Depth-first walk using an explicit stack of (node, depth) pairs,
    # collecting the distinct depths at which leaves occur.
    stack = [(tree_root, 0)]
    leaf_depths = []

    while stack:
        node, depth = stack.pop()

        if node.left or node.right:
            # Internal node: descend into whichever children exist.
            if node.left:
                stack.append((node.left, depth + 1))
            if node.right:
                stack.append((node.right, depth + 1))
            continue

        # Leaf node: remember a newly seen depth, then test the spread.
        if depth not in leaf_depths:
            leaf_depths.append(depth)
        if len(leaf_depths) > 1 and max(leaf_depths) - min(leaf_depths) > 1:
            return False

    return True
|
flexible
|
{
"blob_id": "833c8234d829dfa1937392f0ad4952aeffa4e26d",
"index": 1150,
"step-1": "<mask token>\n",
"step-2": "def is_balanced(tree_root):\n if tree_root is None:\n return True\n nodeQ = [(tree_root, 0)]\n depths = []\n while len(nodeQ):\n last_node, depth = nodeQ.pop()\n if not last_node.left and not last_node.right:\n if depth not in depths:\n depths.append(depth)\n if len(depths) > 1 and max(depths) - min(depths) > 1:\n return False\n else:\n if last_node.left:\n nodeQ.append((last_node.left, depth + 1))\n if last_node.right:\n nodeQ.append((last_node.right, depth + 1))\n return True\n",
"step-3": "def is_balanced(tree_root):\n # Determine if the tree is superbalanced\n \n if tree_root is None:\n return True\n \n nodeQ = [(tree_root, 0)]\n depths = []\n \n while len(nodeQ):\n \n last_node, depth = nodeQ.pop()\n \n if( not last_node.left ) and (not last_node.right ):\n if depth not in depths:\n depths.append(depth)\n \n if ((len(depths) > 1) and (max(depths) - min(depths) > 1)):\n return False\n else:\n \n if(last_node.left):\n nodeQ.append((last_node.left, depth + 1))\n if(last_node.right):\n nodeQ.append((last_node.right, depth + 1))\n \n return True\n \n \n# store node pointer and depth as tuples\n# pop together and store in variables node, depth\n# append node.right, node.left\n# put in while loop until list is empty\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
from multiprocessing import Process, Pipe
from time import sleep
from os import getpid
def ponger(pipe, response):
    """Echo loop run in a child process: receive on *pipe*, pause, reply.

    Blocks on recv(), prints the sender's message tagged with this
    process's pid, sleeps one second, then sends *response* back.
    Runs forever — the process must be terminated externally.
    """
    while True:
        msg = pipe.recv()  # blocks until the peer endpoint sends
        print(f"{getpid()} receiving: {msg}")
        sleep(1)  # throttle the exchange to roughly one round-trip per second
        pipe.send(response)
if __name__ == '__main__':
    # Two connected endpoints of one duplex pipe; each child gets one end.
    ping_conn, pong_conn = Pipe()
    Process(target=ponger, args=(ping_conn, 'ping')).start()
    Process(target=ponger, args=(pong_conn, 'pong')).start()
    # Seed the exchange; the children then ping-pong indefinitely.
    # NOTE(review): the child processes are never joined and loop forever,
    # so the script only stops on manual interrupt — presumably intentional
    # for a demo, but confirm.
    ping_conn.send('ping')
|
normal
|
{
"blob_id": "aac9960dafc9e8d3a5670251fcc54eb8e34d4458",
"index": 9282,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef ponger(pipe, response):\n while True:\n msg = pipe.recv()\n print(f'{getpid()} receiving: {msg}')\n sleep(1)\n pipe.send(response)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef ponger(pipe, response):\n while True:\n msg = pipe.recv()\n print(f'{getpid()} receiving: {msg}')\n sleep(1)\n pipe.send(response)\n\n\nif __name__ == '__main__':\n ping_conn, pong_conn = Pipe()\n Process(target=ponger, args=(ping_conn, 'ping')).start()\n Process(target=ponger, args=(pong_conn, 'pong')).start()\n ping_conn.send('ping')\n",
"step-4": "from multiprocessing import Process, Pipe\nfrom time import sleep\nfrom os import getpid\n\n\ndef ponger(pipe, response):\n while True:\n msg = pipe.recv()\n print(f'{getpid()} receiving: {msg}')\n sleep(1)\n pipe.send(response)\n\n\nif __name__ == '__main__':\n ping_conn, pong_conn = Pipe()\n Process(target=ponger, args=(ping_conn, 'ping')).start()\n Process(target=ponger, args=(pong_conn, 'pong')).start()\n ping_conn.send('ping')\n",
"step-5": "from multiprocessing import Process, Pipe\nfrom time import sleep\nfrom os import getpid\n\n\ndef ponger(pipe, response):\n while True:\n msg = pipe.recv()\n print(f\"{getpid()} receiving: {msg}\")\n sleep(1)\n pipe.send(response)\n\n\nif __name__ == '__main__':\n ping_conn, pong_conn = Pipe()\n\n Process(target=ponger, args=(ping_conn, 'ping')).start()\n Process(target=ponger, args=(pong_conn, 'pong')).start()\n\n ping_conn.send('ping')\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
from urllib.request import urlopen
from bs4 import BeautifulSoup
<|reserved_special_token_1|>
#!/C:\Program Files (x86)\Python35-32
#importar librarias necesarias
from urllib.request import urlopen
from bs4 import BeautifulSoup
|
flexible
|
{
"blob_id": "7a59c8c883a9aaa723175783e01aa62e23503fde",
"index": 376,
"step-1": "<mask token>\n",
"step-2": "from urllib.request import urlopen\nfrom bs4 import BeautifulSoup\n",
"step-3": "#!/C:\\Program Files (x86)\\Python35-32\n\n#importar librarias necesarias\nfrom urllib.request import urlopen\nfrom bs4 import BeautifulSoup\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.