code
stringlengths 13
6.09M
| order_type
stringclasses 2
values | original_example
dict | step_ids
listlengths 1
5
|
|---|---|---|---|
import numpy as np
from random import randint
def combinacaoDeEmbralhamento(qtdeLinhas):
while True:
a = randint(0,qtdeLinhas)
b = randint(0,qtdeLinhas)
if a == b :
continue
else:
break
resp = [[a,b]]
return resp
def embaralhaMatriz(x):
for i in range(qtdeLinhas):
print(i)
combinacaoDeEmbralhamento(x.shape[0])
x[[1,0]] = x[[0,1]]
# MAIN
A = np.zeros(shape=(5,2))
print(A)
print("-------------")
print(A.shape)
|
normal
|
{
"blob_id": "28ed494939d0928bf3ad4f07f58186374e925426",
"index": 7024,
"step-1": "<mask token>\n\n\ndef combinacaoDeEmbralhamento(qtdeLinhas):\n while True:\n a = randint(0, qtdeLinhas)\n b = randint(0, qtdeLinhas)\n if a == b:\n continue\n else:\n break\n resp = [[a, b]]\n return resp\n\n\ndef embaralhaMatriz(x):\n for i in range(qtdeLinhas):\n print(i)\n combinacaoDeEmbralhamento(x.shape[0])\n x[[1, 0]] = x[[0, 1]]\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef combinacaoDeEmbralhamento(qtdeLinhas):\n while True:\n a = randint(0, qtdeLinhas)\n b = randint(0, qtdeLinhas)\n if a == b:\n continue\n else:\n break\n resp = [[a, b]]\n return resp\n\n\ndef embaralhaMatriz(x):\n for i in range(qtdeLinhas):\n print(i)\n combinacaoDeEmbralhamento(x.shape[0])\n x[[1, 0]] = x[[0, 1]]\n\n\n<mask token>\nprint(A)\nprint('-------------')\nprint(A.shape)\n",
"step-3": "<mask token>\n\n\ndef combinacaoDeEmbralhamento(qtdeLinhas):\n while True:\n a = randint(0, qtdeLinhas)\n b = randint(0, qtdeLinhas)\n if a == b:\n continue\n else:\n break\n resp = [[a, b]]\n return resp\n\n\ndef embaralhaMatriz(x):\n for i in range(qtdeLinhas):\n print(i)\n combinacaoDeEmbralhamento(x.shape[0])\n x[[1, 0]] = x[[0, 1]]\n\n\nA = np.zeros(shape=(5, 2))\nprint(A)\nprint('-------------')\nprint(A.shape)\n",
"step-4": "import numpy as np\nfrom random import randint\n\n\ndef combinacaoDeEmbralhamento(qtdeLinhas):\n while True:\n a = randint(0, qtdeLinhas)\n b = randint(0, qtdeLinhas)\n if a == b:\n continue\n else:\n break\n resp = [[a, b]]\n return resp\n\n\ndef embaralhaMatriz(x):\n for i in range(qtdeLinhas):\n print(i)\n combinacaoDeEmbralhamento(x.shape[0])\n x[[1, 0]] = x[[0, 1]]\n\n\nA = np.zeros(shape=(5, 2))\nprint(A)\nprint('-------------')\nprint(A.shape)\n",
"step-5": "import numpy as np\nfrom random import randint\n\ndef combinacaoDeEmbralhamento(qtdeLinhas):\n\twhile True:\n\t\ta = randint(0,qtdeLinhas)\n\t\tb = randint(0,qtdeLinhas)\n\t\tif a == b :\n\t\t\tcontinue\n\t\telse:\n\t\t\tbreak\n\tresp = [[a,b]]\n\treturn resp\n\n\t\n\ndef embaralhaMatriz(x):\n\tfor i in range(qtdeLinhas):\n\t\tprint(i)\n\tcombinacaoDeEmbralhamento(x.shape[0])\n\tx[[1,0]] = x[[0,1]]\n\n\n# MAIN\n\nA = np.zeros(shape=(5,2))\n\nprint(A)\nprint(\"-------------\")\nprint(A.shape)",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
def pin():
print('wqeqwwqe')
from tkinter import *
from tkinter import messagebox
from PIL import Image
from PIL import ImageTk
window = Tk() #创建一个窗口
window.title('爱你吆') #定义窗口标题
window.geometry('400x400+800+200') #定义窗口大小 窗口显示位置
# window.protocol('WM_DELETE_WINDOW', pin) #摧毁窗口,引到另一个函数命令
window.protocol('WM_DELETE_WINDOW')
##############
label = Label(window, text='hey,小姐姐', font=("微软雅黑", 15))
# text 窗口文本 font 设置字体 fg设置字体颜色
label.grid(row=10, column=10) # 网格布局 显示位置
################# 人
window=mainloop()
|
normal
|
{
"blob_id": "55c9fe8caf1983f22d5a752574f590fa129e8017",
"index": 1443,
"step-1": "def pin():\n print('wqeqwwqe')\n\n\n<mask token>\n",
"step-2": "def pin():\n print('wqeqwwqe')\n\n\n<mask token>\nwindow.title('爱你吆')\nwindow.geometry('400x400+800+200')\nwindow.protocol('WM_DELETE_WINDOW')\n<mask token>\nlabel.grid(row=10, column=10)\n<mask token>\n",
"step-3": "def pin():\n print('wqeqwwqe')\n\n\n<mask token>\nwindow = Tk()\nwindow.title('爱你吆')\nwindow.geometry('400x400+800+200')\nwindow.protocol('WM_DELETE_WINDOW')\nlabel = Label(window, text='hey,小姐姐', font=('微软雅黑', 15))\nlabel.grid(row=10, column=10)\nwindow = mainloop()\n",
"step-4": "def pin():\n print('wqeqwwqe')\n\n\nfrom tkinter import *\nfrom tkinter import messagebox\nfrom PIL import Image\nfrom PIL import ImageTk\nwindow = Tk()\nwindow.title('爱你吆')\nwindow.geometry('400x400+800+200')\nwindow.protocol('WM_DELETE_WINDOW')\nlabel = Label(window, text='hey,小姐姐', font=('微软雅黑', 15))\nlabel.grid(row=10, column=10)\nwindow = mainloop()\n",
"step-5": "def pin():\n print('wqeqwwqe')\n\n\n\nfrom tkinter import *\nfrom tkinter import messagebox\nfrom PIL import Image\nfrom PIL import ImageTk\nwindow = Tk() #创建一个窗口\nwindow.title('爱你吆') #定义窗口标题\nwindow.geometry('400x400+800+200') #定义窗口大小 窗口显示位置\n# window.protocol('WM_DELETE_WINDOW', pin) #摧毁窗口,引到另一个函数命令\nwindow.protocol('WM_DELETE_WINDOW')\n##############\nlabel = Label(window, text='hey,小姐姐', font=(\"微软雅黑\", 15))\n# text 窗口文本 font 设置字体 fg设置字体颜色\nlabel.grid(row=10, column=10) # 网格布局 显示位置\n################# 人\nwindow=mainloop()",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
renWin.SetMultiSamples(0)
renWin.AddRenderer(ren1)
renWin.AddRenderer(ren2)
<|reserved_special_token_0|>
iren.SetRenderWindow(renWin)
<|reserved_special_token_0|>
pl3d.SetXYZFileName('' + str(VTK_DATA_ROOT) + '/Data/combxyz.bin')
pl3d.SetQFileName('' + str(VTK_DATA_ROOT) + '/Data/combq.bin')
pl3d.SetScalarFunctionNumber(110)
pl3d.SetVectorFunctionNumber(202)
pl3d.Update()
<|reserved_special_token_0|>
probeLine.SetPoint1(1, 1, 29)
probeLine.SetPoint2(16.5, 5, 31.7693)
probeLine.SetResolution(500)
<|reserved_special_token_0|>
probe.SetInputConnection(probeLine.GetOutputPort())
probe.SetSourceData(output)
probe.Update()
<|reserved_special_token_0|>
probeTube.SetInputData(probe.GetPolyDataOutput())
probeTube.SetNumberOfSides(5)
probeTube.SetRadius(0.05)
<|reserved_special_token_0|>
probeMapper.SetInputConnection(probeTube.GetOutputPort())
probeMapper.SetScalarRange(output.GetScalarRange())
<|reserved_special_token_0|>
probeActor.SetMapper(probeMapper)
<|reserved_special_token_0|>
displayLine.SetPoint1(0, 0, 0)
displayLine.SetPoint2(1, 0, 0)
displayLine.SetResolution(probeLine.GetResolution())
<|reserved_special_token_0|>
displayMerge.SetGeometryConnection(displayLine.GetOutputPort())
displayMerge.SetScalarsData(probe.GetPolyDataOutput())
displayMerge.Update()
<|reserved_special_token_0|>
displayWarp.SetInputData(displayMerge.GetPolyDataOutput())
displayWarp.SetNormal(0, 1, 0)
displayWarp.SetScaleFactor(1e-06)
displayWarp.Update()
<|reserved_special_token_0|>
displayMapper.SetInputData(displayWarp.GetPolyDataOutput())
displayMapper.SetScalarRange(output.GetScalarRange())
<|reserved_special_token_0|>
displayActor.SetMapper(displayMapper)
<|reserved_special_token_0|>
outline.SetInputData(output)
<|reserved_special_token_0|>
outlineMapper.SetInputConnection(outline.GetOutputPort())
<|reserved_special_token_0|>
outlineActor.SetMapper(outlineMapper)
outlineActor.GetProperty().SetColor(0, 0, 0)
ren1.AddActor(outlineActor)
ren1.AddActor(probeActor)
ren1.SetBackground(1, 1, 1)
ren1.SetViewport(0, 0.25, 1, 1)
ren2.AddActor(displayActor)
ren2.SetBackground(0, 0, 0)
ren2.SetViewport(0, 0, 1, 0.25)
renWin.SetSize(300, 300)
ren1.ResetCamera()
<|reserved_special_token_0|>
cam1.SetClippingRange(3.95297, 50)
cam1.SetFocalPoint(8.88908, 0.595038, 29.3342)
cam1.SetPosition(9.9, -26, 41)
cam1.SetViewUp(0.060772, -0.319905, 0.945498)
ren2.ResetCamera()
<|reserved_special_token_0|>
cam2.ParallelProjectionOn()
cam2.SetParallelScale(0.15)
iren.Initialize()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
VTK_DATA_ROOT = vtkGetDataRoot()
ren1 = vtk.vtkRenderer()
ren2 = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.SetMultiSamples(0)
renWin.AddRenderer(ren1)
renWin.AddRenderer(ren2)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
pl3d = vtk.vtkMultiBlockPLOT3DReader()
pl3d.SetXYZFileName('' + str(VTK_DATA_ROOT) + '/Data/combxyz.bin')
pl3d.SetQFileName('' + str(VTK_DATA_ROOT) + '/Data/combq.bin')
pl3d.SetScalarFunctionNumber(110)
pl3d.SetVectorFunctionNumber(202)
pl3d.Update()
output = pl3d.GetOutput().GetBlock(0)
probeLine = vtk.vtkLineSource()
probeLine.SetPoint1(1, 1, 29)
probeLine.SetPoint2(16.5, 5, 31.7693)
probeLine.SetResolution(500)
probe = vtk.vtkProbeFilter()
probe.SetInputConnection(probeLine.GetOutputPort())
probe.SetSourceData(output)
probe.Update()
probeTube = vtk.vtkTubeFilter()
probeTube.SetInputData(probe.GetPolyDataOutput())
probeTube.SetNumberOfSides(5)
probeTube.SetRadius(0.05)
probeMapper = vtk.vtkPolyDataMapper()
probeMapper.SetInputConnection(probeTube.GetOutputPort())
probeMapper.SetScalarRange(output.GetScalarRange())
probeActor = vtk.vtkActor()
probeActor.SetMapper(probeMapper)
displayLine = vtk.vtkLineSource()
displayLine.SetPoint1(0, 0, 0)
displayLine.SetPoint2(1, 0, 0)
displayLine.SetResolution(probeLine.GetResolution())
displayMerge = vtk.vtkMergeFilter()
displayMerge.SetGeometryConnection(displayLine.GetOutputPort())
displayMerge.SetScalarsData(probe.GetPolyDataOutput())
displayMerge.Update()
displayWarp = vtk.vtkWarpScalar()
displayWarp.SetInputData(displayMerge.GetPolyDataOutput())
displayWarp.SetNormal(0, 1, 0)
displayWarp.SetScaleFactor(1e-06)
displayWarp.Update()
displayMapper = vtk.vtkPolyDataMapper()
displayMapper.SetInputData(displayWarp.GetPolyDataOutput())
displayMapper.SetScalarRange(output.GetScalarRange())
displayActor = vtk.vtkActor()
displayActor.SetMapper(displayMapper)
outline = vtk.vtkStructuredGridOutlineFilter()
outline.SetInputData(output)
outlineMapper = vtk.vtkPolyDataMapper()
outlineMapper.SetInputConnection(outline.GetOutputPort())
outlineActor = vtk.vtkActor()
outlineActor.SetMapper(outlineMapper)
outlineActor.GetProperty().SetColor(0, 0, 0)
ren1.AddActor(outlineActor)
ren1.AddActor(probeActor)
ren1.SetBackground(1, 1, 1)
ren1.SetViewport(0, 0.25, 1, 1)
ren2.AddActor(displayActor)
ren2.SetBackground(0, 0, 0)
ren2.SetViewport(0, 0, 1, 0.25)
renWin.SetSize(300, 300)
ren1.ResetCamera()
cam1 = ren1.GetActiveCamera()
cam1.SetClippingRange(3.95297, 50)
cam1.SetFocalPoint(8.88908, 0.595038, 29.3342)
cam1.SetPosition(9.9, -26, 41)
cam1.SetViewUp(0.060772, -0.319905, 0.945498)
ren2.ResetCamera()
cam2 = ren2.GetActiveCamera()
cam2.ParallelProjectionOn()
cam2.SetParallelScale(0.15)
iren.Initialize()
<|reserved_special_token_1|>
import vtk
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
ren1 = vtk.vtkRenderer()
ren2 = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.SetMultiSamples(0)
renWin.AddRenderer(ren1)
renWin.AddRenderer(ren2)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
pl3d = vtk.vtkMultiBlockPLOT3DReader()
pl3d.SetXYZFileName('' + str(VTK_DATA_ROOT) + '/Data/combxyz.bin')
pl3d.SetQFileName('' + str(VTK_DATA_ROOT) + '/Data/combq.bin')
pl3d.SetScalarFunctionNumber(110)
pl3d.SetVectorFunctionNumber(202)
pl3d.Update()
output = pl3d.GetOutput().GetBlock(0)
probeLine = vtk.vtkLineSource()
probeLine.SetPoint1(1, 1, 29)
probeLine.SetPoint2(16.5, 5, 31.7693)
probeLine.SetResolution(500)
probe = vtk.vtkProbeFilter()
probe.SetInputConnection(probeLine.GetOutputPort())
probe.SetSourceData(output)
probe.Update()
probeTube = vtk.vtkTubeFilter()
probeTube.SetInputData(probe.GetPolyDataOutput())
probeTube.SetNumberOfSides(5)
probeTube.SetRadius(0.05)
probeMapper = vtk.vtkPolyDataMapper()
probeMapper.SetInputConnection(probeTube.GetOutputPort())
probeMapper.SetScalarRange(output.GetScalarRange())
probeActor = vtk.vtkActor()
probeActor.SetMapper(probeMapper)
displayLine = vtk.vtkLineSource()
displayLine.SetPoint1(0, 0, 0)
displayLine.SetPoint2(1, 0, 0)
displayLine.SetResolution(probeLine.GetResolution())
displayMerge = vtk.vtkMergeFilter()
displayMerge.SetGeometryConnection(displayLine.GetOutputPort())
displayMerge.SetScalarsData(probe.GetPolyDataOutput())
displayMerge.Update()
displayWarp = vtk.vtkWarpScalar()
displayWarp.SetInputData(displayMerge.GetPolyDataOutput())
displayWarp.SetNormal(0, 1, 0)
displayWarp.SetScaleFactor(1e-06)
displayWarp.Update()
displayMapper = vtk.vtkPolyDataMapper()
displayMapper.SetInputData(displayWarp.GetPolyDataOutput())
displayMapper.SetScalarRange(output.GetScalarRange())
displayActor = vtk.vtkActor()
displayActor.SetMapper(displayMapper)
outline = vtk.vtkStructuredGridOutlineFilter()
outline.SetInputData(output)
outlineMapper = vtk.vtkPolyDataMapper()
outlineMapper.SetInputConnection(outline.GetOutputPort())
outlineActor = vtk.vtkActor()
outlineActor.SetMapper(outlineMapper)
outlineActor.GetProperty().SetColor(0, 0, 0)
ren1.AddActor(outlineActor)
ren1.AddActor(probeActor)
ren1.SetBackground(1, 1, 1)
ren1.SetViewport(0, 0.25, 1, 1)
ren2.AddActor(displayActor)
ren2.SetBackground(0, 0, 0)
ren2.SetViewport(0, 0, 1, 0.25)
renWin.SetSize(300, 300)
ren1.ResetCamera()
cam1 = ren1.GetActiveCamera()
cam1.SetClippingRange(3.95297, 50)
cam1.SetFocalPoint(8.88908, 0.595038, 29.3342)
cam1.SetPosition(9.9, -26, 41)
cam1.SetViewUp(0.060772, -0.319905, 0.945498)
ren2.ResetCamera()
cam2 = ren2.GetActiveCamera()
cam2.ParallelProjectionOn()
cam2.SetParallelScale(0.15)
iren.Initialize()
<|reserved_special_token_1|>
#!/usr/bin/env python
import vtk
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# Create the RenderWindow, Renderer and both Actors
#
ren1 = vtk.vtkRenderer()
ren2 = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.SetMultiSamples(0)
renWin.AddRenderer(ren1)
renWin.AddRenderer(ren2)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# create pipeline
#
pl3d = vtk.vtkMultiBlockPLOT3DReader()
pl3d.SetXYZFileName("" + str(VTK_DATA_ROOT) + "/Data/combxyz.bin")
pl3d.SetQFileName("" + str(VTK_DATA_ROOT) + "/Data/combq.bin")
pl3d.SetScalarFunctionNumber(110)
pl3d.SetVectorFunctionNumber(202)
pl3d.Update()
output = pl3d.GetOutput().GetBlock(0)
probeLine = vtk.vtkLineSource()
probeLine.SetPoint1(1,1,29)
probeLine.SetPoint2(16.5,5,31.7693)
probeLine.SetResolution(500)
probe = vtk.vtkProbeFilter()
probe.SetInputConnection(probeLine.GetOutputPort())
probe.SetSourceData(output)
probe.Update()
probeTube = vtk.vtkTubeFilter()
probeTube.SetInputData(probe.GetPolyDataOutput())
probeTube.SetNumberOfSides(5)
probeTube.SetRadius(.05)
probeMapper = vtk.vtkPolyDataMapper()
probeMapper.SetInputConnection(probeTube.GetOutputPort())
probeMapper.SetScalarRange(output.GetScalarRange())
probeActor = vtk.vtkActor()
probeActor.SetMapper(probeMapper)
displayLine = vtk.vtkLineSource()
displayLine.SetPoint1(0,0,0)
displayLine.SetPoint2(1,0,0)
displayLine.SetResolution(probeLine.GetResolution())
displayMerge = vtk.vtkMergeFilter()
displayMerge.SetGeometryConnection(displayLine.GetOutputPort())
displayMerge.SetScalarsData(probe.GetPolyDataOutput())
displayMerge.Update()
displayWarp = vtk.vtkWarpScalar()
displayWarp.SetInputData(displayMerge.GetPolyDataOutput())
displayWarp.SetNormal(0,1,0)
displayWarp.SetScaleFactor(.000001)
displayWarp.Update()
displayMapper = vtk.vtkPolyDataMapper()
displayMapper.SetInputData(displayWarp.GetPolyDataOutput())
displayMapper.SetScalarRange(output.GetScalarRange())
displayActor = vtk.vtkActor()
displayActor.SetMapper(displayMapper)
outline = vtk.vtkStructuredGridOutlineFilter()
outline.SetInputData(output)
outlineMapper = vtk.vtkPolyDataMapper()
outlineMapper.SetInputConnection(outline.GetOutputPort())
outlineActor = vtk.vtkActor()
outlineActor.SetMapper(outlineMapper)
outlineActor.GetProperty().SetColor(0,0,0)
ren1.AddActor(outlineActor)
ren1.AddActor(probeActor)
ren1.SetBackground(1,1,1)
ren1.SetViewport(0,.25,1,1)
ren2.AddActor(displayActor)
ren2.SetBackground(0,0,0)
ren2.SetViewport(0,0,1,.25)
renWin.SetSize(300,300)
ren1.ResetCamera()
cam1 = ren1.GetActiveCamera()
cam1.SetClippingRange(3.95297,50)
cam1.SetFocalPoint(8.88908,0.595038,29.3342)
cam1.SetPosition(9.9,-26,41)
cam1.SetViewUp(0.060772,-0.319905,0.945498)
ren2.ResetCamera()
cam2 = ren2.GetActiveCamera()
cam2.ParallelProjectionOn()
cam2.SetParallelScale(.15)
iren.Initialize()
# render the image
#
# prevent the tk window from showing up then start the event loop
# --- end of script --
|
flexible
|
{
"blob_id": "7399612f64eb8e500bc676e6d507be5fe375f40f",
"index": 3746,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nrenWin.SetMultiSamples(0)\nrenWin.AddRenderer(ren1)\nrenWin.AddRenderer(ren2)\n<mask token>\niren.SetRenderWindow(renWin)\n<mask token>\npl3d.SetXYZFileName('' + str(VTK_DATA_ROOT) + '/Data/combxyz.bin')\npl3d.SetQFileName('' + str(VTK_DATA_ROOT) + '/Data/combq.bin')\npl3d.SetScalarFunctionNumber(110)\npl3d.SetVectorFunctionNumber(202)\npl3d.Update()\n<mask token>\nprobeLine.SetPoint1(1, 1, 29)\nprobeLine.SetPoint2(16.5, 5, 31.7693)\nprobeLine.SetResolution(500)\n<mask token>\nprobe.SetInputConnection(probeLine.GetOutputPort())\nprobe.SetSourceData(output)\nprobe.Update()\n<mask token>\nprobeTube.SetInputData(probe.GetPolyDataOutput())\nprobeTube.SetNumberOfSides(5)\nprobeTube.SetRadius(0.05)\n<mask token>\nprobeMapper.SetInputConnection(probeTube.GetOutputPort())\nprobeMapper.SetScalarRange(output.GetScalarRange())\n<mask token>\nprobeActor.SetMapper(probeMapper)\n<mask token>\ndisplayLine.SetPoint1(0, 0, 0)\ndisplayLine.SetPoint2(1, 0, 0)\ndisplayLine.SetResolution(probeLine.GetResolution())\n<mask token>\ndisplayMerge.SetGeometryConnection(displayLine.GetOutputPort())\ndisplayMerge.SetScalarsData(probe.GetPolyDataOutput())\ndisplayMerge.Update()\n<mask token>\ndisplayWarp.SetInputData(displayMerge.GetPolyDataOutput())\ndisplayWarp.SetNormal(0, 1, 0)\ndisplayWarp.SetScaleFactor(1e-06)\ndisplayWarp.Update()\n<mask token>\ndisplayMapper.SetInputData(displayWarp.GetPolyDataOutput())\ndisplayMapper.SetScalarRange(output.GetScalarRange())\n<mask token>\ndisplayActor.SetMapper(displayMapper)\n<mask token>\noutline.SetInputData(output)\n<mask token>\noutlineMapper.SetInputConnection(outline.GetOutputPort())\n<mask token>\noutlineActor.SetMapper(outlineMapper)\noutlineActor.GetProperty().SetColor(0, 0, 0)\nren1.AddActor(outlineActor)\nren1.AddActor(probeActor)\nren1.SetBackground(1, 1, 1)\nren1.SetViewport(0, 0.25, 1, 1)\nren2.AddActor(displayActor)\nren2.SetBackground(0, 0, 0)\nren2.SetViewport(0, 0, 1, 0.25)\nrenWin.SetSize(300, 300)\nren1.ResetCamera()\n<mask token>\ncam1.SetClippingRange(3.95297, 50)\ncam1.SetFocalPoint(8.88908, 0.595038, 29.3342)\ncam1.SetPosition(9.9, -26, 41)\ncam1.SetViewUp(0.060772, -0.319905, 0.945498)\nren2.ResetCamera()\n<mask token>\ncam2.ParallelProjectionOn()\ncam2.SetParallelScale(0.15)\niren.Initialize()\n",
"step-3": "<mask token>\nVTK_DATA_ROOT = vtkGetDataRoot()\nren1 = vtk.vtkRenderer()\nren2 = vtk.vtkRenderer()\nrenWin = vtk.vtkRenderWindow()\nrenWin.SetMultiSamples(0)\nrenWin.AddRenderer(ren1)\nrenWin.AddRenderer(ren2)\niren = vtk.vtkRenderWindowInteractor()\niren.SetRenderWindow(renWin)\npl3d = vtk.vtkMultiBlockPLOT3DReader()\npl3d.SetXYZFileName('' + str(VTK_DATA_ROOT) + '/Data/combxyz.bin')\npl3d.SetQFileName('' + str(VTK_DATA_ROOT) + '/Data/combq.bin')\npl3d.SetScalarFunctionNumber(110)\npl3d.SetVectorFunctionNumber(202)\npl3d.Update()\noutput = pl3d.GetOutput().GetBlock(0)\nprobeLine = vtk.vtkLineSource()\nprobeLine.SetPoint1(1, 1, 29)\nprobeLine.SetPoint2(16.5, 5, 31.7693)\nprobeLine.SetResolution(500)\nprobe = vtk.vtkProbeFilter()\nprobe.SetInputConnection(probeLine.GetOutputPort())\nprobe.SetSourceData(output)\nprobe.Update()\nprobeTube = vtk.vtkTubeFilter()\nprobeTube.SetInputData(probe.GetPolyDataOutput())\nprobeTube.SetNumberOfSides(5)\nprobeTube.SetRadius(0.05)\nprobeMapper = vtk.vtkPolyDataMapper()\nprobeMapper.SetInputConnection(probeTube.GetOutputPort())\nprobeMapper.SetScalarRange(output.GetScalarRange())\nprobeActor = vtk.vtkActor()\nprobeActor.SetMapper(probeMapper)\ndisplayLine = vtk.vtkLineSource()\ndisplayLine.SetPoint1(0, 0, 0)\ndisplayLine.SetPoint2(1, 0, 0)\ndisplayLine.SetResolution(probeLine.GetResolution())\ndisplayMerge = vtk.vtkMergeFilter()\ndisplayMerge.SetGeometryConnection(displayLine.GetOutputPort())\ndisplayMerge.SetScalarsData(probe.GetPolyDataOutput())\ndisplayMerge.Update()\ndisplayWarp = vtk.vtkWarpScalar()\ndisplayWarp.SetInputData(displayMerge.GetPolyDataOutput())\ndisplayWarp.SetNormal(0, 1, 0)\ndisplayWarp.SetScaleFactor(1e-06)\ndisplayWarp.Update()\ndisplayMapper = vtk.vtkPolyDataMapper()\ndisplayMapper.SetInputData(displayWarp.GetPolyDataOutput())\ndisplayMapper.SetScalarRange(output.GetScalarRange())\ndisplayActor = vtk.vtkActor()\ndisplayActor.SetMapper(displayMapper)\noutline = vtk.vtkStructuredGridOutlineFilter()\noutline.SetInputData(output)\noutlineMapper = vtk.vtkPolyDataMapper()\noutlineMapper.SetInputConnection(outline.GetOutputPort())\noutlineActor = vtk.vtkActor()\noutlineActor.SetMapper(outlineMapper)\noutlineActor.GetProperty().SetColor(0, 0, 0)\nren1.AddActor(outlineActor)\nren1.AddActor(probeActor)\nren1.SetBackground(1, 1, 1)\nren1.SetViewport(0, 0.25, 1, 1)\nren2.AddActor(displayActor)\nren2.SetBackground(0, 0, 0)\nren2.SetViewport(0, 0, 1, 0.25)\nrenWin.SetSize(300, 300)\nren1.ResetCamera()\ncam1 = ren1.GetActiveCamera()\ncam1.SetClippingRange(3.95297, 50)\ncam1.SetFocalPoint(8.88908, 0.595038, 29.3342)\ncam1.SetPosition(9.9, -26, 41)\ncam1.SetViewUp(0.060772, -0.319905, 0.945498)\nren2.ResetCamera()\ncam2 = ren2.GetActiveCamera()\ncam2.ParallelProjectionOn()\ncam2.SetParallelScale(0.15)\niren.Initialize()\n",
"step-4": "import vtk\nfrom vtk.util.misc import vtkGetDataRoot\nVTK_DATA_ROOT = vtkGetDataRoot()\nren1 = vtk.vtkRenderer()\nren2 = vtk.vtkRenderer()\nrenWin = vtk.vtkRenderWindow()\nrenWin.SetMultiSamples(0)\nrenWin.AddRenderer(ren1)\nrenWin.AddRenderer(ren2)\niren = vtk.vtkRenderWindowInteractor()\niren.SetRenderWindow(renWin)\npl3d = vtk.vtkMultiBlockPLOT3DReader()\npl3d.SetXYZFileName('' + str(VTK_DATA_ROOT) + '/Data/combxyz.bin')\npl3d.SetQFileName('' + str(VTK_DATA_ROOT) + '/Data/combq.bin')\npl3d.SetScalarFunctionNumber(110)\npl3d.SetVectorFunctionNumber(202)\npl3d.Update()\noutput = pl3d.GetOutput().GetBlock(0)\nprobeLine = vtk.vtkLineSource()\nprobeLine.SetPoint1(1, 1, 29)\nprobeLine.SetPoint2(16.5, 5, 31.7693)\nprobeLine.SetResolution(500)\nprobe = vtk.vtkProbeFilter()\nprobe.SetInputConnection(probeLine.GetOutputPort())\nprobe.SetSourceData(output)\nprobe.Update()\nprobeTube = vtk.vtkTubeFilter()\nprobeTube.SetInputData(probe.GetPolyDataOutput())\nprobeTube.SetNumberOfSides(5)\nprobeTube.SetRadius(0.05)\nprobeMapper = vtk.vtkPolyDataMapper()\nprobeMapper.SetInputConnection(probeTube.GetOutputPort())\nprobeMapper.SetScalarRange(output.GetScalarRange())\nprobeActor = vtk.vtkActor()\nprobeActor.SetMapper(probeMapper)\ndisplayLine = vtk.vtkLineSource()\ndisplayLine.SetPoint1(0, 0, 0)\ndisplayLine.SetPoint2(1, 0, 0)\ndisplayLine.SetResolution(probeLine.GetResolution())\ndisplayMerge = vtk.vtkMergeFilter()\ndisplayMerge.SetGeometryConnection(displayLine.GetOutputPort())\ndisplayMerge.SetScalarsData(probe.GetPolyDataOutput())\ndisplayMerge.Update()\ndisplayWarp = vtk.vtkWarpScalar()\ndisplayWarp.SetInputData(displayMerge.GetPolyDataOutput())\ndisplayWarp.SetNormal(0, 1, 0)\ndisplayWarp.SetScaleFactor(1e-06)\ndisplayWarp.Update()\ndisplayMapper = vtk.vtkPolyDataMapper()\ndisplayMapper.SetInputData(displayWarp.GetPolyDataOutput())\ndisplayMapper.SetScalarRange(output.GetScalarRange())\ndisplayActor = vtk.vtkActor()\ndisplayActor.SetMapper(displayMapper)\noutline = vtk.vtkStructuredGridOutlineFilter()\noutline.SetInputData(output)\noutlineMapper = vtk.vtkPolyDataMapper()\noutlineMapper.SetInputConnection(outline.GetOutputPort())\noutlineActor = vtk.vtkActor()\noutlineActor.SetMapper(outlineMapper)\noutlineActor.GetProperty().SetColor(0, 0, 0)\nren1.AddActor(outlineActor)\nren1.AddActor(probeActor)\nren1.SetBackground(1, 1, 1)\nren1.SetViewport(0, 0.25, 1, 1)\nren2.AddActor(displayActor)\nren2.SetBackground(0, 0, 0)\nren2.SetViewport(0, 0, 1, 0.25)\nrenWin.SetSize(300, 300)\nren1.ResetCamera()\ncam1 = ren1.GetActiveCamera()\ncam1.SetClippingRange(3.95297, 50)\ncam1.SetFocalPoint(8.88908, 0.595038, 29.3342)\ncam1.SetPosition(9.9, -26, 41)\ncam1.SetViewUp(0.060772, -0.319905, 0.945498)\nren2.ResetCamera()\ncam2 = ren2.GetActiveCamera()\ncam2.ParallelProjectionOn()\ncam2.SetParallelScale(0.15)\niren.Initialize()\n",
"step-5": "#!/usr/bin/env python\nimport vtk\nfrom vtk.util.misc import vtkGetDataRoot\nVTK_DATA_ROOT = vtkGetDataRoot()\n\n# Create the RenderWindow, Renderer and both Actors\n#\nren1 = vtk.vtkRenderer()\nren2 = vtk.vtkRenderer()\nrenWin = vtk.vtkRenderWindow()\nrenWin.SetMultiSamples(0)\nrenWin.AddRenderer(ren1)\nrenWin.AddRenderer(ren2)\niren = vtk.vtkRenderWindowInteractor()\niren.SetRenderWindow(renWin)\n# create pipeline\n#\npl3d = vtk.vtkMultiBlockPLOT3DReader()\npl3d.SetXYZFileName(\"\" + str(VTK_DATA_ROOT) + \"/Data/combxyz.bin\")\npl3d.SetQFileName(\"\" + str(VTK_DATA_ROOT) + \"/Data/combq.bin\")\npl3d.SetScalarFunctionNumber(110)\npl3d.SetVectorFunctionNumber(202)\npl3d.Update()\noutput = pl3d.GetOutput().GetBlock(0)\nprobeLine = vtk.vtkLineSource()\nprobeLine.SetPoint1(1,1,29)\nprobeLine.SetPoint2(16.5,5,31.7693)\nprobeLine.SetResolution(500)\nprobe = vtk.vtkProbeFilter()\nprobe.SetInputConnection(probeLine.GetOutputPort())\nprobe.SetSourceData(output)\nprobe.Update()\nprobeTube = vtk.vtkTubeFilter()\nprobeTube.SetInputData(probe.GetPolyDataOutput())\nprobeTube.SetNumberOfSides(5)\nprobeTube.SetRadius(.05)\nprobeMapper = vtk.vtkPolyDataMapper()\nprobeMapper.SetInputConnection(probeTube.GetOutputPort())\nprobeMapper.SetScalarRange(output.GetScalarRange())\nprobeActor = vtk.vtkActor()\nprobeActor.SetMapper(probeMapper)\ndisplayLine = vtk.vtkLineSource()\ndisplayLine.SetPoint1(0,0,0)\ndisplayLine.SetPoint2(1,0,0)\ndisplayLine.SetResolution(probeLine.GetResolution())\ndisplayMerge = vtk.vtkMergeFilter()\ndisplayMerge.SetGeometryConnection(displayLine.GetOutputPort())\ndisplayMerge.SetScalarsData(probe.GetPolyDataOutput())\ndisplayMerge.Update()\ndisplayWarp = vtk.vtkWarpScalar()\ndisplayWarp.SetInputData(displayMerge.GetPolyDataOutput())\ndisplayWarp.SetNormal(0,1,0)\ndisplayWarp.SetScaleFactor(.000001)\ndisplayWarp.Update()\ndisplayMapper = vtk.vtkPolyDataMapper()\ndisplayMapper.SetInputData(displayWarp.GetPolyDataOutput())\ndisplayMapper.SetScalarRange(output.GetScalarRange())\ndisplayActor = vtk.vtkActor()\ndisplayActor.SetMapper(displayMapper)\noutline = vtk.vtkStructuredGridOutlineFilter()\noutline.SetInputData(output)\noutlineMapper = vtk.vtkPolyDataMapper()\noutlineMapper.SetInputConnection(outline.GetOutputPort())\noutlineActor = vtk.vtkActor()\noutlineActor.SetMapper(outlineMapper)\noutlineActor.GetProperty().SetColor(0,0,0)\nren1.AddActor(outlineActor)\nren1.AddActor(probeActor)\nren1.SetBackground(1,1,1)\nren1.SetViewport(0,.25,1,1)\nren2.AddActor(displayActor)\nren2.SetBackground(0,0,0)\nren2.SetViewport(0,0,1,.25)\nrenWin.SetSize(300,300)\nren1.ResetCamera()\ncam1 = ren1.GetActiveCamera()\ncam1.SetClippingRange(3.95297,50)\ncam1.SetFocalPoint(8.88908,0.595038,29.3342)\ncam1.SetPosition(9.9,-26,41)\ncam1.SetViewUp(0.060772,-0.319905,0.945498)\nren2.ResetCamera()\ncam2 = ren2.GetActiveCamera()\ncam2.ParallelProjectionOn()\ncam2.SetParallelScale(.15)\niren.Initialize()\n# render the image\n#\n# prevent the tk window from showing up then start the event loop\n# --- end of script --\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-07-20 08:05
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Child',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=128)),
('age', models.IntegerField(choices=[(-1, 'not defined'), (0, '0 - 1'), (1, '1'), (2, '2'), (3, '3'), (4, '4'), (5, '5'), (6, '6')], default=-1)),
('sex', models.IntegerField(choices=[(1, 'dziewczynka'), (2, 'chłopiec')], default=1)),
('whose_child', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Message',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('content', models.TextField()),
('creation_date', models.DateTimeField(default=django.utils.timezone.now)),
('is_read', models.BooleanField(default=False)),
('receiver', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='message_receiver', to=settings.AUTH_USER_MODEL)),
('sender', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Parent',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('children', models.ManyToManyField(to='placyk_app.Child')),
],
),
migrations.CreateModel(
name='Pground',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('place', models.CharField(max_length=128)),
('description', models.TextField()),
],
),
migrations.CreateModel(
name='Quarter',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(choices=[('not defined', 0), ('Bronowice Małe', 1), ('Krowodrza', 2)], default='not defined', max_length=64)),
],
),
migrations.CreateModel(
name='Visit',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('time_from', models.DateTimeField()),
('time_to', models.DateTimeField()),
('pground', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='placyk_app.Pground')),
('who', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.AddField(
model_name='pground',
name='quarter',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='placyk_app.Quarter'),
),
migrations.AddField(
model_name='parent',
name='quarter',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='placyk_app.Quarter'),
),
migrations.AddField(
model_name='parent',
name='user',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
|
normal
|
{
"blob_id": "e68d872232b3eab4c33cbbe4376be7dd788888e2",
"index": 1242,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = [migrations.swappable_dependency(settings.AUTH_USER_MODEL)]\n operations = [migrations.CreateModel(name='Child', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('name', models.CharField(max_length=\n 128)), ('age', models.IntegerField(choices=[(-1, 'not defined'), (0,\n '0 - 1'), (1, '1'), (2, '2'), (3, '3'), (4, '4'), (5, '5'), (6, '6'\n )], default=-1)), ('sex', models.IntegerField(choices=[(1,\n 'dziewczynka'), (2, 'chłopiec')], default=1)), ('whose_child',\n models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=\n settings.AUTH_USER_MODEL))]), migrations.CreateModel(name='Message',\n fields=[('id', models.AutoField(auto_created=True, primary_key=True,\n serialize=False, verbose_name='ID')), ('content', models.TextField(\n )), ('creation_date', models.DateTimeField(default=django.utils.\n timezone.now)), ('is_read', models.BooleanField(default=False)), (\n 'receiver', models.ForeignKey(on_delete=django.db.models.deletion.\n CASCADE, related_name='message_receiver', to=settings.\n AUTH_USER_MODEL)), ('sender', models.ForeignKey(on_delete=django.db\n .models.deletion.CASCADE, to=settings.AUTH_USER_MODEL))]),\n migrations.CreateModel(name='Parent', fields=[('id', models.\n AutoField(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')), ('children', models.ManyToManyField(to=\n 'placyk_app.Child'))]), migrations.CreateModel(name='Pground',\n fields=[('id', models.AutoField(auto_created=True, primary_key=True,\n serialize=False, verbose_name='ID')), ('place', models.CharField(\n max_length=128)), ('description', models.TextField())]), migrations\n .CreateModel(name='Quarter', fields=[('id', models.AutoField(\n auto_created=True, primary_key=True, serialize=False, verbose_name=\n 'ID')), ('name', models.CharField(choices=[('not defined', 0), (\n 'Bronowice Małe', 1), ('Krowodrza', 2)], default='not defined',\n max_length=64))]), migrations.CreateModel(name='Visit', fields=[(\n 'id', models.AutoField(auto_created=True, primary_key=True,\n serialize=False, verbose_name='ID')), ('time_from', models.\n DateTimeField()), ('time_to', models.DateTimeField()), ('pground',\n models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=\n 'placyk_app.Pground')), ('who', models.ForeignKey(on_delete=django.\n db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL))]),\n migrations.AddField(model_name='pground', name='quarter', field=\n models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=\n 'placyk_app.Quarter')), migrations.AddField(model_name='parent',\n name='quarter', field=models.ForeignKey(on_delete=django.db.models.\n deletion.CASCADE, to='placyk_app.Quarter')), migrations.AddField(\n model_name='parent', name='user', field=models.OneToOneField(\n on_delete=django.db.models.deletion.CASCADE, to=settings.\n AUTH_USER_MODEL))]\n",
"step-4": "from __future__ import unicode_literals\nfrom django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\nimport django.utils.timezone\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = [migrations.swappable_dependency(settings.AUTH_USER_MODEL)]\n operations = [migrations.CreateModel(name='Child', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('name', models.CharField(max_length=\n 128)), ('age', models.IntegerField(choices=[(-1, 'not defined'), (0,\n '0 - 1'), (1, '1'), (2, '2'), (3, '3'), (4, '4'), (5, '5'), (6, '6'\n )], default=-1)), ('sex', models.IntegerField(choices=[(1,\n 'dziewczynka'), (2, 'chłopiec')], default=1)), ('whose_child',\n models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=\n settings.AUTH_USER_MODEL))]), migrations.CreateModel(name='Message',\n fields=[('id', models.AutoField(auto_created=True, primary_key=True,\n serialize=False, verbose_name='ID')), ('content', models.TextField(\n )), ('creation_date', models.DateTimeField(default=django.utils.\n timezone.now)), ('is_read', models.BooleanField(default=False)), (\n 'receiver', models.ForeignKey(on_delete=django.db.models.deletion.\n CASCADE, related_name='message_receiver', to=settings.\n AUTH_USER_MODEL)), ('sender', models.ForeignKey(on_delete=django.db\n .models.deletion.CASCADE, to=settings.AUTH_USER_MODEL))]),\n migrations.CreateModel(name='Parent', fields=[('id', models.\n AutoField(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')), ('children', models.ManyToManyField(to=\n 'placyk_app.Child'))]), migrations.CreateModel(name='Pground',\n fields=[('id', models.AutoField(auto_created=True, primary_key=True,\n serialize=False, verbose_name='ID')), ('place', models.CharField(\n max_length=128)), ('description', models.TextField())]), migrations\n .CreateModel(name='Quarter', fields=[('id', models.AutoField(\n auto_created=True, primary_key=True, serialize=False, verbose_name=\n 'ID')), ('name', models.CharField(choices=[('not defined', 0), (\n 'Bronowice Małe', 1), ('Krowodrza', 2)], default='not defined',\n max_length=64))]), migrations.CreateModel(name='Visit', fields=[(\n 'id', models.AutoField(auto_created=True, primary_key=True,\n serialize=False, verbose_name='ID')), ('time_from', models.\n DateTimeField()), ('time_to', models.DateTimeField()), ('pground',\n models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=\n 'placyk_app.Pground')), ('who', models.ForeignKey(on_delete=django.\n db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL))]),\n migrations.AddField(model_name='pground', name='quarter', field=\n models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=\n 'placyk_app.Quarter')), migrations.AddField(model_name='parent',\n name='quarter', field=models.ForeignKey(on_delete=django.db.models.\n deletion.CASCADE, to='placyk_app.Quarter')), migrations.AddField(\n model_name='parent', name='user', field=models.OneToOneField(\n on_delete=django.db.models.deletion.CASCADE, to=settings.\n AUTH_USER_MODEL))]\n",
"step-5": "# -*- coding: utf-8 -*-\n# Generated by Django 1.11.2 on 2017-07-20 08:05\nfrom __future__ import unicode_literals\n\nfrom django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\nimport django.utils.timezone\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Child',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=128)),\n ('age', models.IntegerField(choices=[(-1, 'not defined'), (0, '0 - 1'), (1, '1'), (2, '2'), (3, '3'), (4, '4'), (5, '5'), (6, '6')], default=-1)),\n ('sex', models.IntegerField(choices=[(1, 'dziewczynka'), (2, 'chłopiec')], default=1)),\n ('whose_child', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),\n ],\n ),\n migrations.CreateModel(\n name='Message',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('content', models.TextField()),\n ('creation_date', models.DateTimeField(default=django.utils.timezone.now)),\n ('is_read', models.BooleanField(default=False)),\n ('receiver', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='message_receiver', to=settings.AUTH_USER_MODEL)),\n ('sender', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),\n ],\n ),\n migrations.CreateModel(\n name='Parent',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('children', models.ManyToManyField(to='placyk_app.Child')),\n ],\n ),\n migrations.CreateModel(\n name='Pground',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('place', models.CharField(max_length=128)),\n ('description', models.TextField()),\n ],\n ),\n migrations.CreateModel(\n name='Quarter',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(choices=[('not defined', 0), ('Bronowice Małe', 1), ('Krowodrza', 2)], default='not defined', max_length=64)),\n ],\n ),\n migrations.CreateModel(\n name='Visit',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('time_from', models.DateTimeField()),\n ('time_to', models.DateTimeField()),\n ('pground', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='placyk_app.Pground')),\n ('who', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),\n ],\n ),\n migrations.AddField(\n model_name='pground',\n name='quarter',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='placyk_app.Quarter'),\n ),\n migrations.AddField(\n model_name='parent',\n name='quarter',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='placyk_app.Quarter'),\n ),\n migrations.AddField(\n model_name='parent',\n name='user',\n field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
"""
Create all figures and Excel files that combine data from all embryos in a given genetic background
Copyright (C) 2017 Ahmet Ay, Dong Mai, Soo Bin Kwon, Ha Vu
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import sys, shared, os
from subprocess import call
DEFAULT_NUM_BIN = 5
def main():
args = sys.argv[1:]
num_args = len(args)
req_args = [False]*3
num_bins = DEFAULT_NUM_BIN
if num_args >= 6:
i = 0
while i<num_args-1:
option = args[i]
value = args[i+1]
if (option == '-ne' or option == '--number-of-embryos') and shared.isInt(value):
num_embryos = int(value)
req_args[0] = True
i+=2
elif (option == '-nb' or option == '--number-of-bins') and shared.isInt(value):
num_bins = int(value)
i+=2
elif option == '-d' or option == '--output-directory':
directory = value
req_args[1] = True
i+=2
elif req_args[0] and (option == '-i' or option == '--input-files') and ((num_args-7)==num_embryos):
slice_files = args[i+1:i+1+num_embryos]
for f in slice_files:
if not os.path.isfile(f):
print("combine_embryos.py: File "+f+" does not exist.")
exit(1)
req_args[2] = True
i+=num_embryos
else:
usage()
for arg in req_args:
if not arg:
usage()
else:
usage()
shared.ensureDir(directory)
### Spatial amplitude ###
print("Plotting spatial amplitude...")
command = ["python","plot_spatial_amplitude.py",str(num_embryos)] + slice_files + [directory]
if 1==call(command):
exit(1)
# (compare_spatial_amplitude.py can run after plot_spatial_amplitude.py is run for all genetic backgrounds)
'''### Burst size and frequency ###
# 1. create_burst_data.py
print("Creating data for estimate_burst_parameters.m...")
command = ["python","create_burst_data.py",str(num_embryos)] + slice_files + [directory]
if 1==call(command):
exit(1)
# 2. estimate_burst_parameters.m
print("Running estimate_burst_parameters.m on MATLAB...")
command = ['/Applications/MATLAB_R2016a.app/bin/matlab','-nodesktop','-nosplash','-nodisplay','-r','estimate_burst_parameters(\''+directory+'/burst_data.xls\',\''+directory+'\')']
if 1==call(command): # this will automatically open and run MATLAB
exit(1)
# 3. plot_estimated_burst_parameters.py using the output from estimate_burst_parameters.m
print("Plotting estimated burst size and frequencies...")
command = ["python","plot_estimated_burst_parameters.py",directory+"/burst_result.xls",directory]
if 1==call(command):
exit(1)'''
# (compare_burst_parameters.py can run after plot_estimated_burst_parameters.py is run for all genetic backgrounds)
# Fano factor (to demonstrate burstiness)
command = ["python","plot_fano_factor.py",str(num_embryos)] + slice_files + [directory]
print("Plotting fano factor...")
if 1==call(command):
exit(1)
# (compare_fano_factor.py can run after plot_fano_factor.py is run for all genetic backgrounds)
### Noise ###
# Intrinsic and extrinsic noise
print("Plotting intrinsic and extrinsic noise...")
command = ["python","plot_noise.py",str(num_embryos), str(num_bins)] + slice_files + [directory]
if 1==call(command):
exit(1)
# (compare_noise.py can run after plot_noise.py is run for all genetic backgrounds)
### Scatter plot of her1 and her7 for all bins ####
print("Plotting scatter plots of her1 vs her7 mRNAs in all bins ...")
command = ["python", "plot_scatter_her1_her7.py", directory + "/combined_slices.xls", str(num_bins), directory]
if 1 == call(command):
exit(1)
# Spatial noise (coefficient of variation squared across space)
print("Plotting spatial noise (coefficient of variation squared across space)...")
command = ["python","plot_CVsquared.py",str(num_embryos)] + slice_files + [directory]
if 1==call(command):
exit(1)
# (compare_grouped_CVsquared.py and compare_CV_squared.py can run after plot_CVsquared.py is run for all genetic backgrounds)
### Raw data Excel files ###
command = ["python","create_raw_expression_excel.py",str(num_embryos)] + slice_files + [directory]
print("Creating Excel files for RNA expression levels...")
if 1==call(command):
exit(1)
command = ["python","create_raw_spacial_noise_excel.py",str(num_embryos)] + slice_files + [directory]
print("Creating Excel files for spacial noise...")
if 1==call(command):
exit(1)
command = ["python","create_raw_noise_excel.py",str(num_embryos)] + slice_files + [directory]
print("Creating Excel files for noise...")
if 1==call(command):
exit(1)
def usage():
print("combine_embryos.py: Invalid command-line arguments.")
print("Format: combine_embryos.py -ne <number of embryos> -nb <number of bins> -d <output directory> -i <first embryo's slice.xls> <second embryo's slice.xls> ... <last embryo's slice.xls>")
print("Example: python combine_embryos.py -ne 20 -d ../wildtypefulldataset/output -nb 5 -i ../wildtypefulldataset/output/embryo1/slices.xls \
../wildtypefulldataset/output/embryo2/slices.xls .... ../wildtypefulldataset/output/embryo20/slices.xls")
exit(1)
main()
|
normal
|
{
"blob_id": "86928f4358e4999a5cec8bfad1fe055c9a2778d1",
"index": 6230,
"step-1": "<mask token>\n\n\ndef main():\n args = sys.argv[1:]\n num_args = len(args)\n req_args = [False] * 3\n num_bins = DEFAULT_NUM_BIN\n if num_args >= 6:\n i = 0\n while i < num_args - 1:\n option = args[i]\n value = args[i + 1]\n if (option == '-ne' or option == '--number-of-embryos'\n ) and shared.isInt(value):\n num_embryos = int(value)\n req_args[0] = True\n i += 2\n elif (option == '-nb' or option == '--number-of-bins'\n ) and shared.isInt(value):\n num_bins = int(value)\n i += 2\n elif option == '-d' or option == '--output-directory':\n directory = value\n req_args[1] = True\n i += 2\n elif req_args[0] and (option == '-i' or option == '--input-files'\n ) and num_args - 7 == num_embryos:\n slice_files = args[i + 1:i + 1 + num_embryos]\n for f in slice_files:\n if not os.path.isfile(f):\n print('combine_embryos.py: File ' + f +\n ' does not exist.')\n exit(1)\n req_args[2] = True\n i += num_embryos\n else:\n usage()\n for arg in req_args:\n if not arg:\n usage()\n else:\n usage()\n shared.ensureDir(directory)\n print('Plotting spatial amplitude...')\n command = ['python', 'plot_spatial_amplitude.py', str(num_embryos)\n ] + slice_files + [directory]\n if 1 == call(command):\n exit(1)\n \"\"\"### Burst size and frequency ###\n\t# 1. create_burst_data.py\n\tprint(\"Creating data for estimate_burst_parameters.m...\")\n\tcommand = [\"python\",\"create_burst_data.py\",str(num_embryos)] + slice_files + [directory]\n\tif 1==call(command):\n\t\texit(1)\n\t# 2. estimate_burst_parameters.m \n\tprint(\"Running estimate_burst_parameters.m on MATLAB...\")\n\tcommand = ['/Applications/MATLAB_R2016a.app/bin/matlab','-nodesktop','-nosplash','-nodisplay','-r','estimate_burst_parameters(''+directory+'/burst_data.xls',''+directory+'')']\n\tif 1==call(command): # this will automatically open and run MATLAB\n\t\texit(1)\n\t\n\t# 3. plot_estimated_burst_parameters.py using the output from estimate_burst_parameters.m \t\n\tprint(\"Plotting estimated burst size and frequencies...\")\n\tcommand = [\"python\",\"plot_estimated_burst_parameters.py\",directory+\"/burst_result.xls\",directory]\n\tif 1==call(command):\n\t\texit(1)\"\"\"\n command = ['python', 'plot_fano_factor.py', str(num_embryos)\n ] + slice_files + [directory]\n print('Plotting fano factor...')\n if 1 == call(command):\n exit(1)\n print('Plotting intrinsic and extrinsic noise...')\n command = ['python', 'plot_noise.py', str(num_embryos), str(num_bins)\n ] + slice_files + [directory]\n if 1 == call(command):\n exit(1)\n print('Plotting scatter plots of her1 vs her7 mRNAs in all bins ...')\n command = ['python', 'plot_scatter_her1_her7.py', directory +\n '/combined_slices.xls', str(num_bins), directory]\n if 1 == call(command):\n exit(1)\n print(\n 'Plotting spatial noise (coefficient of variation squared across space)...'\n )\n command = ['python', 'plot_CVsquared.py', str(num_embryos)\n ] + slice_files + [directory]\n if 1 == call(command):\n exit(1)\n command = ['python', 'create_raw_expression_excel.py', str(num_embryos)\n ] + slice_files + [directory]\n print('Creating Excel files for RNA expression levels...')\n if 1 == call(command):\n exit(1)\n command = ['python', 'create_raw_spacial_noise_excel.py', str(num_embryos)\n ] + slice_files + [directory]\n print('Creating Excel files for spacial noise...')\n if 1 == call(command):\n exit(1)\n command = ['python', 'create_raw_noise_excel.py', str(num_embryos)\n ] + slice_files + [directory]\n print('Creating Excel files for noise...')\n if 1 == call(command):\n exit(1)\n\n\ndef usage():\n print('combine_embryos.py: Invalid command-line arguments.')\n print(\n \"Format: combine_embryos.py -ne <number of embryos> -nb <number of bins> -d <output directory> -i <first embryo's slice.xls> <second embryo's slice.xls> ... <last embryo's slice.xls>\"\n )\n print(\n 'Example: python combine_embryos.py -ne 20 -d ../wildtypefulldataset/output -nb 5 -i ../wildtypefulldataset/output/embryo1/slices.xls \\t../wildtypefulldataset/output/embryo2/slices.xls .... ../wildtypefulldataset/output/embryo20/slices.xls'\n )\n exit(1)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef main():\n args = sys.argv[1:]\n num_args = len(args)\n req_args = [False] * 3\n num_bins = DEFAULT_NUM_BIN\n if num_args >= 6:\n i = 0\n while i < num_args - 1:\n option = args[i]\n value = args[i + 1]\n if (option == '-ne' or option == '--number-of-embryos'\n ) and shared.isInt(value):\n num_embryos = int(value)\n req_args[0] = True\n i += 2\n elif (option == '-nb' or option == '--number-of-bins'\n ) and shared.isInt(value):\n num_bins = int(value)\n i += 2\n elif option == '-d' or option == '--output-directory':\n directory = value\n req_args[1] = True\n i += 2\n elif req_args[0] and (option == '-i' or option == '--input-files'\n ) and num_args - 7 == num_embryos:\n slice_files = args[i + 1:i + 1 + num_embryos]\n for f in slice_files:\n if not os.path.isfile(f):\n print('combine_embryos.py: File ' + f +\n ' does not exist.')\n exit(1)\n req_args[2] = True\n i += num_embryos\n else:\n usage()\n for arg in req_args:\n if not arg:\n usage()\n else:\n usage()\n shared.ensureDir(directory)\n print('Plotting spatial amplitude...')\n command = ['python', 'plot_spatial_amplitude.py', str(num_embryos)\n ] + slice_files + [directory]\n if 1 == call(command):\n exit(1)\n \"\"\"### Burst size and frequency ###\n\t# 1. create_burst_data.py\n\tprint(\"Creating data for estimate_burst_parameters.m...\")\n\tcommand = [\"python\",\"create_burst_data.py\",str(num_embryos)] + slice_files + [directory]\n\tif 1==call(command):\n\t\texit(1)\n\t# 2. estimate_burst_parameters.m \n\tprint(\"Running estimate_burst_parameters.m on MATLAB...\")\n\tcommand = ['/Applications/MATLAB_R2016a.app/bin/matlab','-nodesktop','-nosplash','-nodisplay','-r','estimate_burst_parameters(''+directory+'/burst_data.xls',''+directory+'')']\n\tif 1==call(command): # this will automatically open and run MATLAB\n\t\texit(1)\n\t\n\t# 3. plot_estimated_burst_parameters.py using the output from estimate_burst_parameters.m \t\n\tprint(\"Plotting estimated burst size and frequencies...\")\n\tcommand = [\"python\",\"plot_estimated_burst_parameters.py\",directory+\"/burst_result.xls\",directory]\n\tif 1==call(command):\n\t\texit(1)\"\"\"\n command = ['python', 'plot_fano_factor.py', str(num_embryos)\n ] + slice_files + [directory]\n print('Plotting fano factor...')\n if 1 == call(command):\n exit(1)\n print('Plotting intrinsic and extrinsic noise...')\n command = ['python', 'plot_noise.py', str(num_embryos), str(num_bins)\n ] + slice_files + [directory]\n if 1 == call(command):\n exit(1)\n print('Plotting scatter plots of her1 vs her7 mRNAs in all bins ...')\n command = ['python', 'plot_scatter_her1_her7.py', directory +\n '/combined_slices.xls', str(num_bins), directory]\n if 1 == call(command):\n exit(1)\n print(\n 'Plotting spatial noise (coefficient of variation squared across space)...'\n )\n command = ['python', 'plot_CVsquared.py', str(num_embryos)\n ] + slice_files + [directory]\n if 1 == call(command):\n exit(1)\n command = ['python', 'create_raw_expression_excel.py', str(num_embryos)\n ] + slice_files + [directory]\n print('Creating Excel files for RNA expression levels...')\n if 1 == call(command):\n exit(1)\n command = ['python', 'create_raw_spacial_noise_excel.py', str(num_embryos)\n ] + slice_files + [directory]\n print('Creating Excel files for spacial noise...')\n if 1 == call(command):\n exit(1)\n command = ['python', 'create_raw_noise_excel.py', str(num_embryos)\n ] + slice_files + [directory]\n print('Creating Excel files for noise...')\n if 1 == call(command):\n exit(1)\n\n\ndef usage():\n print('combine_embryos.py: Invalid command-line arguments.')\n print(\n \"Format: combine_embryos.py -ne <number of embryos> -nb <number of bins> -d <output directory> -i <first embryo's slice.xls> <second embryo's slice.xls> ... <last embryo's slice.xls>\"\n )\n print(\n 'Example: python combine_embryos.py -ne 20 -d ../wildtypefulldataset/output -nb 5 -i ../wildtypefulldataset/output/embryo1/slices.xls \\t../wildtypefulldataset/output/embryo2/slices.xls .... ../wildtypefulldataset/output/embryo20/slices.xls'\n )\n exit(1)\n\n\nmain()\n",
"step-3": "<mask token>\nDEFAULT_NUM_BIN = 5\n\n\ndef main():\n args = sys.argv[1:]\n num_args = len(args)\n req_args = [False] * 3\n num_bins = DEFAULT_NUM_BIN\n if num_args >= 6:\n i = 0\n while i < num_args - 1:\n option = args[i]\n value = args[i + 1]\n if (option == '-ne' or option == '--number-of-embryos'\n ) and shared.isInt(value):\n num_embryos = int(value)\n req_args[0] = True\n i += 2\n elif (option == '-nb' or option == '--number-of-bins'\n ) and shared.isInt(value):\n num_bins = int(value)\n i += 2\n elif option == '-d' or option == '--output-directory':\n directory = value\n req_args[1] = True\n i += 2\n elif req_args[0] and (option == '-i' or option == '--input-files'\n ) and num_args - 7 == num_embryos:\n slice_files = args[i + 1:i + 1 + num_embryos]\n for f in slice_files:\n if not os.path.isfile(f):\n print('combine_embryos.py: File ' + f +\n ' does not exist.')\n exit(1)\n req_args[2] = True\n i += num_embryos\n else:\n usage()\n for arg in req_args:\n if not arg:\n usage()\n else:\n usage()\n shared.ensureDir(directory)\n print('Plotting spatial amplitude...')\n command = ['python', 'plot_spatial_amplitude.py', str(num_embryos)\n ] + slice_files + [directory]\n if 1 == call(command):\n exit(1)\n \"\"\"### Burst size and frequency ###\n\t# 1. create_burst_data.py\n\tprint(\"Creating data for estimate_burst_parameters.m...\")\n\tcommand = [\"python\",\"create_burst_data.py\",str(num_embryos)] + slice_files + [directory]\n\tif 1==call(command):\n\t\texit(1)\n\t# 2. estimate_burst_parameters.m \n\tprint(\"Running estimate_burst_parameters.m on MATLAB...\")\n\tcommand = ['/Applications/MATLAB_R2016a.app/bin/matlab','-nodesktop','-nosplash','-nodisplay','-r','estimate_burst_parameters(''+directory+'/burst_data.xls',''+directory+'')']\n\tif 1==call(command): # this will automatically open and run MATLAB\n\t\texit(1)\n\t\n\t# 3. plot_estimated_burst_parameters.py using the output from estimate_burst_parameters.m \t\n\tprint(\"Plotting estimated burst size and frequencies...\")\n\tcommand = [\"python\",\"plot_estimated_burst_parameters.py\",directory+\"/burst_result.xls\",directory]\n\tif 1==call(command):\n\t\texit(1)\"\"\"\n command = ['python', 'plot_fano_factor.py', str(num_embryos)\n ] + slice_files + [directory]\n print('Plotting fano factor...')\n if 1 == call(command):\n exit(1)\n print('Plotting intrinsic and extrinsic noise...')\n command = ['python', 'plot_noise.py', str(num_embryos), str(num_bins)\n ] + slice_files + [directory]\n if 1 == call(command):\n exit(1)\n print('Plotting scatter plots of her1 vs her7 mRNAs in all bins ...')\n command = ['python', 'plot_scatter_her1_her7.py', directory +\n '/combined_slices.xls', str(num_bins), directory]\n if 1 == call(command):\n exit(1)\n print(\n 'Plotting spatial noise (coefficient of variation squared across space)...'\n )\n command = ['python', 'plot_CVsquared.py', str(num_embryos)\n ] + slice_files + [directory]\n if 1 == call(command):\n exit(1)\n command = ['python', 'create_raw_expression_excel.py', str(num_embryos)\n ] + slice_files + [directory]\n print('Creating Excel files for RNA expression levels...')\n if 1 == call(command):\n exit(1)\n command = ['python', 'create_raw_spacial_noise_excel.py', str(num_embryos)\n ] + slice_files + [directory]\n print('Creating Excel files for spacial noise...')\n if 1 == call(command):\n exit(1)\n command = ['python', 'create_raw_noise_excel.py', str(num_embryos)\n ] + slice_files + [directory]\n print('Creating Excel files for noise...')\n if 1 == call(command):\n exit(1)\n\n\ndef usage():\n print('combine_embryos.py: Invalid command-line arguments.')\n print(\n \"Format: combine_embryos.py -ne <number of embryos> -nb <number of bins> -d <output directory> -i <first embryo's slice.xls> <second embryo's slice.xls> ... <last embryo's slice.xls>\"\n )\n print(\n 'Example: python combine_embryos.py -ne 20 -d ../wildtypefulldataset/output -nb 5 -i ../wildtypefulldataset/output/embryo1/slices.xls \\t../wildtypefulldataset/output/embryo2/slices.xls .... ../wildtypefulldataset/output/embryo20/slices.xls'\n )\n exit(1)\n\n\nmain()\n",
"step-4": "<mask token>\nimport sys, shared, os\nfrom subprocess import call\nDEFAULT_NUM_BIN = 5\n\n\ndef main():\n args = sys.argv[1:]\n num_args = len(args)\n req_args = [False] * 3\n num_bins = DEFAULT_NUM_BIN\n if num_args >= 6:\n i = 0\n while i < num_args - 1:\n option = args[i]\n value = args[i + 1]\n if (option == '-ne' or option == '--number-of-embryos'\n ) and shared.isInt(value):\n num_embryos = int(value)\n req_args[0] = True\n i += 2\n elif (option == '-nb' or option == '--number-of-bins'\n ) and shared.isInt(value):\n num_bins = int(value)\n i += 2\n elif option == '-d' or option == '--output-directory':\n directory = value\n req_args[1] = True\n i += 2\n elif req_args[0] and (option == '-i' or option == '--input-files'\n ) and num_args - 7 == num_embryos:\n slice_files = args[i + 1:i + 1 + num_embryos]\n for f in slice_files:\n if not os.path.isfile(f):\n print('combine_embryos.py: File ' + f +\n ' does not exist.')\n exit(1)\n req_args[2] = True\n i += num_embryos\n else:\n usage()\n for arg in req_args:\n if not arg:\n usage()\n else:\n usage()\n shared.ensureDir(directory)\n print('Plotting spatial amplitude...')\n command = ['python', 'plot_spatial_amplitude.py', str(num_embryos)\n ] + slice_files + [directory]\n if 1 == call(command):\n exit(1)\n \"\"\"### Burst size and frequency ###\n\t# 1. create_burst_data.py\n\tprint(\"Creating data for estimate_burst_parameters.m...\")\n\tcommand = [\"python\",\"create_burst_data.py\",str(num_embryos)] + slice_files + [directory]\n\tif 1==call(command):\n\t\texit(1)\n\t# 2. estimate_burst_parameters.m \n\tprint(\"Running estimate_burst_parameters.m on MATLAB...\")\n\tcommand = ['/Applications/MATLAB_R2016a.app/bin/matlab','-nodesktop','-nosplash','-nodisplay','-r','estimate_burst_parameters(''+directory+'/burst_data.xls',''+directory+'')']\n\tif 1==call(command): # this will automatically open and run MATLAB\n\t\texit(1)\n\t\n\t# 3. plot_estimated_burst_parameters.py using the output from estimate_burst_parameters.m \t\n\tprint(\"Plotting estimated burst size and frequencies...\")\n\tcommand = [\"python\",\"plot_estimated_burst_parameters.py\",directory+\"/burst_result.xls\",directory]\n\tif 1==call(command):\n\t\texit(1)\"\"\"\n command = ['python', 'plot_fano_factor.py', str(num_embryos)\n ] + slice_files + [directory]\n print('Plotting fano factor...')\n if 1 == call(command):\n exit(1)\n print('Plotting intrinsic and extrinsic noise...')\n command = ['python', 'plot_noise.py', str(num_embryos), str(num_bins)\n ] + slice_files + [directory]\n if 1 == call(command):\n exit(1)\n print('Plotting scatter plots of her1 vs her7 mRNAs in all bins ...')\n command = ['python', 'plot_scatter_her1_her7.py', directory +\n '/combined_slices.xls', str(num_bins), directory]\n if 1 == call(command):\n exit(1)\n print(\n 'Plotting spatial noise (coefficient of variation squared across space)...'\n )\n command = ['python', 'plot_CVsquared.py', str(num_embryos)\n ] + slice_files + [directory]\n if 1 == call(command):\n exit(1)\n command = ['python', 'create_raw_expression_excel.py', str(num_embryos)\n ] + slice_files + [directory]\n print('Creating Excel files for RNA expression levels...')\n if 1 == call(command):\n exit(1)\n command = ['python', 'create_raw_spacial_noise_excel.py', str(num_embryos)\n ] + slice_files + [directory]\n print('Creating Excel files for spacial noise...')\n if 1 == call(command):\n exit(1)\n command = ['python', 'create_raw_noise_excel.py', str(num_embryos)\n ] + slice_files + [directory]\n print('Creating Excel files for noise...')\n if 1 == call(command):\n exit(1)\n\n\ndef usage():\n print('combine_embryos.py: Invalid command-line arguments.')\n print(\n \"Format: combine_embryos.py -ne <number of embryos> -nb <number of bins> -d <output directory> -i <first embryo's slice.xls> <second embryo's slice.xls> ... <last embryo's slice.xls>\"\n )\n print(\n 'Example: python combine_embryos.py -ne 20 -d ../wildtypefulldataset/output -nb 5 -i ../wildtypefulldataset/output/embryo1/slices.xls \\t../wildtypefulldataset/output/embryo2/slices.xls .... ../wildtypefulldataset/output/embryo20/slices.xls'\n )\n exit(1)\n\n\nmain()\n",
"step-5": "\"\"\"\nCreate all figures and Excel files that combine data from all embryos in a given genetic background\nCopyright (C) 2017 Ahmet Ay, Dong Mai, Soo Bin Kwon, Ha Vu\n\nThis program is free software: you can redistribute it and/or modify\nit under the terms of the GNU General Public License as published by\nthe Free Software Foundation, either version 3 of the License, or\n(at your option) any later version.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program. If not, see <http://www.gnu.org/licenses/>.\n\"\"\"\nimport sys, shared, os\nfrom subprocess import call\n\nDEFAULT_NUM_BIN = 5\n\ndef main():\n\targs = sys.argv[1:]\n\tnum_args = len(args)\n\treq_args = [False]*3\n\t\n\tnum_bins = DEFAULT_NUM_BIN\n\tif num_args >= 6:\n\t\ti = 0\n\t\twhile i<num_args-1:\n\t\t\toption = args[i]\n\t\t\tvalue = args[i+1]\t\t\t\n\t\t\tif (option == '-ne' or option == '--number-of-embryos') and shared.isInt(value):\n\t\t\t\tnum_embryos = int(value)\n\t\t\t\treq_args[0] = True\n\t\t\t\ti+=2\n\t\t\telif (option == '-nb' or option == '--number-of-bins') and shared.isInt(value):\n\t\t\t\tnum_bins = int(value)\n\t\t\t\ti+=2\n\t\t\telif option == '-d' or option == '--output-directory':\n\t\t\t\tdirectory = value\n\t\t\t\treq_args[1] = True\n\t\t\t\ti+=2\n\t\t\telif req_args[0] and (option == '-i' or option == '--input-files') and ((num_args-7)==num_embryos):\n\t\t\t\tslice_files = args[i+1:i+1+num_embryos]\n\t\t\t\tfor f in slice_files:\n\t\t\t\t\tif not os.path.isfile(f):\n\t\t\t\t\t\tprint(\"combine_embryos.py: File \"+f+\" does not exist.\")\n\t\t\t\t\t\texit(1)\n\t\t\t\treq_args[2] = True\t\n\t\t\t\ti+=num_embryos\n\t\t\telse:\n\t\t\t\tusage()\n\t\tfor arg in req_args:\n\t\t\tif not arg:\n\t\t\t\tusage()\n\telse:\n\t\tusage()\n\n\tshared.ensureDir(directory)\n\n\t### Spatial amplitude ###\n\tprint(\"Plotting spatial amplitude...\")\n\tcommand = [\"python\",\"plot_spatial_amplitude.py\",str(num_embryos)] + slice_files + [directory]\n\tif 1==call(command):\n\t\texit(1)\n\t# (compare_spatial_amplitude.py can run after plot_spatial_amplitude.py is run for all genetic backgrounds)\t\t\n\n\t'''### Burst size and frequency ###\n\t# 1. create_burst_data.py\n\tprint(\"Creating data for estimate_burst_parameters.m...\")\n\tcommand = [\"python\",\"create_burst_data.py\",str(num_embryos)] + slice_files + [directory]\n\tif 1==call(command):\n\t\texit(1)\n\t# 2. estimate_burst_parameters.m \n\tprint(\"Running estimate_burst_parameters.m on MATLAB...\")\n\tcommand = ['/Applications/MATLAB_R2016a.app/bin/matlab','-nodesktop','-nosplash','-nodisplay','-r','estimate_burst_parameters(\\''+directory+'/burst_data.xls\\',\\''+directory+'\\')']\n\tif 1==call(command): # this will automatically open and run MATLAB\n\t\texit(1)\n\t\n\t# 3. plot_estimated_burst_parameters.py using the output from estimate_burst_parameters.m \t\n\tprint(\"Plotting estimated burst size and frequencies...\")\n\tcommand = [\"python\",\"plot_estimated_burst_parameters.py\",directory+\"/burst_result.xls\",directory]\n\tif 1==call(command):\n\t\texit(1)'''\t\n\t# (compare_burst_parameters.py can run after plot_estimated_burst_parameters.py is run for all genetic backgrounds)\n\t\n\t# Fano factor (to demonstrate burstiness) \n\tcommand = [\"python\",\"plot_fano_factor.py\",str(num_embryos)] + slice_files + [directory]\n\tprint(\"Plotting fano factor...\")\n\tif 1==call(command):\n\t\texit(1)\n\t# (compare_fano_factor.py can run after plot_fano_factor.py is run for all genetic backgrounds)\n\n\t### Noise ###\n\t# Intrinsic and extrinsic noise\n\tprint(\"Plotting intrinsic and extrinsic noise...\")\n\tcommand = [\"python\",\"plot_noise.py\",str(num_embryos), str(num_bins)] + slice_files + [directory]\n\tif 1==call(command):\n\t\texit(1)\t\n\t# (compare_noise.py can run after plot_noise.py is run for all genetic backgrounds)\t\n\t\n\t### Scatter plot of her1 and her7 for all bins ####\n\tprint(\"Plotting scatter plots of her1 vs her7 mRNAs in all bins ...\")\t\n\tcommand = [\"python\", \"plot_scatter_her1_her7.py\", directory + \"/combined_slices.xls\", str(num_bins), directory]\n\tif 1 == call(command):\n\t\texit(1)\n\t\n\t# Spatial noise (coefficient of variation squared across space)\t\n\tprint(\"Plotting spatial noise (coefficient of variation squared across space)...\")\n\tcommand = [\"python\",\"plot_CVsquared.py\",str(num_embryos)] + slice_files + [directory]\n\tif 1==call(command):\n\t\texit(1)\t\n\t\n\t# (compare_grouped_CVsquared.py and compare_CV_squared.py can run after plot_CVsquared.py is run for all genetic backgrounds)\t\t\n\t### Raw data Excel files ###\n\tcommand = [\"python\",\"create_raw_expression_excel.py\",str(num_embryos)] + slice_files + [directory]\n\tprint(\"Creating Excel files for RNA expression levels...\")\n\tif 1==call(command):\n\t\texit(1)\t\n\n\tcommand = [\"python\",\"create_raw_spacial_noise_excel.py\",str(num_embryos)] + slice_files + [directory]\n\tprint(\"Creating Excel files for spacial noise...\")\n\tif 1==call(command):\n\t\texit(1)\n\t\n\tcommand = [\"python\",\"create_raw_noise_excel.py\",str(num_embryos)] + slice_files + [directory]\n\tprint(\"Creating Excel files for noise...\")\n\tif 1==call(command):\n\t\texit(1)\n\ndef usage():\n\tprint(\"combine_embryos.py: Invalid command-line arguments.\")\n\tprint(\"Format: combine_embryos.py -ne <number of embryos> -nb <number of bins> -d <output directory> -i <first embryo's slice.xls> <second embryo's slice.xls> ... <last embryo's slice.xls>\")\n\tprint(\"Example: python combine_embryos.py -ne 20 -d ../wildtypefulldataset/output -nb 5 -i ../wildtypefulldataset/output/embryo1/slices.xls \\\n\t../wildtypefulldataset/output/embryo2/slices.xls .... ../wildtypefulldataset/output/embryo20/slices.xls\")\n\texit(1)\n\t\nmain()\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
import pickle
from sklearn import linear_model
from sklearn.model_selection import train_test_split
import random
from sklearn.manifold import TSNE
import matplotlib
def loadXY():
zippedXY = pickle.load(open("../Vectorizer/zippedXY_wff_2k.p","rb"))
#random.shuffle(zippedXY)
X,Y = zip(*zippedXY)
return X,Y
def outliers(X,Y):
from sklearn.ensemble import IsolationForest
out = IsolationForest()
out.fit(X,Y)
outliers = list(out.predict(X))
print "Total outliers : ",outliers
if __name__ == "__main__":
X,Y = loadXY()
print "X and Y loaded"
Ynum = []
# converting labels to num
label2num = {}
label2num["ANGER"],label2num["SADNESS"],label2num["JOY"],label2num["FEAR"],label2num["SURPRISE"] = 0,1,2,3,4
for yy in range(len(Y)):
Ynum.append(label2num[Y[yy]])
print Ynum.index(0)
print Ynum.index(1)
print Ynum.index(2)
print Ynum.index(3)
print Ynum.index(4)
"""
########## 2D PLOT ####################
# Fitting the tsne with data
tsne = TSNE(n_components=2, verbose=1)
tsne_fit = tsne.fit_transform(X)
# Saving and loading the fitted tsne
import pickle
pickle.dump(tsne_fit,open("tsne_fit_wff_2k.p","wb"))
tsne_fit = pickle.load(open("tsne_fit_wff_2k.p","rb"))
"""
"""
# Visualize the data
from matplotlib import pyplot as plt
xx = tsne_fit[:, 0]
yy = tsne_fit[:, 1]
colors = ['red','green','blue','black','yellow']
plt.scatter(xx, yy, c=Ynum, edgecolors='none',cmap=matplotlib.colors.ListedColormap(colors))
#plt.show()
# Saving the plot in Plots/ folder
plt.draw()
plt.savefig("wff_2k_visualise.png")
#outliers(X,Ynum)
"""
################## 3D PLOT #############################
# Fitting the tsne with data
tsne = TSNE(n_components=3, verbose=1)
tsne_fit = tsne.fit_transform(X)
# Saving and loading the fitted tsne
import pickle
pickle.dump(tsne_fit,open("tsne_fit_wff_2k_3d.p","wb"))
tsne_fit = pickle.load(open("tsne_fit_wff_2k_3d.p","rb"))
"""
"""
# Visualize the data
from matplotlib import pyplot as plt
xx = tsne_fit[:, 0]
yy = tsne_fit[:, 1]
zz = tsne_fit[:, 2]
colors = ['red','green','blue','black','yellow']
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
print Ynum
ax.scatter(xx, yy,zz, c=Ynum, edgecolors='none',cmap=matplotlib.colors.ListedColormap(colors))
#plt.show()
# Saving the plot in Plots/ folder
plt.draw()
plt.savefig("wff_2k_visualise_3d__new.png")
#outliers(X,Ynum)
|
normal
|
{
"blob_id": "cb13011def8fc7ed6a2e98a794343857e3e34562",
"index": 3142,
"step-1": "import pickle\nfrom sklearn import linear_model\nfrom sklearn.model_selection import train_test_split\nimport random\nfrom sklearn.manifold import TSNE\nimport matplotlib\n\ndef loadXY():\n\tzippedXY = pickle.load(open(\"../Vectorizer/zippedXY_wff_2k.p\",\"rb\"))\n\t#random.shuffle(zippedXY)\n\tX,Y = zip(*zippedXY)\n\treturn X,Y\n\ndef outliers(X,Y):\n\tfrom sklearn.ensemble import IsolationForest\n\tout = IsolationForest()\n\tout.fit(X,Y)\n\toutliers = list(out.predict(X))\n\tprint \"Total outliers : \",outliers\n\n\n\nif __name__ == \"__main__\":\n\n\tX,Y = loadXY()\n\tprint \"X and Y loaded\"\n\tYnum = []\n\t# converting labels to num\n\tlabel2num = {}\n\tlabel2num[\"ANGER\"],label2num[\"SADNESS\"],label2num[\"JOY\"],label2num[\"FEAR\"],label2num[\"SURPRISE\"] = 0,1,2,3,4\n\n\tfor yy in range(len(Y)):\n\t\tYnum.append(label2num[Y[yy]])\n\tprint Ynum.index(0)\n\tprint Ynum.index(1)\n\tprint Ynum.index(2)\n\tprint Ynum.index(3)\n\tprint Ynum.index(4)\n\t\"\"\"\n\t########## 2D PLOT ####################\n\t# Fitting the tsne with data\n\ttsne = TSNE(n_components=2, verbose=1) \n\ttsne_fit = tsne.fit_transform(X)\n\n\t\n\t# Saving and loading the fitted tsne\n\timport pickle\n\tpickle.dump(tsne_fit,open(\"tsne_fit_wff_2k.p\",\"wb\"))\n\ttsne_fit = pickle.load(open(\"tsne_fit_wff_2k.p\",\"rb\"))\n\t\"\"\"\n\t\"\"\"\n\t# Visualize the data\n\tfrom matplotlib import pyplot as plt\n\txx = tsne_fit[:, 0]\n\tyy = tsne_fit[:, 1]\n\tcolors = ['red','green','blue','black','yellow']\n\tplt.scatter(xx, yy, c=Ynum, edgecolors='none',cmap=matplotlib.colors.ListedColormap(colors))\n\t#plt.show()\n\t\n\n\t# Saving the plot in Plots/ folder\n\tplt.draw()\n\tplt.savefig(\"wff_2k_visualise.png\")\n\t#outliers(X,Ynum)\n\t\"\"\"\n\n\t################## 3D PLOT #############################\n\t# Fitting the tsne with data\n\ttsne = TSNE(n_components=3, verbose=1) \n\ttsne_fit = tsne.fit_transform(X)\n\n\t\n\t# Saving and loading the fitted tsne\n\timport pickle\n\tpickle.dump(tsne_fit,open(\"tsne_fit_wff_2k_3d.p\",\"wb\"))\n\ttsne_fit = pickle.load(open(\"tsne_fit_wff_2k_3d.p\",\"rb\"))\n\t\"\"\"\n\t\"\"\"\n\t# Visualize the data\n\tfrom matplotlib import pyplot as plt\n\txx = tsne_fit[:, 0]\n\tyy = tsne_fit[:, 1]\n\tzz = tsne_fit[:, 2]\n\tcolors = ['red','green','blue','black','yellow']\n\tfrom mpl_toolkits.mplot3d import Axes3D\n\tfig = plt.figure()\n\tax = fig.add_subplot(111, projection='3d')\n\tprint Ynum\n\tax.scatter(xx, yy,zz, c=Ynum, edgecolors='none',cmap=matplotlib.colors.ListedColormap(colors))\n\t#plt.show()\n\t\n\n\t# Saving the plot in Plots/ folder\n\tplt.draw()\n\tplt.savefig(\"wff_2k_visualise_3d__new.png\")\n\t#outliers(X,Ynum)\n\n\t\n\t\n\t\n\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for line in hand:
line = line.rstrip()
x = re.findall('([0-9]+)', line)
if len(x) > 0:
for i in x:
total += float(i)
print('sum is', int(total))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
hand = open('regex_sum_act.txt')
total = 0
count = 0
for line in hand:
line = line.rstrip()
x = re.findall('([0-9]+)', line)
if len(x) > 0:
for i in x:
total += float(i)
print('sum is', int(total))
<|reserved_special_token_1|>
import re
hand = open('regex_sum_act.txt')
total = 0
count = 0
for line in hand:
line = line.rstrip()
x = re.findall('([0-9]+)', line)
if len(x) > 0:
for i in x:
total += float(i)
print('sum is', int(total))
<|reserved_special_token_1|>
###############################################################
## File Name: 11_exercise.py
## File Type: Python File
## Author: surge55
## Course: Python 4 Everybody
## Chapter: Chapter 11 - Regular Expressions
## Excercise: n/a
## Description: Code walkthrough from book
## Other References: associated files in folder
###############################################################
#11.2 Extracting data using regular expressions
# import re
# s = 'A message from csev@umich.edu to cwen@iupui.edu about meeting @2PM'
# lst = re.findall('\S+@\S+', s)
# print(lst)
# We can use this regular expression in a program
# to read all the lines in a file and print out
# anything that looks like an email address:
# import re
# hand = open('mbox-short.txt')
# for line in hand:
# line = line.rstrip()
# x = re.findall('\S+@\S+', line)
# if len(x) > 0:
# print(x)
## Much Cleaner Version
# import re
# hand = open('mbox-short.txt')
# for line in hand:
# line = line.rstrip()
# x = re.findall('[a-zA-Z0-9]\S*@\S*[a-zA-Z]', line)
# if len(x) > 0:
# print(x)
# Search for lines that start with "X" followed by any
# non-whitespace characters and ':'
# followed by a space and any number
# the number can include a decimal
# import re
# hand = open('mbox-short.txt')
# # Returns a List
# # for line in hand:
# # line = line.rstrip()
# # x = re.findall('^X\S*: [0-9.]+', line)
# # if len(x) > 0:
# # print(x)
# # print(type(line))
# # Returnes a String
# for line in hand:
# line = line.rstrip()
# if re.search('^X\S*: [0-9.]+', line):
# print(line)
# # print(type(line))
# Search for lines that start with 'X' followed by any
# non whitespace characters and ':' followed by a space
# and any number. The number can include a decimal
# Then print the number if it is greater than 0
# import re
# hand = open('mbox-short.txt')
# for line in hand:
# line = line.rstrip()
# x = re.findall('^X\S*: ([0-9.]+)', line)
# if len(x) > 0:
# print(x)
# Exercise 1
# Write a simple program to simulate the operation of the grep
# command on unix. Ask the user to enter a regular expression
# and count the nuber of lines that matched the regular expression:
# import re
# reg_inp = input("Enter a regular expression: ")
# count = 0
# hand = open('mbox.txt')
# for line in hand:
# line = line.rstrip()
# if re.search(reg_inp, line):
# count += 1
# print('mbox.txt had', count, 'lines that match', reg_inp)
# Exercise 2
# Write a program to look for lines of the form:
# 'New Revision: 39772'
# Extract the number from each of the lines using a regular expression
# and the findall() method. Compute the average of the numbers
# and print out the average as an integer.
# import re
# hand = open('mbox.txt')
# total = 0
# count = 0
# for line in hand:
# line = line.rstrip()
# x = re.findall('^New Revision: ([0-9]+)', line)
# if len(x) > 0:
# for i in x:
# total = total + float(i)
# count += 1
# print(int(total/count))
# FINDING NUMBERS IN A HAYSTACK
# In this assignment you will read through and parse a file with text and numbers
# You will extract all the numbers in the file and compute the sum
# of the numbers
import re
hand = open('regex_sum_act.txt')
total = 0
count = 0
for line in hand:
line = line.rstrip()
x = re.findall('([0-9]+)', line)
if len(x) > 0:
# print(x)
for i in x:
total += float(i)
print('sum is', int(total))
|
flexible
|
{
"blob_id": "860908126d473e6c4ed070992a1b518683fd4c27",
"index": 3275,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor line in hand:\n line = line.rstrip()\n x = re.findall('([0-9]+)', line)\n if len(x) > 0:\n for i in x:\n total += float(i)\nprint('sum is', int(total))\n",
"step-3": "<mask token>\nhand = open('regex_sum_act.txt')\ntotal = 0\ncount = 0\nfor line in hand:\n line = line.rstrip()\n x = re.findall('([0-9]+)', line)\n if len(x) > 0:\n for i in x:\n total += float(i)\nprint('sum is', int(total))\n",
"step-4": "import re\nhand = open('regex_sum_act.txt')\ntotal = 0\ncount = 0\nfor line in hand:\n line = line.rstrip()\n x = re.findall('([0-9]+)', line)\n if len(x) > 0:\n for i in x:\n total += float(i)\nprint('sum is', int(total))\n",
"step-5": "###############################################################\r\n## File Name: 11_exercise.py\r\n## File Type: Python File\r\n## Author: surge55\r\n## Course: Python 4 Everybody\r\n## Chapter: Chapter 11 - Regular Expressions\r\n## Excercise: n/a\r\n## Description: Code walkthrough from book \r\n## Other References: associated files in folder\r\n###############################################################\r\n\r\n\r\n#11.2 Extracting data using regular expressions\r\n\r\n# import re\r\n# s = 'A message from csev@umich.edu to cwen@iupui.edu about meeting @2PM'\r\n# lst = re.findall('\\S+@\\S+', s)\r\n# print(lst) \r\n\r\n# We can use this regular expression in a program\r\n# to read all the lines in a file and print out\r\n# anything that looks like an email address:\r\n# import re\r\n# hand = open('mbox-short.txt')\r\n# for line in hand:\r\n# line = line.rstrip()\r\n# x = re.findall('\\S+@\\S+', line)\r\n# if len(x) > 0:\r\n# print(x)\r\n\r\n## Much Cleaner Version\r\n# import re\r\n# hand = open('mbox-short.txt')\r\n# for line in hand:\r\n# line = line.rstrip()\r\n# x = re.findall('[a-zA-Z0-9]\\S*@\\S*[a-zA-Z]', line)\r\n# if len(x) > 0:\r\n# print(x)\r\n\r\n# Search for lines that start with \"X\" followed by any\r\n# non-whitespace characters and ':'\r\n# followed by a space and any number\r\n# the number can include a decimal\r\n# import re\r\n# hand = open('mbox-short.txt')\r\n\r\n# # Returns a List\r\n# # for line in hand:\r\n# # line = line.rstrip()\r\n# # x = re.findall('^X\\S*: [0-9.]+', line)\r\n# # if len(x) > 0:\r\n# # print(x)\r\n# # print(type(line))\r\n\r\n# # Returnes a String\r\n# for line in hand:\r\n# line = line.rstrip()\r\n# if re.search('^X\\S*: [0-9.]+', line):\r\n# print(line)\r\n# # print(type(line))\r\n\r\n\r\n\r\n# Search for lines that start with 'X' followed by any\r\n# non whitespace characters and ':' followed by a space\r\n# and any number. The number can include a decimal\r\n# Then print the number if it is greater than 0\r\n\r\n# import re\r\n# hand = open('mbox-short.txt')\r\n\r\n# for line in hand:\r\n# line = line.rstrip()\r\n# x = re.findall('^X\\S*: ([0-9.]+)', line)\r\n# if len(x) > 0:\r\n# print(x)\r\n\r\n\r\n# Exercise 1\r\n# Write a simple program to simulate the operation of the grep\r\n# command on unix. Ask the user to enter a regular expression \r\n# and count the nuber of lines that matched the regular expression:\r\n\r\n# import re\r\n\r\n# reg_inp = input(\"Enter a regular expression: \")\r\n# count = 0\r\n\r\n# hand = open('mbox.txt')\r\n# for line in hand:\r\n# line = line.rstrip()\r\n# if re.search(reg_inp, line):\r\n# count += 1\r\n\r\n# print('mbox.txt had', count, 'lines that match', reg_inp)\r\n\r\n\r\n\r\n# Exercise 2\r\n# Write a program to look for lines of the form:\r\n# 'New Revision: 39772'\r\n# Extract the number from each of the lines using a regular expression\r\n# and the findall() method. Compute the average of the numbers\r\n# and print out the average as an integer.\r\n\r\n# import re\r\n# hand = open('mbox.txt')\r\n# total = 0\r\n# count = 0\r\n\r\n# for line in hand:\r\n# line = line.rstrip()\r\n# x = re.findall('^New Revision: ([0-9]+)', line)\r\n# if len(x) > 0:\r\n# for i in x:\r\n# total = total + float(i)\r\n# count += 1\r\n\r\n# print(int(total/count))\r\n\r\n\r\n# FINDING NUMBERS IN A HAYSTACK\r\n# In this assignment you will read through and parse a file with text and numbers\r\n# You will extract all the numbers in the file and compute the sum\r\n# of the numbers \r\n\r\nimport re\r\nhand = open('regex_sum_act.txt')\r\ntotal = 0\r\ncount = 0\r\n\r\nfor line in hand:\r\n line = line.rstrip()\r\n x = re.findall('([0-9]+)', line)\r\n if len(x) > 0:\r\n # print(x)\r\n for i in x:\r\n total += float(i)\r\n\r\nprint('sum is', int(total))\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/env python
import argparse
import subprocess
def module_exists(module_name):
try:
__import__(module_name)
except ImportError:
return False
else:
return True
def quote(items):
return ["'" + item + "'" for item in items]
if module_exists('urllib.parse'):
from urllib.parse import unquote
else:
from urllib import unquote
parser = argparse.ArgumentParser()
parser.add_argument("url", help="The url to send the request to.")
parser.add_argument("--data")
parser.add_argument("-H", action="append", dest='headers')
# HTTPie arguments
parser.add_argument("--verbose", action="store_true")
parser.add_argument("--timeout", type=int)
# curlie arguments
parser.add_argument("-q", "--quiet", action="store_true")
# ignored curl arguments
parser.add_argument("--compressed", action="store_true")
args = parser.parse_args()
flags = []
method = "GET"
data = None
if args.data:
data = quote(unquote(args.data).split("&"))
method = "POST"
if "Content-Type: application/x-www-form-urlencoded" in args.headers:
flags.append("-f")
headers = quote(args.headers)
httpieArgs = []
if len(flags) > 0:
httpieArgs.append(" ".join(flags))
httpieArgs.append(method)
httpieArgs.append("'" + args.url + "'")
if headers and len(headers) > 0:
httpieArgs.append(" ".join(headers))
if data and len(data) > 0:
httpieArgs.append(' '.join(data))
if args.verbose:
httpieArgs.append("--verbose")
if args.timeout is not None:
httpieArgs.append("--timeout " + args.timeout)
cmd = "http " + " ".join(httpieArgs)
if not args.quiet:
print("\n" + cmd + "\n")
subprocess.call(cmd, shell=True)
|
normal
|
{
"blob_id": "68371acc58da6d986d94d746abb4fea541d65fdd",
"index": 3384,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef module_exists(module_name):\n try:\n __import__(module_name)\n except ImportError:\n return False\n else:\n return True\n\n\ndef quote(items):\n return [(\"'\" + item + \"'\") for item in items]\n\n\nif module_exists('urllib.parse'):\n from urllib.parse import unquote\nelse:\n from urllib import unquote\n<mask token>\nparser.add_argument('url', help='The url to send the request to.')\nparser.add_argument('--data')\nparser.add_argument('-H', action='append', dest='headers')\nparser.add_argument('--verbose', action='store_true')\nparser.add_argument('--timeout', type=int)\nparser.add_argument('-q', '--quiet', action='store_true')\nparser.add_argument('--compressed', action='store_true')\n<mask token>\nif args.data:\n data = quote(unquote(args.data).split('&'))\n method = 'POST'\nif 'Content-Type: application/x-www-form-urlencoded' in args.headers:\n flags.append('-f')\n<mask token>\nif len(flags) > 0:\n httpieArgs.append(' '.join(flags))\nhttpieArgs.append(method)\nhttpieArgs.append(\"'\" + args.url + \"'\")\nif headers and len(headers) > 0:\n httpieArgs.append(' '.join(headers))\nif data and len(data) > 0:\n httpieArgs.append(' '.join(data))\nif args.verbose:\n httpieArgs.append('--verbose')\nif args.timeout is not None:\n httpieArgs.append('--timeout ' + args.timeout)\n<mask token>\nif not args.quiet:\n print('\\n' + cmd + '\\n')\nsubprocess.call(cmd, shell=True)\n",
"step-3": "<mask token>\n\n\ndef module_exists(module_name):\n try:\n __import__(module_name)\n except ImportError:\n return False\n else:\n return True\n\n\ndef quote(items):\n return [(\"'\" + item + \"'\") for item in items]\n\n\nif module_exists('urllib.parse'):\n from urllib.parse import unquote\nelse:\n from urllib import unquote\nparser = argparse.ArgumentParser()\nparser.add_argument('url', help='The url to send the request to.')\nparser.add_argument('--data')\nparser.add_argument('-H', action='append', dest='headers')\nparser.add_argument('--verbose', action='store_true')\nparser.add_argument('--timeout', type=int)\nparser.add_argument('-q', '--quiet', action='store_true')\nparser.add_argument('--compressed', action='store_true')\nargs = parser.parse_args()\nflags = []\nmethod = 'GET'\ndata = None\nif args.data:\n data = quote(unquote(args.data).split('&'))\n method = 'POST'\nif 'Content-Type: application/x-www-form-urlencoded' in args.headers:\n flags.append('-f')\nheaders = quote(args.headers)\nhttpieArgs = []\nif len(flags) > 0:\n httpieArgs.append(' '.join(flags))\nhttpieArgs.append(method)\nhttpieArgs.append(\"'\" + args.url + \"'\")\nif headers and len(headers) > 0:\n httpieArgs.append(' '.join(headers))\nif data and len(data) > 0:\n httpieArgs.append(' '.join(data))\nif args.verbose:\n httpieArgs.append('--verbose')\nif args.timeout is not None:\n httpieArgs.append('--timeout ' + args.timeout)\ncmd = 'http ' + ' '.join(httpieArgs)\nif not args.quiet:\n print('\\n' + cmd + '\\n')\nsubprocess.call(cmd, shell=True)\n",
"step-4": "import argparse\nimport subprocess\n\n\ndef module_exists(module_name):\n try:\n __import__(module_name)\n except ImportError:\n return False\n else:\n return True\n\n\ndef quote(items):\n return [(\"'\" + item + \"'\") for item in items]\n\n\nif module_exists('urllib.parse'):\n from urllib.parse import unquote\nelse:\n from urllib import unquote\nparser = argparse.ArgumentParser()\nparser.add_argument('url', help='The url to send the request to.')\nparser.add_argument('--data')\nparser.add_argument('-H', action='append', dest='headers')\nparser.add_argument('--verbose', action='store_true')\nparser.add_argument('--timeout', type=int)\nparser.add_argument('-q', '--quiet', action='store_true')\nparser.add_argument('--compressed', action='store_true')\nargs = parser.parse_args()\nflags = []\nmethod = 'GET'\ndata = None\nif args.data:\n data = quote(unquote(args.data).split('&'))\n method = 'POST'\nif 'Content-Type: application/x-www-form-urlencoded' in args.headers:\n flags.append('-f')\nheaders = quote(args.headers)\nhttpieArgs = []\nif len(flags) > 0:\n httpieArgs.append(' '.join(flags))\nhttpieArgs.append(method)\nhttpieArgs.append(\"'\" + args.url + \"'\")\nif headers and len(headers) > 0:\n httpieArgs.append(' '.join(headers))\nif data and len(data) > 0:\n httpieArgs.append(' '.join(data))\nif args.verbose:\n httpieArgs.append('--verbose')\nif args.timeout is not None:\n httpieArgs.append('--timeout ' + args.timeout)\ncmd = 'http ' + ' '.join(httpieArgs)\nif not args.quiet:\n print('\\n' + cmd + '\\n')\nsubprocess.call(cmd, shell=True)\n",
"step-5": "#!/usr/bin/env python\n\nimport argparse\nimport subprocess\n\ndef module_exists(module_name):\n try:\n __import__(module_name)\n except ImportError:\n return False\n else:\n return True\n\ndef quote(items):\n return [\"'\" + item + \"'\" for item in items]\n\nif module_exists('urllib.parse'):\n from urllib.parse import unquote\nelse:\n from urllib import unquote\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"url\", help=\"The url to send the request to.\")\nparser.add_argument(\"--data\")\nparser.add_argument(\"-H\", action=\"append\", dest='headers')\n\n# HTTPie arguments\nparser.add_argument(\"--verbose\", action=\"store_true\")\nparser.add_argument(\"--timeout\", type=int)\n\n# curlie arguments\nparser.add_argument(\"-q\", \"--quiet\", action=\"store_true\")\n\n# ignored curl arguments\nparser.add_argument(\"--compressed\", action=\"store_true\")\n\nargs = parser.parse_args()\n\nflags = []\nmethod = \"GET\"\n\ndata = None\nif args.data:\n data = quote(unquote(args.data).split(\"&\"))\n method = \"POST\"\n\nif \"Content-Type: application/x-www-form-urlencoded\" in args.headers:\n flags.append(\"-f\")\n\nheaders = quote(args.headers)\n\nhttpieArgs = []\n\nif len(flags) > 0:\n httpieArgs.append(\" \".join(flags))\n\nhttpieArgs.append(method)\nhttpieArgs.append(\"'\" + args.url + \"'\")\n\nif headers and len(headers) > 0:\n httpieArgs.append(\" \".join(headers))\n\nif data and len(data) > 0:\n httpieArgs.append(' '.join(data))\n\nif args.verbose:\n httpieArgs.append(\"--verbose\")\n\nif args.timeout is not None:\n httpieArgs.append(\"--timeout \" + args.timeout)\n\ncmd = \"http \" + \" \".join(httpieArgs)\n\nif not args.quiet:\n print(\"\\n\" + cmd + \"\\n\")\n\nsubprocess.call(cmd, shell=True)\n",
"step-ids": [
0,
3,
4,
5,
6
]
}
|
[
0,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
class LaunchMovieLottery(object):
<|reserved_special_token_0|>
def movie_list(self):
movie_list = MovieList(file_name, function)
self.return_movie_list = movie_list.return_movie_list()
self.full_list = movie_list.return_full_list()
return [self.return_movie_list, self.full_list]
def limit_list(self):
self.movie_list()
limit_length = LengthLimit(self.return_movie_list, self.limit_low,
self.limit_high)
self.shorten_list = limit_length.return_asked_length()
def return_movie(self):
self.limit_list()
movie_random = MovieRandom(self.shorten_list)
self.temp_movie_random = movie_random.return_random_movie()
return self.temp_movie_random
def remove_and_save(self, the_movie):
full_list = self.movie_list()[1]
try:
remove = RemoveChosenMovieFromList(the_movie, full_list)
new_movie_list = remove.remove_movie()
save_doc = SaveListToCSV(new_movie_list, filename_save,
function_save)
save_doc.save_file()
except ValueError:
pass
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class LaunchMovieLottery(object):
def __init__(self, limit_low=None, limit_high=None):
self.limit_low = limit_low
self.limit_high = limit_high
self.full_list = None
def movie_list(self):
movie_list = MovieList(file_name, function)
self.return_movie_list = movie_list.return_movie_list()
self.full_list = movie_list.return_full_list()
return [self.return_movie_list, self.full_list]
def limit_list(self):
self.movie_list()
limit_length = LengthLimit(self.return_movie_list, self.limit_low,
self.limit_high)
self.shorten_list = limit_length.return_asked_length()
def return_movie(self):
self.limit_list()
movie_random = MovieRandom(self.shorten_list)
self.temp_movie_random = movie_random.return_random_movie()
return self.temp_movie_random
def remove_and_save(self, the_movie):
full_list = self.movie_list()[1]
try:
remove = RemoveChosenMovieFromList(the_movie, full_list)
new_movie_list = remove.remove_movie()
save_doc = SaveListToCSV(new_movie_list, filename_save,
function_save)
save_doc.save_file()
except ValueError:
pass
<|reserved_special_token_1|>
__author__ = 'Orka'
<|reserved_special_token_0|>
file_name = 'cinema.csv'
function = 'r+'
filename_save = 'cinema.csv'
function_save = 'w'
class LaunchMovieLottery(object):
def __init__(self, limit_low=None, limit_high=None):
self.limit_low = limit_low
self.limit_high = limit_high
self.full_list = None
def movie_list(self):
movie_list = MovieList(file_name, function)
self.return_movie_list = movie_list.return_movie_list()
self.full_list = movie_list.return_full_list()
return [self.return_movie_list, self.full_list]
def limit_list(self):
self.movie_list()
limit_length = LengthLimit(self.return_movie_list, self.limit_low,
self.limit_high)
self.shorten_list = limit_length.return_asked_length()
def return_movie(self):
self.limit_list()
movie_random = MovieRandom(self.shorten_list)
self.temp_movie_random = movie_random.return_random_movie()
return self.temp_movie_random
def remove_and_save(self, the_movie):
full_list = self.movie_list()[1]
try:
remove = RemoveChosenMovieFromList(the_movie, full_list)
new_movie_list = remove.remove_movie()
save_doc = SaveListToCSV(new_movie_list, filename_save,
function_save)
save_doc.save_file()
except ValueError:
pass
<|reserved_special_token_1|>
__author__ = 'Orka'
from movie_list import MovieList
from movie_random import MovieRandom
from remove_chosen_movie_from_list import RemoveChosenMovieFromList
from save_list_to_CSV import SaveListToCSV
from length_limit import LengthLimit
file_name = 'cinema.csv'
function = 'r+'
filename_save = 'cinema.csv'
function_save = 'w'
class LaunchMovieLottery(object):
def __init__(self, limit_low=None, limit_high=None):
self.limit_low = limit_low
self.limit_high = limit_high
self.full_list = None
def movie_list(self):
movie_list = MovieList(file_name, function)
self.return_movie_list = movie_list.return_movie_list()
self.full_list = movie_list.return_full_list()
return [self.return_movie_list, self.full_list]
def limit_list(self):
self.movie_list()
limit_length = LengthLimit(self.return_movie_list, self.limit_low,
self.limit_high)
self.shorten_list = limit_length.return_asked_length()
def return_movie(self):
self.limit_list()
movie_random = MovieRandom(self.shorten_list)
self.temp_movie_random = movie_random.return_random_movie()
return self.temp_movie_random
def remove_and_save(self, the_movie):
full_list = self.movie_list()[1]
try:
remove = RemoveChosenMovieFromList(the_movie, full_list)
new_movie_list = remove.remove_movie()
save_doc = SaveListToCSV(new_movie_list, filename_save,
function_save)
save_doc.save_file()
except ValueError:
pass
<|reserved_special_token_1|>
__author__ = 'Orka'
from movie_list import MovieList
from movie_random import MovieRandom
from remove_chosen_movie_from_list import RemoveChosenMovieFromList
from save_list_to_CSV import SaveListToCSV
from length_limit import LengthLimit
file_name = 'cinema.csv'
function = 'r+'
filename_save = 'cinema.csv'
function_save = 'w'
class LaunchMovieLottery(object):
def __init__(self, limit_low=None, limit_high=None):
self.limit_low = limit_low
self.limit_high = limit_high
self.full_list = None
def movie_list(self):
# creates movies list without sequels
movie_list = MovieList(file_name, function)
self.return_movie_list = movie_list.return_movie_list()
self.full_list = movie_list.return_full_list()
return [self.return_movie_list, self.full_list]
def limit_list(self):
self.movie_list()
# limit the movie_list - returns list of movies limited to the specified length
limit_length = LengthLimit(self.return_movie_list, self.limit_low, self.limit_high)
self.shorten_list = limit_length.return_asked_length()
# returns: 'No movie of this length.'
def return_movie(self):
self.limit_list()
# draw a movie from movie list and print it
movie_random = MovieRandom(self.shorten_list)
self.temp_movie_random = movie_random.return_random_movie()
return self.temp_movie_random
def remove_and_save(self, the_movie):
full_list = self.movie_list()[1]
try:
# remove chosen movie from movie list and allow the next movie in the series in next lottery
remove = RemoveChosenMovieFromList(the_movie, full_list)
new_movie_list = remove.remove_movie()
# save to CSV
save_doc = SaveListToCSV(new_movie_list, filename_save, function_save)
save_doc.save_file()
except ValueError:
# Movie not exists
pass
|
flexible
|
{
"blob_id": "e35a106a3852a7a004fdae6819d4075e1fe929d6",
"index": 4373,
"step-1": "<mask token>\n\n\nclass LaunchMovieLottery(object):\n <mask token>\n\n def movie_list(self):\n movie_list = MovieList(file_name, function)\n self.return_movie_list = movie_list.return_movie_list()\n self.full_list = movie_list.return_full_list()\n return [self.return_movie_list, self.full_list]\n\n def limit_list(self):\n self.movie_list()\n limit_length = LengthLimit(self.return_movie_list, self.limit_low,\n self.limit_high)\n self.shorten_list = limit_length.return_asked_length()\n\n def return_movie(self):\n self.limit_list()\n movie_random = MovieRandom(self.shorten_list)\n self.temp_movie_random = movie_random.return_random_movie()\n return self.temp_movie_random\n\n def remove_and_save(self, the_movie):\n full_list = self.movie_list()[1]\n try:\n remove = RemoveChosenMovieFromList(the_movie, full_list)\n new_movie_list = remove.remove_movie()\n save_doc = SaveListToCSV(new_movie_list, filename_save,\n function_save)\n save_doc.save_file()\n except ValueError:\n pass\n",
"step-2": "<mask token>\n\n\nclass LaunchMovieLottery(object):\n\n def __init__(self, limit_low=None, limit_high=None):\n self.limit_low = limit_low\n self.limit_high = limit_high\n self.full_list = None\n\n def movie_list(self):\n movie_list = MovieList(file_name, function)\n self.return_movie_list = movie_list.return_movie_list()\n self.full_list = movie_list.return_full_list()\n return [self.return_movie_list, self.full_list]\n\n def limit_list(self):\n self.movie_list()\n limit_length = LengthLimit(self.return_movie_list, self.limit_low,\n self.limit_high)\n self.shorten_list = limit_length.return_asked_length()\n\n def return_movie(self):\n self.limit_list()\n movie_random = MovieRandom(self.shorten_list)\n self.temp_movie_random = movie_random.return_random_movie()\n return self.temp_movie_random\n\n def remove_and_save(self, the_movie):\n full_list = self.movie_list()[1]\n try:\n remove = RemoveChosenMovieFromList(the_movie, full_list)\n new_movie_list = remove.remove_movie()\n save_doc = SaveListToCSV(new_movie_list, filename_save,\n function_save)\n save_doc.save_file()\n except ValueError:\n pass\n",
"step-3": "__author__ = 'Orka'\n<mask token>\nfile_name = 'cinema.csv'\nfunction = 'r+'\nfilename_save = 'cinema.csv'\nfunction_save = 'w'\n\n\nclass LaunchMovieLottery(object):\n\n def __init__(self, limit_low=None, limit_high=None):\n self.limit_low = limit_low\n self.limit_high = limit_high\n self.full_list = None\n\n def movie_list(self):\n movie_list = MovieList(file_name, function)\n self.return_movie_list = movie_list.return_movie_list()\n self.full_list = movie_list.return_full_list()\n return [self.return_movie_list, self.full_list]\n\n def limit_list(self):\n self.movie_list()\n limit_length = LengthLimit(self.return_movie_list, self.limit_low,\n self.limit_high)\n self.shorten_list = limit_length.return_asked_length()\n\n def return_movie(self):\n self.limit_list()\n movie_random = MovieRandom(self.shorten_list)\n self.temp_movie_random = movie_random.return_random_movie()\n return self.temp_movie_random\n\n def remove_and_save(self, the_movie):\n full_list = self.movie_list()[1]\n try:\n remove = RemoveChosenMovieFromList(the_movie, full_list)\n new_movie_list = remove.remove_movie()\n save_doc = SaveListToCSV(new_movie_list, filename_save,\n function_save)\n save_doc.save_file()\n except ValueError:\n pass\n",
"step-4": "__author__ = 'Orka'\nfrom movie_list import MovieList\nfrom movie_random import MovieRandom\nfrom remove_chosen_movie_from_list import RemoveChosenMovieFromList\nfrom save_list_to_CSV import SaveListToCSV\nfrom length_limit import LengthLimit\nfile_name = 'cinema.csv'\nfunction = 'r+'\nfilename_save = 'cinema.csv'\nfunction_save = 'w'\n\n\nclass LaunchMovieLottery(object):\n\n def __init__(self, limit_low=None, limit_high=None):\n self.limit_low = limit_low\n self.limit_high = limit_high\n self.full_list = None\n\n def movie_list(self):\n movie_list = MovieList(file_name, function)\n self.return_movie_list = movie_list.return_movie_list()\n self.full_list = movie_list.return_full_list()\n return [self.return_movie_list, self.full_list]\n\n def limit_list(self):\n self.movie_list()\n limit_length = LengthLimit(self.return_movie_list, self.limit_low,\n self.limit_high)\n self.shorten_list = limit_length.return_asked_length()\n\n def return_movie(self):\n self.limit_list()\n movie_random = MovieRandom(self.shorten_list)\n self.temp_movie_random = movie_random.return_random_movie()\n return self.temp_movie_random\n\n def remove_and_save(self, the_movie):\n full_list = self.movie_list()[1]\n try:\n remove = RemoveChosenMovieFromList(the_movie, full_list)\n new_movie_list = remove.remove_movie()\n save_doc = SaveListToCSV(new_movie_list, filename_save,\n function_save)\n save_doc.save_file()\n except ValueError:\n pass\n",
"step-5": "__author__ = 'Orka'\r\nfrom movie_list import MovieList\r\nfrom movie_random import MovieRandom\r\nfrom remove_chosen_movie_from_list import RemoveChosenMovieFromList\r\nfrom save_list_to_CSV import SaveListToCSV\r\nfrom length_limit import LengthLimit\r\n\r\nfile_name = 'cinema.csv'\r\nfunction = 'r+'\r\nfilename_save = 'cinema.csv'\r\nfunction_save = 'w'\r\n\r\n\r\nclass LaunchMovieLottery(object):\r\n def __init__(self, limit_low=None, limit_high=None):\r\n self.limit_low = limit_low\r\n self.limit_high = limit_high\r\n self.full_list = None\r\n\r\n def movie_list(self):\r\n # creates movies list without sequels\r\n movie_list = MovieList(file_name, function)\r\n self.return_movie_list = movie_list.return_movie_list()\r\n self.full_list = movie_list.return_full_list()\r\n return [self.return_movie_list, self.full_list]\r\n\r\n def limit_list(self):\r\n self.movie_list()\r\n # limit the movie_list - returns list of movies limited to the specified length\r\n limit_length = LengthLimit(self.return_movie_list, self.limit_low, self.limit_high)\r\n self.shorten_list = limit_length.return_asked_length()\r\n # returns: 'No movie of this length.'\r\n\r\n def return_movie(self):\r\n self.limit_list()\r\n # draw a movie from movie list and print it\r\n movie_random = MovieRandom(self.shorten_list)\r\n self.temp_movie_random = movie_random.return_random_movie()\r\n return self.temp_movie_random\r\n\r\n def remove_and_save(self, the_movie):\r\n full_list = self.movie_list()[1]\r\n\r\n try:\r\n # remove chosen movie from movie list and allow the next movie in the series in next lottery\r\n remove = RemoveChosenMovieFromList(the_movie, full_list)\r\n new_movie_list = remove.remove_movie()\r\n\r\n # save to CSV\r\n save_doc = SaveListToCSV(new_movie_list, filename_save, function_save)\r\n save_doc.save_file()\r\n\r\n except ValueError:\r\n # Movie not exists\r\n pass\r\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
<|reserved_special_token_0|>
@app.route('/direct')
def direct():
"""
A RemoteFlowSource is used directly as DN and search filter using a oneline call to .search
"""
unsafe_dc = request.args['dc']
unsafe_filter = request.args['username']
dn = 'dc={}'.format(unsafe_dc)
search_filter = '(user={})'.format(unsafe_filter)
srv = ldap3.Server('ldap://127.0.0.1')
conn = ldap3.Connection(srv, user=dn, auto_bind=True).search(dn,
search_filter)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@app.route('/normal')
def normal():
"""
A RemoteFlowSource is used directly as DN and search filter
"""
unsafe_dc = request.args['dc']
unsafe_filter = request.args['username']
dn = 'dc={}'.format(unsafe_dc)
search_filter = '(user={})'.format(unsafe_filter)
srv = ldap3.Server('ldap://127.0.0.1')
conn = ldap3.Connection(srv, user=dn, auto_bind=True)
conn.search(dn, search_filter)
@app.route('/direct')
def direct():
"""
A RemoteFlowSource is used directly as DN and search filter using a oneline call to .search
"""
unsafe_dc = request.args['dc']
unsafe_filter = request.args['username']
dn = 'dc={}'.format(unsafe_dc)
search_filter = '(user={})'.format(unsafe_filter)
srv = ldap3.Server('ldap://127.0.0.1')
conn = ldap3.Connection(srv, user=dn, auto_bind=True).search(dn,
search_filter)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
app = Flask(__name__)
@app.route('/normal')
def normal():
"""
A RemoteFlowSource is used directly as DN and search filter
"""
unsafe_dc = request.args['dc']
unsafe_filter = request.args['username']
dn = 'dc={}'.format(unsafe_dc)
search_filter = '(user={})'.format(unsafe_filter)
srv = ldap3.Server('ldap://127.0.0.1')
conn = ldap3.Connection(srv, user=dn, auto_bind=True)
conn.search(dn, search_filter)
@app.route('/direct')
def direct():
"""
A RemoteFlowSource is used directly as DN and search filter using a oneline call to .search
"""
unsafe_dc = request.args['dc']
unsafe_filter = request.args['username']
dn = 'dc={}'.format(unsafe_dc)
search_filter = '(user={})'.format(unsafe_filter)
srv = ldap3.Server('ldap://127.0.0.1')
conn = ldap3.Connection(srv, user=dn, auto_bind=True).search(dn,
search_filter)
<|reserved_special_token_1|>
from flask import request, Flask
import ldap3
app = Flask(__name__)
@app.route('/normal')
def normal():
"""
A RemoteFlowSource is used directly as DN and search filter
"""
unsafe_dc = request.args['dc']
unsafe_filter = request.args['username']
dn = 'dc={}'.format(unsafe_dc)
search_filter = '(user={})'.format(unsafe_filter)
srv = ldap3.Server('ldap://127.0.0.1')
conn = ldap3.Connection(srv, user=dn, auto_bind=True)
conn.search(dn, search_filter)
@app.route('/direct')
def direct():
"""
A RemoteFlowSource is used directly as DN and search filter using a oneline call to .search
"""
unsafe_dc = request.args['dc']
unsafe_filter = request.args['username']
dn = 'dc={}'.format(unsafe_dc)
search_filter = '(user={})'.format(unsafe_filter)
srv = ldap3.Server('ldap://127.0.0.1')
conn = ldap3.Connection(srv, user=dn, auto_bind=True).search(dn,
search_filter)
<|reserved_special_token_1|>
from flask import request, Flask
import ldap3
app = Flask(__name__)
@app.route("/normal")
def normal():
"""
A RemoteFlowSource is used directly as DN and search filter
"""
unsafe_dc = request.args['dc']
unsafe_filter = request.args['username']
dn = "dc={}".format(unsafe_dc)
search_filter = "(user={})".format(unsafe_filter)
srv = ldap3.Server('ldap://127.0.0.1')
conn = ldap3.Connection(srv, user=dn, auto_bind=True)
conn.search(dn, search_filter)
@app.route("/direct")
def direct():
"""
A RemoteFlowSource is used directly as DN and search filter using a oneline call to .search
"""
unsafe_dc = request.args['dc']
unsafe_filter = request.args['username']
dn = "dc={}".format(unsafe_dc)
search_filter = "(user={})".format(unsafe_filter)
srv = ldap3.Server('ldap://127.0.0.1')
conn = ldap3.Connection(srv, user=dn, auto_bind=True).search(
dn, search_filter)
# if __name__ == "__main__":
# app.run(debug=True)
|
flexible
|
{
"blob_id": "b51591de921f6e153c1dd478cec7fad42ff4251a",
"index": 749,
"step-1": "<mask token>\n\n\n@app.route('/direct')\ndef direct():\n \"\"\"\n A RemoteFlowSource is used directly as DN and search filter using a oneline call to .search\n \"\"\"\n unsafe_dc = request.args['dc']\n unsafe_filter = request.args['username']\n dn = 'dc={}'.format(unsafe_dc)\n search_filter = '(user={})'.format(unsafe_filter)\n srv = ldap3.Server('ldap://127.0.0.1')\n conn = ldap3.Connection(srv, user=dn, auto_bind=True).search(dn,\n search_filter)\n",
"step-2": "<mask token>\n\n\n@app.route('/normal')\ndef normal():\n \"\"\"\n A RemoteFlowSource is used directly as DN and search filter\n \"\"\"\n unsafe_dc = request.args['dc']\n unsafe_filter = request.args['username']\n dn = 'dc={}'.format(unsafe_dc)\n search_filter = '(user={})'.format(unsafe_filter)\n srv = ldap3.Server('ldap://127.0.0.1')\n conn = ldap3.Connection(srv, user=dn, auto_bind=True)\n conn.search(dn, search_filter)\n\n\n@app.route('/direct')\ndef direct():\n \"\"\"\n A RemoteFlowSource is used directly as DN and search filter using a oneline call to .search\n \"\"\"\n unsafe_dc = request.args['dc']\n unsafe_filter = request.args['username']\n dn = 'dc={}'.format(unsafe_dc)\n search_filter = '(user={})'.format(unsafe_filter)\n srv = ldap3.Server('ldap://127.0.0.1')\n conn = ldap3.Connection(srv, user=dn, auto_bind=True).search(dn,\n search_filter)\n",
"step-3": "<mask token>\napp = Flask(__name__)\n\n\n@app.route('/normal')\ndef normal():\n \"\"\"\n A RemoteFlowSource is used directly as DN and search filter\n \"\"\"\n unsafe_dc = request.args['dc']\n unsafe_filter = request.args['username']\n dn = 'dc={}'.format(unsafe_dc)\n search_filter = '(user={})'.format(unsafe_filter)\n srv = ldap3.Server('ldap://127.0.0.1')\n conn = ldap3.Connection(srv, user=dn, auto_bind=True)\n conn.search(dn, search_filter)\n\n\n@app.route('/direct')\ndef direct():\n \"\"\"\n A RemoteFlowSource is used directly as DN and search filter using a oneline call to .search\n \"\"\"\n unsafe_dc = request.args['dc']\n unsafe_filter = request.args['username']\n dn = 'dc={}'.format(unsafe_dc)\n search_filter = '(user={})'.format(unsafe_filter)\n srv = ldap3.Server('ldap://127.0.0.1')\n conn = ldap3.Connection(srv, user=dn, auto_bind=True).search(dn,\n search_filter)\n",
"step-4": "from flask import request, Flask\nimport ldap3\napp = Flask(__name__)\n\n\n@app.route('/normal')\ndef normal():\n \"\"\"\n A RemoteFlowSource is used directly as DN and search filter\n \"\"\"\n unsafe_dc = request.args['dc']\n unsafe_filter = request.args['username']\n dn = 'dc={}'.format(unsafe_dc)\n search_filter = '(user={})'.format(unsafe_filter)\n srv = ldap3.Server('ldap://127.0.0.1')\n conn = ldap3.Connection(srv, user=dn, auto_bind=True)\n conn.search(dn, search_filter)\n\n\n@app.route('/direct')\ndef direct():\n \"\"\"\n A RemoteFlowSource is used directly as DN and search filter using a oneline call to .search\n \"\"\"\n unsafe_dc = request.args['dc']\n unsafe_filter = request.args['username']\n dn = 'dc={}'.format(unsafe_dc)\n search_filter = '(user={})'.format(unsafe_filter)\n srv = ldap3.Server('ldap://127.0.0.1')\n conn = ldap3.Connection(srv, user=dn, auto_bind=True).search(dn,\n search_filter)\n",
"step-5": "from flask import request, Flask\nimport ldap3\n\napp = Flask(__name__)\n\n\n@app.route(\"/normal\")\ndef normal():\n \"\"\"\n A RemoteFlowSource is used directly as DN and search filter\n \"\"\"\n\n unsafe_dc = request.args['dc']\n unsafe_filter = request.args['username']\n\n dn = \"dc={}\".format(unsafe_dc)\n search_filter = \"(user={})\".format(unsafe_filter)\n\n srv = ldap3.Server('ldap://127.0.0.1')\n conn = ldap3.Connection(srv, user=dn, auto_bind=True)\n conn.search(dn, search_filter)\n\n\n@app.route(\"/direct\")\ndef direct():\n \"\"\"\n A RemoteFlowSource is used directly as DN and search filter using a oneline call to .search\n \"\"\"\n\n unsafe_dc = request.args['dc']\n unsafe_filter = request.args['username']\n\n dn = \"dc={}\".format(unsafe_dc)\n search_filter = \"(user={})\".format(unsafe_filter)\n\n srv = ldap3.Server('ldap://127.0.0.1')\n conn = ldap3.Connection(srv, user=dn, auto_bind=True).search(\n dn, search_filter)\n\n# if __name__ == \"__main__\":\n# app.run(debug=True)\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if n % 10 == 1 and (n < 11 or n > 20):
print(n, 'korova')
elif n % 10 > 1 and n % 10 < 5 and (n < 11 or n > 20):
print(n, 'korovy')
else:
print(n, 'korov')
<|reserved_special_token_1|>
n = int(input())
if n % 10 == 1 and (n < 11 or n > 20):
print(n, 'korova')
elif n % 10 > 1 and n % 10 < 5 and (n < 11 or n > 20):
print(n, 'korovy')
else:
print(n, 'korov')
<|reserved_special_token_1|>
n = int(input())
if n % 10 == 1 and (n < 11 or n > 20):
print(n, "korova")
elif n % 10 > 1 and n % 10 < 5 and (n < 11 or n > 20):
print(n, "korovy")
else:
print(n, "korov")
|
flexible
|
{
"blob_id": "78037d936ee5f9b31bf00263885fbec225a4f8f2",
"index": 2191,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif n % 10 == 1 and (n < 11 or n > 20):\n print(n, 'korova')\nelif n % 10 > 1 and n % 10 < 5 and (n < 11 or n > 20):\n print(n, 'korovy')\nelse:\n print(n, 'korov')\n",
"step-3": "n = int(input())\nif n % 10 == 1 and (n < 11 or n > 20):\n print(n, 'korova')\nelif n % 10 > 1 and n % 10 < 5 and (n < 11 or n > 20):\n print(n, 'korovy')\nelse:\n print(n, 'korov')\n",
"step-4": "n = int(input())\n\nif n % 10 == 1 and (n < 11 or n > 20):\n print(n, \"korova\")\nelif n % 10 > 1 and n % 10 < 5 and (n < 11 or n > 20):\n print(n, \"korovy\")\nelse:\n print(n, \"korov\")\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# This file is Copyright (c) 2020 LambdaConcept <contact@lambdaconcept.com>
# License: BSD
from math import log2
from nmigen import *
from nmigen.utils import log2_int
from nmigen_soc import wishbone
from nmigen_soc.memory import MemoryMap
from lambdasoc.periph import Peripheral
class gramWishbone(Peripheral, Elaboratable):
def __init__(self, core, data_width=32, granularity=8):
super().__init__(name="wishbone")
self.native_port = core.crossbar.get_native_port()
self.ratio = self.native_port.data_width//data_width
addr_width = log2_int(core.size//(self.native_port.data_width//data_width))
self.bus = wishbone.Interface(addr_width=addr_width+log2_int(self.ratio),
data_width=data_width, granularity=granularity)
map = MemoryMap(addr_width=addr_width+log2_int(self.ratio)+log2_int(data_width//granularity),
data_width=granularity)
self.bus.memory_map = map
def elaborate(self, platform):
m = Module()
# Write datapath
m.d.comb += [
self.native_port.wdata.valid.eq(self.bus.cyc & self.bus.stb & self.bus.we),
]
ratio_bitmask = Repl(1, log2_int(self.ratio))
sel = Signal.like(self.bus.sel)
with m.If(self.bus.sel == 0):
m.d.comb += sel.eq(Repl(1, sel.width))
with m.Else():
m.d.comb += sel.eq(self.bus.sel)
with m.Switch(self.bus.adr & ratio_bitmask):
for i in range(self.ratio):
with m.Case(i):
m.d.comb += self.native_port.wdata.we.eq(Repl(sel, self.bus.granularity//8) << (self.ratio*i))
with m.Switch(self.bus.adr & ratio_bitmask):
for i in range(self.ratio):
with m.Case(i):
m.d.comb += self.native_port.wdata.data.eq(self.bus.dat_w << (self.bus.data_width*i))
# Read datapath
m.d.comb += [
self.native_port.rdata.ready.eq(1),
]
with m.Switch(self.bus.adr & ratio_bitmask):
for i in range(self.ratio):
with m.Case(i):
m.d.comb += self.bus.dat_r.eq(self.native_port.rdata.data >> (self.bus.data_width*i))
with m.FSM():
with m.State("Send-Cmd"):
m.d.comb += [
self.native_port.cmd.valid.eq(self.bus.cyc & self.bus.stb),
self.native_port.cmd.we.eq(self.bus.we),
self.native_port.cmd.addr.eq(self.bus.adr >> log2_int(self.bus.data_width//self.bus.granularity)),
]
with m.If(self.native_port.cmd.valid & self.native_port.cmd.ready):
with m.If(self.bus.we):
m.next = "Wait-Write"
with m.Else():
m.next = "Wait-Read"
with m.State("Wait-Read"):
with m.If(self.native_port.rdata.valid):
m.d.comb += self.bus.ack.eq(1)
m.next = "Send-Cmd"
with m.State("Wait-Write"):
with m.If(self.native_port.wdata.ready):
m.d.comb += self.bus.ack.eq(1)
m.next = "Send-Cmd"
return m
|
normal
|
{
"blob_id": "3775ba538d6fab13e35e2f0761a1cacbe087f339",
"index": 4723,
"step-1": "<mask token>\n\n\nclass gramWishbone(Peripheral, Elaboratable):\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass gramWishbone(Peripheral, Elaboratable):\n\n def __init__(self, core, data_width=32, granularity=8):\n super().__init__(name='wishbone')\n self.native_port = core.crossbar.get_native_port()\n self.ratio = self.native_port.data_width // data_width\n addr_width = log2_int(core.size // (self.native_port.data_width //\n data_width))\n self.bus = wishbone.Interface(addr_width=addr_width + log2_int(self\n .ratio), data_width=data_width, granularity=granularity)\n map = MemoryMap(addr_width=addr_width + log2_int(self.ratio) +\n log2_int(data_width // granularity), data_width=granularity)\n self.bus.memory_map = map\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass gramWishbone(Peripheral, Elaboratable):\n\n def __init__(self, core, data_width=32, granularity=8):\n super().__init__(name='wishbone')\n self.native_port = core.crossbar.get_native_port()\n self.ratio = self.native_port.data_width // data_width\n addr_width = log2_int(core.size // (self.native_port.data_width //\n data_width))\n self.bus = wishbone.Interface(addr_width=addr_width + log2_int(self\n .ratio), data_width=data_width, granularity=granularity)\n map = MemoryMap(addr_width=addr_width + log2_int(self.ratio) +\n log2_int(data_width // granularity), data_width=granularity)\n self.bus.memory_map = map\n\n def elaborate(self, platform):\n m = Module()\n m.d.comb += [self.native_port.wdata.valid.eq(self.bus.cyc & self.\n bus.stb & self.bus.we)]\n ratio_bitmask = Repl(1, log2_int(self.ratio))\n sel = Signal.like(self.bus.sel)\n with m.If(self.bus.sel == 0):\n m.d.comb += sel.eq(Repl(1, sel.width))\n with m.Else():\n m.d.comb += sel.eq(self.bus.sel)\n with m.Switch(self.bus.adr & ratio_bitmask):\n for i in range(self.ratio):\n with m.Case(i):\n m.d.comb += self.native_port.wdata.we.eq(Repl(sel, self\n .bus.granularity // 8) << self.ratio * i)\n with m.Switch(self.bus.adr & ratio_bitmask):\n for i in range(self.ratio):\n with m.Case(i):\n m.d.comb += self.native_port.wdata.data.eq(self.bus.\n dat_w << self.bus.data_width * i)\n m.d.comb += [self.native_port.rdata.ready.eq(1)]\n with m.Switch(self.bus.adr & ratio_bitmask):\n for i in range(self.ratio):\n with m.Case(i):\n m.d.comb += self.bus.dat_r.eq(self.native_port.rdata.\n data >> self.bus.data_width * i)\n with m.FSM():\n with m.State('Send-Cmd'):\n m.d.comb += [self.native_port.cmd.valid.eq(self.bus.cyc &\n self.bus.stb), self.native_port.cmd.we.eq(self.bus.we),\n self.native_port.cmd.addr.eq(self.bus.adr >> log2_int(\n self.bus.data_width // self.bus.granularity))]\n with m.If(self.native_port.cmd.valid & self.native_port.cmd\n .ready):\n with m.If(self.bus.we):\n m.next = 'Wait-Write'\n with m.Else():\n m.next = 'Wait-Read'\n with m.State('Wait-Read'):\n with m.If(self.native_port.rdata.valid):\n m.d.comb += self.bus.ack.eq(1)\n m.next = 'Send-Cmd'\n with m.State('Wait-Write'):\n with m.If(self.native_port.wdata.ready):\n m.d.comb += self.bus.ack.eq(1)\n m.next = 'Send-Cmd'\n return m\n",
"step-4": "from math import log2\nfrom nmigen import *\nfrom nmigen.utils import log2_int\nfrom nmigen_soc import wishbone\nfrom nmigen_soc.memory import MemoryMap\nfrom lambdasoc.periph import Peripheral\n\n\nclass gramWishbone(Peripheral, Elaboratable):\n\n def __init__(self, core, data_width=32, granularity=8):\n super().__init__(name='wishbone')\n self.native_port = core.crossbar.get_native_port()\n self.ratio = self.native_port.data_width // data_width\n addr_width = log2_int(core.size // (self.native_port.data_width //\n data_width))\n self.bus = wishbone.Interface(addr_width=addr_width + log2_int(self\n .ratio), data_width=data_width, granularity=granularity)\n map = MemoryMap(addr_width=addr_width + log2_int(self.ratio) +\n log2_int(data_width // granularity), data_width=granularity)\n self.bus.memory_map = map\n\n def elaborate(self, platform):\n m = Module()\n m.d.comb += [self.native_port.wdata.valid.eq(self.bus.cyc & self.\n bus.stb & self.bus.we)]\n ratio_bitmask = Repl(1, log2_int(self.ratio))\n sel = Signal.like(self.bus.sel)\n with m.If(self.bus.sel == 0):\n m.d.comb += sel.eq(Repl(1, sel.width))\n with m.Else():\n m.d.comb += sel.eq(self.bus.sel)\n with m.Switch(self.bus.adr & ratio_bitmask):\n for i in range(self.ratio):\n with m.Case(i):\n m.d.comb += self.native_port.wdata.we.eq(Repl(sel, self\n .bus.granularity // 8) << self.ratio * i)\n with m.Switch(self.bus.adr & ratio_bitmask):\n for i in range(self.ratio):\n with m.Case(i):\n m.d.comb += self.native_port.wdata.data.eq(self.bus.\n dat_w << self.bus.data_width * i)\n m.d.comb += [self.native_port.rdata.ready.eq(1)]\n with m.Switch(self.bus.adr & ratio_bitmask):\n for i in range(self.ratio):\n with m.Case(i):\n m.d.comb += self.bus.dat_r.eq(self.native_port.rdata.\n data >> self.bus.data_width * i)\n with m.FSM():\n with m.State('Send-Cmd'):\n m.d.comb += [self.native_port.cmd.valid.eq(self.bus.cyc &\n self.bus.stb), self.native_port.cmd.we.eq(self.bus.we),\n self.native_port.cmd.addr.eq(self.bus.adr >> log2_int(\n self.bus.data_width // self.bus.granularity))]\n with m.If(self.native_port.cmd.valid & self.native_port.cmd\n .ready):\n with m.If(self.bus.we):\n m.next = 'Wait-Write'\n with m.Else():\n m.next = 'Wait-Read'\n with m.State('Wait-Read'):\n with m.If(self.native_port.rdata.valid):\n m.d.comb += self.bus.ack.eq(1)\n m.next = 'Send-Cmd'\n with m.State('Wait-Write'):\n with m.If(self.native_port.wdata.ready):\n m.d.comb += self.bus.ack.eq(1)\n m.next = 'Send-Cmd'\n return m\n",
"step-5": "# This file is Copyright (c) 2020 LambdaConcept <contact@lambdaconcept.com>\n# License: BSD\n\nfrom math import log2\n\nfrom nmigen import *\nfrom nmigen.utils import log2_int\n\nfrom nmigen_soc import wishbone\nfrom nmigen_soc.memory import MemoryMap\nfrom lambdasoc.periph import Peripheral\n\n\nclass gramWishbone(Peripheral, Elaboratable):\n def __init__(self, core, data_width=32, granularity=8):\n super().__init__(name=\"wishbone\")\n\n self.native_port = core.crossbar.get_native_port()\n\n self.ratio = self.native_port.data_width//data_width\n\n addr_width = log2_int(core.size//(self.native_port.data_width//data_width))\n self.bus = wishbone.Interface(addr_width=addr_width+log2_int(self.ratio),\n data_width=data_width, granularity=granularity)\n\n map = MemoryMap(addr_width=addr_width+log2_int(self.ratio)+log2_int(data_width//granularity),\n data_width=granularity)\n self.bus.memory_map = map\n\n def elaborate(self, platform):\n m = Module()\n\n # Write datapath\n m.d.comb += [\n self.native_port.wdata.valid.eq(self.bus.cyc & self.bus.stb & self.bus.we),\n ]\n\n ratio_bitmask = Repl(1, log2_int(self.ratio))\n\n sel = Signal.like(self.bus.sel)\n with m.If(self.bus.sel == 0):\n m.d.comb += sel.eq(Repl(1, sel.width))\n with m.Else():\n m.d.comb += sel.eq(self.bus.sel)\n\n with m.Switch(self.bus.adr & ratio_bitmask):\n for i in range(self.ratio):\n with m.Case(i):\n m.d.comb += self.native_port.wdata.we.eq(Repl(sel, self.bus.granularity//8) << (self.ratio*i))\n\n with m.Switch(self.bus.adr & ratio_bitmask):\n for i in range(self.ratio):\n with m.Case(i):\n m.d.comb += self.native_port.wdata.data.eq(self.bus.dat_w << (self.bus.data_width*i))\n\n # Read datapath\n m.d.comb += [\n self.native_port.rdata.ready.eq(1),\n ]\n\n with m.Switch(self.bus.adr & ratio_bitmask):\n for i in range(self.ratio):\n with m.Case(i):\n m.d.comb += self.bus.dat_r.eq(self.native_port.rdata.data >> (self.bus.data_width*i))\n\n with m.FSM():\n with m.State(\"Send-Cmd\"):\n m.d.comb += [\n self.native_port.cmd.valid.eq(self.bus.cyc & self.bus.stb),\n self.native_port.cmd.we.eq(self.bus.we),\n self.native_port.cmd.addr.eq(self.bus.adr >> log2_int(self.bus.data_width//self.bus.granularity)),\n ]\n\n with m.If(self.native_port.cmd.valid & self.native_port.cmd.ready):\n with m.If(self.bus.we):\n m.next = \"Wait-Write\"\n with m.Else():\n m.next = \"Wait-Read\"\n\n with m.State(\"Wait-Read\"):\n with m.If(self.native_port.rdata.valid):\n m.d.comb += self.bus.ack.eq(1)\n m.next = \"Send-Cmd\"\n\n with m.State(\"Wait-Write\"):\n with m.If(self.native_port.wdata.ready):\n m.d.comb += self.bus.ack.eq(1)\n m.next = \"Send-Cmd\"\n\n return m\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
from django.urls import path
from .views import *
urlpatterns = [
path('', ListUser.as_view() , name = 'list'),
path('register/', UserRegister.as_view() , name = 'register'),
path('login/', UserLogin.as_view() , name = 'login'),
path('delete/' , UserDelete.as_view() , name ='delete'),
path('update/' , UserUpdate.as_view() , name = 'update'),
]
|
normal
|
{
"blob_id": "5fe4f2738285d2f4b8bbfee2c4c6d15665737ea4",
"index": 8627,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nurlpatterns = [path('', ListUser.as_view(), name='list'), path('register/',\n UserRegister.as_view(), name='register'), path('login/', UserLogin.\n as_view(), name='login'), path('delete/', UserDelete.as_view(), name=\n 'delete'), path('update/', UserUpdate.as_view(), name='update')]\n",
"step-3": "from django.urls import path\nfrom .views import *\nurlpatterns = [path('', ListUser.as_view(), name='list'), path('register/',\n UserRegister.as_view(), name='register'), path('login/', UserLogin.\n as_view(), name='login'), path('delete/', UserDelete.as_view(), name=\n 'delete'), path('update/', UserUpdate.as_view(), name='update')]\n",
"step-4": "from django.urls import path\nfrom .views import *\n\nurlpatterns = [\n path('', ListUser.as_view() , name = 'list'),\n path('register/', UserRegister.as_view() , name = 'register'),\n path('login/', UserLogin.as_view() , name = 'login'),\n path('delete/' , UserDelete.as_view() , name ='delete'),\n path('update/' , UserUpdate.as_view() , name = 'update'),\n]",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# Copyright 2017 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
DEPS = [
'step',
]
def RunSteps(api):
try:
api.step('test step', [{}])
except AssertionError as e:
assert str(e) == 'Type <type \'dict\'> is not permitted. cmd is [{}]'
def GenTests(api):
yield api.test('basic')
|
normal
|
{
"blob_id": "25d210144ef209fd5e4ff7e4e4c2e77fd7eb79ac",
"index": 3480,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef GenTests(api):\n yield api.test('basic')\n",
"step-3": "<mask token>\n\n\ndef RunSteps(api):\n try:\n api.step('test step', [{}])\n except AssertionError as e:\n assert str(e) == \"Type <type 'dict'> is not permitted. cmd is [{}]\"\n\n\ndef GenTests(api):\n yield api.test('basic')\n",
"step-4": "DEPS = ['step']\n\n\ndef RunSteps(api):\n try:\n api.step('test step', [{}])\n except AssertionError as e:\n assert str(e) == \"Type <type 'dict'> is not permitted. cmd is [{}]\"\n\n\ndef GenTests(api):\n yield api.test('basic')\n",
"step-5": "# Copyright 2017 The LUCI Authors. All rights reserved.\n# Use of this source code is governed under the Apache License, Version 2.0\n# that can be found in the LICENSE file.\n\nDEPS = [\n 'step',\n]\n\n\ndef RunSteps(api):\n try:\n api.step('test step', [{}])\n except AssertionError as e:\n assert str(e) == 'Type <type \\'dict\\'> is not permitted. cmd is [{}]'\n\n\ndef GenTests(api):\n yield api.test('basic')",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#
# -*- coding: utf-8 -*-
# Copyright 2019 Fortinet, Inc.
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""
The fortios firewall monitor class
It is in this file the runtime information is collected from the device
for a given resource, parsed, and the facts tree is populated
based on the configuration.
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import re
from copy import deepcopy
from ansible.module_utils.network.common import utils
from ansible.module_utils.network.fortios.argspec.firewall.firewall import FirewallArgs
FACT_SYSTEM_SUBSETS = frozenset([
'system_current-admins_select',
'system_firmware_select',
'system_fortimanager_status',
'system_ha-checksums_select',
'system_interface_select',
'system_status_select',
'system_time_select',
])
class FirewallFacts(object):
""" The fortios firewall fact class
"""
def __init__(self, module, fos, uri=None, subspec='config', options='options'):
self._module = module
self._fos = fos
self._uri = uri
def populate_facts(self, connection, ansible_facts, data=None):
""" Populate the facts for firewall
:param connection: the device connection
:param ansible_facts: Facts dictionary
:param data: previously collected conf
:rtype: dictionary
:returns: facts
"""
fos = self._fos if self._fos else connection
vdom = self._module.params['vdom']
ansible_facts['ansible_network_resources'].pop('system', None)
facts = {}
if self._uri.startswith(tuple(FACT_SYSTEM_SUBSETS)):
resp = fos.monitor('system', self._uri[len('system_'):].replace('_', '/'), vdom=vdom)
facts.update({self._uri: resp})
ansible_facts['ansible_network_resources'].update(facts)
return ansible_facts
|
normal
|
{
"blob_id": "62bc8fec6833c5e8bc1598941eaad141ab6c9d5a",
"index": 3758,
"step-1": "<mask token>\n\n\nclass FirewallFacts(object):\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass FirewallFacts(object):\n <mask token>\n <mask token>\n\n def populate_facts(self, connection, ansible_facts, data=None):\n \"\"\" Populate the facts for firewall\n :param connection: the device connection\n :param ansible_facts: Facts dictionary\n :param data: previously collected conf\n :rtype: dictionary\n :returns: facts\n \"\"\"\n fos = self._fos if self._fos else connection\n vdom = self._module.params['vdom']\n ansible_facts['ansible_network_resources'].pop('system', None)\n facts = {}\n if self._uri.startswith(tuple(FACT_SYSTEM_SUBSETS)):\n resp = fos.monitor('system', self._uri[len('system_'):].replace\n ('_', '/'), vdom=vdom)\n facts.update({self._uri: resp})\n ansible_facts['ansible_network_resources'].update(facts)\n return ansible_facts\n",
"step-3": "<mask token>\n__metaclass__ = type\n<mask token>\nFACT_SYSTEM_SUBSETS = frozenset(['system_current-admins_select',\n 'system_firmware_select', 'system_fortimanager_status',\n 'system_ha-checksums_select', 'system_interface_select',\n 'system_status_select', 'system_time_select'])\n\n\nclass FirewallFacts(object):\n \"\"\" The fortios firewall fact class\n \"\"\"\n\n def __init__(self, module, fos, uri=None, subspec='config', options=\n 'options'):\n self._module = module\n self._fos = fos\n self._uri = uri\n\n def populate_facts(self, connection, ansible_facts, data=None):\n \"\"\" Populate the facts for firewall\n :param connection: the device connection\n :param ansible_facts: Facts dictionary\n :param data: previously collected conf\n :rtype: dictionary\n :returns: facts\n \"\"\"\n fos = self._fos if self._fos else connection\n vdom = self._module.params['vdom']\n ansible_facts['ansible_network_resources'].pop('system', None)\n facts = {}\n if self._uri.startswith(tuple(FACT_SYSTEM_SUBSETS)):\n resp = fos.monitor('system', self._uri[len('system_'):].replace\n ('_', '/'), vdom=vdom)\n facts.update({self._uri: resp})\n ansible_facts['ansible_network_resources'].update(facts)\n return ansible_facts\n",
"step-4": "<mask token>\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\nimport re\nfrom copy import deepcopy\nfrom ansible.module_utils.network.common import utils\nfrom ansible.module_utils.network.fortios.argspec.firewall.firewall import FirewallArgs\nFACT_SYSTEM_SUBSETS = frozenset(['system_current-admins_select',\n 'system_firmware_select', 'system_fortimanager_status',\n 'system_ha-checksums_select', 'system_interface_select',\n 'system_status_select', 'system_time_select'])\n\n\nclass FirewallFacts(object):\n \"\"\" The fortios firewall fact class\n \"\"\"\n\n def __init__(self, module, fos, uri=None, subspec='config', options=\n 'options'):\n self._module = module\n self._fos = fos\n self._uri = uri\n\n def populate_facts(self, connection, ansible_facts, data=None):\n \"\"\" Populate the facts for firewall\n :param connection: the device connection\n :param ansible_facts: Facts dictionary\n :param data: previously collected conf\n :rtype: dictionary\n :returns: facts\n \"\"\"\n fos = self._fos if self._fos else connection\n vdom = self._module.params['vdom']\n ansible_facts['ansible_network_resources'].pop('system', None)\n facts = {}\n if self._uri.startswith(tuple(FACT_SYSTEM_SUBSETS)):\n resp = fos.monitor('system', self._uri[len('system_'):].replace\n ('_', '/'), vdom=vdom)\n facts.update({self._uri: resp})\n ansible_facts['ansible_network_resources'].update(facts)\n return ansible_facts\n",
"step-5": "#\n# -*- coding: utf-8 -*-\n# Copyright 2019 Fortinet, Inc.\n# GNU General Public License v3.0+\n# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\"\"\"\nThe fortios firewall monitor class\nIt is in this file the runtime information is collected from the device\nfor a given resource, parsed, and the facts tree is populated\nbased on the configuration.\n\"\"\"\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\nimport re\nfrom copy import deepcopy\n\nfrom ansible.module_utils.network.common import utils\nfrom ansible.module_utils.network.fortios.argspec.firewall.firewall import FirewallArgs\n\n\nFACT_SYSTEM_SUBSETS = frozenset([\n 'system_current-admins_select',\n 'system_firmware_select',\n 'system_fortimanager_status',\n 'system_ha-checksums_select',\n 'system_interface_select',\n 'system_status_select',\n 'system_time_select',\n])\n\n\nclass FirewallFacts(object):\n \"\"\" The fortios firewall fact class\n \"\"\"\n\n def __init__(self, module, fos, uri=None, subspec='config', options='options'):\n self._module = module\n self._fos = fos\n self._uri = uri\n\n def populate_facts(self, connection, ansible_facts, data=None):\n \"\"\" Populate the facts for firewall\n :param connection: the device connection\n :param ansible_facts: Facts dictionary\n :param data: previously collected conf\n :rtype: dictionary\n :returns: facts\n \"\"\"\n fos = self._fos if self._fos else connection\n vdom = self._module.params['vdom']\n ansible_facts['ansible_network_resources'].pop('system', None)\n facts = {}\n if self._uri.startswith(tuple(FACT_SYSTEM_SUBSETS)):\n resp = fos.monitor('system', self._uri[len('system_'):].replace('_', '/'), vdom=vdom)\n facts.update({self._uri: resp})\n ansible_facts['ansible_network_resources'].update(facts)\n return ansible_facts\n\n",
"step-ids": [
1,
2,
5,
6,
7
]
}
|
[
1,
2,
5,
6,
7
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def modCount(n, m):
if m <= n:
inBetween = n - m
dividible = []
for x in range(m + 1, n):
if x % m == 0:
dividible.append(x)
return (
"""There are {} numbers between {} and {}
and the ones that are dividible by {} are {}"""
.format(inBetween, m, n, m, dividible))
else:
return 'n must be higher value then m'
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def modCount(n, m):
if m <= n:
inBetween = n - m
dividible = []
for x in range(m + 1, n):
if x % m == 0:
dividible.append(x)
return (
"""There are {} numbers between {} and {}
and the ones that are dividible by {} are {}"""
.format(inBetween, m, n, m, dividible))
else:
return 'n must be higher value then m'
print(modCount(10, 2))
<|reserved_special_token_1|>
def modCount(n, m):
if(m <= n):
inBetween = n - m
dividible = []
for x in range(m+1, n):
if(x%m == 0):
dividible.append(x)
return 'There are {} numbers between {} and {} \nand the ones that are dividible by {} are {}'.format(inBetween, m, n, m, dividible)
else:
return 'n must be higher value then m'
print(modCount(10,2))
|
flexible
|
{
"blob_id": "0699c9f70f1c16b4cb9837edf7a4ef27f021faec",
"index": 8318,
"step-1": "<mask token>\n",
"step-2": "def modCount(n, m):\n if m <= n:\n inBetween = n - m\n dividible = []\n for x in range(m + 1, n):\n if x % m == 0:\n dividible.append(x)\n return (\n \"\"\"There are {} numbers between {} and {} \nand the ones that are dividible by {} are {}\"\"\"\n .format(inBetween, m, n, m, dividible))\n else:\n return 'n must be higher value then m'\n\n\n<mask token>\n",
"step-3": "def modCount(n, m):\n if m <= n:\n inBetween = n - m\n dividible = []\n for x in range(m + 1, n):\n if x % m == 0:\n dividible.append(x)\n return (\n \"\"\"There are {} numbers between {} and {} \nand the ones that are dividible by {} are {}\"\"\"\n .format(inBetween, m, n, m, dividible))\n else:\n return 'n must be higher value then m'\n\n\nprint(modCount(10, 2))\n",
"step-4": "def modCount(n, m):\n if(m <= n):\n inBetween = n - m\n dividible = []\n for x in range(m+1, n):\n if(x%m == 0):\n dividible.append(x)\n\n return 'There are {} numbers between {} and {} \\nand the ones that are dividible by {} are {}'.format(inBetween, m, n, m, dividible)\n else:\n return 'n must be higher value then m'\n\n\nprint(modCount(10,2))\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Solution:
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Solution:
def canDivideIntoSubsequences(self, nums: List[int], K: int) ->bool:
return len(nums) >= K * max(Counter(nums).values())
<|reserved_special_token_1|>
from collections import Counter
class Solution:
def canDivideIntoSubsequences(self, nums: List[int], K: int) ->bool:
return len(nums) >= K * max(Counter(nums).values())
<|reserved_special_token_1|>
#
# @lc app=leetcode id=1121 lang=python3
#
# [1121] Divide Array Into Increasing Sequences
#
# https://leetcode.com/problems/divide-array-into-increasing-sequences/description/
#
# algorithms
# Hard (53.30%)
# Likes: 32
# Dislikes: 11
# Total Accepted: 1.7K
# Total Submissions: 3.2K
# Testcase Example: '[1,2,2,3,3,4,4]\n3'
#
# Given a non-decreasing array of positive integers nums and an integer K, find
# out if this array can be divided into one or more disjoint increasing
# subsequences of length at least K.
#
#
#
# Example 1:
#
#
# Input: nums = [1,2,2,3,3,4,4], K = 3
# Output: true
# Explanation:
# The array can be divided into the two subsequences [1,2,3,4] and [2,3,4] with
# lengths at least 3 each.
#
#
# Example 2:
#
#
# Input: nums = [5,6,6,7,8], K = 3
# Output: false
# Explanation:
# There is no way to divide the array using the conditions required.
#
#
#
#
# Note:
#
#
# 1 <= nums.length <= 10^5
# 1 <= K <= nums.length
# 1 <= nums[i] <= 10^5
#
#
#
# @lc code=start
from collections import Counter
class Solution:
def canDivideIntoSubsequences(self, nums: List[int], K: int) -> bool:
return len(nums) >= K * max(Counter(nums).values())
# cur, groups = 1, 1
# for i in range(1, len(nums)):
# if nums[i] > nums[i - 1]:
# cur = 1
# else:
# cur += 1
# groups = max(groups, cur)
# return len(nums) >= K * groups
# @lc code=end
|
flexible
|
{
"blob_id": "6b55a9061bb118558e9077c77e18cfc81f3fa034",
"index": 1092,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Solution:\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Solution:\n\n def canDivideIntoSubsequences(self, nums: List[int], K: int) ->bool:\n return len(nums) >= K * max(Counter(nums).values())\n",
"step-4": "from collections import Counter\n\n\nclass Solution:\n\n def canDivideIntoSubsequences(self, nums: List[int], K: int) ->bool:\n return len(nums) >= K * max(Counter(nums).values())\n",
"step-5": "#\n# @lc app=leetcode id=1121 lang=python3\n#\n# [1121] Divide Array Into Increasing Sequences\n#\n# https://leetcode.com/problems/divide-array-into-increasing-sequences/description/\n#\n# algorithms\n# Hard (53.30%)\n# Likes: 32\n# Dislikes: 11\n# Total Accepted: 1.7K\n# Total Submissions: 3.2K\n# Testcase Example: '[1,2,2,3,3,4,4]\\n3'\n#\n# Given a non-decreasing array of positive integers nums and an integer K, find\n# out if this array can be divided into one or more disjoint increasing\n# subsequences of length at least K.\n# \n# \n# \n# Example 1:\n# \n# \n# Input: nums = [1,2,2,3,3,4,4], K = 3\n# Output: true\n# Explanation: \n# The array can be divided into the two subsequences [1,2,3,4] and [2,3,4] with\n# lengths at least 3 each.\n# \n# \n# Example 2:\n# \n# \n# Input: nums = [5,6,6,7,8], K = 3\n# Output: false\n# Explanation: \n# There is no way to divide the array using the conditions required.\n# \n# \n# \n# \n# Note:\n# \n# \n# 1 <= nums.length <= 10^5\n# 1 <= K <= nums.length\n# 1 <= nums[i] <= 10^5\n# \n# \n#\n\n# @lc code=start\nfrom collections import Counter\n\nclass Solution:\n def canDivideIntoSubsequences(self, nums: List[int], K: int) -> bool:\n\n return len(nums) >= K * max(Counter(nums).values())\n\n # cur, groups = 1, 1\n # for i in range(1, len(nums)):\n # if nums[i] > nums[i - 1]:\n # cur = 1\n # else:\n # cur += 1\n # groups = max(groups, cur)\n # return len(nums) >= K * groups\n \n# @lc code=end\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def convertEnEntier(nombre):
result = ''
if nombre == 4:
result = 'IV'
if nombre == 3:
result = 'III'
if nombre == 2:
result = 'II'
if nombre == 1:
result = 'I'
return result
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def convertEnEntier(nombre):
result = ''
if nombre == 4:
result = 'IV'
if nombre == 3:
result = 'III'
if nombre == 2:
result = 'II'
if nombre == 1:
result = 'I'
return result
print(convertEnEntier(1))
print(convertEnEntier(2))
print(convertEnEntier(3))
<|reserved_special_token_1|>
def convertEnEntier(nombre):
result = "";
if (nombre == 4):
result = "IV"
if (nombre == 3):
result = "III"
if (nombre == 2):
result = "II"
if (nombre == 1):
result = "I"
return result
print (convertEnEntier(1))
print (convertEnEntier(2))
print (convertEnEntier(3))
|
flexible
|
{
"blob_id": "ef7fad5019e79950e8fad56404e9ba5d302cfe1c",
"index": 7596,
"step-1": "<mask token>\n",
"step-2": "def convertEnEntier(nombre):\n result = ''\n if nombre == 4:\n result = 'IV'\n if nombre == 3:\n result = 'III'\n if nombre == 2:\n result = 'II'\n if nombre == 1:\n result = 'I'\n return result\n\n\n<mask token>\n",
"step-3": "def convertEnEntier(nombre):\n result = ''\n if nombre == 4:\n result = 'IV'\n if nombre == 3:\n result = 'III'\n if nombre == 2:\n result = 'II'\n if nombre == 1:\n result = 'I'\n return result\n\n\nprint(convertEnEntier(1))\nprint(convertEnEntier(2))\nprint(convertEnEntier(3))\n",
"step-4": "\r\ndef convertEnEntier(nombre):\r\n\r\n result = \"\";\r\n if (nombre == 4):\r\n result = \"IV\"\r\n if (nombre == 3):\r\n result = \"III\"\r\n if (nombre == 2):\r\n result = \"II\"\r\n if (nombre == 1):\r\n result = \"I\"\r\n\r\n return result\r\n\r\n\r\nprint (convertEnEntier(1))\r\nprint (convertEnEntier(2))\r\nprint (convertEnEntier(3))\r\n\r\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class AirflowPlugin(AirflowPlugin):
name = 'airflow-plugin'
operators = []
hooks = []
executors = []
macros = []
admin_views = []
flask_blueprints = []
menu_links = []
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class AirflowPlugin(AirflowPlugin):
name = 'airflow-plugin'
operators = []
hooks = []
executors = []
macros = []
admin_views = []
flask_blueprints = []
menu_links = []
<|reserved_special_token_0|>
app.register_blueprint(views, url_prefix='/views')
app.register_blueprint(log, url_prefix='/')
app.register_blueprint(native_log_bp, url_prefix='/native_log')
app.register_blueprint(my_log_pb, url_prefix='/my_log')
if __name__ == '__main__':
app.run(debug=True)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class AirflowPlugin(AirflowPlugin):
name = 'airflow-plugin'
operators = []
hooks = []
executors = []
macros = []
admin_views = []
flask_blueprints = []
menu_links = []
airflow_bp = Blueprint('airflow_bp', __name__)
app = Flask(__name__)
app.register_blueprint(views, url_prefix='/views')
app.register_blueprint(log, url_prefix='/')
app.register_blueprint(native_log_bp, url_prefix='/native_log')
app.register_blueprint(my_log_pb, url_prefix='/my_log')
if __name__ == '__main__':
app.run(debug=True)
<|reserved_special_token_1|>
from airflow.plugins_manager import AirflowPlugin
from flask import Blueprint, Flask
from rest_api.log.views import views
from rest_api.route.log_route import log
from rest_api.route.mylog_route import my_log_pb
from rest_api.route.native_log_route import native_log_bp
class AirflowPlugin(AirflowPlugin):
name = 'airflow-plugin'
operators = []
hooks = []
executors = []
macros = []
admin_views = []
flask_blueprints = []
menu_links = []
airflow_bp = Blueprint('airflow_bp', __name__)
app = Flask(__name__)
app.register_blueprint(views, url_prefix='/views')
app.register_blueprint(log, url_prefix='/')
app.register_blueprint(native_log_bp, url_prefix='/native_log')
app.register_blueprint(my_log_pb, url_prefix='/my_log')
if __name__ == '__main__':
app.run(debug=True)
<|reserved_special_token_1|>
from airflow.plugins_manager import AirflowPlugin
from flask import Blueprint, Flask
from rest_api.log.views import views
from rest_api.route.log_route import log
from rest_api.route.mylog_route import my_log_pb
from rest_api.route.native_log_route import native_log_bp
class AirflowPlugin(AirflowPlugin):
name = "airflow-plugin"
operators = []
# Leave in for explicitness
hooks = []
executors = []
macros = []
admin_views = []
flask_blueprints = []
menu_links = []
# 创建Blueprint实例
# Blueprint实例创建之后我们就可以通过@Blueprint实例名.route('/')语法为我们的模块创建路由
airflow_bp = Blueprint(
'airflow_bp',
__name__
)
app = Flask(__name__)
# 注册我们在views.py模块中创建的蓝图实例views, 并将他的URL前缀设置为`/views`
app.register_blueprint(views, url_prefix='/views')
app.register_blueprint(log, url_prefix='/')
app.register_blueprint(native_log_bp, url_prefix='/native_log')
app.register_blueprint(my_log_pb, url_prefix='/my_log')
if __name__ == '__main__':
app.run(debug=True)
|
flexible
|
{
"blob_id": "39f1fc04911f8d22d07532add24cd1671a569e72",
"index": 9414,
"step-1": "<mask token>\n\n\nclass AirflowPlugin(AirflowPlugin):\n name = 'airflow-plugin'\n operators = []\n hooks = []\n executors = []\n macros = []\n admin_views = []\n flask_blueprints = []\n menu_links = []\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass AirflowPlugin(AirflowPlugin):\n name = 'airflow-plugin'\n operators = []\n hooks = []\n executors = []\n macros = []\n admin_views = []\n flask_blueprints = []\n menu_links = []\n\n\n<mask token>\napp.register_blueprint(views, url_prefix='/views')\napp.register_blueprint(log, url_prefix='/')\napp.register_blueprint(native_log_bp, url_prefix='/native_log')\napp.register_blueprint(my_log_pb, url_prefix='/my_log')\nif __name__ == '__main__':\n app.run(debug=True)\n",
"step-3": "<mask token>\n\n\nclass AirflowPlugin(AirflowPlugin):\n name = 'airflow-plugin'\n operators = []\n hooks = []\n executors = []\n macros = []\n admin_views = []\n flask_blueprints = []\n menu_links = []\n\n\nairflow_bp = Blueprint('airflow_bp', __name__)\napp = Flask(__name__)\napp.register_blueprint(views, url_prefix='/views')\napp.register_blueprint(log, url_prefix='/')\napp.register_blueprint(native_log_bp, url_prefix='/native_log')\napp.register_blueprint(my_log_pb, url_prefix='/my_log')\nif __name__ == '__main__':\n app.run(debug=True)\n",
"step-4": "from airflow.plugins_manager import AirflowPlugin\nfrom flask import Blueprint, Flask\nfrom rest_api.log.views import views\nfrom rest_api.route.log_route import log\nfrom rest_api.route.mylog_route import my_log_pb\nfrom rest_api.route.native_log_route import native_log_bp\n\n\nclass AirflowPlugin(AirflowPlugin):\n name = 'airflow-plugin'\n operators = []\n hooks = []\n executors = []\n macros = []\n admin_views = []\n flask_blueprints = []\n menu_links = []\n\n\nairflow_bp = Blueprint('airflow_bp', __name__)\napp = Flask(__name__)\napp.register_blueprint(views, url_prefix='/views')\napp.register_blueprint(log, url_prefix='/')\napp.register_blueprint(native_log_bp, url_prefix='/native_log')\napp.register_blueprint(my_log_pb, url_prefix='/my_log')\nif __name__ == '__main__':\n app.run(debug=True)\n",
"step-5": "from airflow.plugins_manager import AirflowPlugin\nfrom flask import Blueprint, Flask\nfrom rest_api.log.views import views\nfrom rest_api.route.log_route import log\nfrom rest_api.route.mylog_route import my_log_pb\nfrom rest_api.route.native_log_route import native_log_bp\n\n\nclass AirflowPlugin(AirflowPlugin):\n name = \"airflow-plugin\"\n operators = []\n # Leave in for explicitness\n hooks = []\n executors = []\n macros = []\n admin_views = []\n flask_blueprints = []\n menu_links = []\n\n\n# 创建Blueprint实例\n# Blueprint实例创建之后我们就可以通过@Blueprint实例名.route('/')语法为我们的模块创建路由\nairflow_bp = Blueprint(\n 'airflow_bp',\n __name__\n)\n\n\napp = Flask(__name__)\n\n# 注册我们在views.py模块中创建的蓝图实例views, 并将他的URL前缀设置为`/views`\napp.register_blueprint(views, url_prefix='/views')\n\napp.register_blueprint(log, url_prefix='/')\n\napp.register_blueprint(native_log_bp, url_prefix='/native_log')\n\napp.register_blueprint(my_log_pb, url_prefix='/my_log')\n\nif __name__ == '__main__':\n app.run(debug=True)\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
# -*- coding: utf-8 -*-
# @Time : 2019/3/21 20:12
# @Author : for
# @File : test01.py
# @Software: PyCharm
import socket
s=socket.socket()
host=socket.gethostname()
port=3456
s.connect((host,port))
cmd=input(">>>")
s.sendall(cmd.encode())
data=s.recv(1024)
print(data.decode())
s.close()
|
normal
|
{
"blob_id": "596814032218c3db746f67e54e4f1863753aea06",
"index": 6299,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ns.connect((host, port))\n<mask token>\ns.sendall(cmd.encode())\n<mask token>\nprint(data.decode())\ns.close()\n",
"step-3": "<mask token>\ns = socket.socket()\nhost = socket.gethostname()\nport = 3456\ns.connect((host, port))\ncmd = input('>>>')\ns.sendall(cmd.encode())\ndata = s.recv(1024)\nprint(data.decode())\ns.close()\n",
"step-4": "import socket\ns = socket.socket()\nhost = socket.gethostname()\nport = 3456\ns.connect((host, port))\ncmd = input('>>>')\ns.sendall(cmd.encode())\ndata = s.recv(1024)\nprint(data.decode())\ns.close()\n",
"step-5": "# -*- coding: utf-8 -*-\r\n# @Time : 2019/3/21 20:12\r\n# @Author : for \r\n# @File : test01.py\r\n# @Software: PyCharm\r\nimport socket\r\n\r\ns=socket.socket()\r\n\r\nhost=socket.gethostname()\r\nport=3456\r\ns.connect((host,port))\r\n\r\ncmd=input(\">>>\")\r\ns.sendall(cmd.encode())\r\ndata=s.recv(1024)\r\nprint(data.decode())\r\n\r\ns.close()\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import cv2
import numpy as np
"""
# Create a black image
image = np.zeros((512,512,3), np.uint8)
# Can we make this in black and white?
image_bw = np.zeros((512,512), np.uint8)
cv2.imshow("Black Rectangle (Color)", image)
cv2.imshow("Black Rectangle (B&W)", image_bw)
cv2.waitKey(0)
cv2.destroyAllWindows()
image = np.zeros((512,512,3), np.uint8)
cv2.line(image, (0,0), (511,511), (255,127,0), 5) #Start Position , End positon of a line , RGB , 5 >> Thickness
cv2.imshow("Blue Line", image)
cv2.imwrite("blueline.jpg",image)
cv2.waitKey(0)
cv2.destroyAllWindows()
image = np.zeros((512,512,3), np.uint8)
cv2.rectangle(image, (100,100), (300,250), (127,50,127), -1)
cv2.imshow("Rectangle", image)
cv2.imwrite("Rectangle.jpg",image)
cv2.waitKey(0)
cv2.destroyAllWindows()
image = np.zeros((512,512,3), np.uint8)
cv2.circle(image, (350, 350), 100, (15,75,50), -1)
cv2.imshow("Circle", image)
cv2.imwrite("circle.jpg",image)
cv2.waitKey(0)
cv2.destroyAllWindows()
image = np.zeros((512,512,3), np.uint8)
# Let's define four points
pts = np.array( [[10,50], [400,50], [90,200], [50,500]], np.int32)
# Let's now reshape our points in form required by polylines
pts = pts.reshape((-1,1,2))
cv2.polylines(image, [pts], True, (0,0,255), 3)
cv2.imshow("Polygon", image)
cv2.imwrite("polygon.jpg",image)
cv2.waitKey(0)
cv2.destroyAllWindows()
"""
image = np.zeros((512,512,3), np.uint8)
cv2.putText(image, 'Hello World!', (75,290), cv2.FONT_HERSHEY_COMPLEX, 2, (100,170,0), 3)
cv2.imshow("Hello World!", image)
cv2.imwrite("Text.jpg",image)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
normal
|
{
"blob_id": "693f2a56578dfb1e4f9c73a0d33c5585070e9f9e",
"index": 5371,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ncv2.putText(image, 'Hello World!', (75, 290), cv2.FONT_HERSHEY_COMPLEX, 2,\n (100, 170, 0), 3)\ncv2.imshow('Hello World!', image)\ncv2.imwrite('Text.jpg', image)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n",
"step-3": "<mask token>\nimage = np.zeros((512, 512, 3), np.uint8)\ncv2.putText(image, 'Hello World!', (75, 290), cv2.FONT_HERSHEY_COMPLEX, 2,\n (100, 170, 0), 3)\ncv2.imshow('Hello World!', image)\ncv2.imwrite('Text.jpg', image)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n",
"step-4": "import cv2\nimport numpy as np\n<mask token>\nimage = np.zeros((512, 512, 3), np.uint8)\ncv2.putText(image, 'Hello World!', (75, 290), cv2.FONT_HERSHEY_COMPLEX, 2,\n (100, 170, 0), 3)\ncv2.imshow('Hello World!', image)\ncv2.imwrite('Text.jpg', image)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n",
"step-5": "import cv2\nimport numpy as np\n\"\"\"\n# Create a black image\nimage = np.zeros((512,512,3), np.uint8)\n\n# Can we make this in black and white?\nimage_bw = np.zeros((512,512), np.uint8)\n\ncv2.imshow(\"Black Rectangle (Color)\", image)\ncv2.imshow(\"Black Rectangle (B&W)\", image_bw)\n\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n\n\nimage = np.zeros((512,512,3), np.uint8)\ncv2.line(image, (0,0), (511,511), (255,127,0), 5) #Start Position , End positon of a line , RGB , 5 >> Thickness\ncv2.imshow(\"Blue Line\", image)\ncv2.imwrite(\"blueline.jpg\",image)\n\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n\n\n\nimage = np.zeros((512,512,3), np.uint8)\n\ncv2.rectangle(image, (100,100), (300,250), (127,50,127), -1)\ncv2.imshow(\"Rectangle\", image)\ncv2.imwrite(\"Rectangle.jpg\",image)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n\n\n\nimage = np.zeros((512,512,3), np.uint8)\n\ncv2.circle(image, (350, 350), 100, (15,75,50), -1) \ncv2.imshow(\"Circle\", image)\ncv2.imwrite(\"circle.jpg\",image)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n\n\n\n\n\nimage = np.zeros((512,512,3), np.uint8)\n\n# Let's define four points\npts = np.array( [[10,50], [400,50], [90,200], [50,500]], np.int32)\n\n# Let's now reshape our points in form required by polylines\npts = pts.reshape((-1,1,2))\n\ncv2.polylines(image, [pts], True, (0,0,255), 3)\ncv2.imshow(\"Polygon\", image)\ncv2.imwrite(\"polygon.jpg\",image)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n\n\n\"\"\"\n\nimage = np.zeros((512,512,3), np.uint8)\n\ncv2.putText(image, 'Hello World!', (75,290), cv2.FONT_HERSHEY_COMPLEX, 2, (100,170,0), 3)\ncv2.imshow(\"Hello World!\", image)\ncv2.imwrite(\"Text.jpg\",image)\ncv2.waitKey(0)\ncv2.destroyAllWindows()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def on_release(key):
if key == keyboard.Key.esc:
write_in_file()
return False
def write_in_file():
file = open('strokes.txt', 'a')
for k in list:
file.writelines('{}\n'.format(str(k)))
file.close()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def on_press(key):
global number_of_chars
global list
list.append(key)
number_of_chars += 1
if number_of_chars >= MAX_LENGTH:
write_in_file()
list.clear()
number_of_chars = 0
def on_release(key):
if key == keyboard.Key.esc:
write_in_file()
return False
def write_in_file():
file = open('strokes.txt', 'a')
for k in list:
file.writelines('{}\n'.format(str(k)))
file.close()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def on_press(key):
global number_of_chars
global list
list.append(key)
number_of_chars += 1
if number_of_chars >= MAX_LENGTH:
write_in_file()
list.clear()
number_of_chars = 0
def on_release(key):
if key == keyboard.Key.esc:
write_in_file()
return False
def write_in_file():
file = open('strokes.txt', 'a')
for k in list:
file.writelines('{}\n'.format(str(k)))
file.close()
open('strokes.txt', 'w').close()
with keyboard.Listener(on_press=on_press, on_release=on_release) as listener:
listener.join()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
list = []
number_of_chars = 0
MAX_LENGTH = 300
def on_press(key):
global number_of_chars
global list
list.append(key)
number_of_chars += 1
if number_of_chars >= MAX_LENGTH:
write_in_file()
list.clear()
number_of_chars = 0
def on_release(key):
if key == keyboard.Key.esc:
write_in_file()
return False
def write_in_file():
file = open('strokes.txt', 'a')
for k in list:
file.writelines('{}\n'.format(str(k)))
file.close()
open('strokes.txt', 'w').close()
with keyboard.Listener(on_press=on_press, on_release=on_release) as listener:
listener.join()
<|reserved_special_token_1|>
from pynput import keyboard
# list of chars entered by the user
list = []
number_of_chars = 0
# if entered chars go above MAX LENGTH they will be written inside a file
MAX_LENGTH = 300
def on_press(key):
global number_of_chars
global list
list.append(key)
number_of_chars+=1
if number_of_chars>=MAX_LENGTH:
write_in_file()
list.clear()
number_of_chars = 0
def on_release(key):
if key == keyboard.Key.esc:
# if the user exist write all the contents inside the file
write_in_file()
return False
def write_in_file():
file = open("strokes.txt","a")
for k in list:
file.writelines("{}\n".format(str(k)))
file.close()
# erases contents of the file when the program is runned
open("strokes.txt","w").close()
with keyboard.Listener(on_press = on_press,on_release=on_release) as listener:
listener.join()
|
flexible
|
{
"blob_id": "e60fcf19560b4826577797c8ae8b626ff984dcfd",
"index": 6923,
"step-1": "<mask token>\n\n\ndef on_release(key):\n if key == keyboard.Key.esc:\n write_in_file()\n return False\n\n\ndef write_in_file():\n file = open('strokes.txt', 'a')\n for k in list:\n file.writelines('{}\\n'.format(str(k)))\n file.close()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef on_press(key):\n global number_of_chars\n global list\n list.append(key)\n number_of_chars += 1\n if number_of_chars >= MAX_LENGTH:\n write_in_file()\n list.clear()\n number_of_chars = 0\n\n\ndef on_release(key):\n if key == keyboard.Key.esc:\n write_in_file()\n return False\n\n\ndef write_in_file():\n file = open('strokes.txt', 'a')\n for k in list:\n file.writelines('{}\\n'.format(str(k)))\n file.close()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef on_press(key):\n global number_of_chars\n global list\n list.append(key)\n number_of_chars += 1\n if number_of_chars >= MAX_LENGTH:\n write_in_file()\n list.clear()\n number_of_chars = 0\n\n\ndef on_release(key):\n if key == keyboard.Key.esc:\n write_in_file()\n return False\n\n\ndef write_in_file():\n file = open('strokes.txt', 'a')\n for k in list:\n file.writelines('{}\\n'.format(str(k)))\n file.close()\n\n\nopen('strokes.txt', 'w').close()\nwith keyboard.Listener(on_press=on_press, on_release=on_release) as listener:\n listener.join()\n",
"step-4": "<mask token>\nlist = []\nnumber_of_chars = 0\nMAX_LENGTH = 300\n\n\ndef on_press(key):\n global number_of_chars\n global list\n list.append(key)\n number_of_chars += 1\n if number_of_chars >= MAX_LENGTH:\n write_in_file()\n list.clear()\n number_of_chars = 0\n\n\ndef on_release(key):\n if key == keyboard.Key.esc:\n write_in_file()\n return False\n\n\ndef write_in_file():\n file = open('strokes.txt', 'a')\n for k in list:\n file.writelines('{}\\n'.format(str(k)))\n file.close()\n\n\nopen('strokes.txt', 'w').close()\nwith keyboard.Listener(on_press=on_press, on_release=on_release) as listener:\n listener.join()\n",
"step-5": "from pynput import keyboard\n\n# list of chars entered by the user\nlist = []\nnumber_of_chars = 0\n# if entered chars go above MAX LENGTH they will be written inside a file\nMAX_LENGTH = 300\n\ndef on_press(key):\n global number_of_chars\n global list\n \n list.append(key)\n number_of_chars+=1\n\n\n if number_of_chars>=MAX_LENGTH:\n write_in_file()\n list.clear()\n number_of_chars = 0\n\ndef on_release(key):\n if key == keyboard.Key.esc:\n # if the user exist write all the contents inside the file\n write_in_file()\n return False\n\ndef write_in_file():\n file = open(\"strokes.txt\",\"a\")\n for k in list:\n file.writelines(\"{}\\n\".format(str(k)))\n file.close()\n\n\n\n# erases contents of the file when the program is runned\nopen(\"strokes.txt\",\"w\").close()\n\nwith keyboard.Listener(on_press = on_press,on_release=on_release) as listener:\n listener.join()",
"step-ids": [
2,
3,
4,
5,
7
]
}
|
[
2,
3,
4,
5,
7
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def register_handlers_for_other(dp: Dispatcher):
dp.register_message_handler(new_member, content_types=['new_chat_members'])
dp.register_message_handler(left_member, content_types=['left_chat_member']
)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
async def new_member(message):
new_user = message.new_chat_members[0]
user_id = new_user['id']
if new_user['username']:
user_name = new_user['username']
elif new_user['first_name']:
user_name = new_user['first_name']
elif new_user['last_name']:
user_name = new_user['last_name']
else:
user_name = 'Пользователь без имени'
await sqlite_db.sql_add_user_to_db(user_id, user_name)
await bot.send_message(message.chat.id,
f"""Добро пожаловать, {user_name}!
Команда - /start переход в пользовательское меню.
Команда - /help помощь по командам бота."""
)
async def left_member(message):
left_user = message.left_chat_member
user_name = await sqlite_db.sql_get_user_name(left_user['id'])
user_name = user_name[0][0]
await sqlite_db.sql_del_user_from_db(left_user['id'])
await bot.send_message(message.chat.id,
f'Будем рады Вас видеть, {user_name}! Возвращайтесь!')
def register_handlers_for_other(dp: Dispatcher):
dp.register_message_handler(new_member, content_types=['new_chat_members'])
dp.register_message_handler(left_member, content_types=['left_chat_member']
)
<|reserved_special_token_1|>
from aiogram import Dispatcher
from create_bot import bot
from data_base import sqlite_db
async def new_member(message):
new_user = message.new_chat_members[0]
user_id = new_user['id']
if new_user['username']:
user_name = new_user['username']
elif new_user['first_name']:
user_name = new_user['first_name']
elif new_user['last_name']:
user_name = new_user['last_name']
else:
user_name = 'Пользователь без имени'
await sqlite_db.sql_add_user_to_db(user_id, user_name)
await bot.send_message(message.chat.id,
f"""Добро пожаловать, {user_name}!
Команда - /start переход в пользовательское меню.
Команда - /help помощь по командам бота."""
)
async def left_member(message):
left_user = message.left_chat_member
user_name = await sqlite_db.sql_get_user_name(left_user['id'])
user_name = user_name[0][0]
await sqlite_db.sql_del_user_from_db(left_user['id'])
await bot.send_message(message.chat.id,
f'Будем рады Вас видеть, {user_name}! Возвращайтесь!')
def register_handlers_for_other(dp: Dispatcher):
dp.register_message_handler(new_member, content_types=['new_chat_members'])
dp.register_message_handler(left_member, content_types=['left_chat_member']
)
<|reserved_special_token_1|>
from aiogram import Dispatcher
from create_bot import bot
from data_base import sqlite_db
# new user in group
async def new_member(message):
new_user = message.new_chat_members[0]
user_id = new_user['id']
if new_user['username']:
user_name = new_user['username']
elif new_user['first_name']:
user_name = new_user['first_name']
elif new_user['last_name']:
user_name = new_user['last_name']
else:
user_name = 'Пользователь без имени'
await sqlite_db.sql_add_user_to_db(user_id, user_name)
await bot.send_message(message.chat.id, f'Добро пожаловать, {user_name}!\nКоманда - /start переход'
f' в пользовательское меню.\nКоманда - /help помощь по командам бота.')
# left user from group
async def left_member(message):
left_user = message.left_chat_member
user_name = await sqlite_db.sql_get_user_name(left_user['id'])
user_name = user_name[0][0]
await sqlite_db.sql_del_user_from_db(left_user['id'])
await bot.send_message(message.chat.id, f'Будем рады Вас видеть, {user_name}! Возвращайтесь!')
def register_handlers_for_other(dp: Dispatcher):
dp.register_message_handler(new_member, content_types=["new_chat_members"])
dp.register_message_handler(left_member, content_types=["left_chat_member"])
|
flexible
|
{
"blob_id": "dfcfa4fa036fe8c058d66fc0b9ea73ddb9d4446e",
"index": 7524,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef register_handlers_for_other(dp: Dispatcher):\n dp.register_message_handler(new_member, content_types=['new_chat_members'])\n dp.register_message_handler(left_member, content_types=['left_chat_member']\n )\n",
"step-3": "<mask token>\n\n\nasync def new_member(message):\n new_user = message.new_chat_members[0]\n user_id = new_user['id']\n if new_user['username']:\n user_name = new_user['username']\n elif new_user['first_name']:\n user_name = new_user['first_name']\n elif new_user['last_name']:\n user_name = new_user['last_name']\n else:\n user_name = 'Пользователь без имени'\n await sqlite_db.sql_add_user_to_db(user_id, user_name)\n await bot.send_message(message.chat.id,\n f\"\"\"Добро пожаловать, {user_name}!\nКоманда - /start переход в пользовательское меню.\nКоманда - /help помощь по командам бота.\"\"\"\n )\n\n\nasync def left_member(message):\n left_user = message.left_chat_member\n user_name = await sqlite_db.sql_get_user_name(left_user['id'])\n user_name = user_name[0][0]\n await sqlite_db.sql_del_user_from_db(left_user['id'])\n await bot.send_message(message.chat.id,\n f'Будем рады Вас видеть, {user_name}! Возвращайтесь!')\n\n\ndef register_handlers_for_other(dp: Dispatcher):\n dp.register_message_handler(new_member, content_types=['new_chat_members'])\n dp.register_message_handler(left_member, content_types=['left_chat_member']\n )\n",
"step-4": "from aiogram import Dispatcher\nfrom create_bot import bot\nfrom data_base import sqlite_db\n\n\nasync def new_member(message):\n new_user = message.new_chat_members[0]\n user_id = new_user['id']\n if new_user['username']:\n user_name = new_user['username']\n elif new_user['first_name']:\n user_name = new_user['first_name']\n elif new_user['last_name']:\n user_name = new_user['last_name']\n else:\n user_name = 'Пользователь без имени'\n await sqlite_db.sql_add_user_to_db(user_id, user_name)\n await bot.send_message(message.chat.id,\n f\"\"\"Добро пожаловать, {user_name}!\nКоманда - /start переход в пользовательское меню.\nКоманда - /help помощь по командам бота.\"\"\"\n )\n\n\nasync def left_member(message):\n left_user = message.left_chat_member\n user_name = await sqlite_db.sql_get_user_name(left_user['id'])\n user_name = user_name[0][0]\n await sqlite_db.sql_del_user_from_db(left_user['id'])\n await bot.send_message(message.chat.id,\n f'Будем рады Вас видеть, {user_name}! Возвращайтесь!')\n\n\ndef register_handlers_for_other(dp: Dispatcher):\n dp.register_message_handler(new_member, content_types=['new_chat_members'])\n dp.register_message_handler(left_member, content_types=['left_chat_member']\n )\n",
"step-5": "from aiogram import Dispatcher\n\nfrom create_bot import bot\nfrom data_base import sqlite_db\n\n\n# new user in group\nasync def new_member(message):\n new_user = message.new_chat_members[0]\n user_id = new_user['id']\n if new_user['username']:\n user_name = new_user['username']\n elif new_user['first_name']:\n user_name = new_user['first_name']\n elif new_user['last_name']:\n user_name = new_user['last_name']\n else:\n user_name = 'Пользователь без имени'\n await sqlite_db.sql_add_user_to_db(user_id, user_name)\n await bot.send_message(message.chat.id, f'Добро пожаловать, {user_name}!\\nКоманда - /start переход'\n f' в пользовательское меню.\\nКоманда - /help помощь по командам бота.')\n\n\n# left user from group\nasync def left_member(message):\n left_user = message.left_chat_member\n user_name = await sqlite_db.sql_get_user_name(left_user['id'])\n user_name = user_name[0][0]\n await sqlite_db.sql_del_user_from_db(left_user['id'])\n await bot.send_message(message.chat.id, f'Будем рады Вас видеть, {user_name}! Возвращайтесь!')\n\n\ndef register_handlers_for_other(dp: Dispatcher):\n dp.register_message_handler(new_member, content_types=[\"new_chat_members\"])\n dp.register_message_handler(left_member, content_types=[\"left_chat_member\"])\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from flask import Flask, render_template, request
import random, requests
app = Flask(__name__)
@app.route('/')
def hello():
# return 'Hello World'
return render_template('index.html')
# root 디렉토리에 있는 templates라는 폴더를 탐색하여 파일을 찾음
@app.route('/ace')
def ace():
return '불기둥!'
@app.route('/html')
def html():
return '<h1> 태그 사용할 수 있어요! <h1>'
@app.route('/html_multiline')
# 동적 라우팅
@app.route('/greeting/<string:name>')
def greeting(name):
return render_template('index.html', html_name=name)
#세제곱을 되돌려주는 cube 페이지 작성!
#사용자에게 숫자값을 받아서, 세제곱한 결과를 보여주는 페이지
@app.route('/cube/<int:number>')
def cube(number):
result = number ** 3
return render_template('cube.html',number=number, result=result)
@app.route('/movies')
def movies():
movie_list = ['82년생김지영', '조커', '엔드게임', '궁예']
return render_template('movies.html', movies=movie_list)
# ping : 사용자로부터 입력을 받을 form 페이지를 넘겨준다
@app.route('/ping')
def ping():
return render_template('ping.html')
# pong : 사용자로부터 form 데이터를 전달받아서 가공한다
@app.route('/pong')
def pong():
user_name = request.args.get('user_name')
return render_template('pong.html', user_name=user_name)
# fake naver, google
@app.route('/naver')
def naver():
return render_template('naver.html')
# 사용자로부터 이름을 입력받을 Form 페이지!
@app.route('/vonvon')
def vonvon():
return render_template('vonvon.html')
# 전달받은 이름을 기준으로 넘겨줄 각종 정보를 가공해서 돌려주는 (응답)로직!
@app.route('/godmademe')
def godmademe():
# 1. 사용자가 입력한 데이터를 가져온다.
name = request.args.get('user_name')
# 2. 사용자에게 보여줄 여러가지 재밌는 특성들 리스트를 만든다.
first_list = ['잘생김','못생김','개성','키','몸무게','노안','동안','오징어']
second_list = ['게으름','성실함','근면함','낭비벽','신중함','덜렁거림','귀찮음']
third_list = ['식욕','똘끼','허세','우울함','가벼움']
# 3. 리스트에서 랜덤으로 하나씩을 선택한다.
first = random.choice(first_list)
second = random.choice(second_list)
third = random.choice(third_list)
# 4. 가공한 정보를 템플릿에 담아서 사용자에게 보여준다.
return render_template('godmademe.html', name=name, first=first, second=second, third=third)
# 1. 사용자로부터 임의의 텍스트를 입력받아서, 아스키 아트로 변환해서 돌려준다.
# 2. 이 때, 아스키 아트 폰트는 랜덤으로 하나를 지정해서 변환한다
@app.route('/catch')
def catch():
return render_template('catch.html')
@app.route('/result')
def result():
# 1. 사용자가 입력한 Form 데이터를 가져온다.
word = request.args.get("word")
# 2. ARTII API로 요청을 보내서, 응답 결과를 변수에 담는다. (폰트 정보들)
fonts = requests.get('http://artii.herokuapp.com/fonts_list').text
# 3. 가져온 폰트들을 리스트 형태로 바꾼다. -> 줄바꿈(\n)을 기준으로 변수 구분
fonts = fonts.split('\n')
# 4. 폰트 하나를 랜덤으로 선택한다.
font = random.choice(fonts)
# 5. 사용자가 입력한 단어와 랜덤으로 선택한 폰트 정보를 담아서 API에게 요청한다.
result = requests.get(f'http://artii.herokuapp.com/make?text={word}&font={font}').text
# 6. 최종 결과물을 사용자에게 돌려준다.
return render_template('result.html', result=result)
# 마지막에 꼭 넣어야 하는 코드
# debug 모드를 활성화해서 서버 새로고침을 생략한다
if __name__ == '__main__':
app.run(debug=True)
|
normal
|
{
"blob_id": "9fa3a7c57b311a47e67de73bf6083f1f151d73f4",
"index": 8554,
"step-1": "<mask token>\n\n\n@app.route('/html')\ndef html():\n return '<h1> 태그 사용할 수 있어요! <h1>'\n\n\n<mask token>\n\n\n@app.route('/ping')\ndef ping():\n return render_template('ping.html')\n\n\n@app.route('/pong')\ndef pong():\n user_name = request.args.get('user_name')\n return render_template('pong.html', user_name=user_name)\n\n\n<mask token>\n\n\n@app.route('/vonvon')\ndef vonvon():\n return render_template('vonvon.html')\n\n\n@app.route('/godmademe')\ndef godmademe():\n name = request.args.get('user_name')\n first_list = ['잘생김', '못생김', '개성', '키', '몸무게', '노안', '동안', '오징어']\n second_list = ['게으름', '성실함', '근면함', '낭비벽', '신중함', '덜렁거림', '귀찮음']\n third_list = ['식욕', '똘끼', '허세', '우울함', '가벼움']\n first = random.choice(first_list)\n second = random.choice(second_list)\n third = random.choice(third_list)\n return render_template('godmademe.html', name=name, first=first, second\n =second, third=third)\n\n\n@app.route('/catch')\ndef catch():\n return render_template('catch.html')\n\n\n@app.route('/result')\ndef result():\n word = request.args.get('word')\n fonts = requests.get('http://artii.herokuapp.com/fonts_list').text\n fonts = fonts.split('\\n')\n font = random.choice(fonts)\n result = requests.get(\n f'http://artii.herokuapp.com/make?text={word}&font={font}').text\n return render_template('result.html', result=result)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\n@app.route('/')\ndef hello():\n return render_template('index.html')\n\n\n@app.route('/ace')\ndef ace():\n return '불기둥!'\n\n\n@app.route('/html')\ndef html():\n return '<h1> 태그 사용할 수 있어요! <h1>'\n\n\n<mask token>\n\n\n@app.route('/cube/<int:number>')\ndef cube(number):\n result = number ** 3\n return render_template('cube.html', number=number, result=result)\n\n\n<mask token>\n\n\n@app.route('/ping')\ndef ping():\n return render_template('ping.html')\n\n\n@app.route('/pong')\ndef pong():\n user_name = request.args.get('user_name')\n return render_template('pong.html', user_name=user_name)\n\n\n<mask token>\n\n\n@app.route('/vonvon')\ndef vonvon():\n return render_template('vonvon.html')\n\n\n@app.route('/godmademe')\ndef godmademe():\n name = request.args.get('user_name')\n first_list = ['잘생김', '못생김', '개성', '키', '몸무게', '노안', '동안', '오징어']\n second_list = ['게으름', '성실함', '근면함', '낭비벽', '신중함', '덜렁거림', '귀찮음']\n third_list = ['식욕', '똘끼', '허세', '우울함', '가벼움']\n first = random.choice(first_list)\n second = random.choice(second_list)\n third = random.choice(third_list)\n return render_template('godmademe.html', name=name, first=first, second\n =second, third=third)\n\n\n@app.route('/catch')\ndef catch():\n return render_template('catch.html')\n\n\n@app.route('/result')\ndef result():\n word = request.args.get('word')\n fonts = requests.get('http://artii.herokuapp.com/fonts_list').text\n fonts = fonts.split('\\n')\n font = random.choice(fonts)\n result = requests.get(\n f'http://artii.herokuapp.com/make?text={word}&font={font}').text\n return render_template('result.html', result=result)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\n@app.route('/')\ndef hello():\n return render_template('index.html')\n\n\n@app.route('/ace')\ndef ace():\n return '불기둥!'\n\n\n@app.route('/html')\ndef html():\n return '<h1> 태그 사용할 수 있어요! <h1>'\n\n\n@app.route('/html_multiline')\n@app.route('/greeting/<string:name>')\ndef greeting(name):\n return render_template('index.html', html_name=name)\n\n\n@app.route('/cube/<int:number>')\ndef cube(number):\n result = number ** 3\n return render_template('cube.html', number=number, result=result)\n\n\n@app.route('/movies')\ndef movies():\n movie_list = ['82년생김지영', '조커', '엔드게임', '궁예']\n return render_template('movies.html', movies=movie_list)\n\n\n@app.route('/ping')\ndef ping():\n return render_template('ping.html')\n\n\n@app.route('/pong')\ndef pong():\n user_name = request.args.get('user_name')\n return render_template('pong.html', user_name=user_name)\n\n\n@app.route('/naver')\ndef naver():\n return render_template('naver.html')\n\n\n@app.route('/vonvon')\ndef vonvon():\n return render_template('vonvon.html')\n\n\n@app.route('/godmademe')\ndef godmademe():\n name = request.args.get('user_name')\n first_list = ['잘생김', '못생김', '개성', '키', '몸무게', '노안', '동안', '오징어']\n second_list = ['게으름', '성실함', '근면함', '낭비벽', '신중함', '덜렁거림', '귀찮음']\n third_list = ['식욕', '똘끼', '허세', '우울함', '가벼움']\n first = random.choice(first_list)\n second = random.choice(second_list)\n third = random.choice(third_list)\n return render_template('godmademe.html', name=name, first=first, second\n =second, third=third)\n\n\n@app.route('/catch')\ndef catch():\n return render_template('catch.html')\n\n\n@app.route('/result')\ndef result():\n word = request.args.get('word')\n fonts = requests.get('http://artii.herokuapp.com/fonts_list').text\n fonts = fonts.split('\\n')\n font = random.choice(fonts)\n result = requests.get(\n f'http://artii.herokuapp.com/make?text={word}&font={font}').text\n return render_template('result.html', result=result)\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n",
"step-4": "from flask import Flask, render_template, request\nimport random, requests\napp = Flask(__name__)\n\n\n@app.route('/')\ndef hello():\n return render_template('index.html')\n\n\n@app.route('/ace')\ndef ace():\n return '불기둥!'\n\n\n@app.route('/html')\ndef html():\n return '<h1> 태그 사용할 수 있어요! <h1>'\n\n\n@app.route('/html_multiline')\n@app.route('/greeting/<string:name>')\ndef greeting(name):\n return render_template('index.html', html_name=name)\n\n\n@app.route('/cube/<int:number>')\ndef cube(number):\n result = number ** 3\n return render_template('cube.html', number=number, result=result)\n\n\n@app.route('/movies')\ndef movies():\n movie_list = ['82년생김지영', '조커', '엔드게임', '궁예']\n return render_template('movies.html', movies=movie_list)\n\n\n@app.route('/ping')\ndef ping():\n return render_template('ping.html')\n\n\n@app.route('/pong')\ndef pong():\n user_name = request.args.get('user_name')\n return render_template('pong.html', user_name=user_name)\n\n\n@app.route('/naver')\ndef naver():\n return render_template('naver.html')\n\n\n@app.route('/vonvon')\ndef vonvon():\n return render_template('vonvon.html')\n\n\n@app.route('/godmademe')\ndef godmademe():\n name = request.args.get('user_name')\n first_list = ['잘생김', '못생김', '개성', '키', '몸무게', '노안', '동안', '오징어']\n second_list = ['게으름', '성실함', '근면함', '낭비벽', '신중함', '덜렁거림', '귀찮음']\n third_list = ['식욕', '똘끼', '허세', '우울함', '가벼움']\n first = random.choice(first_list)\n second = random.choice(second_list)\n third = random.choice(third_list)\n return render_template('godmademe.html', name=name, first=first, second\n =second, third=third)\n\n\n@app.route('/catch')\ndef catch():\n return render_template('catch.html')\n\n\n@app.route('/result')\ndef result():\n word = request.args.get('word')\n fonts = requests.get('http://artii.herokuapp.com/fonts_list').text\n fonts = fonts.split('\\n')\n font = random.choice(fonts)\n result = requests.get(\n f'http://artii.herokuapp.com/make?text={word}&font={font}').text\n return render_template('result.html', result=result)\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n",
"step-5": "from flask import Flask, render_template, request\nimport random, requests\napp = Flask(__name__)\n\n@app.route('/')\ndef hello():\n # return 'Hello World'\n return render_template('index.html')\n # root 디렉토리에 있는 templates라는 폴더를 탐색하여 파일을 찾음\n\n@app.route('/ace')\ndef ace():\n return '불기둥!'\n\n@app.route('/html')\ndef html():\n return '<h1> 태그 사용할 수 있어요! <h1>'\n\n@app.route('/html_multiline')\n\n# 동적 라우팅\n@app.route('/greeting/<string:name>')\ndef greeting(name):\n return render_template('index.html', html_name=name)\n\n#세제곱을 되돌려주는 cube 페이지 작성!\n#사용자에게 숫자값을 받아서, 세제곱한 결과를 보여주는 페이지\n@app.route('/cube/<int:number>')\ndef cube(number):\n result = number ** 3\n return render_template('cube.html',number=number, result=result)\n\n@app.route('/movies')\ndef movies():\n movie_list = ['82년생김지영', '조커', '엔드게임', '궁예']\n return render_template('movies.html', movies=movie_list)\n\n# ping : 사용자로부터 입력을 받을 form 페이지를 넘겨준다\n@app.route('/ping')\ndef ping():\n return render_template('ping.html')\n\n# pong : 사용자로부터 form 데이터를 전달받아서 가공한다\n@app.route('/pong')\ndef pong():\n user_name = request.args.get('user_name')\n return render_template('pong.html', user_name=user_name)\n\n# fake naver, google\n@app.route('/naver')\ndef naver():\n return render_template('naver.html')\n\n# 사용자로부터 이름을 입력받을 Form 페이지!\n@app.route('/vonvon')\ndef vonvon():\n return render_template('vonvon.html')\n\n# 전달받은 이름을 기준으로 넘겨줄 각종 정보를 가공해서 돌려주는 (응답)로직!\n@app.route('/godmademe')\ndef godmademe():\n # 1. 사용자가 입력한 데이터를 가져온다.\n name = request.args.get('user_name')\n # 2. 사용자에게 보여줄 여러가지 재밌는 특성들 리스트를 만든다.\n first_list = ['잘생김','못생김','개성','키','몸무게','노안','동안','오징어']\n second_list = ['게으름','성실함','근면함','낭비벽','신중함','덜렁거림','귀찮음']\n third_list = ['식욕','똘끼','허세','우울함','가벼움']\n # 3. 리스트에서 랜덤으로 하나씩을 선택한다.\n first = random.choice(first_list)\n second = random.choice(second_list)\n third = random.choice(third_list)\n # 4. 가공한 정보를 템플릿에 담아서 사용자에게 보여준다.\n return render_template('godmademe.html', name=name, first=first, second=second, third=third)\n\n# 1. 사용자로부터 임의의 텍스트를 입력받아서, 아스키 아트로 변환해서 돌려준다.\n# 2. 이 때, 아스키 아트 폰트는 랜덤으로 하나를 지정해서 변환한다\n@app.route('/catch')\ndef catch():\n return render_template('catch.html')\n\n@app.route('/result')\ndef result():\n # 1. 사용자가 입력한 Form 데이터를 가져온다.\n word = request.args.get(\"word\")\n # 2. ARTII API로 요청을 보내서, 응답 결과를 변수에 담는다. (폰트 정보들)\n fonts = requests.get('http://artii.herokuapp.com/fonts_list').text\n # 3. 가져온 폰트들을 리스트 형태로 바꾼다. -> 줄바꿈(\\n)을 기준으로 변수 구분\n fonts = fonts.split('\\n')\n # 4. 폰트 하나를 랜덤으로 선택한다.\n font = random.choice(fonts)\n # 5. 사용자가 입력한 단어와 랜덤으로 선택한 폰트 정보를 담아서 API에게 요청한다.\n result = requests.get(f'http://artii.herokuapp.com/make?text={word}&font={font}').text\n # 6. 최종 결과물을 사용자에게 돌려준다.\n return render_template('result.html', result=result)\n\n# 마지막에 꼭 넣어야 하는 코드\n# debug 모드를 활성화해서 서버 새로고침을 생략한다\nif __name__ == '__main__':\n app.run(debug=True)",
"step-ids": [
7,
10,
14,
16,
17
]
}
|
[
7,
10,
14,
16,
17
] |
<|reserved_special_token_0|>
class Company(models.Model):
<|reserved_special_token_0|>
@classmethod
def create(cls, name):
company = cls(name=name)
return company
def __str__(self):
return self.name
class Entry(models.Model):
fetched_date = models.DateTimeField()
message = models.CharField(max_length=200)
prediction = models.CharField(max_length=10)
parent_company = models.ForeignKey(Company, on_delete=models.CASCADE)
@classmethod
def create(cls, message, prediction, company):
entry = cls(message=message, prediction=prediction, parent_company=
company)
entry.fetched_date = datetime.now()
return entry
def __str__(self):
return self.fetched_date.strftime('%m/%d/%Y, %H:%M:%S'
) + ' ' + self.prediction + ':' + self.message
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Company(models.Model):
name = models.CharField(max_length=10)
@classmethod
def create(cls, name):
company = cls(name=name)
return company
def __str__(self):
return self.name
class Entry(models.Model):
fetched_date = models.DateTimeField()
message = models.CharField(max_length=200)
prediction = models.CharField(max_length=10)
parent_company = models.ForeignKey(Company, on_delete=models.CASCADE)
@classmethod
def create(cls, message, prediction, company):
entry = cls(message=message, prediction=prediction, parent_company=
company)
entry.fetched_date = datetime.now()
return entry
def __str__(self):
return self.fetched_date.strftime('%m/%d/%Y, %H:%M:%S'
) + ' ' + self.prediction + ':' + self.message
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Message(models.Model):
type = models.CharField(max_length=10)
body = models.CharField(max_length=50)
def __str__(self):
return self.type + ':' + self.body
class Company(models.Model):
name = models.CharField(max_length=10)
@classmethod
def create(cls, name):
company = cls(name=name)
return company
def __str__(self):
return self.name
class Entry(models.Model):
fetched_date = models.DateTimeField()
message = models.CharField(max_length=200)
prediction = models.CharField(max_length=10)
parent_company = models.ForeignKey(Company, on_delete=models.CASCADE)
@classmethod
def create(cls, message, prediction, company):
entry = cls(message=message, prediction=prediction, parent_company=
company)
entry.fetched_date = datetime.now()
return entry
def __str__(self):
return self.fetched_date.strftime('%m/%d/%Y, %H:%M:%S'
) + ' ' + self.prediction + ':' + self.message
<|reserved_special_token_1|>
from django.db import models
from datetime import datetime
class Message(models.Model):
type = models.CharField(max_length=10)
body = models.CharField(max_length=50)
def __str__(self):
return self.type + ':' + self.body
class Company(models.Model):
name = models.CharField(max_length=10)
@classmethod
def create(cls, name):
company = cls(name=name)
return company
def __str__(self):
return self.name
class Entry(models.Model):
fetched_date = models.DateTimeField()
message = models.CharField(max_length=200)
prediction = models.CharField(max_length=10)
parent_company = models.ForeignKey(Company, on_delete=models.CASCADE)
@classmethod
def create(cls, message, prediction, company):
entry = cls(message=message, prediction=prediction, parent_company=
company)
entry.fetched_date = datetime.now()
return entry
def __str__(self):
return self.fetched_date.strftime('%m/%d/%Y, %H:%M:%S'
) + ' ' + self.prediction + ':' + self.message
<|reserved_special_token_1|>
from django.db import models
from datetime import datetime
# Message model for testing purposes
class Message(models.Model):
type = models.CharField(max_length=10)
body = models.CharField(max_length=50)
def __str__(self):
return self.type + ":" + self.body
# Company model
class Company(models.Model):
name = models.CharField(max_length=10)
@classmethod
def create(cls, name):
company = cls(name=name)
return company
def __str__(self):
return self.name
# model for storing message and its prediction
class Entry(models.Model):
fetched_date = models.DateTimeField()
message = models.CharField(max_length=200)
prediction = models.CharField(max_length=10)
parent_company = models.ForeignKey(Company, on_delete=models.CASCADE)
@classmethod
def create(cls, message, prediction, company):
entry = cls(message=message, prediction=prediction, parent_company=company)
entry.fetched_date = datetime.now()
return entry
def __str__(self):
return self.fetched_date.strftime("%m/%d/%Y, %H:%M:%S") + " " + self.prediction + ":" + self.message
|
flexible
|
{
"blob_id": "47f6c4b3c279a065b8f21dab2faa71271db8d6ab",
"index": 6680,
"step-1": "<mask token>\n\n\nclass Company(models.Model):\n <mask token>\n\n @classmethod\n def create(cls, name):\n company = cls(name=name)\n return company\n\n def __str__(self):\n return self.name\n\n\nclass Entry(models.Model):\n fetched_date = models.DateTimeField()\n message = models.CharField(max_length=200)\n prediction = models.CharField(max_length=10)\n parent_company = models.ForeignKey(Company, on_delete=models.CASCADE)\n\n @classmethod\n def create(cls, message, prediction, company):\n entry = cls(message=message, prediction=prediction, parent_company=\n company)\n entry.fetched_date = datetime.now()\n return entry\n\n def __str__(self):\n return self.fetched_date.strftime('%m/%d/%Y, %H:%M:%S'\n ) + ' ' + self.prediction + ':' + self.message\n",
"step-2": "<mask token>\n\n\nclass Company(models.Model):\n name = models.CharField(max_length=10)\n\n @classmethod\n def create(cls, name):\n company = cls(name=name)\n return company\n\n def __str__(self):\n return self.name\n\n\nclass Entry(models.Model):\n fetched_date = models.DateTimeField()\n message = models.CharField(max_length=200)\n prediction = models.CharField(max_length=10)\n parent_company = models.ForeignKey(Company, on_delete=models.CASCADE)\n\n @classmethod\n def create(cls, message, prediction, company):\n entry = cls(message=message, prediction=prediction, parent_company=\n company)\n entry.fetched_date = datetime.now()\n return entry\n\n def __str__(self):\n return self.fetched_date.strftime('%m/%d/%Y, %H:%M:%S'\n ) + ' ' + self.prediction + ':' + self.message\n",
"step-3": "<mask token>\n\n\nclass Message(models.Model):\n type = models.CharField(max_length=10)\n body = models.CharField(max_length=50)\n\n def __str__(self):\n return self.type + ':' + self.body\n\n\nclass Company(models.Model):\n name = models.CharField(max_length=10)\n\n @classmethod\n def create(cls, name):\n company = cls(name=name)\n return company\n\n def __str__(self):\n return self.name\n\n\nclass Entry(models.Model):\n fetched_date = models.DateTimeField()\n message = models.CharField(max_length=200)\n prediction = models.CharField(max_length=10)\n parent_company = models.ForeignKey(Company, on_delete=models.CASCADE)\n\n @classmethod\n def create(cls, message, prediction, company):\n entry = cls(message=message, prediction=prediction, parent_company=\n company)\n entry.fetched_date = datetime.now()\n return entry\n\n def __str__(self):\n return self.fetched_date.strftime('%m/%d/%Y, %H:%M:%S'\n ) + ' ' + self.prediction + ':' + self.message\n",
"step-4": "from django.db import models\nfrom datetime import datetime\n\n\nclass Message(models.Model):\n type = models.CharField(max_length=10)\n body = models.CharField(max_length=50)\n\n def __str__(self):\n return self.type + ':' + self.body\n\n\nclass Company(models.Model):\n name = models.CharField(max_length=10)\n\n @classmethod\n def create(cls, name):\n company = cls(name=name)\n return company\n\n def __str__(self):\n return self.name\n\n\nclass Entry(models.Model):\n fetched_date = models.DateTimeField()\n message = models.CharField(max_length=200)\n prediction = models.CharField(max_length=10)\n parent_company = models.ForeignKey(Company, on_delete=models.CASCADE)\n\n @classmethod\n def create(cls, message, prediction, company):\n entry = cls(message=message, prediction=prediction, parent_company=\n company)\n entry.fetched_date = datetime.now()\n return entry\n\n def __str__(self):\n return self.fetched_date.strftime('%m/%d/%Y, %H:%M:%S'\n ) + ' ' + self.prediction + ':' + self.message\n",
"step-5": "from django.db import models\r\nfrom datetime import datetime\r\n\r\n\r\n# Message model for testing purposes\r\nclass Message(models.Model):\r\n type = models.CharField(max_length=10)\r\n body = models.CharField(max_length=50)\r\n\r\n def __str__(self):\r\n return self.type + \":\" + self.body\r\n\r\n\r\n# Company model\r\nclass Company(models.Model):\r\n name = models.CharField(max_length=10)\r\n\r\n @classmethod\r\n def create(cls, name):\r\n company = cls(name=name)\r\n return company\r\n\r\n def __str__(self):\r\n return self.name\r\n\r\n\r\n# model for storing message and its prediction\r\nclass Entry(models.Model):\r\n fetched_date = models.DateTimeField()\r\n message = models.CharField(max_length=200)\r\n prediction = models.CharField(max_length=10)\r\n parent_company = models.ForeignKey(Company, on_delete=models.CASCADE)\r\n\r\n @classmethod\r\n def create(cls, message, prediction, company):\r\n entry = cls(message=message, prediction=prediction, parent_company=company)\r\n entry.fetched_date = datetime.now()\r\n return entry\r\n\r\n def __str__(self):\r\n return self.fetched_date.strftime(\"%m/%d/%Y, %H:%M:%S\") + \" \" + self.prediction + \":\" + self.message\r\n",
"step-ids": [
7,
8,
11,
12,
13
]
}
|
[
7,
8,
11,
12,
13
] |
<|reserved_special_token_0|>
def array_to_stack(stack, source):
"""
-------------------------------------------------------
Pushes contents of source onto stack. At finish, source is empty.
Last value in source is at bottom of stack,
first value in source is on top of stack.
Use: array_to_stack(stack, source)
-------------------------------------------------------
Parameters:
stack - a Stack object (Stack)
source - a Python list (list)
Returns:
None
-------------------------------------------------------
"""
while source != []:
temp = source.pop()
stack.push(temp)
return
<|reserved_special_token_0|>
def queue_to_array(queue, target):
"""
-------------------------------------------------------
Removes contents of queue into target. At finish, queue is empty.
Front value of queue is at front of target,
rear value of queue is at end of target.
Use: queue_to_array(queue, target)
-------------------------------------------------------
Parameters:
queue - a Queue object (Queue)
target - a Python list (list)
Returns:
None
-------------------------------------------------------
"""
temp = None
while queue.is_empty() == False:
temp = queue.remove()
target.append(temp)
return
def array_to_pq(pq, source):
"""
-------------------------------------------------------
Inserts contents of source into pq. At finish, source is empty.
Last value in source is at rear of pq,
first value in source is at front of pq.
Use: array_to_pq(pq, source)
-------------------------------------------------------
Parameters:
pq - a Priority_Queue object (Priority_Queue)
source - a Python list (list)
Returns:
None
-------------------------------------------------------
"""
temp = None
while source != []:
temp = source.pop(0)
pq.insert(temp)
return
<|reserved_special_token_0|>
def list_to_array(llist, target):
"""
-------------------------------------------------------
Removes contents of llist into target. At finish, llist is empty.
Front element of llist is at front of target,
rear element of llist is at rear of target.
Use: list_to_array(llist, target)
-------------------------------------------------------
Parameters:
llist - a List object (List)
target - a Python list (list)
Returns:
None
-------------------------------------------------------
"""
while llist.is_empty() == False:
target.append(llist.pop(0))
return
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def array_to_stack(stack, source):
"""
-------------------------------------------------------
Pushes contents of source onto stack. At finish, source is empty.
Last value in source is at bottom of stack,
first value in source is on top of stack.
Use: array_to_stack(stack, source)
-------------------------------------------------------
Parameters:
stack - a Stack object (Stack)
source - a Python list (list)
Returns:
None
-------------------------------------------------------
"""
while source != []:
temp = source.pop()
stack.push(temp)
return
def stack_to_array(stack, target):
"""
-------------------------------------------------------
Pops contents of stack into target. At finish, stack is empty.
Top value of stack is at end of target,
bottom value of stack is at beginning of target.
Use: stack_to_array(stack, target)
-------------------------------------------------------
Parameters:
stack - a Stack object (Stack)
target - a Python list (list)
Returns:
None
-------------------------------------------------------
"""
while stack.is_empty() == False:
temp = stack.pop()
target.insert(0, temp)
return
def stack_test(source):
"""
-------------------------------------------------------
Tests the methods of Stack for empty and
non-empty stacks using the data in source:
is_empty, push, pop, peek
(Testing pop and peek while empty throws exceptions)
Use: stack_test(source)
-------------------------------------------------------
Parameters:
source - list of data (list of ?)
Returns:
None
-------------------------------------------------------
"""
stack = Stack()
dummy = []
if stack.is_empty() == True:
print('Stack is empty.')
array_to_stack(stack, source)
print('Converting source into a stack...')
if stack.is_empty() == False:
print('source has been transferred into stack!')
print('\nPopping stack...')
while stack.is_empty() == False:
temp = stack.pop()
print(temp)
dummy.append(temp)
print('\nstack is empty. Pushing values back into stack...')
while dummy != []:
temp = dummy.pop()
print(temp)
stack.push(temp)
print('\nPushing complete! Peeking...')
print(stack.peek())
return
def array_to_queue(queue, source):
"""
-------------------------------------------------------
Inserts contents of source into queue. At finish, source is empty.
Last value in source is at rear of queue,
first value in source is at front of queue.
Use: array_to_queue(queue, source)
-------------------------------------------------------
Parameters:
queue - a Queue object (Queue)
source - a Python list (list)
Returns:
None
-------------------------------------------------------
"""
temp = None
while source != []:
temp = source.pop(0)
queue.insert(temp)
return
def queue_to_array(queue, target):
"""
-------------------------------------------------------
Removes contents of queue into target. At finish, queue is empty.
Front value of queue is at front of target,
rear value of queue is at end of target.
Use: queue_to_array(queue, target)
-------------------------------------------------------
Parameters:
queue - a Queue object (Queue)
target - a Python list (list)
Returns:
None
-------------------------------------------------------
"""
temp = None
while queue.is_empty() == False:
temp = queue.remove()
target.append(temp)
return
def array_to_pq(pq, source):
"""
-------------------------------------------------------
Inserts contents of source into pq. At finish, source is empty.
Last value in source is at rear of pq,
first value in source is at front of pq.
Use: array_to_pq(pq, source)
-------------------------------------------------------
Parameters:
pq - a Priority_Queue object (Priority_Queue)
source - a Python list (list)
Returns:
None
-------------------------------------------------------
"""
temp = None
while source != []:
temp = source.pop(0)
pq.insert(temp)
return
<|reserved_special_token_0|>
def priority_queue_test(a):
"""
-------------------------------------------------------
Tests priority queue implementation.
Use: pq_test(a)
-------------------------------------------------------
Parameters:
a - list of data (list of ?)
Returns:
the methods of Priority_Queue are tested for both empty and
non-empty priority queues using the data in a:
is_empty, insert, remove, peek
-------------------------------------------------------
"""
pq = Priority_Queue()
dummy = []
if pq.is_empty() == True:
print('pq is empty.')
array_to_pq(pq, a)
print('Converting a into a pq...')
if pq.is_empty() == False:
print('a has been transferred into pq!')
print('\nRemoving pq...')
while pq.is_empty() == False:
temp = pq.remove()
print(temp)
dummy.append(temp)
print('\\pq is empty. Inserting values back into queue...')
while dummy != []:
temp = dummy.pop()
print(temp)
pq.insert(temp)
print('\nPushing complete! Peeking...')
print(pq.peek())
print('\npq is {} objects long!'.format(len(pq)))
return
<|reserved_special_token_0|>
def list_to_array(llist, target):
"""
-------------------------------------------------------
Removes contents of llist into target. At finish, llist is empty.
Front element of llist is at front of target,
rear element of llist is at rear of target.
Use: list_to_array(llist, target)
-------------------------------------------------------
Parameters:
llist - a List object (List)
target - a Python list (list)
Returns:
None
-------------------------------------------------------
"""
while llist.is_empty() == False:
target.append(llist.pop(0))
return
def list_test(a):
"""
-------------------------------------------------------
Tests list implementation.
The methods of List are tested for both empty and
non-empty lists using the data in a:
is_empty, insert, remove, append, index, __contains__,
find, count, max, min, __getitem__, __setitem__
Use: list_test(a)
-------------------------------------------------------
Parameters:
a - list of data (list of ?)
Returns:
None
-------------------------------------------------------
"""
lst = List()
if lst.is_empty() == True:
print('lst is empty.')
array_to_list(lst, a)
print('Converting a into a lst...')
if lst.is_empty() == False:
print('a has been transferred into lst!')
print('The movie at index 0 is {}'.format(lst[0]))
print('/nRemoving the movie at index 0...')
temp = lst.remove(lst[0])
print('Now the movie at index 0 is {}'.format(lst[0]))
print('/nInserting the movie at index 1...')
lst.insert(1, temp)
print('Now the movie at index 1 is {}'.format(lst[1]))
print('/nRemoving the movie at index 0...')
temp = lst.remove(lst[0])
print('/nAppending the movie...')
lst.append(temp)
print('Peeking...')
print(lst.peek())
print('/nThe index of the movie is {}'.format(lst.index(temp)))
print('/n{} appears {} time(s)'.format(temp, lst.count(temp)))
print('/nThe max is {}'.format(lst.max()))
print('The min is {}'.format(lst.min()))
print('/nThe movie is at index {}'.format(lst.find(temp)))
return
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def array_to_stack(stack, source):
"""
-------------------------------------------------------
Pushes contents of source onto stack. At finish, source is empty.
Last value in source is at bottom of stack,
first value in source is on top of stack.
Use: array_to_stack(stack, source)
-------------------------------------------------------
Parameters:
stack - a Stack object (Stack)
source - a Python list (list)
Returns:
None
-------------------------------------------------------
"""
while source != []:
temp = source.pop()
stack.push(temp)
return
def stack_to_array(stack, target):
"""
-------------------------------------------------------
Pops contents of stack into target. At finish, stack is empty.
Top value of stack is at end of target,
bottom value of stack is at beginning of target.
Use: stack_to_array(stack, target)
-------------------------------------------------------
Parameters:
stack - a Stack object (Stack)
target - a Python list (list)
Returns:
None
-------------------------------------------------------
"""
while stack.is_empty() == False:
temp = stack.pop()
target.insert(0, temp)
return
def stack_test(source):
"""
-------------------------------------------------------
Tests the methods of Stack for empty and
non-empty stacks using the data in source:
is_empty, push, pop, peek
(Testing pop and peek while empty throws exceptions)
Use: stack_test(source)
-------------------------------------------------------
Parameters:
source - list of data (list of ?)
Returns:
None
-------------------------------------------------------
"""
stack = Stack()
dummy = []
if stack.is_empty() == True:
print('Stack is empty.')
array_to_stack(stack, source)
print('Converting source into a stack...')
if stack.is_empty() == False:
print('source has been transferred into stack!')
print('\nPopping stack...')
while stack.is_empty() == False:
temp = stack.pop()
print(temp)
dummy.append(temp)
print('\nstack is empty. Pushing values back into stack...')
while dummy != []:
temp = dummy.pop()
print(temp)
stack.push(temp)
print('\nPushing complete! Peeking...')
print(stack.peek())
return
def array_to_queue(queue, source):
"""
-------------------------------------------------------
Inserts contents of source into queue. At finish, source is empty.
Last value in source is at rear of queue,
first value in source is at front of queue.
Use: array_to_queue(queue, source)
-------------------------------------------------------
Parameters:
queue - a Queue object (Queue)
source - a Python list (list)
Returns:
None
-------------------------------------------------------
"""
temp = None
while source != []:
temp = source.pop(0)
queue.insert(temp)
return
def queue_to_array(queue, target):
"""
-------------------------------------------------------
Removes contents of queue into target. At finish, queue is empty.
Front value of queue is at front of target,
rear value of queue is at end of target.
Use: queue_to_array(queue, target)
-------------------------------------------------------
Parameters:
queue - a Queue object (Queue)
target - a Python list (list)
Returns:
None
-------------------------------------------------------
"""
temp = None
while queue.is_empty() == False:
temp = queue.remove()
target.append(temp)
return
def array_to_pq(pq, source):
"""
-------------------------------------------------------
Inserts contents of source into pq. At finish, source is empty.
Last value in source is at rear of pq,
first value in source is at front of pq.
Use: array_to_pq(pq, source)
-------------------------------------------------------
Parameters:
pq - a Priority_Queue object (Priority_Queue)
source - a Python list (list)
Returns:
None
-------------------------------------------------------
"""
temp = None
while source != []:
temp = source.pop(0)
pq.insert(temp)
return
def pq_to_array(pq, target):
"""
-------------------------------------------------------
Removes contents of pq into target. At finish, pq is empty.
Highest priority value in pq is at front of target,
lowest priority value in pq is at end of target.
Use: pq_to_array(pq, target)
-------------------------------------------------------
Parameters:
pq - a Priority_Queue object (Priority_Queue)
target - a Python list (list)
Returns:
None
-------------------------------------------------------
"""
temp = None
while pq.is_empty() == False:
temp = pq.remove()
target.append(temp)
return
def queue_test(a):
"""
-------------------------------------------------------
Tests queue implementation.
Use: queue_test(a)
-------------------------------------------------------
Parameters:
a - list of data (list of ?)
Returns:
the methods of Queue are tested for both empty and
non-empty queues using the data in a:
is_empty, insert, remove, peek, len
-------------------------------------------------------
"""
queue = Queue()
dummy = []
if queue.is_empty() == True:
print('Queue is empty.')
array_to_queue(queue, a)
print('Converting a into a queue...')
if queue.is_empty() == False:
print('a has been transferred into queue!')
print('\nRemoving queue...')
while queue.is_empty() == False:
temp = queue.remove()
print(temp)
dummy.append(temp)
print('\nqueue is empty. Inserting values back into queue...')
while dummy != []:
temp = dummy.pop()
print(temp)
queue.insert(temp)
print('\nPushing complete! Peeking...')
print(queue.peek())
print('\nqueue is {} objects long!'.format(len(queue)))
return
def priority_queue_test(a):
"""
-------------------------------------------------------
Tests priority queue implementation.
Use: pq_test(a)
-------------------------------------------------------
Parameters:
a - list of data (list of ?)
Returns:
the methods of Priority_Queue are tested for both empty and
non-empty priority queues using the data in a:
is_empty, insert, remove, peek
-------------------------------------------------------
"""
pq = Priority_Queue()
dummy = []
if pq.is_empty() == True:
print('pq is empty.')
array_to_pq(pq, a)
print('Converting a into a pq...')
if pq.is_empty() == False:
print('a has been transferred into pq!')
print('\nRemoving pq...')
while pq.is_empty() == False:
temp = pq.remove()
print(temp)
dummy.append(temp)
print('\\pq is empty. Inserting values back into queue...')
while dummy != []:
temp = dummy.pop()
print(temp)
pq.insert(temp)
print('\nPushing complete! Peeking...')
print(pq.peek())
print('\npq is {} objects long!'.format(len(pq)))
return
def array_to_list(llist, source):
"""
-------------------------------------------------------
Appends contests of source to llist. At finish, source is empty.
Last element in source is at rear of llist,
first element in source is at front of llist.
Use: array_to_list(llist, source)
-------------------------------------------------------
Parameters:
llist - a List object (List)
source - a Python list (list)
Returns:
None
-------------------------------------------------------
"""
while source:
llist.append(source.pop(0))
return
def list_to_array(llist, target):
"""
-------------------------------------------------------
Removes contents of llist into target. At finish, llist is empty.
Front element of llist is at front of target,
rear element of llist is at rear of target.
Use: list_to_array(llist, target)
-------------------------------------------------------
Parameters:
llist - a List object (List)
target - a Python list (list)
Returns:
None
-------------------------------------------------------
"""
while llist.is_empty() == False:
target.append(llist.pop(0))
return
def list_test(a):
"""
-------------------------------------------------------
Tests list implementation.
The methods of List are tested for both empty and
non-empty lists using the data in a:
is_empty, insert, remove, append, index, __contains__,
find, count, max, min, __getitem__, __setitem__
Use: list_test(a)
-------------------------------------------------------
Parameters:
a - list of data (list of ?)
Returns:
None
-------------------------------------------------------
"""
lst = List()
if lst.is_empty() == True:
print('lst is empty.')
array_to_list(lst, a)
print('Converting a into a lst...')
if lst.is_empty() == False:
print('a has been transferred into lst!')
print('The movie at index 0 is {}'.format(lst[0]))
print('/nRemoving the movie at index 0...')
temp = lst.remove(lst[0])
print('Now the movie at index 0 is {}'.format(lst[0]))
print('/nInserting the movie at index 1...')
lst.insert(1, temp)
print('Now the movie at index 1 is {}'.format(lst[1]))
print('/nRemoving the movie at index 0...')
temp = lst.remove(lst[0])
print('/nAppending the movie...')
lst.append(temp)
print('Peeking...')
print(lst.peek())
print('/nThe index of the movie is {}'.format(lst.index(temp)))
print('/n{} appears {} time(s)'.format(temp, lst.count(temp)))
print('/nThe max is {}'.format(lst.max()))
print('The min is {}'.format(lst.min()))
print('/nThe movie is at index {}'.format(lst.find(temp)))
return
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from Stack_array import Stack
from Queue_array import Queue
from Priority_Queue_array import Priority_Queue
from List_array import List
def array_to_stack(stack, source):
"""
-------------------------------------------------------
Pushes contents of source onto stack. At finish, source is empty.
Last value in source is at bottom of stack,
first value in source is on top of stack.
Use: array_to_stack(stack, source)
-------------------------------------------------------
Parameters:
stack - a Stack object (Stack)
source - a Python list (list)
Returns:
None
-------------------------------------------------------
"""
while source != []:
temp = source.pop()
stack.push(temp)
return
def stack_to_array(stack, target):
"""
-------------------------------------------------------
Pops contents of stack into target. At finish, stack is empty.
Top value of stack is at end of target,
bottom value of stack is at beginning of target.
Use: stack_to_array(stack, target)
-------------------------------------------------------
Parameters:
stack - a Stack object (Stack)
target - a Python list (list)
Returns:
None
-------------------------------------------------------
"""
while stack.is_empty() == False:
temp = stack.pop()
target.insert(0, temp)
return
def stack_test(source):
"""
-------------------------------------------------------
Tests the methods of Stack for empty and
non-empty stacks using the data in source:
is_empty, push, pop, peek
(Testing pop and peek while empty throws exceptions)
Use: stack_test(source)
-------------------------------------------------------
Parameters:
source - list of data (list of ?)
Returns:
None
-------------------------------------------------------
"""
stack = Stack()
dummy = []
if stack.is_empty() == True:
print('Stack is empty.')
array_to_stack(stack, source)
print('Converting source into a stack...')
if stack.is_empty() == False:
print('source has been transferred into stack!')
print('\nPopping stack...')
while stack.is_empty() == False:
temp = stack.pop()
print(temp)
dummy.append(temp)
print('\nstack is empty. Pushing values back into stack...')
while dummy != []:
temp = dummy.pop()
print(temp)
stack.push(temp)
print('\nPushing complete! Peeking...')
print(stack.peek())
return
def array_to_queue(queue, source):
"""
-------------------------------------------------------
Inserts contents of source into queue. At finish, source is empty.
Last value in source is at rear of queue,
first value in source is at front of queue.
Use: array_to_queue(queue, source)
-------------------------------------------------------
Parameters:
queue - a Queue object (Queue)
source - a Python list (list)
Returns:
None
-------------------------------------------------------
"""
temp = None
while source != []:
temp = source.pop(0)
queue.insert(temp)
return
def queue_to_array(queue, target):
"""
-------------------------------------------------------
Removes contents of queue into target. At finish, queue is empty.
Front value of queue is at front of target,
rear value of queue is at end of target.
Use: queue_to_array(queue, target)
-------------------------------------------------------
Parameters:
queue - a Queue object (Queue)
target - a Python list (list)
Returns:
None
-------------------------------------------------------
"""
temp = None
while queue.is_empty() == False:
temp = queue.remove()
target.append(temp)
return
def array_to_pq(pq, source):
"""
-------------------------------------------------------
Inserts contents of source into pq. At finish, source is empty.
Last value in source is at rear of pq,
first value in source is at front of pq.
Use: array_to_pq(pq, source)
-------------------------------------------------------
Parameters:
pq - a Priority_Queue object (Priority_Queue)
source - a Python list (list)
Returns:
None
-------------------------------------------------------
"""
temp = None
while source != []:
temp = source.pop(0)
pq.insert(temp)
return
def pq_to_array(pq, target):
"""
-------------------------------------------------------
Removes contents of pq into target. At finish, pq is empty.
Highest priority value in pq is at front of target,
lowest priority value in pq is at end of target.
Use: pq_to_array(pq, target)
-------------------------------------------------------
Parameters:
pq - a Priority_Queue object (Priority_Queue)
target - a Python list (list)
Returns:
None
-------------------------------------------------------
"""
temp = None
while pq.is_empty() == False:
temp = pq.remove()
target.append(temp)
return
def queue_test(a):
"""
-------------------------------------------------------
Tests queue implementation.
Use: queue_test(a)
-------------------------------------------------------
Parameters:
a - list of data (list of ?)
Returns:
the methods of Queue are tested for both empty and
non-empty queues using the data in a:
is_empty, insert, remove, peek, len
-------------------------------------------------------
"""
queue = Queue()
dummy = []
if queue.is_empty() == True:
print('Queue is empty.')
array_to_queue(queue, a)
print('Converting a into a queue...')
if queue.is_empty() == False:
print('a has been transferred into queue!')
print('\nRemoving queue...')
while queue.is_empty() == False:
temp = queue.remove()
print(temp)
dummy.append(temp)
print('\nqueue is empty. Inserting values back into queue...')
while dummy != []:
temp = dummy.pop()
print(temp)
queue.insert(temp)
print('\nPushing complete! Peeking...')
print(queue.peek())
print('\nqueue is {} objects long!'.format(len(queue)))
return
def priority_queue_test(a):
"""
-------------------------------------------------------
Tests priority queue implementation.
Use: pq_test(a)
-------------------------------------------------------
Parameters:
a - list of data (list of ?)
Returns:
the methods of Priority_Queue are tested for both empty and
non-empty priority queues using the data in a:
is_empty, insert, remove, peek
-------------------------------------------------------
"""
pq = Priority_Queue()
dummy = []
if pq.is_empty() == True:
print('pq is empty.')
array_to_pq(pq, a)
print('Converting a into a pq...')
if pq.is_empty() == False:
print('a has been transferred into pq!')
print('\nRemoving pq...')
while pq.is_empty() == False:
temp = pq.remove()
print(temp)
dummy.append(temp)
print('\\pq is empty. Inserting values back into queue...')
while dummy != []:
temp = dummy.pop()
print(temp)
pq.insert(temp)
print('\nPushing complete! Peeking...')
print(pq.peek())
print('\npq is {} objects long!'.format(len(pq)))
return
def array_to_list(llist, source):
"""
-------------------------------------------------------
Appends contests of source to llist. At finish, source is empty.
Last element in source is at rear of llist,
first element in source is at front of llist.
Use: array_to_list(llist, source)
-------------------------------------------------------
Parameters:
llist - a List object (List)
source - a Python list (list)
Returns:
None
-------------------------------------------------------
"""
while source:
llist.append(source.pop(0))
return
def list_to_array(llist, target):
"""
-------------------------------------------------------
Removes contents of llist into target. At finish, llist is empty.
Front element of llist is at front of target,
rear element of llist is at rear of target.
Use: list_to_array(llist, target)
-------------------------------------------------------
Parameters:
llist - a List object (List)
target - a Python list (list)
Returns:
None
-------------------------------------------------------
"""
while llist.is_empty() == False:
target.append(llist.pop(0))
return
def list_test(a):
"""
-------------------------------------------------------
Tests list implementation.
The methods of List are tested for both empty and
non-empty lists using the data in a:
is_empty, insert, remove, append, index, __contains__,
find, count, max, min, __getitem__, __setitem__
Use: list_test(a)
-------------------------------------------------------
Parameters:
a - list of data (list of ?)
Returns:
None
-------------------------------------------------------
"""
lst = List()
if lst.is_empty() == True:
print('lst is empty.')
array_to_list(lst, a)
print('Converting a into a lst...')
if lst.is_empty() == False:
print('a has been transferred into lst!')
print('The movie at index 0 is {}'.format(lst[0]))
print('/nRemoving the movie at index 0...')
temp = lst.remove(lst[0])
print('Now the movie at index 0 is {}'.format(lst[0]))
print('/nInserting the movie at index 1...')
lst.insert(1, temp)
print('Now the movie at index 1 is {}'.format(lst[1]))
print('/nRemoving the movie at index 0...')
temp = lst.remove(lst[0])
print('/nAppending the movie...')
lst.append(temp)
print('Peeking...')
print(lst.peek())
print('/nThe index of the movie is {}'.format(lst.index(temp)))
print('/n{} appears {} time(s)'.format(temp, lst.count(temp)))
print('/nThe max is {}'.format(lst.max()))
print('The min is {}'.format(lst.min()))
print('/nThe movie is at index {}'.format(lst.find(temp)))
return
<|reserved_special_token_1|>
"""
-------------------------------------------------------
Stack utilities
-------------------------------------------------------
Author: Evan Attfield
ID: 180817010
Email: attf7010@mylaurier.ca
__updated__ = "Jan 22, 2019"
-------------------------------------------------------
"""
from Stack_array import Stack
from Queue_array import Queue
from Priority_Queue_array import Priority_Queue
from List_array import List
def array_to_stack(stack, source):
"""
-------------------------------------------------------
Pushes contents of source onto stack. At finish, source is empty.
Last value in source is at bottom of stack,
first value in source is on top of stack.
Use: array_to_stack(stack, source)
-------------------------------------------------------
Parameters:
stack - a Stack object (Stack)
source - a Python list (list)
Returns:
None
-------------------------------------------------------
"""
while source != []:
temp = source.pop()
stack.push(temp)
return
def stack_to_array(stack, target):
"""
-------------------------------------------------------
Pops contents of stack into target. At finish, stack is empty.
Top value of stack is at end of target,
bottom value of stack is at beginning of target.
Use: stack_to_array(stack, target)
-------------------------------------------------------
Parameters:
stack - a Stack object (Stack)
target - a Python list (list)
Returns:
None
-------------------------------------------------------
"""
while stack.is_empty() == False:
temp = stack.pop()
target.insert(0, temp) #adds temp to the beginning, while append adds temp to the end
return
def stack_test(source):
"""
-------------------------------------------------------
Tests the methods of Stack for empty and
non-empty stacks using the data in source:
is_empty, push, pop, peek
(Testing pop and peek while empty throws exceptions)
Use: stack_test(source)
-------------------------------------------------------
Parameters:
source - list of data (list of ?)
Returns:
None
-------------------------------------------------------
"""
stack = Stack()
dummy = []
if stack.is_empty() == True:
print('Stack is empty.')
array_to_stack(stack, source)
print('Converting source into a stack...')
if stack.is_empty() == False:
print('source has been transferred into stack!')
print('\nPopping stack...')
while stack.is_empty() == False:
temp = stack.pop()
print(temp)
dummy.append(temp)
print('\nstack is empty. Pushing values back into stack...')
while dummy != []:
temp = dummy.pop()
print(temp)
stack.push(temp)
print('\nPushing complete! Peeking...')
print(stack.peek())
return
def array_to_queue(queue, source):
"""
-------------------------------------------------------
Inserts contents of source into queue. At finish, source is empty.
Last value in source is at rear of queue,
first value in source is at front of queue.
Use: array_to_queue(queue, source)
-------------------------------------------------------
Parameters:
queue - a Queue object (Queue)
source - a Python list (list)
Returns:
None
-------------------------------------------------------
"""
temp = None
while source != []:
temp = source.pop(0)
queue.insert(temp)
return
def queue_to_array(queue, target):
"""
-------------------------------------------------------
Removes contents of queue into target. At finish, queue is empty.
Front value of queue is at front of target,
rear value of queue is at end of target.
Use: queue_to_array(queue, target)
-------------------------------------------------------
Parameters:
queue - a Queue object (Queue)
target - a Python list (list)
Returns:
None
-------------------------------------------------------
"""
temp = None
while queue.is_empty() == False:
temp = queue.remove()
target.append(temp)
return
def array_to_pq(pq, source):
"""
-------------------------------------------------------
Inserts contents of source into pq. At finish, source is empty.
Last value in source is at rear of pq,
first value in source is at front of pq.
Use: array_to_pq(pq, source)
-------------------------------------------------------
Parameters:
pq - a Priority_Queue object (Priority_Queue)
source - a Python list (list)
Returns:
None
-------------------------------------------------------
"""
temp = None
while source != []:
temp = source.pop(0)
pq.insert(temp)
return
def pq_to_array(pq, target):
"""
-------------------------------------------------------
Removes contents of pq into target. At finish, pq is empty.
Highest priority value in pq is at front of target,
lowest priority value in pq is at end of target.
Use: pq_to_array(pq, target)
-------------------------------------------------------
Parameters:
pq - a Priority_Queue object (Priority_Queue)
target - a Python list (list)
Returns:
None
-------------------------------------------------------
"""
temp = None
while pq.is_empty() == False:
temp = pq.remove()
target.append(temp)
return
def queue_test(a):
"""
-------------------------------------------------------
Tests queue implementation.
Use: queue_test(a)
-------------------------------------------------------
Parameters:
a - list of data (list of ?)
Returns:
the methods of Queue are tested for both empty and
non-empty queues using the data in a:
is_empty, insert, remove, peek, len
-------------------------------------------------------
"""
queue = Queue()
dummy = []
if queue.is_empty() == True:
print('Queue is empty.')
array_to_queue(queue, a)
print('Converting a into a queue...')
if queue.is_empty() == False:
print('a has been transferred into queue!')
print('\nRemoving queue...')
while queue.is_empty() == False:
temp = queue.remove()
print(temp)
dummy.append(temp)
print('\nqueue is empty. Inserting values back into queue...')
while dummy != []:
temp = dummy.pop()
print(temp)
queue.insert(temp)
print('\nPushing complete! Peeking...')
print(queue.peek())
print('\nqueue is {} objects long!'.format(len(queue)))
return
def priority_queue_test(a):
"""
-------------------------------------------------------
Tests priority queue implementation.
Use: pq_test(a)
-------------------------------------------------------
Parameters:
a - list of data (list of ?)
Returns:
the methods of Priority_Queue are tested for both empty and
non-empty priority queues using the data in a:
is_empty, insert, remove, peek
-------------------------------------------------------
"""
pq = Priority_Queue()
dummy = []
if pq.is_empty() == True:
print('pq is empty.')
array_to_pq(pq, a)
print('Converting a into a pq...')
if pq.is_empty() == False:
print('a has been transferred into pq!')
print('\nRemoving pq...')
while pq.is_empty() == False:
temp = pq.remove()
print(temp)
dummy.append(temp)
print('\pq is empty. Inserting values back into queue...')
while dummy != []:
temp = dummy.pop()
print(temp)
pq.insert(temp)
print('\nPushing complete! Peeking...')
print(pq.peek())
print('\npq is {} objects long!'.format(len(pq)))
return
def array_to_list(llist, source):
"""
-------------------------------------------------------
Appends contests of source to llist. At finish, source is empty.
Last element in source is at rear of llist,
first element in source is at front of llist.
Use: array_to_list(llist, source)
-------------------------------------------------------
Parameters:
llist - a List object (List)
source - a Python list (list)
Returns:
None
-------------------------------------------------------
"""
while source: #a list is considered True as long as it is not empty
llist.append(source.pop(0))
return
def list_to_array(llist, target):
"""
-------------------------------------------------------
Removes contents of llist into target. At finish, llist is empty.
Front element of llist is at front of target,
rear element of llist is at rear of target.
Use: list_to_array(llist, target)
-------------------------------------------------------
Parameters:
llist - a List object (List)
target - a Python list (list)
Returns:
None
-------------------------------------------------------
"""
while llist.is_empty() == False:
target.append(llist.pop(0))
return
def list_test(a):
"""
-------------------------------------------------------
Tests list implementation.
The methods of List are tested for both empty and
non-empty lists using the data in a:
is_empty, insert, remove, append, index, __contains__,
find, count, max, min, __getitem__, __setitem__
Use: list_test(a)
-------------------------------------------------------
Parameters:
a - list of data (list of ?)
Returns:
None
-------------------------------------------------------
"""
lst = List()
if lst.is_empty() == True:
print('lst is empty.')
array_to_list(lst, a)
print('Converting a into a lst...')
if lst.is_empty() == False:
print('a has been transferred into lst!')
print('The movie at index 0 is {}'.format(lst[0]))
print('/nRemoving the movie at index 0...')
temp = lst.remove(lst[0])
print('Now the movie at index 0 is {}'.format(lst[0]))
print('/nInserting the movie at index 1...')
lst.insert(1, temp)
print('Now the movie at index 1 is {}'.format(lst[1]))
print('/nRemoving the movie at index 0...')
temp = lst.remove(lst[0])
print('/nAppending the movie...')
lst.append(temp)
print('Peeking...')
print(lst.peek())
print('/nThe index of the movie is {}'.format(lst.index(temp)))
print('/n{} appears {} time(s)'.format(temp, lst.count(temp)))
print('/nThe max is {}'. format(lst.max()))
print('The min is {}'. format(lst.min()))
print('/nThe movie is at index {}'.format(lst.find(temp)))
return
|
flexible
|
{
"blob_id": "dab9b58b08b562d902ee0ae1104198cb1ebbffe5",
"index": 1928,
"step-1": "<mask token>\n\n\ndef array_to_stack(stack, source):\n \"\"\"\n -------------------------------------------------------\n Pushes contents of source onto stack. At finish, source is empty.\n Last value in source is at bottom of stack, \n first value in source is on top of stack.\n Use: array_to_stack(stack, source)\n -------------------------------------------------------\n Parameters:\n stack - a Stack object (Stack)\n source - a Python list (list)\n Returns:\n None\n -------------------------------------------------------\n \"\"\"\n while source != []:\n temp = source.pop()\n stack.push(temp)\n return\n\n\n<mask token>\n\n\ndef queue_to_array(queue, target):\n \"\"\"\n -------------------------------------------------------\n Removes contents of queue into target. At finish, queue is empty.\n Front value of queue is at front of target,\n rear value of queue is at end of target.\n Use: queue_to_array(queue, target)\n -------------------------------------------------------\n Parameters:\n queue - a Queue object (Queue)\n target - a Python list (list)\n Returns:\n None\n -------------------------------------------------------\n \"\"\"\n temp = None\n while queue.is_empty() == False:\n temp = queue.remove()\n target.append(temp)\n return\n\n\ndef array_to_pq(pq, source):\n \"\"\"\n -------------------------------------------------------\n Inserts contents of source into pq. At finish, source is empty.\n Last value in source is at rear of pq, \n first value in source is at front of pq.\n Use: array_to_pq(pq, source)\n -------------------------------------------------------\n Parameters:\n pq - a Priority_Queue object (Priority_Queue)\n source - a Python list (list)\n Returns:\n None\n -------------------------------------------------------\n \"\"\"\n temp = None\n while source != []:\n temp = source.pop(0)\n pq.insert(temp)\n return\n\n\n<mask token>\n\n\ndef list_to_array(llist, target):\n \"\"\"\n -------------------------------------------------------\n Removes contents of llist into target. At finish, llist is empty.\n Front element of llist is at front of target,\n rear element of llist is at rear of target.\n Use: list_to_array(llist, target)\n -------------------------------------------------------\n Parameters:\n llist - a List object (List)\n target - a Python list (list)\n Returns:\n None\n -------------------------------------------------------\n \"\"\"\n while llist.is_empty() == False:\n target.append(llist.pop(0))\n return\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef array_to_stack(stack, source):\n \"\"\"\n -------------------------------------------------------\n Pushes contents of source onto stack. At finish, source is empty.\n Last value in source is at bottom of stack, \n first value in source is on top of stack.\n Use: array_to_stack(stack, source)\n -------------------------------------------------------\n Parameters:\n stack - a Stack object (Stack)\n source - a Python list (list)\n Returns:\n None\n -------------------------------------------------------\n \"\"\"\n while source != []:\n temp = source.pop()\n stack.push(temp)\n return\n\n\ndef stack_to_array(stack, target):\n \"\"\"\n -------------------------------------------------------\n Pops contents of stack into target. At finish, stack is empty.\n Top value of stack is at end of target,\n bottom value of stack is at beginning of target.\n Use: stack_to_array(stack, target)\n -------------------------------------------------------\n Parameters:\n stack - a Stack object (Stack)\n target - a Python list (list)\n Returns:\n None\n -------------------------------------------------------\n \"\"\"\n while stack.is_empty() == False:\n temp = stack.pop()\n target.insert(0, temp)\n return\n\n\ndef stack_test(source):\n \"\"\"\n -------------------------------------------------------\n Tests the methods of Stack for empty and \n non-empty stacks using the data in source:\n is_empty, push, pop, peek\n (Testing pop and peek while empty throws exceptions)\n Use: stack_test(source)\n -------------------------------------------------------\n Parameters:\n source - list of data (list of ?)\n Returns:\n None\n -------------------------------------------------------\n \"\"\"\n stack = Stack()\n dummy = []\n if stack.is_empty() == True:\n print('Stack is empty.')\n array_to_stack(stack, source)\n print('Converting source into a stack...')\n if stack.is_empty() == False:\n print('source has been transferred into stack!')\n print('\\nPopping stack...')\n while stack.is_empty() == False:\n temp = stack.pop()\n print(temp)\n dummy.append(temp)\n print('\\nstack is empty. Pushing values back into stack...')\n while dummy != []:\n temp = dummy.pop()\n print(temp)\n stack.push(temp)\n print('\\nPushing complete! Peeking...')\n print(stack.peek())\n return\n\n\ndef array_to_queue(queue, source):\n \"\"\"\n -------------------------------------------------------\n Inserts contents of source into queue. At finish, source is empty.\n Last value in source is at rear of queue, \n first value in source is at front of queue.\n Use: array_to_queue(queue, source)\n -------------------------------------------------------\n Parameters:\n queue - a Queue object (Queue)\n source - a Python list (list)\n Returns:\n None\n -------------------------------------------------------\n \"\"\"\n temp = None\n while source != []:\n temp = source.pop(0)\n queue.insert(temp)\n return\n\n\ndef queue_to_array(queue, target):\n \"\"\"\n -------------------------------------------------------\n Removes contents of queue into target. At finish, queue is empty.\n Front value of queue is at front of target,\n rear value of queue is at end of target.\n Use: queue_to_array(queue, target)\n -------------------------------------------------------\n Parameters:\n queue - a Queue object (Queue)\n target - a Python list (list)\n Returns:\n None\n -------------------------------------------------------\n \"\"\"\n temp = None\n while queue.is_empty() == False:\n temp = queue.remove()\n target.append(temp)\n return\n\n\ndef array_to_pq(pq, source):\n \"\"\"\n -------------------------------------------------------\n Inserts contents of source into pq. At finish, source is empty.\n Last value in source is at rear of pq, \n first value in source is at front of pq.\n Use: array_to_pq(pq, source)\n -------------------------------------------------------\n Parameters:\n pq - a Priority_Queue object (Priority_Queue)\n source - a Python list (list)\n Returns:\n None\n -------------------------------------------------------\n \"\"\"\n temp = None\n while source != []:\n temp = source.pop(0)\n pq.insert(temp)\n return\n\n\n<mask token>\n\n\ndef priority_queue_test(a):\n \"\"\"\n -------------------------------------------------------\n Tests priority queue implementation.\n Use: pq_test(a)\n -------------------------------------------------------\n Parameters:\n a - list of data (list of ?)\n Returns:\n the methods of Priority_Queue are tested for both empty and \n non-empty priority queues using the data in a:\n is_empty, insert, remove, peek\n -------------------------------------------------------\n \"\"\"\n pq = Priority_Queue()\n dummy = []\n if pq.is_empty() == True:\n print('pq is empty.')\n array_to_pq(pq, a)\n print('Converting a into a pq...')\n if pq.is_empty() == False:\n print('a has been transferred into pq!')\n print('\\nRemoving pq...')\n while pq.is_empty() == False:\n temp = pq.remove()\n print(temp)\n dummy.append(temp)\n print('\\\\pq is empty. Inserting values back into queue...')\n while dummy != []:\n temp = dummy.pop()\n print(temp)\n pq.insert(temp)\n print('\\nPushing complete! Peeking...')\n print(pq.peek())\n print('\\npq is {} objects long!'.format(len(pq)))\n return\n\n\n<mask token>\n\n\ndef list_to_array(llist, target):\n \"\"\"\n -------------------------------------------------------\n Removes contents of llist into target. At finish, llist is empty.\n Front element of llist is at front of target,\n rear element of llist is at rear of target.\n Use: list_to_array(llist, target)\n -------------------------------------------------------\n Parameters:\n llist - a List object (List)\n target - a Python list (list)\n Returns:\n None\n -------------------------------------------------------\n \"\"\"\n while llist.is_empty() == False:\n target.append(llist.pop(0))\n return\n\n\ndef list_test(a):\n \"\"\"\n -------------------------------------------------------\n Tests list implementation.\n The methods of List are tested for both empty and \n non-empty lists using the data in a:\n is_empty, insert, remove, append, index, __contains__,\n find, count, max, min, __getitem__, __setitem__\n Use: list_test(a)\n -------------------------------------------------------\n Parameters:\n a - list of data (list of ?)\n Returns:\n None\n -------------------------------------------------------\n \"\"\"\n lst = List()\n if lst.is_empty() == True:\n print('lst is empty.')\n array_to_list(lst, a)\n print('Converting a into a lst...')\n if lst.is_empty() == False:\n print('a has been transferred into lst!')\n print('The movie at index 0 is {}'.format(lst[0]))\n print('/nRemoving the movie at index 0...')\n temp = lst.remove(lst[0])\n print('Now the movie at index 0 is {}'.format(lst[0]))\n print('/nInserting the movie at index 1...')\n lst.insert(1, temp)\n print('Now the movie at index 1 is {}'.format(lst[1]))\n print('/nRemoving the movie at index 0...')\n temp = lst.remove(lst[0])\n print('/nAppending the movie...')\n lst.append(temp)\n print('Peeking...')\n print(lst.peek())\n print('/nThe index of the movie is {}'.format(lst.index(temp)))\n print('/n{} appears {} time(s)'.format(temp, lst.count(temp)))\n print('/nThe max is {}'.format(lst.max()))\n print('The min is {}'.format(lst.min()))\n print('/nThe movie is at index {}'.format(lst.find(temp)))\n return\n",
"step-3": "<mask token>\n\n\ndef array_to_stack(stack, source):\n \"\"\"\n -------------------------------------------------------\n Pushes contents of source onto stack. At finish, source is empty.\n Last value in source is at bottom of stack, \n first value in source is on top of stack.\n Use: array_to_stack(stack, source)\n -------------------------------------------------------\n Parameters:\n stack - a Stack object (Stack)\n source - a Python list (list)\n Returns:\n None\n -------------------------------------------------------\n \"\"\"\n while source != []:\n temp = source.pop()\n stack.push(temp)\n return\n\n\ndef stack_to_array(stack, target):\n \"\"\"\n -------------------------------------------------------\n Pops contents of stack into target. At finish, stack is empty.\n Top value of stack is at end of target,\n bottom value of stack is at beginning of target.\n Use: stack_to_array(stack, target)\n -------------------------------------------------------\n Parameters:\n stack - a Stack object (Stack)\n target - a Python list (list)\n Returns:\n None\n -------------------------------------------------------\n \"\"\"\n while stack.is_empty() == False:\n temp = stack.pop()\n target.insert(0, temp)\n return\n\n\ndef stack_test(source):\n \"\"\"\n -------------------------------------------------------\n Tests the methods of Stack for empty and \n non-empty stacks using the data in source:\n is_empty, push, pop, peek\n (Testing pop and peek while empty throws exceptions)\n Use: stack_test(source)\n -------------------------------------------------------\n Parameters:\n source - list of data (list of ?)\n Returns:\n None\n -------------------------------------------------------\n \"\"\"\n stack = Stack()\n dummy = []\n if stack.is_empty() == True:\n print('Stack is empty.')\n array_to_stack(stack, source)\n print('Converting source into a stack...')\n if stack.is_empty() == False:\n print('source has been transferred into stack!')\n print('\\nPopping stack...')\n while stack.is_empty() == False:\n temp = stack.pop()\n print(temp)\n dummy.append(temp)\n print('\\nstack is empty. Pushing values back into stack...')\n while dummy != []:\n temp = dummy.pop()\n print(temp)\n stack.push(temp)\n print('\\nPushing complete! Peeking...')\n print(stack.peek())\n return\n\n\ndef array_to_queue(queue, source):\n \"\"\"\n -------------------------------------------------------\n Inserts contents of source into queue. At finish, source is empty.\n Last value in source is at rear of queue, \n first value in source is at front of queue.\n Use: array_to_queue(queue, source)\n -------------------------------------------------------\n Parameters:\n queue - a Queue object (Queue)\n source - a Python list (list)\n Returns:\n None\n -------------------------------------------------------\n \"\"\"\n temp = None\n while source != []:\n temp = source.pop(0)\n queue.insert(temp)\n return\n\n\ndef queue_to_array(queue, target):\n \"\"\"\n -------------------------------------------------------\n Removes contents of queue into target. At finish, queue is empty.\n Front value of queue is at front of target,\n rear value of queue is at end of target.\n Use: queue_to_array(queue, target)\n -------------------------------------------------------\n Parameters:\n queue - a Queue object (Queue)\n target - a Python list (list)\n Returns:\n None\n -------------------------------------------------------\n \"\"\"\n temp = None\n while queue.is_empty() == False:\n temp = queue.remove()\n target.append(temp)\n return\n\n\ndef array_to_pq(pq, source):\n \"\"\"\n -------------------------------------------------------\n Inserts contents of source into pq. At finish, source is empty.\n Last value in source is at rear of pq, \n first value in source is at front of pq.\n Use: array_to_pq(pq, source)\n -------------------------------------------------------\n Parameters:\n pq - a Priority_Queue object (Priority_Queue)\n source - a Python list (list)\n Returns:\n None\n -------------------------------------------------------\n \"\"\"\n temp = None\n while source != []:\n temp = source.pop(0)\n pq.insert(temp)\n return\n\n\ndef pq_to_array(pq, target):\n \"\"\"\n -------------------------------------------------------\n Removes contents of pq into target. At finish, pq is empty.\n Highest priority value in pq is at front of target,\n lowest priority value in pq is at end of target.\n Use: pq_to_array(pq, target)\n -------------------------------------------------------\n Parameters:\n pq - a Priority_Queue object (Priority_Queue)\n target - a Python list (list)\n Returns:\n None\n -------------------------------------------------------\n \"\"\"\n temp = None\n while pq.is_empty() == False:\n temp = pq.remove()\n target.append(temp)\n return\n\n\ndef queue_test(a):\n \"\"\"\n -------------------------------------------------------\n Tests queue implementation.\n Use: queue_test(a)\n -------------------------------------------------------\n Parameters:\n a - list of data (list of ?)\n Returns:\n the methods of Queue are tested for both empty and \n non-empty queues using the data in a:\n is_empty, insert, remove, peek, len\n -------------------------------------------------------\n \"\"\"\n queue = Queue()\n dummy = []\n if queue.is_empty() == True:\n print('Queue is empty.')\n array_to_queue(queue, a)\n print('Converting a into a queue...')\n if queue.is_empty() == False:\n print('a has been transferred into queue!')\n print('\\nRemoving queue...')\n while queue.is_empty() == False:\n temp = queue.remove()\n print(temp)\n dummy.append(temp)\n print('\\nqueue is empty. Inserting values back into queue...')\n while dummy != []:\n temp = dummy.pop()\n print(temp)\n queue.insert(temp)\n print('\\nPushing complete! Peeking...')\n print(queue.peek())\n print('\\nqueue is {} objects long!'.format(len(queue)))\n return\n\n\ndef priority_queue_test(a):\n \"\"\"\n -------------------------------------------------------\n Tests priority queue implementation.\n Use: pq_test(a)\n -------------------------------------------------------\n Parameters:\n a - list of data (list of ?)\n Returns:\n the methods of Priority_Queue are tested for both empty and \n non-empty priority queues using the data in a:\n is_empty, insert, remove, peek\n -------------------------------------------------------\n \"\"\"\n pq = Priority_Queue()\n dummy = []\n if pq.is_empty() == True:\n print('pq is empty.')\n array_to_pq(pq, a)\n print('Converting a into a pq...')\n if pq.is_empty() == False:\n print('a has been transferred into pq!')\n print('\\nRemoving pq...')\n while pq.is_empty() == False:\n temp = pq.remove()\n print(temp)\n dummy.append(temp)\n print('\\\\pq is empty. Inserting values back into queue...')\n while dummy != []:\n temp = dummy.pop()\n print(temp)\n pq.insert(temp)\n print('\\nPushing complete! Peeking...')\n print(pq.peek())\n print('\\npq is {} objects long!'.format(len(pq)))\n return\n\n\ndef array_to_list(llist, source):\n \"\"\"\n -------------------------------------------------------\n Appends contests of source to llist. At finish, source is empty.\n Last element in source is at rear of llist, \n first element in source is at front of llist.\n Use: array_to_list(llist, source)\n -------------------------------------------------------\n Parameters:\n llist - a List object (List)\n source - a Python list (list)\n Returns:\n None\n -------------------------------------------------------\n \"\"\"\n while source:\n llist.append(source.pop(0))\n return\n\n\ndef list_to_array(llist, target):\n \"\"\"\n -------------------------------------------------------\n Removes contents of llist into target. At finish, llist is empty.\n Front element of llist is at front of target,\n rear element of llist is at rear of target.\n Use: list_to_array(llist, target)\n -------------------------------------------------------\n Parameters:\n llist - a List object (List)\n target - a Python list (list)\n Returns:\n None\n -------------------------------------------------------\n \"\"\"\n while llist.is_empty() == False:\n target.append(llist.pop(0))\n return\n\n\ndef list_test(a):\n \"\"\"\n -------------------------------------------------------\n Tests list implementation.\n The methods of List are tested for both empty and \n non-empty lists using the data in a:\n is_empty, insert, remove, append, index, __contains__,\n find, count, max, min, __getitem__, __setitem__\n Use: list_test(a)\n -------------------------------------------------------\n Parameters:\n a - list of data (list of ?)\n Returns:\n None\n -------------------------------------------------------\n \"\"\"\n lst = List()\n if lst.is_empty() == True:\n print('lst is empty.')\n array_to_list(lst, a)\n print('Converting a into a lst...')\n if lst.is_empty() == False:\n print('a has been transferred into lst!')\n print('The movie at index 0 is {}'.format(lst[0]))\n print('/nRemoving the movie at index 0...')\n temp = lst.remove(lst[0])\n print('Now the movie at index 0 is {}'.format(lst[0]))\n print('/nInserting the movie at index 1...')\n lst.insert(1, temp)\n print('Now the movie at index 1 is {}'.format(lst[1]))\n print('/nRemoving the movie at index 0...')\n temp = lst.remove(lst[0])\n print('/nAppending the movie...')\n lst.append(temp)\n print('Peeking...')\n print(lst.peek())\n print('/nThe index of the movie is {}'.format(lst.index(temp)))\n print('/n{} appears {} time(s)'.format(temp, lst.count(temp)))\n print('/nThe max is {}'.format(lst.max()))\n print('The min is {}'.format(lst.min()))\n print('/nThe movie is at index {}'.format(lst.find(temp)))\n return\n",
"step-4": "<mask token>\nfrom Stack_array import Stack\nfrom Queue_array import Queue\nfrom Priority_Queue_array import Priority_Queue\nfrom List_array import List\n\n\ndef array_to_stack(stack, source):\n \"\"\"\n -------------------------------------------------------\n Pushes contents of source onto stack. At finish, source is empty.\n Last value in source is at bottom of stack, \n first value in source is on top of stack.\n Use: array_to_stack(stack, source)\n -------------------------------------------------------\n Parameters:\n stack - a Stack object (Stack)\n source - a Python list (list)\n Returns:\n None\n -------------------------------------------------------\n \"\"\"\n while source != []:\n temp = source.pop()\n stack.push(temp)\n return\n\n\ndef stack_to_array(stack, target):\n \"\"\"\n -------------------------------------------------------\n Pops contents of stack into target. At finish, stack is empty.\n Top value of stack is at end of target,\n bottom value of stack is at beginning of target.\n Use: stack_to_array(stack, target)\n -------------------------------------------------------\n Parameters:\n stack - a Stack object (Stack)\n target - a Python list (list)\n Returns:\n None\n -------------------------------------------------------\n \"\"\"\n while stack.is_empty() == False:\n temp = stack.pop()\n target.insert(0, temp)\n return\n\n\ndef stack_test(source):\n \"\"\"\n -------------------------------------------------------\n Tests the methods of Stack for empty and \n non-empty stacks using the data in source:\n is_empty, push, pop, peek\n (Testing pop and peek while empty throws exceptions)\n Use: stack_test(source)\n -------------------------------------------------------\n Parameters:\n source - list of data (list of ?)\n Returns:\n None\n -------------------------------------------------------\n \"\"\"\n stack = Stack()\n dummy = []\n if stack.is_empty() == True:\n print('Stack is empty.')\n array_to_stack(stack, source)\n print('Converting source into a stack...')\n if stack.is_empty() == False:\n print('source has been transferred into stack!')\n print('\\nPopping stack...')\n while stack.is_empty() == False:\n temp = stack.pop()\n print(temp)\n dummy.append(temp)\n print('\\nstack is empty. Pushing values back into stack...')\n while dummy != []:\n temp = dummy.pop()\n print(temp)\n stack.push(temp)\n print('\\nPushing complete! Peeking...')\n print(stack.peek())\n return\n\n\ndef array_to_queue(queue, source):\n \"\"\"\n -------------------------------------------------------\n Inserts contents of source into queue. At finish, source is empty.\n Last value in source is at rear of queue, \n first value in source is at front of queue.\n Use: array_to_queue(queue, source)\n -------------------------------------------------------\n Parameters:\n queue - a Queue object (Queue)\n source - a Python list (list)\n Returns:\n None\n -------------------------------------------------------\n \"\"\"\n temp = None\n while source != []:\n temp = source.pop(0)\n queue.insert(temp)\n return\n\n\ndef queue_to_array(queue, target):\n \"\"\"\n -------------------------------------------------------\n Removes contents of queue into target. At finish, queue is empty.\n Front value of queue is at front of target,\n rear value of queue is at end of target.\n Use: queue_to_array(queue, target)\n -------------------------------------------------------\n Parameters:\n queue - a Queue object (Queue)\n target - a Python list (list)\n Returns:\n None\n -------------------------------------------------------\n \"\"\"\n temp = None\n while queue.is_empty() == False:\n temp = queue.remove()\n target.append(temp)\n return\n\n\ndef array_to_pq(pq, source):\n \"\"\"\n -------------------------------------------------------\n Inserts contents of source into pq. At finish, source is empty.\n Last value in source is at rear of pq, \n first value in source is at front of pq.\n Use: array_to_pq(pq, source)\n -------------------------------------------------------\n Parameters:\n pq - a Priority_Queue object (Priority_Queue)\n source - a Python list (list)\n Returns:\n None\n -------------------------------------------------------\n \"\"\"\n temp = None\n while source != []:\n temp = source.pop(0)\n pq.insert(temp)\n return\n\n\ndef pq_to_array(pq, target):\n \"\"\"\n -------------------------------------------------------\n Removes contents of pq into target. At finish, pq is empty.\n Highest priority value in pq is at front of target,\n lowest priority value in pq is at end of target.\n Use: pq_to_array(pq, target)\n -------------------------------------------------------\n Parameters:\n pq - a Priority_Queue object (Priority_Queue)\n target - a Python list (list)\n Returns:\n None\n -------------------------------------------------------\n \"\"\"\n temp = None\n while pq.is_empty() == False:\n temp = pq.remove()\n target.append(temp)\n return\n\n\ndef queue_test(a):\n \"\"\"\n -------------------------------------------------------\n Tests queue implementation.\n Use: queue_test(a)\n -------------------------------------------------------\n Parameters:\n a - list of data (list of ?)\n Returns:\n the methods of Queue are tested for both empty and \n non-empty queues using the data in a:\n is_empty, insert, remove, peek, len\n -------------------------------------------------------\n \"\"\"\n queue = Queue()\n dummy = []\n if queue.is_empty() == True:\n print('Queue is empty.')\n array_to_queue(queue, a)\n print('Converting a into a queue...')\n if queue.is_empty() == False:\n print('a has been transferred into queue!')\n print('\\nRemoving queue...')\n while queue.is_empty() == False:\n temp = queue.remove()\n print(temp)\n dummy.append(temp)\n print('\\nqueue is empty. Inserting values back into queue...')\n while dummy != []:\n temp = dummy.pop()\n print(temp)\n queue.insert(temp)\n print('\\nPushing complete! Peeking...')\n print(queue.peek())\n print('\\nqueue is {} objects long!'.format(len(queue)))\n return\n\n\ndef priority_queue_test(a):\n \"\"\"\n -------------------------------------------------------\n Tests priority queue implementation.\n Use: pq_test(a)\n -------------------------------------------------------\n Parameters:\n a - list of data (list of ?)\n Returns:\n the methods of Priority_Queue are tested for both empty and \n non-empty priority queues using the data in a:\n is_empty, insert, remove, peek\n -------------------------------------------------------\n \"\"\"\n pq = Priority_Queue()\n dummy = []\n if pq.is_empty() == True:\n print('pq is empty.')\n array_to_pq(pq, a)\n print('Converting a into a pq...')\n if pq.is_empty() == False:\n print('a has been transferred into pq!')\n print('\\nRemoving pq...')\n while pq.is_empty() == False:\n temp = pq.remove()\n print(temp)\n dummy.append(temp)\n print('\\\\pq is empty. Inserting values back into queue...')\n while dummy != []:\n temp = dummy.pop()\n print(temp)\n pq.insert(temp)\n print('\\nPushing complete! Peeking...')\n print(pq.peek())\n print('\\npq is {} objects long!'.format(len(pq)))\n return\n\n\ndef array_to_list(llist, source):\n \"\"\"\n -------------------------------------------------------\n Appends contests of source to llist. At finish, source is empty.\n Last element in source is at rear of llist, \n first element in source is at front of llist.\n Use: array_to_list(llist, source)\n -------------------------------------------------------\n Parameters:\n llist - a List object (List)\n source - a Python list (list)\n Returns:\n None\n -------------------------------------------------------\n \"\"\"\n while source:\n llist.append(source.pop(0))\n return\n\n\ndef list_to_array(llist, target):\n \"\"\"\n -------------------------------------------------------\n Removes contents of llist into target. At finish, llist is empty.\n Front element of llist is at front of target,\n rear element of llist is at rear of target.\n Use: list_to_array(llist, target)\n -------------------------------------------------------\n Parameters:\n llist - a List object (List)\n target - a Python list (list)\n Returns:\n None\n -------------------------------------------------------\n \"\"\"\n while llist.is_empty() == False:\n target.append(llist.pop(0))\n return\n\n\ndef list_test(a):\n \"\"\"\n -------------------------------------------------------\n Tests list implementation.\n The methods of List are tested for both empty and \n non-empty lists using the data in a:\n is_empty, insert, remove, append, index, __contains__,\n find, count, max, min, __getitem__, __setitem__\n Use: list_test(a)\n -------------------------------------------------------\n Parameters:\n a - list of data (list of ?)\n Returns:\n None\n -------------------------------------------------------\n \"\"\"\n lst = List()\n if lst.is_empty() == True:\n print('lst is empty.')\n array_to_list(lst, a)\n print('Converting a into a lst...')\n if lst.is_empty() == False:\n print('a has been transferred into lst!')\n print('The movie at index 0 is {}'.format(lst[0]))\n print('/nRemoving the movie at index 0...')\n temp = lst.remove(lst[0])\n print('Now the movie at index 0 is {}'.format(lst[0]))\n print('/nInserting the movie at index 1...')\n lst.insert(1, temp)\n print('Now the movie at index 1 is {}'.format(lst[1]))\n print('/nRemoving the movie at index 0...')\n temp = lst.remove(lst[0])\n print('/nAppending the movie...')\n lst.append(temp)\n print('Peeking...')\n print(lst.peek())\n print('/nThe index of the movie is {}'.format(lst.index(temp)))\n print('/n{} appears {} time(s)'.format(temp, lst.count(temp)))\n print('/nThe max is {}'.format(lst.max()))\n print('The min is {}'.format(lst.min()))\n print('/nThe movie is at index {}'.format(lst.find(temp)))\n return\n",
"step-5": "\"\"\"\r\n-------------------------------------------------------\r\nStack utilities\r\n-------------------------------------------------------\r\nAuthor: Evan Attfield\r\nID: 180817010\r\nEmail: attf7010@mylaurier.ca\r\n__updated__ = \"Jan 22, 2019\"\r\n-------------------------------------------------------\r\n\"\"\"\r\nfrom Stack_array import Stack\r\nfrom Queue_array import Queue\r\nfrom Priority_Queue_array import Priority_Queue\r\nfrom List_array import List\r\n\r\ndef array_to_stack(stack, source):\r\n \"\"\"\r\n -------------------------------------------------------\r\n Pushes contents of source onto stack. At finish, source is empty.\r\n Last value in source is at bottom of stack, \r\n first value in source is on top of stack.\r\n Use: array_to_stack(stack, source)\r\n -------------------------------------------------------\r\n Parameters:\r\n stack - a Stack object (Stack)\r\n source - a Python list (list)\r\n Returns:\r\n None\r\n -------------------------------------------------------\r\n \"\"\"\r\n \r\n while source != []:\r\n temp = source.pop()\r\n stack.push(temp)\r\n \r\n return \r\n\r\ndef stack_to_array(stack, target):\r\n \"\"\"\r\n -------------------------------------------------------\r\n Pops contents of stack into target. At finish, stack is empty.\r\n Top value of stack is at end of target,\r\n bottom value of stack is at beginning of target.\r\n Use: stack_to_array(stack, target)\r\n -------------------------------------------------------\r\n Parameters:\r\n stack - a Stack object (Stack)\r\n target - a Python list (list)\r\n Returns:\r\n None\r\n -------------------------------------------------------\r\n \"\"\"\r\n \r\n while stack.is_empty() == False:\r\n temp = stack.pop()\r\n target.insert(0, temp) #adds temp to the beginning, while append adds temp to the end\r\n return \r\n \r\ndef stack_test(source):\r\n \"\"\"\r\n -------------------------------------------------------\r\n Tests the methods of Stack for empty and \r\n non-empty stacks using the data in source:\r\n is_empty, push, pop, peek\r\n (Testing pop and peek while empty throws exceptions)\r\n Use: stack_test(source)\r\n -------------------------------------------------------\r\n Parameters:\r\n source - list of data (list of ?)\r\n Returns:\r\n None\r\n -------------------------------------------------------\r\n \"\"\"\r\n stack = Stack()\r\n dummy = []\r\n if stack.is_empty() == True:\r\n print('Stack is empty.')\r\n \r\n array_to_stack(stack, source)\r\n print('Converting source into a stack...')\r\n \r\n if stack.is_empty() == False:\r\n print('source has been transferred into stack!')\r\n \r\n print('\\nPopping stack...')\r\n while stack.is_empty() == False:\r\n temp = stack.pop()\r\n print(temp)\r\n dummy.append(temp)\r\n \r\n print('\\nstack is empty. Pushing values back into stack...')\r\n while dummy != []:\r\n temp = dummy.pop()\r\n print(temp)\r\n stack.push(temp)\r\n \r\n print('\\nPushing complete! Peeking...')\r\n print(stack.peek())\r\n \r\n return\r\n \r\ndef array_to_queue(queue, source):\r\n \"\"\"\r\n -------------------------------------------------------\r\n Inserts contents of source into queue. At finish, source is empty.\r\n Last value in source is at rear of queue, \r\n first value in source is at front of queue.\r\n Use: array_to_queue(queue, source)\r\n -------------------------------------------------------\r\n Parameters:\r\n queue - a Queue object (Queue)\r\n source - a Python list (list)\r\n Returns:\r\n None\r\n -------------------------------------------------------\r\n \"\"\"\r\n temp = None\r\n \r\n while source != []:\r\n temp = source.pop(0)\r\n queue.insert(temp)\r\n\r\n return\r\ndef queue_to_array(queue, target):\r\n \"\"\"\r\n -------------------------------------------------------\r\n Removes contents of queue into target. At finish, queue is empty.\r\n Front value of queue is at front of target,\r\n rear value of queue is at end of target.\r\n Use: queue_to_array(queue, target)\r\n -------------------------------------------------------\r\n Parameters:\r\n queue - a Queue object (Queue)\r\n target - a Python list (list)\r\n Returns:\r\n None\r\n -------------------------------------------------------\r\n \"\"\"\r\n temp = None\r\n \r\n while queue.is_empty() == False:\r\n temp = queue.remove()\r\n target.append(temp)\r\n\r\n return\r\n\r\ndef array_to_pq(pq, source):\r\n \"\"\"\r\n -------------------------------------------------------\r\n Inserts contents of source into pq. At finish, source is empty.\r\n Last value in source is at rear of pq, \r\n first value in source is at front of pq.\r\n Use: array_to_pq(pq, source)\r\n -------------------------------------------------------\r\n Parameters:\r\n pq - a Priority_Queue object (Priority_Queue)\r\n source - a Python list (list)\r\n Returns:\r\n None\r\n -------------------------------------------------------\r\n \"\"\"\r\n temp = None\r\n \r\n while source != []:\r\n temp = source.pop(0)\r\n pq.insert(temp)\r\n\r\n return\r\n\r\ndef pq_to_array(pq, target):\r\n \"\"\"\r\n -------------------------------------------------------\r\n Removes contents of pq into target. At finish, pq is empty.\r\n Highest priority value in pq is at front of target,\r\n lowest priority value in pq is at end of target.\r\n Use: pq_to_array(pq, target)\r\n -------------------------------------------------------\r\n Parameters:\r\n pq - a Priority_Queue object (Priority_Queue)\r\n target - a Python list (list)\r\n Returns:\r\n None\r\n -------------------------------------------------------\r\n \"\"\"\r\n temp = None\r\n \r\n while pq.is_empty() == False:\r\n temp = pq.remove()\r\n target.append(temp)\r\n \r\n return\r\n\r\ndef queue_test(a):\r\n \"\"\"\r\n -------------------------------------------------------\r\n Tests queue implementation.\r\n Use: queue_test(a)\r\n -------------------------------------------------------\r\n Parameters:\r\n a - list of data (list of ?)\r\n Returns:\r\n the methods of Queue are tested for both empty and \r\n non-empty queues using the data in a:\r\n is_empty, insert, remove, peek, len\r\n -------------------------------------------------------\r\n \"\"\"\r\n queue = Queue()\r\n dummy = []\r\n if queue.is_empty() == True:\r\n print('Queue is empty.')\r\n \r\n array_to_queue(queue, a)\r\n print('Converting a into a queue...')\r\n \r\n if queue.is_empty() == False:\r\n print('a has been transferred into queue!')\r\n \r\n print('\\nRemoving queue...')\r\n while queue.is_empty() == False:\r\n temp = queue.remove()\r\n print(temp)\r\n dummy.append(temp)\r\n \r\n print('\\nqueue is empty. Inserting values back into queue...')\r\n while dummy != []:\r\n temp = dummy.pop()\r\n print(temp)\r\n queue.insert(temp)\r\n \r\n print('\\nPushing complete! Peeking...')\r\n print(queue.peek())\r\n \r\n print('\\nqueue is {} objects long!'.format(len(queue)))\r\n\r\n return\r\n\r\ndef priority_queue_test(a):\r\n \"\"\"\r\n -------------------------------------------------------\r\n Tests priority queue implementation.\r\n Use: pq_test(a)\r\n -------------------------------------------------------\r\n Parameters:\r\n a - list of data (list of ?)\r\n Returns:\r\n the methods of Priority_Queue are tested for both empty and \r\n non-empty priority queues using the data in a:\r\n is_empty, insert, remove, peek\r\n -------------------------------------------------------\r\n \"\"\"\r\n pq = Priority_Queue()\r\n dummy = []\r\n if pq.is_empty() == True:\r\n print('pq is empty.')\r\n \r\n array_to_pq(pq, a)\r\n print('Converting a into a pq...')\r\n \r\n if pq.is_empty() == False:\r\n print('a has been transferred into pq!')\r\n \r\n print('\\nRemoving pq...')\r\n while pq.is_empty() == False:\r\n temp = pq.remove()\r\n print(temp)\r\n dummy.append(temp)\r\n \r\n print('\\pq is empty. Inserting values back into queue...')\r\n while dummy != []:\r\n temp = dummy.pop()\r\n print(temp)\r\n pq.insert(temp)\r\n \r\n print('\\nPushing complete! Peeking...')\r\n print(pq.peek())\r\n \r\n print('\\npq is {} objects long!'.format(len(pq)))\r\n\r\n return\r\n\r\ndef array_to_list(llist, source):\r\n \"\"\"\r\n -------------------------------------------------------\r\n Appends contests of source to llist. At finish, source is empty.\r\n Last element in source is at rear of llist, \r\n first element in source is at front of llist.\r\n Use: array_to_list(llist, source)\r\n -------------------------------------------------------\r\n Parameters:\r\n llist - a List object (List)\r\n source - a Python list (list)\r\n Returns:\r\n None\r\n -------------------------------------------------------\r\n \"\"\"\r\n while source: #a list is considered True as long as it is not empty\r\n llist.append(source.pop(0))\r\n \r\n return\r\n\r\ndef list_to_array(llist, target):\r\n \"\"\"\r\n -------------------------------------------------------\r\n Removes contents of llist into target. At finish, llist is empty.\r\n Front element of llist is at front of target,\r\n rear element of llist is at rear of target.\r\n Use: list_to_array(llist, target)\r\n -------------------------------------------------------\r\n Parameters:\r\n llist - a List object (List)\r\n target - a Python list (list)\r\n Returns:\r\n None\r\n -------------------------------------------------------\r\n \"\"\"\r\n while llist.is_empty() == False:\r\n target.append(llist.pop(0))\r\n \r\n return\r\n\r\ndef list_test(a):\r\n \"\"\"\r\n -------------------------------------------------------\r\n Tests list implementation.\r\n The methods of List are tested for both empty and \r\n non-empty lists using the data in a:\r\n is_empty, insert, remove, append, index, __contains__,\r\n find, count, max, min, __getitem__, __setitem__\r\n Use: list_test(a)\r\n -------------------------------------------------------\r\n Parameters:\r\n a - list of data (list of ?)\r\n Returns:\r\n None\r\n -------------------------------------------------------\r\n \"\"\"\r\n lst = List()\r\n \r\n if lst.is_empty() == True:\r\n print('lst is empty.')\r\n \r\n array_to_list(lst, a)\r\n print('Converting a into a lst...')\r\n \r\n if lst.is_empty() == False:\r\n print('a has been transferred into lst!')\r\n \r\n print('The movie at index 0 is {}'.format(lst[0]))\r\n \r\n print('/nRemoving the movie at index 0...')\r\n temp = lst.remove(lst[0])\r\n print('Now the movie at index 0 is {}'.format(lst[0]))\r\n \r\n print('/nInserting the movie at index 1...')\r\n lst.insert(1, temp)\r\n print('Now the movie at index 1 is {}'.format(lst[1]))\r\n \r\n print('/nRemoving the movie at index 0...')\r\n temp = lst.remove(lst[0])\r\n \r\n print('/nAppending the movie...')\r\n lst.append(temp)\r\n \r\n print('Peeking...')\r\n print(lst.peek())\r\n \r\n print('/nThe index of the movie is {}'.format(lst.index(temp)))\r\n \r\n print('/n{} appears {} time(s)'.format(temp, lst.count(temp)))\r\n \r\n print('/nThe max is {}'. format(lst.max()))\r\n print('The min is {}'. format(lst.min()))\r\n \r\n print('/nThe movie is at index {}'.format(lst.find(temp)))\r\n \r\n \r\n\r\n return\r\n",
"step-ids": [
4,
9,
12,
13,
14
]
}
|
[
4,
9,
12,
13,
14
] |
<|reserved_special_token_0|>
class PingWindow:
<|reserved_special_token_0|>
def __init__(self, last_parent):
self.last_parent = last_parent
self.main_widget = QWidget()
self.main_widget.setMaximumHeight(400)
self.parent_layout = QVBoxLayout()
self.ping_log_layout = QHBoxLayout()
self.progress_bar_layout = QHBoxLayout()
self.secondary_progress_layout = QVBoxLayout()
self.control_button_layout = QGridLayout()
self.up_ip_layout = QVBoxLayout()
self.up_ip_btn = QtGui.QLabel('UP Nodes')
self.up_ip_btn.setFont(QtGui.QFont('Verdana', 10, QtGui.QFont.Bold))
self.up_ip_btn.setStyleSheet(
'background-color:white ;color:Green;border: 2px solid black')
self.up_ip_layout.addWidget(self.up_ip_btn)
self.up_ip_btn.setToolTip('Please click here to open UP NODE file.. !')
self.up_ip_btn.setStyleSheet(
'QToolTip { background-color: #00bfff; color: black; border: black solid 1px }'
)
self.up_ip_textbox = QPlainTextEdit()
self.up_ip_textbox.setFont(QtGui.QFont('Verdana', 10, QtGui.QFont.Bold)
)
self.up_ip_textbox.setStyleSheet(
'background-color: rgb(150,240,190) ;color:rgb(9,57,31);border: 2px solid black; '
)
self.up_ip_textbox.setReadOnly(True)
self.up_ip_layout.addWidget(self.up_ip_textbox)
self.down_ip_layout = QVBoxLayout()
self.down_ip_btn = QtGui.QLabel('DOWN Nodes')
self.down_ip_btn.setFont(QtGui.QFont('Verdana', 10, QtGui.QFont.Bold))
self.down_ip_btn.setStyleSheet(
'QPushButton {background-color: white ;color:Red;border: 2px solid black}'
)
self.down_ip_layout.addWidget(self.down_ip_btn)
self.down_ip_btn.setToolTip(
'Please click here to open UP NODE file.. !')
self.down_ip_btn.setStyleSheet(
'QToolTip { background-color: #00bfff; color: black; border: black solid 1px }'
)
self.down_ip_textbox = QPlainTextEdit()
self.down_ip_textbox.setFont(QtGui.QFont('Verdana', 10, QtGui.QFont
.Bold))
self.down_ip_textbox.setStyleSheet(
'background-color: rgb(250,210,210);color:rgb(118,14,16);border: 2px solid black; '
)
self.down_ip_textbox.setReadOnly(True)
self.down_ip_layout.addWidget(self.down_ip_textbox)
self.progress_bar_layout.addLayout(self.secondary_progress_layout)
self.progress_bar_layout.addLayout(self.control_button_layout)
self.ping_log_layout.addLayout(self.up_ip_layout)
self.ping_log_layout.addLayout(self.down_ip_layout)
self.parent_layout.addLayout(self.ping_log_layout)
self.parent_layout.addLayout(self.progress_bar_layout)
self.progressBar = QtGui.QProgressBar()
self.progressLabel = QtGui.QLabel(
'Ping process is in progress .... Please wait until the log file is generated...!'
)
self.cancel_button = QtGui.QPushButton('Cancel')
self.progressBar.setProperty('Current status', 0)
self.progressBar.setObjectName('progressBar')
self.progressBar.setMaximumHeight(15)
self.progressBar.setTextVisible(True)
self.progressBar.setValue(0)
self.progressBar.setRange(0, 100)
self.progressLabel.setFont(QtGui.QFont('verdana', 9, QtGui.QFont.
Normal))
self.secondary_progress_layout.addWidget(self.progressBar)
self.secondary_progress_layout.addWidget(self.progressLabel)
self.progress_bar_layout.addWidget(self.cancel_button)
self.main_widget.setLayout(self.parent_layout)
self.last_parent.right_base_layout_v.addWidget(self.main_widget)
self.main_widget.hide()
<|reserved_special_token_0|>
def closeEvent(self, event):
self.wind_close_flg = True
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class PingWindow:
<|reserved_special_token_0|>
def __init__(self, last_parent):
self.last_parent = last_parent
self.main_widget = QWidget()
self.main_widget.setMaximumHeight(400)
self.parent_layout = QVBoxLayout()
self.ping_log_layout = QHBoxLayout()
self.progress_bar_layout = QHBoxLayout()
self.secondary_progress_layout = QVBoxLayout()
self.control_button_layout = QGridLayout()
self.up_ip_layout = QVBoxLayout()
self.up_ip_btn = QtGui.QLabel('UP Nodes')
self.up_ip_btn.setFont(QtGui.QFont('Verdana', 10, QtGui.QFont.Bold))
self.up_ip_btn.setStyleSheet(
'background-color:white ;color:Green;border: 2px solid black')
self.up_ip_layout.addWidget(self.up_ip_btn)
self.up_ip_btn.setToolTip('Please click here to open UP NODE file.. !')
self.up_ip_btn.setStyleSheet(
'QToolTip { background-color: #00bfff; color: black; border: black solid 1px }'
)
self.up_ip_textbox = QPlainTextEdit()
self.up_ip_textbox.setFont(QtGui.QFont('Verdana', 10, QtGui.QFont.Bold)
)
self.up_ip_textbox.setStyleSheet(
'background-color: rgb(150,240,190) ;color:rgb(9,57,31);border: 2px solid black; '
)
self.up_ip_textbox.setReadOnly(True)
self.up_ip_layout.addWidget(self.up_ip_textbox)
self.down_ip_layout = QVBoxLayout()
self.down_ip_btn = QtGui.QLabel('DOWN Nodes')
self.down_ip_btn.setFont(QtGui.QFont('Verdana', 10, QtGui.QFont.Bold))
self.down_ip_btn.setStyleSheet(
'QPushButton {background-color: white ;color:Red;border: 2px solid black}'
)
self.down_ip_layout.addWidget(self.down_ip_btn)
self.down_ip_btn.setToolTip(
'Please click here to open UP NODE file.. !')
self.down_ip_btn.setStyleSheet(
'QToolTip { background-color: #00bfff; color: black; border: black solid 1px }'
)
self.down_ip_textbox = QPlainTextEdit()
self.down_ip_textbox.setFont(QtGui.QFont('Verdana', 10, QtGui.QFont
.Bold))
self.down_ip_textbox.setStyleSheet(
'background-color: rgb(250,210,210);color:rgb(118,14,16);border: 2px solid black; '
)
self.down_ip_textbox.setReadOnly(True)
self.down_ip_layout.addWidget(self.down_ip_textbox)
self.progress_bar_layout.addLayout(self.secondary_progress_layout)
self.progress_bar_layout.addLayout(self.control_button_layout)
self.ping_log_layout.addLayout(self.up_ip_layout)
self.ping_log_layout.addLayout(self.down_ip_layout)
self.parent_layout.addLayout(self.ping_log_layout)
self.parent_layout.addLayout(self.progress_bar_layout)
self.progressBar = QtGui.QProgressBar()
self.progressLabel = QtGui.QLabel(
'Ping process is in progress .... Please wait until the log file is generated...!'
)
self.cancel_button = QtGui.QPushButton('Cancel')
self.progressBar.setProperty('Current status', 0)
self.progressBar.setObjectName('progressBar')
self.progressBar.setMaximumHeight(15)
self.progressBar.setTextVisible(True)
self.progressBar.setValue(0)
self.progressBar.setRange(0, 100)
self.progressLabel.setFont(QtGui.QFont('verdana', 9, QtGui.QFont.
Normal))
self.secondary_progress_layout.addWidget(self.progressBar)
self.secondary_progress_layout.addWidget(self.progressLabel)
self.progress_bar_layout.addWidget(self.cancel_button)
self.main_widget.setLayout(self.parent_layout)
self.last_parent.right_base_layout_v.addWidget(self.main_widget)
self.main_widget.hide()
def prepare_window(self):
self.progressBar.show()
self.progressLabel.show()
self.cancel_button.show()
self.up_ip_textbox.clear()
self.down_ip_textbox.clear()
self.main_widget.show()
def closeEvent(self, event):
self.wind_close_flg = True
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class PingWindow:
wind_close_flg = False
def __init__(self, last_parent):
self.last_parent = last_parent
self.main_widget = QWidget()
self.main_widget.setMaximumHeight(400)
self.parent_layout = QVBoxLayout()
self.ping_log_layout = QHBoxLayout()
self.progress_bar_layout = QHBoxLayout()
self.secondary_progress_layout = QVBoxLayout()
self.control_button_layout = QGridLayout()
self.up_ip_layout = QVBoxLayout()
self.up_ip_btn = QtGui.QLabel('UP Nodes')
self.up_ip_btn.setFont(QtGui.QFont('Verdana', 10, QtGui.QFont.Bold))
self.up_ip_btn.setStyleSheet(
'background-color:white ;color:Green;border: 2px solid black')
self.up_ip_layout.addWidget(self.up_ip_btn)
self.up_ip_btn.setToolTip('Please click here to open UP NODE file.. !')
self.up_ip_btn.setStyleSheet(
'QToolTip { background-color: #00bfff; color: black; border: black solid 1px }'
)
self.up_ip_textbox = QPlainTextEdit()
self.up_ip_textbox.setFont(QtGui.QFont('Verdana', 10, QtGui.QFont.Bold)
)
self.up_ip_textbox.setStyleSheet(
'background-color: rgb(150,240,190) ;color:rgb(9,57,31);border: 2px solid black; '
)
self.up_ip_textbox.setReadOnly(True)
self.up_ip_layout.addWidget(self.up_ip_textbox)
self.down_ip_layout = QVBoxLayout()
self.down_ip_btn = QtGui.QLabel('DOWN Nodes')
self.down_ip_btn.setFont(QtGui.QFont('Verdana', 10, QtGui.QFont.Bold))
self.down_ip_btn.setStyleSheet(
'QPushButton {background-color: white ;color:Red;border: 2px solid black}'
)
self.down_ip_layout.addWidget(self.down_ip_btn)
self.down_ip_btn.setToolTip(
'Please click here to open UP NODE file.. !')
self.down_ip_btn.setStyleSheet(
'QToolTip { background-color: #00bfff; color: black; border: black solid 1px }'
)
self.down_ip_textbox = QPlainTextEdit()
self.down_ip_textbox.setFont(QtGui.QFont('Verdana', 10, QtGui.QFont
.Bold))
self.down_ip_textbox.setStyleSheet(
'background-color: rgb(250,210,210);color:rgb(118,14,16);border: 2px solid black; '
)
self.down_ip_textbox.setReadOnly(True)
self.down_ip_layout.addWidget(self.down_ip_textbox)
self.progress_bar_layout.addLayout(self.secondary_progress_layout)
self.progress_bar_layout.addLayout(self.control_button_layout)
self.ping_log_layout.addLayout(self.up_ip_layout)
self.ping_log_layout.addLayout(self.down_ip_layout)
self.parent_layout.addLayout(self.ping_log_layout)
self.parent_layout.addLayout(self.progress_bar_layout)
self.progressBar = QtGui.QProgressBar()
self.progressLabel = QtGui.QLabel(
'Ping process is in progress .... Please wait until the log file is generated...!'
)
self.cancel_button = QtGui.QPushButton('Cancel')
self.progressBar.setProperty('Current status', 0)
self.progressBar.setObjectName('progressBar')
self.progressBar.setMaximumHeight(15)
self.progressBar.setTextVisible(True)
self.progressBar.setValue(0)
self.progressBar.setRange(0, 100)
self.progressLabel.setFont(QtGui.QFont('verdana', 9, QtGui.QFont.
Normal))
self.secondary_progress_layout.addWidget(self.progressBar)
self.secondary_progress_layout.addWidget(self.progressLabel)
self.progress_bar_layout.addWidget(self.cancel_button)
self.main_widget.setLayout(self.parent_layout)
self.last_parent.right_base_layout_v.addWidget(self.main_widget)
self.main_widget.hide()
def prepare_window(self):
self.progressBar.show()
self.progressLabel.show()
self.cancel_button.show()
self.up_ip_textbox.clear()
self.down_ip_textbox.clear()
self.main_widget.show()
def closeEvent(self, event):
self.wind_close_flg = True
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import sys
from PySide.QtGui import *
from PySide.QtCore import *
from PySide import QtGui
from PySide import QtCore
class PingWindow:
wind_close_flg = False
def __init__(self, last_parent):
self.last_parent = last_parent
self.main_widget = QWidget()
self.main_widget.setMaximumHeight(400)
self.parent_layout = QVBoxLayout()
self.ping_log_layout = QHBoxLayout()
self.progress_bar_layout = QHBoxLayout()
self.secondary_progress_layout = QVBoxLayout()
self.control_button_layout = QGridLayout()
self.up_ip_layout = QVBoxLayout()
self.up_ip_btn = QtGui.QLabel('UP Nodes')
self.up_ip_btn.setFont(QtGui.QFont('Verdana', 10, QtGui.QFont.Bold))
self.up_ip_btn.setStyleSheet(
'background-color:white ;color:Green;border: 2px solid black')
self.up_ip_layout.addWidget(self.up_ip_btn)
self.up_ip_btn.setToolTip('Please click here to open UP NODE file.. !')
self.up_ip_btn.setStyleSheet(
'QToolTip { background-color: #00bfff; color: black; border: black solid 1px }'
)
self.up_ip_textbox = QPlainTextEdit()
self.up_ip_textbox.setFont(QtGui.QFont('Verdana', 10, QtGui.QFont.Bold)
)
self.up_ip_textbox.setStyleSheet(
'background-color: rgb(150,240,190) ;color:rgb(9,57,31);border: 2px solid black; '
)
self.up_ip_textbox.setReadOnly(True)
self.up_ip_layout.addWidget(self.up_ip_textbox)
self.down_ip_layout = QVBoxLayout()
self.down_ip_btn = QtGui.QLabel('DOWN Nodes')
self.down_ip_btn.setFont(QtGui.QFont('Verdana', 10, QtGui.QFont.Bold))
self.down_ip_btn.setStyleSheet(
'QPushButton {background-color: white ;color:Red;border: 2px solid black}'
)
self.down_ip_layout.addWidget(self.down_ip_btn)
self.down_ip_btn.setToolTip(
'Please click here to open UP NODE file.. !')
self.down_ip_btn.setStyleSheet(
'QToolTip { background-color: #00bfff; color: black; border: black solid 1px }'
)
self.down_ip_textbox = QPlainTextEdit()
self.down_ip_textbox.setFont(QtGui.QFont('Verdana', 10, QtGui.QFont
.Bold))
self.down_ip_textbox.setStyleSheet(
'background-color: rgb(250,210,210);color:rgb(118,14,16);border: 2px solid black; '
)
self.down_ip_textbox.setReadOnly(True)
self.down_ip_layout.addWidget(self.down_ip_textbox)
self.progress_bar_layout.addLayout(self.secondary_progress_layout)
self.progress_bar_layout.addLayout(self.control_button_layout)
self.ping_log_layout.addLayout(self.up_ip_layout)
self.ping_log_layout.addLayout(self.down_ip_layout)
self.parent_layout.addLayout(self.ping_log_layout)
self.parent_layout.addLayout(self.progress_bar_layout)
self.progressBar = QtGui.QProgressBar()
self.progressLabel = QtGui.QLabel(
'Ping process is in progress .... Please wait until the log file is generated...!'
)
self.cancel_button = QtGui.QPushButton('Cancel')
self.progressBar.setProperty('Current status', 0)
self.progressBar.setObjectName('progressBar')
self.progressBar.setMaximumHeight(15)
self.progressBar.setTextVisible(True)
self.progressBar.setValue(0)
self.progressBar.setRange(0, 100)
self.progressLabel.setFont(QtGui.QFont('verdana', 9, QtGui.QFont.
Normal))
self.secondary_progress_layout.addWidget(self.progressBar)
self.secondary_progress_layout.addWidget(self.progressLabel)
self.progress_bar_layout.addWidget(self.cancel_button)
self.main_widget.setLayout(self.parent_layout)
self.last_parent.right_base_layout_v.addWidget(self.main_widget)
self.main_widget.hide()
def prepare_window(self):
self.progressBar.show()
self.progressLabel.show()
self.cancel_button.show()
self.up_ip_textbox.clear()
self.down_ip_textbox.clear()
self.main_widget.show()
def closeEvent(self, event):
self.wind_close_flg = True
<|reserved_special_token_1|>
'''
Created on 17 june, 2018
@author: sp977u@att.com (Satish Palnati)
This class is for
'''
import sys
from PySide.QtGui import *
from PySide.QtCore import *
from PySide import QtGui
from PySide import QtCore
class PingWindow:
wind_close_flg = False
def __init__(self,last_parent):
self.last_parent = last_parent
self.main_widget = QWidget()
self.main_widget.setMaximumHeight(400)
self.parent_layout = QVBoxLayout()
#self.last_parent.right_base_layout_v
self.ping_log_layout = QHBoxLayout()
self.progress_bar_layout = QHBoxLayout() #to incorporate the progress bar and the buttons
self.secondary_progress_layout = QVBoxLayout() #just the progress bar
self.control_button_layout = QGridLayout() #cancel,close,open valid / invalid file
# UP ip layout for ping logs
self.up_ip_layout = QVBoxLayout()
self.up_ip_btn = QtGui.QLabel("UP Nodes")
self.up_ip_btn.setFont(QtGui.QFont("Verdana", 10, QtGui.QFont.Bold))
self.up_ip_btn.setStyleSheet("background-color:white ;color:Green;border: 2px solid black")
self.up_ip_layout.addWidget(self.up_ip_btn)
self.up_ip_btn.setToolTip("Please click here to open UP NODE file.. !")
self.up_ip_btn.setStyleSheet("""QToolTip { background-color: #00bfff; color: black; border: black solid 1px }""")
self.up_ip_textbox = QPlainTextEdit()
self.up_ip_textbox.setFont(QtGui.QFont("Verdana", 10, QtGui.QFont.Bold))
self.up_ip_textbox.setStyleSheet("background-color: rgb(150,240,190) ;color:rgb(9,57,31);border: 2px solid black; ")
self.up_ip_textbox.setReadOnly(True)
self.up_ip_layout.addWidget(self.up_ip_textbox)
# DOWN ip layout for ping logs
self.down_ip_layout = QVBoxLayout()
self.down_ip_btn = QtGui.QLabel("DOWN Nodes")
self.down_ip_btn.setFont(QtGui.QFont("Verdana", 10, QtGui.QFont.Bold))
self.down_ip_btn.setStyleSheet("QPushButton {background-color: white ;color:Red;border: 2px solid black}")
self.down_ip_layout.addWidget(self.down_ip_btn)
self.down_ip_btn.setToolTip("Please click here to open UP NODE file.. !")
self.down_ip_btn.setStyleSheet("""QToolTip { background-color: #00bfff; color: black; border: black solid 1px }""")
self.down_ip_textbox = QPlainTextEdit()
self.down_ip_textbox.setFont(QtGui.QFont("Verdana", 10, QtGui.QFont.Bold))
self.down_ip_textbox.setStyleSheet("background-color: rgb(250,210,210);color:rgb(118,14,16);border: 2px solid black; ")
self.down_ip_textbox.setReadOnly(True)
self.down_ip_layout.addWidget(self.down_ip_textbox)
self.progress_bar_layout.addLayout(self.secondary_progress_layout)
self.progress_bar_layout.addLayout(self.control_button_layout)
self.ping_log_layout.addLayout(self.up_ip_layout)
self.ping_log_layout.addLayout(self.down_ip_layout)
self.parent_layout.addLayout(self.ping_log_layout)
self.parent_layout.addLayout(self.progress_bar_layout)
self.progressBar = QtGui.QProgressBar()
self.progressLabel = QtGui.QLabel("Ping process is in progress .... Please wait until the log file is generated...!")
self.cancel_button = QtGui.QPushButton("Cancel")
# self.progressBar.setGeometry(QtCore.QRect(100, 645, 710, 17))
self.progressBar.setProperty("Current status", 0)
self.progressBar.setObjectName("progressBar")
self.progressBar.setMaximumHeight(15)
self.progressBar.setTextVisible(True)
self.progressBar.setValue(0)
self.progressBar.setRange(0,100)
self.progressLabel.setFont(QtGui.QFont("verdana", 9, QtGui.QFont.Normal))
self.secondary_progress_layout.addWidget(self.progressBar)
self.secondary_progress_layout.addWidget(self.progressLabel)
self.progress_bar_layout.addWidget(self.cancel_button)
# self.last_parent.msgBox.information(,'Job status!',"Ping logs process has been closed.!", QtGui.QMessageBox.Ok)
self.main_widget.setLayout(self.parent_layout)
self.last_parent.right_base_layout_v.addWidget(self.main_widget)
self.main_widget.hide()
def prepare_window(self,):
self.progressBar.show()
self.progressLabel.show()
self.cancel_button.show()
self.up_ip_textbox.clear()
self.down_ip_textbox.clear()
self.main_widget.show()
def closeEvent(self,event):
self.wind_close_flg = True
|
flexible
|
{
"blob_id": "75b1d2fb927063669a962f72deb57323001c0b7a",
"index": 5657,
"step-1": "<mask token>\n\n\nclass PingWindow:\n <mask token>\n\n def __init__(self, last_parent):\n self.last_parent = last_parent\n self.main_widget = QWidget()\n self.main_widget.setMaximumHeight(400)\n self.parent_layout = QVBoxLayout()\n self.ping_log_layout = QHBoxLayout()\n self.progress_bar_layout = QHBoxLayout()\n self.secondary_progress_layout = QVBoxLayout()\n self.control_button_layout = QGridLayout()\n self.up_ip_layout = QVBoxLayout()\n self.up_ip_btn = QtGui.QLabel('UP Nodes')\n self.up_ip_btn.setFont(QtGui.QFont('Verdana', 10, QtGui.QFont.Bold))\n self.up_ip_btn.setStyleSheet(\n 'background-color:white ;color:Green;border: 2px solid black')\n self.up_ip_layout.addWidget(self.up_ip_btn)\n self.up_ip_btn.setToolTip('Please click here to open UP NODE file.. !')\n self.up_ip_btn.setStyleSheet(\n 'QToolTip { background-color: #00bfff; color: black; border: black solid 1px }'\n )\n self.up_ip_textbox = QPlainTextEdit()\n self.up_ip_textbox.setFont(QtGui.QFont('Verdana', 10, QtGui.QFont.Bold)\n )\n self.up_ip_textbox.setStyleSheet(\n 'background-color: rgb(150,240,190) ;color:rgb(9,57,31);border: 2px solid black; '\n )\n self.up_ip_textbox.setReadOnly(True)\n self.up_ip_layout.addWidget(self.up_ip_textbox)\n self.down_ip_layout = QVBoxLayout()\n self.down_ip_btn = QtGui.QLabel('DOWN Nodes')\n self.down_ip_btn.setFont(QtGui.QFont('Verdana', 10, QtGui.QFont.Bold))\n self.down_ip_btn.setStyleSheet(\n 'QPushButton {background-color: white ;color:Red;border: 2px solid black}'\n )\n self.down_ip_layout.addWidget(self.down_ip_btn)\n self.down_ip_btn.setToolTip(\n 'Please click here to open UP NODE file.. !')\n self.down_ip_btn.setStyleSheet(\n 'QToolTip { background-color: #00bfff; color: black; border: black solid 1px }'\n )\n self.down_ip_textbox = QPlainTextEdit()\n self.down_ip_textbox.setFont(QtGui.QFont('Verdana', 10, QtGui.QFont\n .Bold))\n self.down_ip_textbox.setStyleSheet(\n 'background-color: rgb(250,210,210);color:rgb(118,14,16);border: 2px solid black; '\n )\n self.down_ip_textbox.setReadOnly(True)\n self.down_ip_layout.addWidget(self.down_ip_textbox)\n self.progress_bar_layout.addLayout(self.secondary_progress_layout)\n self.progress_bar_layout.addLayout(self.control_button_layout)\n self.ping_log_layout.addLayout(self.up_ip_layout)\n self.ping_log_layout.addLayout(self.down_ip_layout)\n self.parent_layout.addLayout(self.ping_log_layout)\n self.parent_layout.addLayout(self.progress_bar_layout)\n self.progressBar = QtGui.QProgressBar()\n self.progressLabel = QtGui.QLabel(\n 'Ping process is in progress .... Please wait until the log file is generated...!'\n )\n self.cancel_button = QtGui.QPushButton('Cancel')\n self.progressBar.setProperty('Current status', 0)\n self.progressBar.setObjectName('progressBar')\n self.progressBar.setMaximumHeight(15)\n self.progressBar.setTextVisible(True)\n self.progressBar.setValue(0)\n self.progressBar.setRange(0, 100)\n self.progressLabel.setFont(QtGui.QFont('verdana', 9, QtGui.QFont.\n Normal))\n self.secondary_progress_layout.addWidget(self.progressBar)\n self.secondary_progress_layout.addWidget(self.progressLabel)\n self.progress_bar_layout.addWidget(self.cancel_button)\n self.main_widget.setLayout(self.parent_layout)\n self.last_parent.right_base_layout_v.addWidget(self.main_widget)\n self.main_widget.hide()\n <mask token>\n\n def closeEvent(self, event):\n self.wind_close_flg = True\n",
"step-2": "<mask token>\n\n\nclass PingWindow:\n <mask token>\n\n def __init__(self, last_parent):\n self.last_parent = last_parent\n self.main_widget = QWidget()\n self.main_widget.setMaximumHeight(400)\n self.parent_layout = QVBoxLayout()\n self.ping_log_layout = QHBoxLayout()\n self.progress_bar_layout = QHBoxLayout()\n self.secondary_progress_layout = QVBoxLayout()\n self.control_button_layout = QGridLayout()\n self.up_ip_layout = QVBoxLayout()\n self.up_ip_btn = QtGui.QLabel('UP Nodes')\n self.up_ip_btn.setFont(QtGui.QFont('Verdana', 10, QtGui.QFont.Bold))\n self.up_ip_btn.setStyleSheet(\n 'background-color:white ;color:Green;border: 2px solid black')\n self.up_ip_layout.addWidget(self.up_ip_btn)\n self.up_ip_btn.setToolTip('Please click here to open UP NODE file.. !')\n self.up_ip_btn.setStyleSheet(\n 'QToolTip { background-color: #00bfff; color: black; border: black solid 1px }'\n )\n self.up_ip_textbox = QPlainTextEdit()\n self.up_ip_textbox.setFont(QtGui.QFont('Verdana', 10, QtGui.QFont.Bold)\n )\n self.up_ip_textbox.setStyleSheet(\n 'background-color: rgb(150,240,190) ;color:rgb(9,57,31);border: 2px solid black; '\n )\n self.up_ip_textbox.setReadOnly(True)\n self.up_ip_layout.addWidget(self.up_ip_textbox)\n self.down_ip_layout = QVBoxLayout()\n self.down_ip_btn = QtGui.QLabel('DOWN Nodes')\n self.down_ip_btn.setFont(QtGui.QFont('Verdana', 10, QtGui.QFont.Bold))\n self.down_ip_btn.setStyleSheet(\n 'QPushButton {background-color: white ;color:Red;border: 2px solid black}'\n )\n self.down_ip_layout.addWidget(self.down_ip_btn)\n self.down_ip_btn.setToolTip(\n 'Please click here to open UP NODE file.. !')\n self.down_ip_btn.setStyleSheet(\n 'QToolTip { background-color: #00bfff; color: black; border: black solid 1px }'\n )\n self.down_ip_textbox = QPlainTextEdit()\n self.down_ip_textbox.setFont(QtGui.QFont('Verdana', 10, QtGui.QFont\n .Bold))\n self.down_ip_textbox.setStyleSheet(\n 'background-color: rgb(250,210,210);color:rgb(118,14,16);border: 2px solid black; '\n )\n self.down_ip_textbox.setReadOnly(True)\n self.down_ip_layout.addWidget(self.down_ip_textbox)\n self.progress_bar_layout.addLayout(self.secondary_progress_layout)\n self.progress_bar_layout.addLayout(self.control_button_layout)\n self.ping_log_layout.addLayout(self.up_ip_layout)\n self.ping_log_layout.addLayout(self.down_ip_layout)\n self.parent_layout.addLayout(self.ping_log_layout)\n self.parent_layout.addLayout(self.progress_bar_layout)\n self.progressBar = QtGui.QProgressBar()\n self.progressLabel = QtGui.QLabel(\n 'Ping process is in progress .... Please wait until the log file is generated...!'\n )\n self.cancel_button = QtGui.QPushButton('Cancel')\n self.progressBar.setProperty('Current status', 0)\n self.progressBar.setObjectName('progressBar')\n self.progressBar.setMaximumHeight(15)\n self.progressBar.setTextVisible(True)\n self.progressBar.setValue(0)\n self.progressBar.setRange(0, 100)\n self.progressLabel.setFont(QtGui.QFont('verdana', 9, QtGui.QFont.\n Normal))\n self.secondary_progress_layout.addWidget(self.progressBar)\n self.secondary_progress_layout.addWidget(self.progressLabel)\n self.progress_bar_layout.addWidget(self.cancel_button)\n self.main_widget.setLayout(self.parent_layout)\n self.last_parent.right_base_layout_v.addWidget(self.main_widget)\n self.main_widget.hide()\n\n def prepare_window(self):\n self.progressBar.show()\n self.progressLabel.show()\n self.cancel_button.show()\n self.up_ip_textbox.clear()\n self.down_ip_textbox.clear()\n self.main_widget.show()\n\n def closeEvent(self, event):\n self.wind_close_flg = True\n",
"step-3": "<mask token>\n\n\nclass PingWindow:\n wind_close_flg = False\n\n def __init__(self, last_parent):\n self.last_parent = last_parent\n self.main_widget = QWidget()\n self.main_widget.setMaximumHeight(400)\n self.parent_layout = QVBoxLayout()\n self.ping_log_layout = QHBoxLayout()\n self.progress_bar_layout = QHBoxLayout()\n self.secondary_progress_layout = QVBoxLayout()\n self.control_button_layout = QGridLayout()\n self.up_ip_layout = QVBoxLayout()\n self.up_ip_btn = QtGui.QLabel('UP Nodes')\n self.up_ip_btn.setFont(QtGui.QFont('Verdana', 10, QtGui.QFont.Bold))\n self.up_ip_btn.setStyleSheet(\n 'background-color:white ;color:Green;border: 2px solid black')\n self.up_ip_layout.addWidget(self.up_ip_btn)\n self.up_ip_btn.setToolTip('Please click here to open UP NODE file.. !')\n self.up_ip_btn.setStyleSheet(\n 'QToolTip { background-color: #00bfff; color: black; border: black solid 1px }'\n )\n self.up_ip_textbox = QPlainTextEdit()\n self.up_ip_textbox.setFont(QtGui.QFont('Verdana', 10, QtGui.QFont.Bold)\n )\n self.up_ip_textbox.setStyleSheet(\n 'background-color: rgb(150,240,190) ;color:rgb(9,57,31);border: 2px solid black; '\n )\n self.up_ip_textbox.setReadOnly(True)\n self.up_ip_layout.addWidget(self.up_ip_textbox)\n self.down_ip_layout = QVBoxLayout()\n self.down_ip_btn = QtGui.QLabel('DOWN Nodes')\n self.down_ip_btn.setFont(QtGui.QFont('Verdana', 10, QtGui.QFont.Bold))\n self.down_ip_btn.setStyleSheet(\n 'QPushButton {background-color: white ;color:Red;border: 2px solid black}'\n )\n self.down_ip_layout.addWidget(self.down_ip_btn)\n self.down_ip_btn.setToolTip(\n 'Please click here to open UP NODE file.. !')\n self.down_ip_btn.setStyleSheet(\n 'QToolTip { background-color: #00bfff; color: black; border: black solid 1px }'\n )\n self.down_ip_textbox = QPlainTextEdit()\n self.down_ip_textbox.setFont(QtGui.QFont('Verdana', 10, QtGui.QFont\n .Bold))\n self.down_ip_textbox.setStyleSheet(\n 'background-color: rgb(250,210,210);color:rgb(118,14,16);border: 2px solid black; '\n )\n self.down_ip_textbox.setReadOnly(True)\n self.down_ip_layout.addWidget(self.down_ip_textbox)\n self.progress_bar_layout.addLayout(self.secondary_progress_layout)\n self.progress_bar_layout.addLayout(self.control_button_layout)\n self.ping_log_layout.addLayout(self.up_ip_layout)\n self.ping_log_layout.addLayout(self.down_ip_layout)\n self.parent_layout.addLayout(self.ping_log_layout)\n self.parent_layout.addLayout(self.progress_bar_layout)\n self.progressBar = QtGui.QProgressBar()\n self.progressLabel = QtGui.QLabel(\n 'Ping process is in progress .... Please wait until the log file is generated...!'\n )\n self.cancel_button = QtGui.QPushButton('Cancel')\n self.progressBar.setProperty('Current status', 0)\n self.progressBar.setObjectName('progressBar')\n self.progressBar.setMaximumHeight(15)\n self.progressBar.setTextVisible(True)\n self.progressBar.setValue(0)\n self.progressBar.setRange(0, 100)\n self.progressLabel.setFont(QtGui.QFont('verdana', 9, QtGui.QFont.\n Normal))\n self.secondary_progress_layout.addWidget(self.progressBar)\n self.secondary_progress_layout.addWidget(self.progressLabel)\n self.progress_bar_layout.addWidget(self.cancel_button)\n self.main_widget.setLayout(self.parent_layout)\n self.last_parent.right_base_layout_v.addWidget(self.main_widget)\n self.main_widget.hide()\n\n def prepare_window(self):\n self.progressBar.show()\n self.progressLabel.show()\n self.cancel_button.show()\n self.up_ip_textbox.clear()\n self.down_ip_textbox.clear()\n self.main_widget.show()\n\n def closeEvent(self, event):\n self.wind_close_flg = True\n",
"step-4": "<mask token>\nimport sys\nfrom PySide.QtGui import *\nfrom PySide.QtCore import *\nfrom PySide import QtGui\nfrom PySide import QtCore\n\n\nclass PingWindow:\n wind_close_flg = False\n\n def __init__(self, last_parent):\n self.last_parent = last_parent\n self.main_widget = QWidget()\n self.main_widget.setMaximumHeight(400)\n self.parent_layout = QVBoxLayout()\n self.ping_log_layout = QHBoxLayout()\n self.progress_bar_layout = QHBoxLayout()\n self.secondary_progress_layout = QVBoxLayout()\n self.control_button_layout = QGridLayout()\n self.up_ip_layout = QVBoxLayout()\n self.up_ip_btn = QtGui.QLabel('UP Nodes')\n self.up_ip_btn.setFont(QtGui.QFont('Verdana', 10, QtGui.QFont.Bold))\n self.up_ip_btn.setStyleSheet(\n 'background-color:white ;color:Green;border: 2px solid black')\n self.up_ip_layout.addWidget(self.up_ip_btn)\n self.up_ip_btn.setToolTip('Please click here to open UP NODE file.. !')\n self.up_ip_btn.setStyleSheet(\n 'QToolTip { background-color: #00bfff; color: black; border: black solid 1px }'\n )\n self.up_ip_textbox = QPlainTextEdit()\n self.up_ip_textbox.setFont(QtGui.QFont('Verdana', 10, QtGui.QFont.Bold)\n )\n self.up_ip_textbox.setStyleSheet(\n 'background-color: rgb(150,240,190) ;color:rgb(9,57,31);border: 2px solid black; '\n )\n self.up_ip_textbox.setReadOnly(True)\n self.up_ip_layout.addWidget(self.up_ip_textbox)\n self.down_ip_layout = QVBoxLayout()\n self.down_ip_btn = QtGui.QLabel('DOWN Nodes')\n self.down_ip_btn.setFont(QtGui.QFont('Verdana', 10, QtGui.QFont.Bold))\n self.down_ip_btn.setStyleSheet(\n 'QPushButton {background-color: white ;color:Red;border: 2px solid black}'\n )\n self.down_ip_layout.addWidget(self.down_ip_btn)\n self.down_ip_btn.setToolTip(\n 'Please click here to open UP NODE file.. !')\n self.down_ip_btn.setStyleSheet(\n 'QToolTip { background-color: #00bfff; color: black; border: black solid 1px }'\n )\n self.down_ip_textbox = QPlainTextEdit()\n self.down_ip_textbox.setFont(QtGui.QFont('Verdana', 10, QtGui.QFont\n .Bold))\n self.down_ip_textbox.setStyleSheet(\n 'background-color: rgb(250,210,210);color:rgb(118,14,16);border: 2px solid black; '\n )\n self.down_ip_textbox.setReadOnly(True)\n self.down_ip_layout.addWidget(self.down_ip_textbox)\n self.progress_bar_layout.addLayout(self.secondary_progress_layout)\n self.progress_bar_layout.addLayout(self.control_button_layout)\n self.ping_log_layout.addLayout(self.up_ip_layout)\n self.ping_log_layout.addLayout(self.down_ip_layout)\n self.parent_layout.addLayout(self.ping_log_layout)\n self.parent_layout.addLayout(self.progress_bar_layout)\n self.progressBar = QtGui.QProgressBar()\n self.progressLabel = QtGui.QLabel(\n 'Ping process is in progress .... Please wait until the log file is generated...!'\n )\n self.cancel_button = QtGui.QPushButton('Cancel')\n self.progressBar.setProperty('Current status', 0)\n self.progressBar.setObjectName('progressBar')\n self.progressBar.setMaximumHeight(15)\n self.progressBar.setTextVisible(True)\n self.progressBar.setValue(0)\n self.progressBar.setRange(0, 100)\n self.progressLabel.setFont(QtGui.QFont('verdana', 9, QtGui.QFont.\n Normal))\n self.secondary_progress_layout.addWidget(self.progressBar)\n self.secondary_progress_layout.addWidget(self.progressLabel)\n self.progress_bar_layout.addWidget(self.cancel_button)\n self.main_widget.setLayout(self.parent_layout)\n self.last_parent.right_base_layout_v.addWidget(self.main_widget)\n self.main_widget.hide()\n\n def prepare_window(self):\n self.progressBar.show()\n self.progressLabel.show()\n self.cancel_button.show()\n self.up_ip_textbox.clear()\n self.down_ip_textbox.clear()\n self.main_widget.show()\n\n def closeEvent(self, event):\n self.wind_close_flg = True\n",
"step-5": "'''\r\nCreated on 17 june, 2018\r\n@author: sp977u@att.com (Satish Palnati)\r\nThis class is for \r\n\r\n'''\r\nimport sys\r\nfrom PySide.QtGui import * \r\nfrom PySide.QtCore import *\r\nfrom PySide import QtGui\r\nfrom PySide import QtCore\r\n\r\nclass PingWindow:\r\n\r\n wind_close_flg = False\r\n\r\n def __init__(self,last_parent):\r\n \r\n self.last_parent = last_parent\r\n\r\n self.main_widget = QWidget()\r\n self.main_widget.setMaximumHeight(400)\r\n \r\n self.parent_layout = QVBoxLayout()\r\n #self.last_parent.right_base_layout_v\r\n\r\n \r\n self.ping_log_layout = QHBoxLayout()\r\n \r\n self.progress_bar_layout = QHBoxLayout() #to incorporate the progress bar and the buttons\r\n \r\n self.secondary_progress_layout = QVBoxLayout() #just the progress bar\r\n \r\n self.control_button_layout = QGridLayout() #cancel,close,open valid / invalid file\r\n \r\n \r\n # UP ip layout for ping logs \r\n self.up_ip_layout = QVBoxLayout()\r\n \r\n self.up_ip_btn = QtGui.QLabel(\"UP Nodes\")\r\n self.up_ip_btn.setFont(QtGui.QFont(\"Verdana\", 10, QtGui.QFont.Bold))\r\n self.up_ip_btn.setStyleSheet(\"background-color:white ;color:Green;border: 2px solid black\")\r\n self.up_ip_layout.addWidget(self.up_ip_btn)\r\n \r\n self.up_ip_btn.setToolTip(\"Please click here to open UP NODE file.. !\")\r\n self.up_ip_btn.setStyleSheet(\"\"\"QToolTip { background-color: #00bfff; color: black; border: black solid 1px }\"\"\")\r\n\r\n self.up_ip_textbox = QPlainTextEdit()\r\n self.up_ip_textbox.setFont(QtGui.QFont(\"Verdana\", 10, QtGui.QFont.Bold))\r\n self.up_ip_textbox.setStyleSheet(\"background-color: rgb(150,240,190) ;color:rgb(9,57,31);border: 2px solid black; \")\r\n self.up_ip_textbox.setReadOnly(True)\r\n self.up_ip_layout.addWidget(self.up_ip_textbox)\r\n \r\n # DOWN ip layout for ping logs\r\n self.down_ip_layout = QVBoxLayout()\r\n \r\n self.down_ip_btn = QtGui.QLabel(\"DOWN Nodes\")\r\n self.down_ip_btn.setFont(QtGui.QFont(\"Verdana\", 10, QtGui.QFont.Bold))\r\n self.down_ip_btn.setStyleSheet(\"QPushButton {background-color: white ;color:Red;border: 2px solid black}\")\r\n self.down_ip_layout.addWidget(self.down_ip_btn)\r\n \r\n self.down_ip_btn.setToolTip(\"Please click here to open UP NODE file.. !\")\r\n self.down_ip_btn.setStyleSheet(\"\"\"QToolTip { background-color: #00bfff; color: black; border: black solid 1px }\"\"\")\r\n \r\n self.down_ip_textbox = QPlainTextEdit()\r\n self.down_ip_textbox.setFont(QtGui.QFont(\"Verdana\", 10, QtGui.QFont.Bold))\r\n self.down_ip_textbox.setStyleSheet(\"background-color: rgb(250,210,210);color:rgb(118,14,16);border: 2px solid black; \")\r\n self.down_ip_textbox.setReadOnly(True)\r\n self.down_ip_layout.addWidget(self.down_ip_textbox)\r\n \r\n self.progress_bar_layout.addLayout(self.secondary_progress_layout)\r\n self.progress_bar_layout.addLayout(self.control_button_layout)\r\n \r\n self.ping_log_layout.addLayout(self.up_ip_layout)\r\n self.ping_log_layout.addLayout(self.down_ip_layout)\r\n \r\n self.parent_layout.addLayout(self.ping_log_layout)\r\n self.parent_layout.addLayout(self.progress_bar_layout)\r\n \r\n \r\n self.progressBar = QtGui.QProgressBar()\r\n self.progressLabel = QtGui.QLabel(\"Ping process is in progress .... Please wait until the log file is generated...!\")\r\n self.cancel_button = QtGui.QPushButton(\"Cancel\")\r\n \r\n# self.progressBar.setGeometry(QtCore.QRect(100, 645, 710, 17))\r\n self.progressBar.setProperty(\"Current status\", 0)\r\n self.progressBar.setObjectName(\"progressBar\")\r\n self.progressBar.setMaximumHeight(15)\r\n self.progressBar.setTextVisible(True)\r\n self.progressBar.setValue(0)\r\n self.progressBar.setRange(0,100)\r\n \r\n \r\n self.progressLabel.setFont(QtGui.QFont(\"verdana\", 9, QtGui.QFont.Normal))\r\n \r\n self.secondary_progress_layout.addWidget(self.progressBar)\r\n self.secondary_progress_layout.addWidget(self.progressLabel)\r\n self.progress_bar_layout.addWidget(self.cancel_button)\r\n # self.last_parent.msgBox.information(,'Job status!',\"Ping logs process has been closed.!\", QtGui.QMessageBox.Ok)\r\n \r\n self.main_widget.setLayout(self.parent_layout) \r\n \r\n \r\n self.last_parent.right_base_layout_v.addWidget(self.main_widget)\r\n \r\n self.main_widget.hide()\r\n \r\n \r\n def prepare_window(self,):\r\n \r\n self.progressBar.show()\r\n self.progressLabel.show()\r\n self.cancel_button.show()\r\n self.up_ip_textbox.clear()\r\n self.down_ip_textbox.clear()\r\n self.main_widget.show()\r\n \r\n def closeEvent(self,event):\r\n \r\n \r\n self.wind_close_flg = True\r\n \r\n \r\n ",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
import boto3
import pprint
import yaml
#initialize empty dictionary to store values
new_dict = {}
count = 0
new_dict2 = {}
# dev = boto3.session.Session(profile_name='shipt')
mybatch = boto3.client('batch')
#load config properties
with open('config.yml') as f:
content = yaml.load(f)
# pprint.pprint(content) #to print config properties in file
#get current job definition
response = mybatch.describe_job_definitions(
jobDefinitions = [
'axiom-staging-abcfinewine:1'
# 'axiom-staging-costco:1'
],
status='ACTIVE'
)
# print(type(response))
for k, v in response.items():
if k == 'jobDefinitions':
# pprint.pprint(v) #to print container properties
# pprint.pprint(v[0]['containerProperties'])
new_dict = v[0]['containerProperties']
#check if config properties match with current job definition properties
# for key in new_dict.keys():
# if key in content.keys():
# count = count + 1
# if content[key] == new_dict[key]:
# new_dict2[key] == content[key]
print(content.items())
# new_dict2 = dict(content.items() & new_dict.items())
print(new_dict2)
# if v == new_dict[k]:
# # print('woooh00!')
# print(content[k])
# print(v)
# print(new_dict[k])
# for k,v in new_dict.items():
# print(v)
# if content != new_dict:
# print('\n\n\n\twooohooo!')
# print(response)
# pp = pprint.PrettyPrinter(indent = 4)
# pp.pprint(response)
|
normal
|
{
"blob_id": "3ba9ff00b0d6a2006c714a9818c8b561d884e252",
"index": 2302,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith open('config.yml') as f:\n content = yaml.load(f)\n<mask token>\nfor k, v in response.items():\n if k == 'jobDefinitions':\n new_dict = v[0]['containerProperties']\nprint(content.items())\nprint(new_dict2)\n",
"step-3": "<mask token>\nnew_dict = {}\ncount = 0\nnew_dict2 = {}\nmybatch = boto3.client('batch')\nwith open('config.yml') as f:\n content = yaml.load(f)\nresponse = mybatch.describe_job_definitions(jobDefinitions=[\n 'axiom-staging-abcfinewine:1'], status='ACTIVE')\nfor k, v in response.items():\n if k == 'jobDefinitions':\n new_dict = v[0]['containerProperties']\nprint(content.items())\nprint(new_dict2)\n",
"step-4": "import boto3\nimport pprint\nimport yaml\nnew_dict = {}\ncount = 0\nnew_dict2 = {}\nmybatch = boto3.client('batch')\nwith open('config.yml') as f:\n content = yaml.load(f)\nresponse = mybatch.describe_job_definitions(jobDefinitions=[\n 'axiom-staging-abcfinewine:1'], status='ACTIVE')\nfor k, v in response.items():\n if k == 'jobDefinitions':\n new_dict = v[0]['containerProperties']\nprint(content.items())\nprint(new_dict2)\n",
"step-5": "import boto3\nimport pprint\nimport yaml\n\n#initialize empty dictionary to store values\nnew_dict = {}\ncount = 0\nnew_dict2 = {}\n\n# dev = boto3.session.Session(profile_name='shipt')\nmybatch = boto3.client('batch')\n\n#load config properties\nwith open('config.yml') as f:\n content = yaml.load(f)\n\n# pprint.pprint(content) #to print config properties in file\n\n#get current job definition\nresponse = mybatch.describe_job_definitions(\n jobDefinitions = [\n 'axiom-staging-abcfinewine:1'\n # 'axiom-staging-costco:1'\n ],\n status='ACTIVE'\n)\n\n# print(type(response))\n\nfor k, v in response.items():\n if k == 'jobDefinitions':\n # pprint.pprint(v) #to print container properties\n # pprint.pprint(v[0]['containerProperties'])\n new_dict = v[0]['containerProperties']\n\n\n#check if config properties match with current job definition properties\n # for key in new_dict.keys():\n # if key in content.keys():\n # count = count + 1\n # if content[key] == new_dict[key]:\n # new_dict2[key] == content[key]\n\nprint(content.items())\n# new_dict2 = dict(content.items() & new_dict.items())\n\nprint(new_dict2)\n # if v == new_dict[k]:\n # # print('woooh00!')\n # print(content[k])\n # print(v)\n # print(new_dict[k])\n\n# for k,v in new_dict.items():\n# print(v)\n# if content != new_dict:\n# print('\\n\\n\\n\\twooohooo!')\n\n\n# print(response)\n# pp = pprint.PrettyPrinter(indent = 4)\n# pp.pprint(response)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import sys
sys.path.append("../circos_report/cnv_anno2conf")
from cnv_anno2conf import main_cnv
tarfile = {"yaml": "data/test_app.yaml"}
def test_main_cnv():
main_cnv(tarfile)
if __name__ == "__main__":
test_main_cnv()
|
normal
|
{
"blob_id": "3c0beb7be29953ca2d7b390627305f4541b56efa",
"index": 69,
"step-1": "<mask token>\n\n\ndef test_main_cnv():\n main_cnv(tarfile)\n\n\n<mask token>\n",
"step-2": "<mask token>\nsys.path.append('../circos_report/cnv_anno2conf')\n<mask token>\n\n\ndef test_main_cnv():\n main_cnv(tarfile)\n\n\nif __name__ == '__main__':\n test_main_cnv()\n",
"step-3": "<mask token>\nsys.path.append('../circos_report/cnv_anno2conf')\n<mask token>\ntarfile = {'yaml': 'data/test_app.yaml'}\n\n\ndef test_main_cnv():\n main_cnv(tarfile)\n\n\nif __name__ == '__main__':\n test_main_cnv()\n",
"step-4": "import sys\nsys.path.append('../circos_report/cnv_anno2conf')\nfrom cnv_anno2conf import main_cnv\ntarfile = {'yaml': 'data/test_app.yaml'}\n\n\ndef test_main_cnv():\n main_cnv(tarfile)\n\n\nif __name__ == '__main__':\n test_main_cnv()\n",
"step-5": "import sys\nsys.path.append(\"../circos_report/cnv_anno2conf\")\nfrom cnv_anno2conf import main_cnv\n\n\ntarfile = {\"yaml\": \"data/test_app.yaml\"}\n\ndef test_main_cnv():\n main_cnv(tarfile)\n\nif __name__ == \"__main__\":\n test_main_cnv()\n\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
class CardRarityParam(CardTextParameter):
<|reserved_special_token_0|>
@classmethod
def get_parameter_name(cls) ->str:
return 'rarity'
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def validate(self, query_context: QueryContext) ->None:
try:
self.rarity = Rarity.objects.get(Q(symbol__iexact=self.value) |
Q(name__iexact=self.value))
except Rarity.DoesNotExist:
raise QueryValidationError(f'Couldn\'t find rarity "{self.value}"')
<|reserved_special_token_0|>
def get_pretty_str(self, query_context: QueryContext) ->str:
return 'the rarity ' + ("isn't" if self.negated else 'is') + (' ' +
OPERATOR_TO_WORDY_MAPPING[self.operator] if self.operator not in
(':', '=') else '') + f' {self.rarity.name.lower()}'
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class CardRarityParam(CardTextParameter):
<|reserved_special_token_0|>
@classmethod
def get_parameter_name(cls) ->str:
return 'rarity'
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __init__(self, negated: bool, param_args: ParameterArgs):
super().__init__(negated, param_args)
self.rarity: Optional[Rarity] = None
if self.operator == ':':
self.operator = '='
def validate(self, query_context: QueryContext) ->None:
try:
self.rarity = Rarity.objects.get(Q(symbol__iexact=self.value) |
Q(name__iexact=self.value))
except Rarity.DoesNotExist:
raise QueryValidationError(f'Couldn\'t find rarity "{self.value}"')
<|reserved_special_token_0|>
def get_pretty_str(self, query_context: QueryContext) ->str:
return 'the rarity ' + ("isn't" if self.negated else 'is') + (' ' +
OPERATOR_TO_WORDY_MAPPING[self.operator] if self.operator not in
(':', '=') else '') + f' {self.rarity.name.lower()}'
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class CardRarityParam(CardTextParameter):
<|reserved_special_token_0|>
@classmethod
def get_parameter_name(cls) ->str:
return 'rarity'
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def get_default_search_context(self) ->CardSearchContext:
return CardSearchContext.PRINTING
def __init__(self, negated: bool, param_args: ParameterArgs):
super().__init__(negated, param_args)
self.rarity: Optional[Rarity] = None
if self.operator == ':':
self.operator = '='
def validate(self, query_context: QueryContext) ->None:
try:
self.rarity = Rarity.objects.get(Q(symbol__iexact=self.value) |
Q(name__iexact=self.value))
except Rarity.DoesNotExist:
raise QueryValidationError(f'Couldn\'t find rarity "{self.value}"')
<|reserved_special_token_0|>
def get_pretty_str(self, query_context: QueryContext) ->str:
return 'the rarity ' + ("isn't" if self.negated else 'is') + (' ' +
OPERATOR_TO_WORDY_MAPPING[self.operator] if self.operator not in
(':', '=') else '') + f' {self.rarity.name.lower()}'
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class CardRarityParam(CardTextParameter):
<|reserved_special_token_0|>
@classmethod
def get_parameter_name(cls) ->str:
return 'rarity'
@classmethod
def get_search_operators(cls) ->List[str]:
return [':', '=', '<=', '<', '>', '>=']
<|reserved_special_token_0|>
def get_default_search_context(self) ->CardSearchContext:
return CardSearchContext.PRINTING
def __init__(self, negated: bool, param_args: ParameterArgs):
super().__init__(negated, param_args)
self.rarity: Optional[Rarity] = None
if self.operator == ':':
self.operator = '='
def validate(self, query_context: QueryContext) ->None:
try:
self.rarity = Rarity.objects.get(Q(symbol__iexact=self.value) |
Q(name__iexact=self.value))
except Rarity.DoesNotExist:
raise QueryValidationError(f'Couldn\'t find rarity "{self.value}"')
def query(self, query_context: QueryContext) ->Q:
if self.operator == '=':
query = Q(rarity=self.rarity)
else:
filter_ = f'rarity__display_order{OPERATOR_MAPPING[self.operator]}'
query = Q(**{filter_: self.rarity.display_order})
return ~query if self.negated else query
def get_pretty_str(self, query_context: QueryContext) ->str:
return 'the rarity ' + ("isn't" if self.negated else 'is') + (' ' +
OPERATOR_TO_WORDY_MAPPING[self.operator] if self.operator not in
(':', '=') else '') + f' {self.rarity.name.lower()}'
<|reserved_special_token_1|>
"""
Card rarity parameters
"""
from typing import List, Optional
from django.db.models.query import Q
from cards.models.rarity import Rarity
from cardsearch.parameters.base_parameters import (
OPERATOR_MAPPING,
OPERATOR_TO_WORDY_MAPPING,
CardTextParameter,
CardSearchContext,
ParameterArgs,
QueryContext,
QueryValidationError,
)
class CardRarityParam(CardTextParameter):
"""
The parameter for searching by a card's rarity
"""
@classmethod
def get_parameter_name(cls) -> str:
return "rarity"
@classmethod
def get_search_operators(cls) -> List[str]:
return [":", "=", "<=", "<", ">", ">="]
@classmethod
def get_search_keywords(cls) -> List[str]:
return ["rarity", "r"]
def get_default_search_context(self) -> CardSearchContext:
return CardSearchContext.PRINTING
def __init__(self, negated: bool, param_args: ParameterArgs):
super().__init__(negated, param_args)
self.rarity: Optional[Rarity] = None
if self.operator == ":":
self.operator = "="
def validate(self, query_context: QueryContext) -> None:
try:
self.rarity = Rarity.objects.get(
Q(symbol__iexact=self.value) | Q(name__iexact=self.value)
)
except Rarity.DoesNotExist:
raise QueryValidationError(f'Couldn\'t find rarity "{self.value}"')
def query(self, query_context: QueryContext) -> Q:
if self.operator == "=":
query = Q(rarity=self.rarity)
else:
filter_ = f"rarity__display_order{OPERATOR_MAPPING[self.operator]}"
query = Q(**{filter_: self.rarity.display_order})
return ~query if self.negated else query
def get_pretty_str(self, query_context: QueryContext) -> str:
return (
"the rarity "
+ ("isn't" if self.negated else "is")
+ (
" " + OPERATOR_TO_WORDY_MAPPING[self.operator]
if self.operator not in (":", "=")
else ""
)
+ f" {self.rarity.name.lower()}"
)
|
flexible
|
{
"blob_id": "c7d9bbdff9148c5d928de66f4406ee8b4e1bcdac",
"index": 2672,
"step-1": "<mask token>\n\n\nclass CardRarityParam(CardTextParameter):\n <mask token>\n\n @classmethod\n def get_parameter_name(cls) ->str:\n return 'rarity'\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def validate(self, query_context: QueryContext) ->None:\n try:\n self.rarity = Rarity.objects.get(Q(symbol__iexact=self.value) |\n Q(name__iexact=self.value))\n except Rarity.DoesNotExist:\n raise QueryValidationError(f'Couldn\\'t find rarity \"{self.value}\"')\n <mask token>\n\n def get_pretty_str(self, query_context: QueryContext) ->str:\n return 'the rarity ' + (\"isn't\" if self.negated else 'is') + (' ' +\n OPERATOR_TO_WORDY_MAPPING[self.operator] if self.operator not in\n (':', '=') else '') + f' {self.rarity.name.lower()}'\n",
"step-2": "<mask token>\n\n\nclass CardRarityParam(CardTextParameter):\n <mask token>\n\n @classmethod\n def get_parameter_name(cls) ->str:\n return 'rarity'\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, negated: bool, param_args: ParameterArgs):\n super().__init__(negated, param_args)\n self.rarity: Optional[Rarity] = None\n if self.operator == ':':\n self.operator = '='\n\n def validate(self, query_context: QueryContext) ->None:\n try:\n self.rarity = Rarity.objects.get(Q(symbol__iexact=self.value) |\n Q(name__iexact=self.value))\n except Rarity.DoesNotExist:\n raise QueryValidationError(f'Couldn\\'t find rarity \"{self.value}\"')\n <mask token>\n\n def get_pretty_str(self, query_context: QueryContext) ->str:\n return 'the rarity ' + (\"isn't\" if self.negated else 'is') + (' ' +\n OPERATOR_TO_WORDY_MAPPING[self.operator] if self.operator not in\n (':', '=') else '') + f' {self.rarity.name.lower()}'\n",
"step-3": "<mask token>\n\n\nclass CardRarityParam(CardTextParameter):\n <mask token>\n\n @classmethod\n def get_parameter_name(cls) ->str:\n return 'rarity'\n <mask token>\n <mask token>\n\n def get_default_search_context(self) ->CardSearchContext:\n return CardSearchContext.PRINTING\n\n def __init__(self, negated: bool, param_args: ParameterArgs):\n super().__init__(negated, param_args)\n self.rarity: Optional[Rarity] = None\n if self.operator == ':':\n self.operator = '='\n\n def validate(self, query_context: QueryContext) ->None:\n try:\n self.rarity = Rarity.objects.get(Q(symbol__iexact=self.value) |\n Q(name__iexact=self.value))\n except Rarity.DoesNotExist:\n raise QueryValidationError(f'Couldn\\'t find rarity \"{self.value}\"')\n <mask token>\n\n def get_pretty_str(self, query_context: QueryContext) ->str:\n return 'the rarity ' + (\"isn't\" if self.negated else 'is') + (' ' +\n OPERATOR_TO_WORDY_MAPPING[self.operator] if self.operator not in\n (':', '=') else '') + f' {self.rarity.name.lower()}'\n",
"step-4": "<mask token>\n\n\nclass CardRarityParam(CardTextParameter):\n <mask token>\n\n @classmethod\n def get_parameter_name(cls) ->str:\n return 'rarity'\n\n @classmethod\n def get_search_operators(cls) ->List[str]:\n return [':', '=', '<=', '<', '>', '>=']\n <mask token>\n\n def get_default_search_context(self) ->CardSearchContext:\n return CardSearchContext.PRINTING\n\n def __init__(self, negated: bool, param_args: ParameterArgs):\n super().__init__(negated, param_args)\n self.rarity: Optional[Rarity] = None\n if self.operator == ':':\n self.operator = '='\n\n def validate(self, query_context: QueryContext) ->None:\n try:\n self.rarity = Rarity.objects.get(Q(symbol__iexact=self.value) |\n Q(name__iexact=self.value))\n except Rarity.DoesNotExist:\n raise QueryValidationError(f'Couldn\\'t find rarity \"{self.value}\"')\n\n def query(self, query_context: QueryContext) ->Q:\n if self.operator == '=':\n query = Q(rarity=self.rarity)\n else:\n filter_ = f'rarity__display_order{OPERATOR_MAPPING[self.operator]}'\n query = Q(**{filter_: self.rarity.display_order})\n return ~query if self.negated else query\n\n def get_pretty_str(self, query_context: QueryContext) ->str:\n return 'the rarity ' + (\"isn't\" if self.negated else 'is') + (' ' +\n OPERATOR_TO_WORDY_MAPPING[self.operator] if self.operator not in\n (':', '=') else '') + f' {self.rarity.name.lower()}'\n",
"step-5": "\"\"\"\nCard rarity parameters\n\"\"\"\nfrom typing import List, Optional\n\nfrom django.db.models.query import Q\n\nfrom cards.models.rarity import Rarity\nfrom cardsearch.parameters.base_parameters import (\n OPERATOR_MAPPING,\n OPERATOR_TO_WORDY_MAPPING,\n CardTextParameter,\n CardSearchContext,\n ParameterArgs,\n QueryContext,\n QueryValidationError,\n)\n\n\nclass CardRarityParam(CardTextParameter):\n \"\"\"\n The parameter for searching by a card's rarity\n \"\"\"\n\n @classmethod\n def get_parameter_name(cls) -> str:\n return \"rarity\"\n\n @classmethod\n def get_search_operators(cls) -> List[str]:\n return [\":\", \"=\", \"<=\", \"<\", \">\", \">=\"]\n\n @classmethod\n def get_search_keywords(cls) -> List[str]:\n return [\"rarity\", \"r\"]\n\n def get_default_search_context(self) -> CardSearchContext:\n return CardSearchContext.PRINTING\n\n def __init__(self, negated: bool, param_args: ParameterArgs):\n super().__init__(negated, param_args)\n self.rarity: Optional[Rarity] = None\n if self.operator == \":\":\n self.operator = \"=\"\n\n def validate(self, query_context: QueryContext) -> None:\n try:\n self.rarity = Rarity.objects.get(\n Q(symbol__iexact=self.value) | Q(name__iexact=self.value)\n )\n except Rarity.DoesNotExist:\n raise QueryValidationError(f'Couldn\\'t find rarity \"{self.value}\"')\n\n def query(self, query_context: QueryContext) -> Q:\n if self.operator == \"=\":\n query = Q(rarity=self.rarity)\n else:\n filter_ = f\"rarity__display_order{OPERATOR_MAPPING[self.operator]}\"\n query = Q(**{filter_: self.rarity.display_order})\n return ~query if self.negated else query\n\n def get_pretty_str(self, query_context: QueryContext) -> str:\n return (\n \"the rarity \"\n + (\"isn't\" if self.negated else \"is\")\n + (\n \" \" + OPERATOR_TO_WORDY_MAPPING[self.operator]\n if self.operator not in (\":\", \"=\")\n else \"\"\n )\n + f\" {self.rarity.name.lower()}\"\n )\n",
"step-ids": [
4,
5,
6,
8,
12
]
}
|
[
4,
5,
6,
8,
12
] |
# -*- encoding:utf-8 -*-
import os
import unittest
from HTMLTestRunner_cn import HTMLTestRunner
from time import sleep
from framework.SunFlower import SunFlower
from testcase.TestCRM import TestCRM
class TestCRMcreateCustomer(TestCRM):
# 创建客户
def createCustomer(self):
# 点击客户图标
self.driver.click("text= 客户 ")
# 点击添加客户按钮
self.driver.click("text=sYVInwAAAABJRU5ErkJggg==")
#输入客户名称
self.driver.send_keys("xpath=//*[@text=\"请输入\"][1]","crm000001")
#输入客户编号
self.driver.send_keys("xpath=//*[@text=\"请输入\"][1]","c000001")
#选择客户信息来源
self.driver.click_index("class=android.view.View",59)
self.driver.click("text=电话营销")
#保存
self.driver.click("text=保存")
#点击返回
self.driver.click_index("class=android.view.View",10)
# sleep(5)
# # # 向上滑动屏幕
# # self.driver.swipe_up(n=3)
def test_weiChat(self):
self.login()
self.createCustomer()
self.logout()
if __name__ == "__main__":
report_path = os.path.dirname(__file__) + "/report/" + "TestCRM_report.html"
suite = unittest.TestLoader().loadTestsFromTestCase(TestCRM)
runer = HTMLTestRunner(title="悟空CRM测试报告", description="登录", stream=open(report_path, "wb"),
verbosity=2, retry=0, save_last_try=True)
runer.run(suite)
|
normal
|
{
"blob_id": "74bc530d53cd86c52c44ba8e98d4d8f502032340",
"index": 2423,
"step-1": "<mask token>\n\n\nclass TestCRMcreateCustomer(TestCRM):\n <mask token>\n\n def test_weiChat(self):\n self.login()\n self.createCustomer()\n self.logout()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass TestCRMcreateCustomer(TestCRM):\n\n def createCustomer(self):\n self.driver.click('text= 客户 ')\n self.driver.click('text=sYVInwAAAABJRU5ErkJggg==')\n self.driver.send_keys('xpath=//*[@text=\"请输入\"][1]', 'crm000001')\n self.driver.send_keys('xpath=//*[@text=\"请输入\"][1]', 'c000001')\n self.driver.click_index('class=android.view.View', 59)\n self.driver.click('text=电话营销')\n self.driver.click('text=保存')\n self.driver.click_index('class=android.view.View', 10)\n\n def test_weiChat(self):\n self.login()\n self.createCustomer()\n self.logout()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass TestCRMcreateCustomer(TestCRM):\n\n def createCustomer(self):\n self.driver.click('text= 客户 ')\n self.driver.click('text=sYVInwAAAABJRU5ErkJggg==')\n self.driver.send_keys('xpath=//*[@text=\"请输入\"][1]', 'crm000001')\n self.driver.send_keys('xpath=//*[@text=\"请输入\"][1]', 'c000001')\n self.driver.click_index('class=android.view.View', 59)\n self.driver.click('text=电话营销')\n self.driver.click('text=保存')\n self.driver.click_index('class=android.view.View', 10)\n\n def test_weiChat(self):\n self.login()\n self.createCustomer()\n self.logout()\n\n\nif __name__ == '__main__':\n report_path = os.path.dirname(__file__\n ) + '/report/' + 'TestCRM_report.html'\n suite = unittest.TestLoader().loadTestsFromTestCase(TestCRM)\n runer = HTMLTestRunner(title='悟空CRM测试报告', description='登录', stream=open\n (report_path, 'wb'), verbosity=2, retry=0, save_last_try=True)\n runer.run(suite)\n",
"step-4": "import os\nimport unittest\nfrom HTMLTestRunner_cn import HTMLTestRunner\nfrom time import sleep\nfrom framework.SunFlower import SunFlower\nfrom testcase.TestCRM import TestCRM\n\n\nclass TestCRMcreateCustomer(TestCRM):\n\n def createCustomer(self):\n self.driver.click('text= 客户 ')\n self.driver.click('text=sYVInwAAAABJRU5ErkJggg==')\n self.driver.send_keys('xpath=//*[@text=\"请输入\"][1]', 'crm000001')\n self.driver.send_keys('xpath=//*[@text=\"请输入\"][1]', 'c000001')\n self.driver.click_index('class=android.view.View', 59)\n self.driver.click('text=电话营销')\n self.driver.click('text=保存')\n self.driver.click_index('class=android.view.View', 10)\n\n def test_weiChat(self):\n self.login()\n self.createCustomer()\n self.logout()\n\n\nif __name__ == '__main__':\n report_path = os.path.dirname(__file__\n ) + '/report/' + 'TestCRM_report.html'\n suite = unittest.TestLoader().loadTestsFromTestCase(TestCRM)\n runer = HTMLTestRunner(title='悟空CRM测试报告', description='登录', stream=open\n (report_path, 'wb'), verbosity=2, retry=0, save_last_try=True)\n runer.run(suite)\n",
"step-5": "# -*- encoding:utf-8 -*-\nimport os\nimport unittest\nfrom HTMLTestRunner_cn import HTMLTestRunner\nfrom time import sleep\n\nfrom framework.SunFlower import SunFlower\nfrom testcase.TestCRM import TestCRM\n\n\nclass TestCRMcreateCustomer(TestCRM):\n\n # 创建客户\n def createCustomer(self):\n\n # 点击客户图标\n self.driver.click(\"text= 客户 \")\n # 点击添加客户按钮\n self.driver.click(\"text=sYVInwAAAABJRU5ErkJggg==\")\n #输入客户名称\n self.driver.send_keys(\"xpath=//*[@text=\\\"请输入\\\"][1]\",\"crm000001\")\n\n #输入客户编号\n self.driver.send_keys(\"xpath=//*[@text=\\\"请输入\\\"][1]\",\"c000001\")\n #选择客户信息来源\n self.driver.click_index(\"class=android.view.View\",59)\n self.driver.click(\"text=电话营销\")\n #保存\n self.driver.click(\"text=保存\")\n #点击返回\n self.driver.click_index(\"class=android.view.View\",10)\n # sleep(5)\n # # # 向上滑动屏幕\n # # self.driver.swipe_up(n=3)\n\n def test_weiChat(self):\n self.login()\n self.createCustomer()\n self.logout()\n\n\nif __name__ == \"__main__\":\n report_path = os.path.dirname(__file__) + \"/report/\" + \"TestCRM_report.html\"\n suite = unittest.TestLoader().loadTestsFromTestCase(TestCRM)\n runer = HTMLTestRunner(title=\"悟空CRM测试报告\", description=\"登录\", stream=open(report_path, \"wb\"),\n verbosity=2, retry=0, save_last_try=True)\n runer.run(suite)\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
with open('/Users/ste/Desktop/Ste/Python/AlgorithmsCourse/KargerMinCut.txt'
) as v_list_file:
for line in v_list_file:
node = map(int, line.split())
for adjacent in node[1:]:
edges.append([node[0], adjacent])
with open(
'/Users/ste/Desktop/Ste/C++/Programs/AlgorithmCourse/GraphSearch/KargerMinCut(edges).txt'
, 'w+') as outfile:
for edge in edges:
outfile.write(str(edge[0]) + ' ' + str(edge[1]) + '\n')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
edges = []
with open('/Users/ste/Desktop/Ste/Python/AlgorithmsCourse/KargerMinCut.txt'
) as v_list_file:
for line in v_list_file:
node = map(int, line.split())
for adjacent in node[1:]:
edges.append([node[0], adjacent])
with open(
'/Users/ste/Desktop/Ste/C++/Programs/AlgorithmCourse/GraphSearch/KargerMinCut(edges).txt'
, 'w+') as outfile:
for edge in edges:
outfile.write(str(edge[0]) + ' ' + str(edge[1]) + '\n')
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 29 20:33:32 2013
@author: ste
"""
#Convert input file for graph from adjacency list version, where each line is
#vertex adjacent adjacent adjacent ...
#to edge representation where each line is
#tail head
edges=[]
with open("/Users/ste/Desktop/Ste/Python/AlgorithmsCourse/KargerMinCut.txt") as v_list_file:
for line in v_list_file:
node=map(int, line.split())
for adjacent in node[1:]:
edges.append([node[0], adjacent])
with open("/Users/ste/Desktop/Ste/C++/Programs/AlgorithmCourse/GraphSearch/KargerMinCut(edges).txt", "w+") as outfile:
for edge in edges:
outfile.write(str(edge[0])+' '+str(edge[1])+'\n')
|
flexible
|
{
"blob_id": "1b7b94a0331e2462f83f4f77bcfaefbeefdf24f4",
"index": 3754,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith open('/Users/ste/Desktop/Ste/Python/AlgorithmsCourse/KargerMinCut.txt'\n ) as v_list_file:\n for line in v_list_file:\n node = map(int, line.split())\n for adjacent in node[1:]:\n edges.append([node[0], adjacent])\nwith open(\n '/Users/ste/Desktop/Ste/C++/Programs/AlgorithmCourse/GraphSearch/KargerMinCut(edges).txt'\n , 'w+') as outfile:\n for edge in edges:\n outfile.write(str(edge[0]) + ' ' + str(edge[1]) + '\\n')\n",
"step-3": "<mask token>\nedges = []\nwith open('/Users/ste/Desktop/Ste/Python/AlgorithmsCourse/KargerMinCut.txt'\n ) as v_list_file:\n for line in v_list_file:\n node = map(int, line.split())\n for adjacent in node[1:]:\n edges.append([node[0], adjacent])\nwith open(\n '/Users/ste/Desktop/Ste/C++/Programs/AlgorithmCourse/GraphSearch/KargerMinCut(edges).txt'\n , 'w+') as outfile:\n for edge in edges:\n outfile.write(str(edge[0]) + ' ' + str(edge[1]) + '\\n')\n",
"step-4": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jul 29 20:33:32 2013\n\n@author: ste\n\"\"\"\n\n#Convert input file for graph from adjacency list version, where each line is\n#vertex adjacent adjacent adjacent ...\n#to edge representation where each line is\n#tail head\n\nedges=[]\nwith open(\"/Users/ste/Desktop/Ste/Python/AlgorithmsCourse/KargerMinCut.txt\") as v_list_file:\n for line in v_list_file:\n node=map(int, line.split())\n for adjacent in node[1:]:\n edges.append([node[0], adjacent])\n\nwith open(\"/Users/ste/Desktop/Ste/C++/Programs/AlgorithmCourse/GraphSearch/KargerMinCut(edges).txt\", \"w+\") as outfile:\n for edge in edges:\n outfile.write(str(edge[0])+' '+str(edge[1])+'\\n')\n ",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def encrypt(message, passphrase):
passphrase = trans(passphrase)
IV = Random.new().read(BLOCK_SIZE)
aes = AES.new(passphrase, AES.MODE_CFB, IV)
return base64.b32encode(IV + aes.encrypt(message)).decode('utf-8')
def decrypt(encrypted, passphrase):
passphrase = trans(passphrase)
encrypted = base64.b32decode(encrypted)
IV = encrypted[:BLOCK_SIZE]
aes = AES.new(passphrase, AES.MODE_CFB, IV)
return aes.decrypt(encrypted[BLOCK_SIZE:]).decode('utf-8')
def mokum_message(message):
try:
postdata = {'post': {'timelines': ['user'], 'text': message,
'comments_disabled': True, 'nsfw': False}, '_uuid': str(uuid.
uuid4())}
req = urllib.request.Request('https://mokum.place/api/v1/posts.json')
req.add_header('Content-Type', 'application/json')
req.add_header('Accept', 'application/json')
req.add_header('X-API-Token', postapikey)
resp = urllib.request.urlopen(req, json.dumps(postdata).encode('utf-8')
)
message = json.loads(resp.read().decode('utf-8'))
if message['post']['id']:
return message['post']['id']
except:
return False
<|reserved_special_token_0|>
@app.route('/')
def main():
return render_template('post.html')
@app.route('/post', methods=['POST'])
def post():
posttext = request.form['post']
id = mokum_message(posttext)
mokum_comment(id, 'click to comment --> ' + appurl + '/c/' + encrypt(
str(id), secretkey))
return redirect(mainurl + str(id))
@app.route('/c/<cid>')
def comm(cid):
return render_template('comment.html', cid=cid)
@app.route('/comment', methods=['POST'])
def commented():
postid = decrypt(request.form['cid'], secretkey)
posttext = request.form['comment']
mokum_comment(postid, posttext)
return redirect(mainurl + postid)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def trans(key):
return hashlib.md5(key.encode('utf-8')).digest()
def encrypt(message, passphrase):
passphrase = trans(passphrase)
IV = Random.new().read(BLOCK_SIZE)
aes = AES.new(passphrase, AES.MODE_CFB, IV)
return base64.b32encode(IV + aes.encrypt(message)).decode('utf-8')
def decrypt(encrypted, passphrase):
passphrase = trans(passphrase)
encrypted = base64.b32decode(encrypted)
IV = encrypted[:BLOCK_SIZE]
aes = AES.new(passphrase, AES.MODE_CFB, IV)
return aes.decrypt(encrypted[BLOCK_SIZE:]).decode('utf-8')
def mokum_message(message):
try:
postdata = {'post': {'timelines': ['user'], 'text': message,
'comments_disabled': True, 'nsfw': False}, '_uuid': str(uuid.
uuid4())}
req = urllib.request.Request('https://mokum.place/api/v1/posts.json')
req.add_header('Content-Type', 'application/json')
req.add_header('Accept', 'application/json')
req.add_header('X-API-Token', postapikey)
resp = urllib.request.urlopen(req, json.dumps(postdata).encode('utf-8')
)
message = json.loads(resp.read().decode('utf-8'))
if message['post']['id']:
return message['post']['id']
except:
return False
def mokum_comment(messageid, comment):
try:
posturl = 'https://mokum.place/api/v1/posts/' + str(messageid
) + '/comments.json'
postdata = {'comment': {'text': comment}, '_uuid': str(uuid.uuid4())}
req = urllib.request.Request(posturl)
req.add_header('Content-Type', 'application/json')
req.add_header('Accept', 'application/json')
req.add_header('X-API-Token', postapikey)
resp = urllib.request.urlopen(req, json.dumps(postdata).encode('utf-8')
)
message = json.loads(resp.read().decode('utf-8'))
if message['id']:
return message['id']
except:
return False
@app.route('/')
def main():
return render_template('post.html')
@app.route('/post', methods=['POST'])
def post():
posttext = request.form['post']
id = mokum_message(posttext)
mokum_comment(id, 'click to comment --> ' + appurl + '/c/' + encrypt(
str(id), secretkey))
return redirect(mainurl + str(id))
@app.route('/c/<cid>')
def comm(cid):
return render_template('comment.html', cid=cid)
@app.route('/comment', methods=['POST'])
def commented():
postid = decrypt(request.form['cid'], secretkey)
posttext = request.form['comment']
mokum_comment(postid, posttext)
return redirect(mainurl + postid)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
Bootstrap(app)
with open('app_config.yml', 'r') as ymlfile:
cfg = yaml.load(ymlfile)
<|reserved_special_token_0|>
def trans(key):
return hashlib.md5(key.encode('utf-8')).digest()
def encrypt(message, passphrase):
passphrase = trans(passphrase)
IV = Random.new().read(BLOCK_SIZE)
aes = AES.new(passphrase, AES.MODE_CFB, IV)
return base64.b32encode(IV + aes.encrypt(message)).decode('utf-8')
def decrypt(encrypted, passphrase):
passphrase = trans(passphrase)
encrypted = base64.b32decode(encrypted)
IV = encrypted[:BLOCK_SIZE]
aes = AES.new(passphrase, AES.MODE_CFB, IV)
return aes.decrypt(encrypted[BLOCK_SIZE:]).decode('utf-8')
def mokum_message(message):
try:
postdata = {'post': {'timelines': ['user'], 'text': message,
'comments_disabled': True, 'nsfw': False}, '_uuid': str(uuid.
uuid4())}
req = urllib.request.Request('https://mokum.place/api/v1/posts.json')
req.add_header('Content-Type', 'application/json')
req.add_header('Accept', 'application/json')
req.add_header('X-API-Token', postapikey)
resp = urllib.request.urlopen(req, json.dumps(postdata).encode('utf-8')
)
message = json.loads(resp.read().decode('utf-8'))
if message['post']['id']:
return message['post']['id']
except:
return False
def mokum_comment(messageid, comment):
try:
posturl = 'https://mokum.place/api/v1/posts/' + str(messageid
) + '/comments.json'
postdata = {'comment': {'text': comment}, '_uuid': str(uuid.uuid4())}
req = urllib.request.Request(posturl)
req.add_header('Content-Type', 'application/json')
req.add_header('Accept', 'application/json')
req.add_header('X-API-Token', postapikey)
resp = urllib.request.urlopen(req, json.dumps(postdata).encode('utf-8')
)
message = json.loads(resp.read().decode('utf-8'))
if message['id']:
return message['id']
except:
return False
@app.route('/')
def main():
return render_template('post.html')
@app.route('/post', methods=['POST'])
def post():
posttext = request.form['post']
id = mokum_message(posttext)
mokum_comment(id, 'click to comment --> ' + appurl + '/c/' + encrypt(
str(id), secretkey))
return redirect(mainurl + str(id))
@app.route('/c/<cid>')
def comm(cid):
return render_template('comment.html', cid=cid)
@app.route('/comment', methods=['POST'])
def commented():
postid = decrypt(request.form['cid'], secretkey)
posttext = request.form['comment']
mokum_comment(postid, posttext)
return redirect(mainurl + postid)
if __name__ == '__main__':
app.run(debug=True)
<|reserved_special_token_1|>
from flask import Flask
from flask import request, redirect, render_template
from flask_bootstrap import Bootstrap
import urllib.request
import urllib.parse
import json
import uuid
import yaml
import hashlib
from Crypto import Random
from Crypto.Cipher import AES
import base64
app = Flask(__name__)
Bootstrap(app)
with open('app_config.yml', 'r') as ymlfile:
cfg = yaml.load(ymlfile)
postapikey = cfg['app']['postapikey']
mainurl = cfg['app']['mainurl']
appurl = cfg['app']['appurl']
secretkey = cfg['app']['secret']
BLOCK_SIZE = 16
def trans(key):
return hashlib.md5(key.encode('utf-8')).digest()
def encrypt(message, passphrase):
passphrase = trans(passphrase)
IV = Random.new().read(BLOCK_SIZE)
aes = AES.new(passphrase, AES.MODE_CFB, IV)
return base64.b32encode(IV + aes.encrypt(message)).decode('utf-8')
def decrypt(encrypted, passphrase):
passphrase = trans(passphrase)
encrypted = base64.b32decode(encrypted)
IV = encrypted[:BLOCK_SIZE]
aes = AES.new(passphrase, AES.MODE_CFB, IV)
return aes.decrypt(encrypted[BLOCK_SIZE:]).decode('utf-8')
def mokum_message(message):
try:
postdata = {'post': {'timelines': ['user'], 'text': message,
'comments_disabled': True, 'nsfw': False}, '_uuid': str(uuid.
uuid4())}
req = urllib.request.Request('https://mokum.place/api/v1/posts.json')
req.add_header('Content-Type', 'application/json')
req.add_header('Accept', 'application/json')
req.add_header('X-API-Token', postapikey)
resp = urllib.request.urlopen(req, json.dumps(postdata).encode('utf-8')
)
message = json.loads(resp.read().decode('utf-8'))
if message['post']['id']:
return message['post']['id']
except:
return False
def mokum_comment(messageid, comment):
try:
posturl = 'https://mokum.place/api/v1/posts/' + str(messageid
) + '/comments.json'
postdata = {'comment': {'text': comment}, '_uuid': str(uuid.uuid4())}
req = urllib.request.Request(posturl)
req.add_header('Content-Type', 'application/json')
req.add_header('Accept', 'application/json')
req.add_header('X-API-Token', postapikey)
resp = urllib.request.urlopen(req, json.dumps(postdata).encode('utf-8')
)
message = json.loads(resp.read().decode('utf-8'))
if message['id']:
return message['id']
except:
return False
@app.route('/')
def main():
return render_template('post.html')
@app.route('/post', methods=['POST'])
def post():
posttext = request.form['post']
id = mokum_message(posttext)
mokum_comment(id, 'click to comment --> ' + appurl + '/c/' + encrypt(
str(id), secretkey))
return redirect(mainurl + str(id))
@app.route('/c/<cid>')
def comm(cid):
return render_template('comment.html', cid=cid)
@app.route('/comment', methods=['POST'])
def commented():
postid = decrypt(request.form['cid'], secretkey)
posttext = request.form['comment']
mokum_comment(postid, posttext)
return redirect(mainurl + postid)
if __name__ == '__main__':
app.run(debug=True)
<|reserved_special_token_1|>
from flask import Flask
from flask import request, redirect, render_template
from flask_bootstrap import Bootstrap
import urllib.request
import urllib.parse
import json
import uuid
import yaml
import hashlib
from Crypto import Random
from Crypto.Cipher import AES
import base64
app = Flask(__name__)
Bootstrap(app)
with open("app_config.yml", 'r') as ymlfile:
cfg = yaml.load(ymlfile)
postapikey = cfg['app']['postapikey']
mainurl = cfg['app']['mainurl']
appurl = cfg['app']['appurl']
secretkey = cfg['app']['secret']
# Some crypto staff
BLOCK_SIZE = 16
def trans(key):
return hashlib.md5(key.encode("utf-8")).digest()
def encrypt(message, passphrase):
passphrase = trans(passphrase)
IV = Random.new().read(BLOCK_SIZE)
aes = AES.new(passphrase, AES.MODE_CFB, IV)
return base64.b32encode(IV + aes.encrypt(message)).decode("utf-8")
def decrypt(encrypted, passphrase):
passphrase = trans(passphrase)
encrypted = base64.b32decode(encrypted)
IV = encrypted[:BLOCK_SIZE]
aes = AES.new(passphrase, AES.MODE_CFB, IV)
return aes.decrypt(encrypted[BLOCK_SIZE:]).decode("utf-8")
def mokum_message(message):
try:
postdata = {"post": {"timelines": ["user"],
"text": message,
"comments_disabled": True,
"nsfw": False},
"_uuid": str(uuid.uuid4())
}
req = urllib.request.Request("https://mokum.place/api/v1/posts.json")
req.add_header('Content-Type', 'application/json')
req.add_header('Accept', 'application/json')
req.add_header('X-API-Token', postapikey)
resp = urllib.request.urlopen(req, json.dumps(postdata).encode("utf-8"))
message = json.loads(resp.read().decode("utf-8"))
if message['post']['id']:
return message['post']['id']
except:
return False
def mokum_comment(messageid, comment):
try:
posturl = "https://mokum.place/api/v1/posts/" + str(messageid) + "/comments.json"
postdata = {"comment": {"text": comment,
# "platform": "anonymous device"
},
"_uuid": str(uuid.uuid4())}
req = urllib.request.Request(posturl)
req.add_header('Content-Type', 'application/json')
req.add_header('Accept', 'application/json')
req.add_header('X-API-Token', postapikey)
resp = urllib.request.urlopen(req, json.dumps(postdata).encode("utf-8"))
message = json.loads(resp.read().decode("utf-8"))
if message['id']:
return message['id']
except:
return False
@app.route('/')
def main():
return render_template('post.html')
@app.route('/post', methods=['POST'])
def post():
posttext = request.form['post']
id = mokum_message(posttext)
mokum_comment(id, "click to comment --> " + appurl + "/c/" + encrypt(str(id), secretkey))
return redirect(mainurl + str(id))
@app.route('/c/<cid>')
def comm(cid):
return render_template('comment.html', cid=cid)
@app.route('/comment', methods=['POST'])
def commented():
postid = decrypt(request.form['cid'], secretkey)
posttext = request.form['comment']
mokum_comment(postid, posttext)
return redirect(mainurl + postid)
if __name__ == '__main__':
app.run(debug=True)
|
flexible
|
{
"blob_id": "e55115a65ebee5d41dcd01a5cbabc328acf152da",
"index": 6079,
"step-1": "<mask token>\n\n\ndef encrypt(message, passphrase):\n passphrase = trans(passphrase)\n IV = Random.new().read(BLOCK_SIZE)\n aes = AES.new(passphrase, AES.MODE_CFB, IV)\n return base64.b32encode(IV + aes.encrypt(message)).decode('utf-8')\n\n\ndef decrypt(encrypted, passphrase):\n passphrase = trans(passphrase)\n encrypted = base64.b32decode(encrypted)\n IV = encrypted[:BLOCK_SIZE]\n aes = AES.new(passphrase, AES.MODE_CFB, IV)\n return aes.decrypt(encrypted[BLOCK_SIZE:]).decode('utf-8')\n\n\ndef mokum_message(message):\n try:\n postdata = {'post': {'timelines': ['user'], 'text': message,\n 'comments_disabled': True, 'nsfw': False}, '_uuid': str(uuid.\n uuid4())}\n req = urllib.request.Request('https://mokum.place/api/v1/posts.json')\n req.add_header('Content-Type', 'application/json')\n req.add_header('Accept', 'application/json')\n req.add_header('X-API-Token', postapikey)\n resp = urllib.request.urlopen(req, json.dumps(postdata).encode('utf-8')\n )\n message = json.loads(resp.read().decode('utf-8'))\n if message['post']['id']:\n return message['post']['id']\n except:\n return False\n\n\n<mask token>\n\n\n@app.route('/')\ndef main():\n return render_template('post.html')\n\n\n@app.route('/post', methods=['POST'])\ndef post():\n posttext = request.form['post']\n id = mokum_message(posttext)\n mokum_comment(id, 'click to comment --> ' + appurl + '/c/' + encrypt(\n str(id), secretkey))\n return redirect(mainurl + str(id))\n\n\n@app.route('/c/<cid>')\ndef comm(cid):\n return render_template('comment.html', cid=cid)\n\n\n@app.route('/comment', methods=['POST'])\ndef commented():\n postid = decrypt(request.form['cid'], secretkey)\n posttext = request.form['comment']\n mokum_comment(postid, posttext)\n return redirect(mainurl + postid)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef trans(key):\n return hashlib.md5(key.encode('utf-8')).digest()\n\n\ndef encrypt(message, passphrase):\n passphrase = trans(passphrase)\n IV = Random.new().read(BLOCK_SIZE)\n aes = AES.new(passphrase, AES.MODE_CFB, IV)\n return base64.b32encode(IV + aes.encrypt(message)).decode('utf-8')\n\n\ndef decrypt(encrypted, passphrase):\n passphrase = trans(passphrase)\n encrypted = base64.b32decode(encrypted)\n IV = encrypted[:BLOCK_SIZE]\n aes = AES.new(passphrase, AES.MODE_CFB, IV)\n return aes.decrypt(encrypted[BLOCK_SIZE:]).decode('utf-8')\n\n\ndef mokum_message(message):\n try:\n postdata = {'post': {'timelines': ['user'], 'text': message,\n 'comments_disabled': True, 'nsfw': False}, '_uuid': str(uuid.\n uuid4())}\n req = urllib.request.Request('https://mokum.place/api/v1/posts.json')\n req.add_header('Content-Type', 'application/json')\n req.add_header('Accept', 'application/json')\n req.add_header('X-API-Token', postapikey)\n resp = urllib.request.urlopen(req, json.dumps(postdata).encode('utf-8')\n )\n message = json.loads(resp.read().decode('utf-8'))\n if message['post']['id']:\n return message['post']['id']\n except:\n return False\n\n\ndef mokum_comment(messageid, comment):\n try:\n posturl = 'https://mokum.place/api/v1/posts/' + str(messageid\n ) + '/comments.json'\n postdata = {'comment': {'text': comment}, '_uuid': str(uuid.uuid4())}\n req = urllib.request.Request(posturl)\n req.add_header('Content-Type', 'application/json')\n req.add_header('Accept', 'application/json')\n req.add_header('X-API-Token', postapikey)\n resp = urllib.request.urlopen(req, json.dumps(postdata).encode('utf-8')\n )\n message = json.loads(resp.read().decode('utf-8'))\n if message['id']:\n return message['id']\n except:\n return False\n\n\n@app.route('/')\ndef main():\n return render_template('post.html')\n\n\n@app.route('/post', methods=['POST'])\ndef post():\n posttext = request.form['post']\n id = mokum_message(posttext)\n mokum_comment(id, 'click to comment --> ' + appurl + '/c/' + encrypt(\n str(id), secretkey))\n return redirect(mainurl + str(id))\n\n\n@app.route('/c/<cid>')\ndef comm(cid):\n return render_template('comment.html', cid=cid)\n\n\n@app.route('/comment', methods=['POST'])\ndef commented():\n postid = decrypt(request.form['cid'], secretkey)\n posttext = request.form['comment']\n mokum_comment(postid, posttext)\n return redirect(mainurl + postid)\n\n\n<mask token>\n",
"step-3": "<mask token>\nBootstrap(app)\nwith open('app_config.yml', 'r') as ymlfile:\n cfg = yaml.load(ymlfile)\n<mask token>\n\n\ndef trans(key):\n return hashlib.md5(key.encode('utf-8')).digest()\n\n\ndef encrypt(message, passphrase):\n passphrase = trans(passphrase)\n IV = Random.new().read(BLOCK_SIZE)\n aes = AES.new(passphrase, AES.MODE_CFB, IV)\n return base64.b32encode(IV + aes.encrypt(message)).decode('utf-8')\n\n\ndef decrypt(encrypted, passphrase):\n passphrase = trans(passphrase)\n encrypted = base64.b32decode(encrypted)\n IV = encrypted[:BLOCK_SIZE]\n aes = AES.new(passphrase, AES.MODE_CFB, IV)\n return aes.decrypt(encrypted[BLOCK_SIZE:]).decode('utf-8')\n\n\ndef mokum_message(message):\n try:\n postdata = {'post': {'timelines': ['user'], 'text': message,\n 'comments_disabled': True, 'nsfw': False}, '_uuid': str(uuid.\n uuid4())}\n req = urllib.request.Request('https://mokum.place/api/v1/posts.json')\n req.add_header('Content-Type', 'application/json')\n req.add_header('Accept', 'application/json')\n req.add_header('X-API-Token', postapikey)\n resp = urllib.request.urlopen(req, json.dumps(postdata).encode('utf-8')\n )\n message = json.loads(resp.read().decode('utf-8'))\n if message['post']['id']:\n return message['post']['id']\n except:\n return False\n\n\ndef mokum_comment(messageid, comment):\n try:\n posturl = 'https://mokum.place/api/v1/posts/' + str(messageid\n ) + '/comments.json'\n postdata = {'comment': {'text': comment}, '_uuid': str(uuid.uuid4())}\n req = urllib.request.Request(posturl)\n req.add_header('Content-Type', 'application/json')\n req.add_header('Accept', 'application/json')\n req.add_header('X-API-Token', postapikey)\n resp = urllib.request.urlopen(req, json.dumps(postdata).encode('utf-8')\n )\n message = json.loads(resp.read().decode('utf-8'))\n if message['id']:\n return message['id']\n except:\n return False\n\n\n@app.route('/')\ndef main():\n return render_template('post.html')\n\n\n@app.route('/post', methods=['POST'])\ndef post():\n posttext = request.form['post']\n id = mokum_message(posttext)\n mokum_comment(id, 'click to comment --> ' + appurl + '/c/' + encrypt(\n str(id), secretkey))\n return redirect(mainurl + str(id))\n\n\n@app.route('/c/<cid>')\ndef comm(cid):\n return render_template('comment.html', cid=cid)\n\n\n@app.route('/comment', methods=['POST'])\ndef commented():\n postid = decrypt(request.form['cid'], secretkey)\n posttext = request.form['comment']\n mokum_comment(postid, posttext)\n return redirect(mainurl + postid)\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n",
"step-4": "from flask import Flask\nfrom flask import request, redirect, render_template\nfrom flask_bootstrap import Bootstrap\nimport urllib.request\nimport urllib.parse\nimport json\nimport uuid\nimport yaml\nimport hashlib\nfrom Crypto import Random\nfrom Crypto.Cipher import AES\nimport base64\napp = Flask(__name__)\nBootstrap(app)\nwith open('app_config.yml', 'r') as ymlfile:\n cfg = yaml.load(ymlfile)\npostapikey = cfg['app']['postapikey']\nmainurl = cfg['app']['mainurl']\nappurl = cfg['app']['appurl']\nsecretkey = cfg['app']['secret']\nBLOCK_SIZE = 16\n\n\ndef trans(key):\n return hashlib.md5(key.encode('utf-8')).digest()\n\n\ndef encrypt(message, passphrase):\n passphrase = trans(passphrase)\n IV = Random.new().read(BLOCK_SIZE)\n aes = AES.new(passphrase, AES.MODE_CFB, IV)\n return base64.b32encode(IV + aes.encrypt(message)).decode('utf-8')\n\n\ndef decrypt(encrypted, passphrase):\n passphrase = trans(passphrase)\n encrypted = base64.b32decode(encrypted)\n IV = encrypted[:BLOCK_SIZE]\n aes = AES.new(passphrase, AES.MODE_CFB, IV)\n return aes.decrypt(encrypted[BLOCK_SIZE:]).decode('utf-8')\n\n\ndef mokum_message(message):\n try:\n postdata = {'post': {'timelines': ['user'], 'text': message,\n 'comments_disabled': True, 'nsfw': False}, '_uuid': str(uuid.\n uuid4())}\n req = urllib.request.Request('https://mokum.place/api/v1/posts.json')\n req.add_header('Content-Type', 'application/json')\n req.add_header('Accept', 'application/json')\n req.add_header('X-API-Token', postapikey)\n resp = urllib.request.urlopen(req, json.dumps(postdata).encode('utf-8')\n )\n message = json.loads(resp.read().decode('utf-8'))\n if message['post']['id']:\n return message['post']['id']\n except:\n return False\n\n\ndef mokum_comment(messageid, comment):\n try:\n posturl = 'https://mokum.place/api/v1/posts/' + str(messageid\n ) + '/comments.json'\n postdata = {'comment': {'text': comment}, '_uuid': str(uuid.uuid4())}\n req = urllib.request.Request(posturl)\n req.add_header('Content-Type', 'application/json')\n req.add_header('Accept', 'application/json')\n req.add_header('X-API-Token', postapikey)\n resp = urllib.request.urlopen(req, json.dumps(postdata).encode('utf-8')\n )\n message = json.loads(resp.read().decode('utf-8'))\n if message['id']:\n return message['id']\n except:\n return False\n\n\n@app.route('/')\ndef main():\n return render_template('post.html')\n\n\n@app.route('/post', methods=['POST'])\ndef post():\n posttext = request.form['post']\n id = mokum_message(posttext)\n mokum_comment(id, 'click to comment --> ' + appurl + '/c/' + encrypt(\n str(id), secretkey))\n return redirect(mainurl + str(id))\n\n\n@app.route('/c/<cid>')\ndef comm(cid):\n return render_template('comment.html', cid=cid)\n\n\n@app.route('/comment', methods=['POST'])\ndef commented():\n postid = decrypt(request.form['cid'], secretkey)\n posttext = request.form['comment']\n mokum_comment(postid, posttext)\n return redirect(mainurl + postid)\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n",
"step-5": "from flask import Flask\nfrom flask import request, redirect, render_template\nfrom flask_bootstrap import Bootstrap\nimport urllib.request\nimport urllib.parse\nimport json\nimport uuid\nimport yaml\nimport hashlib\nfrom Crypto import Random\nfrom Crypto.Cipher import AES\nimport base64\n\n\n\n\napp = Flask(__name__)\nBootstrap(app)\n\nwith open(\"app_config.yml\", 'r') as ymlfile:\n cfg = yaml.load(ymlfile)\n\npostapikey = cfg['app']['postapikey']\nmainurl = cfg['app']['mainurl']\nappurl = cfg['app']['appurl']\nsecretkey = cfg['app']['secret']\n\n# Some crypto staff\n\nBLOCK_SIZE = 16\n\n\n\ndef trans(key):\n return hashlib.md5(key.encode(\"utf-8\")).digest()\n\n\ndef encrypt(message, passphrase):\n passphrase = trans(passphrase)\n IV = Random.new().read(BLOCK_SIZE)\n aes = AES.new(passphrase, AES.MODE_CFB, IV)\n return base64.b32encode(IV + aes.encrypt(message)).decode(\"utf-8\")\n\n\ndef decrypt(encrypted, passphrase):\n passphrase = trans(passphrase)\n encrypted = base64.b32decode(encrypted)\n IV = encrypted[:BLOCK_SIZE]\n aes = AES.new(passphrase, AES.MODE_CFB, IV)\n return aes.decrypt(encrypted[BLOCK_SIZE:]).decode(\"utf-8\")\n\n\ndef mokum_message(message):\n try:\n postdata = {\"post\": {\"timelines\": [\"user\"],\n \"text\": message,\n \"comments_disabled\": True,\n \"nsfw\": False},\n \"_uuid\": str(uuid.uuid4())\n }\n\n req = urllib.request.Request(\"https://mokum.place/api/v1/posts.json\")\n req.add_header('Content-Type', 'application/json')\n req.add_header('Accept', 'application/json')\n req.add_header('X-API-Token', postapikey)\n\n resp = urllib.request.urlopen(req, json.dumps(postdata).encode(\"utf-8\"))\n\n message = json.loads(resp.read().decode(\"utf-8\"))\n\n if message['post']['id']:\n return message['post']['id']\n except:\n return False\n\n\ndef mokum_comment(messageid, comment):\n try:\n posturl = \"https://mokum.place/api/v1/posts/\" + str(messageid) + \"/comments.json\"\n postdata = {\"comment\": {\"text\": comment,\n # \"platform\": \"anonymous device\"\n },\n \"_uuid\": str(uuid.uuid4())}\n\n req = urllib.request.Request(posturl)\n req.add_header('Content-Type', 'application/json')\n req.add_header('Accept', 'application/json')\n req.add_header('X-API-Token', postapikey)\n\n resp = urllib.request.urlopen(req, json.dumps(postdata).encode(\"utf-8\"))\n\n message = json.loads(resp.read().decode(\"utf-8\"))\n\n if message['id']:\n return message['id']\n\n except:\n return False\n\n\n@app.route('/')\ndef main():\n return render_template('post.html')\n\n\n@app.route('/post', methods=['POST'])\ndef post():\n posttext = request.form['post']\n id = mokum_message(posttext)\n mokum_comment(id, \"click to comment --> \" + appurl + \"/c/\" + encrypt(str(id), secretkey))\n return redirect(mainurl + str(id))\n\n\n@app.route('/c/<cid>')\ndef comm(cid):\n return render_template('comment.html', cid=cid)\n\n\n@app.route('/comment', methods=['POST'])\ndef commented():\n postid = decrypt(request.form['cid'], secretkey)\n posttext = request.form['comment']\n mokum_comment(postid, posttext)\n return redirect(mainurl + postid)\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n",
"step-ids": [
7,
9,
10,
12,
13
]
}
|
[
7,
9,
10,
12,
13
] |
<|reserved_special_token_0|>
def emit_all_users(channel):
"""emits all users"""
all_users = [user.name for user in db.session.query(models.User1).all()]
socketio.emit(channel, {'allUsers': all_users})
return channel
<|reserved_special_token_0|>
@socketio.on('new google user')
def on_new_google_user(data):
"""new user when log in"""
print('Got an event for new google user input with data:', data)
push_new_user_to_db(data['name'], data['email'], data['pic'], data['room'])
emit_all_users(USERS_UPDATED_CHANNEL)
return USERS_UPDATED_CHANNEL
@socketio.on('email results')
def on_send_results(data):
name = 'Madison'
msg = 'Hello ' + name + """! After taking your questionnaire us here at Covid Catcher recommended the following...
"""
msg += data['results']
print(msg)
print(requests.post(
'https://api.mailgun.net/v3/sandbox65fda9f953cb42baacd1bdd34356b8c4.mailgun.org/messages'
, auth=('api', os.environ['MAIL_API_KEY']), data={'from':
'Excited User <mailgun@sandbox65fda9f953cb42baacd1bdd34356b8c4.mailgun.org>'
, 'to': ['miatkem@gmail.com'], 'subject':
'Covid Catcher Questionnaire Results', 'text': msg}).text)
<|reserved_special_token_0|>
@socketio.on('faq questions')
def on_faq_questions(category):
"""get questions and answers in a category"""
if category == '' or category == None:
faqs = get_all_questions()
else:
faqs = get_all_questions(category)
response = []
for faq in faqs:
response.append({'question': faq.question, 'answer': faq.answer})
socketio.emit('faq list', response)
def push_new_user_to_db(name, email, picture, room):
"""puts new user in the database"""
global login
all_users = [user.email for user in db.session.query(models.User1).all()]
if email in all_users:
print(email, ' is already a user in the database!')
else:
db.session.add(models.User1(name, email, picture, room))
db.session.commit()
login = 1
userLog()
emit_all_users(USERS_UPDATED_CHANNEL)
return name
def get_state_colors():
"""Colors for USA map"""
state_colors = []
state_cases = []
state_active = []
for i in get_covid_stats_for_all_states():
state_colors.append(i.color)
state_cases.append(i.cases)
state_active.append(i.activeCases)
socketio.emit('colors', {'colors': state_colors, 'cases': state_cases,
'active': state_active})
def userLog():
"""User Login Check"""
if login == 1:
socketio.emit(NEWUSER, {'login': 1})
return True
@socketio.on('search loc')
def search_loc(data):
"""Search for location covid stats"""
state = data['loc']
push_stat_data(state)
@socketio.on('connect')
def on_connect():
"""Socket for when user connects"""
articleList()
get_state_colors()
ip = request.environ['HTTP_X_FORWARDED_FOR']
loc = get_location(ip)
push_stat_data(loc.state)
return True
@socketio.on('search location')
def searching(data):
"""Search location"""
a = data['area']
areaLoc = search_user(a)
allsites = get_sites(areaLoc[0], areaLoc[1])
title_list = []
address_list = []
lat_list = []
lng_list = []
phone_list = []
web_list = []
miles_list = []
counter = 0
for site in allsites:
if counter != 3:
title_list.append(site.title)
address_list.append(site.entireAddress)
lat_list.append(site.latitude)
lng_list.append(site.longitude)
phone_list.append(site.phone)
web_list.append(site.web)
miles_list.append(site.miles)
counter += 1
else:
break
socketio.emit(SITE, {'user_lat': areaLoc[0], 'user_lng': areaLoc[1],
'title': title_list, 'address': address_list, 'latitude': lat_list,
'longitude': lng_list, 'phone': phone_list, 'web': web_list,
'miles': miles_list, 'key': api_k}, room=request.sid)
return True
<|reserved_special_token_0|>
def articleList():
"""Calls the Article API"""
articles = get_news(5, since=news.YESTERDAY.strftime('%yyyy-%mm-%dd'),
query='covid')
title_list = []
desc_list = []
url_list = []
image_list = []
source_list = []
for art in articles:
image_list.append(art.image)
title_list.append(art.title)
source_list.append(art.source)
desc_list.append(art.description)
url_list.append(art.url)
socketio.emit(ARTICLE, {'title': title_list, 'desc': desc_list, 'url':
url_list, 'img': image_list, 'sources': source_list})
return True
@app.route('/')
def index():
"""loads page"""
models.db.create_all()
db.session.commit()
return flask.render_template('index.html')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def emit_all_users(channel):
"""emits all users"""
all_users = [user.name for user in db.session.query(models.User1).all()]
socketio.emit(channel, {'allUsers': all_users})
return channel
def push_stat_data(state):
"""Calls Covid API"""
information = get_covid_stats_by_state(state)
print(state)
case = information.cases
newCases = information.todaysCases
death = information.deaths
newDeaths = information.todayDeaths
rec = information.recovered
county_list = []
county_confirmed = []
county_deaths = []
county_rec = []
updated = []
print('CASES DEATHS AND RECOVERED: ', case, death, rec)
allcounty = get_covid_stats_by_county(state, '')
for x in allcounty:
county_list.append(x.county)
county_confirmed.append(x.confirmed)
county_deaths.append(x.deaths)
county_rec.append(x.recovered)
updated.append(x.updatedAt)
socketio.emit(STATISTICS, {'state': state, 'cases': case, 'new_cases':
newCases, 'deaths': death, 'new_deaths': newDeaths, 'recovered':
rec, 'countyNames': county_list, 'countyCases': county_confirmed,
'countyDeaths': county_deaths, 'countyRecovered': county_rec,
'updated': updated}, room=request.sid)
r = 'stats are pushed'
return r
@socketio.on('new google user')
def on_new_google_user(data):
"""new user when log in"""
print('Got an event for new google user input with data:', data)
push_new_user_to_db(data['name'], data['email'], data['pic'], data['room'])
emit_all_users(USERS_UPDATED_CHANNEL)
return USERS_UPDATED_CHANNEL
@socketio.on('email results')
def on_send_results(data):
name = 'Madison'
msg = 'Hello ' + name + """! After taking your questionnaire us here at Covid Catcher recommended the following...
"""
msg += data['results']
print(msg)
print(requests.post(
'https://api.mailgun.net/v3/sandbox65fda9f953cb42baacd1bdd34356b8c4.mailgun.org/messages'
, auth=('api', os.environ['MAIL_API_KEY']), data={'from':
'Excited User <mailgun@sandbox65fda9f953cb42baacd1bdd34356b8c4.mailgun.org>'
, 'to': ['miatkem@gmail.com'], 'subject':
'Covid Catcher Questionnaire Results', 'text': msg}).text)
<|reserved_special_token_0|>
@socketio.on('faq questions')
def on_faq_questions(category):
"""get questions and answers in a category"""
if category == '' or category == None:
faqs = get_all_questions()
else:
faqs = get_all_questions(category)
response = []
for faq in faqs:
response.append({'question': faq.question, 'answer': faq.answer})
socketio.emit('faq list', response)
def push_new_user_to_db(name, email, picture, room):
"""puts new user in the database"""
global login
all_users = [user.email for user in db.session.query(models.User1).all()]
if email in all_users:
print(email, ' is already a user in the database!')
else:
db.session.add(models.User1(name, email, picture, room))
db.session.commit()
login = 1
userLog()
emit_all_users(USERS_UPDATED_CHANNEL)
return name
def get_state_colors():
"""Colors for USA map"""
state_colors = []
state_cases = []
state_active = []
for i in get_covid_stats_for_all_states():
state_colors.append(i.color)
state_cases.append(i.cases)
state_active.append(i.activeCases)
socketio.emit('colors', {'colors': state_colors, 'cases': state_cases,
'active': state_active})
def userLog():
"""User Login Check"""
if login == 1:
socketio.emit(NEWUSER, {'login': 1})
return True
@socketio.on('search loc')
def search_loc(data):
"""Search for location covid stats"""
state = data['loc']
push_stat_data(state)
@socketio.on('connect')
def on_connect():
"""Socket for when user connects"""
articleList()
get_state_colors()
ip = request.environ['HTTP_X_FORWARDED_FOR']
loc = get_location(ip)
push_stat_data(loc.state)
return True
@socketio.on('search location')
def searching(data):
"""Search location"""
a = data['area']
areaLoc = search_user(a)
allsites = get_sites(areaLoc[0], areaLoc[1])
title_list = []
address_list = []
lat_list = []
lng_list = []
phone_list = []
web_list = []
miles_list = []
counter = 0
for site in allsites:
if counter != 3:
title_list.append(site.title)
address_list.append(site.entireAddress)
lat_list.append(site.latitude)
lng_list.append(site.longitude)
phone_list.append(site.phone)
web_list.append(site.web)
miles_list.append(site.miles)
counter += 1
else:
break
socketio.emit(SITE, {'user_lat': areaLoc[0], 'user_lng': areaLoc[1],
'title': title_list, 'address': address_list, 'latitude': lat_list,
'longitude': lng_list, 'phone': phone_list, 'web': web_list,
'miles': miles_list, 'key': api_k}, room=request.sid)
return True
<|reserved_special_token_0|>
def articleList():
"""Calls the Article API"""
articles = get_news(5, since=news.YESTERDAY.strftime('%yyyy-%mm-%dd'),
query='covid')
title_list = []
desc_list = []
url_list = []
image_list = []
source_list = []
for art in articles:
image_list.append(art.image)
title_list.append(art.title)
source_list.append(art.source)
desc_list.append(art.description)
url_list.append(art.url)
socketio.emit(ARTICLE, {'title': title_list, 'desc': desc_list, 'url':
url_list, 'img': image_list, 'sources': source_list})
return True
@app.route('/')
def index():
"""loads page"""
models.db.create_all()
db.session.commit()
return flask.render_template('index.html')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def emit_all_users(channel):
"""emits all users"""
all_users = [user.name for user in db.session.query(models.User1).all()]
socketio.emit(channel, {'allUsers': all_users})
return channel
def push_stat_data(state):
"""Calls Covid API"""
information = get_covid_stats_by_state(state)
print(state)
case = information.cases
newCases = information.todaysCases
death = information.deaths
newDeaths = information.todayDeaths
rec = information.recovered
county_list = []
county_confirmed = []
county_deaths = []
county_rec = []
updated = []
print('CASES DEATHS AND RECOVERED: ', case, death, rec)
allcounty = get_covid_stats_by_county(state, '')
for x in allcounty:
county_list.append(x.county)
county_confirmed.append(x.confirmed)
county_deaths.append(x.deaths)
county_rec.append(x.recovered)
updated.append(x.updatedAt)
socketio.emit(STATISTICS, {'state': state, 'cases': case, 'new_cases':
newCases, 'deaths': death, 'new_deaths': newDeaths, 'recovered':
rec, 'countyNames': county_list, 'countyCases': county_confirmed,
'countyDeaths': county_deaths, 'countyRecovered': county_rec,
'updated': updated}, room=request.sid)
r = 'stats are pushed'
return r
@socketio.on('new google user')
def on_new_google_user(data):
"""new user when log in"""
print('Got an event for new google user input with data:', data)
push_new_user_to_db(data['name'], data['email'], data['pic'], data['room'])
emit_all_users(USERS_UPDATED_CHANNEL)
return USERS_UPDATED_CHANNEL
@socketio.on('email results')
def on_send_results(data):
name = 'Madison'
msg = 'Hello ' + name + """! After taking your questionnaire us here at Covid Catcher recommended the following...
"""
msg += data['results']
print(msg)
print(requests.post(
'https://api.mailgun.net/v3/sandbox65fda9f953cb42baacd1bdd34356b8c4.mailgun.org/messages'
, auth=('api', os.environ['MAIL_API_KEY']), data={'from':
'Excited User <mailgun@sandbox65fda9f953cb42baacd1bdd34356b8c4.mailgun.org>'
, 'to': ['miatkem@gmail.com'], 'subject':
'Covid Catcher Questionnaire Results', 'text': msg}).text)
<|reserved_special_token_0|>
@socketio.on('faq questions')
def on_faq_questions(category):
"""get questions and answers in a category"""
if category == '' or category == None:
faqs = get_all_questions()
else:
faqs = get_all_questions(category)
response = []
for faq in faqs:
response.append({'question': faq.question, 'answer': faq.answer})
socketio.emit('faq list', response)
def push_new_user_to_db(name, email, picture, room):
"""puts new user in the database"""
global login
all_users = [user.email for user in db.session.query(models.User1).all()]
if email in all_users:
print(email, ' is already a user in the database!')
else:
db.session.add(models.User1(name, email, picture, room))
db.session.commit()
login = 1
userLog()
emit_all_users(USERS_UPDATED_CHANNEL)
return name
def get_state_colors():
"""Colors for USA map"""
state_colors = []
state_cases = []
state_active = []
for i in get_covid_stats_for_all_states():
state_colors.append(i.color)
state_cases.append(i.cases)
state_active.append(i.activeCases)
socketio.emit('colors', {'colors': state_colors, 'cases': state_cases,
'active': state_active})
def userLog():
"""User Login Check"""
if login == 1:
socketio.emit(NEWUSER, {'login': 1})
return True
@socketio.on('search loc')
def search_loc(data):
"""Search for location covid stats"""
state = data['loc']
push_stat_data(state)
@socketio.on('connect')
def on_connect():
"""Socket for when user connects"""
articleList()
get_state_colors()
ip = request.environ['HTTP_X_FORWARDED_FOR']
loc = get_location(ip)
push_stat_data(loc.state)
return True
@socketio.on('search location')
def searching(data):
"""Search location"""
a = data['area']
areaLoc = search_user(a)
allsites = get_sites(areaLoc[0], areaLoc[1])
title_list = []
address_list = []
lat_list = []
lng_list = []
phone_list = []
web_list = []
miles_list = []
counter = 0
for site in allsites:
if counter != 3:
title_list.append(site.title)
address_list.append(site.entireAddress)
lat_list.append(site.latitude)
lng_list.append(site.longitude)
phone_list.append(site.phone)
web_list.append(site.web)
miles_list.append(site.miles)
counter += 1
else:
break
socketio.emit(SITE, {'user_lat': areaLoc[0], 'user_lng': areaLoc[1],
'title': title_list, 'address': address_list, 'latitude': lat_list,
'longitude': lng_list, 'phone': phone_list, 'web': web_list,
'miles': miles_list, 'key': api_k}, room=request.sid)
return True
<|reserved_special_token_0|>
def articleList():
"""Calls the Article API"""
articles = get_news(5, since=news.YESTERDAY.strftime('%yyyy-%mm-%dd'),
query='covid')
title_list = []
desc_list = []
url_list = []
image_list = []
source_list = []
for art in articles:
image_list.append(art.image)
title_list.append(art.title)
source_list.append(art.source)
desc_list.append(art.description)
url_list.append(art.url)
socketio.emit(ARTICLE, {'title': title_list, 'desc': desc_list, 'url':
url_list, 'img': image_list, 'sources': source_list})
return True
@app.route('/')
def index():
"""loads page"""
models.db.create_all()
db.session.commit()
return flask.render_template('index.html')
@app.errorhandler(404)
def page_not_found(e):
"""Handles Page Not Found"""
return flask.render_template('index.html')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import os
from os.path import join, dirname
import json
import requests
import flask
from flask import request
import flask_sqlalchemy
import flask_socketio
from dotenv import load_dotenv
from covid import get_covid_stats_by_state
from covid import get_covid_stats_by_county
from covid import get_covid_stats_for_all_states
from faq import get_all_questions
from faq import get_all_categories
from faq import FAQ
import news
from news import get_news
import location
from location import get_location
import sites
from sites import get_sites
from sites import search_user
from sites import TestingSites
app = flask.Flask(__name__)
socketio = flask_socketio.SocketIO(app)
socketio.init_app(app, cors_allowed_origins='*')
dotenv_path = join(dirname(__file__), 'sql.env')
load_dotenv(dotenv_path)
dotenv_path = join(dirname(__file__), 'api-keys.env')
load_dotenv(dotenv_path)
database_uri = os.environ['DATABASE_URL']
api_k = os.environ['MAP_API_KEY']
app.config['SQLALCHEMY_DATABASE_URI'] = database_uri
login = 0
db = flask_sqlalchemy.SQLAlchemy(app)
db.init_app(app)
db.app = app
USERS_UPDATED_CHANNEL = 'users updated'
STATISTICS = 'stats'
NEWUSER = 'new user'
FAQS = 'faq lists'
ARTICLE = 'article list'
SITE = 'site page'
SEARCH = 'searching'
import models
def emit_all_users(channel):
"""emits all users"""
all_users = [user.name for user in db.session.query(models.User1).all()]
socketio.emit(channel, {'allUsers': all_users})
return channel
def push_stat_data(state):
"""Calls Covid API"""
information = get_covid_stats_by_state(state)
print(state)
case = information.cases
newCases = information.todaysCases
death = information.deaths
newDeaths = information.todayDeaths
rec = information.recovered
county_list = []
county_confirmed = []
county_deaths = []
county_rec = []
updated = []
print('CASES DEATHS AND RECOVERED: ', case, death, rec)
allcounty = get_covid_stats_by_county(state, '')
for x in allcounty:
county_list.append(x.county)
county_confirmed.append(x.confirmed)
county_deaths.append(x.deaths)
county_rec.append(x.recovered)
updated.append(x.updatedAt)
socketio.emit(STATISTICS, {'state': state, 'cases': case, 'new_cases':
newCases, 'deaths': death, 'new_deaths': newDeaths, 'recovered':
rec, 'countyNames': county_list, 'countyCases': county_confirmed,
'countyDeaths': county_deaths, 'countyRecovered': county_rec,
'updated': updated}, room=request.sid)
r = 'stats are pushed'
return r
@socketio.on('new google user')
def on_new_google_user(data):
"""new user when log in"""
print('Got an event for new google user input with data:', data)
push_new_user_to_db(data['name'], data['email'], data['pic'], data['room'])
emit_all_users(USERS_UPDATED_CHANNEL)
return USERS_UPDATED_CHANNEL
@socketio.on('email results')
def on_send_results(data):
name = 'Madison'
msg = 'Hello ' + name + """! After taking your questionnaire us here at Covid Catcher recommended the following...
"""
msg += data['results']
print(msg)
print(requests.post(
'https://api.mailgun.net/v3/sandbox65fda9f953cb42baacd1bdd34356b8c4.mailgun.org/messages'
, auth=('api', os.environ['MAIL_API_KEY']), data={'from':
'Excited User <mailgun@sandbox65fda9f953cb42baacd1bdd34356b8c4.mailgun.org>'
, 'to': ['miatkem@gmail.com'], 'subject':
'Covid Catcher Questionnaire Results', 'text': msg}).text)
@socketio.on('faq categories')
def on_faq_categories():
"""get all categories for faqs"""
categories = get_all_categories()
socketio.emit('faq category list', categories)
@socketio.on('faq questions')
def on_faq_questions(category):
"""get questions and answers in a category"""
if category == '' or category == None:
faqs = get_all_questions()
else:
faqs = get_all_questions(category)
response = []
for faq in faqs:
response.append({'question': faq.question, 'answer': faq.answer})
socketio.emit('faq list', response)
def push_new_user_to_db(name, email, picture, room):
"""puts new user in the database"""
global login
all_users = [user.email for user in db.session.query(models.User1).all()]
if email in all_users:
print(email, ' is already a user in the database!')
else:
db.session.add(models.User1(name, email, picture, room))
db.session.commit()
login = 1
userLog()
emit_all_users(USERS_UPDATED_CHANNEL)
return name
def get_state_colors():
"""Colors for USA map"""
state_colors = []
state_cases = []
state_active = []
for i in get_covid_stats_for_all_states():
state_colors.append(i.color)
state_cases.append(i.cases)
state_active.append(i.activeCases)
socketio.emit('colors', {'colors': state_colors, 'cases': state_cases,
'active': state_active})
def userLog():
"""User Login Check"""
if login == 1:
socketio.emit(NEWUSER, {'login': 1})
return True
@socketio.on('search loc')
def search_loc(data):
"""Search for location covid stats"""
state = data['loc']
push_stat_data(state)
@socketio.on('connect')
def on_connect():
"""Socket for when user connects"""
articleList()
get_state_colors()
ip = request.environ['HTTP_X_FORWARDED_FOR']
loc = get_location(ip)
push_stat_data(loc.state)
return True
@socketio.on('search location')
def searching(data):
"""Search location"""
a = data['area']
areaLoc = search_user(a)
allsites = get_sites(areaLoc[0], areaLoc[1])
title_list = []
address_list = []
lat_list = []
lng_list = []
phone_list = []
web_list = []
miles_list = []
counter = 0
for site in allsites:
if counter != 3:
title_list.append(site.title)
address_list.append(site.entireAddress)
lat_list.append(site.latitude)
lng_list.append(site.longitude)
phone_list.append(site.phone)
web_list.append(site.web)
miles_list.append(site.miles)
counter += 1
else:
break
socketio.emit(SITE, {'user_lat': areaLoc[0], 'user_lng': areaLoc[1],
'title': title_list, 'address': address_list, 'latitude': lat_list,
'longitude': lng_list, 'phone': phone_list, 'web': web_list,
'miles': miles_list, 'key': api_k}, room=request.sid)
return True
<|reserved_special_token_0|>
def articleList():
"""Calls the Article API"""
articles = get_news(5, since=news.YESTERDAY.strftime('%yyyy-%mm-%dd'),
query='covid')
title_list = []
desc_list = []
url_list = []
image_list = []
source_list = []
for art in articles:
image_list.append(art.image)
title_list.append(art.title)
source_list.append(art.source)
desc_list.append(art.description)
url_list.append(art.url)
socketio.emit(ARTICLE, {'title': title_list, 'desc': desc_list, 'url':
url_list, 'img': image_list, 'sources': source_list})
return True
@app.route('/')
def index():
"""loads page"""
models.db.create_all()
db.session.commit()
return flask.render_template('index.html')
@app.errorhandler(404)
def page_not_found(e):
"""Handles Page Not Found"""
return flask.render_template('index.html')
if __name__ == '__main__':
socketio.run(app, host=os.getenv('IP', '0.0.0.0'), port=int(os.getenv(
'PORT', 8080)), debug=True)
<|reserved_special_token_1|>
# pylint: disable=C0103, C0413, E1101, W0611
"""Covid Catcher Backend"""
import os
from os.path import join, dirname
import json
import requests
import flask
from flask import request
import flask_sqlalchemy
import flask_socketio
from dotenv import load_dotenv
from covid import get_covid_stats_by_state
from covid import get_covid_stats_by_county
from covid import get_covid_stats_for_all_states
from faq import get_all_questions
from faq import get_all_categories
from faq import FAQ
import news
from news import get_news
import location
from location import get_location
import sites
from sites import get_sites
from sites import search_user
from sites import TestingSites
app = flask.Flask(__name__)
socketio = flask_socketio.SocketIO(app)
socketio.init_app(app, cors_allowed_origins="*")
dotenv_path = join(dirname(__file__), "sql.env")
load_dotenv(dotenv_path)
dotenv_path = join(dirname(__file__), "api-keys.env")
load_dotenv(dotenv_path)
database_uri = os.environ["DATABASE_URL"]
api_k = os.environ["MAP_API_KEY"]
app.config["SQLALCHEMY_DATABASE_URI"] = database_uri
login = 0
db = flask_sqlalchemy.SQLAlchemy(app)
db.init_app(app)
db.app = app
USERS_UPDATED_CHANNEL = "users updated"
STATISTICS = "stats"
NEWUSER = "new user"
FAQS = "faq lists"
ARTICLE = "article list"
SITE = "site page"
SEARCH = "searching"
import models
def emit_all_users(channel):
"""emits all users"""
all_users = [user.name for user in db.session.query(models.User1).all()]
socketio.emit(channel, {"allUsers": all_users})
return channel
def push_stat_data(state):
"""Calls Covid API"""
information = get_covid_stats_by_state(state)
print(state)
case = information.cases
newCases = information.todaysCases
death = information.deaths
newDeaths = information.todayDeaths
rec = information.recovered
county_list = []
county_confirmed = []
county_deaths = []
county_rec = []
updated = []
print("CASES DEATHS AND RECOVERED: ", case, death, rec)
allcounty = get_covid_stats_by_county(state, "")
for x in allcounty:
county_list.append(x.county)
county_confirmed.append(x.confirmed)
county_deaths.append(x.deaths)
county_rec.append(x.recovered)
updated.append(x.updatedAt)
socketio.emit(
STATISTICS,
{
"state": state,
"cases": case,
"new_cases": newCases,
"deaths": death,
"new_deaths": newDeaths,
"recovered": rec,
"countyNames": county_list,
"countyCases": county_confirmed,
"countyDeaths": county_deaths,
"countyRecovered": county_rec,
"updated": updated,
},
room=request.sid,
)
r = "stats are pushed"
return r
@socketio.on("new google user")
def on_new_google_user(data):
"""new user when log in"""
print("Got an event for new google user input with data:", data)
push_new_user_to_db(data["name"], data["email"], data["pic"], data["room"])
emit_all_users(USERS_UPDATED_CHANNEL)
return USERS_UPDATED_CHANNEL
@socketio.on("email results")
def on_send_results(data):
#This name would be the user but mailgun will not allow emails to be sent to
# unverified users without paying.
name="Madison"
msg = "Hello "+name+"! After taking your questionnaire us here at Covid Catcher recommended the following...\n"
msg += data['results']
print(msg)
print(requests.post(
"https://api.mailgun.net/v3/sandbox65fda9f953cb42baacd1bdd34356b8c4.mailgun.org/messages",
auth=("api", os.environ["MAIL_API_KEY"]),
data={"from": "Excited User <mailgun@sandbox65fda9f953cb42baacd1bdd34356b8c4.mailgun.org>",
#This only sends to madison becuase mailgun for free can only send to verified emails
#To send to the specific users email simply pull the email from the database at this socket
# number and send it there
"to": ["miatkem@gmail.com"],
"subject": "Covid Catcher Questionnaire Results",
"text":msg}).text)
@socketio.on("faq categories")
def on_faq_categories():
"""get all categories for faqs"""
categories = get_all_categories()
socketio.emit("faq category list", categories)
@socketio.on("faq questions")
def on_faq_questions(category):
"""get questions and answers in a category"""
if category == "" or category == None:
faqs = get_all_questions()
else:
faqs = get_all_questions(category)
response = []
for faq in faqs:
response.append(
{
"question": faq.question,
"answer": faq.answer,
}
)
socketio.emit("faq list", response)
def push_new_user_to_db(name, email, picture, room):
"""puts new user in the database"""
global login
all_users = [user.email for user in db.session.query(models.User1).all()]
if email in all_users:
print(email, " is already a user in the database!")
else:
db.session.add(models.User1(name, email, picture, room))
db.session.commit()
login = 1
userLog()
emit_all_users(USERS_UPDATED_CHANNEL)
return name
def get_state_colors():
"""Colors for USA map"""
state_colors = []
state_cases = []
state_active = []
for i in get_covid_stats_for_all_states():
state_colors.append(i.color)
state_cases.append(i.cases)
state_active.append(i.activeCases)
socketio.emit(
"colors", {"colors": state_colors, "cases": state_cases, "active": state_active}
)
def userLog():
"""User Login Check"""
if login == 1:
socketio.emit(NEWUSER, {"login": 1})
return True
@socketio.on("search loc")
def search_loc(data):
"""Search for location covid stats"""
state = data["loc"]
push_stat_data(state)
@socketio.on("connect")
def on_connect():
"""Socket for when user connects"""
articleList()
#test_location()
get_state_colors()
ip = request.environ["HTTP_X_FORWARDED_FOR"]
loc = get_location(ip)
push_stat_data(loc.state)
return True
@socketio.on("search location")
def searching(data):
"""Search location"""
a = data["area"]
areaLoc = search_user(a)
allsites = get_sites(areaLoc[0], areaLoc[1])
title_list = []
address_list = []
lat_list = []
lng_list = []
phone_list = []
web_list = []
miles_list = []
counter = 0
for site in allsites:
if counter != 3:
title_list.append(site.title)
address_list.append(site.entireAddress)
lat_list.append(site.latitude)
lng_list.append(site.longitude)
phone_list.append(site.phone)
web_list.append(site.web)
miles_list.append(site.miles)
counter += 1
else:
break
socketio.emit(
SITE,
{
"user_lat": areaLoc[0],
"user_lng": areaLoc[1],
"title": title_list,
"address": address_list,
"latitude": lat_list,
"longitude": lng_list,
"phone": phone_list,
"web": web_list,
"miles": miles_list,
"key": api_k,
}, room=request.sid
)
return True
'''
def test_location():
"""Get testing locations"""
ip = request.environ["HTTP_X_FORWARDED_FOR"]
loc = get_location(ip)
lat = loc.latitude
lng = loc.longitude
allsites = get_sites(lat, lng)
title_list = []
address_list = []
lat_list = []
lng_list = []
phone_list = []
web_list = []
miles_list = []
counter = 0
for site in allsites:
if counter != 3:
title_list.append(site.title)
address_list.append(site.entireAddress)
lat_list.append(site.latitude)
lng_list.append(site.longitude)
phone_list.append(site.phone)
web_list.append(site.web)
miles_list.append(site.miles)
counter += 1
else:
break
socketio.emit(
SITE,
{
"user_lat": lat,
"user_lng": lng,
"title": title_list,
"address": address_list,
"latitude": lat_list,
"longitude": lng_list,
"phone": phone_list,
"web": web_list,
"miles": miles_list,
"key": api_k,
},
)
return True'''
def articleList():
"""Calls the Article API"""
articles = get_news(
5, since=news.YESTERDAY.strftime("%yyyy-%mm-%dd"), query="covid"
)
title_list = []
desc_list = []
url_list = []
image_list = []
source_list = []
for art in articles:
image_list.append(art.image)
title_list.append(art.title)
source_list.append(art.source)
desc_list.append(art.description)
url_list.append(art.url)
socketio.emit(
ARTICLE,
{
"title": title_list,
"desc": desc_list,
"url": url_list,
"img": image_list,
"sources": source_list,
},
)
return True
@app.route("/")
def index():
"""loads page"""
models.db.create_all()
db.session.commit()
return flask.render_template("index.html")
@app.errorhandler(404)
def page_not_found(e):
"""Handles Page Not Found"""
return flask.render_template("index.html")
if __name__ == "__main__":
socketio.run(
app,
host=os.getenv("IP", "0.0.0.0"),
port=int(os.getenv("PORT", 8080)),
debug=True,
)
|
flexible
|
{
"blob_id": "8d48b5b831edb62b2d9624bc23cae45d390fd224",
"index": 8035,
"step-1": "<mask token>\n\n\ndef emit_all_users(channel):\n \"\"\"emits all users\"\"\"\n all_users = [user.name for user in db.session.query(models.User1).all()]\n socketio.emit(channel, {'allUsers': all_users})\n return channel\n\n\n<mask token>\n\n\n@socketio.on('new google user')\ndef on_new_google_user(data):\n \"\"\"new user when log in\"\"\"\n print('Got an event for new google user input with data:', data)\n push_new_user_to_db(data['name'], data['email'], data['pic'], data['room'])\n emit_all_users(USERS_UPDATED_CHANNEL)\n return USERS_UPDATED_CHANNEL\n\n\n@socketio.on('email results')\ndef on_send_results(data):\n name = 'Madison'\n msg = 'Hello ' + name + \"\"\"! After taking your questionnaire us here at Covid Catcher recommended the following...\n\"\"\"\n msg += data['results']\n print(msg)\n print(requests.post(\n 'https://api.mailgun.net/v3/sandbox65fda9f953cb42baacd1bdd34356b8c4.mailgun.org/messages'\n , auth=('api', os.environ['MAIL_API_KEY']), data={'from':\n 'Excited User <mailgun@sandbox65fda9f953cb42baacd1bdd34356b8c4.mailgun.org>'\n , 'to': ['miatkem@gmail.com'], 'subject':\n 'Covid Catcher Questionnaire Results', 'text': msg}).text)\n\n\n<mask token>\n\n\n@socketio.on('faq questions')\ndef on_faq_questions(category):\n \"\"\"get questions and answers in a category\"\"\"\n if category == '' or category == None:\n faqs = get_all_questions()\n else:\n faqs = get_all_questions(category)\n response = []\n for faq in faqs:\n response.append({'question': faq.question, 'answer': faq.answer})\n socketio.emit('faq list', response)\n\n\ndef push_new_user_to_db(name, email, picture, room):\n \"\"\"puts new user in the database\"\"\"\n global login\n all_users = [user.email for user in db.session.query(models.User1).all()]\n if email in all_users:\n print(email, ' is already a user in the database!')\n else:\n db.session.add(models.User1(name, email, picture, room))\n db.session.commit()\n login = 1\n userLog()\n emit_all_users(USERS_UPDATED_CHANNEL)\n return name\n\n\ndef get_state_colors():\n \"\"\"Colors for USA map\"\"\"\n state_colors = []\n state_cases = []\n state_active = []\n for i in get_covid_stats_for_all_states():\n state_colors.append(i.color)\n state_cases.append(i.cases)\n state_active.append(i.activeCases)\n socketio.emit('colors', {'colors': state_colors, 'cases': state_cases,\n 'active': state_active})\n\n\ndef userLog():\n \"\"\"User Login Check\"\"\"\n if login == 1:\n socketio.emit(NEWUSER, {'login': 1})\n return True\n\n\n@socketio.on('search loc')\ndef search_loc(data):\n \"\"\"Search for location covid stats\"\"\"\n state = data['loc']\n push_stat_data(state)\n\n\n@socketio.on('connect')\ndef on_connect():\n \"\"\"Socket for when user connects\"\"\"\n articleList()\n get_state_colors()\n ip = request.environ['HTTP_X_FORWARDED_FOR']\n loc = get_location(ip)\n push_stat_data(loc.state)\n return True\n\n\n@socketio.on('search location')\ndef searching(data):\n \"\"\"Search location\"\"\"\n a = data['area']\n areaLoc = search_user(a)\n allsites = get_sites(areaLoc[0], areaLoc[1])\n title_list = []\n address_list = []\n lat_list = []\n lng_list = []\n phone_list = []\n web_list = []\n miles_list = []\n counter = 0\n for site in allsites:\n if counter != 3:\n title_list.append(site.title)\n address_list.append(site.entireAddress)\n lat_list.append(site.latitude)\n lng_list.append(site.longitude)\n phone_list.append(site.phone)\n web_list.append(site.web)\n miles_list.append(site.miles)\n counter += 1\n else:\n break\n socketio.emit(SITE, {'user_lat': areaLoc[0], 'user_lng': areaLoc[1],\n 'title': title_list, 'address': address_list, 'latitude': lat_list,\n 'longitude': lng_list, 'phone': phone_list, 'web': web_list,\n 'miles': miles_list, 'key': api_k}, room=request.sid)\n return True\n\n\n<mask token>\n\n\ndef articleList():\n \"\"\"Calls the Article API\"\"\"\n articles = get_news(5, since=news.YESTERDAY.strftime('%yyyy-%mm-%dd'),\n query='covid')\n title_list = []\n desc_list = []\n url_list = []\n image_list = []\n source_list = []\n for art in articles:\n image_list.append(art.image)\n title_list.append(art.title)\n source_list.append(art.source)\n desc_list.append(art.description)\n url_list.append(art.url)\n socketio.emit(ARTICLE, {'title': title_list, 'desc': desc_list, 'url':\n url_list, 'img': image_list, 'sources': source_list})\n return True\n\n\n@app.route('/')\ndef index():\n \"\"\"loads page\"\"\"\n models.db.create_all()\n db.session.commit()\n return flask.render_template('index.html')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef emit_all_users(channel):\n \"\"\"emits all users\"\"\"\n all_users = [user.name for user in db.session.query(models.User1).all()]\n socketio.emit(channel, {'allUsers': all_users})\n return channel\n\n\ndef push_stat_data(state):\n \"\"\"Calls Covid API\"\"\"\n information = get_covid_stats_by_state(state)\n print(state)\n case = information.cases\n newCases = information.todaysCases\n death = information.deaths\n newDeaths = information.todayDeaths\n rec = information.recovered\n county_list = []\n county_confirmed = []\n county_deaths = []\n county_rec = []\n updated = []\n print('CASES DEATHS AND RECOVERED: ', case, death, rec)\n allcounty = get_covid_stats_by_county(state, '')\n for x in allcounty:\n county_list.append(x.county)\n county_confirmed.append(x.confirmed)\n county_deaths.append(x.deaths)\n county_rec.append(x.recovered)\n updated.append(x.updatedAt)\n socketio.emit(STATISTICS, {'state': state, 'cases': case, 'new_cases':\n newCases, 'deaths': death, 'new_deaths': newDeaths, 'recovered':\n rec, 'countyNames': county_list, 'countyCases': county_confirmed,\n 'countyDeaths': county_deaths, 'countyRecovered': county_rec,\n 'updated': updated}, room=request.sid)\n r = 'stats are pushed'\n return r\n\n\n@socketio.on('new google user')\ndef on_new_google_user(data):\n \"\"\"new user when log in\"\"\"\n print('Got an event for new google user input with data:', data)\n push_new_user_to_db(data['name'], data['email'], data['pic'], data['room'])\n emit_all_users(USERS_UPDATED_CHANNEL)\n return USERS_UPDATED_CHANNEL\n\n\n@socketio.on('email results')\ndef on_send_results(data):\n name = 'Madison'\n msg = 'Hello ' + name + \"\"\"! After taking your questionnaire us here at Covid Catcher recommended the following...\n\"\"\"\n msg += data['results']\n print(msg)\n print(requests.post(\n 'https://api.mailgun.net/v3/sandbox65fda9f953cb42baacd1bdd34356b8c4.mailgun.org/messages'\n , auth=('api', os.environ['MAIL_API_KEY']), data={'from':\n 'Excited User <mailgun@sandbox65fda9f953cb42baacd1bdd34356b8c4.mailgun.org>'\n , 'to': ['miatkem@gmail.com'], 'subject':\n 'Covid Catcher Questionnaire Results', 'text': msg}).text)\n\n\n<mask token>\n\n\n@socketio.on('faq questions')\ndef on_faq_questions(category):\n \"\"\"get questions and answers in a category\"\"\"\n if category == '' or category == None:\n faqs = get_all_questions()\n else:\n faqs = get_all_questions(category)\n response = []\n for faq in faqs:\n response.append({'question': faq.question, 'answer': faq.answer})\n socketio.emit('faq list', response)\n\n\ndef push_new_user_to_db(name, email, picture, room):\n \"\"\"puts new user in the database\"\"\"\n global login\n all_users = [user.email for user in db.session.query(models.User1).all()]\n if email in all_users:\n print(email, ' is already a user in the database!')\n else:\n db.session.add(models.User1(name, email, picture, room))\n db.session.commit()\n login = 1\n userLog()\n emit_all_users(USERS_UPDATED_CHANNEL)\n return name\n\n\ndef get_state_colors():\n \"\"\"Colors for USA map\"\"\"\n state_colors = []\n state_cases = []\n state_active = []\n for i in get_covid_stats_for_all_states():\n state_colors.append(i.color)\n state_cases.append(i.cases)\n state_active.append(i.activeCases)\n socketio.emit('colors', {'colors': state_colors, 'cases': state_cases,\n 'active': state_active})\n\n\ndef userLog():\n \"\"\"User Login Check\"\"\"\n if login == 1:\n socketio.emit(NEWUSER, {'login': 1})\n return True\n\n\n@socketio.on('search loc')\ndef search_loc(data):\n \"\"\"Search for location covid stats\"\"\"\n state = data['loc']\n push_stat_data(state)\n\n\n@socketio.on('connect')\ndef on_connect():\n \"\"\"Socket for when user connects\"\"\"\n articleList()\n get_state_colors()\n ip = request.environ['HTTP_X_FORWARDED_FOR']\n loc = get_location(ip)\n push_stat_data(loc.state)\n return True\n\n\n@socketio.on('search location')\ndef searching(data):\n \"\"\"Search location\"\"\"\n a = data['area']\n areaLoc = search_user(a)\n allsites = get_sites(areaLoc[0], areaLoc[1])\n title_list = []\n address_list = []\n lat_list = []\n lng_list = []\n phone_list = []\n web_list = []\n miles_list = []\n counter = 0\n for site in allsites:\n if counter != 3:\n title_list.append(site.title)\n address_list.append(site.entireAddress)\n lat_list.append(site.latitude)\n lng_list.append(site.longitude)\n phone_list.append(site.phone)\n web_list.append(site.web)\n miles_list.append(site.miles)\n counter += 1\n else:\n break\n socketio.emit(SITE, {'user_lat': areaLoc[0], 'user_lng': areaLoc[1],\n 'title': title_list, 'address': address_list, 'latitude': lat_list,\n 'longitude': lng_list, 'phone': phone_list, 'web': web_list,\n 'miles': miles_list, 'key': api_k}, room=request.sid)\n return True\n\n\n<mask token>\n\n\ndef articleList():\n \"\"\"Calls the Article API\"\"\"\n articles = get_news(5, since=news.YESTERDAY.strftime('%yyyy-%mm-%dd'),\n query='covid')\n title_list = []\n desc_list = []\n url_list = []\n image_list = []\n source_list = []\n for art in articles:\n image_list.append(art.image)\n title_list.append(art.title)\n source_list.append(art.source)\n desc_list.append(art.description)\n url_list.append(art.url)\n socketio.emit(ARTICLE, {'title': title_list, 'desc': desc_list, 'url':\n url_list, 'img': image_list, 'sources': source_list})\n return True\n\n\n@app.route('/')\ndef index():\n \"\"\"loads page\"\"\"\n models.db.create_all()\n db.session.commit()\n return flask.render_template('index.html')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef emit_all_users(channel):\n \"\"\"emits all users\"\"\"\n all_users = [user.name for user in db.session.query(models.User1).all()]\n socketio.emit(channel, {'allUsers': all_users})\n return channel\n\n\ndef push_stat_data(state):\n \"\"\"Calls Covid API\"\"\"\n information = get_covid_stats_by_state(state)\n print(state)\n case = information.cases\n newCases = information.todaysCases\n death = information.deaths\n newDeaths = information.todayDeaths\n rec = information.recovered\n county_list = []\n county_confirmed = []\n county_deaths = []\n county_rec = []\n updated = []\n print('CASES DEATHS AND RECOVERED: ', case, death, rec)\n allcounty = get_covid_stats_by_county(state, '')\n for x in allcounty:\n county_list.append(x.county)\n county_confirmed.append(x.confirmed)\n county_deaths.append(x.deaths)\n county_rec.append(x.recovered)\n updated.append(x.updatedAt)\n socketio.emit(STATISTICS, {'state': state, 'cases': case, 'new_cases':\n newCases, 'deaths': death, 'new_deaths': newDeaths, 'recovered':\n rec, 'countyNames': county_list, 'countyCases': county_confirmed,\n 'countyDeaths': county_deaths, 'countyRecovered': county_rec,\n 'updated': updated}, room=request.sid)\n r = 'stats are pushed'\n return r\n\n\n@socketio.on('new google user')\ndef on_new_google_user(data):\n \"\"\"new user when log in\"\"\"\n print('Got an event for new google user input with data:', data)\n push_new_user_to_db(data['name'], data['email'], data['pic'], data['room'])\n emit_all_users(USERS_UPDATED_CHANNEL)\n return USERS_UPDATED_CHANNEL\n\n\n@socketio.on('email results')\ndef on_send_results(data):\n name = 'Madison'\n msg = 'Hello ' + name + \"\"\"! After taking your questionnaire us here at Covid Catcher recommended the following...\n\"\"\"\n msg += data['results']\n print(msg)\n print(requests.post(\n 'https://api.mailgun.net/v3/sandbox65fda9f953cb42baacd1bdd34356b8c4.mailgun.org/messages'\n , auth=('api', os.environ['MAIL_API_KEY']), data={'from':\n 'Excited User <mailgun@sandbox65fda9f953cb42baacd1bdd34356b8c4.mailgun.org>'\n , 'to': ['miatkem@gmail.com'], 'subject':\n 'Covid Catcher Questionnaire Results', 'text': msg}).text)\n\n\n<mask token>\n\n\n@socketio.on('faq questions')\ndef on_faq_questions(category):\n \"\"\"get questions and answers in a category\"\"\"\n if category == '' or category == None:\n faqs = get_all_questions()\n else:\n faqs = get_all_questions(category)\n response = []\n for faq in faqs:\n response.append({'question': faq.question, 'answer': faq.answer})\n socketio.emit('faq list', response)\n\n\ndef push_new_user_to_db(name, email, picture, room):\n \"\"\"puts new user in the database\"\"\"\n global login\n all_users = [user.email for user in db.session.query(models.User1).all()]\n if email in all_users:\n print(email, ' is already a user in the database!')\n else:\n db.session.add(models.User1(name, email, picture, room))\n db.session.commit()\n login = 1\n userLog()\n emit_all_users(USERS_UPDATED_CHANNEL)\n return name\n\n\ndef get_state_colors():\n \"\"\"Colors for USA map\"\"\"\n state_colors = []\n state_cases = []\n state_active = []\n for i in get_covid_stats_for_all_states():\n state_colors.append(i.color)\n state_cases.append(i.cases)\n state_active.append(i.activeCases)\n socketio.emit('colors', {'colors': state_colors, 'cases': state_cases,\n 'active': state_active})\n\n\ndef userLog():\n \"\"\"User Login Check\"\"\"\n if login == 1:\n socketio.emit(NEWUSER, {'login': 1})\n return True\n\n\n@socketio.on('search loc')\ndef search_loc(data):\n \"\"\"Search for location covid stats\"\"\"\n state = data['loc']\n push_stat_data(state)\n\n\n@socketio.on('connect')\ndef on_connect():\n \"\"\"Socket for when user connects\"\"\"\n articleList()\n get_state_colors()\n ip = request.environ['HTTP_X_FORWARDED_FOR']\n loc = get_location(ip)\n push_stat_data(loc.state)\n return True\n\n\n@socketio.on('search location')\ndef searching(data):\n \"\"\"Search location\"\"\"\n a = data['area']\n areaLoc = search_user(a)\n allsites = get_sites(areaLoc[0], areaLoc[1])\n title_list = []\n address_list = []\n lat_list = []\n lng_list = []\n phone_list = []\n web_list = []\n miles_list = []\n counter = 0\n for site in allsites:\n if counter != 3:\n title_list.append(site.title)\n address_list.append(site.entireAddress)\n lat_list.append(site.latitude)\n lng_list.append(site.longitude)\n phone_list.append(site.phone)\n web_list.append(site.web)\n miles_list.append(site.miles)\n counter += 1\n else:\n break\n socketio.emit(SITE, {'user_lat': areaLoc[0], 'user_lng': areaLoc[1],\n 'title': title_list, 'address': address_list, 'latitude': lat_list,\n 'longitude': lng_list, 'phone': phone_list, 'web': web_list,\n 'miles': miles_list, 'key': api_k}, room=request.sid)\n return True\n\n\n<mask token>\n\n\ndef articleList():\n \"\"\"Calls the Article API\"\"\"\n articles = get_news(5, since=news.YESTERDAY.strftime('%yyyy-%mm-%dd'),\n query='covid')\n title_list = []\n desc_list = []\n url_list = []\n image_list = []\n source_list = []\n for art in articles:\n image_list.append(art.image)\n title_list.append(art.title)\n source_list.append(art.source)\n desc_list.append(art.description)\n url_list.append(art.url)\n socketio.emit(ARTICLE, {'title': title_list, 'desc': desc_list, 'url':\n url_list, 'img': image_list, 'sources': source_list})\n return True\n\n\n@app.route('/')\ndef index():\n \"\"\"loads page\"\"\"\n models.db.create_all()\n db.session.commit()\n return flask.render_template('index.html')\n\n\n@app.errorhandler(404)\ndef page_not_found(e):\n \"\"\"Handles Page Not Found\"\"\"\n return flask.render_template('index.html')\n\n\n<mask token>\n",
"step-4": "<mask token>\nimport os\nfrom os.path import join, dirname\nimport json\nimport requests\nimport flask\nfrom flask import request\nimport flask_sqlalchemy\nimport flask_socketio\nfrom dotenv import load_dotenv\nfrom covid import get_covid_stats_by_state\nfrom covid import get_covid_stats_by_county\nfrom covid import get_covid_stats_for_all_states\nfrom faq import get_all_questions\nfrom faq import get_all_categories\nfrom faq import FAQ\nimport news\nfrom news import get_news\nimport location\nfrom location import get_location\nimport sites\nfrom sites import get_sites\nfrom sites import search_user\nfrom sites import TestingSites\napp = flask.Flask(__name__)\nsocketio = flask_socketio.SocketIO(app)\nsocketio.init_app(app, cors_allowed_origins='*')\ndotenv_path = join(dirname(__file__), 'sql.env')\nload_dotenv(dotenv_path)\ndotenv_path = join(dirname(__file__), 'api-keys.env')\nload_dotenv(dotenv_path)\ndatabase_uri = os.environ['DATABASE_URL']\napi_k = os.environ['MAP_API_KEY']\napp.config['SQLALCHEMY_DATABASE_URI'] = database_uri\nlogin = 0\ndb = flask_sqlalchemy.SQLAlchemy(app)\ndb.init_app(app)\ndb.app = app\nUSERS_UPDATED_CHANNEL = 'users updated'\nSTATISTICS = 'stats'\nNEWUSER = 'new user'\nFAQS = 'faq lists'\nARTICLE = 'article list'\nSITE = 'site page'\nSEARCH = 'searching'\nimport models\n\n\ndef emit_all_users(channel):\n \"\"\"emits all users\"\"\"\n all_users = [user.name for user in db.session.query(models.User1).all()]\n socketio.emit(channel, {'allUsers': all_users})\n return channel\n\n\ndef push_stat_data(state):\n \"\"\"Calls Covid API\"\"\"\n information = get_covid_stats_by_state(state)\n print(state)\n case = information.cases\n newCases = information.todaysCases\n death = information.deaths\n newDeaths = information.todayDeaths\n rec = information.recovered\n county_list = []\n county_confirmed = []\n county_deaths = []\n county_rec = []\n updated = []\n print('CASES DEATHS AND RECOVERED: ', case, death, rec)\n allcounty = get_covid_stats_by_county(state, '')\n for x in allcounty:\n county_list.append(x.county)\n county_confirmed.append(x.confirmed)\n county_deaths.append(x.deaths)\n county_rec.append(x.recovered)\n updated.append(x.updatedAt)\n socketio.emit(STATISTICS, {'state': state, 'cases': case, 'new_cases':\n newCases, 'deaths': death, 'new_deaths': newDeaths, 'recovered':\n rec, 'countyNames': county_list, 'countyCases': county_confirmed,\n 'countyDeaths': county_deaths, 'countyRecovered': county_rec,\n 'updated': updated}, room=request.sid)\n r = 'stats are pushed'\n return r\n\n\n@socketio.on('new google user')\ndef on_new_google_user(data):\n \"\"\"new user when log in\"\"\"\n print('Got an event for new google user input with data:', data)\n push_new_user_to_db(data['name'], data['email'], data['pic'], data['room'])\n emit_all_users(USERS_UPDATED_CHANNEL)\n return USERS_UPDATED_CHANNEL\n\n\n@socketio.on('email results')\ndef on_send_results(data):\n name = 'Madison'\n msg = 'Hello ' + name + \"\"\"! After taking your questionnaire us here at Covid Catcher recommended the following...\n\"\"\"\n msg += data['results']\n print(msg)\n print(requests.post(\n 'https://api.mailgun.net/v3/sandbox65fda9f953cb42baacd1bdd34356b8c4.mailgun.org/messages'\n , auth=('api', os.environ['MAIL_API_KEY']), data={'from':\n 'Excited User <mailgun@sandbox65fda9f953cb42baacd1bdd34356b8c4.mailgun.org>'\n , 'to': ['miatkem@gmail.com'], 'subject':\n 'Covid Catcher Questionnaire Results', 'text': msg}).text)\n\n\n@socketio.on('faq categories')\ndef on_faq_categories():\n \"\"\"get all categories for faqs\"\"\"\n categories = get_all_categories()\n socketio.emit('faq category list', categories)\n\n\n@socketio.on('faq questions')\ndef on_faq_questions(category):\n \"\"\"get questions and answers in a category\"\"\"\n if category == '' or category == None:\n faqs = get_all_questions()\n else:\n faqs = get_all_questions(category)\n response = []\n for faq in faqs:\n response.append({'question': faq.question, 'answer': faq.answer})\n socketio.emit('faq list', response)\n\n\ndef push_new_user_to_db(name, email, picture, room):\n \"\"\"puts new user in the database\"\"\"\n global login\n all_users = [user.email for user in db.session.query(models.User1).all()]\n if email in all_users:\n print(email, ' is already a user in the database!')\n else:\n db.session.add(models.User1(name, email, picture, room))\n db.session.commit()\n login = 1\n userLog()\n emit_all_users(USERS_UPDATED_CHANNEL)\n return name\n\n\ndef get_state_colors():\n \"\"\"Colors for USA map\"\"\"\n state_colors = []\n state_cases = []\n state_active = []\n for i in get_covid_stats_for_all_states():\n state_colors.append(i.color)\n state_cases.append(i.cases)\n state_active.append(i.activeCases)\n socketio.emit('colors', {'colors': state_colors, 'cases': state_cases,\n 'active': state_active})\n\n\ndef userLog():\n \"\"\"User Login Check\"\"\"\n if login == 1:\n socketio.emit(NEWUSER, {'login': 1})\n return True\n\n\n@socketio.on('search loc')\ndef search_loc(data):\n \"\"\"Search for location covid stats\"\"\"\n state = data['loc']\n push_stat_data(state)\n\n\n@socketio.on('connect')\ndef on_connect():\n \"\"\"Socket for when user connects\"\"\"\n articleList()\n get_state_colors()\n ip = request.environ['HTTP_X_FORWARDED_FOR']\n loc = get_location(ip)\n push_stat_data(loc.state)\n return True\n\n\n@socketio.on('search location')\ndef searching(data):\n \"\"\"Search location\"\"\"\n a = data['area']\n areaLoc = search_user(a)\n allsites = get_sites(areaLoc[0], areaLoc[1])\n title_list = []\n address_list = []\n lat_list = []\n lng_list = []\n phone_list = []\n web_list = []\n miles_list = []\n counter = 0\n for site in allsites:\n if counter != 3:\n title_list.append(site.title)\n address_list.append(site.entireAddress)\n lat_list.append(site.latitude)\n lng_list.append(site.longitude)\n phone_list.append(site.phone)\n web_list.append(site.web)\n miles_list.append(site.miles)\n counter += 1\n else:\n break\n socketio.emit(SITE, {'user_lat': areaLoc[0], 'user_lng': areaLoc[1],\n 'title': title_list, 'address': address_list, 'latitude': lat_list,\n 'longitude': lng_list, 'phone': phone_list, 'web': web_list,\n 'miles': miles_list, 'key': api_k}, room=request.sid)\n return True\n\n\n<mask token>\n\n\ndef articleList():\n \"\"\"Calls the Article API\"\"\"\n articles = get_news(5, since=news.YESTERDAY.strftime('%yyyy-%mm-%dd'),\n query='covid')\n title_list = []\n desc_list = []\n url_list = []\n image_list = []\n source_list = []\n for art in articles:\n image_list.append(art.image)\n title_list.append(art.title)\n source_list.append(art.source)\n desc_list.append(art.description)\n url_list.append(art.url)\n socketio.emit(ARTICLE, {'title': title_list, 'desc': desc_list, 'url':\n url_list, 'img': image_list, 'sources': source_list})\n return True\n\n\n@app.route('/')\ndef index():\n \"\"\"loads page\"\"\"\n models.db.create_all()\n db.session.commit()\n return flask.render_template('index.html')\n\n\n@app.errorhandler(404)\ndef page_not_found(e):\n \"\"\"Handles Page Not Found\"\"\"\n return flask.render_template('index.html')\n\n\nif __name__ == '__main__':\n socketio.run(app, host=os.getenv('IP', '0.0.0.0'), port=int(os.getenv(\n 'PORT', 8080)), debug=True)\n",
"step-5": "# pylint: disable=C0103, C0413, E1101, W0611\n\"\"\"Covid Catcher Backend\"\"\"\nimport os\nfrom os.path import join, dirname\nimport json\nimport requests\nimport flask\nfrom flask import request\nimport flask_sqlalchemy\nimport flask_socketio\nfrom dotenv import load_dotenv\nfrom covid import get_covid_stats_by_state\nfrom covid import get_covid_stats_by_county\nfrom covid import get_covid_stats_for_all_states\nfrom faq import get_all_questions\nfrom faq import get_all_categories\nfrom faq import FAQ\nimport news\nfrom news import get_news\nimport location\nfrom location import get_location\nimport sites\nfrom sites import get_sites\nfrom sites import search_user\nfrom sites import TestingSites\n\napp = flask.Flask(__name__)\nsocketio = flask_socketio.SocketIO(app)\nsocketio.init_app(app, cors_allowed_origins=\"*\")\ndotenv_path = join(dirname(__file__), \"sql.env\")\nload_dotenv(dotenv_path)\ndotenv_path = join(dirname(__file__), \"api-keys.env\")\nload_dotenv(dotenv_path)\ndatabase_uri = os.environ[\"DATABASE_URL\"]\napi_k = os.environ[\"MAP_API_KEY\"]\napp.config[\"SQLALCHEMY_DATABASE_URI\"] = database_uri\nlogin = 0\n\ndb = flask_sqlalchemy.SQLAlchemy(app)\ndb.init_app(app)\ndb.app = app\nUSERS_UPDATED_CHANNEL = \"users updated\"\nSTATISTICS = \"stats\"\nNEWUSER = \"new user\"\nFAQS = \"faq lists\"\nARTICLE = \"article list\"\nSITE = \"site page\"\nSEARCH = \"searching\"\nimport models\n\n\ndef emit_all_users(channel):\n \"\"\"emits all users\"\"\"\n all_users = [user.name for user in db.session.query(models.User1).all()]\n socketio.emit(channel, {\"allUsers\": all_users})\n return channel\n\n\ndef push_stat_data(state):\n \"\"\"Calls Covid API\"\"\"\n information = get_covid_stats_by_state(state)\n print(state)\n case = information.cases\n newCases = information.todaysCases\n death = information.deaths\n newDeaths = information.todayDeaths\n rec = information.recovered\n county_list = []\n county_confirmed = []\n county_deaths = []\n county_rec = []\n updated = []\n\n print(\"CASES DEATHS AND RECOVERED: \", case, death, rec)\n allcounty = get_covid_stats_by_county(state, \"\")\n for x in allcounty:\n county_list.append(x.county)\n county_confirmed.append(x.confirmed)\n county_deaths.append(x.deaths)\n county_rec.append(x.recovered)\n updated.append(x.updatedAt)\n\n socketio.emit(\n STATISTICS,\n {\n \"state\": state,\n \"cases\": case,\n \"new_cases\": newCases,\n \"deaths\": death,\n \"new_deaths\": newDeaths,\n \"recovered\": rec,\n \"countyNames\": county_list,\n \"countyCases\": county_confirmed,\n \"countyDeaths\": county_deaths,\n \"countyRecovered\": county_rec,\n \"updated\": updated,\n },\n room=request.sid,\n )\n r = \"stats are pushed\"\n return r\n\n\n@socketio.on(\"new google user\")\ndef on_new_google_user(data):\n \"\"\"new user when log in\"\"\"\n print(\"Got an event for new google user input with data:\", data)\n push_new_user_to_db(data[\"name\"], data[\"email\"], data[\"pic\"], data[\"room\"])\n emit_all_users(USERS_UPDATED_CHANNEL)\n return USERS_UPDATED_CHANNEL\n\n\n@socketio.on(\"email results\")\ndef on_send_results(data):\n #This name would be the user but mailgun will not allow emails to be sent to\n # unverified users without paying.\n name=\"Madison\"\n msg = \"Hello \"+name+\"! After taking your questionnaire us here at Covid Catcher recommended the following...\\n\"\n msg += data['results']\n print(msg)\n print(requests.post(\n\t \"https://api.mailgun.net/v3/sandbox65fda9f953cb42baacd1bdd34356b8c4.mailgun.org/messages\",\n\t\tauth=(\"api\", os.environ[\"MAIL_API_KEY\"]),\n\t\tdata={\"from\": \"Excited User <mailgun@sandbox65fda9f953cb42baacd1bdd34356b8c4.mailgun.org>\",\n\t\t #This only sends to madison becuase mailgun for free can only send to verified emails\n\t\t #To send to the specific users email simply pull the email from the database at this socket\n\t\t # number and send it there\n\t\t\t\"to\": [\"miatkem@gmail.com\"],\n\t\t\t\"subject\": \"Covid Catcher Questionnaire Results\",\n\t\t\t\"text\":msg}).text)\n\n\n@socketio.on(\"faq categories\")\ndef on_faq_categories():\n \"\"\"get all categories for faqs\"\"\"\n categories = get_all_categories()\n socketio.emit(\"faq category list\", categories)\n\n\n@socketio.on(\"faq questions\")\ndef on_faq_questions(category):\n \"\"\"get questions and answers in a category\"\"\"\n if category == \"\" or category == None:\n faqs = get_all_questions()\n else:\n faqs = get_all_questions(category)\n response = []\n for faq in faqs:\n response.append(\n {\n \"question\": faq.question,\n \"answer\": faq.answer,\n }\n )\n socketio.emit(\"faq list\", response)\n\n\ndef push_new_user_to_db(name, email, picture, room):\n \"\"\"puts new user in the database\"\"\"\n global login\n all_users = [user.email for user in db.session.query(models.User1).all()]\n if email in all_users:\n print(email, \" is already a user in the database!\")\n else:\n db.session.add(models.User1(name, email, picture, room))\n db.session.commit()\n login = 1\n userLog()\n emit_all_users(USERS_UPDATED_CHANNEL)\n return name\n\n\ndef get_state_colors():\n \"\"\"Colors for USA map\"\"\"\n state_colors = []\n state_cases = []\n state_active = []\n for i in get_covid_stats_for_all_states():\n state_colors.append(i.color)\n state_cases.append(i.cases)\n state_active.append(i.activeCases)\n socketio.emit(\n \"colors\", {\"colors\": state_colors, \"cases\": state_cases, \"active\": state_active}\n )\n\n\ndef userLog():\n \"\"\"User Login Check\"\"\"\n if login == 1:\n socketio.emit(NEWUSER, {\"login\": 1})\n return True\n\n\n@socketio.on(\"search loc\")\ndef search_loc(data):\n \"\"\"Search for location covid stats\"\"\"\n state = data[\"loc\"]\n push_stat_data(state)\n\n\n@socketio.on(\"connect\")\ndef on_connect():\n \"\"\"Socket for when user connects\"\"\"\n articleList()\n #test_location()\n get_state_colors()\n ip = request.environ[\"HTTP_X_FORWARDED_FOR\"]\n loc = get_location(ip)\n push_stat_data(loc.state)\n return True\n\n\n@socketio.on(\"search location\")\ndef searching(data):\n \"\"\"Search location\"\"\"\n a = data[\"area\"]\n areaLoc = search_user(a)\n allsites = get_sites(areaLoc[0], areaLoc[1])\n title_list = []\n address_list = []\n lat_list = []\n lng_list = []\n phone_list = []\n web_list = []\n miles_list = []\n counter = 0\n for site in allsites:\n if counter != 3:\n title_list.append(site.title)\n address_list.append(site.entireAddress)\n lat_list.append(site.latitude)\n lng_list.append(site.longitude)\n phone_list.append(site.phone)\n web_list.append(site.web)\n miles_list.append(site.miles)\n counter += 1\n else:\n break\n\n socketio.emit(\n SITE,\n {\n \"user_lat\": areaLoc[0],\n \"user_lng\": areaLoc[1],\n \"title\": title_list,\n \"address\": address_list,\n \"latitude\": lat_list,\n \"longitude\": lng_list,\n \"phone\": phone_list,\n \"web\": web_list,\n \"miles\": miles_list,\n \"key\": api_k,\n }, room=request.sid\n )\n return True\n\n'''\ndef test_location():\n \"\"\"Get testing locations\"\"\"\n ip = request.environ[\"HTTP_X_FORWARDED_FOR\"]\n loc = get_location(ip)\n lat = loc.latitude\n lng = loc.longitude\n allsites = get_sites(lat, lng)\n title_list = []\n address_list = []\n lat_list = []\n lng_list = []\n phone_list = []\n web_list = []\n miles_list = []\n counter = 0\n for site in allsites:\n if counter != 3:\n title_list.append(site.title)\n address_list.append(site.entireAddress)\n lat_list.append(site.latitude)\n lng_list.append(site.longitude)\n phone_list.append(site.phone)\n web_list.append(site.web)\n miles_list.append(site.miles)\n counter += 1\n else:\n break\n\n socketio.emit(\n SITE,\n {\n \"user_lat\": lat,\n \"user_lng\": lng,\n \"title\": title_list,\n \"address\": address_list,\n \"latitude\": lat_list,\n \"longitude\": lng_list,\n \"phone\": phone_list,\n \"web\": web_list,\n \"miles\": miles_list,\n \"key\": api_k,\n },\n )\n return True'''\n\n\ndef articleList():\n \"\"\"Calls the Article API\"\"\"\n articles = get_news(\n 5, since=news.YESTERDAY.strftime(\"%yyyy-%mm-%dd\"), query=\"covid\"\n )\n title_list = []\n desc_list = []\n url_list = []\n image_list = []\n source_list = []\n for art in articles:\n image_list.append(art.image)\n title_list.append(art.title)\n source_list.append(art.source)\n desc_list.append(art.description)\n url_list.append(art.url)\n socketio.emit(\n ARTICLE,\n {\n \"title\": title_list,\n \"desc\": desc_list,\n \"url\": url_list,\n \"img\": image_list,\n \"sources\": source_list,\n },\n )\n return True\n\n\n@app.route(\"/\")\ndef index():\n \"\"\"loads page\"\"\"\n models.db.create_all()\n db.session.commit()\n return flask.render_template(\"index.html\")\n\n\n@app.errorhandler(404)\ndef page_not_found(e):\n \"\"\"Handles Page Not Found\"\"\"\n return flask.render_template(\"index.html\")\n\n\nif __name__ == \"__main__\":\n socketio.run(\n app,\n host=os.getenv(\"IP\", \"0.0.0.0\"),\n port=int(os.getenv(\"PORT\", 8080)),\n debug=True,\n )\n",
"step-ids": [
12,
13,
14,
18,
19
]
}
|
[
12,
13,
14,
18,
19
] |
class BaseService:
def __init__(self, context):
self._context = context
def post(self, path, body):
result = self._context.http.post(path, body)
return result.json()["Data"]
|
normal
|
{
"blob_id": "5000663e3cde9c1a1100c9022707ccae13db0034",
"index": 1426,
"step-1": "<mask token>\n",
"step-2": "class BaseService:\n <mask token>\n <mask token>\n",
"step-3": "class BaseService:\n <mask token>\n\n def post(self, path, body):\n result = self._context.http.post(path, body)\n return result.json()['Data']\n",
"step-4": "class BaseService:\n\n def __init__(self, context):\n self._context = context\n\n def post(self, path, body):\n result = self._context.http.post(path, body)\n return result.json()['Data']\n",
"step-5": "class BaseService:\n\n def __init__(self, context):\n self._context = context\n\n def post(self, path, body):\n result = self._context.http.post(path, body)\n\n return result.json()[\"Data\"]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# THIS FILE WAS CREATED IN THIS DIRECTORY EARLIER, NOW MOIVED TO ROOT OF THE REPO
print "Hello buddy"
print "Let's get started"
spy_name = raw_input ("What is your spy name? ")
if len(spy_name) >3:
print "Welcome " + spy_name + ". Glad to have you with us."
spy_salutation= raw_input("What's your title? ")
if spy_salutation == "Mr." or spy_salutation =="Ms.":
spy_name = spy_salutation + " " + spy_name
print "Welcome " + spy_name + ". Let me know about you a bit more."
spy_age = input("Please enter your age")
if 50>spy_age>18:
print "Your age is Correct."
spy_rating = input("Please enter your rating ")
if spy_rating>=5.0:
print "Great spy"
elif 3.5<=spy_rating<5.0:
print "Good spy"
elif 2<=spy_rating<3.5:
print "Not bad."
else :
print "Not good. Need hardwork"
spy_is_active = True
print "Authentication process completed successfully. Welcome " +spy_name+ "age: " + str(spy_age) + " and rating: " + str(spy_rating) + " Glad to have ypou with us."
else:
print "Sorry, you are not eligible to be a spy"
else:
print "Invalid Information."
else:
print "Opps! please enter a valid name."
|
normal
|
{
"blob_id": "79f03af05fb40f5f5247b582eabae2dc125e6b52",
"index": 4522,
"step-1": "# THIS FILE WAS CREATED IN THIS DIRECTORY EARLIER, NOW MOIVED TO ROOT OF THE REPO\r\n\r\n\r\nprint \"Hello buddy\"\r\nprint \"Let's get started\"\r\nspy_name = raw_input (\"What is your spy name? \")\r\nif len(spy_name) >3:\r\n print \"Welcome \" + spy_name + \". Glad to have you with us.\"\r\n spy_salutation= raw_input(\"What's your title? \")\r\n if spy_salutation == \"Mr.\" or spy_salutation ==\"Ms.\":\r\n spy_name = spy_salutation + \" \" + spy_name\r\n print \"Welcome \" + spy_name + \". Let me know about you a bit more.\"\r\n spy_age = input(\"Please enter your age\")\r\n if 50>spy_age>18:\r\n print \"Your age is Correct.\"\r\n spy_rating = input(\"Please enter your rating \")\r\n if spy_rating>=5.0:\r\n print \"Great spy\"\r\n elif 3.5<=spy_rating<5.0:\r\n print \"Good spy\"\r\n elif 2<=spy_rating<3.5:\r\n print \"Not bad.\"\r\n else :\r\n print \"Not good. Need hardwork\"\r\n spy_is_active = True\r\n print \"Authentication process completed successfully. Welcome \" +spy_name+ \"age: \" + str(spy_age) + \" and rating: \" + str(spy_rating) + \" Glad to have ypou with us.\"\r\n\r\n else:\r\n print \"Sorry, you are not eligible to be a spy\"\r\n else:\r\n print \"Invalid Information.\"\r\nelse:\r\n print \"Opps! please enter a valid name.\"\r\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
@pytest.fixture
def runner() ->CliRunner:
"""Fixture for invoking command-line interfaces."""
return CliRunner()
def test_main_succeeds(runner: CliRunner) ->None:
"""It exits with a status code of zero."""
with runner.isolated_filesystem():
df = generate_test_data()
df.to_csv('test_file.csv', index=False)
result = runner.invoke(__main__.main, ['test_file.csv'])
assert result.exit_code == 0
<|reserved_special_token_0|>
def test_002_header_style() ->None:
"""Tests that the header style optional argument works."""
df = generate_test_data()
skim(df, header_style='italic green')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@pytest.fixture
def runner() ->CliRunner:
"""Fixture for invoking command-line interfaces."""
return CliRunner()
def test_main_succeeds(runner: CliRunner) ->None:
"""It exits with a status code of zero."""
with runner.isolated_filesystem():
df = generate_test_data()
df.to_csv('test_file.csv', index=False)
result = runner.invoke(__main__.main, ['test_file.csv'])
assert result.exit_code == 0
def test_000_basic_functionality() ->None:
"""Tests that a skim of the test data works."""
df = generate_test_data()
skim(df)
def test_001_colour_kwargs() ->None:
"""Tests that colour keyword arguments work."""
df = generate_test_data()
skim(df, datetime='chartreuse1')
def test_002_header_style() ->None:
"""Tests that the header style optional argument works."""
df = generate_test_data()
skim(df, header_style='italic green')
<|reserved_special_token_0|>
def test_004_when_df_is_named() ->None:
"""Tests what happens when df has a name."""
df = generate_test_data()
df.name = 'Named dataframe'
skim(df)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@pytest.fixture
def runner() ->CliRunner:
"""Fixture for invoking command-line interfaces."""
return CliRunner()
def test_main_succeeds(runner: CliRunner) ->None:
"""It exits with a status code of zero."""
with runner.isolated_filesystem():
df = generate_test_data()
df.to_csv('test_file.csv', index=False)
result = runner.invoke(__main__.main, ['test_file.csv'])
assert result.exit_code == 0
def test_000_basic_functionality() ->None:
"""Tests that a skim of the test data works."""
df = generate_test_data()
skim(df)
def test_001_colour_kwargs() ->None:
"""Tests that colour keyword arguments work."""
df = generate_test_data()
skim(df, datetime='chartreuse1')
def test_002_header_style() ->None:
"""Tests that the header style optional argument works."""
df = generate_test_data()
skim(df, header_style='italic green')
def test_003_not_enough_datetimes() ->None:
"""Tests logic branch with too few datetimes for freq inference."""
df = generate_test_data()
df = df.head(2)
skim(df)
def test_004_when_df_is_named() ->None:
"""Tests what happens when df has a name."""
df = generate_test_data()
df.name = 'Named dataframe'
skim(df)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import pytest
from click.testing import CliRunner
from skimpy import __main__
from skimpy import generate_test_data
from skimpy import skim
@pytest.fixture
def runner() ->CliRunner:
"""Fixture for invoking command-line interfaces."""
return CliRunner()
def test_main_succeeds(runner: CliRunner) ->None:
"""It exits with a status code of zero."""
with runner.isolated_filesystem():
df = generate_test_data()
df.to_csv('test_file.csv', index=False)
result = runner.invoke(__main__.main, ['test_file.csv'])
assert result.exit_code == 0
def test_000_basic_functionality() ->None:
"""Tests that a skim of the test data works."""
df = generate_test_data()
skim(df)
def test_001_colour_kwargs() ->None:
"""Tests that colour keyword arguments work."""
df = generate_test_data()
skim(df, datetime='chartreuse1')
def test_002_header_style() ->None:
"""Tests that the header style optional argument works."""
df = generate_test_data()
skim(df, header_style='italic green')
def test_003_not_enough_datetimes() ->None:
"""Tests logic branch with too few datetimes for freq inference."""
df = generate_test_data()
df = df.head(2)
skim(df)
def test_004_when_df_is_named() ->None:
"""Tests what happens when df has a name."""
df = generate_test_data()
df.name = 'Named dataframe'
skim(df)
<|reserved_special_token_1|>
"""Test cases for the __main__ module."""
import pytest
from click.testing import CliRunner
from skimpy import __main__
from skimpy import generate_test_data
from skimpy import skim
@pytest.fixture
def runner() -> CliRunner:
"""Fixture for invoking command-line interfaces."""
return CliRunner()
def test_main_succeeds(runner: CliRunner) -> None:
"""It exits with a status code of zero."""
with runner.isolated_filesystem():
df = generate_test_data()
df.to_csv("test_file.csv", index=False)
result = runner.invoke(__main__.main, ["test_file.csv"])
assert result.exit_code == 0
def test_000_basic_functionality() -> None:
"""Tests that a skim of the test data works."""
df = generate_test_data()
skim(df)
def test_001_colour_kwargs() -> None:
"""Tests that colour keyword arguments work."""
df = generate_test_data()
skim(df, datetime="chartreuse1")
def test_002_header_style() -> None:
"""Tests that the header style optional argument works."""
df = generate_test_data()
skim(df, header_style="italic green")
def test_003_not_enough_datetimes() -> None:
"""Tests logic branch with too few datetimes for freq inference."""
df = generate_test_data()
df = df.head(2)
skim(df)
def test_004_when_df_is_named() -> None:
"""Tests what happens when df has a name."""
df = generate_test_data()
df.name = "Named dataframe"
skim(df)
|
flexible
|
{
"blob_id": "97a51d959ad642467c508cedc8786f636e4050bb",
"index": 1333,
"step-1": "<mask token>\n\n\n@pytest.fixture\ndef runner() ->CliRunner:\n \"\"\"Fixture for invoking command-line interfaces.\"\"\"\n return CliRunner()\n\n\ndef test_main_succeeds(runner: CliRunner) ->None:\n \"\"\"It exits with a status code of zero.\"\"\"\n with runner.isolated_filesystem():\n df = generate_test_data()\n df.to_csv('test_file.csv', index=False)\n result = runner.invoke(__main__.main, ['test_file.csv'])\n assert result.exit_code == 0\n\n\n<mask token>\n\n\ndef test_002_header_style() ->None:\n \"\"\"Tests that the header style optional argument works.\"\"\"\n df = generate_test_data()\n skim(df, header_style='italic green')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\n@pytest.fixture\ndef runner() ->CliRunner:\n \"\"\"Fixture for invoking command-line interfaces.\"\"\"\n return CliRunner()\n\n\ndef test_main_succeeds(runner: CliRunner) ->None:\n \"\"\"It exits with a status code of zero.\"\"\"\n with runner.isolated_filesystem():\n df = generate_test_data()\n df.to_csv('test_file.csv', index=False)\n result = runner.invoke(__main__.main, ['test_file.csv'])\n assert result.exit_code == 0\n\n\ndef test_000_basic_functionality() ->None:\n \"\"\"Tests that a skim of the test data works.\"\"\"\n df = generate_test_data()\n skim(df)\n\n\ndef test_001_colour_kwargs() ->None:\n \"\"\"Tests that colour keyword arguments work.\"\"\"\n df = generate_test_data()\n skim(df, datetime='chartreuse1')\n\n\ndef test_002_header_style() ->None:\n \"\"\"Tests that the header style optional argument works.\"\"\"\n df = generate_test_data()\n skim(df, header_style='italic green')\n\n\n<mask token>\n\n\ndef test_004_when_df_is_named() ->None:\n \"\"\"Tests what happens when df has a name.\"\"\"\n df = generate_test_data()\n df.name = 'Named dataframe'\n skim(df)\n",
"step-3": "<mask token>\n\n\n@pytest.fixture\ndef runner() ->CliRunner:\n \"\"\"Fixture for invoking command-line interfaces.\"\"\"\n return CliRunner()\n\n\ndef test_main_succeeds(runner: CliRunner) ->None:\n \"\"\"It exits with a status code of zero.\"\"\"\n with runner.isolated_filesystem():\n df = generate_test_data()\n df.to_csv('test_file.csv', index=False)\n result = runner.invoke(__main__.main, ['test_file.csv'])\n assert result.exit_code == 0\n\n\ndef test_000_basic_functionality() ->None:\n \"\"\"Tests that a skim of the test data works.\"\"\"\n df = generate_test_data()\n skim(df)\n\n\ndef test_001_colour_kwargs() ->None:\n \"\"\"Tests that colour keyword arguments work.\"\"\"\n df = generate_test_data()\n skim(df, datetime='chartreuse1')\n\n\ndef test_002_header_style() ->None:\n \"\"\"Tests that the header style optional argument works.\"\"\"\n df = generate_test_data()\n skim(df, header_style='italic green')\n\n\ndef test_003_not_enough_datetimes() ->None:\n \"\"\"Tests logic branch with too few datetimes for freq inference.\"\"\"\n df = generate_test_data()\n df = df.head(2)\n skim(df)\n\n\ndef test_004_when_df_is_named() ->None:\n \"\"\"Tests what happens when df has a name.\"\"\"\n df = generate_test_data()\n df.name = 'Named dataframe'\n skim(df)\n",
"step-4": "<mask token>\nimport pytest\nfrom click.testing import CliRunner\nfrom skimpy import __main__\nfrom skimpy import generate_test_data\nfrom skimpy import skim\n\n\n@pytest.fixture\ndef runner() ->CliRunner:\n \"\"\"Fixture for invoking command-line interfaces.\"\"\"\n return CliRunner()\n\n\ndef test_main_succeeds(runner: CliRunner) ->None:\n \"\"\"It exits with a status code of zero.\"\"\"\n with runner.isolated_filesystem():\n df = generate_test_data()\n df.to_csv('test_file.csv', index=False)\n result = runner.invoke(__main__.main, ['test_file.csv'])\n assert result.exit_code == 0\n\n\ndef test_000_basic_functionality() ->None:\n \"\"\"Tests that a skim of the test data works.\"\"\"\n df = generate_test_data()\n skim(df)\n\n\ndef test_001_colour_kwargs() ->None:\n \"\"\"Tests that colour keyword arguments work.\"\"\"\n df = generate_test_data()\n skim(df, datetime='chartreuse1')\n\n\ndef test_002_header_style() ->None:\n \"\"\"Tests that the header style optional argument works.\"\"\"\n df = generate_test_data()\n skim(df, header_style='italic green')\n\n\ndef test_003_not_enough_datetimes() ->None:\n \"\"\"Tests logic branch with too few datetimes for freq inference.\"\"\"\n df = generate_test_data()\n df = df.head(2)\n skim(df)\n\n\ndef test_004_when_df_is_named() ->None:\n \"\"\"Tests what happens when df has a name.\"\"\"\n df = generate_test_data()\n df.name = 'Named dataframe'\n skim(df)\n",
"step-5": "\"\"\"Test cases for the __main__ module.\"\"\"\nimport pytest\nfrom click.testing import CliRunner\n\nfrom skimpy import __main__\nfrom skimpy import generate_test_data\nfrom skimpy import skim\n\n\n@pytest.fixture\ndef runner() -> CliRunner:\n \"\"\"Fixture for invoking command-line interfaces.\"\"\"\n return CliRunner()\n\n\ndef test_main_succeeds(runner: CliRunner) -> None:\n \"\"\"It exits with a status code of zero.\"\"\"\n with runner.isolated_filesystem():\n df = generate_test_data()\n df.to_csv(\"test_file.csv\", index=False)\n result = runner.invoke(__main__.main, [\"test_file.csv\"])\n assert result.exit_code == 0\n\n\ndef test_000_basic_functionality() -> None:\n \"\"\"Tests that a skim of the test data works.\"\"\"\n df = generate_test_data()\n skim(df)\n\n\ndef test_001_colour_kwargs() -> None:\n \"\"\"Tests that colour keyword arguments work.\"\"\"\n df = generate_test_data()\n skim(df, datetime=\"chartreuse1\")\n\n\ndef test_002_header_style() -> None:\n \"\"\"Tests that the header style optional argument works.\"\"\"\n df = generate_test_data()\n skim(df, header_style=\"italic green\")\n\n\ndef test_003_not_enough_datetimes() -> None:\n \"\"\"Tests logic branch with too few datetimes for freq inference.\"\"\"\n df = generate_test_data()\n df = df.head(2)\n skim(df)\n\n\ndef test_004_when_df_is_named() -> None:\n \"\"\"Tests what happens when df has a name.\"\"\"\n df = generate_test_data()\n df.name = \"Named dataframe\"\n skim(df)\n",
"step-ids": [
3,
6,
7,
8,
9
]
}
|
[
3,
6,
7,
8,
9
] |
import argparse
import cv2
import numpy as np
refPt = []
cropping = False
def click_and_crop(event, x, y, flags, param):
global refPt, cropping
if event == cv2.EVENT_LBUTTONDOWN:
refPt = [(x, y)]
cropping = True
elif event == cv2.EVENT_LBUTTONUP:
refPt.append((x, y))
cropping = False
cv2.rectangle(image, refPt[0], refPt[1], (0, 255, 0), 2)
cv2.imshow("image", image)
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required=True, help="Path to the image")
args = vars(ap.parse_args())
image = cv2.imread(args["image"])
clone = image.copy()
cv2.namedWindow("image")
cv2.setMouseCallback("image", click_and_crop)
while True:
cv2.imshow("image", image)
key = cv2.waitKey(1) & 0xFF
if key == ord("r"):
image = clone.copy()
elif key == ord("c"):
break
if len(refPt) == 2:
roi = clone[refPt[0][1]:refPt[1][1], refPt[0][0]:refPt[1][0]]
cv2.imshow("ROI", roi)
count=0
sum=np.array([0,0,0])
for i in range (0,np.size(roi,0)):
for j in range(0,np.size(roi,1)):
count+=1
sum+=roi[i,j]
print "Average bgr: ",sum/count
cv2.waitKey(0)
cv2.destroyAllWindows()
|
normal
|
{
"blob_id": "986df5a41bc87ecb390dfbd1db9e1f5cd6c5b8fb",
"index": 9702,
"step-1": "\nimport argparse\nimport cv2\nimport numpy as np\n \n\nrefPt = []\ncropping = False\n \ndef click_and_crop(event, x, y, flags, param):\n\tglobal refPt, cropping\n \n\tif event == cv2.EVENT_LBUTTONDOWN:\n\t\trefPt = [(x, y)]\n\t\tcropping = True\n \n\telif event == cv2.EVENT_LBUTTONUP:\n\t\trefPt.append((x, y))\n\t\tcropping = False\n \n\t\n\t\tcv2.rectangle(image, refPt[0], refPt[1], (0, 255, 0), 2)\n\t\tcv2.imshow(\"image\", image)\n\n\nap = argparse.ArgumentParser()\nap.add_argument(\"-i\", \"--image\", required=True, help=\"Path to the image\")\nargs = vars(ap.parse_args())\n \nimage = cv2.imread(args[\"image\"])\nclone = image.copy()\ncv2.namedWindow(\"image\")\ncv2.setMouseCallback(\"image\", click_and_crop)\n \n\nwhile True:\n\tcv2.imshow(\"image\", image)\n\tkey = cv2.waitKey(1) & 0xFF\n \n\n\tif key == ord(\"r\"):\n\t\timage = clone.copy()\n \n\telif key == ord(\"c\"):\n\t\tbreak\n \n\nif len(refPt) == 2:\n\troi = clone[refPt[0][1]:refPt[1][1], refPt[0][0]:refPt[1][0]]\n\tcv2.imshow(\"ROI\", roi)\n\tcount=0\n\tsum=np.array([0,0,0])\n\tfor i in range (0,np.size(roi,0)):\n\t\tfor j in range(0,np.size(roi,1)):\n\t\t\tcount+=1\n\t\t\tsum+=roi[i,j]\n\tprint \"Average bgr: \",sum/count\n\tcv2.waitKey(0)\n \n\ncv2.destroyAllWindows()",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
import os
import closet
import unittest
import tempfile
def in_response(response, value):
return value.encode() in response.data
def is_404(response):
response.status_code == 404
class ClosetTestBase(unittest.TestCase):
def setUp(self):
"""Set up test environment befor each test"""
self.db_fd, closet.app.config['DATABASE'] = tempfile.mkstemp()
closet.app.config['TESTING'] = True
self.app = closet.app.test_client()
closet.init_db()
def tearDown(self):
"""Tear down test environment after each test"""
os.close(self.db_fd)
os.unlink(closet.app.config['DATABASE'])
def login(self, username, password):
"""Login to test website as specified user with the specified
password
"""
return self.app.post('/login', data=dict(
username=username,
password=password
), follow_redirects=True)
def logout(self):
"""Logout of test website"""
return self.app.get('/logout', follow_redirects=True)
def authenticate(self):
"""Login to test website as the standard test user"""
self.login(closet.app.config['USERNAME'],
closet.app.config['PASSWORD'])
class ClosetTestCase(ClosetTestBase):
# Generic Tests
def test_empty_db(self):
"""Start with a blank database."""
rv = self.app.get('/')
assert b'Your closet is empty.' in rv.data
def test_login_logout(self):
"""Make sure login and logout works"""
rv = self.login(closet.app.config['USERNAME'],
closet.app.config['PASSWORD'])
assert b'You were logged in' in rv.data
rv = self.logout()
assert b'You were logged out' in rv.data
rv = self.login(closet.app.config['USERNAME'] + 'x',
closet.app.config['PASSWORD'])
assert b'Invalid username' in rv.data
rv = self.login(closet.app.config['USERNAME'],
closet.app.config['PASSWORD'] + 'x')
assert b'Invalid password' in rv.data
class ModelBase(unittest.TestCase):
# Model based view test helpers
def __init__(self, *args, **kwargs):
super(ModelBase, self).__init__(*args, **kwargs)
self.base_url = '/'
self.add_url = 'add'
self.edit_url = 'edit'
self.delete_url = 'delete'
self.name = ''
self.nice_name = ''
self.name_field = 'name'
self.id_field = 'slug'
self.fields = {}
def get_url(self, *args):
"""Create a URL from a tuple of strings based on the base url"""
try:
url = '/'.join((self.base_url, ) + args)
except TypeError:
url = '/'.join((self.base_url, ) + args[0])
return url.rstrip('/')
def get(self, url):
"""Process a GET request to the app"""
return self.app.get(get_url(url), follow_redirects=True)
def post(self, url, data):
"""Process a POST request to the app"""
return self.app.post(get_url(url), data=data, follow_redirects=True)
def verify_object(self, data):
"""Verify the model object data"""
rv = self.get(data[self.id_field])
result = not is_404(rv)
if result:
for key, value in data:
if not in_response(rv, value):
return False
return result
def get_add_form(self):
"""Test that the "add" form is accessible and contains all the
fields
"""
rv = self.get(self.add_url)
assert not is_404(rv)
assert in_response(rv, 'Add {}'.format(self.nice_name))
for field, name in self.fields:
assert in_response(rv, name)
return rv
def get_edit_form(self, data):
"""Test that the edit form is accessible and contains all the
fields
"""
self.add_success(data)
rv = self.get((data[self.id_field], self.edit_url))
assert not is_404(rv)
assert in_response(rv, 'Edit {}'.format(data[self.name_field]))
for field, name in self.fields:
assert in_response(rv, name)
return rv
def get_delete_confirmation_form(self, data):
"""Test that the delete confirmation form is accessible"""
self.add_success(data)
rv = self.get((data[self.id_field], self.delete_url))
assert not is_404(rv)
assert in_response(rv, 'Delete {}'.format(data[self.name_field]))
return rv
def add_success(self, data):
"""Test that adding a model with the given data succeeds"""
rv = self.post(self.add_url, data)
assert not in_response(rv, 'Add {}'.format(self.nice_name))
assert self.verify_object(data)
return rv
def edit_success(self, id_, data):
"""Test that updating a model with the given data succeeds"""
rv = self.post((id_, self.edit_url), data)
assert not in_response(rv, 'Edit {}'.format(data[self.name_field]))
assert self.verify_object(data)
return rv
def update_success(self, data, new_data):
"""Test that updating a model with the given data succeeds"""
self.add_success(data)
return self.edit_success(data[self.id_field], new_data)
def delete_success(self, id_):
"""Test that deleting the specified model succeeds"""
rv = self.post((id_, self.delete_url), dict(post='yes'))
assert not self.verify_object({self.id_field: id_})
return rv
def add_fail(self, data, message):
"""Test that adding a model with the given data fails"""
rv = self.post(self.add_url, data)
assert in_response(rv, 'Add {}'.format(self.nice_name))
assert in_response(rv, message)
return rv
def edit_fail(self, id_, data, message):
"""Test that updating a model with the given data fails"""
rv = self.post((id_, self.edit_url), data)
assert in_response(rv, 'Edit {}'.format(data[self.name_field]))
assert in_response(rv, message)
return rv
def update_fail(self, data, new_data, message):
"""Test that updating a model with the given data fails"""
self.add_success(data)
return self.edit_fail(data[self.id_field], new_data, message)
def delete_fail(self, id_, message):
"""Test that deleting the specified model fails"""
rv = self.post((id_, self.delete_url), dict(post='yes'))
assert in_response(rv, message)
assert self.verify_object({self.id_field: id_})
return rv
def bad_data_fail(self, good_data, bad_data, message):
"""Test that adding and updating a model with the given data
fails
"""
self.add_fail(bad_data, message)
self.update_fail(good_data, bad_data, message)
def add_required_field_fail(self, field, data):
"""Test that adding a model with a blank or missing required
field fails
"""
message = '{} is required'.format(self.fields[field])
data = data.copy()
data[field] = ''
self.add_fail(data, message)
assert not self.verify_object(data)
del data[field]
self.add_fail(data, message)
assert not self.verify_object(data)
def update_required_field_fail(self, field, data):
"""Test that updating a model with a blank or missing required
field fails
"""
message = '{} is required'.format(self.fields[field])
data = data.copy()
id_ = data[self.id_field]
self.add_success(data)
data[field] = ''
self.edit_fail(id_, data, message)
assert not self.verify_object(data)
del data[field]
self.edit_fail(id_, data, message)
assert not self.verify_object(data)
# Delete base model?
def required_field_fail(self, field, data):
"""Test that adding and updating a model with a blank or missing
required field fails
"""
self.add_required_field_fail(field, data)
self.update_required_field_fail(field, data)
def add_existing_key_fail(self, data):
"""Test that adding a model with an existing key fails"""
message = 'exists'
rv = self.add_success(data)
assert not in_response(rv, message)
return self.add_fail(data, message)
def update_existing_key_fail(self, data, new_data):
"""Test that adding a model with an existing key fails"""
message = 'exists'
rv = self.add_success(data)
assert not in_response(rv, message)
rv = self.add_success(new_data)
assert not in_response(rv, message)
rv = self.update_fail(data, message)
assert self.verify_object(new_data)
return rv
def existing_key_fail(self, data, new_data):
"""Test that adding and updating a model with an existing key
fails
"""
message = 'exists'
rv = self.add_success(data)
assert not in_response(rv, message)
self.add_fail(data, message)
rv = self.add_success(new_data)
assert not in_response(rv, message)
self.update_fail(data, message)
assert self.verify_object(new_data)
def data_sorted(self, before_data, after_data, url):
"""Test that the models will be sorted in the correct order"""
self.add_success(after_data)
self.add_success(before_data)
rv = self.get(url)
after_index = rv.data.index(after_data[self.name_field].encode())
before_index = rv.data.index(before_data[self.name_field].encode())
assert after_index > before_index
def delete_does_not_exist_fail(self, id_):
"""Test that deleting a model that does not exist fails"""
assert is_404(self.get((id_, self.delete_url)))
self.delete_fail(id_, 'does not exist')
class CategoryTestCase(ClosetTestBase, ModelBase):
def __init__(self, *args, **kwargs):
super(ModelBase, self).__init__(*args, **kwargs)
self.base_url = '/categories'
self.name = 'category'
self.nice_name = 'Category'
self.fields = {
'name': 'Name',
'parent': 'Parent'}
self.test_data = {
'pants': {
'name': 'Pants',
'slug': 'pants'},
'shirts': {
'name': 'Shirts',
'slug': 'shirts'},
'jeans': {
'name': 'Jeans',
'slug': 'jeans',
'parent': 'pants'},
't-shirts': {
'name': 'T-shirts',
'slug': 't-shirts',
'parent': 'shirts'},
'hats': {
'name': 'Hats',
'slug': 'hats',
'parent': 'spam'},
'polo-shirts': {
'name': 'Polo Shirts',
'slug': 'polo-shirts'},
'symbols': {
'name': ':)',
'slug': ''},
'keyword': {
'name': 'Add',
'slug': 'add-1'}}
def setUp(self):
super(CategoryTestCase, self).setUp()
self.authenticate()
def test_get_category_forms(self):
"""Test that the category forms are accessible"""
self.get_add_form()
self.get_edit_form(self.test_data['pants'])
self.get_delete_confirmation_form(self.test_data['shirts'])
def test_add_category(self):
"""Test that adding a category works"""
self.add_success(self.test_data['pants'])
def test_update_category(self):
"""Test that updating a category works"""
self.update_success(self.test_data['pants'], self.test_data['shirts'])
def test_delete_category(self):
"""Test that deleting a category works"""
self.add_success(self.test_data['pants'])
self.delete_success('pants')
def test_add_child_category(self):
"""Test that adding a child category works"""
self.add_success(self.test_data['pants'])
rv = self.get('pants')
assert in_response(rv, 'This category is empty.')
self.add_success(self.test_data['jeans'])
rv = self.get('pants')
assert not in_response(rv, 'This category is empty.')
assert in_response(rv, 'Jeans')
def test_update_child_category(self):
"""Test that updating child categories works"""
self.add_success(self.test_data['pants'])
self.add_success(self.test_data['shirts'])
self.add_success(self.test_data['jeans'])
rv = self.get('pants')
assert not in_response(rv, 'This category is empty.')
assert in_response(rv, 'Jeans')
self.edit_success('jeans', self.test_data['t-shirts'])
rv = self.get('pants')
assert in_response(rv, 'This category is empty.')
assert not in_response(rv, 'Jeans')
assert not in_response(rv, 'T-Shirts')
rv = self.get('shirts')
assert not in_response(rv, 'This category is empty.')
assert in_response(rv, 'T-Shirts')
assert not in_response(rv, 'Jeans')
def test_name_required(self):
"""Test that adding/updating a category without a name fails"""
self.required_field_fail('name', self.test_data['pants'])
def test_parent_does_not_exist(self):
"""Test that adding/updating a category with a non-existent
parent fails
"""
self.bad_data_fail(self.test_data['pants'],
self.test_data['hats'], 'Parent does not exist')
def test_category_already_exists(self):
self.existing_key_fail(
self.test_data['pants'],
self.test_data['shirts'])
def test_categories_are_sorted(self):
"""Test that categories are sorted alphabetically by name"""
self.data_sorted(self.test_data['shirts'], self.test_data['pants'])
def test_delete_category_does_not_exist(self):
"""Test that deleting a category that doesn't exist fails"""
self.delete_does_not_exist_fail('hats')
def test_add_category_slug_special(self):
"""Test that adding a category with an incorrect name fails"""
self.add_success(self.test_data['polo-shirts'])
assert self.verify_object(dict(name='Polo Shirts', slug='polo-shirts'))
self.add_fail(self.test_data['symbols'], '')
self.add_success('Add')
def test_update_category_slug_special(self):
"""Test that updating a category with an incorrect slug fails"""
rv = self.app.post(self.get_category_url('add'), data=dict(
name='Pants', slug='pants'
), follow_redirects=True)
rv = self.app.post(self.get_category_url('pants', 'edit'), data=dict(
name='Polo Shirts', slug='polo shirts'
), follow_redirects=True)
assert b'Edit Pants' in rv.data
assert b'Slug is formatted incorrectly' in rv.data
rv = self.app.post(self.get_category_url('pants', 'edit'), data=dict(
name=':)', slug=':)'
), follow_redirects=True)
assert b'Edit Pants' in rv.data
assert b'Slug is formatted incorrectly' in rv.data
rv = self.app.post(self.get_category_url('pants', 'edit'), data=dict(
name='Add', slug='add'
), follow_redirects=True)
assert b'Edit Pants' in rv.data
assert b'Slug "add" is not allowed' in rv.data
if __name__ == '__main__':
unittest.main()
|
normal
|
{
"blob_id": "a5856e12c281ed6a252f499a380f9c51082ea740",
"index": 3711,
"step-1": "<mask token>\n\n\nclass ModelBase(unittest.TestCase):\n <mask token>\n <mask token>\n\n def get(self, url):\n \"\"\"Process a GET request to the app\"\"\"\n return self.app.get(get_url(url), follow_redirects=True)\n <mask token>\n\n def verify_object(self, data):\n \"\"\"Verify the model object data\"\"\"\n rv = self.get(data[self.id_field])\n result = not is_404(rv)\n if result:\n for key, value in data:\n if not in_response(rv, value):\n return False\n return result\n\n def get_add_form(self):\n \"\"\"Test that the \"add\" form is accessible and contains all the\n fields\n \"\"\"\n rv = self.get(self.add_url)\n assert not is_404(rv)\n assert in_response(rv, 'Add {}'.format(self.nice_name))\n for field, name in self.fields:\n assert in_response(rv, name)\n return rv\n <mask token>\n\n def get_delete_confirmation_form(self, data):\n \"\"\"Test that the delete confirmation form is accessible\"\"\"\n self.add_success(data)\n rv = self.get((data[self.id_field], self.delete_url))\n assert not is_404(rv)\n assert in_response(rv, 'Delete {}'.format(data[self.name_field]))\n return rv\n\n def add_success(self, data):\n \"\"\"Test that adding a model with the given data succeeds\"\"\"\n rv = self.post(self.add_url, data)\n assert not in_response(rv, 'Add {}'.format(self.nice_name))\n assert self.verify_object(data)\n return rv\n\n def edit_success(self, id_, data):\n \"\"\"Test that updating a model with the given data succeeds\"\"\"\n rv = self.post((id_, self.edit_url), data)\n assert not in_response(rv, 'Edit {}'.format(data[self.name_field]))\n assert self.verify_object(data)\n return rv\n <mask token>\n\n def delete_success(self, id_):\n \"\"\"Test that deleting the specified model succeeds\"\"\"\n rv = self.post((id_, self.delete_url), dict(post='yes'))\n assert not self.verify_object({self.id_field: id_})\n return rv\n\n def add_fail(self, data, message):\n \"\"\"Test that adding a model with the given data fails\"\"\"\n rv = self.post(self.add_url, data)\n assert in_response(rv, 'Add {}'.format(self.nice_name))\n assert in_response(rv, message)\n return rv\n <mask token>\n <mask token>\n\n def delete_fail(self, id_, message):\n \"\"\"Test that deleting the specified model fails\"\"\"\n rv = self.post((id_, self.delete_url), dict(post='yes'))\n assert in_response(rv, message)\n assert self.verify_object({self.id_field: id_})\n return rv\n\n def bad_data_fail(self, good_data, bad_data, message):\n \"\"\"Test that adding and updating a model with the given data\n fails\n \"\"\"\n self.add_fail(bad_data, message)\n self.update_fail(good_data, bad_data, message)\n\n def add_required_field_fail(self, field, data):\n \"\"\"Test that adding a model with a blank or missing required\n field fails\n \"\"\"\n message = '{} is required'.format(self.fields[field])\n data = data.copy()\n data[field] = ''\n self.add_fail(data, message)\n assert not self.verify_object(data)\n del data[field]\n self.add_fail(data, message)\n assert not self.verify_object(data)\n <mask token>\n\n def required_field_fail(self, field, data):\n \"\"\"Test that adding and updating a model with a blank or missing\n required field fails\n \"\"\"\n self.add_required_field_fail(field, data)\n self.update_required_field_fail(field, data)\n\n def add_existing_key_fail(self, data):\n \"\"\"Test that adding a model with an existing key fails\"\"\"\n message = 'exists'\n rv = self.add_success(data)\n assert not in_response(rv, message)\n return self.add_fail(data, message)\n <mask token>\n\n def existing_key_fail(self, data, new_data):\n \"\"\"Test that adding and updating a model with an existing key\n fails\n \"\"\"\n message = 'exists'\n rv = self.add_success(data)\n assert not in_response(rv, message)\n self.add_fail(data, message)\n rv = self.add_success(new_data)\n assert not in_response(rv, message)\n self.update_fail(data, message)\n assert self.verify_object(new_data)\n <mask token>\n <mask token>\n\n\nclass CategoryTestCase(ClosetTestBase, ModelBase):\n\n def __init__(self, *args, **kwargs):\n super(ModelBase, self).__init__(*args, **kwargs)\n self.base_url = '/categories'\n self.name = 'category'\n self.nice_name = 'Category'\n self.fields = {'name': 'Name', 'parent': 'Parent'}\n self.test_data = {'pants': {'name': 'Pants', 'slug': 'pants'},\n 'shirts': {'name': 'Shirts', 'slug': 'shirts'}, 'jeans': {\n 'name': 'Jeans', 'slug': 'jeans', 'parent': 'pants'},\n 't-shirts': {'name': 'T-shirts', 'slug': 't-shirts', 'parent':\n 'shirts'}, 'hats': {'name': 'Hats', 'slug': 'hats', 'parent':\n 'spam'}, 'polo-shirts': {'name': 'Polo Shirts', 'slug':\n 'polo-shirts'}, 'symbols': {'name': ':)', 'slug': ''},\n 'keyword': {'name': 'Add', 'slug': 'add-1'}}\n\n def setUp(self):\n super(CategoryTestCase, self).setUp()\n self.authenticate()\n\n def test_get_category_forms(self):\n \"\"\"Test that the category forms are accessible\"\"\"\n self.get_add_form()\n self.get_edit_form(self.test_data['pants'])\n self.get_delete_confirmation_form(self.test_data['shirts'])\n\n def test_add_category(self):\n \"\"\"Test that adding a category works\"\"\"\n self.add_success(self.test_data['pants'])\n\n def test_update_category(self):\n \"\"\"Test that updating a category works\"\"\"\n self.update_success(self.test_data['pants'], self.test_data['shirts'])\n\n def test_delete_category(self):\n \"\"\"Test that deleting a category works\"\"\"\n self.add_success(self.test_data['pants'])\n self.delete_success('pants')\n\n def test_add_child_category(self):\n \"\"\"Test that adding a child category works\"\"\"\n self.add_success(self.test_data['pants'])\n rv = self.get('pants')\n assert in_response(rv, 'This category is empty.')\n self.add_success(self.test_data['jeans'])\n rv = self.get('pants')\n assert not in_response(rv, 'This category is empty.')\n assert in_response(rv, 'Jeans')\n\n def test_update_child_category(self):\n \"\"\"Test that updating child categories works\"\"\"\n self.add_success(self.test_data['pants'])\n self.add_success(self.test_data['shirts'])\n self.add_success(self.test_data['jeans'])\n rv = self.get('pants')\n assert not in_response(rv, 'This category is empty.')\n assert in_response(rv, 'Jeans')\n self.edit_success('jeans', self.test_data['t-shirts'])\n rv = self.get('pants')\n assert in_response(rv, 'This category is empty.')\n assert not in_response(rv, 'Jeans')\n assert not in_response(rv, 'T-Shirts')\n rv = self.get('shirts')\n assert not in_response(rv, 'This category is empty.')\n assert in_response(rv, 'T-Shirts')\n assert not in_response(rv, 'Jeans')\n\n def test_name_required(self):\n \"\"\"Test that adding/updating a category without a name fails\"\"\"\n self.required_field_fail('name', self.test_data['pants'])\n\n def test_parent_does_not_exist(self):\n \"\"\"Test that adding/updating a category with a non-existent\n parent fails\n \"\"\"\n self.bad_data_fail(self.test_data['pants'], self.test_data['hats'],\n 'Parent does not exist')\n\n def test_category_already_exists(self):\n self.existing_key_fail(self.test_data['pants'], self.test_data[\n 'shirts'])\n\n def test_categories_are_sorted(self):\n \"\"\"Test that categories are sorted alphabetically by name\"\"\"\n self.data_sorted(self.test_data['shirts'], self.test_data['pants'])\n\n def test_delete_category_does_not_exist(self):\n \"\"\"Test that deleting a category that doesn't exist fails\"\"\"\n self.delete_does_not_exist_fail('hats')\n\n def test_add_category_slug_special(self):\n \"\"\"Test that adding a category with an incorrect name fails\"\"\"\n self.add_success(self.test_data['polo-shirts'])\n assert self.verify_object(dict(name='Polo Shirts', slug='polo-shirts'))\n self.add_fail(self.test_data['symbols'], '')\n self.add_success('Add')\n\n def test_update_category_slug_special(self):\n \"\"\"Test that updating a category with an incorrect slug fails\"\"\"\n rv = self.app.post(self.get_category_url('add'), data=dict(name=\n 'Pants', slug='pants'), follow_redirects=True)\n rv = self.app.post(self.get_category_url('pants', 'edit'), data=\n dict(name='Polo Shirts', slug='polo shirts'), follow_redirects=True\n )\n assert b'Edit Pants' in rv.data\n assert b'Slug is formatted incorrectly' in rv.data\n rv = self.app.post(self.get_category_url('pants', 'edit'), data=\n dict(name=':)', slug=':)'), follow_redirects=True)\n assert b'Edit Pants' in rv.data\n assert b'Slug is formatted incorrectly' in rv.data\n rv = self.app.post(self.get_category_url('pants', 'edit'), data=\n dict(name='Add', slug='add'), follow_redirects=True)\n assert b'Edit Pants' in rv.data\n assert b'Slug \"add\" is not allowed' in rv.data\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass ClosetTestCase(ClosetTestBase):\n <mask token>\n <mask token>\n\n\nclass ModelBase(unittest.TestCase):\n\n def __init__(self, *args, **kwargs):\n super(ModelBase, self).__init__(*args, **kwargs)\n self.base_url = '/'\n self.add_url = 'add'\n self.edit_url = 'edit'\n self.delete_url = 'delete'\n self.name = ''\n self.nice_name = ''\n self.name_field = 'name'\n self.id_field = 'slug'\n self.fields = {}\n\n def get_url(self, *args):\n \"\"\"Create a URL from a tuple of strings based on the base url\"\"\"\n try:\n url = '/'.join((self.base_url,) + args)\n except TypeError:\n url = '/'.join((self.base_url,) + args[0])\n return url.rstrip('/')\n\n def get(self, url):\n \"\"\"Process a GET request to the app\"\"\"\n return self.app.get(get_url(url), follow_redirects=True)\n\n def post(self, url, data):\n \"\"\"Process a POST request to the app\"\"\"\n return self.app.post(get_url(url), data=data, follow_redirects=True)\n\n def verify_object(self, data):\n \"\"\"Verify the model object data\"\"\"\n rv = self.get(data[self.id_field])\n result = not is_404(rv)\n if result:\n for key, value in data:\n if not in_response(rv, value):\n return False\n return result\n\n def get_add_form(self):\n \"\"\"Test that the \"add\" form is accessible and contains all the\n fields\n \"\"\"\n rv = self.get(self.add_url)\n assert not is_404(rv)\n assert in_response(rv, 'Add {}'.format(self.nice_name))\n for field, name in self.fields:\n assert in_response(rv, name)\n return rv\n\n def get_edit_form(self, data):\n \"\"\"Test that the edit form is accessible and contains all the\n fields\n \"\"\"\n self.add_success(data)\n rv = self.get((data[self.id_field], self.edit_url))\n assert not is_404(rv)\n assert in_response(rv, 'Edit {}'.format(data[self.name_field]))\n for field, name in self.fields:\n assert in_response(rv, name)\n return rv\n\n def get_delete_confirmation_form(self, data):\n \"\"\"Test that the delete confirmation form is accessible\"\"\"\n self.add_success(data)\n rv = self.get((data[self.id_field], self.delete_url))\n assert not is_404(rv)\n assert in_response(rv, 'Delete {}'.format(data[self.name_field]))\n return rv\n\n def add_success(self, data):\n \"\"\"Test that adding a model with the given data succeeds\"\"\"\n rv = self.post(self.add_url, data)\n assert not in_response(rv, 'Add {}'.format(self.nice_name))\n assert self.verify_object(data)\n return rv\n\n def edit_success(self, id_, data):\n \"\"\"Test that updating a model with the given data succeeds\"\"\"\n rv = self.post((id_, self.edit_url), data)\n assert not in_response(rv, 'Edit {}'.format(data[self.name_field]))\n assert self.verify_object(data)\n return rv\n\n def update_success(self, data, new_data):\n \"\"\"Test that updating a model with the given data succeeds\"\"\"\n self.add_success(data)\n return self.edit_success(data[self.id_field], new_data)\n\n def delete_success(self, id_):\n \"\"\"Test that deleting the specified model succeeds\"\"\"\n rv = self.post((id_, self.delete_url), dict(post='yes'))\n assert not self.verify_object({self.id_field: id_})\n return rv\n\n def add_fail(self, data, message):\n \"\"\"Test that adding a model with the given data fails\"\"\"\n rv = self.post(self.add_url, data)\n assert in_response(rv, 'Add {}'.format(self.nice_name))\n assert in_response(rv, message)\n return rv\n\n def edit_fail(self, id_, data, message):\n \"\"\"Test that updating a model with the given data fails\"\"\"\n rv = self.post((id_, self.edit_url), data)\n assert in_response(rv, 'Edit {}'.format(data[self.name_field]))\n assert in_response(rv, message)\n return rv\n\n def update_fail(self, data, new_data, message):\n \"\"\"Test that updating a model with the given data fails\"\"\"\n self.add_success(data)\n return self.edit_fail(data[self.id_field], new_data, message)\n\n def delete_fail(self, id_, message):\n \"\"\"Test that deleting the specified model fails\"\"\"\n rv = self.post((id_, self.delete_url), dict(post='yes'))\n assert in_response(rv, message)\n assert self.verify_object({self.id_field: id_})\n return rv\n\n def bad_data_fail(self, good_data, bad_data, message):\n \"\"\"Test that adding and updating a model with the given data\n fails\n \"\"\"\n self.add_fail(bad_data, message)\n self.update_fail(good_data, bad_data, message)\n\n def add_required_field_fail(self, field, data):\n \"\"\"Test that adding a model with a blank or missing required\n field fails\n \"\"\"\n message = '{} is required'.format(self.fields[field])\n data = data.copy()\n data[field] = ''\n self.add_fail(data, message)\n assert not self.verify_object(data)\n del data[field]\n self.add_fail(data, message)\n assert not self.verify_object(data)\n\n def update_required_field_fail(self, field, data):\n \"\"\"Test that updating a model with a blank or missing required\n field fails\n \"\"\"\n message = '{} is required'.format(self.fields[field])\n data = data.copy()\n id_ = data[self.id_field]\n self.add_success(data)\n data[field] = ''\n self.edit_fail(id_, data, message)\n assert not self.verify_object(data)\n del data[field]\n self.edit_fail(id_, data, message)\n assert not self.verify_object(data)\n\n def required_field_fail(self, field, data):\n \"\"\"Test that adding and updating a model with a blank or missing\n required field fails\n \"\"\"\n self.add_required_field_fail(field, data)\n self.update_required_field_fail(field, data)\n\n def add_existing_key_fail(self, data):\n \"\"\"Test that adding a model with an existing key fails\"\"\"\n message = 'exists'\n rv = self.add_success(data)\n assert not in_response(rv, message)\n return self.add_fail(data, message)\n\n def update_existing_key_fail(self, data, new_data):\n \"\"\"Test that adding a model with an existing key fails\"\"\"\n message = 'exists'\n rv = self.add_success(data)\n assert not in_response(rv, message)\n rv = self.add_success(new_data)\n assert not in_response(rv, message)\n rv = self.update_fail(data, message)\n assert self.verify_object(new_data)\n return rv\n\n def existing_key_fail(self, data, new_data):\n \"\"\"Test that adding and updating a model with an existing key\n fails\n \"\"\"\n message = 'exists'\n rv = self.add_success(data)\n assert not in_response(rv, message)\n self.add_fail(data, message)\n rv = self.add_success(new_data)\n assert not in_response(rv, message)\n self.update_fail(data, message)\n assert self.verify_object(new_data)\n\n def data_sorted(self, before_data, after_data, url):\n \"\"\"Test that the models will be sorted in the correct order\"\"\"\n self.add_success(after_data)\n self.add_success(before_data)\n rv = self.get(url)\n after_index = rv.data.index(after_data[self.name_field].encode())\n before_index = rv.data.index(before_data[self.name_field].encode())\n assert after_index > before_index\n\n def delete_does_not_exist_fail(self, id_):\n \"\"\"Test that deleting a model that does not exist fails\"\"\"\n assert is_404(self.get((id_, self.delete_url)))\n self.delete_fail(id_, 'does not exist')\n\n\nclass CategoryTestCase(ClosetTestBase, ModelBase):\n\n def __init__(self, *args, **kwargs):\n super(ModelBase, self).__init__(*args, **kwargs)\n self.base_url = '/categories'\n self.name = 'category'\n self.nice_name = 'Category'\n self.fields = {'name': 'Name', 'parent': 'Parent'}\n self.test_data = {'pants': {'name': 'Pants', 'slug': 'pants'},\n 'shirts': {'name': 'Shirts', 'slug': 'shirts'}, 'jeans': {\n 'name': 'Jeans', 'slug': 'jeans', 'parent': 'pants'},\n 't-shirts': {'name': 'T-shirts', 'slug': 't-shirts', 'parent':\n 'shirts'}, 'hats': {'name': 'Hats', 'slug': 'hats', 'parent':\n 'spam'}, 'polo-shirts': {'name': 'Polo Shirts', 'slug':\n 'polo-shirts'}, 'symbols': {'name': ':)', 'slug': ''},\n 'keyword': {'name': 'Add', 'slug': 'add-1'}}\n\n def setUp(self):\n super(CategoryTestCase, self).setUp()\n self.authenticate()\n\n def test_get_category_forms(self):\n \"\"\"Test that the category forms are accessible\"\"\"\n self.get_add_form()\n self.get_edit_form(self.test_data['pants'])\n self.get_delete_confirmation_form(self.test_data['shirts'])\n\n def test_add_category(self):\n \"\"\"Test that adding a category works\"\"\"\n self.add_success(self.test_data['pants'])\n\n def test_update_category(self):\n \"\"\"Test that updating a category works\"\"\"\n self.update_success(self.test_data['pants'], self.test_data['shirts'])\n\n def test_delete_category(self):\n \"\"\"Test that deleting a category works\"\"\"\n self.add_success(self.test_data['pants'])\n self.delete_success('pants')\n\n def test_add_child_category(self):\n \"\"\"Test that adding a child category works\"\"\"\n self.add_success(self.test_data['pants'])\n rv = self.get('pants')\n assert in_response(rv, 'This category is empty.')\n self.add_success(self.test_data['jeans'])\n rv = self.get('pants')\n assert not in_response(rv, 'This category is empty.')\n assert in_response(rv, 'Jeans')\n\n def test_update_child_category(self):\n \"\"\"Test that updating child categories works\"\"\"\n self.add_success(self.test_data['pants'])\n self.add_success(self.test_data['shirts'])\n self.add_success(self.test_data['jeans'])\n rv = self.get('pants')\n assert not in_response(rv, 'This category is empty.')\n assert in_response(rv, 'Jeans')\n self.edit_success('jeans', self.test_data['t-shirts'])\n rv = self.get('pants')\n assert in_response(rv, 'This category is empty.')\n assert not in_response(rv, 'Jeans')\n assert not in_response(rv, 'T-Shirts')\n rv = self.get('shirts')\n assert not in_response(rv, 'This category is empty.')\n assert in_response(rv, 'T-Shirts')\n assert not in_response(rv, 'Jeans')\n\n def test_name_required(self):\n \"\"\"Test that adding/updating a category without a name fails\"\"\"\n self.required_field_fail('name', self.test_data['pants'])\n\n def test_parent_does_not_exist(self):\n \"\"\"Test that adding/updating a category with a non-existent\n parent fails\n \"\"\"\n self.bad_data_fail(self.test_data['pants'], self.test_data['hats'],\n 'Parent does not exist')\n\n def test_category_already_exists(self):\n self.existing_key_fail(self.test_data['pants'], self.test_data[\n 'shirts'])\n\n def test_categories_are_sorted(self):\n \"\"\"Test that categories are sorted alphabetically by name\"\"\"\n self.data_sorted(self.test_data['shirts'], self.test_data['pants'])\n\n def test_delete_category_does_not_exist(self):\n \"\"\"Test that deleting a category that doesn't exist fails\"\"\"\n self.delete_does_not_exist_fail('hats')\n\n def test_add_category_slug_special(self):\n \"\"\"Test that adding a category with an incorrect name fails\"\"\"\n self.add_success(self.test_data['polo-shirts'])\n assert self.verify_object(dict(name='Polo Shirts', slug='polo-shirts'))\n self.add_fail(self.test_data['symbols'], '')\n self.add_success('Add')\n\n def test_update_category_slug_special(self):\n \"\"\"Test that updating a category with an incorrect slug fails\"\"\"\n rv = self.app.post(self.get_category_url('add'), data=dict(name=\n 'Pants', slug='pants'), follow_redirects=True)\n rv = self.app.post(self.get_category_url('pants', 'edit'), data=\n dict(name='Polo Shirts', slug='polo shirts'), follow_redirects=True\n )\n assert b'Edit Pants' in rv.data\n assert b'Slug is formatted incorrectly' in rv.data\n rv = self.app.post(self.get_category_url('pants', 'edit'), data=\n dict(name=':)', slug=':)'), follow_redirects=True)\n assert b'Edit Pants' in rv.data\n assert b'Slug is formatted incorrectly' in rv.data\n rv = self.app.post(self.get_category_url('pants', 'edit'), data=\n dict(name='Add', slug='add'), follow_redirects=True)\n assert b'Edit Pants' in rv.data\n assert b'Slug \"add\" is not allowed' in rv.data\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass ClosetTestCase(ClosetTestBase):\n <mask token>\n\n def test_login_logout(self):\n \"\"\"Make sure login and logout works\"\"\"\n rv = self.login(closet.app.config['USERNAME'], closet.app.config[\n 'PASSWORD'])\n assert b'You were logged in' in rv.data\n rv = self.logout()\n assert b'You were logged out' in rv.data\n rv = self.login(closet.app.config['USERNAME'] + 'x', closet.app.\n config['PASSWORD'])\n assert b'Invalid username' in rv.data\n rv = self.login(closet.app.config['USERNAME'], closet.app.config[\n 'PASSWORD'] + 'x')\n assert b'Invalid password' in rv.data\n\n\nclass ModelBase(unittest.TestCase):\n\n def __init__(self, *args, **kwargs):\n super(ModelBase, self).__init__(*args, **kwargs)\n self.base_url = '/'\n self.add_url = 'add'\n self.edit_url = 'edit'\n self.delete_url = 'delete'\n self.name = ''\n self.nice_name = ''\n self.name_field = 'name'\n self.id_field = 'slug'\n self.fields = {}\n\n def get_url(self, *args):\n \"\"\"Create a URL from a tuple of strings based on the base url\"\"\"\n try:\n url = '/'.join((self.base_url,) + args)\n except TypeError:\n url = '/'.join((self.base_url,) + args[0])\n return url.rstrip('/')\n\n def get(self, url):\n \"\"\"Process a GET request to the app\"\"\"\n return self.app.get(get_url(url), follow_redirects=True)\n\n def post(self, url, data):\n \"\"\"Process a POST request to the app\"\"\"\n return self.app.post(get_url(url), data=data, follow_redirects=True)\n\n def verify_object(self, data):\n \"\"\"Verify the model object data\"\"\"\n rv = self.get(data[self.id_field])\n result = not is_404(rv)\n if result:\n for key, value in data:\n if not in_response(rv, value):\n return False\n return result\n\n def get_add_form(self):\n \"\"\"Test that the \"add\" form is accessible and contains all the\n fields\n \"\"\"\n rv = self.get(self.add_url)\n assert not is_404(rv)\n assert in_response(rv, 'Add {}'.format(self.nice_name))\n for field, name in self.fields:\n assert in_response(rv, name)\n return rv\n\n def get_edit_form(self, data):\n \"\"\"Test that the edit form is accessible and contains all the\n fields\n \"\"\"\n self.add_success(data)\n rv = self.get((data[self.id_field], self.edit_url))\n assert not is_404(rv)\n assert in_response(rv, 'Edit {}'.format(data[self.name_field]))\n for field, name in self.fields:\n assert in_response(rv, name)\n return rv\n\n def get_delete_confirmation_form(self, data):\n \"\"\"Test that the delete confirmation form is accessible\"\"\"\n self.add_success(data)\n rv = self.get((data[self.id_field], self.delete_url))\n assert not is_404(rv)\n assert in_response(rv, 'Delete {}'.format(data[self.name_field]))\n return rv\n\n def add_success(self, data):\n \"\"\"Test that adding a model with the given data succeeds\"\"\"\n rv = self.post(self.add_url, data)\n assert not in_response(rv, 'Add {}'.format(self.nice_name))\n assert self.verify_object(data)\n return rv\n\n def edit_success(self, id_, data):\n \"\"\"Test that updating a model with the given data succeeds\"\"\"\n rv = self.post((id_, self.edit_url), data)\n assert not in_response(rv, 'Edit {}'.format(data[self.name_field]))\n assert self.verify_object(data)\n return rv\n\n def update_success(self, data, new_data):\n \"\"\"Test that updating a model with the given data succeeds\"\"\"\n self.add_success(data)\n return self.edit_success(data[self.id_field], new_data)\n\n def delete_success(self, id_):\n \"\"\"Test that deleting the specified model succeeds\"\"\"\n rv = self.post((id_, self.delete_url), dict(post='yes'))\n assert not self.verify_object({self.id_field: id_})\n return rv\n\n def add_fail(self, data, message):\n \"\"\"Test that adding a model with the given data fails\"\"\"\n rv = self.post(self.add_url, data)\n assert in_response(rv, 'Add {}'.format(self.nice_name))\n assert in_response(rv, message)\n return rv\n\n def edit_fail(self, id_, data, message):\n \"\"\"Test that updating a model with the given data fails\"\"\"\n rv = self.post((id_, self.edit_url), data)\n assert in_response(rv, 'Edit {}'.format(data[self.name_field]))\n assert in_response(rv, message)\n return rv\n\n def update_fail(self, data, new_data, message):\n \"\"\"Test that updating a model with the given data fails\"\"\"\n self.add_success(data)\n return self.edit_fail(data[self.id_field], new_data, message)\n\n def delete_fail(self, id_, message):\n \"\"\"Test that deleting the specified model fails\"\"\"\n rv = self.post((id_, self.delete_url), dict(post='yes'))\n assert in_response(rv, message)\n assert self.verify_object({self.id_field: id_})\n return rv\n\n def bad_data_fail(self, good_data, bad_data, message):\n \"\"\"Test that adding and updating a model with the given data\n fails\n \"\"\"\n self.add_fail(bad_data, message)\n self.update_fail(good_data, bad_data, message)\n\n def add_required_field_fail(self, field, data):\n \"\"\"Test that adding a model with a blank or missing required\n field fails\n \"\"\"\n message = '{} is required'.format(self.fields[field])\n data = data.copy()\n data[field] = ''\n self.add_fail(data, message)\n assert not self.verify_object(data)\n del data[field]\n self.add_fail(data, message)\n assert not self.verify_object(data)\n\n def update_required_field_fail(self, field, data):\n \"\"\"Test that updating a model with a blank or missing required\n field fails\n \"\"\"\n message = '{} is required'.format(self.fields[field])\n data = data.copy()\n id_ = data[self.id_field]\n self.add_success(data)\n data[field] = ''\n self.edit_fail(id_, data, message)\n assert not self.verify_object(data)\n del data[field]\n self.edit_fail(id_, data, message)\n assert not self.verify_object(data)\n\n def required_field_fail(self, field, data):\n \"\"\"Test that adding and updating a model with a blank or missing\n required field fails\n \"\"\"\n self.add_required_field_fail(field, data)\n self.update_required_field_fail(field, data)\n\n def add_existing_key_fail(self, data):\n \"\"\"Test that adding a model with an existing key fails\"\"\"\n message = 'exists'\n rv = self.add_success(data)\n assert not in_response(rv, message)\n return self.add_fail(data, message)\n\n def update_existing_key_fail(self, data, new_data):\n \"\"\"Test that adding a model with an existing key fails\"\"\"\n message = 'exists'\n rv = self.add_success(data)\n assert not in_response(rv, message)\n rv = self.add_success(new_data)\n assert not in_response(rv, message)\n rv = self.update_fail(data, message)\n assert self.verify_object(new_data)\n return rv\n\n def existing_key_fail(self, data, new_data):\n \"\"\"Test that adding and updating a model with an existing key\n fails\n \"\"\"\n message = 'exists'\n rv = self.add_success(data)\n assert not in_response(rv, message)\n self.add_fail(data, message)\n rv = self.add_success(new_data)\n assert not in_response(rv, message)\n self.update_fail(data, message)\n assert self.verify_object(new_data)\n\n def data_sorted(self, before_data, after_data, url):\n \"\"\"Test that the models will be sorted in the correct order\"\"\"\n self.add_success(after_data)\n self.add_success(before_data)\n rv = self.get(url)\n after_index = rv.data.index(after_data[self.name_field].encode())\n before_index = rv.data.index(before_data[self.name_field].encode())\n assert after_index > before_index\n\n def delete_does_not_exist_fail(self, id_):\n \"\"\"Test that deleting a model that does not exist fails\"\"\"\n assert is_404(self.get((id_, self.delete_url)))\n self.delete_fail(id_, 'does not exist')\n\n\nclass CategoryTestCase(ClosetTestBase, ModelBase):\n\n def __init__(self, *args, **kwargs):\n super(ModelBase, self).__init__(*args, **kwargs)\n self.base_url = '/categories'\n self.name = 'category'\n self.nice_name = 'Category'\n self.fields = {'name': 'Name', 'parent': 'Parent'}\n self.test_data = {'pants': {'name': 'Pants', 'slug': 'pants'},\n 'shirts': {'name': 'Shirts', 'slug': 'shirts'}, 'jeans': {\n 'name': 'Jeans', 'slug': 'jeans', 'parent': 'pants'},\n 't-shirts': {'name': 'T-shirts', 'slug': 't-shirts', 'parent':\n 'shirts'}, 'hats': {'name': 'Hats', 'slug': 'hats', 'parent':\n 'spam'}, 'polo-shirts': {'name': 'Polo Shirts', 'slug':\n 'polo-shirts'}, 'symbols': {'name': ':)', 'slug': ''},\n 'keyword': {'name': 'Add', 'slug': 'add-1'}}\n\n def setUp(self):\n super(CategoryTestCase, self).setUp()\n self.authenticate()\n\n def test_get_category_forms(self):\n \"\"\"Test that the category forms are accessible\"\"\"\n self.get_add_form()\n self.get_edit_form(self.test_data['pants'])\n self.get_delete_confirmation_form(self.test_data['shirts'])\n\n def test_add_category(self):\n \"\"\"Test that adding a category works\"\"\"\n self.add_success(self.test_data['pants'])\n\n def test_update_category(self):\n \"\"\"Test that updating a category works\"\"\"\n self.update_success(self.test_data['pants'], self.test_data['shirts'])\n\n def test_delete_category(self):\n \"\"\"Test that deleting a category works\"\"\"\n self.add_success(self.test_data['pants'])\n self.delete_success('pants')\n\n def test_add_child_category(self):\n \"\"\"Test that adding a child category works\"\"\"\n self.add_success(self.test_data['pants'])\n rv = self.get('pants')\n assert in_response(rv, 'This category is empty.')\n self.add_success(self.test_data['jeans'])\n rv = self.get('pants')\n assert not in_response(rv, 'This category is empty.')\n assert in_response(rv, 'Jeans')\n\n def test_update_child_category(self):\n \"\"\"Test that updating child categories works\"\"\"\n self.add_success(self.test_data['pants'])\n self.add_success(self.test_data['shirts'])\n self.add_success(self.test_data['jeans'])\n rv = self.get('pants')\n assert not in_response(rv, 'This category is empty.')\n assert in_response(rv, 'Jeans')\n self.edit_success('jeans', self.test_data['t-shirts'])\n rv = self.get('pants')\n assert in_response(rv, 'This category is empty.')\n assert not in_response(rv, 'Jeans')\n assert not in_response(rv, 'T-Shirts')\n rv = self.get('shirts')\n assert not in_response(rv, 'This category is empty.')\n assert in_response(rv, 'T-Shirts')\n assert not in_response(rv, 'Jeans')\n\n def test_name_required(self):\n \"\"\"Test that adding/updating a category without a name fails\"\"\"\n self.required_field_fail('name', self.test_data['pants'])\n\n def test_parent_does_not_exist(self):\n \"\"\"Test that adding/updating a category with a non-existent\n parent fails\n \"\"\"\n self.bad_data_fail(self.test_data['pants'], self.test_data['hats'],\n 'Parent does not exist')\n\n def test_category_already_exists(self):\n self.existing_key_fail(self.test_data['pants'], self.test_data[\n 'shirts'])\n\n def test_categories_are_sorted(self):\n \"\"\"Test that categories are sorted alphabetically by name\"\"\"\n self.data_sorted(self.test_data['shirts'], self.test_data['pants'])\n\n def test_delete_category_does_not_exist(self):\n \"\"\"Test that deleting a category that doesn't exist fails\"\"\"\n self.delete_does_not_exist_fail('hats')\n\n def test_add_category_slug_special(self):\n \"\"\"Test that adding a category with an incorrect name fails\"\"\"\n self.add_success(self.test_data['polo-shirts'])\n assert self.verify_object(dict(name='Polo Shirts', slug='polo-shirts'))\n self.add_fail(self.test_data['symbols'], '')\n self.add_success('Add')\n\n def test_update_category_slug_special(self):\n \"\"\"Test that updating a category with an incorrect slug fails\"\"\"\n rv = self.app.post(self.get_category_url('add'), data=dict(name=\n 'Pants', slug='pants'), follow_redirects=True)\n rv = self.app.post(self.get_category_url('pants', 'edit'), data=\n dict(name='Polo Shirts', slug='polo shirts'), follow_redirects=True\n )\n assert b'Edit Pants' in rv.data\n assert b'Slug is formatted incorrectly' in rv.data\n rv = self.app.post(self.get_category_url('pants', 'edit'), data=\n dict(name=':)', slug=':)'), follow_redirects=True)\n assert b'Edit Pants' in rv.data\n assert b'Slug is formatted incorrectly' in rv.data\n rv = self.app.post(self.get_category_url('pants', 'edit'), data=\n dict(name='Add', slug='add'), follow_redirects=True)\n assert b'Edit Pants' in rv.data\n assert b'Slug \"add\" is not allowed' in rv.data\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass ClosetTestBase(unittest.TestCase):\n\n def setUp(self):\n \"\"\"Set up test environment befor each test\"\"\"\n self.db_fd, closet.app.config['DATABASE'] = tempfile.mkstemp()\n closet.app.config['TESTING'] = True\n self.app = closet.app.test_client()\n closet.init_db()\n <mask token>\n\n def login(self, username, password):\n \"\"\"Login to test website as specified user with the specified\n password\n \"\"\"\n return self.app.post('/login', data=dict(username=username,\n password=password), follow_redirects=True)\n <mask token>\n\n def authenticate(self):\n \"\"\"Login to test website as the standard test user\"\"\"\n self.login(closet.app.config['USERNAME'], closet.app.config['PASSWORD']\n )\n\n\nclass ClosetTestCase(ClosetTestBase):\n\n def test_empty_db(self):\n \"\"\"Start with a blank database.\"\"\"\n rv = self.app.get('/')\n assert b'Your closet is empty.' in rv.data\n\n def test_login_logout(self):\n \"\"\"Make sure login and logout works\"\"\"\n rv = self.login(closet.app.config['USERNAME'], closet.app.config[\n 'PASSWORD'])\n assert b'You were logged in' in rv.data\n rv = self.logout()\n assert b'You were logged out' in rv.data\n rv = self.login(closet.app.config['USERNAME'] + 'x', closet.app.\n config['PASSWORD'])\n assert b'Invalid username' in rv.data\n rv = self.login(closet.app.config['USERNAME'], closet.app.config[\n 'PASSWORD'] + 'x')\n assert b'Invalid password' in rv.data\n\n\nclass ModelBase(unittest.TestCase):\n\n def __init__(self, *args, **kwargs):\n super(ModelBase, self).__init__(*args, **kwargs)\n self.base_url = '/'\n self.add_url = 'add'\n self.edit_url = 'edit'\n self.delete_url = 'delete'\n self.name = ''\n self.nice_name = ''\n self.name_field = 'name'\n self.id_field = 'slug'\n self.fields = {}\n\n def get_url(self, *args):\n \"\"\"Create a URL from a tuple of strings based on the base url\"\"\"\n try:\n url = '/'.join((self.base_url,) + args)\n except TypeError:\n url = '/'.join((self.base_url,) + args[0])\n return url.rstrip('/')\n\n def get(self, url):\n \"\"\"Process a GET request to the app\"\"\"\n return self.app.get(get_url(url), follow_redirects=True)\n\n def post(self, url, data):\n \"\"\"Process a POST request to the app\"\"\"\n return self.app.post(get_url(url), data=data, follow_redirects=True)\n\n def verify_object(self, data):\n \"\"\"Verify the model object data\"\"\"\n rv = self.get(data[self.id_field])\n result = not is_404(rv)\n if result:\n for key, value in data:\n if not in_response(rv, value):\n return False\n return result\n\n def get_add_form(self):\n \"\"\"Test that the \"add\" form is accessible and contains all the\n fields\n \"\"\"\n rv = self.get(self.add_url)\n assert not is_404(rv)\n assert in_response(rv, 'Add {}'.format(self.nice_name))\n for field, name in self.fields:\n assert in_response(rv, name)\n return rv\n\n def get_edit_form(self, data):\n \"\"\"Test that the edit form is accessible and contains all the\n fields\n \"\"\"\n self.add_success(data)\n rv = self.get((data[self.id_field], self.edit_url))\n assert not is_404(rv)\n assert in_response(rv, 'Edit {}'.format(data[self.name_field]))\n for field, name in self.fields:\n assert in_response(rv, name)\n return rv\n\n def get_delete_confirmation_form(self, data):\n \"\"\"Test that the delete confirmation form is accessible\"\"\"\n self.add_success(data)\n rv = self.get((data[self.id_field], self.delete_url))\n assert not is_404(rv)\n assert in_response(rv, 'Delete {}'.format(data[self.name_field]))\n return rv\n\n def add_success(self, data):\n \"\"\"Test that adding a model with the given data succeeds\"\"\"\n rv = self.post(self.add_url, data)\n assert not in_response(rv, 'Add {}'.format(self.nice_name))\n assert self.verify_object(data)\n return rv\n\n def edit_success(self, id_, data):\n \"\"\"Test that updating a model with the given data succeeds\"\"\"\n rv = self.post((id_, self.edit_url), data)\n assert not in_response(rv, 'Edit {}'.format(data[self.name_field]))\n assert self.verify_object(data)\n return rv\n\n def update_success(self, data, new_data):\n \"\"\"Test that updating a model with the given data succeeds\"\"\"\n self.add_success(data)\n return self.edit_success(data[self.id_field], new_data)\n\n def delete_success(self, id_):\n \"\"\"Test that deleting the specified model succeeds\"\"\"\n rv = self.post((id_, self.delete_url), dict(post='yes'))\n assert not self.verify_object({self.id_field: id_})\n return rv\n\n def add_fail(self, data, message):\n \"\"\"Test that adding a model with the given data fails\"\"\"\n rv = self.post(self.add_url, data)\n assert in_response(rv, 'Add {}'.format(self.nice_name))\n assert in_response(rv, message)\n return rv\n\n def edit_fail(self, id_, data, message):\n \"\"\"Test that updating a model with the given data fails\"\"\"\n rv = self.post((id_, self.edit_url), data)\n assert in_response(rv, 'Edit {}'.format(data[self.name_field]))\n assert in_response(rv, message)\n return rv\n\n def update_fail(self, data, new_data, message):\n \"\"\"Test that updating a model with the given data fails\"\"\"\n self.add_success(data)\n return self.edit_fail(data[self.id_field], new_data, message)\n\n def delete_fail(self, id_, message):\n \"\"\"Test that deleting the specified model fails\"\"\"\n rv = self.post((id_, self.delete_url), dict(post='yes'))\n assert in_response(rv, message)\n assert self.verify_object({self.id_field: id_})\n return rv\n\n def bad_data_fail(self, good_data, bad_data, message):\n \"\"\"Test that adding and updating a model with the given data\n fails\n \"\"\"\n self.add_fail(bad_data, message)\n self.update_fail(good_data, bad_data, message)\n\n def add_required_field_fail(self, field, data):\n \"\"\"Test that adding a model with a blank or missing required\n field fails\n \"\"\"\n message = '{} is required'.format(self.fields[field])\n data = data.copy()\n data[field] = ''\n self.add_fail(data, message)\n assert not self.verify_object(data)\n del data[field]\n self.add_fail(data, message)\n assert not self.verify_object(data)\n\n def update_required_field_fail(self, field, data):\n \"\"\"Test that updating a model with a blank or missing required\n field fails\n \"\"\"\n message = '{} is required'.format(self.fields[field])\n data = data.copy()\n id_ = data[self.id_field]\n self.add_success(data)\n data[field] = ''\n self.edit_fail(id_, data, message)\n assert not self.verify_object(data)\n del data[field]\n self.edit_fail(id_, data, message)\n assert not self.verify_object(data)\n\n def required_field_fail(self, field, data):\n \"\"\"Test that adding and updating a model with a blank or missing\n required field fails\n \"\"\"\n self.add_required_field_fail(field, data)\n self.update_required_field_fail(field, data)\n\n def add_existing_key_fail(self, data):\n \"\"\"Test that adding a model with an existing key fails\"\"\"\n message = 'exists'\n rv = self.add_success(data)\n assert not in_response(rv, message)\n return self.add_fail(data, message)\n\n def update_existing_key_fail(self, data, new_data):\n \"\"\"Test that adding a model with an existing key fails\"\"\"\n message = 'exists'\n rv = self.add_success(data)\n assert not in_response(rv, message)\n rv = self.add_success(new_data)\n assert not in_response(rv, message)\n rv = self.update_fail(data, message)\n assert self.verify_object(new_data)\n return rv\n\n def existing_key_fail(self, data, new_data):\n \"\"\"Test that adding and updating a model with an existing key\n fails\n \"\"\"\n message = 'exists'\n rv = self.add_success(data)\n assert not in_response(rv, message)\n self.add_fail(data, message)\n rv = self.add_success(new_data)\n assert not in_response(rv, message)\n self.update_fail(data, message)\n assert self.verify_object(new_data)\n\n def data_sorted(self, before_data, after_data, url):\n \"\"\"Test that the models will be sorted in the correct order\"\"\"\n self.add_success(after_data)\n self.add_success(before_data)\n rv = self.get(url)\n after_index = rv.data.index(after_data[self.name_field].encode())\n before_index = rv.data.index(before_data[self.name_field].encode())\n assert after_index > before_index\n\n def delete_does_not_exist_fail(self, id_):\n \"\"\"Test that deleting a model that does not exist fails\"\"\"\n assert is_404(self.get((id_, self.delete_url)))\n self.delete_fail(id_, 'does not exist')\n\n\nclass CategoryTestCase(ClosetTestBase, ModelBase):\n\n def __init__(self, *args, **kwargs):\n super(ModelBase, self).__init__(*args, **kwargs)\n self.base_url = '/categories'\n self.name = 'category'\n self.nice_name = 'Category'\n self.fields = {'name': 'Name', 'parent': 'Parent'}\n self.test_data = {'pants': {'name': 'Pants', 'slug': 'pants'},\n 'shirts': {'name': 'Shirts', 'slug': 'shirts'}, 'jeans': {\n 'name': 'Jeans', 'slug': 'jeans', 'parent': 'pants'},\n 't-shirts': {'name': 'T-shirts', 'slug': 't-shirts', 'parent':\n 'shirts'}, 'hats': {'name': 'Hats', 'slug': 'hats', 'parent':\n 'spam'}, 'polo-shirts': {'name': 'Polo Shirts', 'slug':\n 'polo-shirts'}, 'symbols': {'name': ':)', 'slug': ''},\n 'keyword': {'name': 'Add', 'slug': 'add-1'}}\n\n def setUp(self):\n super(CategoryTestCase, self).setUp()\n self.authenticate()\n\n def test_get_category_forms(self):\n \"\"\"Test that the category forms are accessible\"\"\"\n self.get_add_form()\n self.get_edit_form(self.test_data['pants'])\n self.get_delete_confirmation_form(self.test_data['shirts'])\n\n def test_add_category(self):\n \"\"\"Test that adding a category works\"\"\"\n self.add_success(self.test_data['pants'])\n\n def test_update_category(self):\n \"\"\"Test that updating a category works\"\"\"\n self.update_success(self.test_data['pants'], self.test_data['shirts'])\n\n def test_delete_category(self):\n \"\"\"Test that deleting a category works\"\"\"\n self.add_success(self.test_data['pants'])\n self.delete_success('pants')\n\n def test_add_child_category(self):\n \"\"\"Test that adding a child category works\"\"\"\n self.add_success(self.test_data['pants'])\n rv = self.get('pants')\n assert in_response(rv, 'This category is empty.')\n self.add_success(self.test_data['jeans'])\n rv = self.get('pants')\n assert not in_response(rv, 'This category is empty.')\n assert in_response(rv, 'Jeans')\n\n def test_update_child_category(self):\n \"\"\"Test that updating child categories works\"\"\"\n self.add_success(self.test_data['pants'])\n self.add_success(self.test_data['shirts'])\n self.add_success(self.test_data['jeans'])\n rv = self.get('pants')\n assert not in_response(rv, 'This category is empty.')\n assert in_response(rv, 'Jeans')\n self.edit_success('jeans', self.test_data['t-shirts'])\n rv = self.get('pants')\n assert in_response(rv, 'This category is empty.')\n assert not in_response(rv, 'Jeans')\n assert not in_response(rv, 'T-Shirts')\n rv = self.get('shirts')\n assert not in_response(rv, 'This category is empty.')\n assert in_response(rv, 'T-Shirts')\n assert not in_response(rv, 'Jeans')\n\n def test_name_required(self):\n \"\"\"Test that adding/updating a category without a name fails\"\"\"\n self.required_field_fail('name', self.test_data['pants'])\n\n def test_parent_does_not_exist(self):\n \"\"\"Test that adding/updating a category with a non-existent\n parent fails\n \"\"\"\n self.bad_data_fail(self.test_data['pants'], self.test_data['hats'],\n 'Parent does not exist')\n\n def test_category_already_exists(self):\n self.existing_key_fail(self.test_data['pants'], self.test_data[\n 'shirts'])\n\n def test_categories_are_sorted(self):\n \"\"\"Test that categories are sorted alphabetically by name\"\"\"\n self.data_sorted(self.test_data['shirts'], self.test_data['pants'])\n\n def test_delete_category_does_not_exist(self):\n \"\"\"Test that deleting a category that doesn't exist fails\"\"\"\n self.delete_does_not_exist_fail('hats')\n\n def test_add_category_slug_special(self):\n \"\"\"Test that adding a category with an incorrect name fails\"\"\"\n self.add_success(self.test_data['polo-shirts'])\n assert self.verify_object(dict(name='Polo Shirts', slug='polo-shirts'))\n self.add_fail(self.test_data['symbols'], '')\n self.add_success('Add')\n\n def test_update_category_slug_special(self):\n \"\"\"Test that updating a category with an incorrect slug fails\"\"\"\n rv = self.app.post(self.get_category_url('add'), data=dict(name=\n 'Pants', slug='pants'), follow_redirects=True)\n rv = self.app.post(self.get_category_url('pants', 'edit'), data=\n dict(name='Polo Shirts', slug='polo shirts'), follow_redirects=True\n )\n assert b'Edit Pants' in rv.data\n assert b'Slug is formatted incorrectly' in rv.data\n rv = self.app.post(self.get_category_url('pants', 'edit'), data=\n dict(name=':)', slug=':)'), follow_redirects=True)\n assert b'Edit Pants' in rv.data\n assert b'Slug is formatted incorrectly' in rv.data\n rv = self.app.post(self.get_category_url('pants', 'edit'), data=\n dict(name='Add', slug='add'), follow_redirects=True)\n assert b'Edit Pants' in rv.data\n assert b'Slug \"add\" is not allowed' in rv.data\n\n\n<mask token>\n",
"step-5": "import os\nimport closet\nimport unittest\nimport tempfile\n\n\ndef in_response(response, value):\n return value.encode() in response.data\n\n\ndef is_404(response):\n response.status_code == 404\n\n\nclass ClosetTestBase(unittest.TestCase):\n\n def setUp(self):\n \"\"\"Set up test environment befor each test\"\"\"\n self.db_fd, closet.app.config['DATABASE'] = tempfile.mkstemp()\n closet.app.config['TESTING'] = True\n self.app = closet.app.test_client()\n closet.init_db()\n\n def tearDown(self):\n \"\"\"Tear down test environment after each test\"\"\"\n os.close(self.db_fd)\n os.unlink(closet.app.config['DATABASE'])\n\n def login(self, username, password):\n \"\"\"Login to test website as specified user with the specified\n password\n \"\"\"\n return self.app.post('/login', data=dict(\n username=username,\n password=password\n ), follow_redirects=True)\n\n def logout(self):\n \"\"\"Logout of test website\"\"\"\n return self.app.get('/logout', follow_redirects=True)\n\n def authenticate(self):\n \"\"\"Login to test website as the standard test user\"\"\"\n self.login(closet.app.config['USERNAME'],\n closet.app.config['PASSWORD'])\n\n\nclass ClosetTestCase(ClosetTestBase):\n\n # Generic Tests\n\n def test_empty_db(self):\n \"\"\"Start with a blank database.\"\"\"\n rv = self.app.get('/')\n assert b'Your closet is empty.' in rv.data\n\n def test_login_logout(self):\n \"\"\"Make sure login and logout works\"\"\"\n rv = self.login(closet.app.config['USERNAME'],\n closet.app.config['PASSWORD'])\n assert b'You were logged in' in rv.data\n rv = self.logout()\n assert b'You were logged out' in rv.data\n rv = self.login(closet.app.config['USERNAME'] + 'x',\n closet.app.config['PASSWORD'])\n assert b'Invalid username' in rv.data\n rv = self.login(closet.app.config['USERNAME'],\n closet.app.config['PASSWORD'] + 'x')\n assert b'Invalid password' in rv.data\n\n\nclass ModelBase(unittest.TestCase):\n\n # Model based view test helpers\n\n def __init__(self, *args, **kwargs):\n super(ModelBase, self).__init__(*args, **kwargs)\n self.base_url = '/'\n self.add_url = 'add'\n self.edit_url = 'edit'\n self.delete_url = 'delete'\n self.name = ''\n self.nice_name = ''\n self.name_field = 'name'\n self.id_field = 'slug'\n self.fields = {}\n\n def get_url(self, *args):\n \"\"\"Create a URL from a tuple of strings based on the base url\"\"\"\n try:\n url = '/'.join((self.base_url, ) + args)\n except TypeError:\n url = '/'.join((self.base_url, ) + args[0])\n return url.rstrip('/')\n\n def get(self, url):\n \"\"\"Process a GET request to the app\"\"\"\n return self.app.get(get_url(url), follow_redirects=True)\n\n def post(self, url, data):\n \"\"\"Process a POST request to the app\"\"\"\n return self.app.post(get_url(url), data=data, follow_redirects=True)\n\n def verify_object(self, data):\n \"\"\"Verify the model object data\"\"\"\n rv = self.get(data[self.id_field])\n result = not is_404(rv)\n if result:\n for key, value in data:\n if not in_response(rv, value):\n return False\n return result\n\n def get_add_form(self):\n \"\"\"Test that the \"add\" form is accessible and contains all the\n fields\n \"\"\"\n rv = self.get(self.add_url)\n assert not is_404(rv)\n assert in_response(rv, 'Add {}'.format(self.nice_name))\n for field, name in self.fields:\n assert in_response(rv, name)\n return rv\n\n def get_edit_form(self, data):\n \"\"\"Test that the edit form is accessible and contains all the\n fields\n \"\"\"\n self.add_success(data)\n rv = self.get((data[self.id_field], self.edit_url))\n assert not is_404(rv)\n assert in_response(rv, 'Edit {}'.format(data[self.name_field]))\n for field, name in self.fields:\n assert in_response(rv, name)\n return rv\n\n def get_delete_confirmation_form(self, data):\n \"\"\"Test that the delete confirmation form is accessible\"\"\"\n self.add_success(data)\n rv = self.get((data[self.id_field], self.delete_url))\n assert not is_404(rv)\n assert in_response(rv, 'Delete {}'.format(data[self.name_field]))\n return rv\n\n def add_success(self, data):\n \"\"\"Test that adding a model with the given data succeeds\"\"\"\n rv = self.post(self.add_url, data)\n assert not in_response(rv, 'Add {}'.format(self.nice_name))\n assert self.verify_object(data)\n return rv\n\n def edit_success(self, id_, data):\n \"\"\"Test that updating a model with the given data succeeds\"\"\"\n rv = self.post((id_, self.edit_url), data)\n assert not in_response(rv, 'Edit {}'.format(data[self.name_field]))\n assert self.verify_object(data)\n return rv\n\n def update_success(self, data, new_data):\n \"\"\"Test that updating a model with the given data succeeds\"\"\"\n self.add_success(data)\n return self.edit_success(data[self.id_field], new_data)\n\n def delete_success(self, id_):\n \"\"\"Test that deleting the specified model succeeds\"\"\"\n rv = self.post((id_, self.delete_url), dict(post='yes'))\n assert not self.verify_object({self.id_field: id_})\n return rv\n\n def add_fail(self, data, message):\n \"\"\"Test that adding a model with the given data fails\"\"\"\n rv = self.post(self.add_url, data)\n assert in_response(rv, 'Add {}'.format(self.nice_name))\n assert in_response(rv, message)\n return rv\n\n def edit_fail(self, id_, data, message):\n \"\"\"Test that updating a model with the given data fails\"\"\"\n rv = self.post((id_, self.edit_url), data)\n assert in_response(rv, 'Edit {}'.format(data[self.name_field]))\n assert in_response(rv, message)\n return rv\n\n def update_fail(self, data, new_data, message):\n \"\"\"Test that updating a model with the given data fails\"\"\"\n self.add_success(data)\n return self.edit_fail(data[self.id_field], new_data, message)\n\n def delete_fail(self, id_, message):\n \"\"\"Test that deleting the specified model fails\"\"\"\n rv = self.post((id_, self.delete_url), dict(post='yes'))\n assert in_response(rv, message)\n assert self.verify_object({self.id_field: id_})\n return rv\n\n def bad_data_fail(self, good_data, bad_data, message):\n \"\"\"Test that adding and updating a model with the given data\n fails\n \"\"\"\n self.add_fail(bad_data, message)\n self.update_fail(good_data, bad_data, message)\n\n def add_required_field_fail(self, field, data):\n \"\"\"Test that adding a model with a blank or missing required\n field fails\n \"\"\"\n message = '{} is required'.format(self.fields[field])\n data = data.copy()\n\n data[field] = ''\n self.add_fail(data, message)\n assert not self.verify_object(data)\n\n del data[field]\n self.add_fail(data, message)\n assert not self.verify_object(data)\n\n def update_required_field_fail(self, field, data):\n \"\"\"Test that updating a model with a blank or missing required\n field fails\n \"\"\"\n message = '{} is required'.format(self.fields[field])\n data = data.copy()\n id_ = data[self.id_field]\n self.add_success(data)\n\n data[field] = ''\n self.edit_fail(id_, data, message)\n assert not self.verify_object(data)\n\n del data[field]\n self.edit_fail(id_, data, message)\n assert not self.verify_object(data)\n\n # Delete base model?\n\n def required_field_fail(self, field, data):\n \"\"\"Test that adding and updating a model with a blank or missing\n required field fails\n \"\"\"\n self.add_required_field_fail(field, data)\n self.update_required_field_fail(field, data)\n\n def add_existing_key_fail(self, data):\n \"\"\"Test that adding a model with an existing key fails\"\"\"\n message = 'exists'\n rv = self.add_success(data)\n assert not in_response(rv, message)\n return self.add_fail(data, message)\n\n def update_existing_key_fail(self, data, new_data):\n \"\"\"Test that adding a model with an existing key fails\"\"\"\n message = 'exists'\n rv = self.add_success(data)\n assert not in_response(rv, message)\n rv = self.add_success(new_data)\n assert not in_response(rv, message)\n rv = self.update_fail(data, message)\n assert self.verify_object(new_data)\n return rv\n\n def existing_key_fail(self, data, new_data):\n \"\"\"Test that adding and updating a model with an existing key\n fails\n \"\"\"\n message = 'exists'\n rv = self.add_success(data)\n assert not in_response(rv, message)\n self.add_fail(data, message)\n rv = self.add_success(new_data)\n assert not in_response(rv, message)\n self.update_fail(data, message)\n assert self.verify_object(new_data)\n\n def data_sorted(self, before_data, after_data, url):\n \"\"\"Test that the models will be sorted in the correct order\"\"\"\n self.add_success(after_data)\n self.add_success(before_data)\n\n rv = self.get(url)\n after_index = rv.data.index(after_data[self.name_field].encode())\n before_index = rv.data.index(before_data[self.name_field].encode())\n assert after_index > before_index\n\n def delete_does_not_exist_fail(self, id_):\n \"\"\"Test that deleting a model that does not exist fails\"\"\"\n assert is_404(self.get((id_, self.delete_url)))\n self.delete_fail(id_, 'does not exist')\n\n\nclass CategoryTestCase(ClosetTestBase, ModelBase):\n\n def __init__(self, *args, **kwargs):\n super(ModelBase, self).__init__(*args, **kwargs)\n self.base_url = '/categories'\n self.name = 'category'\n self.nice_name = 'Category'\n self.fields = {\n 'name': 'Name',\n 'parent': 'Parent'}\n self.test_data = {\n 'pants': {\n 'name': 'Pants',\n 'slug': 'pants'},\n 'shirts': {\n 'name': 'Shirts',\n 'slug': 'shirts'},\n 'jeans': {\n 'name': 'Jeans',\n 'slug': 'jeans',\n 'parent': 'pants'},\n 't-shirts': {\n 'name': 'T-shirts',\n 'slug': 't-shirts',\n 'parent': 'shirts'},\n 'hats': {\n 'name': 'Hats',\n 'slug': 'hats',\n 'parent': 'spam'},\n 'polo-shirts': {\n 'name': 'Polo Shirts',\n 'slug': 'polo-shirts'},\n 'symbols': {\n 'name': ':)',\n 'slug': ''},\n 'keyword': {\n 'name': 'Add',\n 'slug': 'add-1'}}\n\n def setUp(self):\n super(CategoryTestCase, self).setUp()\n self.authenticate()\n\n def test_get_category_forms(self):\n \"\"\"Test that the category forms are accessible\"\"\"\n self.get_add_form()\n self.get_edit_form(self.test_data['pants'])\n self.get_delete_confirmation_form(self.test_data['shirts'])\n\n def test_add_category(self):\n \"\"\"Test that adding a category works\"\"\"\n self.add_success(self.test_data['pants'])\n\n def test_update_category(self):\n \"\"\"Test that updating a category works\"\"\"\n self.update_success(self.test_data['pants'], self.test_data['shirts'])\n\n def test_delete_category(self):\n \"\"\"Test that deleting a category works\"\"\"\n self.add_success(self.test_data['pants'])\n self.delete_success('pants')\n\n def test_add_child_category(self):\n \"\"\"Test that adding a child category works\"\"\"\n self.add_success(self.test_data['pants'])\n rv = self.get('pants')\n assert in_response(rv, 'This category is empty.')\n self.add_success(self.test_data['jeans'])\n rv = self.get('pants')\n assert not in_response(rv, 'This category is empty.')\n assert in_response(rv, 'Jeans')\n\n def test_update_child_category(self):\n \"\"\"Test that updating child categories works\"\"\"\n self.add_success(self.test_data['pants'])\n self.add_success(self.test_data['shirts'])\n\n self.add_success(self.test_data['jeans'])\n rv = self.get('pants')\n assert not in_response(rv, 'This category is empty.')\n assert in_response(rv, 'Jeans')\n\n self.edit_success('jeans', self.test_data['t-shirts'])\n rv = self.get('pants')\n assert in_response(rv, 'This category is empty.')\n assert not in_response(rv, 'Jeans')\n assert not in_response(rv, 'T-Shirts')\n rv = self.get('shirts')\n assert not in_response(rv, 'This category is empty.')\n assert in_response(rv, 'T-Shirts')\n assert not in_response(rv, 'Jeans')\n\n def test_name_required(self):\n \"\"\"Test that adding/updating a category without a name fails\"\"\"\n self.required_field_fail('name', self.test_data['pants'])\n\n def test_parent_does_not_exist(self):\n \"\"\"Test that adding/updating a category with a non-existent\n parent fails\n \"\"\"\n self.bad_data_fail(self.test_data['pants'],\n self.test_data['hats'], 'Parent does not exist')\n\n def test_category_already_exists(self):\n self.existing_key_fail(\n self.test_data['pants'],\n self.test_data['shirts'])\n\n def test_categories_are_sorted(self):\n \"\"\"Test that categories are sorted alphabetically by name\"\"\"\n self.data_sorted(self.test_data['shirts'], self.test_data['pants'])\n\n def test_delete_category_does_not_exist(self):\n \"\"\"Test that deleting a category that doesn't exist fails\"\"\"\n self.delete_does_not_exist_fail('hats')\n\n def test_add_category_slug_special(self):\n \"\"\"Test that adding a category with an incorrect name fails\"\"\"\n self.add_success(self.test_data['polo-shirts'])\n assert self.verify_object(dict(name='Polo Shirts', slug='polo-shirts'))\n\n self.add_fail(self.test_data['symbols'], '')\n\n self.add_success('Add')\n\n def test_update_category_slug_special(self):\n \"\"\"Test that updating a category with an incorrect slug fails\"\"\"\n rv = self.app.post(self.get_category_url('add'), data=dict(\n name='Pants', slug='pants'\n ), follow_redirects=True)\n\n rv = self.app.post(self.get_category_url('pants', 'edit'), data=dict(\n name='Polo Shirts', slug='polo shirts'\n ), follow_redirects=True)\n assert b'Edit Pants' in rv.data\n assert b'Slug is formatted incorrectly' in rv.data\n\n rv = self.app.post(self.get_category_url('pants', 'edit'), data=dict(\n name=':)', slug=':)'\n ), follow_redirects=True)\n assert b'Edit Pants' in rv.data\n assert b'Slug is formatted incorrectly' in rv.data\n\n rv = self.app.post(self.get_category_url('pants', 'edit'), data=dict(\n name='Add', slug='add'\n ), follow_redirects=True)\n assert b'Edit Pants' in rv.data\n assert b'Slug \"add\" is not allowed' in rv.data\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-ids": [
31,
43,
44,
49,
56
]
}
|
[
31,
43,
44,
49,
56
] |
import subprocess
import logging
import time
import argparse
import threading
import os
import matplotlib.pyplot as plt
import numpy as np
import argparse
def runWeka(wekapath, modelpath, datapath):
os.chdir(wekapath)
proc = subprocess.Popen(['/usr/bin/java', '-classpath', 'weka.jar', 'weka.classifiers.functions.MultilayerPerceptron', '-l', modelpath, '-T', datapath, '-p', '0'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = proc.communicate()
return out
"""
Test offline trained model in Weka on a test set
"""
if __name__ == '__main__':
#Input arguments
my_arg_parser = argparse.ArgumentParser()
my_arg_parser.add_argument("-p","--weka-path", help="Path to Weka application folder", dest="wekapath")
my_arg_parser.add_argument("-m","--weka-model", help="Path to Weka serialized model", dest="modelpath")
my_arg_parser.add_argument("-d","--weka-dataset", help="Path to testset", default="", dest="datapath")
my_args = my_arg_parser.parse_args()
#wekapath="/home/mkulin/Desktop/eWINE/Experiments/Repository/Testing/Weka_stable-3-6/weka/"
#modelpath="/home/mkulin/Desktop/eWINE/Experiments/Repository/Testing/Neural_network_MACperf_prediction.model"
#datapath="/home/mkulin/Desktop/eWINE/Experiments/Repository/Testing/802_15_4_perf_30s_testset_Weka.csv"
predictions=runWeka(my_args.wekapath, my_args.modelpath, my_args.datapath)
k=1
matrix = []
for row in predictions.split('\n'):
if k<6:
k=k+1
continue
else:
if row=='':
continue
instance, actual, predicted, error=row.split()
matrix.append([int(instance), float(actual), float(predicted)])
matrix=np.array(matrix)
matrix[:,2][matrix[:,2]<0]=0 #disable negative predictions
#Visualize results
plt.style.use('ggplot')
f=plt.figure(1)
plt.plot(matrix[:,0], matrix[:,1], label='actual', color='red')
plt.plot(matrix[:,0], matrix[:,2], label='predicted', color='royalblue')
plt.xlabel('Instance number')
plt.ylabel('Packet Loss Rate')
plt.grid(True)
plt.legend(loc=1)
plt.show()
|
normal
|
{
"blob_id": "a1f0eced5d122fe8557ebc4d707c87b4194513e3",
"index": 4976,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef runWeka(wekapath, modelpath, datapath):\n os.chdir(wekapath)\n proc = subprocess.Popen(['/usr/bin/java', '-classpath', 'weka.jar',\n 'weka.classifiers.functions.MultilayerPerceptron', '-l', modelpath,\n '-T', datapath, '-p', '0'], stdout=subprocess.PIPE, stderr=\n subprocess.PIPE)\n out, err = proc.communicate()\n return out\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef runWeka(wekapath, modelpath, datapath):\n os.chdir(wekapath)\n proc = subprocess.Popen(['/usr/bin/java', '-classpath', 'weka.jar',\n 'weka.classifiers.functions.MultilayerPerceptron', '-l', modelpath,\n '-T', datapath, '-p', '0'], stdout=subprocess.PIPE, stderr=\n subprocess.PIPE)\n out, err = proc.communicate()\n return out\n\n\n<mask token>\nif __name__ == '__main__':\n my_arg_parser = argparse.ArgumentParser()\n my_arg_parser.add_argument('-p', '--weka-path', help=\n 'Path to Weka application folder', dest='wekapath')\n my_arg_parser.add_argument('-m', '--weka-model', help=\n 'Path to Weka serialized model', dest='modelpath')\n my_arg_parser.add_argument('-d', '--weka-dataset', help=\n 'Path to testset', default='', dest='datapath')\n my_args = my_arg_parser.parse_args()\n predictions = runWeka(my_args.wekapath, my_args.modelpath, my_args.datapath\n )\n k = 1\n matrix = []\n for row in predictions.split('\\n'):\n if k < 6:\n k = k + 1\n continue\n else:\n if row == '':\n continue\n instance, actual, predicted, error = row.split()\n matrix.append([int(instance), float(actual), float(predicted)])\n matrix = np.array(matrix)\n matrix[:, 2][matrix[:, 2] < 0] = 0\n plt.style.use('ggplot')\n f = plt.figure(1)\n plt.plot(matrix[:, 0], matrix[:, 1], label='actual', color='red')\n plt.plot(matrix[:, 0], matrix[:, 2], label='predicted', color='royalblue')\n plt.xlabel('Instance number')\n plt.ylabel('Packet Loss Rate')\n plt.grid(True)\n plt.legend(loc=1)\n plt.show()\n",
"step-4": "import subprocess\nimport logging\nimport time\nimport argparse\nimport threading\nimport os\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport argparse\n\n\ndef runWeka(wekapath, modelpath, datapath):\n os.chdir(wekapath)\n proc = subprocess.Popen(['/usr/bin/java', '-classpath', 'weka.jar',\n 'weka.classifiers.functions.MultilayerPerceptron', '-l', modelpath,\n '-T', datapath, '-p', '0'], stdout=subprocess.PIPE, stderr=\n subprocess.PIPE)\n out, err = proc.communicate()\n return out\n\n\n<mask token>\nif __name__ == '__main__':\n my_arg_parser = argparse.ArgumentParser()\n my_arg_parser.add_argument('-p', '--weka-path', help=\n 'Path to Weka application folder', dest='wekapath')\n my_arg_parser.add_argument('-m', '--weka-model', help=\n 'Path to Weka serialized model', dest='modelpath')\n my_arg_parser.add_argument('-d', '--weka-dataset', help=\n 'Path to testset', default='', dest='datapath')\n my_args = my_arg_parser.parse_args()\n predictions = runWeka(my_args.wekapath, my_args.modelpath, my_args.datapath\n )\n k = 1\n matrix = []\n for row in predictions.split('\\n'):\n if k < 6:\n k = k + 1\n continue\n else:\n if row == '':\n continue\n instance, actual, predicted, error = row.split()\n matrix.append([int(instance), float(actual), float(predicted)])\n matrix = np.array(matrix)\n matrix[:, 2][matrix[:, 2] < 0] = 0\n plt.style.use('ggplot')\n f = plt.figure(1)\n plt.plot(matrix[:, 0], matrix[:, 1], label='actual', color='red')\n plt.plot(matrix[:, 0], matrix[:, 2], label='predicted', color='royalblue')\n plt.xlabel('Instance number')\n plt.ylabel('Packet Loss Rate')\n plt.grid(True)\n plt.legend(loc=1)\n plt.show()\n",
"step-5": "import subprocess\nimport logging\nimport time\nimport argparse\nimport threading\nimport os\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport argparse\n\ndef runWeka(wekapath, modelpath, datapath):\n os.chdir(wekapath)\n proc = subprocess.Popen(['/usr/bin/java', '-classpath', 'weka.jar', 'weka.classifiers.functions.MultilayerPerceptron', '-l', modelpath, '-T', datapath, '-p', '0'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n out, err = proc.communicate()\n return out\n\n\n\"\"\"\nTest offline trained model in Weka on a test set\n\"\"\"\nif __name__ == '__main__':\n \n #Input arguments\n my_arg_parser = argparse.ArgumentParser()\n my_arg_parser.add_argument(\"-p\",\"--weka-path\", help=\"Path to Weka application folder\", dest=\"wekapath\")\n my_arg_parser.add_argument(\"-m\",\"--weka-model\", help=\"Path to Weka serialized model\", dest=\"modelpath\")\n my_arg_parser.add_argument(\"-d\",\"--weka-dataset\", help=\"Path to testset\", default=\"\", dest=\"datapath\")\n\n my_args = my_arg_parser.parse_args()\n \n #wekapath=\"/home/mkulin/Desktop/eWINE/Experiments/Repository/Testing/Weka_stable-3-6/weka/\"\n #modelpath=\"/home/mkulin/Desktop/eWINE/Experiments/Repository/Testing/Neural_network_MACperf_prediction.model\"\n #datapath=\"/home/mkulin/Desktop/eWINE/Experiments/Repository/Testing/802_15_4_perf_30s_testset_Weka.csv\"\n \n predictions=runWeka(my_args.wekapath, my_args.modelpath, my_args.datapath)\n \n k=1\n matrix = []\n for row in predictions.split('\\n'):\n \n if k<6:\n k=k+1\n continue\n else:\n if row=='':\n continue\n instance, actual, predicted, error=row.split()\n matrix.append([int(instance), float(actual), float(predicted)])\n \n matrix=np.array(matrix) \n matrix[:,2][matrix[:,2]<0]=0 #disable negative predictions\n \n #Visualize results \n plt.style.use('ggplot')\n f=plt.figure(1)\n plt.plot(matrix[:,0], matrix[:,1], label='actual', color='red')\n plt.plot(matrix[:,0], matrix[:,2], label='predicted', color='royalblue')\n plt.xlabel('Instance number')\n plt.ylabel('Packet Loss Rate')\n plt.grid(True)\n plt.legend(loc=1)\n \n plt.show()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
"""
爱丽丝和鲍勃有不同大小的糖果棒:A[i] 是爱丽丝拥有的第 i 根糖果棒的大小,B[j] 是鲍勃拥有的第 j 根糖果棒的大小。
因为他们是朋友,所以他们想交换一根糖果棒,这样交换后,他们都有相同的糖果总量。(一个人拥有的糖果总量是他们拥有的糖果棒大小的总和。)
返回一个整数数组 ans,其中 ans[0] 是爱丽丝必须交换的糖果棒的大小,ans[1] 是 Bob 必须交换的糖果棒的大小。
如果有多个答案,你可以返回其中任何一个。保证答案存在。
"""
def fairCandySwap(A, B):
sumA, sumB = sum(A), sum(B)
setA, setB = set(A), set(B)
delta = (sumA -sumB) // 2
for j in setB:
if j + delta in setA:
return (j+delta, j)
print(fairCandySwap(A = [1,1], B = [2,2]))
print(fairCandySwap(A = [1,2], B = [2,3]))
print(fairCandySwap(A = [2], B = [1,3]))
print(fairCandySwap(A = [1,2,5], B = [2,4]))
|
normal
|
{
"blob_id": "9abc5f18e2eb07afe6bc31d6bd27298350707d1d",
"index": 962,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef fairCandySwap(A, B):\n sumA, sumB = sum(A), sum(B)\n setA, setB = set(A), set(B)\n delta = (sumA - sumB) // 2\n for j in setB:\n if j + delta in setA:\n return j + delta, j\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef fairCandySwap(A, B):\n sumA, sumB = sum(A), sum(B)\n setA, setB = set(A), set(B)\n delta = (sumA - sumB) // 2\n for j in setB:\n if j + delta in setA:\n return j + delta, j\n\n\nprint(fairCandySwap(A=[1, 1], B=[2, 2]))\nprint(fairCandySwap(A=[1, 2], B=[2, 3]))\nprint(fairCandySwap(A=[2], B=[1, 3]))\nprint(fairCandySwap(A=[1, 2, 5], B=[2, 4]))\n",
"step-4": "\"\"\"\n爱丽丝和鲍勃有不同大小的糖果棒:A[i] 是爱丽丝拥有的第 i 根糖果棒的大小,B[j] 是鲍勃拥有的第 j 根糖果棒的大小。\n\n因为他们是朋友,所以他们想交换一根糖果棒,这样交换后,他们都有相同的糖果总量。(一个人拥有的糖果总量是他们拥有的糖果棒大小的总和。)\n\n返回一个整数数组 ans,其中 ans[0] 是爱丽丝必须交换的糖果棒的大小,ans[1] 是 Bob 必须交换的糖果棒的大小。\n\n如果有多个答案,你可以返回其中任何一个。保证答案存在。\n\"\"\"\n\ndef fairCandySwap(A, B):\n sumA, sumB = sum(A), sum(B)\n setA, setB = set(A), set(B)\n delta = (sumA -sumB) // 2\n for j in setB:\n if j + delta in setA:\n return (j+delta, j)\n\nprint(fairCandySwap(A = [1,1], B = [2,2]))\nprint(fairCandySwap(A = [1,2], B = [2,3]))\nprint(fairCandySwap(A = [2], B = [1,3]))\nprint(fairCandySwap(A = [1,2,5], B = [2,4]))\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
"""
Registers $v0 and $v1 are used to return values from functions.
Registers $t0 – $t9 are caller-saved registers that are used to
hold temporary quantities that need not be preserved across calls
Registers $s0 – $s7 (16–23) are callee-saved registers that hold long-lived
values that should be preserved across calls. They are preserved across calls
Register $gp is a global pointer that points to the middle of a 64K block
of memory in the static data segment. Preserve across calls
Register $fp is the frame pointer. Register $fp is saved by every procedure
that allocates a new stack frame.Preserve across calls
Register $sp is the stack pointer, which points to the last location on
the stack(Points to Free Memory). Preserve across calls
Register $ra only needs to be saved if the callee itself makes a call.
Register $s0 <- Prototypes table
Register $s1 <- Class Names table
Register $s2 <- Class parents table
0($fp): some local variable
4(%fp): old $ra
8(%fp): old $fp
12(%fp): 1st argument Self
.....
Class Name table layout
offset 0 - "Class1"
offset 4 - "Class2"
offset 8 - "Class3"
.....
Prototypes Table layout
offset 0 - protObj1
offset 4 - Obj1_init
offset 8 - protObj2
offset 12 - Obj2_init
.....
Dispatch Table layout:
offset 0 - addres of method m0
offset 1 - addres of method m1
.....
Prototype layout:
offset 0 - Class tag : int that identifies the class of the object
offset 4 - Object size :(in 32-bit words) = 12 + 4 * (number of attributes)
offset 8 - Dispatch pointer : pointer to the table of virtual methods
offset 12. . . Attributes
"""
import sys
sys.path.append('..')
import commons.cil_ast as cil
import commons.visitor as visitor
from commons.settings import *
class MipsVisitor:
"""
Mips Visitor Class.
This visitor will process the AST of the generated CIL and write the mips code to a file.
"""
def __init__(self, inherit_graph, output_file="mips_code.mips"):
self.inherit_graph, _ = inherit_graph
self.offset = dict()
self.type_index = []
self.dispatchtable_code = []
self.prototypes_code = []
self.cur_labels_id = 0
self.output_file = output_file
# ======================================================================
# =[ UTILS ]============================================================
# ======================================================================
def push(self):
self.write_file('sw $a0 0($sp)')
self.write_file('addiu $sp $sp -4')
def pop(self, dest=None):
self.write_file(f'addiu $sp $sp 4')
def write_file(self, msg, mode = "a", tabbed=True):
f = open(self.output_file, mode)
f.write("{}{}\n".format("\t" if tabbed else "", msg))
f.close()
def allocate_memory(self, size=None, register=False):
if register:
self.write_file('move $a0 {}'.format(size))
else:
if size:
self.write_file('li $a0 {}'.format(size))
self.write_file('li $v0 9')
self.write_file('syscall')
def new_labels_id(self):
self.cur_labels_id += 1
return self.cur_labels_id
# ======================================================================
@visitor.on('node')
def visit(self, node):
pass
################################ PROGRAM #####################################
@visitor.when(cil.Program)
def visit(self, node: cil.Program):
self.write_file('', "w")
#-------------------- DATA SECTION ----------------------------
self.write_file('.data', tabbed = False)
# Declare static data
self.static_datas()
# Transpile CIL data section
for data in node.data_section:
self.visit(data)
self.write_file('')
# Declare class name strings and map class index
for i in range(len(node.type_section)):
self.type_index.append(node.type_section[i].type_name)
self.write_file('classname_{}: .asciiz \"{}\"'.format(node.type_section[i].type_name,node.type_section[i].type_name))
# Declare void type
self.write_file(f'{VOID_MIPS_NAME}: .asciiz \"\"')
#-------------------- TEXT SECTION ----------------------------
self.write_file('\n.text')
self.entry()
self.write_file('\n########## STATIC FUNCTIONS ##########\n')
# CONFORMS
self.conforms()
# IS_VOID
self.isvoid()
# OBJECT
self.object_abort()
self.object_copy()
self.object_typename()
# STRING
self.string_length()
self.string_concat()
self.string_substr()
# IO
self.io_in_int()
self.io_in_string()
self.io_out_int()
self.io_out_string()
for t in node.type_section:
self.visit(t)
self.write_file('\n############## TABLES ################\n')
# Generate method that creates classes's name table
self.write_file('function_build_class_name_table:', tabbed=False)
self.allocate_memory(len(node.type_section) * 4)
self.write_file('move $s1 $v0') # save the address of the table in a register
for i in range(len(node.type_section)):
self.write_file('la $t1 classname_{}'.format(node.type_section[i].type_name))
self.write_file('sw $t1 {}($s1)'.format(4 * i))
self.write_file('')
# Generate method that allocates memory for prototypes table
self.write_file('function_allocate_prototypes_table:', tabbed=False)
self.allocate_memory(8 * len(self.type_index))
self.write_file('move $s0 $v0') # save the address of the table in a register
self.write_file('')
# Generate mips method that builds prototypes
self.write_file('function_build_prototypes:', tabbed=False)
for ins in self.prototypes_code:
self.write_file(ins)
self.write_file('')
# Generate mips method that builds dispatch tables
self.write_file('function_build_dispatch_tables:', tabbed=False)
for ins in self.dispatchtable_code:
self.write_file(ins)
self.write_file('')
# Generate method that builds class parents table
self.write_file('function_build_class_parents_table:', tabbed=False)
self.allocate_memory(4 * len(self.type_index))
self.write_file('move $s2 $v0') # save the address of the table in a register
self.write_file('')
# Fill table entry for each class type
for parent in self.inherit_graph.keys():
p_index = self.type_index.index(parent)
for child in self.inherit_graph[parent]:
ch_index = self.type_index.index(child.name)
self.write_file(f'li $t0 {ch_index}')
self.write_file(f'mul $t0 $t0 4')
self.write_file(f'add $t0 $t0 $s2')
self.write_file(f'li $t1 {p_index}')
self.write_file(f'sw $t1 0($t0)')
self.write_file('')
self.write_file('')
# Generate COOL functions
self.write_file('\n########### COOL FUNCTIONS ##########\n')
for func in node.code_section:
is_built_in = False
if not INIT_CIL_SUFFIX in func.name:
is_built_in = [x for x in BUILT_IN_CLASSES if f'{x}_' in func.name] != []
if not is_built_in:
self.visit(func)
self.write_file('\n#####################################\n')
################################ .DATA #######################################
@visitor.when(cil.Data)
def visit(self, node: cil.Data):
self.write_file(f'{node.dest}: .asciiz \"{str(node.value.encode())[2:-1]}\"')
################################ TYPES #######################################
@visitor.when(cil.Type)
def visit(self, node: cil.Type):
# Allocate
self.dispatchtable_code.append(f'# Type {node.type_name}')
self.dispatchtable_code.append('li $a0 {}'.format(4 * len(node.methods)))
self.dispatchtable_code.append('li $v0 9')
self.dispatchtable_code.append('syscall')
# Add dispatch table code
for i in range(len(node.methods)):
self.dispatchtable_code.append('la $t1 function_{}'.format(node.methods[i].function_name))
self.dispatchtable_code.append('sw $t1 {}($v0)'.format(4 * i))
self.dispatchtable_code.append('lw $t0 {}($s0)'.format(8 * self.type_index.index(node.type_name)))
self.dispatchtable_code.append('sw $v0 8($t0)')
self.dispatchtable_code.append('')
# Allocate
self.prototypes_code.append(f'# Type {node.type_name}')
self.prototypes_code.append('li $a0 {}'.format(12 + 4 * len(node.attributes)))
self.prototypes_code.append('li $v0 9')
self.prototypes_code.append('syscall')
# Add prototype code
class_index = self.type_index.index(node.type_name)
self.prototypes_code.append('li $a0 {}'.format(class_index))
self.prototypes_code.append('sw $a0 0($v0)')
self.prototypes_code.append('li $a0 {}'.format(12 + 4 * len(node.attributes)))
self.prototypes_code.append('sw $a0 4($v0)')
self.prototypes_code.append('sw $v0 {}($s0)'.format(8 * class_index))
self.prototypes_code.append('')
@visitor.when(cil.Function)
def visit(self, node: cil.Function):
self.write_file(f'function_{node.name}:', tabbed=False)
# Set up stack frame
self.write_file(f'move $fp, $sp')
self.write_file(f'subiu $sp, $sp, {4 * len(node.vlocals)}')
# Register arguments offsets
for i in range(len(node.args)):
self.offset[node.args[i].name] = 12 + i * 4
# Register locals offsets
for i in range(len(node.vlocals)):
self.offset[node.vlocals[i].name] = i * (-4)
# Generate mips code for the function's body
for inst in node.body:
# Equal node needs unique id for its labels
if isinstance(inst, cil.Equal) or isinstance(inst, cil.Div):
inst.id = self.new_labels_id()
self.visit(inst)
# Pop the stack frame
self.write_file(f'addiu $sp, $sp, {4 * len(node.vlocals)}')
# Return
self.write_file('jr $ra')
self.write_file('')
############################## ASSIGNMENT ####################################
@visitor.when(cil.Assign)
def visit(self, node: cil.Assign):
self.write_file('# ASSIGN')
self.write_file('lw $a0, {}($fp)'.format(self.offset[node.source]))
self.write_file('sw $a0, {}($fp)'.format(self.offset[node.dest]))
self.write_file('')
############################# ARITHMETICS ####################################
@visitor.when(cil.Plus)
def visit(self, node: cil.Plus):
self.write_file('# +')
self.write_file('lw $a0, {}($fp)'.format(self.offset[node.left]))
self.write_file('lw $a1, {}($fp)'.format(self.offset[node.right]))
self.write_file('add $a0, $a0, $a1')
self.write_file('sw $a0, {}($fp)'.format(self.offset[node.dest]))
self.write_file('')
@visitor.when(cil.Minus)
def visit(self, node: cil.Minus):
self.write_file('# -')
if isinstance(node.left, int):
self.write_file('li $a0 {}'.format(node.left))
else:
self.write_file('lw $a0, {}($fp)'.format(self.offset[node.left]))
self.write_file('lw $a1, {}($fp)'.format(self.offset[node.right]))
self.write_file('sub $a0, $a0, $a1')
self.write_file('sw $a0, {}($fp)'.format(self.offset[node.dest]))
self.write_file('')
@visitor.when(cil.Mult)
def visit(self, node: cil.Mult):
self.write_file('# *')
self.write_file('lw $a0, {}($fp)'.format(self.offset[node.left]))
self.write_file('lw $a1, {}($fp)'.format(self.offset[node.right]))
self.write_file('mul $a0, $a0, $a1')
self.write_file('sw $a0, {}($fp)'.format(self.offset[node.dest]))
self.write_file('')
@visitor.when(cil.Div)
def visit(self, node: cil.Div):
self.write_file('# /')
self.write_file('lw $a0, {}($fp)'.format(self.offset[node.left]))
self.write_file('lw $a1, {}($fp)'.format(self.offset[node.right]))
self.write_file(f'beqz $a1 _div_error_{node.id}_')
self.write_file('div $a0, $a0, $a1')
self.write_file('sw $a0, {}($fp)'.format(self.offset[node.dest]))
self.write_file(f'b _div_end_{node.id}_')
self.write_file(f'_div_error_{node.id}_:',tabbed=False)
self.write_file('la $a0 _div_zero_msg')
self.write_file('li $v0 4')
self.write_file('syscall')
self.write_file('la $a0 _abort_msg')
self.write_file('li $v0 4')
self.write_file('syscall')
self.write_file('li $v0 10')
self.write_file('syscall')
self.write_file(f'_div_end_{node.id}_:',tabbed=False)
############################# COMPARISONS ####################################
@visitor.when(cil.Equal)
def visit(self, node: cil.Equal):
self.write_file('lw $t0 {}($fp)'.format(self.offset[node.left]))
self.write_file('lw $t1 {}($fp)'.format(self.offset[node.right]))
self.write_file(f'beq $t0 $zero _eq_false_{node.id}_') # $t0 can't also be void
self.write_file(f'beq $t1 $zero _eq_false_{node.id}_') # $t1 can't also be void
self.write_file('lw $a0 0($t0)') # get object 1 tag
self.write_file('lw $a1 0($t1)') # get object 2 tag
self.write_file(f'bne $a0 $a1 _eq_false_{node.id}_') # compare tags
self.write_file('li $a2 {}'.format(self.type_index.index(INTEGER_CLASS))) # load int tag
self.write_file(f'beq $a0 $a2 _eq_int_bool_{node.id}') # Integers
self.write_file('li $a2 {}'.format(self.type_index.index(BOOLEAN_CLASS))) # load bool tag
self.write_file(f'beq $a0 $a2 _eq_int_bool_{node.id}') # Booleans
self.write_file('li $a2 {}'.format(self.type_index.index(STRING_CLASS))) # load string tag
self.write_file(f'bne $a0 $a2 _not_basic_type_{node.id}_') # Not a primitive type
# equal strings
# verify len of the strings
self.write_file(f'_eq_str_{node.id}_:', tabbed = False) # handle strings
self.write_file('lw $t3 12($t0)') # get string_1 size
self.write_file('lw $t3 12($t3)') # unbox string_1 size
self.write_file('lw $t4, 12($t1)') # get string_2 size
self.write_file('lw $t4, 12($t4)') # unbox string_2 size
self.write_file(f'bne $t3 $t4 _eq_false_{node.id}_') # string size are distinct
self.write_file(f'beq $t3 $0 _eq_true_{node.id}_') # if strings are empty
# Verify ascii secuences
self.write_file('addu $t0 $t0 16') # Point to start of string s1
self.write_file('lw $t0 0($t0)')
self.write_file('addu $t1 $t1 16') # Point to start of string s2
self.write_file('lw $t1 0($t1)')
self.write_file('move $t2 $t3') # Keep string length as counter
self.write_file(f'_verify_ascii_sequences_{node.id}_:', tabbed = False)
self.write_file('lb $a0 0($t0)') # get char of s1
self.write_file('lb $a1 0($t1)') # get char of s2
self.write_file(f'bne $a0 $a1 _eq_false_{node.id}_') # char s1 /= char s2
self.write_file('addu $t0 $t0 1')
self.write_file('addu $t1 $t1 1')
self.write_file('addiu $t2 $t2 -1') # Decrement counter
self.write_file(f'bnez $t2 _verify_ascii_sequences_{node.id}_')
self.write_file(f'b _eq_true_{node.id}_') # end of strings
self.write_file(f'_not_basic_type_{node.id}_:', tabbed = False)
self.write_file(f'bne $t0 $t1 _eq_false_{node.id}_')
self.write_file(f'b _eq_true_{node.id}_')
# equal int or boolf
self.write_file(f'_eq_int_bool_{node.id}:', tabbed = False) # handles booleans and ints
self.write_file('lw $a3 12($t0)') # load value variable_1
self.write_file('lw $t4 12($t1)') # load variable_2
self.write_file(f'bne $a3 $t4 _eq_false_{node.id}_') # value of int or bool are distinct
#return true
self.write_file(f'_eq_true_{node.id}_:', tabbed = False)
self.write_file('li $a0 1')
self.write_file('sw $a0 {}($fp)'.format(self.offset[node.dest]))
self.write_file(f'b end_equal_{node.id}_')
#return false
self.write_file(f'_eq_false_{node.id}_:', tabbed = False)
self.write_file('li $a0 0')
self.write_file('sw $a0 {}($fp)'.format(self.offset[node.dest]))
self.write_file(f'end_equal_{node.id}_:', tabbed = False)
@visitor.when(cil.LessThan)
def visit(self, node: cil.LessThan):
self.write_file('# <')
self.write_file('lw $a1, {}($fp)'.format(self.offset[node.left]))
self.write_file('lw $a2, {}($fp)'.format(self.offset[node.right]))
self.write_file('slt $a0, $a1, $a2'.format(self.offset[node.right]))
self.write_file('sw $a0, {}($fp)'.format(self.offset[node.dest]))
self.write_file('')
@visitor.when(cil.EqualOrLessThan)
def visit(self, node: cil.EqualOrLessThan):
self.write_file('# <=')
self.write_file('lw $a1, {}($fp)'.format(self.offset[node.left]))
self.write_file('lw $a2, {}($fp)'.format(self.offset[node.right]))
self.write_file('sle $a0, $a1, $a2'.format(self.offset[node.right]))
self.write_file('sw $a0, {}($fp)'.format(self.offset[node.dest]))
self.write_file('')
############################## ATTRIBUTES ####################################
@visitor.when(cil.GetAttrib)
def visit(self, node: cil.GetAttrib):
self.write_file('# GETATTR')
self.write_file(f'lw $a1 {self.offset[node.instance]}($fp)')
self.write_file(f'lw $a0 {12 + 4 * node.attribute}($a1)')
self.write_file(f'sw $a0 {self.offset[node.dest]}($fp)')
self.write_file('')
@visitor.when(cil.SetAttrib)
def visit(self, node: cil.SetAttrib):
self.write_file('# SETATTR')
self.write_file(f'lw $a1 {self.offset[node.instance]}($fp)')
if isinstance(node.src, int):
self.write_file(f'li $a0, {node.src}')
elif node.src[:5] == "data_":
self.write_file(f'la $a0, {node.src}')
else:
self.write_file(f'lw $a0 {self.offset[node.src]}($fp)')
self.write_file(f'sw $a0 {12 + 4 * node.attribute}($a1)')
self.write_file('')
################################ MEMORY ######################################
@visitor.when(cil.TypeOf)
def visit(self, node: cil.TypeOf):
self.write_file('# TYPEOF')
self.write_file(f'lw $a1 {self.offset[node.instance]}($fp)')
self.write_file(f'lw $a0 0($a1)')
self.write_file(f'sw $a0 {self.offset[node.dest]}($fp)')
self.write_file('')
@visitor.when(cil.Allocate)
def visit(self, node: cil.Allocate):
self.write_file('# ALLOCATE')
if node.ttype == VOID_TYPE:
self.write_file(f'la $v0 {VOID_MIPS_NAME}')
self.write_file(f'sw $v0 {self.offset[node.dest]}($fp)')
else:
offset_proto = self.type_index.index(node.ttype) * 8
self.write_file('lw $t0 {}($s0)'.format(offset_proto))
self.write_file('sw $t0, 0($sp)')
self.write_file('addiu $sp, $sp, -4')
self.write_file('')
self.visit(cil.Call(dest = node.dest, f = "Object_copy"))
self.write_file('addiu $sp, $sp, 4')
self.write_file('')
########################## DISPATCH STATEMENTS ###############################
@visitor.when(cil.Call)
def visit(self, node: cil.Call):
self.write_file('# CALL')
# Save return address and frame pointer
self.write_file(f'addiu $sp, $sp, -8')
self.write_file(f'sw $ra, 4($sp)')
self.write_file(f'sw $fp, 8($sp)')
# Call the function
self.write_file(f'jal function_{node.f}')
# Restore return address and frame pointer
self.write_file(f'lw $fp, 8($sp)')
self.write_file(f'lw $ra, 4($sp)')
self.write_file(f'addiu $sp, $sp, 8')
if node.dest:
self.write_file(f'sw $v0 {self.offset[node.dest]}($fp)')
self.write_file('')
@visitor.when(cil.VCall)
def visit(self, node: cil.VCall):
self.write_file('# VCALL')
# Save return address and frame pointer
self.write_file(f'addiu $sp, $sp, -8')
self.write_file(f'sw $ra, 4($sp)')
self.write_file(f'sw $fp, 8($sp)')
if node.ttype[0] == "_":
# If node.type is a local CIL variable
self.write_file(f'lw $a2, {self.offset[node.ttype]}($fp)')
else:
# If node.type a type name
self.write_file(f'li $a2, {self.type_index.index(node.ttype)}')
self.write_file(f'mulu $a2, $a2, 8')
self.write_file(f'addu $a2, $a2, $s0')
self.write_file(f'lw $a1, 0($a2)')
# Check the dispatch table for the method's address
self.write_file(f'lw $a2, 8($a1)')
self.write_file(f'lw $a0 {node.f * 4}($a2)')
# Call the function at 0($a0)
self.write_file(f'jalr $a0')
# Restore return address and frame pointer
self.write_file(f'lw $fp, 8($sp)')
self.write_file(f'lw $ra, 4($sp)')
self.write_file(f'addiu $sp, $sp, 8')
# Save value after restoring $fp
self.write_file(f'sw $v0 {self.offset[node.dest]}($fp)')
# Check prototypes table for the dynamic type
if node.ttype[0] != '_':
self.write_file(f'li $a2, {self.type_index.index(node.ttype)}')
else:
self.write_file(f'lw $a2, {self.offset[node.ttype]}($fp)')
self.write_file('')
@visitor.when(cil.PushParam)
def visit(self, node: cil.PushParam):
self.write_file('# PUSHPARAM')
if node.name[0] != "_":
self.write_file('li $a0, {}'.format(self.type_index.index(node.name)))
else:
self.write_file('lw $a0, {}($fp)'.format(self.offset[node.name]))
self.push()
self.write_file('')
@visitor.when(cil.PopParam)
def visit(self, node: cil.PopParam):
self.write_file('# POPPARAM')
self.pop(node.name)
self.write_file('')
@visitor.when(cil.Return)
def visit(self, node: cil.Return):
self.write_file('# RETURN')
self.write_file('lw $v0, {}($fp)'.format(self.offset[node.value]))
################################# JUMPS ######################################
@visitor.when(cil.Label)
def visit(self, node: cil.Label):
self.write_file('_cil_label_{}:'.format(node.name), tabbed=False)
@visitor.when(cil.Goto)
def visit(self, node: cil.Goto):
self.write_file('# GOTO')
self.write_file('j _cil_label_{}'.format(node.label))
self.write_file('')
@visitor.when(cil.IfGoto)
def visit(self, node: cil.IfGoto):
self.write_file('# IF GOTO')
self.write_file('lw $a0, {}($fp)'.format(self.offset[node.condition]))
self.write_file('bnez $a0, _cil_label_{}'.format(node.label))
self.write_file('')
############################## STATIC CODE ###################################
#----- STATIC DATAs
def static_datas(self):
# Buffer for reading strings
self.write_file('str_buffer: .space 1025')
self.write_file('')
# Declare error mensages
self.write_file('_index_negative_msg: .asciiz \"Index to substr is negative\\n\"')
self.write_file('_index_out_msg: .asciiz \"Index out range exception\\n\"')
self.write_file('_abort_msg: \"Execution aborted\\n\"')
self.write_file('_div_zero_msg: \"Division by zero exception\\n\"')
self.write_file('')
#----- ENTRY FUNCTION
def entry(self):
self.write_file('entry:', tabbed=False)
self.visit(cil.Call(dest = None, f = 'build_class_name_table'))
self.visit(cil.Call(dest = None, f = 'allocate_prototypes_table'))
self.visit(cil.Call(dest = None, f = 'build_prototypes'))
self.visit(cil.Call(dest = None, f = 'build_dispatch_tables'))
self.visit(cil.Call(dest = None, f = 'build_class_parents_table'))
self.visit(cil.Allocate(dest = None, ttype = 'Main'))
# Push main self
self.write_file('sw $v0 0($sp)')
self.write_file('addiu $sp $sp -4')
self.visit(cil.Call(dest = None, f = f'Main_{INIT_CIL_SUFFIX}'))
self.write_file('addiu $sp $sp 4')
# Push main self
self.write_file('sw $v0 0($sp)')
self.write_file('addiu $sp $sp -4')
self.visit(cil.Call(dest = None, f = 'Main_main'))
self.write_file('addiu $sp $sp 4')
self.write_file('li $v0 10')
self.write_file('syscall')
#----- OBJECT METHODS
def object_abort(self):
self.write_file('function_Object_abort:', tabbed=False)
# Set up stack frame
self.write_file(f'move $fp, $sp')
self.write_file('jr $ra')
self.write_file('')
def object_copy(self):
self.write_file('function_Object_copy:', tabbed=False)
# Set up stack frame
self.write_file(f'move $fp, $sp')
self.write_file('lw $t0 12($fp)')# recoger la instancia a copiar
self.write_file('lw $a0 4($t0)')
self.write_file('move $t4 $a0')
self.write_file('li $v0 9')
self.write_file('syscall')# guarda en v0 la direccion de memoria que se reservo
self.write_file('move $t2 $v0')# salvar la direccion donde comienza el objeto
self.write_file('li $t3 0') # size ya copiado
self.write_file('_objcopy_loop:', tabbed=False)
self.write_file('lw $t1 0($t0)') # cargar la palabra por la que voy
self.write_file('sw $t1 0($v0)') # copiar la palabra
self.write_file('addiu $t0 $t0 4') # posiciona el puntero en la proxima palabra a copiar
self.write_file('addiu $v0 $v0 4') # posiciona el puntero en la direccion donde copiar la proxima palabra
self.write_file('addiu $t3 $t3 4') # actualizar el size copiado
self.write_file('ble $t4 $t3 _objcopy_loop') # verificar si la condicion es igual o menor igual
self.write_file('_objcopy_div_end_:', tabbed=False)
self.write_file('move $v0 $t2') # dejar en v0 la direccion donde empieza el nuevo objeto
self.write_file('jr $ra')
self.write_file('')
def object_typename(self):
self.write_file('function_Object_type_name:', tabbed=False)
# Set up stack frame
self.write_file(f'move $fp, $sp')
# Box the string reference
self.visit(cil.Allocate(dest = None, ttype = STRING_CLASS)) # Create new String object
self.write_file('move $v1 $v0')
# Box string's length
self.visit(cil.Allocate(dest = None, ttype = INTEGER_CLASS) ) # Create new Int object
self.write_file('lw $a1 12($fp)') # self
self.write_file('lw $a1 0($a1)')
self.write_file('mulu $a1 $a1 4') # self's class tag
self.write_file('addu $a1 $a1 $s1') # class name table entry address
self.write_file('lw $a1 0($a1)') # Get class name address
self.write_file('move $a2 $0') # Compute string's length
self.write_file('move $t2 $a1')
self.write_file('_str_len_clsname_:', tabbed=False)
self.write_file('lb $a0 0($t2)')
self.write_file('beq $a0 $0 _end_clsname_len_')
self.write_file('addiu $a2 $a2 1')
self.write_file('addiu $t2 $t2 1')
self.write_file('j _str_len_clsname_')
self.write_file('_end_clsname_len_:', tabbed=False)
self.write_file('sw $a2, 12($v0)') # Store string's length
self.write_file('sw $v0, 12($v1)') # Fill String attributes
self.write_file('sw $a1, 16($v1)')
self.write_file('move $v0 $v1')
self.write_file('jr $ra')
self.write_file('')
#----- STRING METHODS
def string_length(self):
self.write_file('function_String_length:', tabbed=False)
# Set up stack frame
self.write_file(f'move $fp, $sp')
self.write_file('lw $a0 12($fp)') # Self
self.write_file('lw $v0 12($a0)')
self.write_file('jr $ra')
self.write_file('')
def string_concat(self):
self.write_file('function_String_concat:', tabbed=False)
# Set up stack frame
self.write_file(f'move $fp, $sp')
self.visit(cil.Allocate(dest = None, ttype = INTEGER_CLASS)) # Create new Int object
self.write_file('move $v1 $v0') # Save new Int Object
self.visit(cil.Allocate(dest = None, ttype = STRING_CLASS)) # Create new String object
self.write_file('move $t3 $v0') # Store new String object
self.write_file('lw $a1 12($fp)') # Self
self.write_file('lw $a2 16($fp)') # Boxed String to concat
self.write_file('lw $t1 12($a1)') # Self's length Int object
self.write_file('lw $t1 12($t1)') # Self's length
self.write_file('lw $t2 12($a2)') # strings to concat's length Int object
self.write_file('lw $t2 12($t2)') # strings to concat's length
self.write_file('addu $t0 $t2 $t1') # New string's length
self.write_file('sw $t0 12($v1)') # Store new string's length into box
self.write_file('lw $a1 16($a1)') # Unbox strings
self.write_file('lw $a2 16($a2)')
self.write_file('addiu $t0 $t0 1') # Add space for \0
self.allocate_memory('$t0', register=True) # Allocate memory for new string
self.write_file('move $t5 $v0') # Keep the string's reference in v0 and use t7
# a1: self's string a2: 2nd string t1: length self t2: 2nd string length
# v1: new string's int object
self.write_file('move $t4 $a1') # Index for iterating the self string
self.write_file('addu $a1 $a1 $t1') # self's copy limit
self.write_file('_strcat_copy_:', tabbed=False)
self.write_file('beq $t4 $a1 _end_strcat_copy_') # No more characters to copy
self.write_file('lb $a0 0($t4)') # Copy the character
self.write_file('sb $a0 0($t5)')
self.write_file('addiu $t5 $t5 1') # Advance indices
self.write_file('addiu $t4 $t4 1')
self.write_file('j _strcat_copy_')
self.write_file('_end_strcat_copy_:', tabbed=False)
# Copy 2nd string
self.write_file('move $t4 $a2') # Index for iterating the strings
self.write_file('addu $a2 $a2 $t2') # self's copy limit
self.write_file('_strcat_copy_snd_:', tabbed=False)
self.write_file('beq $t4 $a2 _end_strcat_copy_snd_') # No more characters to copy
self.write_file('lb $a0 0($t4)') # Copy the character
self.write_file('sb $a0 0($t5)')
self.write_file('addiu $t5 $t5 1') # Advance indices
self.write_file('addiu $t4 $t4 1')
self.write_file('j _strcat_copy_snd_')
self.write_file('_end_strcat_copy_snd_:', tabbed=False)
self.write_file('sb $0 0($t5)') # End string with \0
# $v0: reference to new string $v1: length int object
# $t3: new string object
# -> Create boxed string
self.write_file('sw $v1 12($t3)') # New length
self.write_file('sw $v0 16($t3)') # New string
self.write_file('move $v0 $t3') # Return new String object in $v0
self.write_file('jr $ra')
self.write_file('')
def string_substr(self):
self.write_file('function_String_substr:', tabbed=False)
# Set up stack frame
self.write_file(f'move $fp, $sp')
self.write_file(f'lw $t5 12($fp)') # self param
self.write_file(f'lw $a1 16($fp)') # reference of object int that represent i
self.write_file(f'lw $a1 12($a1)') # value of i
self.write_file(f'lw $a2 20($fp)') # reference of object int that represent j
self.write_file(f'lw $a2 12($a2)') # value of j that is length to copy
self.write_file(f'blt $a1 $0 _index_negative') # index i is negative
self.write_file(f'blt $a2 $0 _index_negative') # length j is negative
self.write_file(f'add $a2 $a1 $a2') # finish index
self.write_file(f'lw $a3 12($t5)')
self.write_file(f'lw $a3 12($a3)') # length of string
self.write_file(f'bgt $a2 $a3 _index_out') # j > lenght
# not errors
self.visit(cil.Allocate(dest = None, ttype = STRING_CLASS))
self.write_file(f'move $v1 $v0') # new string
self.visit(cil.Allocate(dest = None, ttype = INTEGER_CLASS))
self.write_file(f'move $t0 $v0') # lenght of string
self.write_file(f'move $t7 $a2')
self.write_file(f'subu $t7 $t7 $a1')
self.write_file(f'sw $t7 12($t0)') # save number that represent lenght of new string
self.allocate_memory('$a2', register=True) # $v0 -> address of the string
self.write_file(f'sw $t0 12($v1)') # store length
self.write_file(f'sw $v0 16($v1)') # store address of new string to String object
# generate substring
self.write_file('move $t1 $v0') # Index for iterating the new string
self.write_file('lw $t5 16($t5)') # Index for iterating the self string
self.write_file('move $t4 $t5')
self.write_file('addu $t4 $t4 $a1') # self's copy start
self.write_file('addu $t5 $t5 $a2') # self's copy limit
self.write_file('_substr_copy_:', tabbed=False)
self.write_file('bge $t4 $t5 _end_substr_copy_') # No more characters to copy
self.write_file('lb $a0 0($t4)') # Copy the character
self.write_file('sb $a0 0($t1)')
self.write_file('addiu $t1 $t1 1') # Advance indices
self.write_file('addiu $t4 $t4 1')
self.write_file('j _substr_copy_')
# errors sections
self.write_file(f'_index_negative:',tabbed=False)
self.write_file(f'la $a0 _index_negative_msg')
self.write_file(f'b _subst_abort')
self.write_file(f'_index_out:',tabbed=False)
self.write_file(f'la $a0 _index_out_msg')
self.write_file(f'b _subst_abort')
# abort execution
self.write_file(f'_subst_abort:',tabbed=False)
self.write_file(f'li $v0 4')
self.write_file(f'syscall')
self.write_file('la $a0 _abort_msg')
self.write_file(f'li $v0 4')
self.write_file(f'syscall')
self.write_file(f'li $v0 10')
self.write_file(f'syscall') # exit
# successful execution
self.write_file('_end_substr_copy_:', tabbed=False)
self.write_file('move $v0 $v1')
self.write_file('jr $ra')
self.write_file('')
#----- IO
def io_in_int(self):
self.write_file('function_IO_in_int:', tabbed=False)
# Set up stack frame
self.write_file(f'move $fp, $sp')
self.visit(cil.Allocate(dest = None, ttype = INTEGER_CLASS)) # Create new Int object
self.write_file('move $t0 $v0') # Save Int object
self.write_file('li $v0 5') # Read int
self.write_file('syscall')
self.write_file('sw $v0 12($t0)') # Store int
self.write_file('move $v0 $t0')
self.write_file('jr $ra')
self.write_file('')
def io_in_string(self):
self.write_file('function_IO_in_string:', tabbed=False)
# Set up stack frame
self.write_file(f'move $fp, $sp')
self.visit(cil.Allocate(dest = None, ttype = INTEGER_CLASS)) # Create new Int object for string's length
self.write_file('move $v1 $v0') # $v1: Int pbject
self.visit(cil.Allocate(dest = None, ttype = STRING_CLASS)) # Create new String object
self.write_file('sw $v1 12($v0)')
self.write_file('move $t5 $v0') # $t5: String object
# Read String and store in a temp buffer
self.write_file('la $a0 str_buffer')
self.write_file('li $a1 1025')
self.write_file('li $v0 8') # Read string
self.write_file('syscall')
# Compute string's length
self.write_file('move $a0 $0')
self.write_file('la $t2 str_buffer')
self.write_file('_in_string_str_len_:', tabbed=False)
self.write_file('lb $t0 0($t2)')
self.write_file('beq $t0 $0 _end_in_string_str_len_')
self.write_file('beq $t0 10 _end_in_string_str_len_')
self.write_file('addiu $a0 $a0 1')
self.write_file('addiu $t2 $t2 1')
self.write_file('j _in_string_str_len_')
self.write_file('_end_in_string_str_len_:', tabbed=False)
# Store string's length into Integer class
self.write_file('sw $a0 12($v1)')
# Allocate size in $a0 ... string's length
self.allocate_memory()
# $a0: string's length $v0: string's new address $t5: String object
# Copy string from buffer to new address
self.write_file('la $t4 str_buffer') # Index for iterating the string buffer
self.write_file('move $t1 $v0') # Index for iterating new string address
self.write_file('_in_str_copy_:', tabbed=False)
self.write_file('lb $t0 0($t4)') # Load a character
self.write_file('beq $t0 $0 _end_in_str_copy_') # No more characters to copy
self.write_file('beq $t0 10 _end_in_str_copy_') # No more characters to copy
self.write_file('sb $t0 0($t1)') # Copy the character
self.write_file('addiu $t4 $t4 1') # Advance indices
self.write_file('addiu $t1 $t1 1')
self.write_file('j _in_str_copy_')
self.write_file('_end_in_str_copy_:', tabbed=False)
# Store string
self.write_file('sw $v0 16($t5)')
# Clean string buffer
self.write_file('la $t4 str_buffer') # Index for iterating the string buffer
self.write_file('_in_str_clean_:', tabbed=False)
self.write_file('lb $t0 0($t4)') # Load a character
self.write_file('beq $t0 $0 _end_in_str_clean_') # No more characters to clean
self.write_file('sb $0 0($t4)') # Clean the character
self.write_file('addiu $t4 $t4 1') # Advance indices
self.write_file('j _in_str_clean_')
self.write_file('_end_in_str_clean_:', tabbed=False)
# Return new string in $v0
self.write_file('move $v0 $t5')
self.write_file('jr $ra')
self.write_file('')
def io_out_int(self):
self.write_file('function_IO_out_int:', tabbed=False)
# Set up stack frame
self.write_file(f'move $fp, $sp')
self.write_file('lw $a0 16($fp)') # Get Int object
self.write_file('lw $a0 12($a0)')
self.write_file('li $v0 1') # Print int
self.write_file('syscall')
self.write_file('lw $v0 12($fp)') # Return self
self.write_file('jr $ra')
self.write_file('')
def io_out_string(self):
self.write_file('function_IO_out_string:', tabbed=False)
# Set up stack frame
self.write_file(f'move $fp, $sp')
self.write_file('lw $a0 16($fp)') # Get String object
self.write_file('lw $a0 16($a0)')
self.write_file('li $v0 4') # Print string
self.write_file('syscall')
self.write_file('lw $v0 12($fp)') # Return self
self.write_file('jr $ra')
self.write_file('')
#------ CONFORMS
def conforms(self):
self.write_file(f'function_{CONFORMS_FUNC}:', tabbed=False)
# Set up stack frame
self.write_file(f'move $fp, $sp')
self.write_file(f'lw $t0 12($fp)') # First arg's class tag
self.write_file(f'lw $t1 16($fp)') # Second arg's class tag
# 2nd arg == Object -> return true
self.write_file(f'beq $t1 {self.type_index.index(OBJECT_CLASS)} _conforms_ret_true_')
self.write_file('_conforms_loop_:', tabbed=False)
# current == 2nd arg -> return true
self.write_file('beq $t0 $t1 _conforms_ret_true_')
# current == Object -> return false
self.write_file(f'beq $t0 {self.type_index.index(OBJECT_CLASS)} _conforms_ret_false_')
# Query parents's class tag from $s2 ... class parent table
self.write_file('mulu $t0 $t0 4')
self.write_file('addu $t0 $t0 $s2')
self.write_file('lw $t0 0($t0)') # current = current.parent
self.write_file('j _conforms_loop_')
self.write_file('_conforms_ret_true_:', tabbed=False)
self.write_file('li $v0 1')
self.write_file('j _conforms_ret_')
self.write_file('_conforms_ret_false_:', tabbed=False)
self.write_file('li $v0 0')
# No need to store result in a Bool class
self.write_file('_conforms_ret_:')
self.write_file('jr $ra')
self.write_file('')
#------ ISVOID
def isvoid(self):
self.write_file(f'function_{ISVOID_FUNC}:', tabbed=False)
# Set up stack frame
self.write_file(f'move $fp, $sp')
self.visit(cil.Allocate(dest = None, ttype = BOOLEAN_CLASS))
# $v0 contains new Bool object
self.write_file(f'lw $t0 12($fp)') # 1st arg is an object address
self.write_file(f'la $t1 {VOID_MIPS_NAME}')
self.write_file(f'beq $t0 $t1 _is_void_true_') # arg == void type
self.write_file(f'sw $0 12($v0)') # return False
self.write_file(f'j _is_void_end_')
self.write_file(f'_is_void_true_:', tabbed=False)
self.write_file(f'li $t0 1')
self.write_file(f'sw $t0 12($v0)') # return True
self.write_file(f'_is_void_end_:', tabbed=False)
# Return Bool object in $v0
self.write_file(f'jr $ra')
self.write_file(f'')
|
normal
|
{
"blob_id": "63bc191a81a200d3c257de429c082cc8d13c98f4",
"index": 9952,
"step-1": "<mask token>\n\n\nclass MipsVisitor:\n <mask token>\n\n def __init__(self, inherit_graph, output_file='mips_code.mips'):\n self.inherit_graph, _ = inherit_graph\n self.offset = dict()\n self.type_index = []\n self.dispatchtable_code = []\n self.prototypes_code = []\n self.cur_labels_id = 0\n self.output_file = output_file\n <mask token>\n <mask token>\n\n def write_file(self, msg, mode='a', tabbed=True):\n f = open(self.output_file, mode)\n f.write('{}{}\\n'.format('\\t' if tabbed else '', msg))\n f.close()\n\n def allocate_memory(self, size=None, register=False):\n if register:\n self.write_file('move $a0 {}'.format(size))\n elif size:\n self.write_file('li $a0 {}'.format(size))\n self.write_file('li $v0 9')\n self.write_file('syscall')\n <mask token>\n <mask token>\n\n @visitor.when(cil.Program)\n def visit(self, node: cil.Program):\n self.write_file('', 'w')\n self.write_file('.data', tabbed=False)\n self.static_datas()\n for data in node.data_section:\n self.visit(data)\n self.write_file('')\n for i in range(len(node.type_section)):\n self.type_index.append(node.type_section[i].type_name)\n self.write_file('classname_{}: .asciiz \"{}\"'.format(node.\n type_section[i].type_name, node.type_section[i].type_name))\n self.write_file(f'{VOID_MIPS_NAME}: .asciiz \"\"')\n self.write_file('\\n.text')\n self.entry()\n self.write_file('\\n########## STATIC FUNCTIONS ##########\\n')\n self.conforms()\n self.isvoid()\n self.object_abort()\n self.object_copy()\n self.object_typename()\n self.string_length()\n self.string_concat()\n self.string_substr()\n self.io_in_int()\n self.io_in_string()\n self.io_out_int()\n self.io_out_string()\n for t in node.type_section:\n self.visit(t)\n self.write_file('\\n############## TABLES ################\\n')\n self.write_file('function_build_class_name_table:', tabbed=False)\n self.allocate_memory(len(node.type_section) * 4)\n self.write_file('move $s1 $v0')\n for i in range(len(node.type_section)):\n self.write_file('la $t1 classname_{}'.format(node.type_section[\n i].type_name))\n self.write_file('sw $t1 {}($s1)'.format(4 * i))\n self.write_file('')\n self.write_file('function_allocate_prototypes_table:', tabbed=False)\n self.allocate_memory(8 * len(self.type_index))\n self.write_file('move $s0 $v0')\n self.write_file('')\n self.write_file('function_build_prototypes:', tabbed=False)\n for ins in self.prototypes_code:\n self.write_file(ins)\n self.write_file('')\n self.write_file('function_build_dispatch_tables:', tabbed=False)\n for ins in self.dispatchtable_code:\n self.write_file(ins)\n self.write_file('')\n self.write_file('function_build_class_parents_table:', tabbed=False)\n self.allocate_memory(4 * len(self.type_index))\n self.write_file('move $s2 $v0')\n self.write_file('')\n for parent in self.inherit_graph.keys():\n p_index = self.type_index.index(parent)\n for child in self.inherit_graph[parent]:\n ch_index = self.type_index.index(child.name)\n self.write_file(f'li $t0 {ch_index}')\n self.write_file(f'mul $t0 $t0 4')\n self.write_file(f'add $t0 $t0 $s2')\n self.write_file(f'li $t1 {p_index}')\n self.write_file(f'sw $t1 0($t0)')\n self.write_file('')\n self.write_file('')\n self.write_file('\\n########### COOL FUNCTIONS ##########\\n')\n for func in node.code_section:\n is_built_in = False\n if not INIT_CIL_SUFFIX in func.name:\n is_built_in = [x for x in BUILT_IN_CLASSES if f'{x}_' in\n func.name] != []\n if not is_built_in:\n self.visit(func)\n self.write_file('\\n#####################################\\n')\n <mask token>\n\n @visitor.when(cil.Type)\n def visit(self, node: cil.Type):\n self.dispatchtable_code.append(f'# Type {node.type_name}')\n self.dispatchtable_code.append('li $a0 {}'.format(4 * len(node.\n methods)))\n self.dispatchtable_code.append('li $v0 9')\n self.dispatchtable_code.append('syscall')\n for i in range(len(node.methods)):\n self.dispatchtable_code.append('la $t1 function_{}'.format(node\n .methods[i].function_name))\n self.dispatchtable_code.append('sw $t1 {}($v0)'.format(4 * i))\n self.dispatchtable_code.append('lw $t0 {}($s0)'.format(8 * self.\n type_index.index(node.type_name)))\n self.dispatchtable_code.append('sw $v0 8($t0)')\n self.dispatchtable_code.append('')\n self.prototypes_code.append(f'# Type {node.type_name}')\n self.prototypes_code.append('li $a0 {}'.format(12 + 4 * len(node.\n attributes)))\n self.prototypes_code.append('li $v0 9')\n self.prototypes_code.append('syscall')\n class_index = self.type_index.index(node.type_name)\n self.prototypes_code.append('li $a0 {}'.format(class_index))\n self.prototypes_code.append('sw $a0 0($v0)')\n self.prototypes_code.append('li $a0 {}'.format(12 + 4 * len(node.\n attributes)))\n self.prototypes_code.append('sw $a0 4($v0)')\n self.prototypes_code.append('sw $v0 {}($s0)'.format(8 * class_index))\n self.prototypes_code.append('')\n <mask token>\n\n @visitor.when(cil.Assign)\n def visit(self, node: cil.Assign):\n self.write_file('# ASSIGN')\n self.write_file('lw $a0, {}($fp)'.format(self.offset[node.source]))\n self.write_file('sw $a0, {}($fp)'.format(self.offset[node.dest]))\n self.write_file('')\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @visitor.when(cil.Equal)\n def visit(self, node: cil.Equal):\n self.write_file('lw $t0 {}($fp)'.format(self.offset[node.left]))\n self.write_file('lw $t1 {}($fp)'.format(self.offset[node.right]))\n self.write_file(f'beq $t0 $zero _eq_false_{node.id}_')\n self.write_file(f'beq $t1 $zero _eq_false_{node.id}_')\n self.write_file('lw $a0 0($t0)')\n self.write_file('lw $a1 0($t1)')\n self.write_file(f'bne $a0 $a1 _eq_false_{node.id}_')\n self.write_file('li $a2 {}'.format(self.type_index.index(\n INTEGER_CLASS)))\n self.write_file(f'beq $a0 $a2 _eq_int_bool_{node.id}')\n self.write_file('li $a2 {}'.format(self.type_index.index(\n BOOLEAN_CLASS)))\n self.write_file(f'beq $a0 $a2 _eq_int_bool_{node.id}')\n self.write_file('li $a2 {}'.format(self.type_index.index(STRING_CLASS))\n )\n self.write_file(f'bne $a0 $a2 _not_basic_type_{node.id}_')\n self.write_file(f'_eq_str_{node.id}_:', tabbed=False)\n self.write_file('lw\\t$t3 12($t0)')\n self.write_file('lw\\t$t3 12($t3)')\n self.write_file('lw\\t$t4, 12($t1)')\n self.write_file('lw\\t$t4, 12($t4)')\n self.write_file(f'bne $t3 $t4 _eq_false_{node.id}_')\n self.write_file(f'beq $t3 $0 _eq_true_{node.id}_')\n self.write_file('addu $t0 $t0 16')\n self.write_file('lw $t0 0($t0)')\n self.write_file('addu $t1 $t1 16')\n self.write_file('lw $t1 0($t1)')\n self.write_file('move $t2 $t3')\n self.write_file(f'_verify_ascii_sequences_{node.id}_:', tabbed=False)\n self.write_file('lb $a0 0($t0)')\n self.write_file('lb $a1 0($t1)')\n self.write_file(f'bne $a0 $a1 _eq_false_{node.id}_')\n self.write_file('addu $t0 $t0 1')\n self.write_file('addu $t1 $t1 1')\n self.write_file('addiu $t2 $t2 -1')\n self.write_file(f'bnez $t2 _verify_ascii_sequences_{node.id}_')\n self.write_file(f'b _eq_true_{node.id}_')\n self.write_file(f'_not_basic_type_{node.id}_:', tabbed=False)\n self.write_file(f'bne $t0 $t1 _eq_false_{node.id}_')\n self.write_file(f'b _eq_true_{node.id}_')\n self.write_file(f'_eq_int_bool_{node.id}:', tabbed=False)\n self.write_file('lw $a3 12($t0)')\n self.write_file('lw $t4 12($t1)')\n self.write_file(f'bne $a3 $t4 _eq_false_{node.id}_')\n self.write_file(f'_eq_true_{node.id}_:', tabbed=False)\n self.write_file('li $a0 1')\n self.write_file('sw $a0 {}($fp)'.format(self.offset[node.dest]))\n self.write_file(f'b end_equal_{node.id}_')\n self.write_file(f'_eq_false_{node.id}_:', tabbed=False)\n self.write_file('li $a0 0')\n self.write_file('sw $a0 {}($fp)'.format(self.offset[node.dest]))\n self.write_file(f'end_equal_{node.id}_:', tabbed=False)\n <mask token>\n\n @visitor.when(cil.EqualOrLessThan)\n def visit(self, node: cil.EqualOrLessThan):\n self.write_file('# <=')\n self.write_file('lw $a1, {}($fp)'.format(self.offset[node.left]))\n self.write_file('lw $a2, {}($fp)'.format(self.offset[node.right]))\n self.write_file('sle $a0, $a1, $a2'.format(self.offset[node.right]))\n self.write_file('sw $a0, {}($fp)'.format(self.offset[node.dest]))\n self.write_file('')\n\n @visitor.when(cil.GetAttrib)\n def visit(self, node: cil.GetAttrib):\n self.write_file('# GETATTR')\n self.write_file(f'lw $a1 {self.offset[node.instance]}($fp)')\n self.write_file(f'lw $a0 {12 + 4 * node.attribute}($a1)')\n self.write_file(f'sw $a0 {self.offset[node.dest]}($fp)')\n self.write_file('')\n\n @visitor.when(cil.SetAttrib)\n def visit(self, node: cil.SetAttrib):\n self.write_file('# SETATTR')\n self.write_file(f'lw $a1 {self.offset[node.instance]}($fp)')\n if isinstance(node.src, int):\n self.write_file(f'li $a0, {node.src}')\n elif node.src[:5] == 'data_':\n self.write_file(f'la $a0, {node.src}')\n else:\n self.write_file(f'lw $a0 {self.offset[node.src]}($fp)')\n self.write_file(f'sw $a0 {12 + 4 * node.attribute}($a1)')\n self.write_file('')\n\n @visitor.when(cil.TypeOf)\n def visit(self, node: cil.TypeOf):\n self.write_file('# TYPEOF')\n self.write_file(f'lw $a1 {self.offset[node.instance]}($fp)')\n self.write_file(f'lw $a0 0($a1)')\n self.write_file(f'sw $a0 {self.offset[node.dest]}($fp)')\n self.write_file('')\n <mask token>\n\n @visitor.when(cil.Call)\n def visit(self, node: cil.Call):\n self.write_file('# CALL')\n self.write_file(f'addiu $sp, $sp, -8')\n self.write_file(f'sw $ra, 4($sp)')\n self.write_file(f'sw $fp, 8($sp)')\n self.write_file(f'jal function_{node.f}')\n self.write_file(f'lw $fp, 8($sp)')\n self.write_file(f'lw $ra, 4($sp)')\n self.write_file(f'addiu $sp, $sp, 8')\n if node.dest:\n self.write_file(f'sw $v0 {self.offset[node.dest]}($fp)')\n self.write_file('')\n\n @visitor.when(cil.VCall)\n def visit(self, node: cil.VCall):\n self.write_file('# VCALL')\n self.write_file(f'addiu $sp, $sp, -8')\n self.write_file(f'sw $ra, 4($sp)')\n self.write_file(f'sw $fp, 8($sp)')\n if node.ttype[0] == '_':\n self.write_file(f'lw $a2, {self.offset[node.ttype]}($fp)')\n else:\n self.write_file(f'li $a2, {self.type_index.index(node.ttype)}')\n self.write_file(f'mulu $a2, $a2, 8')\n self.write_file(f'addu $a2, $a2, $s0')\n self.write_file(f'lw $a1, 0($a2)')\n self.write_file(f'lw $a2, 8($a1)')\n self.write_file(f'lw $a0 {node.f * 4}($a2)')\n self.write_file(f'jalr $a0')\n self.write_file(f'lw $fp, 8($sp)')\n self.write_file(f'lw $ra, 4($sp)')\n self.write_file(f'addiu $sp, $sp, 8')\n self.write_file(f'sw $v0 {self.offset[node.dest]}($fp)')\n if node.ttype[0] != '_':\n self.write_file(f'li $a2, {self.type_index.index(node.ttype)}')\n else:\n self.write_file(f'lw $a2, {self.offset[node.ttype]}($fp)')\n self.write_file('')\n\n @visitor.when(cil.PushParam)\n def visit(self, node: cil.PushParam):\n self.write_file('# PUSHPARAM')\n if node.name[0] != '_':\n self.write_file('li $a0, {}'.format(self.type_index.index(node.\n name)))\n else:\n self.write_file('lw $a0, {}($fp)'.format(self.offset[node.name]))\n self.push()\n self.write_file('')\n\n @visitor.when(cil.PopParam)\n def visit(self, node: cil.PopParam):\n self.write_file('# POPPARAM')\n self.pop(node.name)\n self.write_file('')\n\n @visitor.when(cil.Return)\n def visit(self, node: cil.Return):\n self.write_file('# RETURN')\n self.write_file('lw $v0, {}($fp)'.format(self.offset[node.value]))\n\n @visitor.when(cil.Label)\n def visit(self, node: cil.Label):\n self.write_file('_cil_label_{}:'.format(node.name), tabbed=False)\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def object_copy(self):\n self.write_file('function_Object_copy:', tabbed=False)\n self.write_file(f'move $fp, $sp')\n self.write_file('lw $t0 12($fp)')\n self.write_file('lw $a0 4($t0)')\n self.write_file('move $t4 $a0')\n self.write_file('li $v0 9')\n self.write_file('syscall')\n self.write_file('move $t2 $v0')\n self.write_file('li $t3 0')\n self.write_file('_objcopy_loop:', tabbed=False)\n self.write_file('lw $t1 0($t0)')\n self.write_file('sw $t1 0($v0)')\n self.write_file('addiu $t0 $t0 4')\n self.write_file('addiu $v0 $v0 4')\n self.write_file('addiu $t3 $t3 4')\n self.write_file('ble $t4 $t3 _objcopy_loop')\n self.write_file('_objcopy_div_end_:', tabbed=False)\n self.write_file('move $v0 $t2')\n self.write_file('jr $ra')\n self.write_file('')\n\n def object_typename(self):\n self.write_file('function_Object_type_name:', tabbed=False)\n self.write_file(f'move $fp, $sp')\n self.visit(cil.Allocate(dest=None, ttype=STRING_CLASS))\n self.write_file('move $v1 $v0')\n self.visit(cil.Allocate(dest=None, ttype=INTEGER_CLASS))\n self.write_file('lw $a1 12($fp)')\n self.write_file('lw $a1 0($a1)')\n self.write_file('mulu $a1 $a1 4')\n self.write_file('addu $a1 $a1 $s1')\n self.write_file('lw $a1 0($a1)')\n self.write_file('move $a2 $0')\n self.write_file('move $t2 $a1')\n self.write_file('_str_len_clsname_:', tabbed=False)\n self.write_file('lb $a0 0($t2)')\n self.write_file('beq $a0 $0 _end_clsname_len_')\n self.write_file('addiu $a2 $a2 1')\n self.write_file('addiu $t2 $t2 1')\n self.write_file('j _str_len_clsname_')\n self.write_file('_end_clsname_len_:', tabbed=False)\n self.write_file('sw $a2, 12($v0)')\n self.write_file('sw $v0, 12($v1)')\n self.write_file('sw $a1, 16($v1)')\n self.write_file('move $v0 $v1')\n self.write_file('jr $ra')\n self.write_file('')\n\n def string_length(self):\n self.write_file('function_String_length:', tabbed=False)\n self.write_file(f'move $fp, $sp')\n self.write_file('lw $a0 12($fp)')\n self.write_file('lw $v0 12($a0)')\n self.write_file('jr $ra')\n self.write_file('')\n\n def string_concat(self):\n self.write_file('function_String_concat:', tabbed=False)\n self.write_file(f'move $fp, $sp')\n self.visit(cil.Allocate(dest=None, ttype=INTEGER_CLASS))\n self.write_file('move $v1 $v0')\n self.visit(cil.Allocate(dest=None, ttype=STRING_CLASS))\n self.write_file('move $t3 $v0')\n self.write_file('lw $a1 12($fp)')\n self.write_file('lw $a2 16($fp)')\n self.write_file('lw $t1 12($a1)')\n self.write_file('lw $t1 12($t1)')\n self.write_file('lw $t2 12($a2)')\n self.write_file('lw $t2 12($t2)')\n self.write_file('addu $t0 $t2 $t1')\n self.write_file('sw $t0 12($v1)')\n self.write_file('lw $a1 16($a1)')\n self.write_file('lw $a2 16($a2)')\n self.write_file('addiu $t0 $t0 1')\n self.allocate_memory('$t0', register=True)\n self.write_file('move $t5 $v0')\n self.write_file('move $t4 $a1')\n self.write_file('addu $a1 $a1 $t1')\n self.write_file('_strcat_copy_:', tabbed=False)\n self.write_file('beq $t4 $a1 _end_strcat_copy_')\n self.write_file('lb $a0 0($t4)')\n self.write_file('sb $a0 0($t5)')\n self.write_file('addiu $t5 $t5 1')\n self.write_file('addiu $t4 $t4 1')\n self.write_file('j _strcat_copy_')\n self.write_file('_end_strcat_copy_:', tabbed=False)\n self.write_file('move $t4 $a2')\n self.write_file('addu $a2 $a2 $t2')\n self.write_file('_strcat_copy_snd_:', tabbed=False)\n self.write_file('beq $t4 $a2 _end_strcat_copy_snd_')\n self.write_file('lb $a0 0($t4)')\n self.write_file('sb $a0 0($t5)')\n self.write_file('addiu $t5 $t5 1')\n self.write_file('addiu $t4 $t4 1')\n self.write_file('j _strcat_copy_snd_')\n self.write_file('_end_strcat_copy_snd_:', tabbed=False)\n self.write_file('sb $0 0($t5)')\n self.write_file('sw $v1 12($t3)')\n self.write_file('sw $v0 16($t3)')\n self.write_file('move $v0 $t3')\n self.write_file('jr $ra')\n self.write_file('')\n\n def string_substr(self):\n self.write_file('function_String_substr:', tabbed=False)\n self.write_file(f'move $fp, $sp')\n self.write_file(f'lw $t5 12($fp)')\n self.write_file(f'lw $a1 16($fp)')\n self.write_file(f'lw $a1 12($a1)')\n self.write_file(f'lw $a2 20($fp)')\n self.write_file(f'lw $a2 12($a2)')\n self.write_file(f'blt $a1 $0 _index_negative')\n self.write_file(f'blt $a2 $0 _index_negative')\n self.write_file(f'add $a2 $a1 $a2')\n self.write_file(f'lw $a3 12($t5)')\n self.write_file(f'lw $a3 12($a3)')\n self.write_file(f'bgt $a2 $a3 _index_out')\n self.visit(cil.Allocate(dest=None, ttype=STRING_CLASS))\n self.write_file(f'move $v1 $v0')\n self.visit(cil.Allocate(dest=None, ttype=INTEGER_CLASS))\n self.write_file(f'move $t0 $v0')\n self.write_file(f'move $t7 $a2')\n self.write_file(f'subu $t7 $t7 $a1')\n self.write_file(f'sw $t7 12($t0)')\n self.allocate_memory('$a2', register=True)\n self.write_file(f'sw $t0 12($v1)')\n self.write_file(f'sw $v0 16($v1)')\n self.write_file('move $t1 $v0')\n self.write_file('lw $t5 16($t5)')\n self.write_file('move $t4 $t5')\n self.write_file('addu $t4 $t4 $a1')\n self.write_file('addu $t5 $t5 $a2')\n self.write_file('_substr_copy_:', tabbed=False)\n self.write_file('bge $t4 $t5 _end_substr_copy_')\n self.write_file('lb $a0 0($t4)')\n self.write_file('sb $a0 0($t1)')\n self.write_file('addiu $t1 $t1 1')\n self.write_file('addiu $t4 $t4 1')\n self.write_file('j _substr_copy_')\n self.write_file(f'_index_negative:', tabbed=False)\n self.write_file(f'la $a0 _index_negative_msg')\n self.write_file(f'b _subst_abort')\n self.write_file(f'_index_out:', tabbed=False)\n self.write_file(f'la $a0 _index_out_msg')\n self.write_file(f'b _subst_abort')\n self.write_file(f'_subst_abort:', tabbed=False)\n self.write_file(f'li $v0 4')\n self.write_file(f'syscall')\n self.write_file('la\\t$a0 _abort_msg')\n self.write_file(f'li $v0 4')\n self.write_file(f'syscall')\n self.write_file(f'li $v0 10')\n self.write_file(f'syscall')\n self.write_file('_end_substr_copy_:', tabbed=False)\n self.write_file('move $v0 $v1')\n self.write_file('jr $ra')\n self.write_file('')\n <mask token>\n <mask token>\n\n def io_out_int(self):\n self.write_file('function_IO_out_int:', tabbed=False)\n self.write_file(f'move $fp, $sp')\n self.write_file('lw $a0 16($fp)')\n self.write_file('lw $a0 12($a0)')\n self.write_file('li $v0 1')\n self.write_file('syscall')\n self.write_file('lw $v0 12($fp)')\n self.write_file('jr $ra')\n self.write_file('')\n <mask token>\n <mask token>\n\n def isvoid(self):\n self.write_file(f'function_{ISVOID_FUNC}:', tabbed=False)\n self.write_file(f'move $fp, $sp')\n self.visit(cil.Allocate(dest=None, ttype=BOOLEAN_CLASS))\n self.write_file(f'lw $t0 12($fp)')\n self.write_file(f'la $t1 {VOID_MIPS_NAME}')\n self.write_file(f'beq $t0 $t1 _is_void_true_')\n self.write_file(f'sw $0 12($v0)')\n self.write_file(f'j _is_void_end_')\n self.write_file(f'_is_void_true_:', tabbed=False)\n self.write_file(f'li $t0 1')\n self.write_file(f'sw $t0 12($v0)')\n self.write_file(f'_is_void_end_:', tabbed=False)\n self.write_file(f'jr $ra')\n self.write_file(f'')\n",
"step-2": "<mask token>\n\n\nclass MipsVisitor:\n <mask token>\n\n def __init__(self, inherit_graph, output_file='mips_code.mips'):\n self.inherit_graph, _ = inherit_graph\n self.offset = dict()\n self.type_index = []\n self.dispatchtable_code = []\n self.prototypes_code = []\n self.cur_labels_id = 0\n self.output_file = output_file\n <mask token>\n <mask token>\n\n def write_file(self, msg, mode='a', tabbed=True):\n f = open(self.output_file, mode)\n f.write('{}{}\\n'.format('\\t' if tabbed else '', msg))\n f.close()\n\n def allocate_memory(self, size=None, register=False):\n if register:\n self.write_file('move $a0 {}'.format(size))\n elif size:\n self.write_file('li $a0 {}'.format(size))\n self.write_file('li $v0 9')\n self.write_file('syscall')\n <mask token>\n <mask token>\n\n @visitor.when(cil.Program)\n def visit(self, node: cil.Program):\n self.write_file('', 'w')\n self.write_file('.data', tabbed=False)\n self.static_datas()\n for data in node.data_section:\n self.visit(data)\n self.write_file('')\n for i in range(len(node.type_section)):\n self.type_index.append(node.type_section[i].type_name)\n self.write_file('classname_{}: .asciiz \"{}\"'.format(node.\n type_section[i].type_name, node.type_section[i].type_name))\n self.write_file(f'{VOID_MIPS_NAME}: .asciiz \"\"')\n self.write_file('\\n.text')\n self.entry()\n self.write_file('\\n########## STATIC FUNCTIONS ##########\\n')\n self.conforms()\n self.isvoid()\n self.object_abort()\n self.object_copy()\n self.object_typename()\n self.string_length()\n self.string_concat()\n self.string_substr()\n self.io_in_int()\n self.io_in_string()\n self.io_out_int()\n self.io_out_string()\n for t in node.type_section:\n self.visit(t)\n self.write_file('\\n############## TABLES ################\\n')\n self.write_file('function_build_class_name_table:', tabbed=False)\n self.allocate_memory(len(node.type_section) * 4)\n self.write_file('move $s1 $v0')\n for i in range(len(node.type_section)):\n self.write_file('la $t1 classname_{}'.format(node.type_section[\n i].type_name))\n self.write_file('sw $t1 {}($s1)'.format(4 * i))\n self.write_file('')\n self.write_file('function_allocate_prototypes_table:', tabbed=False)\n self.allocate_memory(8 * len(self.type_index))\n self.write_file('move $s0 $v0')\n self.write_file('')\n self.write_file('function_build_prototypes:', tabbed=False)\n for ins in self.prototypes_code:\n self.write_file(ins)\n self.write_file('')\n self.write_file('function_build_dispatch_tables:', tabbed=False)\n for ins in self.dispatchtable_code:\n self.write_file(ins)\n self.write_file('')\n self.write_file('function_build_class_parents_table:', tabbed=False)\n self.allocate_memory(4 * len(self.type_index))\n self.write_file('move $s2 $v0')\n self.write_file('')\n for parent in self.inherit_graph.keys():\n p_index = self.type_index.index(parent)\n for child in self.inherit_graph[parent]:\n ch_index = self.type_index.index(child.name)\n self.write_file(f'li $t0 {ch_index}')\n self.write_file(f'mul $t0 $t0 4')\n self.write_file(f'add $t0 $t0 $s2')\n self.write_file(f'li $t1 {p_index}')\n self.write_file(f'sw $t1 0($t0)')\n self.write_file('')\n self.write_file('')\n self.write_file('\\n########### COOL FUNCTIONS ##########\\n')\n for func in node.code_section:\n is_built_in = False\n if not INIT_CIL_SUFFIX in func.name:\n is_built_in = [x for x in BUILT_IN_CLASSES if f'{x}_' in\n func.name] != []\n if not is_built_in:\n self.visit(func)\n self.write_file('\\n#####################################\\n')\n <mask token>\n\n @visitor.when(cil.Type)\n def visit(self, node: cil.Type):\n self.dispatchtable_code.append(f'# Type {node.type_name}')\n self.dispatchtable_code.append('li $a0 {}'.format(4 * len(node.\n methods)))\n self.dispatchtable_code.append('li $v0 9')\n self.dispatchtable_code.append('syscall')\n for i in range(len(node.methods)):\n self.dispatchtable_code.append('la $t1 function_{}'.format(node\n .methods[i].function_name))\n self.dispatchtable_code.append('sw $t1 {}($v0)'.format(4 * i))\n self.dispatchtable_code.append('lw $t0 {}($s0)'.format(8 * self.\n type_index.index(node.type_name)))\n self.dispatchtable_code.append('sw $v0 8($t0)')\n self.dispatchtable_code.append('')\n self.prototypes_code.append(f'# Type {node.type_name}')\n self.prototypes_code.append('li $a0 {}'.format(12 + 4 * len(node.\n attributes)))\n self.prototypes_code.append('li $v0 9')\n self.prototypes_code.append('syscall')\n class_index = self.type_index.index(node.type_name)\n self.prototypes_code.append('li $a0 {}'.format(class_index))\n self.prototypes_code.append('sw $a0 0($v0)')\n self.prototypes_code.append('li $a0 {}'.format(12 + 4 * len(node.\n attributes)))\n self.prototypes_code.append('sw $a0 4($v0)')\n self.prototypes_code.append('sw $v0 {}($s0)'.format(8 * class_index))\n self.prototypes_code.append('')\n\n @visitor.when(cil.Function)\n def visit(self, node: cil.Function):\n self.write_file(f'function_{node.name}:', tabbed=False)\n self.write_file(f'move $fp, $sp')\n self.write_file(f'subiu $sp, $sp, {4 * len(node.vlocals)}')\n for i in range(len(node.args)):\n self.offset[node.args[i].name] = 12 + i * 4\n for i in range(len(node.vlocals)):\n self.offset[node.vlocals[i].name] = i * -4\n for inst in node.body:\n if isinstance(inst, cil.Equal) or isinstance(inst, cil.Div):\n inst.id = self.new_labels_id()\n self.visit(inst)\n self.write_file(f'addiu $sp, $sp, {4 * len(node.vlocals)}')\n self.write_file('jr $ra')\n self.write_file('')\n\n @visitor.when(cil.Assign)\n def visit(self, node: cil.Assign):\n self.write_file('# ASSIGN')\n self.write_file('lw $a0, {}($fp)'.format(self.offset[node.source]))\n self.write_file('sw $a0, {}($fp)'.format(self.offset[node.dest]))\n self.write_file('')\n <mask token>\n <mask token>\n\n @visitor.when(cil.Mult)\n def visit(self, node: cil.Mult):\n self.write_file('# *')\n self.write_file('lw $a0, {}($fp)'.format(self.offset[node.left]))\n self.write_file('lw $a1, {}($fp)'.format(self.offset[node.right]))\n self.write_file('mul $a0, $a0, $a1')\n self.write_file('sw $a0, {}($fp)'.format(self.offset[node.dest]))\n self.write_file('')\n <mask token>\n\n @visitor.when(cil.Equal)\n def visit(self, node: cil.Equal):\n self.write_file('lw $t0 {}($fp)'.format(self.offset[node.left]))\n self.write_file('lw $t1 {}($fp)'.format(self.offset[node.right]))\n self.write_file(f'beq $t0 $zero _eq_false_{node.id}_')\n self.write_file(f'beq $t1 $zero _eq_false_{node.id}_')\n self.write_file('lw $a0 0($t0)')\n self.write_file('lw $a1 0($t1)')\n self.write_file(f'bne $a0 $a1 _eq_false_{node.id}_')\n self.write_file('li $a2 {}'.format(self.type_index.index(\n INTEGER_CLASS)))\n self.write_file(f'beq $a0 $a2 _eq_int_bool_{node.id}')\n self.write_file('li $a2 {}'.format(self.type_index.index(\n BOOLEAN_CLASS)))\n self.write_file(f'beq $a0 $a2 _eq_int_bool_{node.id}')\n self.write_file('li $a2 {}'.format(self.type_index.index(STRING_CLASS))\n )\n self.write_file(f'bne $a0 $a2 _not_basic_type_{node.id}_')\n self.write_file(f'_eq_str_{node.id}_:', tabbed=False)\n self.write_file('lw\\t$t3 12($t0)')\n self.write_file('lw\\t$t3 12($t3)')\n self.write_file('lw\\t$t4, 12($t1)')\n self.write_file('lw\\t$t4, 12($t4)')\n self.write_file(f'bne $t3 $t4 _eq_false_{node.id}_')\n self.write_file(f'beq $t3 $0 _eq_true_{node.id}_')\n self.write_file('addu $t0 $t0 16')\n self.write_file('lw $t0 0($t0)')\n self.write_file('addu $t1 $t1 16')\n self.write_file('lw $t1 0($t1)')\n self.write_file('move $t2 $t3')\n self.write_file(f'_verify_ascii_sequences_{node.id}_:', tabbed=False)\n self.write_file('lb $a0 0($t0)')\n self.write_file('lb $a1 0($t1)')\n self.write_file(f'bne $a0 $a1 _eq_false_{node.id}_')\n self.write_file('addu $t0 $t0 1')\n self.write_file('addu $t1 $t1 1')\n self.write_file('addiu $t2 $t2 -1')\n self.write_file(f'bnez $t2 _verify_ascii_sequences_{node.id}_')\n self.write_file(f'b _eq_true_{node.id}_')\n self.write_file(f'_not_basic_type_{node.id}_:', tabbed=False)\n self.write_file(f'bne $t0 $t1 _eq_false_{node.id}_')\n self.write_file(f'b _eq_true_{node.id}_')\n self.write_file(f'_eq_int_bool_{node.id}:', tabbed=False)\n self.write_file('lw $a3 12($t0)')\n self.write_file('lw $t4 12($t1)')\n self.write_file(f'bne $a3 $t4 _eq_false_{node.id}_')\n self.write_file(f'_eq_true_{node.id}_:', tabbed=False)\n self.write_file('li $a0 1')\n self.write_file('sw $a0 {}($fp)'.format(self.offset[node.dest]))\n self.write_file(f'b end_equal_{node.id}_')\n self.write_file(f'_eq_false_{node.id}_:', tabbed=False)\n self.write_file('li $a0 0')\n self.write_file('sw $a0 {}($fp)'.format(self.offset[node.dest]))\n self.write_file(f'end_equal_{node.id}_:', tabbed=False)\n <mask token>\n\n @visitor.when(cil.EqualOrLessThan)\n def visit(self, node: cil.EqualOrLessThan):\n self.write_file('# <=')\n self.write_file('lw $a1, {}($fp)'.format(self.offset[node.left]))\n self.write_file('lw $a2, {}($fp)'.format(self.offset[node.right]))\n self.write_file('sle $a0, $a1, $a2'.format(self.offset[node.right]))\n self.write_file('sw $a0, {}($fp)'.format(self.offset[node.dest]))\n self.write_file('')\n\n @visitor.when(cil.GetAttrib)\n def visit(self, node: cil.GetAttrib):\n self.write_file('# GETATTR')\n self.write_file(f'lw $a1 {self.offset[node.instance]}($fp)')\n self.write_file(f'lw $a0 {12 + 4 * node.attribute}($a1)')\n self.write_file(f'sw $a0 {self.offset[node.dest]}($fp)')\n self.write_file('')\n\n @visitor.when(cil.SetAttrib)\n def visit(self, node: cil.SetAttrib):\n self.write_file('# SETATTR')\n self.write_file(f'lw $a1 {self.offset[node.instance]}($fp)')\n if isinstance(node.src, int):\n self.write_file(f'li $a0, {node.src}')\n elif node.src[:5] == 'data_':\n self.write_file(f'la $a0, {node.src}')\n else:\n self.write_file(f'lw $a0 {self.offset[node.src]}($fp)')\n self.write_file(f'sw $a0 {12 + 4 * node.attribute}($a1)')\n self.write_file('')\n\n @visitor.when(cil.TypeOf)\n def visit(self, node: cil.TypeOf):\n self.write_file('# TYPEOF')\n self.write_file(f'lw $a1 {self.offset[node.instance]}($fp)')\n self.write_file(f'lw $a0 0($a1)')\n self.write_file(f'sw $a0 {self.offset[node.dest]}($fp)')\n self.write_file('')\n\n @visitor.when(cil.Allocate)\n def visit(self, node: cil.Allocate):\n self.write_file('# ALLOCATE')\n if node.ttype == VOID_TYPE:\n self.write_file(f'la $v0 {VOID_MIPS_NAME}')\n self.write_file(f'sw $v0 {self.offset[node.dest]}($fp)')\n else:\n offset_proto = self.type_index.index(node.ttype) * 8\n self.write_file('lw $t0 {}($s0)'.format(offset_proto))\n self.write_file('sw $t0, 0($sp)')\n self.write_file('addiu $sp, $sp, -4')\n self.write_file('')\n self.visit(cil.Call(dest=node.dest, f='Object_copy'))\n self.write_file('addiu $sp, $sp, 4')\n self.write_file('')\n\n @visitor.when(cil.Call)\n def visit(self, node: cil.Call):\n self.write_file('# CALL')\n self.write_file(f'addiu $sp, $sp, -8')\n self.write_file(f'sw $ra, 4($sp)')\n self.write_file(f'sw $fp, 8($sp)')\n self.write_file(f'jal function_{node.f}')\n self.write_file(f'lw $fp, 8($sp)')\n self.write_file(f'lw $ra, 4($sp)')\n self.write_file(f'addiu $sp, $sp, 8')\n if node.dest:\n self.write_file(f'sw $v0 {self.offset[node.dest]}($fp)')\n self.write_file('')\n\n @visitor.when(cil.VCall)\n def visit(self, node: cil.VCall):\n self.write_file('# VCALL')\n self.write_file(f'addiu $sp, $sp, -8')\n self.write_file(f'sw $ra, 4($sp)')\n self.write_file(f'sw $fp, 8($sp)')\n if node.ttype[0] == '_':\n self.write_file(f'lw $a2, {self.offset[node.ttype]}($fp)')\n else:\n self.write_file(f'li $a2, {self.type_index.index(node.ttype)}')\n self.write_file(f'mulu $a2, $a2, 8')\n self.write_file(f'addu $a2, $a2, $s0')\n self.write_file(f'lw $a1, 0($a2)')\n self.write_file(f'lw $a2, 8($a1)')\n self.write_file(f'lw $a0 {node.f * 4}($a2)')\n self.write_file(f'jalr $a0')\n self.write_file(f'lw $fp, 8($sp)')\n self.write_file(f'lw $ra, 4($sp)')\n self.write_file(f'addiu $sp, $sp, 8')\n self.write_file(f'sw $v0 {self.offset[node.dest]}($fp)')\n if node.ttype[0] != '_':\n self.write_file(f'li $a2, {self.type_index.index(node.ttype)}')\n else:\n self.write_file(f'lw $a2, {self.offset[node.ttype]}($fp)')\n self.write_file('')\n\n @visitor.when(cil.PushParam)\n def visit(self, node: cil.PushParam):\n self.write_file('# PUSHPARAM')\n if node.name[0] != '_':\n self.write_file('li $a0, {}'.format(self.type_index.index(node.\n name)))\n else:\n self.write_file('lw $a0, {}($fp)'.format(self.offset[node.name]))\n self.push()\n self.write_file('')\n\n @visitor.when(cil.PopParam)\n def visit(self, node: cil.PopParam):\n self.write_file('# POPPARAM')\n self.pop(node.name)\n self.write_file('')\n\n @visitor.when(cil.Return)\n def visit(self, node: cil.Return):\n self.write_file('# RETURN')\n self.write_file('lw $v0, {}($fp)'.format(self.offset[node.value]))\n\n @visitor.when(cil.Label)\n def visit(self, node: cil.Label):\n self.write_file('_cil_label_{}:'.format(node.name), tabbed=False)\n\n @visitor.when(cil.Goto)\n def visit(self, node: cil.Goto):\n self.write_file('# GOTO')\n self.write_file('j _cil_label_{}'.format(node.label))\n self.write_file('')\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def object_copy(self):\n self.write_file('function_Object_copy:', tabbed=False)\n self.write_file(f'move $fp, $sp')\n self.write_file('lw $t0 12($fp)')\n self.write_file('lw $a0 4($t0)')\n self.write_file('move $t4 $a0')\n self.write_file('li $v0 9')\n self.write_file('syscall')\n self.write_file('move $t2 $v0')\n self.write_file('li $t3 0')\n self.write_file('_objcopy_loop:', tabbed=False)\n self.write_file('lw $t1 0($t0)')\n self.write_file('sw $t1 0($v0)')\n self.write_file('addiu $t0 $t0 4')\n self.write_file('addiu $v0 $v0 4')\n self.write_file('addiu $t3 $t3 4')\n self.write_file('ble $t4 $t3 _objcopy_loop')\n self.write_file('_objcopy_div_end_:', tabbed=False)\n self.write_file('move $v0 $t2')\n self.write_file('jr $ra')\n self.write_file('')\n\n def object_typename(self):\n self.write_file('function_Object_type_name:', tabbed=False)\n self.write_file(f'move $fp, $sp')\n self.visit(cil.Allocate(dest=None, ttype=STRING_CLASS))\n self.write_file('move $v1 $v0')\n self.visit(cil.Allocate(dest=None, ttype=INTEGER_CLASS))\n self.write_file('lw $a1 12($fp)')\n self.write_file('lw $a1 0($a1)')\n self.write_file('mulu $a1 $a1 4')\n self.write_file('addu $a1 $a1 $s1')\n self.write_file('lw $a1 0($a1)')\n self.write_file('move $a2 $0')\n self.write_file('move $t2 $a1')\n self.write_file('_str_len_clsname_:', tabbed=False)\n self.write_file('lb $a0 0($t2)')\n self.write_file('beq $a0 $0 _end_clsname_len_')\n self.write_file('addiu $a2 $a2 1')\n self.write_file('addiu $t2 $t2 1')\n self.write_file('j _str_len_clsname_')\n self.write_file('_end_clsname_len_:', tabbed=False)\n self.write_file('sw $a2, 12($v0)')\n self.write_file('sw $v0, 12($v1)')\n self.write_file('sw $a1, 16($v1)')\n self.write_file('move $v0 $v1')\n self.write_file('jr $ra')\n self.write_file('')\n\n def string_length(self):\n self.write_file('function_String_length:', tabbed=False)\n self.write_file(f'move $fp, $sp')\n self.write_file('lw $a0 12($fp)')\n self.write_file('lw $v0 12($a0)')\n self.write_file('jr $ra')\n self.write_file('')\n\n def string_concat(self):\n self.write_file('function_String_concat:', tabbed=False)\n self.write_file(f'move $fp, $sp')\n self.visit(cil.Allocate(dest=None, ttype=INTEGER_CLASS))\n self.write_file('move $v1 $v0')\n self.visit(cil.Allocate(dest=None, ttype=STRING_CLASS))\n self.write_file('move $t3 $v0')\n self.write_file('lw $a1 12($fp)')\n self.write_file('lw $a2 16($fp)')\n self.write_file('lw $t1 12($a1)')\n self.write_file('lw $t1 12($t1)')\n self.write_file('lw $t2 12($a2)')\n self.write_file('lw $t2 12($t2)')\n self.write_file('addu $t0 $t2 $t1')\n self.write_file('sw $t0 12($v1)')\n self.write_file('lw $a1 16($a1)')\n self.write_file('lw $a2 16($a2)')\n self.write_file('addiu $t0 $t0 1')\n self.allocate_memory('$t0', register=True)\n self.write_file('move $t5 $v0')\n self.write_file('move $t4 $a1')\n self.write_file('addu $a1 $a1 $t1')\n self.write_file('_strcat_copy_:', tabbed=False)\n self.write_file('beq $t4 $a1 _end_strcat_copy_')\n self.write_file('lb $a0 0($t4)')\n self.write_file('sb $a0 0($t5)')\n self.write_file('addiu $t5 $t5 1')\n self.write_file('addiu $t4 $t4 1')\n self.write_file('j _strcat_copy_')\n self.write_file('_end_strcat_copy_:', tabbed=False)\n self.write_file('move $t4 $a2')\n self.write_file('addu $a2 $a2 $t2')\n self.write_file('_strcat_copy_snd_:', tabbed=False)\n self.write_file('beq $t4 $a2 _end_strcat_copy_snd_')\n self.write_file('lb $a0 0($t4)')\n self.write_file('sb $a0 0($t5)')\n self.write_file('addiu $t5 $t5 1')\n self.write_file('addiu $t4 $t4 1')\n self.write_file('j _strcat_copy_snd_')\n self.write_file('_end_strcat_copy_snd_:', tabbed=False)\n self.write_file('sb $0 0($t5)')\n self.write_file('sw $v1 12($t3)')\n self.write_file('sw $v0 16($t3)')\n self.write_file('move $v0 $t3')\n self.write_file('jr $ra')\n self.write_file('')\n\n def string_substr(self):\n self.write_file('function_String_substr:', tabbed=False)\n self.write_file(f'move $fp, $sp')\n self.write_file(f'lw $t5 12($fp)')\n self.write_file(f'lw $a1 16($fp)')\n self.write_file(f'lw $a1 12($a1)')\n self.write_file(f'lw $a2 20($fp)')\n self.write_file(f'lw $a2 12($a2)')\n self.write_file(f'blt $a1 $0 _index_negative')\n self.write_file(f'blt $a2 $0 _index_negative')\n self.write_file(f'add $a2 $a1 $a2')\n self.write_file(f'lw $a3 12($t5)')\n self.write_file(f'lw $a3 12($a3)')\n self.write_file(f'bgt $a2 $a3 _index_out')\n self.visit(cil.Allocate(dest=None, ttype=STRING_CLASS))\n self.write_file(f'move $v1 $v0')\n self.visit(cil.Allocate(dest=None, ttype=INTEGER_CLASS))\n self.write_file(f'move $t0 $v0')\n self.write_file(f'move $t7 $a2')\n self.write_file(f'subu $t7 $t7 $a1')\n self.write_file(f'sw $t7 12($t0)')\n self.allocate_memory('$a2', register=True)\n self.write_file(f'sw $t0 12($v1)')\n self.write_file(f'sw $v0 16($v1)')\n self.write_file('move $t1 $v0')\n self.write_file('lw $t5 16($t5)')\n self.write_file('move $t4 $t5')\n self.write_file('addu $t4 $t4 $a1')\n self.write_file('addu $t5 $t5 $a2')\n self.write_file('_substr_copy_:', tabbed=False)\n self.write_file('bge $t4 $t5 _end_substr_copy_')\n self.write_file('lb $a0 0($t4)')\n self.write_file('sb $a0 0($t1)')\n self.write_file('addiu $t1 $t1 1')\n self.write_file('addiu $t4 $t4 1')\n self.write_file('j _substr_copy_')\n self.write_file(f'_index_negative:', tabbed=False)\n self.write_file(f'la $a0 _index_negative_msg')\n self.write_file(f'b _subst_abort')\n self.write_file(f'_index_out:', tabbed=False)\n self.write_file(f'la $a0 _index_out_msg')\n self.write_file(f'b _subst_abort')\n self.write_file(f'_subst_abort:', tabbed=False)\n self.write_file(f'li $v0 4')\n self.write_file(f'syscall')\n self.write_file('la\\t$a0 _abort_msg')\n self.write_file(f'li $v0 4')\n self.write_file(f'syscall')\n self.write_file(f'li $v0 10')\n self.write_file(f'syscall')\n self.write_file('_end_substr_copy_:', tabbed=False)\n self.write_file('move $v0 $v1')\n self.write_file('jr $ra')\n self.write_file('')\n <mask token>\n\n def io_in_string(self):\n self.write_file('function_IO_in_string:', tabbed=False)\n self.write_file(f'move $fp, $sp')\n self.visit(cil.Allocate(dest=None, ttype=INTEGER_CLASS))\n self.write_file('move $v1 $v0')\n self.visit(cil.Allocate(dest=None, ttype=STRING_CLASS))\n self.write_file('sw $v1 12($v0)')\n self.write_file('move $t5 $v0')\n self.write_file('la $a0 str_buffer')\n self.write_file('li $a1 1025')\n self.write_file('li $v0 8')\n self.write_file('syscall')\n self.write_file('move $a0 $0')\n self.write_file('la $t2 str_buffer')\n self.write_file('_in_string_str_len_:', tabbed=False)\n self.write_file('lb $t0 0($t2)')\n self.write_file('beq $t0 $0 _end_in_string_str_len_')\n self.write_file('beq $t0 10 _end_in_string_str_len_')\n self.write_file('addiu $a0 $a0 1')\n self.write_file('addiu $t2 $t2 1')\n self.write_file('j _in_string_str_len_')\n self.write_file('_end_in_string_str_len_:', tabbed=False)\n self.write_file('sw $a0 12($v1)')\n self.allocate_memory()\n self.write_file('la $t4 str_buffer')\n self.write_file('move $t1 $v0')\n self.write_file('_in_str_copy_:', tabbed=False)\n self.write_file('lb $t0 0($t4)')\n self.write_file('beq $t0 $0 _end_in_str_copy_')\n self.write_file('beq $t0 10 _end_in_str_copy_')\n self.write_file('sb $t0 0($t1)')\n self.write_file('addiu $t4 $t4 1')\n self.write_file('addiu $t1 $t1 1')\n self.write_file('j _in_str_copy_')\n self.write_file('_end_in_str_copy_:', tabbed=False)\n self.write_file('sw $v0 16($t5)')\n self.write_file('la $t4 str_buffer')\n self.write_file('_in_str_clean_:', tabbed=False)\n self.write_file('lb $t0 0($t4)')\n self.write_file('beq $t0 $0 _end_in_str_clean_')\n self.write_file('sb $0 0($t4)')\n self.write_file('addiu $t4 $t4 1')\n self.write_file('j _in_str_clean_')\n self.write_file('_end_in_str_clean_:', tabbed=False)\n self.write_file('move $v0 $t5')\n self.write_file('jr $ra')\n self.write_file('')\n\n def io_out_int(self):\n self.write_file('function_IO_out_int:', tabbed=False)\n self.write_file(f'move $fp, $sp')\n self.write_file('lw $a0 16($fp)')\n self.write_file('lw $a0 12($a0)')\n self.write_file('li $v0 1')\n self.write_file('syscall')\n self.write_file('lw $v0 12($fp)')\n self.write_file('jr $ra')\n self.write_file('')\n\n def io_out_string(self):\n self.write_file('function_IO_out_string:', tabbed=False)\n self.write_file(f'move $fp, $sp')\n self.write_file('lw $a0 16($fp)')\n self.write_file('lw $a0 16($a0)')\n self.write_file('li $v0 4')\n self.write_file('syscall')\n self.write_file('lw $v0 12($fp)')\n self.write_file('jr $ra')\n self.write_file('')\n <mask token>\n\n def isvoid(self):\n self.write_file(f'function_{ISVOID_FUNC}:', tabbed=False)\n self.write_file(f'move $fp, $sp')\n self.visit(cil.Allocate(dest=None, ttype=BOOLEAN_CLASS))\n self.write_file(f'lw $t0 12($fp)')\n self.write_file(f'la $t1 {VOID_MIPS_NAME}')\n self.write_file(f'beq $t0 $t1 _is_void_true_')\n self.write_file(f'sw $0 12($v0)')\n self.write_file(f'j _is_void_end_')\n self.write_file(f'_is_void_true_:', tabbed=False)\n self.write_file(f'li $t0 1')\n self.write_file(f'sw $t0 12($v0)')\n self.write_file(f'_is_void_end_:', tabbed=False)\n self.write_file(f'jr $ra')\n self.write_file(f'')\n",
"step-3": "<mask token>\n\n\nclass MipsVisitor:\n <mask token>\n\n def __init__(self, inherit_graph, output_file='mips_code.mips'):\n self.inherit_graph, _ = inherit_graph\n self.offset = dict()\n self.type_index = []\n self.dispatchtable_code = []\n self.prototypes_code = []\n self.cur_labels_id = 0\n self.output_file = output_file\n\n def push(self):\n self.write_file('sw $a0 0($sp)')\n self.write_file('addiu $sp $sp -4')\n <mask token>\n\n def write_file(self, msg, mode='a', tabbed=True):\n f = open(self.output_file, mode)\n f.write('{}{}\\n'.format('\\t' if tabbed else '', msg))\n f.close()\n\n def allocate_memory(self, size=None, register=False):\n if register:\n self.write_file('move $a0 {}'.format(size))\n elif size:\n self.write_file('li $a0 {}'.format(size))\n self.write_file('li $v0 9')\n self.write_file('syscall')\n <mask token>\n <mask token>\n\n @visitor.when(cil.Program)\n def visit(self, node: cil.Program):\n self.write_file('', 'w')\n self.write_file('.data', tabbed=False)\n self.static_datas()\n for data in node.data_section:\n self.visit(data)\n self.write_file('')\n for i in range(len(node.type_section)):\n self.type_index.append(node.type_section[i].type_name)\n self.write_file('classname_{}: .asciiz \"{}\"'.format(node.\n type_section[i].type_name, node.type_section[i].type_name))\n self.write_file(f'{VOID_MIPS_NAME}: .asciiz \"\"')\n self.write_file('\\n.text')\n self.entry()\n self.write_file('\\n########## STATIC FUNCTIONS ##########\\n')\n self.conforms()\n self.isvoid()\n self.object_abort()\n self.object_copy()\n self.object_typename()\n self.string_length()\n self.string_concat()\n self.string_substr()\n self.io_in_int()\n self.io_in_string()\n self.io_out_int()\n self.io_out_string()\n for t in node.type_section:\n self.visit(t)\n self.write_file('\\n############## TABLES ################\\n')\n self.write_file('function_build_class_name_table:', tabbed=False)\n self.allocate_memory(len(node.type_section) * 4)\n self.write_file('move $s1 $v0')\n for i in range(len(node.type_section)):\n self.write_file('la $t1 classname_{}'.format(node.type_section[\n i].type_name))\n self.write_file('sw $t1 {}($s1)'.format(4 * i))\n self.write_file('')\n self.write_file('function_allocate_prototypes_table:', tabbed=False)\n self.allocate_memory(8 * len(self.type_index))\n self.write_file('move $s0 $v0')\n self.write_file('')\n self.write_file('function_build_prototypes:', tabbed=False)\n for ins in self.prototypes_code:\n self.write_file(ins)\n self.write_file('')\n self.write_file('function_build_dispatch_tables:', tabbed=False)\n for ins in self.dispatchtable_code:\n self.write_file(ins)\n self.write_file('')\n self.write_file('function_build_class_parents_table:', tabbed=False)\n self.allocate_memory(4 * len(self.type_index))\n self.write_file('move $s2 $v0')\n self.write_file('')\n for parent in self.inherit_graph.keys():\n p_index = self.type_index.index(parent)\n for child in self.inherit_graph[parent]:\n ch_index = self.type_index.index(child.name)\n self.write_file(f'li $t0 {ch_index}')\n self.write_file(f'mul $t0 $t0 4')\n self.write_file(f'add $t0 $t0 $s2')\n self.write_file(f'li $t1 {p_index}')\n self.write_file(f'sw $t1 0($t0)')\n self.write_file('')\n self.write_file('')\n self.write_file('\\n########### COOL FUNCTIONS ##########\\n')\n for func in node.code_section:\n is_built_in = False\n if not INIT_CIL_SUFFIX in func.name:\n is_built_in = [x for x in BUILT_IN_CLASSES if f'{x}_' in\n func.name] != []\n if not is_built_in:\n self.visit(func)\n self.write_file('\\n#####################################\\n')\n <mask token>\n\n @visitor.when(cil.Type)\n def visit(self, node: cil.Type):\n self.dispatchtable_code.append(f'# Type {node.type_name}')\n self.dispatchtable_code.append('li $a0 {}'.format(4 * len(node.\n methods)))\n self.dispatchtable_code.append('li $v0 9')\n self.dispatchtable_code.append('syscall')\n for i in range(len(node.methods)):\n self.dispatchtable_code.append('la $t1 function_{}'.format(node\n .methods[i].function_name))\n self.dispatchtable_code.append('sw $t1 {}($v0)'.format(4 * i))\n self.dispatchtable_code.append('lw $t0 {}($s0)'.format(8 * self.\n type_index.index(node.type_name)))\n self.dispatchtable_code.append('sw $v0 8($t0)')\n self.dispatchtable_code.append('')\n self.prototypes_code.append(f'# Type {node.type_name}')\n self.prototypes_code.append('li $a0 {}'.format(12 + 4 * len(node.\n attributes)))\n self.prototypes_code.append('li $v0 9')\n self.prototypes_code.append('syscall')\n class_index = self.type_index.index(node.type_name)\n self.prototypes_code.append('li $a0 {}'.format(class_index))\n self.prototypes_code.append('sw $a0 0($v0)')\n self.prototypes_code.append('li $a0 {}'.format(12 + 4 * len(node.\n attributes)))\n self.prototypes_code.append('sw $a0 4($v0)')\n self.prototypes_code.append('sw $v0 {}($s0)'.format(8 * class_index))\n self.prototypes_code.append('')\n\n @visitor.when(cil.Function)\n def visit(self, node: cil.Function):\n self.write_file(f'function_{node.name}:', tabbed=False)\n self.write_file(f'move $fp, $sp')\n self.write_file(f'subiu $sp, $sp, {4 * len(node.vlocals)}')\n for i in range(len(node.args)):\n self.offset[node.args[i].name] = 12 + i * 4\n for i in range(len(node.vlocals)):\n self.offset[node.vlocals[i].name] = i * -4\n for inst in node.body:\n if isinstance(inst, cil.Equal) or isinstance(inst, cil.Div):\n inst.id = self.new_labels_id()\n self.visit(inst)\n self.write_file(f'addiu $sp, $sp, {4 * len(node.vlocals)}')\n self.write_file('jr $ra')\n self.write_file('')\n\n @visitor.when(cil.Assign)\n def visit(self, node: cil.Assign):\n self.write_file('# ASSIGN')\n self.write_file('lw $a0, {}($fp)'.format(self.offset[node.source]))\n self.write_file('sw $a0, {}($fp)'.format(self.offset[node.dest]))\n self.write_file('')\n <mask token>\n <mask token>\n\n @visitor.when(cil.Mult)\n def visit(self, node: cil.Mult):\n self.write_file('# *')\n self.write_file('lw $a0, {}($fp)'.format(self.offset[node.left]))\n self.write_file('lw $a1, {}($fp)'.format(self.offset[node.right]))\n self.write_file('mul $a0, $a0, $a1')\n self.write_file('sw $a0, {}($fp)'.format(self.offset[node.dest]))\n self.write_file('')\n <mask token>\n\n @visitor.when(cil.Equal)\n def visit(self, node: cil.Equal):\n self.write_file('lw $t0 {}($fp)'.format(self.offset[node.left]))\n self.write_file('lw $t1 {}($fp)'.format(self.offset[node.right]))\n self.write_file(f'beq $t0 $zero _eq_false_{node.id}_')\n self.write_file(f'beq $t1 $zero _eq_false_{node.id}_')\n self.write_file('lw $a0 0($t0)')\n self.write_file('lw $a1 0($t1)')\n self.write_file(f'bne $a0 $a1 _eq_false_{node.id}_')\n self.write_file('li $a2 {}'.format(self.type_index.index(\n INTEGER_CLASS)))\n self.write_file(f'beq $a0 $a2 _eq_int_bool_{node.id}')\n self.write_file('li $a2 {}'.format(self.type_index.index(\n BOOLEAN_CLASS)))\n self.write_file(f'beq $a0 $a2 _eq_int_bool_{node.id}')\n self.write_file('li $a2 {}'.format(self.type_index.index(STRING_CLASS))\n )\n self.write_file(f'bne $a0 $a2 _not_basic_type_{node.id}_')\n self.write_file(f'_eq_str_{node.id}_:', tabbed=False)\n self.write_file('lw\\t$t3 12($t0)')\n self.write_file('lw\\t$t3 12($t3)')\n self.write_file('lw\\t$t4, 12($t1)')\n self.write_file('lw\\t$t4, 12($t4)')\n self.write_file(f'bne $t3 $t4 _eq_false_{node.id}_')\n self.write_file(f'beq $t3 $0 _eq_true_{node.id}_')\n self.write_file('addu $t0 $t0 16')\n self.write_file('lw $t0 0($t0)')\n self.write_file('addu $t1 $t1 16')\n self.write_file('lw $t1 0($t1)')\n self.write_file('move $t2 $t3')\n self.write_file(f'_verify_ascii_sequences_{node.id}_:', tabbed=False)\n self.write_file('lb $a0 0($t0)')\n self.write_file('lb $a1 0($t1)')\n self.write_file(f'bne $a0 $a1 _eq_false_{node.id}_')\n self.write_file('addu $t0 $t0 1')\n self.write_file('addu $t1 $t1 1')\n self.write_file('addiu $t2 $t2 -1')\n self.write_file(f'bnez $t2 _verify_ascii_sequences_{node.id}_')\n self.write_file(f'b _eq_true_{node.id}_')\n self.write_file(f'_not_basic_type_{node.id}_:', tabbed=False)\n self.write_file(f'bne $t0 $t1 _eq_false_{node.id}_')\n self.write_file(f'b _eq_true_{node.id}_')\n self.write_file(f'_eq_int_bool_{node.id}:', tabbed=False)\n self.write_file('lw $a3 12($t0)')\n self.write_file('lw $t4 12($t1)')\n self.write_file(f'bne $a3 $t4 _eq_false_{node.id}_')\n self.write_file(f'_eq_true_{node.id}_:', tabbed=False)\n self.write_file('li $a0 1')\n self.write_file('sw $a0 {}($fp)'.format(self.offset[node.dest]))\n self.write_file(f'b end_equal_{node.id}_')\n self.write_file(f'_eq_false_{node.id}_:', tabbed=False)\n self.write_file('li $a0 0')\n self.write_file('sw $a0 {}($fp)'.format(self.offset[node.dest]))\n self.write_file(f'end_equal_{node.id}_:', tabbed=False)\n <mask token>\n\n @visitor.when(cil.EqualOrLessThan)\n def visit(self, node: cil.EqualOrLessThan):\n self.write_file('# <=')\n self.write_file('lw $a1, {}($fp)'.format(self.offset[node.left]))\n self.write_file('lw $a2, {}($fp)'.format(self.offset[node.right]))\n self.write_file('sle $a0, $a1, $a2'.format(self.offset[node.right]))\n self.write_file('sw $a0, {}($fp)'.format(self.offset[node.dest]))\n self.write_file('')\n\n @visitor.when(cil.GetAttrib)\n def visit(self, node: cil.GetAttrib):\n self.write_file('# GETATTR')\n self.write_file(f'lw $a1 {self.offset[node.instance]}($fp)')\n self.write_file(f'lw $a0 {12 + 4 * node.attribute}($a1)')\n self.write_file(f'sw $a0 {self.offset[node.dest]}($fp)')\n self.write_file('')\n\n @visitor.when(cil.SetAttrib)\n def visit(self, node: cil.SetAttrib):\n self.write_file('# SETATTR')\n self.write_file(f'lw $a1 {self.offset[node.instance]}($fp)')\n if isinstance(node.src, int):\n self.write_file(f'li $a0, {node.src}')\n elif node.src[:5] == 'data_':\n self.write_file(f'la $a0, {node.src}')\n else:\n self.write_file(f'lw $a0 {self.offset[node.src]}($fp)')\n self.write_file(f'sw $a0 {12 + 4 * node.attribute}($a1)')\n self.write_file('')\n\n @visitor.when(cil.TypeOf)\n def visit(self, node: cil.TypeOf):\n self.write_file('# TYPEOF')\n self.write_file(f'lw $a1 {self.offset[node.instance]}($fp)')\n self.write_file(f'lw $a0 0($a1)')\n self.write_file(f'sw $a0 {self.offset[node.dest]}($fp)')\n self.write_file('')\n\n @visitor.when(cil.Allocate)\n def visit(self, node: cil.Allocate):\n self.write_file('# ALLOCATE')\n if node.ttype == VOID_TYPE:\n self.write_file(f'la $v0 {VOID_MIPS_NAME}')\n self.write_file(f'sw $v0 {self.offset[node.dest]}($fp)')\n else:\n offset_proto = self.type_index.index(node.ttype) * 8\n self.write_file('lw $t0 {}($s0)'.format(offset_proto))\n self.write_file('sw $t0, 0($sp)')\n self.write_file('addiu $sp, $sp, -4')\n self.write_file('')\n self.visit(cil.Call(dest=node.dest, f='Object_copy'))\n self.write_file('addiu $sp, $sp, 4')\n self.write_file('')\n\n @visitor.when(cil.Call)\n def visit(self, node: cil.Call):\n self.write_file('# CALL')\n self.write_file(f'addiu $sp, $sp, -8')\n self.write_file(f'sw $ra, 4($sp)')\n self.write_file(f'sw $fp, 8($sp)')\n self.write_file(f'jal function_{node.f}')\n self.write_file(f'lw $fp, 8($sp)')\n self.write_file(f'lw $ra, 4($sp)')\n self.write_file(f'addiu $sp, $sp, 8')\n if node.dest:\n self.write_file(f'sw $v0 {self.offset[node.dest]}($fp)')\n self.write_file('')\n\n @visitor.when(cil.VCall)\n def visit(self, node: cil.VCall):\n self.write_file('# VCALL')\n self.write_file(f'addiu $sp, $sp, -8')\n self.write_file(f'sw $ra, 4($sp)')\n self.write_file(f'sw $fp, 8($sp)')\n if node.ttype[0] == '_':\n self.write_file(f'lw $a2, {self.offset[node.ttype]}($fp)')\n else:\n self.write_file(f'li $a2, {self.type_index.index(node.ttype)}')\n self.write_file(f'mulu $a2, $a2, 8')\n self.write_file(f'addu $a2, $a2, $s0')\n self.write_file(f'lw $a1, 0($a2)')\n self.write_file(f'lw $a2, 8($a1)')\n self.write_file(f'lw $a0 {node.f * 4}($a2)')\n self.write_file(f'jalr $a0')\n self.write_file(f'lw $fp, 8($sp)')\n self.write_file(f'lw $ra, 4($sp)')\n self.write_file(f'addiu $sp, $sp, 8')\n self.write_file(f'sw $v0 {self.offset[node.dest]}($fp)')\n if node.ttype[0] != '_':\n self.write_file(f'li $a2, {self.type_index.index(node.ttype)}')\n else:\n self.write_file(f'lw $a2, {self.offset[node.ttype]}($fp)')\n self.write_file('')\n\n @visitor.when(cil.PushParam)\n def visit(self, node: cil.PushParam):\n self.write_file('# PUSHPARAM')\n if node.name[0] != '_':\n self.write_file('li $a0, {}'.format(self.type_index.index(node.\n name)))\n else:\n self.write_file('lw $a0, {}($fp)'.format(self.offset[node.name]))\n self.push()\n self.write_file('')\n\n @visitor.when(cil.PopParam)\n def visit(self, node: cil.PopParam):\n self.write_file('# POPPARAM')\n self.pop(node.name)\n self.write_file('')\n\n @visitor.when(cil.Return)\n def visit(self, node: cil.Return):\n self.write_file('# RETURN')\n self.write_file('lw $v0, {}($fp)'.format(self.offset[node.value]))\n\n @visitor.when(cil.Label)\n def visit(self, node: cil.Label):\n self.write_file('_cil_label_{}:'.format(node.name), tabbed=False)\n\n @visitor.when(cil.Goto)\n def visit(self, node: cil.Goto):\n self.write_file('# GOTO')\n self.write_file('j _cil_label_{}'.format(node.label))\n self.write_file('')\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def object_copy(self):\n self.write_file('function_Object_copy:', tabbed=False)\n self.write_file(f'move $fp, $sp')\n self.write_file('lw $t0 12($fp)')\n self.write_file('lw $a0 4($t0)')\n self.write_file('move $t4 $a0')\n self.write_file('li $v0 9')\n self.write_file('syscall')\n self.write_file('move $t2 $v0')\n self.write_file('li $t3 0')\n self.write_file('_objcopy_loop:', tabbed=False)\n self.write_file('lw $t1 0($t0)')\n self.write_file('sw $t1 0($v0)')\n self.write_file('addiu $t0 $t0 4')\n self.write_file('addiu $v0 $v0 4')\n self.write_file('addiu $t3 $t3 4')\n self.write_file('ble $t4 $t3 _objcopy_loop')\n self.write_file('_objcopy_div_end_:', tabbed=False)\n self.write_file('move $v0 $t2')\n self.write_file('jr $ra')\n self.write_file('')\n\n def object_typename(self):\n self.write_file('function_Object_type_name:', tabbed=False)\n self.write_file(f'move $fp, $sp')\n self.visit(cil.Allocate(dest=None, ttype=STRING_CLASS))\n self.write_file('move $v1 $v0')\n self.visit(cil.Allocate(dest=None, ttype=INTEGER_CLASS))\n self.write_file('lw $a1 12($fp)')\n self.write_file('lw $a1 0($a1)')\n self.write_file('mulu $a1 $a1 4')\n self.write_file('addu $a1 $a1 $s1')\n self.write_file('lw $a1 0($a1)')\n self.write_file('move $a2 $0')\n self.write_file('move $t2 $a1')\n self.write_file('_str_len_clsname_:', tabbed=False)\n self.write_file('lb $a0 0($t2)')\n self.write_file('beq $a0 $0 _end_clsname_len_')\n self.write_file('addiu $a2 $a2 1')\n self.write_file('addiu $t2 $t2 1')\n self.write_file('j _str_len_clsname_')\n self.write_file('_end_clsname_len_:', tabbed=False)\n self.write_file('sw $a2, 12($v0)')\n self.write_file('sw $v0, 12($v1)')\n self.write_file('sw $a1, 16($v1)')\n self.write_file('move $v0 $v1')\n self.write_file('jr $ra')\n self.write_file('')\n\n def string_length(self):\n self.write_file('function_String_length:', tabbed=False)\n self.write_file(f'move $fp, $sp')\n self.write_file('lw $a0 12($fp)')\n self.write_file('lw $v0 12($a0)')\n self.write_file('jr $ra')\n self.write_file('')\n\n def string_concat(self):\n self.write_file('function_String_concat:', tabbed=False)\n self.write_file(f'move $fp, $sp')\n self.visit(cil.Allocate(dest=None, ttype=INTEGER_CLASS))\n self.write_file('move $v1 $v0')\n self.visit(cil.Allocate(dest=None, ttype=STRING_CLASS))\n self.write_file('move $t3 $v0')\n self.write_file('lw $a1 12($fp)')\n self.write_file('lw $a2 16($fp)')\n self.write_file('lw $t1 12($a1)')\n self.write_file('lw $t1 12($t1)')\n self.write_file('lw $t2 12($a2)')\n self.write_file('lw $t2 12($t2)')\n self.write_file('addu $t0 $t2 $t1')\n self.write_file('sw $t0 12($v1)')\n self.write_file('lw $a1 16($a1)')\n self.write_file('lw $a2 16($a2)')\n self.write_file('addiu $t0 $t0 1')\n self.allocate_memory('$t0', register=True)\n self.write_file('move $t5 $v0')\n self.write_file('move $t4 $a1')\n self.write_file('addu $a1 $a1 $t1')\n self.write_file('_strcat_copy_:', tabbed=False)\n self.write_file('beq $t4 $a1 _end_strcat_copy_')\n self.write_file('lb $a0 0($t4)')\n self.write_file('sb $a0 0($t5)')\n self.write_file('addiu $t5 $t5 1')\n self.write_file('addiu $t4 $t4 1')\n self.write_file('j _strcat_copy_')\n self.write_file('_end_strcat_copy_:', tabbed=False)\n self.write_file('move $t4 $a2')\n self.write_file('addu $a2 $a2 $t2')\n self.write_file('_strcat_copy_snd_:', tabbed=False)\n self.write_file('beq $t4 $a2 _end_strcat_copy_snd_')\n self.write_file('lb $a0 0($t4)')\n self.write_file('sb $a0 0($t5)')\n self.write_file('addiu $t5 $t5 1')\n self.write_file('addiu $t4 $t4 1')\n self.write_file('j _strcat_copy_snd_')\n self.write_file('_end_strcat_copy_snd_:', tabbed=False)\n self.write_file('sb $0 0($t5)')\n self.write_file('sw $v1 12($t3)')\n self.write_file('sw $v0 16($t3)')\n self.write_file('move $v0 $t3')\n self.write_file('jr $ra')\n self.write_file('')\n\n def string_substr(self):\n self.write_file('function_String_substr:', tabbed=False)\n self.write_file(f'move $fp, $sp')\n self.write_file(f'lw $t5 12($fp)')\n self.write_file(f'lw $a1 16($fp)')\n self.write_file(f'lw $a1 12($a1)')\n self.write_file(f'lw $a2 20($fp)')\n self.write_file(f'lw $a2 12($a2)')\n self.write_file(f'blt $a1 $0 _index_negative')\n self.write_file(f'blt $a2 $0 _index_negative')\n self.write_file(f'add $a2 $a1 $a2')\n self.write_file(f'lw $a3 12($t5)')\n self.write_file(f'lw $a3 12($a3)')\n self.write_file(f'bgt $a2 $a3 _index_out')\n self.visit(cil.Allocate(dest=None, ttype=STRING_CLASS))\n self.write_file(f'move $v1 $v0')\n self.visit(cil.Allocate(dest=None, ttype=INTEGER_CLASS))\n self.write_file(f'move $t0 $v0')\n self.write_file(f'move $t7 $a2')\n self.write_file(f'subu $t7 $t7 $a1')\n self.write_file(f'sw $t7 12($t0)')\n self.allocate_memory('$a2', register=True)\n self.write_file(f'sw $t0 12($v1)')\n self.write_file(f'sw $v0 16($v1)')\n self.write_file('move $t1 $v0')\n self.write_file('lw $t5 16($t5)')\n self.write_file('move $t4 $t5')\n self.write_file('addu $t4 $t4 $a1')\n self.write_file('addu $t5 $t5 $a2')\n self.write_file('_substr_copy_:', tabbed=False)\n self.write_file('bge $t4 $t5 _end_substr_copy_')\n self.write_file('lb $a0 0($t4)')\n self.write_file('sb $a0 0($t1)')\n self.write_file('addiu $t1 $t1 1')\n self.write_file('addiu $t4 $t4 1')\n self.write_file('j _substr_copy_')\n self.write_file(f'_index_negative:', tabbed=False)\n self.write_file(f'la $a0 _index_negative_msg')\n self.write_file(f'b _subst_abort')\n self.write_file(f'_index_out:', tabbed=False)\n self.write_file(f'la $a0 _index_out_msg')\n self.write_file(f'b _subst_abort')\n self.write_file(f'_subst_abort:', tabbed=False)\n self.write_file(f'li $v0 4')\n self.write_file(f'syscall')\n self.write_file('la\\t$a0 _abort_msg')\n self.write_file(f'li $v0 4')\n self.write_file(f'syscall')\n self.write_file(f'li $v0 10')\n self.write_file(f'syscall')\n self.write_file('_end_substr_copy_:', tabbed=False)\n self.write_file('move $v0 $v1')\n self.write_file('jr $ra')\n self.write_file('')\n <mask token>\n\n def io_in_string(self):\n self.write_file('function_IO_in_string:', tabbed=False)\n self.write_file(f'move $fp, $sp')\n self.visit(cil.Allocate(dest=None, ttype=INTEGER_CLASS))\n self.write_file('move $v1 $v0')\n self.visit(cil.Allocate(dest=None, ttype=STRING_CLASS))\n self.write_file('sw $v1 12($v0)')\n self.write_file('move $t5 $v0')\n self.write_file('la $a0 str_buffer')\n self.write_file('li $a1 1025')\n self.write_file('li $v0 8')\n self.write_file('syscall')\n self.write_file('move $a0 $0')\n self.write_file('la $t2 str_buffer')\n self.write_file('_in_string_str_len_:', tabbed=False)\n self.write_file('lb $t0 0($t2)')\n self.write_file('beq $t0 $0 _end_in_string_str_len_')\n self.write_file('beq $t0 10 _end_in_string_str_len_')\n self.write_file('addiu $a0 $a0 1')\n self.write_file('addiu $t2 $t2 1')\n self.write_file('j _in_string_str_len_')\n self.write_file('_end_in_string_str_len_:', tabbed=False)\n self.write_file('sw $a0 12($v1)')\n self.allocate_memory()\n self.write_file('la $t4 str_buffer')\n self.write_file('move $t1 $v0')\n self.write_file('_in_str_copy_:', tabbed=False)\n self.write_file('lb $t0 0($t4)')\n self.write_file('beq $t0 $0 _end_in_str_copy_')\n self.write_file('beq $t0 10 _end_in_str_copy_')\n self.write_file('sb $t0 0($t1)')\n self.write_file('addiu $t4 $t4 1')\n self.write_file('addiu $t1 $t1 1')\n self.write_file('j _in_str_copy_')\n self.write_file('_end_in_str_copy_:', tabbed=False)\n self.write_file('sw $v0 16($t5)')\n self.write_file('la $t4 str_buffer')\n self.write_file('_in_str_clean_:', tabbed=False)\n self.write_file('lb $t0 0($t4)')\n self.write_file('beq $t0 $0 _end_in_str_clean_')\n self.write_file('sb $0 0($t4)')\n self.write_file('addiu $t4 $t4 1')\n self.write_file('j _in_str_clean_')\n self.write_file('_end_in_str_clean_:', tabbed=False)\n self.write_file('move $v0 $t5')\n self.write_file('jr $ra')\n self.write_file('')\n\n def io_out_int(self):\n self.write_file('function_IO_out_int:', tabbed=False)\n self.write_file(f'move $fp, $sp')\n self.write_file('lw $a0 16($fp)')\n self.write_file('lw $a0 12($a0)')\n self.write_file('li $v0 1')\n self.write_file('syscall')\n self.write_file('lw $v0 12($fp)')\n self.write_file('jr $ra')\n self.write_file('')\n\n def io_out_string(self):\n self.write_file('function_IO_out_string:', tabbed=False)\n self.write_file(f'move $fp, $sp')\n self.write_file('lw $a0 16($fp)')\n self.write_file('lw $a0 16($a0)')\n self.write_file('li $v0 4')\n self.write_file('syscall')\n self.write_file('lw $v0 12($fp)')\n self.write_file('jr $ra')\n self.write_file('')\n <mask token>\n\n def isvoid(self):\n self.write_file(f'function_{ISVOID_FUNC}:', tabbed=False)\n self.write_file(f'move $fp, $sp')\n self.visit(cil.Allocate(dest=None, ttype=BOOLEAN_CLASS))\n self.write_file(f'lw $t0 12($fp)')\n self.write_file(f'la $t1 {VOID_MIPS_NAME}')\n self.write_file(f'beq $t0 $t1 _is_void_true_')\n self.write_file(f'sw $0 12($v0)')\n self.write_file(f'j _is_void_end_')\n self.write_file(f'_is_void_true_:', tabbed=False)\n self.write_file(f'li $t0 1')\n self.write_file(f'sw $t0 12($v0)')\n self.write_file(f'_is_void_end_:', tabbed=False)\n self.write_file(f'jr $ra')\n self.write_file(f'')\n",
"step-4": "<mask token>\nsys.path.append('..')\n<mask token>\n\n\nclass MipsVisitor:\n \"\"\"\n\tMips Visitor Class.\n\n\tThis visitor will process the AST of the generated CIL and write the mips code to a file.\n\t\"\"\"\n\n def __init__(self, inherit_graph, output_file='mips_code.mips'):\n self.inherit_graph, _ = inherit_graph\n self.offset = dict()\n self.type_index = []\n self.dispatchtable_code = []\n self.prototypes_code = []\n self.cur_labels_id = 0\n self.output_file = output_file\n\n def push(self):\n self.write_file('sw $a0 0($sp)')\n self.write_file('addiu $sp $sp -4')\n\n def pop(self, dest=None):\n self.write_file(f'addiu $sp $sp 4')\n\n def write_file(self, msg, mode='a', tabbed=True):\n f = open(self.output_file, mode)\n f.write('{}{}\\n'.format('\\t' if tabbed else '', msg))\n f.close()\n\n def allocate_memory(self, size=None, register=False):\n if register:\n self.write_file('move $a0 {}'.format(size))\n elif size:\n self.write_file('li $a0 {}'.format(size))\n self.write_file('li $v0 9')\n self.write_file('syscall')\n\n def new_labels_id(self):\n self.cur_labels_id += 1\n return self.cur_labels_id\n\n @visitor.on('node')\n def visit(self, node):\n pass\n\n @visitor.when(cil.Program)\n def visit(self, node: cil.Program):\n self.write_file('', 'w')\n self.write_file('.data', tabbed=False)\n self.static_datas()\n for data in node.data_section:\n self.visit(data)\n self.write_file('')\n for i in range(len(node.type_section)):\n self.type_index.append(node.type_section[i].type_name)\n self.write_file('classname_{}: .asciiz \"{}\"'.format(node.\n type_section[i].type_name, node.type_section[i].type_name))\n self.write_file(f'{VOID_MIPS_NAME}: .asciiz \"\"')\n self.write_file('\\n.text')\n self.entry()\n self.write_file('\\n########## STATIC FUNCTIONS ##########\\n')\n self.conforms()\n self.isvoid()\n self.object_abort()\n self.object_copy()\n self.object_typename()\n self.string_length()\n self.string_concat()\n self.string_substr()\n self.io_in_int()\n self.io_in_string()\n self.io_out_int()\n self.io_out_string()\n for t in node.type_section:\n self.visit(t)\n self.write_file('\\n############## TABLES ################\\n')\n self.write_file('function_build_class_name_table:', tabbed=False)\n self.allocate_memory(len(node.type_section) * 4)\n self.write_file('move $s1 $v0')\n for i in range(len(node.type_section)):\n self.write_file('la $t1 classname_{}'.format(node.type_section[\n i].type_name))\n self.write_file('sw $t1 {}($s1)'.format(4 * i))\n self.write_file('')\n self.write_file('function_allocate_prototypes_table:', tabbed=False)\n self.allocate_memory(8 * len(self.type_index))\n self.write_file('move $s0 $v0')\n self.write_file('')\n self.write_file('function_build_prototypes:', tabbed=False)\n for ins in self.prototypes_code:\n self.write_file(ins)\n self.write_file('')\n self.write_file('function_build_dispatch_tables:', tabbed=False)\n for ins in self.dispatchtable_code:\n self.write_file(ins)\n self.write_file('')\n self.write_file('function_build_class_parents_table:', tabbed=False)\n self.allocate_memory(4 * len(self.type_index))\n self.write_file('move $s2 $v0')\n self.write_file('')\n for parent in self.inherit_graph.keys():\n p_index = self.type_index.index(parent)\n for child in self.inherit_graph[parent]:\n ch_index = self.type_index.index(child.name)\n self.write_file(f'li $t0 {ch_index}')\n self.write_file(f'mul $t0 $t0 4')\n self.write_file(f'add $t0 $t0 $s2')\n self.write_file(f'li $t1 {p_index}')\n self.write_file(f'sw $t1 0($t0)')\n self.write_file('')\n self.write_file('')\n self.write_file('\\n########### COOL FUNCTIONS ##########\\n')\n for func in node.code_section:\n is_built_in = False\n if not INIT_CIL_SUFFIX in func.name:\n is_built_in = [x for x in BUILT_IN_CLASSES if f'{x}_' in\n func.name] != []\n if not is_built_in:\n self.visit(func)\n self.write_file('\\n#####################################\\n')\n\n @visitor.when(cil.Data)\n def visit(self, node: cil.Data):\n self.write_file(\n f'{node.dest}: .asciiz \"{str(node.value.encode())[2:-1]}\"')\n\n @visitor.when(cil.Type)\n def visit(self, node: cil.Type):\n self.dispatchtable_code.append(f'# Type {node.type_name}')\n self.dispatchtable_code.append('li $a0 {}'.format(4 * len(node.\n methods)))\n self.dispatchtable_code.append('li $v0 9')\n self.dispatchtable_code.append('syscall')\n for i in range(len(node.methods)):\n self.dispatchtable_code.append('la $t1 function_{}'.format(node\n .methods[i].function_name))\n self.dispatchtable_code.append('sw $t1 {}($v0)'.format(4 * i))\n self.dispatchtable_code.append('lw $t0 {}($s0)'.format(8 * self.\n type_index.index(node.type_name)))\n self.dispatchtable_code.append('sw $v0 8($t0)')\n self.dispatchtable_code.append('')\n self.prototypes_code.append(f'# Type {node.type_name}')\n self.prototypes_code.append('li $a0 {}'.format(12 + 4 * len(node.\n attributes)))\n self.prototypes_code.append('li $v0 9')\n self.prototypes_code.append('syscall')\n class_index = self.type_index.index(node.type_name)\n self.prototypes_code.append('li $a0 {}'.format(class_index))\n self.prototypes_code.append('sw $a0 0($v0)')\n self.prototypes_code.append('li $a0 {}'.format(12 + 4 * len(node.\n attributes)))\n self.prototypes_code.append('sw $a0 4($v0)')\n self.prototypes_code.append('sw $v0 {}($s0)'.format(8 * class_index))\n self.prototypes_code.append('')\n\n @visitor.when(cil.Function)\n def visit(self, node: cil.Function):\n self.write_file(f'function_{node.name}:', tabbed=False)\n self.write_file(f'move $fp, $sp')\n self.write_file(f'subiu $sp, $sp, {4 * len(node.vlocals)}')\n for i in range(len(node.args)):\n self.offset[node.args[i].name] = 12 + i * 4\n for i in range(len(node.vlocals)):\n self.offset[node.vlocals[i].name] = i * -4\n for inst in node.body:\n if isinstance(inst, cil.Equal) or isinstance(inst, cil.Div):\n inst.id = self.new_labels_id()\n self.visit(inst)\n self.write_file(f'addiu $sp, $sp, {4 * len(node.vlocals)}')\n self.write_file('jr $ra')\n self.write_file('')\n\n @visitor.when(cil.Assign)\n def visit(self, node: cil.Assign):\n self.write_file('# ASSIGN')\n self.write_file('lw $a0, {}($fp)'.format(self.offset[node.source]))\n self.write_file('sw $a0, {}($fp)'.format(self.offset[node.dest]))\n self.write_file('')\n\n @visitor.when(cil.Plus)\n def visit(self, node: cil.Plus):\n self.write_file('# +')\n self.write_file('lw $a0, {}($fp)'.format(self.offset[node.left]))\n self.write_file('lw $a1, {}($fp)'.format(self.offset[node.right]))\n self.write_file('add $a0, $a0, $a1')\n self.write_file('sw $a0, {}($fp)'.format(self.offset[node.dest]))\n self.write_file('')\n\n @visitor.when(cil.Minus)\n def visit(self, node: cil.Minus):\n self.write_file('# -')\n if isinstance(node.left, int):\n self.write_file('li $a0 {}'.format(node.left))\n else:\n self.write_file('lw $a0, {}($fp)'.format(self.offset[node.left]))\n self.write_file('lw $a1, {}($fp)'.format(self.offset[node.right]))\n self.write_file('sub $a0, $a0, $a1')\n self.write_file('sw $a0, {}($fp)'.format(self.offset[node.dest]))\n self.write_file('')\n\n @visitor.when(cil.Mult)\n def visit(self, node: cil.Mult):\n self.write_file('# *')\n self.write_file('lw $a0, {}($fp)'.format(self.offset[node.left]))\n self.write_file('lw $a1, {}($fp)'.format(self.offset[node.right]))\n self.write_file('mul $a0, $a0, $a1')\n self.write_file('sw $a0, {}($fp)'.format(self.offset[node.dest]))\n self.write_file('')\n\n @visitor.when(cil.Div)\n def visit(self, node: cil.Div):\n self.write_file('# /')\n self.write_file('lw $a0, {}($fp)'.format(self.offset[node.left]))\n self.write_file('lw $a1, {}($fp)'.format(self.offset[node.right]))\n self.write_file(f'beqz $a1 _div_error_{node.id}_')\n self.write_file('div $a0, $a0, $a1')\n self.write_file('sw $a0, {}($fp)'.format(self.offset[node.dest]))\n self.write_file(f'b _div_end_{node.id}_')\n self.write_file(f'_div_error_{node.id}_:', tabbed=False)\n self.write_file('la $a0 _div_zero_msg')\n self.write_file('li $v0 4')\n self.write_file('syscall')\n self.write_file('la $a0 _abort_msg')\n self.write_file('li $v0 4')\n self.write_file('syscall')\n self.write_file('li $v0 10')\n self.write_file('syscall')\n self.write_file(f'_div_end_{node.id}_:', tabbed=False)\n\n @visitor.when(cil.Equal)\n def visit(self, node: cil.Equal):\n self.write_file('lw $t0 {}($fp)'.format(self.offset[node.left]))\n self.write_file('lw $t1 {}($fp)'.format(self.offset[node.right]))\n self.write_file(f'beq $t0 $zero _eq_false_{node.id}_')\n self.write_file(f'beq $t1 $zero _eq_false_{node.id}_')\n self.write_file('lw $a0 0($t0)')\n self.write_file('lw $a1 0($t1)')\n self.write_file(f'bne $a0 $a1 _eq_false_{node.id}_')\n self.write_file('li $a2 {}'.format(self.type_index.index(\n INTEGER_CLASS)))\n self.write_file(f'beq $a0 $a2 _eq_int_bool_{node.id}')\n self.write_file('li $a2 {}'.format(self.type_index.index(\n BOOLEAN_CLASS)))\n self.write_file(f'beq $a0 $a2 _eq_int_bool_{node.id}')\n self.write_file('li $a2 {}'.format(self.type_index.index(STRING_CLASS))\n )\n self.write_file(f'bne $a0 $a2 _not_basic_type_{node.id}_')\n self.write_file(f'_eq_str_{node.id}_:', tabbed=False)\n self.write_file('lw\\t$t3 12($t0)')\n self.write_file('lw\\t$t3 12($t3)')\n self.write_file('lw\\t$t4, 12($t1)')\n self.write_file('lw\\t$t4, 12($t4)')\n self.write_file(f'bne $t3 $t4 _eq_false_{node.id}_')\n self.write_file(f'beq $t3 $0 _eq_true_{node.id}_')\n self.write_file('addu $t0 $t0 16')\n self.write_file('lw $t0 0($t0)')\n self.write_file('addu $t1 $t1 16')\n self.write_file('lw $t1 0($t1)')\n self.write_file('move $t2 $t3')\n self.write_file(f'_verify_ascii_sequences_{node.id}_:', tabbed=False)\n self.write_file('lb $a0 0($t0)')\n self.write_file('lb $a1 0($t1)')\n self.write_file(f'bne $a0 $a1 _eq_false_{node.id}_')\n self.write_file('addu $t0 $t0 1')\n self.write_file('addu $t1 $t1 1')\n self.write_file('addiu $t2 $t2 -1')\n self.write_file(f'bnez $t2 _verify_ascii_sequences_{node.id}_')\n self.write_file(f'b _eq_true_{node.id}_')\n self.write_file(f'_not_basic_type_{node.id}_:', tabbed=False)\n self.write_file(f'bne $t0 $t1 _eq_false_{node.id}_')\n self.write_file(f'b _eq_true_{node.id}_')\n self.write_file(f'_eq_int_bool_{node.id}:', tabbed=False)\n self.write_file('lw $a3 12($t0)')\n self.write_file('lw $t4 12($t1)')\n self.write_file(f'bne $a3 $t4 _eq_false_{node.id}_')\n self.write_file(f'_eq_true_{node.id}_:', tabbed=False)\n self.write_file('li $a0 1')\n self.write_file('sw $a0 {}($fp)'.format(self.offset[node.dest]))\n self.write_file(f'b end_equal_{node.id}_')\n self.write_file(f'_eq_false_{node.id}_:', tabbed=False)\n self.write_file('li $a0 0')\n self.write_file('sw $a0 {}($fp)'.format(self.offset[node.dest]))\n self.write_file(f'end_equal_{node.id}_:', tabbed=False)\n\n @visitor.when(cil.LessThan)\n def visit(self, node: cil.LessThan):\n self.write_file('# <')\n self.write_file('lw $a1, {}($fp)'.format(self.offset[node.left]))\n self.write_file('lw $a2, {}($fp)'.format(self.offset[node.right]))\n self.write_file('slt $a0, $a1, $a2'.format(self.offset[node.right]))\n self.write_file('sw $a0, {}($fp)'.format(self.offset[node.dest]))\n self.write_file('')\n\n @visitor.when(cil.EqualOrLessThan)\n def visit(self, node: cil.EqualOrLessThan):\n self.write_file('# <=')\n self.write_file('lw $a1, {}($fp)'.format(self.offset[node.left]))\n self.write_file('lw $a2, {}($fp)'.format(self.offset[node.right]))\n self.write_file('sle $a0, $a1, $a2'.format(self.offset[node.right]))\n self.write_file('sw $a0, {}($fp)'.format(self.offset[node.dest]))\n self.write_file('')\n\n @visitor.when(cil.GetAttrib)\n def visit(self, node: cil.GetAttrib):\n self.write_file('# GETATTR')\n self.write_file(f'lw $a1 {self.offset[node.instance]}($fp)')\n self.write_file(f'lw $a0 {12 + 4 * node.attribute}($a1)')\n self.write_file(f'sw $a0 {self.offset[node.dest]}($fp)')\n self.write_file('')\n\n @visitor.when(cil.SetAttrib)\n def visit(self, node: cil.SetAttrib):\n self.write_file('# SETATTR')\n self.write_file(f'lw $a1 {self.offset[node.instance]}($fp)')\n if isinstance(node.src, int):\n self.write_file(f'li $a0, {node.src}')\n elif node.src[:5] == 'data_':\n self.write_file(f'la $a0, {node.src}')\n else:\n self.write_file(f'lw $a0 {self.offset[node.src]}($fp)')\n self.write_file(f'sw $a0 {12 + 4 * node.attribute}($a1)')\n self.write_file('')\n\n @visitor.when(cil.TypeOf)\n def visit(self, node: cil.TypeOf):\n self.write_file('# TYPEOF')\n self.write_file(f'lw $a1 {self.offset[node.instance]}($fp)')\n self.write_file(f'lw $a0 0($a1)')\n self.write_file(f'sw $a0 {self.offset[node.dest]}($fp)')\n self.write_file('')\n\n @visitor.when(cil.Allocate)\n def visit(self, node: cil.Allocate):\n self.write_file('# ALLOCATE')\n if node.ttype == VOID_TYPE:\n self.write_file(f'la $v0 {VOID_MIPS_NAME}')\n self.write_file(f'sw $v0 {self.offset[node.dest]}($fp)')\n else:\n offset_proto = self.type_index.index(node.ttype) * 8\n self.write_file('lw $t0 {}($s0)'.format(offset_proto))\n self.write_file('sw $t0, 0($sp)')\n self.write_file('addiu $sp, $sp, -4')\n self.write_file('')\n self.visit(cil.Call(dest=node.dest, f='Object_copy'))\n self.write_file('addiu $sp, $sp, 4')\n self.write_file('')\n\n @visitor.when(cil.Call)\n def visit(self, node: cil.Call):\n self.write_file('# CALL')\n self.write_file(f'addiu $sp, $sp, -8')\n self.write_file(f'sw $ra, 4($sp)')\n self.write_file(f'sw $fp, 8($sp)')\n self.write_file(f'jal function_{node.f}')\n self.write_file(f'lw $fp, 8($sp)')\n self.write_file(f'lw $ra, 4($sp)')\n self.write_file(f'addiu $sp, $sp, 8')\n if node.dest:\n self.write_file(f'sw $v0 {self.offset[node.dest]}($fp)')\n self.write_file('')\n\n @visitor.when(cil.VCall)\n def visit(self, node: cil.VCall):\n self.write_file('# VCALL')\n self.write_file(f'addiu $sp, $sp, -8')\n self.write_file(f'sw $ra, 4($sp)')\n self.write_file(f'sw $fp, 8($sp)')\n if node.ttype[0] == '_':\n self.write_file(f'lw $a2, {self.offset[node.ttype]}($fp)')\n else:\n self.write_file(f'li $a2, {self.type_index.index(node.ttype)}')\n self.write_file(f'mulu $a2, $a2, 8')\n self.write_file(f'addu $a2, $a2, $s0')\n self.write_file(f'lw $a1, 0($a2)')\n self.write_file(f'lw $a2, 8($a1)')\n self.write_file(f'lw $a0 {node.f * 4}($a2)')\n self.write_file(f'jalr $a0')\n self.write_file(f'lw $fp, 8($sp)')\n self.write_file(f'lw $ra, 4($sp)')\n self.write_file(f'addiu $sp, $sp, 8')\n self.write_file(f'sw $v0 {self.offset[node.dest]}($fp)')\n if node.ttype[0] != '_':\n self.write_file(f'li $a2, {self.type_index.index(node.ttype)}')\n else:\n self.write_file(f'lw $a2, {self.offset[node.ttype]}($fp)')\n self.write_file('')\n\n @visitor.when(cil.PushParam)\n def visit(self, node: cil.PushParam):\n self.write_file('# PUSHPARAM')\n if node.name[0] != '_':\n self.write_file('li $a0, {}'.format(self.type_index.index(node.\n name)))\n else:\n self.write_file('lw $a0, {}($fp)'.format(self.offset[node.name]))\n self.push()\n self.write_file('')\n\n @visitor.when(cil.PopParam)\n def visit(self, node: cil.PopParam):\n self.write_file('# POPPARAM')\n self.pop(node.name)\n self.write_file('')\n\n @visitor.when(cil.Return)\n def visit(self, node: cil.Return):\n self.write_file('# RETURN')\n self.write_file('lw $v0, {}($fp)'.format(self.offset[node.value]))\n\n @visitor.when(cil.Label)\n def visit(self, node: cil.Label):\n self.write_file('_cil_label_{}:'.format(node.name), tabbed=False)\n\n @visitor.when(cil.Goto)\n def visit(self, node: cil.Goto):\n self.write_file('# GOTO')\n self.write_file('j _cil_label_{}'.format(node.label))\n self.write_file('')\n\n @visitor.when(cil.IfGoto)\n def visit(self, node: cil.IfGoto):\n self.write_file('# IF GOTO')\n self.write_file('lw $a0, {}($fp)'.format(self.offset[node.condition]))\n self.write_file('bnez $a0, _cil_label_{}'.format(node.label))\n self.write_file('')\n\n def static_datas(self):\n self.write_file('str_buffer: .space 1025')\n self.write_file('')\n self.write_file(\n '_index_negative_msg: .asciiz \"Index to substr is negative\\\\n\"')\n self.write_file(\n '_index_out_msg: .asciiz \"Index out range exception\\\\n\"')\n self.write_file('_abort_msg: \"Execution aborted\\\\n\"')\n self.write_file('_div_zero_msg: \"Division by zero exception\\\\n\"')\n self.write_file('')\n\n def entry(self):\n self.write_file('entry:', tabbed=False)\n self.visit(cil.Call(dest=None, f='build_class_name_table'))\n self.visit(cil.Call(dest=None, f='allocate_prototypes_table'))\n self.visit(cil.Call(dest=None, f='build_prototypes'))\n self.visit(cil.Call(dest=None, f='build_dispatch_tables'))\n self.visit(cil.Call(dest=None, f='build_class_parents_table'))\n self.visit(cil.Allocate(dest=None, ttype='Main'))\n self.write_file('sw $v0 0($sp)')\n self.write_file('addiu $sp $sp -4')\n self.visit(cil.Call(dest=None, f=f'Main_{INIT_CIL_SUFFIX}'))\n self.write_file('addiu $sp $sp 4')\n self.write_file('sw $v0 0($sp)')\n self.write_file('addiu $sp $sp -4')\n self.visit(cil.Call(dest=None, f='Main_main'))\n self.write_file('addiu $sp $sp 4')\n self.write_file('li $v0 10')\n self.write_file('syscall')\n\n def object_abort(self):\n self.write_file('function_Object_abort:', tabbed=False)\n self.write_file(f'move $fp, $sp')\n self.write_file('jr $ra')\n self.write_file('')\n\n def object_copy(self):\n self.write_file('function_Object_copy:', tabbed=False)\n self.write_file(f'move $fp, $sp')\n self.write_file('lw $t0 12($fp)')\n self.write_file('lw $a0 4($t0)')\n self.write_file('move $t4 $a0')\n self.write_file('li $v0 9')\n self.write_file('syscall')\n self.write_file('move $t2 $v0')\n self.write_file('li $t3 0')\n self.write_file('_objcopy_loop:', tabbed=False)\n self.write_file('lw $t1 0($t0)')\n self.write_file('sw $t1 0($v0)')\n self.write_file('addiu $t0 $t0 4')\n self.write_file('addiu $v0 $v0 4')\n self.write_file('addiu $t3 $t3 4')\n self.write_file('ble $t4 $t3 _objcopy_loop')\n self.write_file('_objcopy_div_end_:', tabbed=False)\n self.write_file('move $v0 $t2')\n self.write_file('jr $ra')\n self.write_file('')\n\n def object_typename(self):\n self.write_file('function_Object_type_name:', tabbed=False)\n self.write_file(f'move $fp, $sp')\n self.visit(cil.Allocate(dest=None, ttype=STRING_CLASS))\n self.write_file('move $v1 $v0')\n self.visit(cil.Allocate(dest=None, ttype=INTEGER_CLASS))\n self.write_file('lw $a1 12($fp)')\n self.write_file('lw $a1 0($a1)')\n self.write_file('mulu $a1 $a1 4')\n self.write_file('addu $a1 $a1 $s1')\n self.write_file('lw $a1 0($a1)')\n self.write_file('move $a2 $0')\n self.write_file('move $t2 $a1')\n self.write_file('_str_len_clsname_:', tabbed=False)\n self.write_file('lb $a0 0($t2)')\n self.write_file('beq $a0 $0 _end_clsname_len_')\n self.write_file('addiu $a2 $a2 1')\n self.write_file('addiu $t2 $t2 1')\n self.write_file('j _str_len_clsname_')\n self.write_file('_end_clsname_len_:', tabbed=False)\n self.write_file('sw $a2, 12($v0)')\n self.write_file('sw $v0, 12($v1)')\n self.write_file('sw $a1, 16($v1)')\n self.write_file('move $v0 $v1')\n self.write_file('jr $ra')\n self.write_file('')\n\n def string_length(self):\n self.write_file('function_String_length:', tabbed=False)\n self.write_file(f'move $fp, $sp')\n self.write_file('lw $a0 12($fp)')\n self.write_file('lw $v0 12($a0)')\n self.write_file('jr $ra')\n self.write_file('')\n\n def string_concat(self):\n self.write_file('function_String_concat:', tabbed=False)\n self.write_file(f'move $fp, $sp')\n self.visit(cil.Allocate(dest=None, ttype=INTEGER_CLASS))\n self.write_file('move $v1 $v0')\n self.visit(cil.Allocate(dest=None, ttype=STRING_CLASS))\n self.write_file('move $t3 $v0')\n self.write_file('lw $a1 12($fp)')\n self.write_file('lw $a2 16($fp)')\n self.write_file('lw $t1 12($a1)')\n self.write_file('lw $t1 12($t1)')\n self.write_file('lw $t2 12($a2)')\n self.write_file('lw $t2 12($t2)')\n self.write_file('addu $t0 $t2 $t1')\n self.write_file('sw $t0 12($v1)')\n self.write_file('lw $a1 16($a1)')\n self.write_file('lw $a2 16($a2)')\n self.write_file('addiu $t0 $t0 1')\n self.allocate_memory('$t0', register=True)\n self.write_file('move $t5 $v0')\n self.write_file('move $t4 $a1')\n self.write_file('addu $a1 $a1 $t1')\n self.write_file('_strcat_copy_:', tabbed=False)\n self.write_file('beq $t4 $a1 _end_strcat_copy_')\n self.write_file('lb $a0 0($t4)')\n self.write_file('sb $a0 0($t5)')\n self.write_file('addiu $t5 $t5 1')\n self.write_file('addiu $t4 $t4 1')\n self.write_file('j _strcat_copy_')\n self.write_file('_end_strcat_copy_:', tabbed=False)\n self.write_file('move $t4 $a2')\n self.write_file('addu $a2 $a2 $t2')\n self.write_file('_strcat_copy_snd_:', tabbed=False)\n self.write_file('beq $t4 $a2 _end_strcat_copy_snd_')\n self.write_file('lb $a0 0($t4)')\n self.write_file('sb $a0 0($t5)')\n self.write_file('addiu $t5 $t5 1')\n self.write_file('addiu $t4 $t4 1')\n self.write_file('j _strcat_copy_snd_')\n self.write_file('_end_strcat_copy_snd_:', tabbed=False)\n self.write_file('sb $0 0($t5)')\n self.write_file('sw $v1 12($t3)')\n self.write_file('sw $v0 16($t3)')\n self.write_file('move $v0 $t3')\n self.write_file('jr $ra')\n self.write_file('')\n\n def string_substr(self):\n self.write_file('function_String_substr:', tabbed=False)\n self.write_file(f'move $fp, $sp')\n self.write_file(f'lw $t5 12($fp)')\n self.write_file(f'lw $a1 16($fp)')\n self.write_file(f'lw $a1 12($a1)')\n self.write_file(f'lw $a2 20($fp)')\n self.write_file(f'lw $a2 12($a2)')\n self.write_file(f'blt $a1 $0 _index_negative')\n self.write_file(f'blt $a2 $0 _index_negative')\n self.write_file(f'add $a2 $a1 $a2')\n self.write_file(f'lw $a3 12($t5)')\n self.write_file(f'lw $a3 12($a3)')\n self.write_file(f'bgt $a2 $a3 _index_out')\n self.visit(cil.Allocate(dest=None, ttype=STRING_CLASS))\n self.write_file(f'move $v1 $v0')\n self.visit(cil.Allocate(dest=None, ttype=INTEGER_CLASS))\n self.write_file(f'move $t0 $v0')\n self.write_file(f'move $t7 $a2')\n self.write_file(f'subu $t7 $t7 $a1')\n self.write_file(f'sw $t7 12($t0)')\n self.allocate_memory('$a2', register=True)\n self.write_file(f'sw $t0 12($v1)')\n self.write_file(f'sw $v0 16($v1)')\n self.write_file('move $t1 $v0')\n self.write_file('lw $t5 16($t5)')\n self.write_file('move $t4 $t5')\n self.write_file('addu $t4 $t4 $a1')\n self.write_file('addu $t5 $t5 $a2')\n self.write_file('_substr_copy_:', tabbed=False)\n self.write_file('bge $t4 $t5 _end_substr_copy_')\n self.write_file('lb $a0 0($t4)')\n self.write_file('sb $a0 0($t1)')\n self.write_file('addiu $t1 $t1 1')\n self.write_file('addiu $t4 $t4 1')\n self.write_file('j _substr_copy_')\n self.write_file(f'_index_negative:', tabbed=False)\n self.write_file(f'la $a0 _index_negative_msg')\n self.write_file(f'b _subst_abort')\n self.write_file(f'_index_out:', tabbed=False)\n self.write_file(f'la $a0 _index_out_msg')\n self.write_file(f'b _subst_abort')\n self.write_file(f'_subst_abort:', tabbed=False)\n self.write_file(f'li $v0 4')\n self.write_file(f'syscall')\n self.write_file('la\\t$a0 _abort_msg')\n self.write_file(f'li $v0 4')\n self.write_file(f'syscall')\n self.write_file(f'li $v0 10')\n self.write_file(f'syscall')\n self.write_file('_end_substr_copy_:', tabbed=False)\n self.write_file('move $v0 $v1')\n self.write_file('jr $ra')\n self.write_file('')\n\n def io_in_int(self):\n self.write_file('function_IO_in_int:', tabbed=False)\n self.write_file(f'move $fp, $sp')\n self.visit(cil.Allocate(dest=None, ttype=INTEGER_CLASS))\n self.write_file('move $t0 $v0')\n self.write_file('li $v0 5')\n self.write_file('syscall')\n self.write_file('sw $v0 12($t0)')\n self.write_file('move $v0 $t0')\n self.write_file('jr $ra')\n self.write_file('')\n\n def io_in_string(self):\n self.write_file('function_IO_in_string:', tabbed=False)\n self.write_file(f'move $fp, $sp')\n self.visit(cil.Allocate(dest=None, ttype=INTEGER_CLASS))\n self.write_file('move $v1 $v0')\n self.visit(cil.Allocate(dest=None, ttype=STRING_CLASS))\n self.write_file('sw $v1 12($v0)')\n self.write_file('move $t5 $v0')\n self.write_file('la $a0 str_buffer')\n self.write_file('li $a1 1025')\n self.write_file('li $v0 8')\n self.write_file('syscall')\n self.write_file('move $a0 $0')\n self.write_file('la $t2 str_buffer')\n self.write_file('_in_string_str_len_:', tabbed=False)\n self.write_file('lb $t0 0($t2)')\n self.write_file('beq $t0 $0 _end_in_string_str_len_')\n self.write_file('beq $t0 10 _end_in_string_str_len_')\n self.write_file('addiu $a0 $a0 1')\n self.write_file('addiu $t2 $t2 1')\n self.write_file('j _in_string_str_len_')\n self.write_file('_end_in_string_str_len_:', tabbed=False)\n self.write_file('sw $a0 12($v1)')\n self.allocate_memory()\n self.write_file('la $t4 str_buffer')\n self.write_file('move $t1 $v0')\n self.write_file('_in_str_copy_:', tabbed=False)\n self.write_file('lb $t0 0($t4)')\n self.write_file('beq $t0 $0 _end_in_str_copy_')\n self.write_file('beq $t0 10 _end_in_str_copy_')\n self.write_file('sb $t0 0($t1)')\n self.write_file('addiu $t4 $t4 1')\n self.write_file('addiu $t1 $t1 1')\n self.write_file('j _in_str_copy_')\n self.write_file('_end_in_str_copy_:', tabbed=False)\n self.write_file('sw $v0 16($t5)')\n self.write_file('la $t4 str_buffer')\n self.write_file('_in_str_clean_:', tabbed=False)\n self.write_file('lb $t0 0($t4)')\n self.write_file('beq $t0 $0 _end_in_str_clean_')\n self.write_file('sb $0 0($t4)')\n self.write_file('addiu $t4 $t4 1')\n self.write_file('j _in_str_clean_')\n self.write_file('_end_in_str_clean_:', tabbed=False)\n self.write_file('move $v0 $t5')\n self.write_file('jr $ra')\n self.write_file('')\n\n def io_out_int(self):\n self.write_file('function_IO_out_int:', tabbed=False)\n self.write_file(f'move $fp, $sp')\n self.write_file('lw $a0 16($fp)')\n self.write_file('lw $a0 12($a0)')\n self.write_file('li $v0 1')\n self.write_file('syscall')\n self.write_file('lw $v0 12($fp)')\n self.write_file('jr $ra')\n self.write_file('')\n\n def io_out_string(self):\n self.write_file('function_IO_out_string:', tabbed=False)\n self.write_file(f'move $fp, $sp')\n self.write_file('lw $a0 16($fp)')\n self.write_file('lw $a0 16($a0)')\n self.write_file('li $v0 4')\n self.write_file('syscall')\n self.write_file('lw $v0 12($fp)')\n self.write_file('jr $ra')\n self.write_file('')\n\n def conforms(self):\n self.write_file(f'function_{CONFORMS_FUNC}:', tabbed=False)\n self.write_file(f'move $fp, $sp')\n self.write_file(f'lw $t0 12($fp)')\n self.write_file(f'lw $t1 16($fp)')\n self.write_file(\n f'beq $t1 {self.type_index.index(OBJECT_CLASS)} _conforms_ret_true_'\n )\n self.write_file('_conforms_loop_:', tabbed=False)\n self.write_file('beq $t0 $t1 _conforms_ret_true_')\n self.write_file(\n f'beq $t0 {self.type_index.index(OBJECT_CLASS)} _conforms_ret_false_'\n )\n self.write_file('mulu $t0 $t0 4')\n self.write_file('addu $t0 $t0 $s2')\n self.write_file('lw $t0 0($t0)')\n self.write_file('j _conforms_loop_')\n self.write_file('_conforms_ret_true_:', tabbed=False)\n self.write_file('li $v0 1')\n self.write_file('j _conforms_ret_')\n self.write_file('_conforms_ret_false_:', tabbed=False)\n self.write_file('li $v0 0')\n self.write_file('_conforms_ret_:')\n self.write_file('jr $ra')\n self.write_file('')\n\n def isvoid(self):\n self.write_file(f'function_{ISVOID_FUNC}:', tabbed=False)\n self.write_file(f'move $fp, $sp')\n self.visit(cil.Allocate(dest=None, ttype=BOOLEAN_CLASS))\n self.write_file(f'lw $t0 12($fp)')\n self.write_file(f'la $t1 {VOID_MIPS_NAME}')\n self.write_file(f'beq $t0 $t1 _is_void_true_')\n self.write_file(f'sw $0 12($v0)')\n self.write_file(f'j _is_void_end_')\n self.write_file(f'_is_void_true_:', tabbed=False)\n self.write_file(f'li $t0 1')\n self.write_file(f'sw $t0 12($v0)')\n self.write_file(f'_is_void_end_:', tabbed=False)\n self.write_file(f'jr $ra')\n self.write_file(f'')\n",
"step-5": "\n\"\"\"\nRegisters $v0 and $v1 are used to return values from functions.\nRegisters $t0 – $t9 are caller-saved registers that are used to\nhold temporary quantities that need not be preserved across calls\nRegisters $s0 – $s7 (16–23) are callee-saved registers that hold long-lived\nvalues that should be preserved across calls. They are preserved across calls\nRegister $gp is a global pointer that points to the middle of a 64K block\nof memory in the static data segment. Preserve across calls\nRegister $fp is the frame pointer. Register $fp is saved by every procedure\nthat allocates a new stack frame.Preserve across calls\nRegister $sp is the stack pointer, which points to the last location on\nthe stack(Points to Free Memory). Preserve across calls\nRegister $ra only needs to be saved if the callee itself makes a call.\nRegister $s0 <- Prototypes table\nRegister $s1 <- Class Names table\nRegister $s2 <- Class parents table\n\n0($fp): some local variable\n4(%fp): old $ra\n8(%fp): old $fp\n12(%fp): 1st argument Self\n.....\n\n\tClass Name table layout\noffset 0 - \"Class1\"\noffset 4 - \"Class2\"\noffset 8 - \"Class3\"\n.....\n\n\tPrototypes Table layout\noffset 0 - protObj1\noffset 4 - Obj1_init\noffset 8 - protObj2\noffset 12 - Obj2_init\n.....\n\n\tDispatch Table layout:\noffset 0 - addres of method m0\noffset 1 - addres of method m1\n.....\n\n Prototype layout:\noffset 0 - Class tag : int that identifies the class of the object\noffset 4 - Object size :(in 32-bit words) = 12 + 4 * (number of attributes)\noffset 8 - Dispatch pointer : pointer to the table of virtual methods\noffset 12. . . Attributes\n\"\"\"\n\nimport sys\nsys.path.append('..')\n\nimport commons.cil_ast as cil\nimport commons.visitor as visitor\nfrom commons.settings import *\n\n\n\n\nclass MipsVisitor:\n\t\"\"\"\n\tMips Visitor Class.\n\n\tThis visitor will process the AST of the generated CIL and write the mips code to a file.\n\t\"\"\"\n\n\tdef __init__(self, inherit_graph, output_file=\"mips_code.mips\"):\n\t\tself.inherit_graph, _ = inherit_graph\n\t\t\n\t\tself.offset = dict()\n\t\tself.type_index = []\n\t\tself.dispatchtable_code = []\n\t\tself.prototypes_code = []\n\t\tself.cur_labels_id = 0\n\n\t\tself.output_file = output_file\n\n\t# ======================================================================\n\t# =[ UTILS ]============================================================\n\t# ======================================================================\n\n\n\tdef push(self):\n\t\tself.write_file('sw $a0 0($sp)')\n\t\tself.write_file('addiu $sp $sp -4')\n\n\tdef pop(self, dest=None):\n\t\tself.write_file(f'addiu $sp $sp 4')\n\n\n\tdef write_file(self, msg, mode = \"a\", tabbed=True):\n\t\tf = open(self.output_file, mode)\n\t\tf.write(\"{}{}\\n\".format(\"\\t\" if tabbed else \"\", msg))\n\t\tf.close()\n\n\tdef allocate_memory(self, size=None, register=False):\n\t\tif register:\n\t\t\tself.write_file('move $a0 {}'.format(size))\n\t\telse:\n\t\t\tif size:\n\t\t\t\tself.write_file('li $a0 {}'.format(size))\n\t\tself.write_file('li $v0 9')\n\t\tself.write_file('syscall')\n\n\tdef new_labels_id(self):\n\t\tself.cur_labels_id += 1\n\t\treturn self.cur_labels_id\n\n\t# ======================================================================\n\n\t@visitor.on('node')\n\tdef visit(self, node):\n\t\tpass\n\n\n################################ PROGRAM #####################################\n\n\n\t@visitor.when(cil.Program)\n\tdef visit(self, node: cil.Program):\n\t\tself.write_file('', \"w\")\n\n\t\t#-------------------- DATA SECTION ----------------------------\n\n\t\tself.write_file('.data', tabbed = False)\n\n\t\t# Declare static data\n\t\tself.static_datas()\n\n\t\t# Transpile CIL data section\n\t\tfor data in node.data_section:\n\t\t\tself.visit(data)\n\t\tself.write_file('')\n\n\t\t# Declare class name strings and map class index\n\t\tfor i in range(len(node.type_section)):\n\t\t\tself.type_index.append(node.type_section[i].type_name)\n\t\t\tself.write_file('classname_{}: .asciiz \\\"{}\\\"'.format(node.type_section[i].type_name,node.type_section[i].type_name))\n\n\t\t# Declare void type\n\t\tself.write_file(f'{VOID_MIPS_NAME}: .asciiz \\\"\\\"')\n\n\t\t#-------------------- TEXT SECTION ----------------------------\n\n\t\tself.write_file('\\n.text')\n\t\tself.entry()\n\n\t\tself.write_file('\\n########## STATIC FUNCTIONS ##########\\n')\n\t\t# CONFORMS\n\t\tself.conforms()\n\t\t# IS_VOID\n\t\tself.isvoid()\n\t\t# OBJECT\n\t\tself.object_abort()\n\t\tself.object_copy()\n\t\tself.object_typename()\n\t\t# STRING\n\t\tself.string_length()\n\t\tself.string_concat()\n\t\tself.string_substr()\n\t\t# IO\n\t\tself.io_in_int()\n\t\tself.io_in_string()\n\t\tself.io_out_int()\n\t\tself.io_out_string()\n\n\t\tfor t in node.type_section:\n\t\t\tself.visit(t)\n\n\t\tself.write_file('\\n############## TABLES ################\\n')\n\n\t\t# Generate method that creates classes's name table\n\t\tself.write_file('function_build_class_name_table:', tabbed=False)\n\t\tself.allocate_memory(len(node.type_section) * 4)\n\t\tself.write_file('move $s1 $v0') # save the address of the table in a register\n\t\tfor i in range(len(node.type_section)):\n\t\t\tself.write_file('la $t1 classname_{}'.format(node.type_section[i].type_name))\n\t\t\tself.write_file('sw $t1 {}($s1)'.format(4 * i))\n\t\tself.write_file('')\n\n\t\t# Generate method that allocates memory for prototypes table\n\t\tself.write_file('function_allocate_prototypes_table:', tabbed=False)\n\t\tself.allocate_memory(8 * len(self.type_index))\n\t\tself.write_file('move $s0 $v0') # save the address of the table in a register\n\t\tself.write_file('')\n\n\t\t# Generate mips method that builds prototypes\n\t\tself.write_file('function_build_prototypes:', tabbed=False)\n\t\tfor ins in self.prototypes_code:\n\t\t\tself.write_file(ins)\n\t\tself.write_file('')\n\n\t\t# Generate mips method that builds dispatch tables\n\t\tself.write_file('function_build_dispatch_tables:', tabbed=False)\n\t\tfor ins in self.dispatchtable_code:\n \t\t\tself.write_file(ins)\n\t\tself.write_file('')\n\t\t\n\t\t# Generate method that builds class parents table\n\t\tself.write_file('function_build_class_parents_table:', tabbed=False)\n\t\tself.allocate_memory(4 * len(self.type_index))\n\t\tself.write_file('move $s2 $v0') # save the address of the table in a register\n\t\tself.write_file('')\n\n\t\t# Fill table entry for each class type\n\t\tfor parent in self.inherit_graph.keys():\n\t\t\tp_index = self.type_index.index(parent)\n\t\t\tfor child in self.inherit_graph[parent]:\n\t\t\t\tch_index = self.type_index.index(child.name)\n\t\t\t\tself.write_file(f'li $t0 {ch_index}')\n\t\t\t\tself.write_file(f'mul $t0 $t0 4')\n\t\t\t\tself.write_file(f'add $t0 $t0 $s2')\n\t\t\t\tself.write_file(f'li $t1 {p_index}')\n\t\t\t\tself.write_file(f'sw $t1 0($t0)')\n\t\t\t\tself.write_file('')\n\n\t\tself.write_file('')\n\n\n\t\t# Generate COOL functions\n\t\tself.write_file('\\n########### COOL FUNCTIONS ##########\\n')\n\t\tfor func in node.code_section:\n\t\t\tis_built_in = False\n\t\t\tif not INIT_CIL_SUFFIX in func.name:\n\t\t\t\tis_built_in = [x for x in BUILT_IN_CLASSES if f'{x}_' in func.name] != []\n\t\t\tif not is_built_in:\n\t\t\t\tself.visit(func)\n\t\tself.write_file('\\n#####################################\\n')\n\n\n\n################################ .DATA #######################################\n\n\n\t@visitor.when(cil.Data)\n\tdef visit(self, node: cil.Data):\n\t\tself.write_file(f'{node.dest}: .asciiz \\\"{str(node.value.encode())[2:-1]}\\\"')\n\n\n################################ TYPES #######################################\n\n\n\t@visitor.when(cil.Type)\n\tdef visit(self, node: cil.Type):\n\t\t# Allocate\n\t\tself.dispatchtable_code.append(f'# Type {node.type_name}')\n\t\tself.dispatchtable_code.append('li $a0 {}'.format(4 * len(node.methods)))\n\t\tself.dispatchtable_code.append('li $v0 9')\n\t\tself.dispatchtable_code.append('syscall')\n\n\t\t# Add dispatch table code\n\t\tfor i in range(len(node.methods)):\n\t\t\tself.dispatchtable_code.append('la $t1 function_{}'.format(node.methods[i].function_name))\n\t\t\tself.dispatchtable_code.append('sw $t1 {}($v0)'.format(4 * i))\n\t\tself.dispatchtable_code.append('lw $t0 {}($s0)'.format(8 * self.type_index.index(node.type_name)))\n\t\tself.dispatchtable_code.append('sw $v0 8($t0)')\n\t\tself.dispatchtable_code.append('')\n\n\t\t# Allocate\n\t\tself.prototypes_code.append(f'# Type {node.type_name}')\n\t\tself.prototypes_code.append('li $a0 {}'.format(12 + 4 * len(node.attributes)))\n\t\tself.prototypes_code.append('li $v0 9')\n\t\tself.prototypes_code.append('syscall')\n\n\t\t# Add prototype code\n\t\tclass_index = self.type_index.index(node.type_name)\n\t\tself.prototypes_code.append('li $a0 {}'.format(class_index))\n\t\tself.prototypes_code.append('sw $a0 0($v0)')\n\t\tself.prototypes_code.append('li $a0 {}'.format(12 + 4 * len(node.attributes)))\n\t\tself.prototypes_code.append('sw $a0 4($v0)')\n\t\tself.prototypes_code.append('sw $v0 {}($s0)'.format(8 * class_index))\n\t\tself.prototypes_code.append('')\n\n\n\t@visitor.when(cil.Function)\n\tdef visit(self, node: cil.Function):\n\t\tself.write_file(f'function_{node.name}:', tabbed=False)\n\n\t\t# Set up stack frame\n\t\tself.write_file(f'move $fp, $sp')\n\t\tself.write_file(f'subiu $sp, $sp, {4 * len(node.vlocals)}')\n\n\t\t# Register arguments offsets\n\t\tfor i in range(len(node.args)):\n\t\t\tself.offset[node.args[i].name] = 12 + i * 4\n\n\t\t# Register locals offsets\n\t\tfor i in range(len(node.vlocals)):\n\t\t\tself.offset[node.vlocals[i].name] = i * (-4)\n\n\t\t# Generate mips code for the function's body\n\t\tfor inst in node.body:\n\t\t\t# Equal node needs unique id for its labels\n\t\t\tif isinstance(inst, cil.Equal) or isinstance(inst, cil.Div):\n\t\t\t\tinst.id = self.new_labels_id()\n\n\t\t\tself.visit(inst)\n\n\t\t# Pop the stack frame\n\t\tself.write_file(f'addiu $sp, $sp, {4 * len(node.vlocals)}')\n\n\t\t# Return\n\t\tself.write_file('jr $ra')\n\n\t\tself.write_file('')\n\n\n############################## ASSIGNMENT ####################################\n\n\n\t@visitor.when(cil.Assign)\n\tdef visit(self, node: cil.Assign):\n\t\tself.write_file('# ASSIGN')\n\t\tself.write_file('lw $a0, {}($fp)'.format(self.offset[node.source]))\n\t\tself.write_file('sw $a0, {}($fp)'.format(self.offset[node.dest]))\n\t\tself.write_file('')\n\n\n############################# ARITHMETICS ####################################\n\n\n\t@visitor.when(cil.Plus)\n\tdef visit(self, node: cil.Plus):\n\t\tself.write_file('# +')\n\t\tself.write_file('lw $a0, {}($fp)'.format(self.offset[node.left]))\n\t\tself.write_file('lw $a1, {}($fp)'.format(self.offset[node.right]))\n\t\tself.write_file('add $a0, $a0, $a1')\n\t\tself.write_file('sw $a0, {}($fp)'.format(self.offset[node.dest]))\n\t\tself.write_file('')\n\n\t@visitor.when(cil.Minus)\n\tdef visit(self, node: cil.Minus):\n\t\tself.write_file('# -')\n\t\tif isinstance(node.left, int):\n\t\t\tself.write_file('li $a0 {}'.format(node.left))\n\t\telse:\n\t\t\tself.write_file('lw $a0, {}($fp)'.format(self.offset[node.left]))\n\t\tself.write_file('lw $a1, {}($fp)'.format(self.offset[node.right]))\n\t\tself.write_file('sub $a0, $a0, $a1')\n\t\tself.write_file('sw $a0, {}($fp)'.format(self.offset[node.dest]))\n\t\tself.write_file('')\n\n\t@visitor.when(cil.Mult)\n\tdef visit(self, node: cil.Mult):\n\t\tself.write_file('# *')\n\t\tself.write_file('lw $a0, {}($fp)'.format(self.offset[node.left]))\n\t\tself.write_file('lw $a1, {}($fp)'.format(self.offset[node.right]))\n\t\tself.write_file('mul $a0, $a0, $a1')\n\t\tself.write_file('sw $a0, {}($fp)'.format(self.offset[node.dest]))\n\t\tself.write_file('')\n\n\t@visitor.when(cil.Div)\n\tdef visit(self, node: cil.Div):\n\t\tself.write_file('# /')\n\t\tself.write_file('lw $a0, {}($fp)'.format(self.offset[node.left]))\n\t\tself.write_file('lw $a1, {}($fp)'.format(self.offset[node.right]))\n\t\tself.write_file(f'beqz $a1 _div_error_{node.id}_')\n\t\tself.write_file('div $a0, $a0, $a1')\n\t\tself.write_file('sw $a0, {}($fp)'.format(self.offset[node.dest]))\n\t\tself.write_file(f'b _div_end_{node.id}_')\n\t\tself.write_file(f'_div_error_{node.id}_:',tabbed=False)\n\t\tself.write_file('la $a0 _div_zero_msg')\n\t\tself.write_file('li $v0 4')\n\t\tself.write_file('syscall')\n\t\tself.write_file('la $a0 _abort_msg')\n\t\tself.write_file('li $v0 4')\n\t\tself.write_file('syscall')\n\t\tself.write_file('li $v0 10')\n\t\tself.write_file('syscall')\n\t\tself.write_file(f'_div_end_{node.id}_:',tabbed=False)\n\n\n############################# COMPARISONS ####################################\n\n\n\t@visitor.when(cil.Equal)\n\tdef visit(self, node: cil.Equal):\n\t\tself.write_file('lw $t0 {}($fp)'.format(self.offset[node.left]))\n\t\tself.write_file('lw $t1 {}($fp)'.format(self.offset[node.right]))\n\t\tself.write_file(f'beq $t0 $zero _eq_false_{node.id}_') # $t0 can't also be void\n\t\tself.write_file(f'beq $t1 $zero _eq_false_{node.id}_') # $t1 can't also be void\n\t\tself.write_file('lw $a0 0($t0)')\t# get object 1 tag\n\t\tself.write_file('lw $a1 0($t1)')\t# get object 2 tag\n\t\tself.write_file(f'bne $a0 $a1 _eq_false_{node.id}_')\t# compare tags\n\t\tself.write_file('li $a2 {}'.format(self.type_index.index(INTEGER_CLASS)))\t# load int tag\n\t\tself.write_file(f'beq $a0 $a2 _eq_int_bool_{node.id}')\t# Integers\n\t\tself.write_file('li $a2 {}'.format(self.type_index.index(BOOLEAN_CLASS)))\t# load bool tag\n\t\tself.write_file(f'beq $a0 $a2 _eq_int_bool_{node.id}')\t# Booleans\n\t\tself.write_file('li $a2 {}'.format(self.type_index.index(STRING_CLASS))) # load string tag\n\t\tself.write_file(f'bne $a0 $a2 _not_basic_type_{node.id}_') # Not a primitive type\n\n\t\t# equal strings\n\t\t# verify len of the strings\n\t\tself.write_file(f'_eq_str_{node.id}_:', tabbed = False) \t# handle strings\n\t\tself.write_file('lw\t$t3 12($t0)') # get string_1 size\n\t\tself.write_file('lw\t$t3 12($t3)') # unbox string_1 size\n\t\tself.write_file('lw\t$t4, 12($t1)') # get string_2 size\n\t\tself.write_file('lw\t$t4, 12($t4)') # unbox string_2 size\n\t\tself.write_file(f'bne $t3 $t4 _eq_false_{node.id}_') # string size are distinct\n\t\tself.write_file(f'beq $t3 $0 _eq_true_{node.id}_')\t # if strings are empty\n\n\t\t# Verify ascii secuences\n\t\tself.write_file('addu $t0 $t0 16')\t# Point to start of string s1\n\t\tself.write_file('lw $t0 0($t0)')\n\t\tself.write_file('addu $t1 $t1 16') \t# Point to start of string s2\n\t\tself.write_file('lw $t1 0($t1)')\n\t\tself.write_file('move $t2 $t3')\t\t# Keep string length as counter\n\t\tself.write_file(f'_verify_ascii_sequences_{node.id}_:', tabbed = False)\n\t\tself.write_file('lb $a0 0($t0)')\t# get char of s1\n\t\tself.write_file('lb $a1 0($t1)')\t# get char of s2\n\t\tself.write_file(f'bne $a0 $a1 _eq_false_{node.id}_') # char s1 /= char s2\n\t\tself.write_file('addu $t0 $t0 1')\n\t\tself.write_file('addu $t1 $t1 1')\n\t\tself.write_file('addiu $t2 $t2 -1')\t# Decrement counter\n\t\tself.write_file(f'bnez $t2 _verify_ascii_sequences_{node.id}_')\n\t\tself.write_file(f'b _eq_true_{node.id}_')\t\t# end of strings\n\n\t\tself.write_file(f'_not_basic_type_{node.id}_:', tabbed = False)\n\t\tself.write_file(f'bne $t0 $t1 _eq_false_{node.id}_')\n\t\tself.write_file(f'b _eq_true_{node.id}_')\n\n\t\t# equal int or boolf\n\t\tself.write_file(f'_eq_int_bool_{node.id}:', tabbed = False)\t# handles booleans and ints\n\t\tself.write_file('lw $a3 12($t0)')\t# load value variable_1\n\t\tself.write_file('lw $t4 12($t1)') # load variable_2\n\t\tself.write_file(f'bne $a3 $t4 _eq_false_{node.id}_') # value of int or bool are distinct\n\n\t\t#return true\n\t\tself.write_file(f'_eq_true_{node.id}_:', tabbed = False)\n\t\tself.write_file('li $a0 1')\n\t\tself.write_file('sw $a0 {}($fp)'.format(self.offset[node.dest]))\n\t\tself.write_file(f'b end_equal_{node.id}_')\n\n\t\t#return false\n\t\tself.write_file(f'_eq_false_{node.id}_:', tabbed = False)\n\t\tself.write_file('li $a0 0')\n\t\tself.write_file('sw $a0 {}($fp)'.format(self.offset[node.dest]))\n\t\tself.write_file(f'end_equal_{node.id}_:', tabbed = False)\n\n\t@visitor.when(cil.LessThan)\n\tdef visit(self, node: cil.LessThan):\n\t\tself.write_file('# <')\n\t\tself.write_file('lw $a1, {}($fp)'.format(self.offset[node.left]))\n\t\tself.write_file('lw $a2, {}($fp)'.format(self.offset[node.right]))\n\t\tself.write_file('slt $a0, $a1, $a2'.format(self.offset[node.right]))\n\t\tself.write_file('sw $a0, {}($fp)'.format(self.offset[node.dest]))\n\t\tself.write_file('')\n\n\t@visitor.when(cil.EqualOrLessThan)\n\tdef visit(self, node: cil.EqualOrLessThan):\n\t\tself.write_file('# <=')\n\t\tself.write_file('lw $a1, {}($fp)'.format(self.offset[node.left]))\n\t\tself.write_file('lw $a2, {}($fp)'.format(self.offset[node.right]))\n\t\tself.write_file('sle $a0, $a1, $a2'.format(self.offset[node.right]))\n\t\tself.write_file('sw $a0, {}($fp)'.format(self.offset[node.dest]))\n\t\tself.write_file('')\n\n\n############################## ATTRIBUTES ####################################\n\n\n\t@visitor.when(cil.GetAttrib)\n\tdef visit(self, node: cil.GetAttrib):\n\t\tself.write_file('# GETATTR')\n\t\tself.write_file(f'lw $a1 {self.offset[node.instance]}($fp)')\n\t\tself.write_file(f'lw $a0 {12 + 4 * node.attribute}($a1)')\n\t\tself.write_file(f'sw $a0 {self.offset[node.dest]}($fp)')\n\t\tself.write_file('')\n\n\n\t@visitor.when(cil.SetAttrib)\n\tdef visit(self, node: cil.SetAttrib):\n\t\tself.write_file('# SETATTR')\n\t\tself.write_file(f'lw $a1 {self.offset[node.instance]}($fp)')\n\t\tif isinstance(node.src, int):\n\t\t\tself.write_file(f'li $a0, {node.src}')\n\t\telif node.src[:5] == \"data_\":\n\t\t\tself.write_file(f'la $a0, {node.src}')\n\t\telse:\n\t\t\tself.write_file(f'lw $a0 {self.offset[node.src]}($fp)')\n\t\tself.write_file(f'sw $a0 {12 + 4 * node.attribute}($a1)')\n\t\tself.write_file('')\n\n\n################################ MEMORY ######################################\n\n\n\t@visitor.when(cil.TypeOf)\n\tdef visit(self, node: cil.TypeOf):\n\t\tself.write_file('# TYPEOF')\n\t\tself.write_file(f'lw $a1 {self.offset[node.instance]}($fp)')\n\t\tself.write_file(f'lw $a0 0($a1)')\n\t\tself.write_file(f'sw $a0 {self.offset[node.dest]}($fp)')\n\t\tself.write_file('')\n\n\n\t@visitor.when(cil.Allocate)\n\tdef visit(self, node: cil.Allocate):\n\t\tself.write_file('# ALLOCATE')\n\t\tif node.ttype == VOID_TYPE:\n\t\t\tself.write_file(f'la $v0 {VOID_MIPS_NAME}')\n\t\t\tself.write_file(f'sw $v0 {self.offset[node.dest]}($fp)')\t\t\t\n\t\telse:\n\t\t\toffset_proto = self.type_index.index(node.ttype) * 8\n\t\t\tself.write_file('lw $t0 {}($s0)'.format(offset_proto))\n\t\t\tself.write_file('sw $t0, 0($sp)')\n\t\t\tself.write_file('addiu $sp, $sp, -4')\n\t\t\tself.write_file('')\n\t\t\tself.visit(cil.Call(dest = node.dest, f = \"Object_copy\"))\n\t\t\tself.write_file('addiu $sp, $sp, 4')\n\t\tself.write_file('')\n\n\n########################## DISPATCH STATEMENTS ###############################\n\n\n\t@visitor.when(cil.Call)\n\tdef visit(self, node: cil.Call):\n\t\tself.write_file('# CALL')\n\n\t\t# Save return address and frame pointer\n\t\tself.write_file(f'addiu $sp, $sp, -8')\n\t\tself.write_file(f'sw $ra, 4($sp)')\n\t\tself.write_file(f'sw $fp, 8($sp)')\n\n\t\t# Call the function\n\t\tself.write_file(f'jal function_{node.f}')\n\n\t\t# Restore return address and frame pointer\n\t\tself.write_file(f'lw $fp, 8($sp)')\n\t\tself.write_file(f'lw $ra, 4($sp)')\n\t\tself.write_file(f'addiu $sp, $sp, 8')\n\n\t\tif node.dest:\n\t\t\tself.write_file(f'sw $v0 {self.offset[node.dest]}($fp)')\n\n\t\tself.write_file('')\n\n\n\t@visitor.when(cil.VCall)\n\tdef visit(self, node: cil.VCall):\n\t\tself.write_file('# VCALL')\n\n\t\t# Save return address and frame pointer\n\t\tself.write_file(f'addiu $sp, $sp, -8')\n\t\tself.write_file(f'sw $ra, 4($sp)')\n\t\tself.write_file(f'sw $fp, 8($sp)')\n\n\t\tif node.ttype[0] == \"_\":\n\t\t\t# If node.type is a local CIL variable\n\t\t\tself.write_file(f'lw $a2, {self.offset[node.ttype]}($fp)')\n\t\telse:\n\t\t\t# If node.type a type name\n\t\t\tself.write_file(f'li $a2, {self.type_index.index(node.ttype)}')\n\t\tself.write_file(f'mulu $a2, $a2, 8')\n\t\tself.write_file(f'addu $a2, $a2, $s0')\n\t\tself.write_file(f'lw $a1, 0($a2)')\n\n\t\t# Check the dispatch table for the method's address\n\t\tself.write_file(f'lw $a2, 8($a1)')\n\t\tself.write_file(f'lw $a0 {node.f * 4}($a2)')\n\n\t\t# Call the function at 0($a0)\n\t\tself.write_file(f'jalr $a0')\n\n\t\t# Restore return address and frame pointer\n\t\tself.write_file(f'lw $fp, 8($sp)')\n\t\tself.write_file(f'lw $ra, 4($sp)')\n\t\tself.write_file(f'addiu $sp, $sp, 8')\n\n\t\t# Save value after restoring $fp\n\t\tself.write_file(f'sw $v0 {self.offset[node.dest]}($fp)')\n\n\t\t# Check prototypes table for the dynamic type\n\t\tif node.ttype[0] != '_':\n\t\t\tself.write_file(f'li $a2, {self.type_index.index(node.ttype)}')\n\t\telse:\n\t\t\tself.write_file(f'lw $a2, {self.offset[node.ttype]}($fp)')\n\n\t\tself.write_file('')\n\n\n\t@visitor.when(cil.PushParam)\n\tdef visit(self, node: cil.PushParam):\n\t\tself.write_file('# PUSHPARAM')\n\t\tif node.name[0] != \"_\":\n\t\t\tself.write_file('li $a0, {}'.format(self.type_index.index(node.name)))\n\t\telse:\n\t\t\tself.write_file('lw $a0, {}($fp)'.format(self.offset[node.name]))\n\t\tself.push()\n\t\tself.write_file('')\n\n\n\t@visitor.when(cil.PopParam)\n\tdef visit(self, node: cil.PopParam):\n\t\tself.write_file('# POPPARAM')\n\t\tself.pop(node.name)\n\t\tself.write_file('')\n\n\n\t@visitor.when(cil.Return)\n\tdef visit(self, node: cil.Return):\n\t\tself.write_file('# RETURN')\n\t\tself.write_file('lw $v0, {}($fp)'.format(self.offset[node.value]))\n\n\n################################# JUMPS ######################################\n\n\n\t@visitor.when(cil.Label)\n\tdef visit(self, node: cil.Label):\n\t\tself.write_file('_cil_label_{}:'.format(node.name), tabbed=False)\n\n\n\t@visitor.when(cil.Goto)\n\tdef visit(self, node: cil.Goto):\n\t\tself.write_file('# GOTO')\n\t\tself.write_file('j _cil_label_{}'.format(node.label))\n\t\tself.write_file('')\n\n\n\t@visitor.when(cil.IfGoto)\n\tdef visit(self, node: cil.IfGoto):\n\t\tself.write_file('# IF GOTO')\n\t\tself.write_file('lw $a0, {}($fp)'.format(self.offset[node.condition]))\n\t\tself.write_file('bnez $a0, _cil_label_{}'.format(node.label))\n\t\tself.write_file('')\n\n\n############################## STATIC CODE ###################################\n\n\t#----- STATIC DATAs\n\n\tdef static_datas(self):\n\t\t# Buffer for reading strings\n\t\tself.write_file('str_buffer: .space 1025')\t\t\n\t\tself.write_file('')\n\n\t\t# Declare error mensages\n\t\tself.write_file('_index_negative_msg: .asciiz \\\"Index to substr is negative\\\\n\\\"')\n\t\tself.write_file('_index_out_msg: .asciiz \\\"Index out range exception\\\\n\\\"')\n\t\tself.write_file('_abort_msg: \\\"Execution aborted\\\\n\\\"')\n\t\tself.write_file('_div_zero_msg: \\\"Division by zero exception\\\\n\\\"')\n\n\t\tself.write_file('')\n\n\t#----- ENTRY FUNCTION\n\n\tdef entry(self):\n\t\tself.write_file('entry:', tabbed=False)\n\t\tself.visit(cil.Call(dest = None, f = 'build_class_name_table'))\n\t\tself.visit(cil.Call(dest = None, f = 'allocate_prototypes_table'))\n\t\tself.visit(cil.Call(dest = None, f = 'build_prototypes'))\n\t\tself.visit(cil.Call(dest = None, f = 'build_dispatch_tables'))\n\t\tself.visit(cil.Call(dest = None, f = 'build_class_parents_table'))\n\t\tself.visit(cil.Allocate(dest = None, ttype = 'Main'))\n\n\t\t# Push main self\n\t\tself.write_file('sw $v0 0($sp)')\n\t\tself.write_file('addiu $sp $sp -4')\n\n\t\tself.visit(cil.Call(dest = None, f = f'Main_{INIT_CIL_SUFFIX}'))\n\t\tself.write_file('addiu $sp $sp 4')\n\n\t\t# Push main self\n\t\tself.write_file('sw $v0 0($sp)')\n\t\tself.write_file('addiu $sp $sp -4')\n\n\t\tself.visit(cil.Call(dest = None, f = 'Main_main'))\n\t\tself.write_file('addiu $sp $sp 4')\n\n\t\tself.write_file('li $v0 10')\n\t\tself.write_file('syscall')\n\n\t#----- OBJECT METHODS\n\n\tdef object_abort(self):\n\t\tself.write_file('function_Object_abort:', tabbed=False)\n\t\t# Set up stack frame\n\t\tself.write_file(f'move $fp, $sp')\n\n\t\tself.write_file('jr $ra')\n\t\tself.write_file('')\n\n\tdef object_copy(self):\n\t\tself.write_file('function_Object_copy:', tabbed=False)\n\t\t# Set up stack frame\n\t\tself.write_file(f'move $fp, $sp')\n\n\t\tself.write_file('lw $t0 12($fp)')# recoger la instancia a copiar\n\t\tself.write_file('lw $a0 4($t0)')\n\t\tself.write_file('move $t4 $a0')\n\t\tself.write_file('li $v0 9')\n\t\tself.write_file('syscall')# guarda en v0 la direccion de memoria que se reservo\n\t\tself.write_file('move $t2 $v0')# salvar la direccion donde comienza el objeto\n\t\tself.write_file('li $t3 0') # size ya copiado\n\t\tself.write_file('_objcopy_loop:', tabbed=False)\n\t\tself.write_file('lw $t1 0($t0)') # cargar la palabra por la que voy\n\t\tself.write_file('sw $t1 0($v0)') # copiar la palabra\n\t\tself.write_file('addiu $t0 $t0 4') # posiciona el puntero en la proxima palabra a copiar\n\t\tself.write_file('addiu $v0 $v0 4')\t# posiciona el puntero en la direccion donde copiar la proxima palabra\n\t\tself.write_file('addiu $t3 $t3 4') # actualizar el size copiado\n\t\tself.write_file('ble $t4 $t3 _objcopy_loop') # verificar si la condicion es igual o menor igual\n\t\tself.write_file('_objcopy_div_end_:', tabbed=False)\n\t\tself.write_file('move $v0 $t2') # dejar en v0 la direccion donde empieza el nuevo objeto\n\t\tself.write_file('jr $ra')\n\t\tself.write_file('')\n\n\tdef object_typename(self):\n\t\tself.write_file('function_Object_type_name:', tabbed=False)\n\t\t# Set up stack frame\n\t\tself.write_file(f'move $fp, $sp')\n\n\t\t# Box the string reference\n\t\tself.visit(cil.Allocate(dest = None, ttype = STRING_CLASS))\t\t# Create new String object\n\t\tself.write_file('move $v1 $v0')\n\n\t\t# Box string's length\n\t\tself.visit(cil.Allocate(dest = None, ttype = INTEGER_CLASS)\t)\t\t# Create new Int object\n\n\t\tself.write_file('lw $a1 12($fp)')\t\t\t# self\n\t\tself.write_file('lw $a1 0($a1)')\n\t\tself.write_file('mulu $a1 $a1 4')\t\t\t# self's class tag\n\t\tself.write_file('addu $a1 $a1 $s1')\t\t\t# class name table entry address\n\t\tself.write_file('lw $a1 0($a1)')\t\t\t\t# Get class name address\n\n\t\tself.write_file('move $a2 $0')\t\t\t\t# Compute string's length\n\t\tself.write_file('move $t2 $a1')\n\t\tself.write_file('_str_len_clsname_:', tabbed=False)\n\t\tself.write_file('lb $a0 0($t2)')\n\t\tself.write_file('beq $a0 $0 _end_clsname_len_')\n\t\tself.write_file('addiu $a2 $a2 1')\n\t\tself.write_file('addiu $t2 $t2 1')\n\t\tself.write_file('j _str_len_clsname_')\n\t\tself.write_file('_end_clsname_len_:', tabbed=False)\n\n\t\tself.write_file('sw $a2, 12($v0)')\t\t\t# Store string's length\n\n\t\tself.write_file('sw $v0, 12($v1)')\t\t\t# Fill String attributes\n\t\tself.write_file('sw $a1, 16($v1)')\n\n\t\tself.write_file('move $v0 $v1')\n\t\tself.write_file('jr $ra')\n\t\tself.write_file('')\n\n\n\t#----- STRING METHODS\n\n\tdef string_length(self):\n\t\tself.write_file('function_String_length:', tabbed=False)\n\t\t# Set up stack frame\n\t\tself.write_file(f'move $fp, $sp')\n\n\t\tself.write_file('lw $a0 12($fp)')\t\t\t# Self\n\t\tself.write_file('lw $v0 12($a0)')\n\t\tself.write_file('jr $ra')\n\t\tself.write_file('')\n\n\tdef string_concat(self):\n\t\tself.write_file('function_String_concat:', tabbed=False)\n\t\t# Set up stack frame\n\t\tself.write_file(f'move $fp, $sp')\n\n\t\tself.visit(cil.Allocate(dest = None, ttype = INTEGER_CLASS))\t\t# Create new Int object\n\t\tself.write_file('move $v1 $v0')\t\t\t\t\t\t\t\t\t\t\t\t# Save new Int Object\n\n\t\tself.visit(cil.Allocate(dest = None, ttype = STRING_CLASS))\t\t# Create new String object\n\t\tself.write_file('move $t3 $v0')\t\t\t# Store new String object\n\n\t\tself.write_file('lw $a1 12($fp)')\t\t# Self\n\t\tself.write_file('lw $a2 16($fp)')\t\t# Boxed String to concat\n\n\t\tself.write_file('lw $t1 12($a1)')\t\t# Self's length Int object\n\t\tself.write_file('lw $t1 12($t1)')\t\t# Self's length\n\n\t\tself.write_file('lw $t2 12($a2)')\t\t# strings to concat's length Int object\n\t\tself.write_file('lw $t2 12($t2)')\t\t# strings to concat's length\n\n\t\tself.write_file('addu $t0 $t2 $t1') \t\t# New string's length\n\t\tself.write_file('sw $t0 12($v1)')\t\t\t# Store new string's length into box\n\n\t\tself.write_file('lw $a1 16($a1)')\t\t# Unbox strings\n\t\tself.write_file('lw $a2 16($a2)')\n\n\t\tself.write_file('addiu $t0 $t0 1')\t\t# Add space for \\0\n\t\tself.allocate_memory('$t0', register=True)\t# Allocate memory for new string\n\t\tself.write_file('move $t5 $v0')\t\t\t\t\t# Keep the string's reference in v0 and use t7\n\n\n\t\t# a1: self's string\t\ta2: 2nd string\t\t\tt1: length self t2: 2nd string length\n\t\t#\t\t\t\t\t\t\t\t\tv1: new string's int object\n\n\t\tself.write_file('move $t4 $a1')\t\t\t# Index for iterating the self string\n\t\tself.write_file('addu $a1 $a1 $t1')\t\t# self's copy limit\n\t\tself.write_file('_strcat_copy_:', tabbed=False)\n\t\tself.write_file('beq $t4 $a1 _end_strcat_copy_')\t# No more characters to copy\n\n\t\tself.write_file('lb $a0 0($t4)')\t\t\t# Copy the character\n\t\tself.write_file('sb $a0 0($t5)')\n\n\t\tself.write_file('addiu $t5 $t5 1')\t\t# Advance indices\n\t\tself.write_file('addiu $t4 $t4 1')\n\t\tself.write_file('j _strcat_copy_')\n\t\tself.write_file('_end_strcat_copy_:', tabbed=False)\n\n\t\t# Copy 2nd string\n\n\t\tself.write_file('move $t4 $a2')\t\t\t# Index for iterating the strings\n\t\tself.write_file('addu $a2 $a2 $t2')\t\t# self's copy limit\n\t\tself.write_file('_strcat_copy_snd_:', tabbed=False)\n\t\tself.write_file('beq $t4 $a2 _end_strcat_copy_snd_')\t# No more characters to copy\n\n\t\tself.write_file('lb $a0 0($t4)')\t\t\t# Copy the character\n\t\tself.write_file('sb $a0 0($t5)')\n\n\t\tself.write_file('addiu $t5 $t5 1')\t\t# Advance indices\n\t\tself.write_file('addiu $t4 $t4 1')\n\t\tself.write_file('j _strcat_copy_snd_')\n\t\tself.write_file('_end_strcat_copy_snd_:', tabbed=False)\n\n\t\tself.write_file('sb $0 0($t5)')\t\t\t# End string with \\0\n\n\t\t# $v0: reference to new string\t\t\t$v1: length int object\n\t\t# \t\t\t\t\t\t$t3: new string object\n\t\t# -> Create boxed string\n\n\t\tself.write_file('sw $v1 12($t3)')\t\t# New length\n\t\tself.write_file('sw $v0 16($t3)')\t\t# New string\n\n\t\tself.write_file('move $v0 $t3')\t\t\t# Return new String object in $v0\n\t\tself.write_file('jr $ra')\n\t\tself.write_file('')\n\n\tdef string_substr(self):\n\t\tself.write_file('function_String_substr:', tabbed=False)\n\t\t# Set up stack frame\n\t\tself.write_file(f'move $fp, $sp')\n\t\tself.write_file(f'lw $t5 12($fp)') # self param\n\t\tself.write_file(f'lw $a1 16($fp)') # reference of object int that represent i\n\t\tself.write_file(f'lw $a1 12($a1)') # value of i\n\t\tself.write_file(f'lw $a2 20($fp)') # reference of object int that represent j\n\t\tself.write_file(f'lw $a2 12($a2)') # value of j that is length to copy\n\t\tself.write_file(f'blt $a1 $0 _index_negative') # index i is negative\n\t\tself.write_file(f'blt $a2 $0 _index_negative') # length j is negative\n\t\tself.write_file(f'add $a2 $a1 $a2') # finish index\n\t\tself.write_file(f'lw $a3 12($t5)')\n\t\tself.write_file(f'lw $a3 12($a3)') # length of string\n\t\tself.write_file(f'bgt $a2 $a3 _index_out') # j > lenght\n\n\t\t# not errors\n\t\tself.visit(cil.Allocate(dest = None, ttype = STRING_CLASS))\n\t\tself.write_file(f'move $v1 $v0') # new string\n\n\t\tself.visit(cil.Allocate(dest = None, ttype = INTEGER_CLASS))\n\t\tself.write_file(f'move $t0 $v0') # lenght of string\n\t\tself.write_file(f'move $t7 $a2')\n\t\tself.write_file(f'subu $t7 $t7 $a1')\n\t\tself.write_file(f'sw $t7 12($t0)') # save number that represent lenght of new string\n\n\t\tself.allocate_memory('$a2', register=True)\t# $v0 -> address of the string\n\n\t\tself.write_file(f'sw $t0 12($v1)') # store length\n\t\tself.write_file(f'sw $v0 16($v1)') # store address of new string to String object\n\n\t\t# generate substring\n\t\tself.write_file('move $t1 $v0')\t\t\t\t# Index for iterating the new string\t\n\t\t\n\t\tself.write_file('lw $t5 16($t5)')\t\t\t# Index for iterating the self string\n\t\tself.write_file('move $t4 $t5')\n\t\tself.write_file('addu $t4 $t4 $a1') # self's copy start\n\t\tself.write_file('addu $t5 $t5 $a2')\t# self's copy limit\n\n\t\tself.write_file('_substr_copy_:', tabbed=False)\n\t\tself.write_file('bge $t4 $t5 _end_substr_copy_')\t# No more characters to copy\n\n\t\tself.write_file('lb $a0 0($t4)')\t\t\t# Copy the character\n\t\tself.write_file('sb $a0 0($t1)')\n\n\t\tself.write_file('addiu $t1 $t1 1')\t\t# Advance indices\n\t\tself.write_file('addiu $t4 $t4 1')\n\t\tself.write_file('j _substr_copy_')\n\n\t\t# errors sections\n\t\tself.write_file(f'_index_negative:',tabbed=False)\n\t\tself.write_file(f'la $a0 _index_negative_msg')\t\n\t\tself.write_file(f'b _subst_abort')\n\n\t\tself.write_file(f'_index_out:',tabbed=False)\n\t\tself.write_file(f'la $a0 _index_out_msg')\t\n\t\tself.write_file(f'b _subst_abort')\n\n\t\t# abort execution \n\t\tself.write_file(f'_subst_abort:',tabbed=False)\n\t\tself.write_file(f'li $v0 4') \n\t\tself.write_file(f'syscall')\n\t\tself.write_file('la\t$a0 _abort_msg')\n\t\tself.write_file(f'li $v0 4')\n\t\tself.write_file(f'syscall')\n\t\tself.write_file(f'li $v0 10')\n\t\tself.write_file(f'syscall') # exit\n\n\t\t# successful execution \n\t\tself.write_file('_end_substr_copy_:', tabbed=False)\n\n\t\tself.write_file('move $v0 $v1')\n\t\tself.write_file('jr $ra')\n\t\tself.write_file('')\n\n\t#----- IO\n\n\tdef io_in_int(self):\n\t\tself.write_file('function_IO_in_int:', tabbed=False)\n\t\t# Set up stack frame\n\t\tself.write_file(f'move $fp, $sp')\n\n\t\tself.visit(cil.Allocate(dest = None, ttype = INTEGER_CLASS))\t\t\t# Create new Int object\n\n\t\tself.write_file('move $t0 $v0')\t\t\t\t# Save Int object\n\n\t\tself.write_file('li $v0 5')\t\t\t\t\t# Read int\n\t\tself.write_file('syscall')\n\n\t\tself.write_file('sw $v0 12($t0)')\t\t\t# Store int\n\n\t\tself.write_file('move $v0 $t0')\n\t\tself.write_file('jr $ra')\n\t\tself.write_file('')\n\n\tdef io_in_string(self):\n\t\tself.write_file('function_IO_in_string:', tabbed=False)\n\t\t# Set up stack frame\n\t\tself.write_file(f'move $fp, $sp')\n\n\t\tself.visit(cil.Allocate(dest = None, ttype = INTEGER_CLASS))\t\t# Create new Int object for string's length\n\t\tself.write_file('move $v1 $v0')\t\t\t# $v1: Int pbject\n\n\t\tself.visit(cil.Allocate(dest = None, ttype = STRING_CLASS))\t\t\t# Create new String object\n\t\tself.write_file('sw $v1 12($v0)')\n\t\tself.write_file('move $t5 $v0')\t\t\t# $t5: String object\n\n\t\t# Read String and store in a temp buffer\n\t\tself.write_file('la $a0 str_buffer')\n\t\tself.write_file('li $a1 1025')\n\t\tself.write_file('li $v0 8')\t\t\t\t\t# Read string\n\t\tself.write_file('syscall')\n\n\t\t# Compute string's length\n\t\tself.write_file('move $a0 $0')\n\t\tself.write_file('la $t2 str_buffer')\n\t\tself.write_file('_in_string_str_len_:', tabbed=False)\n\t\tself.write_file('lb $t0 0($t2)')\n\t\tself.write_file('beq $t0 $0 _end_in_string_str_len_')\n\t\tself.write_file('beq $t0 10 _end_in_string_str_len_')\n\t\tself.write_file('addiu $a0 $a0 1')\n\t\tself.write_file('addiu $t2 $t2 1')\n\t\tself.write_file('j _in_string_str_len_')\n\t\tself.write_file('_end_in_string_str_len_:', tabbed=False)\n\n\t\t# Store string's length into Integer class\n\t\tself.write_file('sw $a0 12($v1)')\n\n\t\t# Allocate size in $a0 ... string's length\n\t\tself.allocate_memory()\n\n\t\t# $a0: string's length \t\t\t$v0: string's new address\t\t\t$t5: String object\n\n\t\t# Copy string from buffer to new address\n\t\tself.write_file('la $t4 str_buffer')\t\t\t# Index for iterating the string buffer\n\t\tself.write_file('move $t1 $v0')\t\t\t\t\t# Index for iterating new string address\n\n\t\tself.write_file('_in_str_copy_:', tabbed=False)\n\t\tself.write_file('lb $t0 0($t4)')\t\t\t# Load a character\n\t\tself.write_file('beq $t0 $0 _end_in_str_copy_')\t# No more characters to copy\n\t\tself.write_file('beq $t0 10 _end_in_str_copy_')\t# No more characters to copy\n\n\t\tself.write_file('sb $t0 0($t1)')\t\t\t# Copy the character\n\n\t\tself.write_file('addiu $t4 $t4 1')\t\t# Advance indices\n\t\tself.write_file('addiu $t1 $t1 1')\n\t\tself.write_file('j _in_str_copy_')\n\t\tself.write_file('_end_in_str_copy_:', tabbed=False)\n\n\t\t# Store string\n\t\tself.write_file('sw $v0 16($t5)')\t\n\n\t\t# Clean string buffer\n\t\tself.write_file('la $t4 str_buffer')\t\t\t# Index for iterating the string buffer\n\t\tself.write_file('_in_str_clean_:', tabbed=False)\n\t\tself.write_file('lb $t0 0($t4)')\t\t\t# Load a character\n\t\tself.write_file('beq $t0 $0 _end_in_str_clean_')\t# No more characters to clean\n\n\t\tself.write_file('sb $0 0($t4)')\t\t\t# Clean the character\n\n\t\tself.write_file('addiu $t4 $t4 1')\t\t# Advance indices\n\t\tself.write_file('j _in_str_clean_')\n\t\tself.write_file('_end_in_str_clean_:', tabbed=False)\n\n\t\t# Return new string in $v0\n\t\tself.write_file('move $v0 $t5')\n\t\tself.write_file('jr $ra')\n\t\tself.write_file('')\n\n\tdef io_out_int(self):\n\t\tself.write_file('function_IO_out_int:', tabbed=False)\n\t\t# Set up stack frame\n\t\tself.write_file(f'move $fp, $sp')\n\n\t\tself.write_file('lw $a0 16($fp)')\t\t\t# Get Int object\n\t\tself.write_file('lw $a0 12($a0)')\n\n\t\tself.write_file('li $v0 1')\t\t\t\t\t# Print int\n\t\tself.write_file('syscall')\n\n\t\tself.write_file('lw $v0 12($fp)')\t\t\t\t# Return self\n\t\tself.write_file('jr $ra')\n\t\tself.write_file('')\n\n\tdef io_out_string(self):\n\t\tself.write_file('function_IO_out_string:', tabbed=False)\n\t\t# Set up stack frame\n\t\tself.write_file(f'move $fp, $sp')\n\n\t\tself.write_file('lw $a0 16($fp)')\t\t\t# Get String object\n\t\tself.write_file('lw $a0 16($a0)')\n\n\t\tself.write_file('li $v0 4')\t\t\t\t\t# Print string\n\t\tself.write_file('syscall')\n\n\t\tself.write_file('lw $v0 12($fp)')\t\t\t\t# Return self\n\t\tself.write_file('jr $ra')\n\t\tself.write_file('')\n\n\t#------ CONFORMS\n\n\tdef conforms(self):\n\t\tself.write_file(f'function_{CONFORMS_FUNC}:', tabbed=False)\n\t\t# Set up stack frame\n\t\tself.write_file(f'move $fp, $sp')\n\n\t\tself.write_file(f'lw $t0 12($fp)')\t\t# First arg's class tag\n\t\tself.write_file(f'lw $t1 16($fp)')\t\t# Second arg's class tag\n\n\t\t# 2nd arg == Object -> return true\n\t\tself.write_file(f'beq $t1 {self.type_index.index(OBJECT_CLASS)} _conforms_ret_true_')\t\n\n\t\tself.write_file('_conforms_loop_:', tabbed=False)\n\n\t\t# current == 2nd arg -> return true\n\t\tself.write_file('beq $t0 $t1 _conforms_ret_true_')\t\n\n\t\t# current == Object -> return false\n\t\tself.write_file(f'beq $t0 {self.type_index.index(OBJECT_CLASS)} _conforms_ret_false_')\t\t\n\n\t\t# Query parents's class tag from $s2 ... class parent table\n\t\tself.write_file('mulu $t0 $t0 4')\n\t\tself.write_file('addu $t0 $t0 $s2')\t\t\n\t\tself.write_file('lw $t0 0($t0)')\t\t\t# current = current.parent\n\t\tself.write_file('j _conforms_loop_')\n\t\t\n\t\tself.write_file('_conforms_ret_true_:', tabbed=False)\n\t\tself.write_file('li $v0 1')\n\t\tself.write_file('j _conforms_ret_')\n\n\t\tself.write_file('_conforms_ret_false_:', tabbed=False)\n\t\tself.write_file('li $v0 0')\n\t\t\n\t\t# No need to store result in a Bool class\n\t\tself.write_file('_conforms_ret_:')\n\t\tself.write_file('jr $ra')\n\t\tself.write_file('')\n\n\t#------ ISVOID\n\n\tdef isvoid(self):\n\t\tself.write_file(f'function_{ISVOID_FUNC}:', tabbed=False)\n\t\t# Set up stack frame\n\t\tself.write_file(f'move $fp, $sp')\n\n\t\tself.visit(cil.Allocate(dest = None, ttype = BOOLEAN_CLASS))\n\t\t# $v0 contains new Bool object\n\n\t\tself.write_file(f'lw $t0 12($fp)')\t\t\t\t\t# 1st arg is an object address\n\t\tself.write_file(f'la $t1 {VOID_MIPS_NAME}')\n\n\t\tself.write_file(f'beq $t0 $t1 _is_void_true_')\t# arg == void type\n\t\tself.write_file(f'sw $0 12($v0)')\t\t\t\t\t# return False\n\t\tself.write_file(f'j _is_void_end_')\n\n\t\tself.write_file(f'_is_void_true_:', tabbed=False)\n\t\tself.write_file(f'li $t0 1')\n\t\tself.write_file(f'sw $t0 12($v0)')\t\t\t\t\t# return True\n\t\tself.write_file(f'_is_void_end_:', tabbed=False)\n\n\t\t# Return Bool object in $v0\n\t\tself.write_file(f'jr $ra')\n\t\tself.write_file(f'')",
"step-ids": [
25,
31,
32,
48,
50
]
}
|
[
25,
31,
32,
48,
50
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
array = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
solution = Solution.Find(6, array)
<|reserved_special_token_1|>
from find import Solution
array = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
solution = Solution.Find(6, array)
|
flexible
|
{
"blob_id": "d4361b169bf75d3af82eca3d26609961ccc2f27e",
"index": 2405,
"step-1": "<mask token>\n",
"step-2": "<mask token>\narray = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]\nsolution = Solution.Find(6, array)\n",
"step-3": "from find import Solution\narray = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]\nsolution = Solution.Find(6, array)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
# coding:utf-8
from flask_sqlalchemy import SQLAlchemy
from config.manager import app
from config.db import db
class Category(db.Model):
__tablename__ = 'category'
id = db.Column(db.Integer, primary_key=True) # 编号
name = db.Column(db.String(20), nullable=False) # 账号
addtime = db.Column(db.DateTime, nullable=False) # 注册时间
def __repr__(self):
return "<User %r>" % self.name
if __name__ == '__main__':
db.create_all()
|
normal
|
{
"blob_id": "743aa4ccbb9a131b5ef3d04475789d3d1da1a2fa",
"index": 2407,
"step-1": "<mask token>\n\n\nclass Category(db.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Category(db.Model):\n __tablename__ = 'category'\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(20), nullable=False)\n addtime = db.Column(db.DateTime, nullable=False)\n\n def __repr__(self):\n return '<User %r>' % self.name\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Category(db.Model):\n __tablename__ = 'category'\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(20), nullable=False)\n addtime = db.Column(db.DateTime, nullable=False)\n\n def __repr__(self):\n return '<User %r>' % self.name\n\n\nif __name__ == '__main__':\n db.create_all()\n",
"step-4": "from flask_sqlalchemy import SQLAlchemy\nfrom config.manager import app\nfrom config.db import db\n\n\nclass Category(db.Model):\n __tablename__ = 'category'\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(20), nullable=False)\n addtime = db.Column(db.DateTime, nullable=False)\n\n def __repr__(self):\n return '<User %r>' % self.name\n\n\nif __name__ == '__main__':\n db.create_all()\n",
"step-5": "# coding:utf-8\nfrom flask_sqlalchemy import SQLAlchemy\nfrom config.manager import app\nfrom config.db import db\n\n\nclass Category(db.Model):\n __tablename__ = 'category'\n id = db.Column(db.Integer, primary_key=True) # 编号\n name = db.Column(db.String(20), nullable=False) # 账号\n addtime = db.Column(db.DateTime, nullable=False) # 注册时间\n\n def __repr__(self):\n return \"<User %r>\" % self.name\n\n\nif __name__ == '__main__':\n db.create_all()\n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for _ in range(5):
sum_value += int(input())
print(sum_value)
<|reserved_special_token_1|>
sum_value = 0
for _ in range(5):
sum_value += int(input())
print(sum_value)
|
flexible
|
{
"blob_id": "4add80894036e0395a6e6eb13e8a2db0d963de8c",
"index": 9654,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor _ in range(5):\n sum_value += int(input())\nprint(sum_value)\n",
"step-3": "sum_value = 0\nfor _ in range(5):\n sum_value += int(input())\nprint(sum_value)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
def pdfToFolder(projectName):
os.chdir('/home/gmclaughlin/Downloads')
if projectName.find('DEM') != -1:
shutil.move('/home/gmclaughlin/Downloads/Detail Report - Basic.pdf',
'/home/gmclaughlin/Python/Safety Project/Demo/%s/%s-Detail Report.pdf'
% (projectName, projectName))
elif projectName.find('JDC') != -1:
shutil.move('/home/gmclaughlin/Downloads/Detail Report - Basic.pdf',
'/home/gmclaughlin/Python/Safety Project/JDC/%s/%s-Detail Report.pdf'
% (projectName, projectName))
elif projectName.find('NEW') != -1:
shutil.move('/home/gmclaughlin/Downloads/Detail Report - Basic.pdf',
'/home/gmclaughlin/Python/Safety Project/NewRoads/%s/%s-Detail Report.pdf'
% (projectName, projectName))
elif projectName.find('Site') != -1:
shutil.move('/home/gmclaughlin/Downloads/Detail Report - Basic.pdf',
'/home/gmclaughlin/Python/Safety Project/SiteCrew/%s/%s-Detail Report.pdf'
% (projectName, projectName))
else:
shutil.move('/home/gmclaughlin/Downloads/Detail Report - Basic.pdf',
'/home/gmclaughlin/Python/Safety Project/Other/%s/%s-Detail Report.pdf'
% (projectName, projectName))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
browser.get(
'https://safetynet.predictivesolutions.com/CRMApp/default_login.jsp?loginZoneID=10459&originalHostName=jdc.predictivesolutions.com'
)
<|reserved_special_token_0|>
userElem.send_keys('temp')
<|reserved_special_token_0|>
passElem.send_keys('temp')
passElem.submit()
time.sleep(3)
<|reserved_special_token_0|>
linkElem.click()
time.sleep(2)
<|reserved_special_token_0|>
linkElem.click()
time.sleep(4)
def pdfToFolder(projectName):
os.chdir('/home/gmclaughlin/Downloads')
if projectName.find('DEM') != -1:
shutil.move('/home/gmclaughlin/Downloads/Detail Report - Basic.pdf',
'/home/gmclaughlin/Python/Safety Project/Demo/%s/%s-Detail Report.pdf'
% (projectName, projectName))
elif projectName.find('JDC') != -1:
shutil.move('/home/gmclaughlin/Downloads/Detail Report - Basic.pdf',
'/home/gmclaughlin/Python/Safety Project/JDC/%s/%s-Detail Report.pdf'
% (projectName, projectName))
elif projectName.find('NEW') != -1:
shutil.move('/home/gmclaughlin/Downloads/Detail Report - Basic.pdf',
'/home/gmclaughlin/Python/Safety Project/NewRoads/%s/%s-Detail Report.pdf'
% (projectName, projectName))
elif projectName.find('Site') != -1:
shutil.move('/home/gmclaughlin/Downloads/Detail Report - Basic.pdf',
'/home/gmclaughlin/Python/Safety Project/SiteCrew/%s/%s-Detail Report.pdf'
% (projectName, projectName))
else:
shutil.move('/home/gmclaughlin/Downloads/Detail Report - Basic.pdf',
'/home/gmclaughlin/Python/Safety Project/Other/%s/%s-Detail Report.pdf'
% (projectName, projectName))
<|reserved_special_token_0|>
for cellObj in sheet['A']:
if (cellObj.value != 'Project' and cellObj.value !=
'JDC-Winchester HS Enabling (CONSIG'):
linkElem = browser.find_element_by_name('clear')
linkElem.click()
time.sleep(4)
linkElem = browser.find_element_by_name('showSafeAndUnsafeDetails')
linkElem.click()
time.sleep(1)
linkElem = browser.find_element_by_name('showImages')
linkElem.click()
time.sleep(1)
linkElem = browser.find_element_by_name('datePickerRadio')
linkElem.click()
time.sleep(1)
projectElem = browser.find_elements_by_xpath("//input[@type='text']")
print(cellObj.value)
projectElem[5 + addValue].send_keys('01/01/2010')
time.sleep(1)
projectElem[6 + addValue].send_keys('08/15/2017')
time.sleep(1)
projectElem[8 + addValue].clear()
projectElem[8 + addValue].send_keys(cellObj.value)
time.sleep(1)
projectElem[8 + addValue].send_keys(Keys.ENTER)
time.sleep(3)
linkElem = browser.find_element_by_xpath("//input[@type='submit']")
linkElem.click()
time.sleep(10)
linkElem = browser.find_element_by_name('pdf')
linkElem.click()
time.sleep(70)
addValue = 1
pdfToFolder(cellObj.value)
counter = counter + 1
<|reserved_special_token_1|>
<|reserved_special_token_0|>
wb = openpyxl.load_workbook('ProjectSummary.xlsx')
sheet = wb.active
browser = webdriver.Firefox()
browser.get(
'https://safetynet.predictivesolutions.com/CRMApp/default_login.jsp?loginZoneID=10459&originalHostName=jdc.predictivesolutions.com'
)
userElem = browser.find_element_by_id('username')
userElem.send_keys('temp')
passElem = browser.find_element_by_id('password')
passElem.send_keys('temp')
passElem.submit()
time.sleep(3)
linkElem = browser.find_element_by_link_text('Reports')
linkElem.click()
time.sleep(2)
linkElem = browser.find_element_by_link_text('Detail Report')
linkElem.click()
time.sleep(4)
def pdfToFolder(projectName):
os.chdir('/home/gmclaughlin/Downloads')
if projectName.find('DEM') != -1:
shutil.move('/home/gmclaughlin/Downloads/Detail Report - Basic.pdf',
'/home/gmclaughlin/Python/Safety Project/Demo/%s/%s-Detail Report.pdf'
% (projectName, projectName))
elif projectName.find('JDC') != -1:
shutil.move('/home/gmclaughlin/Downloads/Detail Report - Basic.pdf',
'/home/gmclaughlin/Python/Safety Project/JDC/%s/%s-Detail Report.pdf'
% (projectName, projectName))
elif projectName.find('NEW') != -1:
shutil.move('/home/gmclaughlin/Downloads/Detail Report - Basic.pdf',
'/home/gmclaughlin/Python/Safety Project/NewRoads/%s/%s-Detail Report.pdf'
% (projectName, projectName))
elif projectName.find('Site') != -1:
shutil.move('/home/gmclaughlin/Downloads/Detail Report - Basic.pdf',
'/home/gmclaughlin/Python/Safety Project/SiteCrew/%s/%s-Detail Report.pdf'
% (projectName, projectName))
else:
shutil.move('/home/gmclaughlin/Downloads/Detail Report - Basic.pdf',
'/home/gmclaughlin/Python/Safety Project/Other/%s/%s-Detail Report.pdf'
% (projectName, projectName))
finsihedFlag = False
addValue = 0
counter = 0
for cellObj in sheet['A']:
if (cellObj.value != 'Project' and cellObj.value !=
'JDC-Winchester HS Enabling (CONSIG'):
linkElem = browser.find_element_by_name('clear')
linkElem.click()
time.sleep(4)
linkElem = browser.find_element_by_name('showSafeAndUnsafeDetails')
linkElem.click()
time.sleep(1)
linkElem = browser.find_element_by_name('showImages')
linkElem.click()
time.sleep(1)
linkElem = browser.find_element_by_name('datePickerRadio')
linkElem.click()
time.sleep(1)
projectElem = browser.find_elements_by_xpath("//input[@type='text']")
print(cellObj.value)
projectElem[5 + addValue].send_keys('01/01/2010')
time.sleep(1)
projectElem[6 + addValue].send_keys('08/15/2017')
time.sleep(1)
projectElem[8 + addValue].clear()
projectElem[8 + addValue].send_keys(cellObj.value)
time.sleep(1)
projectElem[8 + addValue].send_keys(Keys.ENTER)
time.sleep(3)
linkElem = browser.find_element_by_xpath("//input[@type='submit']")
linkElem.click()
time.sleep(10)
linkElem = browser.find_element_by_name('pdf')
linkElem.click()
time.sleep(70)
addValue = 1
pdfToFolder(cellObj.value)
counter = counter + 1
<|reserved_special_token_1|>
import os, openpyxl, time, shutil
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
wb = openpyxl.load_workbook('ProjectSummary.xlsx')
sheet = wb.active
browser = webdriver.Firefox()
browser.get(
'https://safetynet.predictivesolutions.com/CRMApp/default_login.jsp?loginZoneID=10459&originalHostName=jdc.predictivesolutions.com'
)
userElem = browser.find_element_by_id('username')
userElem.send_keys('temp')
passElem = browser.find_element_by_id('password')
passElem.send_keys('temp')
passElem.submit()
time.sleep(3)
linkElem = browser.find_element_by_link_text('Reports')
linkElem.click()
time.sleep(2)
linkElem = browser.find_element_by_link_text('Detail Report')
linkElem.click()
time.sleep(4)
def pdfToFolder(projectName):
os.chdir('/home/gmclaughlin/Downloads')
if projectName.find('DEM') != -1:
shutil.move('/home/gmclaughlin/Downloads/Detail Report - Basic.pdf',
'/home/gmclaughlin/Python/Safety Project/Demo/%s/%s-Detail Report.pdf'
% (projectName, projectName))
elif projectName.find('JDC') != -1:
shutil.move('/home/gmclaughlin/Downloads/Detail Report - Basic.pdf',
'/home/gmclaughlin/Python/Safety Project/JDC/%s/%s-Detail Report.pdf'
% (projectName, projectName))
elif projectName.find('NEW') != -1:
shutil.move('/home/gmclaughlin/Downloads/Detail Report - Basic.pdf',
'/home/gmclaughlin/Python/Safety Project/NewRoads/%s/%s-Detail Report.pdf'
% (projectName, projectName))
elif projectName.find('Site') != -1:
shutil.move('/home/gmclaughlin/Downloads/Detail Report - Basic.pdf',
'/home/gmclaughlin/Python/Safety Project/SiteCrew/%s/%s-Detail Report.pdf'
% (projectName, projectName))
else:
shutil.move('/home/gmclaughlin/Downloads/Detail Report - Basic.pdf',
'/home/gmclaughlin/Python/Safety Project/Other/%s/%s-Detail Report.pdf'
% (projectName, projectName))
finsihedFlag = False
addValue = 0
counter = 0
for cellObj in sheet['A']:
if (cellObj.value != 'Project' and cellObj.value !=
'JDC-Winchester HS Enabling (CONSIG'):
linkElem = browser.find_element_by_name('clear')
linkElem.click()
time.sleep(4)
linkElem = browser.find_element_by_name('showSafeAndUnsafeDetails')
linkElem.click()
time.sleep(1)
linkElem = browser.find_element_by_name('showImages')
linkElem.click()
time.sleep(1)
linkElem = browser.find_element_by_name('datePickerRadio')
linkElem.click()
time.sleep(1)
projectElem = browser.find_elements_by_xpath("//input[@type='text']")
print(cellObj.value)
projectElem[5 + addValue].send_keys('01/01/2010')
time.sleep(1)
projectElem[6 + addValue].send_keys('08/15/2017')
time.sleep(1)
projectElem[8 + addValue].clear()
projectElem[8 + addValue].send_keys(cellObj.value)
time.sleep(1)
projectElem[8 + addValue].send_keys(Keys.ENTER)
time.sleep(3)
linkElem = browser.find_element_by_xpath("//input[@type='submit']")
linkElem.click()
time.sleep(10)
linkElem = browser.find_element_by_name('pdf')
linkElem.click()
time.sleep(70)
addValue = 1
pdfToFolder(cellObj.value)
counter = counter + 1
<|reserved_special_token_1|>
#downloads project detail reports from the web and places them in the correct project folder created by makeFolders.py
import os, openpyxl, time, shutil
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
wb = openpyxl.load_workbook('ProjectSummary.xlsx')
sheet = wb.active
browser = webdriver.Firefox()
browser.get('https://safetynet.predictivesolutions.com/CRMApp/default_login.jsp?loginZoneID=10459&originalHostName=jdc.predictivesolutions.com')
userElem = browser.find_element_by_id('username')
userElem.send_keys('temp')
passElem = browser.find_element_by_id('password')
passElem.send_keys('temp')
passElem.submit()
time.sleep(3)
linkElem = browser.find_element_by_link_text('Reports')
linkElem.click()
time.sleep(2)
linkElem = browser.find_element_by_link_text('Detail Report')
linkElem.click()
time.sleep(4)
def pdfToFolder(projectName):
os.chdir('/home/gmclaughlin/Downloads')
if projectName.find("DEM") != -1:
shutil.move('/home/gmclaughlin/Downloads/Detail Report - Basic.pdf','/home/gmclaughlin/Python/Safety Project/Demo/%s/%s-Detail Report.pdf' % (projectName, projectName))
elif projectName.find("JDC") != -1:
shutil.move('/home/gmclaughlin/Downloads/Detail Report - Basic.pdf','/home/gmclaughlin/Python/Safety Project/JDC/%s/%s-Detail Report.pdf' % (projectName, projectName))
elif projectName.find("NEW") != -1:
shutil.move('/home/gmclaughlin/Downloads/Detail Report - Basic.pdf','/home/gmclaughlin/Python/Safety Project/NewRoads/%s/%s-Detail Report.pdf' % (projectName, projectName))
elif projectName.find("Site") != -1:
shutil.move('/home/gmclaughlin/Downloads/Detail Report - Basic.pdf','/home/gmclaughlin/Python/Safety Project/SiteCrew/%s/%s-Detail Report.pdf' % (projectName, projectName))
else:
shutil.move('/home/gmclaughlin/Downloads/Detail Report - Basic.pdf','/home/gmclaughlin/Python/Safety Project/Other/%s/%s-Detail Report.pdf' % (projectName, projectName))
finsihedFlag = False
addValue = 0
counter = 0
for cellObj in sheet['A']:
if cellObj.value != 'Project' and cellObj.value != 'JDC-Winchester HS Enabling (CONSIG':
linkElem = browser.find_element_by_name('clear') #clear existing settings
linkElem.click()
time.sleep(4)
linkElem = browser.find_element_by_name('showSafeAndUnsafeDetails') #select all reports
linkElem.click()
time.sleep(1)
linkElem = browser.find_element_by_name('showImages') #show images in reports
linkElem.click()
time.sleep(1)
linkElem = browser.find_element_by_name('datePickerRadio')
linkElem.click()
time.sleep(1)
projectElem = browser.find_elements_by_xpath("//input[@type='text']") #find and use text fields
print(cellObj.value)
#projectElem = browser.find_element_by_xpath("//input[4]")
#time.sleep(2)
#projectElem[5+addValue].clear()
projectElem[5+addValue].send_keys('01/01/2010')
time.sleep(1)
#projectElem[6+addValue].clear()
projectElem[6+addValue].send_keys('08/15/2017')
time.sleep(1)
projectElem[8+addValue].clear() #this is the project name box
projectElem[8+addValue].send_keys(cellObj.value)
time.sleep(1)
projectElem[8+addValue].send_keys(Keys.ENTER)
time.sleep(3)
linkElem = browser.find_element_by_xpath("//input[@type='submit']") #submit request for report
linkElem.click()
time.sleep(10)
linkElem = browser.find_element_by_name('pdf') #download as PDF
linkElem.click()
time.sleep(70)
addValue = 1
pdfToFolder(cellObj.value)
counter = counter + 1
|
flexible
|
{
"blob_id": "6e9fd8ee2a187888df07c9dd1c32fe59a111c869",
"index": 8823,
"step-1": "<mask token>\n\n\ndef pdfToFolder(projectName):\n os.chdir('/home/gmclaughlin/Downloads')\n if projectName.find('DEM') != -1:\n shutil.move('/home/gmclaughlin/Downloads/Detail Report - Basic.pdf',\n \n '/home/gmclaughlin/Python/Safety Project/Demo/%s/%s-Detail Report.pdf'\n % (projectName, projectName))\n elif projectName.find('JDC') != -1:\n shutil.move('/home/gmclaughlin/Downloads/Detail Report - Basic.pdf',\n \n '/home/gmclaughlin/Python/Safety Project/JDC/%s/%s-Detail Report.pdf'\n % (projectName, projectName))\n elif projectName.find('NEW') != -1:\n shutil.move('/home/gmclaughlin/Downloads/Detail Report - Basic.pdf',\n \n '/home/gmclaughlin/Python/Safety Project/NewRoads/%s/%s-Detail Report.pdf'\n % (projectName, projectName))\n elif projectName.find('Site') != -1:\n shutil.move('/home/gmclaughlin/Downloads/Detail Report - Basic.pdf',\n \n '/home/gmclaughlin/Python/Safety Project/SiteCrew/%s/%s-Detail Report.pdf'\n % (projectName, projectName))\n else:\n shutil.move('/home/gmclaughlin/Downloads/Detail Report - Basic.pdf',\n \n '/home/gmclaughlin/Python/Safety Project/Other/%s/%s-Detail Report.pdf'\n % (projectName, projectName))\n\n\n<mask token>\n",
"step-2": "<mask token>\nbrowser.get(\n 'https://safetynet.predictivesolutions.com/CRMApp/default_login.jsp?loginZoneID=10459&originalHostName=jdc.predictivesolutions.com'\n )\n<mask token>\nuserElem.send_keys('temp')\n<mask token>\npassElem.send_keys('temp')\npassElem.submit()\ntime.sleep(3)\n<mask token>\nlinkElem.click()\ntime.sleep(2)\n<mask token>\nlinkElem.click()\ntime.sleep(4)\n\n\ndef pdfToFolder(projectName):\n os.chdir('/home/gmclaughlin/Downloads')\n if projectName.find('DEM') != -1:\n shutil.move('/home/gmclaughlin/Downloads/Detail Report - Basic.pdf',\n \n '/home/gmclaughlin/Python/Safety Project/Demo/%s/%s-Detail Report.pdf'\n % (projectName, projectName))\n elif projectName.find('JDC') != -1:\n shutil.move('/home/gmclaughlin/Downloads/Detail Report - Basic.pdf',\n \n '/home/gmclaughlin/Python/Safety Project/JDC/%s/%s-Detail Report.pdf'\n % (projectName, projectName))\n elif projectName.find('NEW') != -1:\n shutil.move('/home/gmclaughlin/Downloads/Detail Report - Basic.pdf',\n \n '/home/gmclaughlin/Python/Safety Project/NewRoads/%s/%s-Detail Report.pdf'\n % (projectName, projectName))\n elif projectName.find('Site') != -1:\n shutil.move('/home/gmclaughlin/Downloads/Detail Report - Basic.pdf',\n \n '/home/gmclaughlin/Python/Safety Project/SiteCrew/%s/%s-Detail Report.pdf'\n % (projectName, projectName))\n else:\n shutil.move('/home/gmclaughlin/Downloads/Detail Report - Basic.pdf',\n \n '/home/gmclaughlin/Python/Safety Project/Other/%s/%s-Detail Report.pdf'\n % (projectName, projectName))\n\n\n<mask token>\nfor cellObj in sheet['A']:\n if (cellObj.value != 'Project' and cellObj.value !=\n 'JDC-Winchester HS Enabling (CONSIG'):\n linkElem = browser.find_element_by_name('clear')\n linkElem.click()\n time.sleep(4)\n linkElem = browser.find_element_by_name('showSafeAndUnsafeDetails')\n linkElem.click()\n time.sleep(1)\n linkElem = browser.find_element_by_name('showImages')\n linkElem.click()\n time.sleep(1)\n linkElem = browser.find_element_by_name('datePickerRadio')\n linkElem.click()\n time.sleep(1)\n projectElem = browser.find_elements_by_xpath(\"//input[@type='text']\")\n print(cellObj.value)\n projectElem[5 + addValue].send_keys('01/01/2010')\n time.sleep(1)\n projectElem[6 + addValue].send_keys('08/15/2017')\n time.sleep(1)\n projectElem[8 + addValue].clear()\n projectElem[8 + addValue].send_keys(cellObj.value)\n time.sleep(1)\n projectElem[8 + addValue].send_keys(Keys.ENTER)\n time.sleep(3)\n linkElem = browser.find_element_by_xpath(\"//input[@type='submit']\")\n linkElem.click()\n time.sleep(10)\n linkElem = browser.find_element_by_name('pdf')\n linkElem.click()\n time.sleep(70)\n addValue = 1\n pdfToFolder(cellObj.value)\n counter = counter + 1\n",
"step-3": "<mask token>\nwb = openpyxl.load_workbook('ProjectSummary.xlsx')\nsheet = wb.active\nbrowser = webdriver.Firefox()\nbrowser.get(\n 'https://safetynet.predictivesolutions.com/CRMApp/default_login.jsp?loginZoneID=10459&originalHostName=jdc.predictivesolutions.com'\n )\nuserElem = browser.find_element_by_id('username')\nuserElem.send_keys('temp')\npassElem = browser.find_element_by_id('password')\npassElem.send_keys('temp')\npassElem.submit()\ntime.sleep(3)\nlinkElem = browser.find_element_by_link_text('Reports')\nlinkElem.click()\ntime.sleep(2)\nlinkElem = browser.find_element_by_link_text('Detail Report')\nlinkElem.click()\ntime.sleep(4)\n\n\ndef pdfToFolder(projectName):\n os.chdir('/home/gmclaughlin/Downloads')\n if projectName.find('DEM') != -1:\n shutil.move('/home/gmclaughlin/Downloads/Detail Report - Basic.pdf',\n \n '/home/gmclaughlin/Python/Safety Project/Demo/%s/%s-Detail Report.pdf'\n % (projectName, projectName))\n elif projectName.find('JDC') != -1:\n shutil.move('/home/gmclaughlin/Downloads/Detail Report - Basic.pdf',\n \n '/home/gmclaughlin/Python/Safety Project/JDC/%s/%s-Detail Report.pdf'\n % (projectName, projectName))\n elif projectName.find('NEW') != -1:\n shutil.move('/home/gmclaughlin/Downloads/Detail Report - Basic.pdf',\n \n '/home/gmclaughlin/Python/Safety Project/NewRoads/%s/%s-Detail Report.pdf'\n % (projectName, projectName))\n elif projectName.find('Site') != -1:\n shutil.move('/home/gmclaughlin/Downloads/Detail Report - Basic.pdf',\n \n '/home/gmclaughlin/Python/Safety Project/SiteCrew/%s/%s-Detail Report.pdf'\n % (projectName, projectName))\n else:\n shutil.move('/home/gmclaughlin/Downloads/Detail Report - Basic.pdf',\n \n '/home/gmclaughlin/Python/Safety Project/Other/%s/%s-Detail Report.pdf'\n % (projectName, projectName))\n\n\nfinsihedFlag = False\naddValue = 0\ncounter = 0\nfor cellObj in sheet['A']:\n if (cellObj.value != 'Project' and cellObj.value !=\n 'JDC-Winchester HS Enabling (CONSIG'):\n linkElem = browser.find_element_by_name('clear')\n linkElem.click()\n time.sleep(4)\n linkElem = browser.find_element_by_name('showSafeAndUnsafeDetails')\n linkElem.click()\n time.sleep(1)\n linkElem = browser.find_element_by_name('showImages')\n linkElem.click()\n time.sleep(1)\n linkElem = browser.find_element_by_name('datePickerRadio')\n linkElem.click()\n time.sleep(1)\n projectElem = browser.find_elements_by_xpath(\"//input[@type='text']\")\n print(cellObj.value)\n projectElem[5 + addValue].send_keys('01/01/2010')\n time.sleep(1)\n projectElem[6 + addValue].send_keys('08/15/2017')\n time.sleep(1)\n projectElem[8 + addValue].clear()\n projectElem[8 + addValue].send_keys(cellObj.value)\n time.sleep(1)\n projectElem[8 + addValue].send_keys(Keys.ENTER)\n time.sleep(3)\n linkElem = browser.find_element_by_xpath(\"//input[@type='submit']\")\n linkElem.click()\n time.sleep(10)\n linkElem = browser.find_element_by_name('pdf')\n linkElem.click()\n time.sleep(70)\n addValue = 1\n pdfToFolder(cellObj.value)\n counter = counter + 1\n",
"step-4": "import os, openpyxl, time, shutil\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nwb = openpyxl.load_workbook('ProjectSummary.xlsx')\nsheet = wb.active\nbrowser = webdriver.Firefox()\nbrowser.get(\n 'https://safetynet.predictivesolutions.com/CRMApp/default_login.jsp?loginZoneID=10459&originalHostName=jdc.predictivesolutions.com'\n )\nuserElem = browser.find_element_by_id('username')\nuserElem.send_keys('temp')\npassElem = browser.find_element_by_id('password')\npassElem.send_keys('temp')\npassElem.submit()\ntime.sleep(3)\nlinkElem = browser.find_element_by_link_text('Reports')\nlinkElem.click()\ntime.sleep(2)\nlinkElem = browser.find_element_by_link_text('Detail Report')\nlinkElem.click()\ntime.sleep(4)\n\n\ndef pdfToFolder(projectName):\n os.chdir('/home/gmclaughlin/Downloads')\n if projectName.find('DEM') != -1:\n shutil.move('/home/gmclaughlin/Downloads/Detail Report - Basic.pdf',\n \n '/home/gmclaughlin/Python/Safety Project/Demo/%s/%s-Detail Report.pdf'\n % (projectName, projectName))\n elif projectName.find('JDC') != -1:\n shutil.move('/home/gmclaughlin/Downloads/Detail Report - Basic.pdf',\n \n '/home/gmclaughlin/Python/Safety Project/JDC/%s/%s-Detail Report.pdf'\n % (projectName, projectName))\n elif projectName.find('NEW') != -1:\n shutil.move('/home/gmclaughlin/Downloads/Detail Report - Basic.pdf',\n \n '/home/gmclaughlin/Python/Safety Project/NewRoads/%s/%s-Detail Report.pdf'\n % (projectName, projectName))\n elif projectName.find('Site') != -1:\n shutil.move('/home/gmclaughlin/Downloads/Detail Report - Basic.pdf',\n \n '/home/gmclaughlin/Python/Safety Project/SiteCrew/%s/%s-Detail Report.pdf'\n % (projectName, projectName))\n else:\n shutil.move('/home/gmclaughlin/Downloads/Detail Report - Basic.pdf',\n \n '/home/gmclaughlin/Python/Safety Project/Other/%s/%s-Detail Report.pdf'\n % (projectName, projectName))\n\n\nfinsihedFlag = False\naddValue = 0\ncounter = 0\nfor cellObj in sheet['A']:\n if (cellObj.value != 'Project' and cellObj.value !=\n 'JDC-Winchester HS Enabling (CONSIG'):\n linkElem = browser.find_element_by_name('clear')\n linkElem.click()\n time.sleep(4)\n linkElem = browser.find_element_by_name('showSafeAndUnsafeDetails')\n linkElem.click()\n time.sleep(1)\n linkElem = browser.find_element_by_name('showImages')\n linkElem.click()\n time.sleep(1)\n linkElem = browser.find_element_by_name('datePickerRadio')\n linkElem.click()\n time.sleep(1)\n projectElem = browser.find_elements_by_xpath(\"//input[@type='text']\")\n print(cellObj.value)\n projectElem[5 + addValue].send_keys('01/01/2010')\n time.sleep(1)\n projectElem[6 + addValue].send_keys('08/15/2017')\n time.sleep(1)\n projectElem[8 + addValue].clear()\n projectElem[8 + addValue].send_keys(cellObj.value)\n time.sleep(1)\n projectElem[8 + addValue].send_keys(Keys.ENTER)\n time.sleep(3)\n linkElem = browser.find_element_by_xpath(\"//input[@type='submit']\")\n linkElem.click()\n time.sleep(10)\n linkElem = browser.find_element_by_name('pdf')\n linkElem.click()\n time.sleep(70)\n addValue = 1\n pdfToFolder(cellObj.value)\n counter = counter + 1\n",
"step-5": "#downloads project detail reports from the web and places them in the correct project folder created by makeFolders.py\n\nimport os, openpyxl, time, shutil\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\n\nwb = openpyxl.load_workbook('ProjectSummary.xlsx')\nsheet = wb.active\n\nbrowser = webdriver.Firefox()\nbrowser.get('https://safetynet.predictivesolutions.com/CRMApp/default_login.jsp?loginZoneID=10459&originalHostName=jdc.predictivesolutions.com')\n\nuserElem = browser.find_element_by_id('username')\nuserElem.send_keys('temp')\npassElem = browser.find_element_by_id('password')\npassElem.send_keys('temp')\npassElem.submit()\n\ntime.sleep(3)\nlinkElem = browser.find_element_by_link_text('Reports')\nlinkElem.click()\ntime.sleep(2)\nlinkElem = browser.find_element_by_link_text('Detail Report')\nlinkElem.click()\ntime.sleep(4)\n\ndef pdfToFolder(projectName):\n os.chdir('/home/gmclaughlin/Downloads')\n if projectName.find(\"DEM\") != -1:\n shutil.move('/home/gmclaughlin/Downloads/Detail Report - Basic.pdf','/home/gmclaughlin/Python/Safety Project/Demo/%s/%s-Detail Report.pdf' % (projectName, projectName))\n\n elif projectName.find(\"JDC\") != -1:\n shutil.move('/home/gmclaughlin/Downloads/Detail Report - Basic.pdf','/home/gmclaughlin/Python/Safety Project/JDC/%s/%s-Detail Report.pdf' % (projectName, projectName))\n\n elif projectName.find(\"NEW\") != -1:\n shutil.move('/home/gmclaughlin/Downloads/Detail Report - Basic.pdf','/home/gmclaughlin/Python/Safety Project/NewRoads/%s/%s-Detail Report.pdf' % (projectName, projectName))\n\n elif projectName.find(\"Site\") != -1:\n shutil.move('/home/gmclaughlin/Downloads/Detail Report - Basic.pdf','/home/gmclaughlin/Python/Safety Project/SiteCrew/%s/%s-Detail Report.pdf' % (projectName, projectName))\n\n else:\n shutil.move('/home/gmclaughlin/Downloads/Detail Report - Basic.pdf','/home/gmclaughlin/Python/Safety Project/Other/%s/%s-Detail Report.pdf' % (projectName, projectName))\n\nfinsihedFlag = False\naddValue = 0\ncounter = 0\nfor cellObj in sheet['A']:\n if cellObj.value != 'Project' and cellObj.value != 'JDC-Winchester HS Enabling (CONSIG':\n\n linkElem = browser.find_element_by_name('clear') #clear existing settings\n linkElem.click()\n time.sleep(4)\n\n linkElem = browser.find_element_by_name('showSafeAndUnsafeDetails') #select all reports\n linkElem.click()\n time.sleep(1)\n\n linkElem = browser.find_element_by_name('showImages') #show images in reports\n linkElem.click()\n time.sleep(1)\n\n linkElem = browser.find_element_by_name('datePickerRadio')\n linkElem.click()\n time.sleep(1)\n\n projectElem = browser.find_elements_by_xpath(\"//input[@type='text']\") #find and use text fields\n print(cellObj.value)\n #projectElem = browser.find_element_by_xpath(\"//input[4]\")\n #time.sleep(2)\n #projectElem[5+addValue].clear()\n projectElem[5+addValue].send_keys('01/01/2010')\n time.sleep(1)\n #projectElem[6+addValue].clear()\n projectElem[6+addValue].send_keys('08/15/2017')\n time.sleep(1)\n projectElem[8+addValue].clear() #this is the project name box\n projectElem[8+addValue].send_keys(cellObj.value)\n time.sleep(1)\n projectElem[8+addValue].send_keys(Keys.ENTER)\n time.sleep(3)\n\n linkElem = browser.find_element_by_xpath(\"//input[@type='submit']\") #submit request for report\n linkElem.click()\n time.sleep(10)\n\n linkElem = browser.find_element_by_name('pdf') #download as PDF\n linkElem.click()\n time.sleep(70)\n addValue = 1\n\n pdfToFolder(cellObj.value)\n\n counter = counter + 1\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(best_fit1)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
objective_func = F1
problem_size = 30
domain_range = [-15, 15]
log = True
epoch = 100
pop_size = 50
p = 0.8
md1 = BaseFPA(objective_func, problem_size, domain_range, log, epoch,
pop_size, p)
best_pos1, best_fit1, list_loss1 = md1._train__()
print(best_fit1)
<|reserved_special_token_1|>
from mealpy.evolutionary_based.FPA import BaseFPA
from opfunu.cec_basic.cec2014_nobias import *
objective_func = F1
problem_size = 30
domain_range = [-15, 15]
log = True
epoch = 100
pop_size = 50
p = 0.8
md1 = BaseFPA(objective_func, problem_size, domain_range, log, epoch,
pop_size, p)
best_pos1, best_fit1, list_loss1 = md1._train__()
print(best_fit1)
<|reserved_special_token_1|>
#!/usr/bin/env python
# ------------------------------------------------------------------------------------------------------%
# Created by "Thieu Nguyen" at 19:47, 08/04/2020 %
# %
# Email: nguyenthieu2102@gmail.com %
# Homepage: https://www.researchgate.net/profile/Thieu_Nguyen6 %
# Github: https://github.com/thieunguyen5991 %
#-------------------------------------------------------------------------------------------------------%
from mealpy.evolutionary_based.FPA import BaseFPA
from opfunu.cec_basic.cec2014_nobias import *
## Setting parameters
objective_func = F1
problem_size = 30
domain_range = [-15, 15]
log = True
epoch = 100
pop_size = 50
p = 0.8
md1 = BaseFPA(objective_func, problem_size, domain_range, log, epoch, pop_size, p)
best_pos1, best_fit1, list_loss1 = md1._train__()
print(best_fit1)
|
flexible
|
{
"blob_id": "93b12d1e936331c81522790f3f45faa3383d249e",
"index": 3515,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(best_fit1)\n",
"step-3": "<mask token>\nobjective_func = F1\nproblem_size = 30\ndomain_range = [-15, 15]\nlog = True\nepoch = 100\npop_size = 50\np = 0.8\nmd1 = BaseFPA(objective_func, problem_size, domain_range, log, epoch,\n pop_size, p)\nbest_pos1, best_fit1, list_loss1 = md1._train__()\nprint(best_fit1)\n",
"step-4": "from mealpy.evolutionary_based.FPA import BaseFPA\nfrom opfunu.cec_basic.cec2014_nobias import *\nobjective_func = F1\nproblem_size = 30\ndomain_range = [-15, 15]\nlog = True\nepoch = 100\npop_size = 50\np = 0.8\nmd1 = BaseFPA(objective_func, problem_size, domain_range, log, epoch,\n pop_size, p)\nbest_pos1, best_fit1, list_loss1 = md1._train__()\nprint(best_fit1)\n",
"step-5": "#!/usr/bin/env python\n# ------------------------------------------------------------------------------------------------------%\n# Created by \"Thieu Nguyen\" at 19:47, 08/04/2020 %\n# %\n# Email: nguyenthieu2102@gmail.com %\n# Homepage: https://www.researchgate.net/profile/Thieu_Nguyen6 %\n# Github: https://github.com/thieunguyen5991 %\n#-------------------------------------------------------------------------------------------------------%\n\nfrom mealpy.evolutionary_based.FPA import BaseFPA\nfrom opfunu.cec_basic.cec2014_nobias import *\n\n## Setting parameters\nobjective_func = F1\nproblem_size = 30\ndomain_range = [-15, 15]\nlog = True\n\nepoch = 100\npop_size = 50\np = 0.8\n\nmd1 = BaseFPA(objective_func, problem_size, domain_range, log, epoch, pop_size, p)\nbest_pos1, best_fit1, list_loss1 = md1._train__()\nprint(best_fit1)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# # -*- coding:utf-8 -*-
import sys
reload(sys)
sys.setdefaultencoding( "utf-8" )
import urllib
import urllib2
import cookielib
from excel import *
from user import *
List=[]
cookie = cookielib.CookieJar()
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookie))
postdata = urllib.urlencode({'zjh':user(0),'mm':user(1)})
loginUrl = 'http://zhjw.dlut.edu.cn/loginAction.do'
result = opener.open(loginUrl,postdata)
gradeUrl = 'http://zhjw.dlut.edu.cn/xkAction.do?actionType=6'
result = opener.open(gradeUrl)
html = etree.HTML(result.read().decode('gbk'))
schedule = html.xpath('//td[@class="pageAlign"]/table[@border="1"]')
write_schedule(cut(get_son(schedule[0],List)))
|
normal
|
{
"blob_id": "3c7280bbd23bd3472915da0760efbfd03bfe995d",
"index": 9314,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nreload(sys)\nsys.setdefaultencoding('utf-8')\n<mask token>\nwrite_schedule(cut(get_son(schedule[0], List)))\n",
"step-3": "<mask token>\nreload(sys)\nsys.setdefaultencoding('utf-8')\n<mask token>\nList = []\ncookie = cookielib.CookieJar()\nopener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookie))\npostdata = urllib.urlencode({'zjh': user(0), 'mm': user(1)})\nloginUrl = 'http://zhjw.dlut.edu.cn/loginAction.do'\nresult = opener.open(loginUrl, postdata)\ngradeUrl = 'http://zhjw.dlut.edu.cn/xkAction.do?actionType=6'\nresult = opener.open(gradeUrl)\nhtml = etree.HTML(result.read().decode('gbk'))\nschedule = html.xpath('//td[@class=\"pageAlign\"]/table[@border=\"1\"]')\nwrite_schedule(cut(get_son(schedule[0], List)))\n",
"step-4": "import sys\nreload(sys)\nsys.setdefaultencoding('utf-8')\nimport urllib\nimport urllib2\nimport cookielib\nfrom excel import *\nfrom user import *\nList = []\ncookie = cookielib.CookieJar()\nopener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookie))\npostdata = urllib.urlencode({'zjh': user(0), 'mm': user(1)})\nloginUrl = 'http://zhjw.dlut.edu.cn/loginAction.do'\nresult = opener.open(loginUrl, postdata)\ngradeUrl = 'http://zhjw.dlut.edu.cn/xkAction.do?actionType=6'\nresult = opener.open(gradeUrl)\nhtml = etree.HTML(result.read().decode('gbk'))\nschedule = html.xpath('//td[@class=\"pageAlign\"]/table[@border=\"1\"]')\nwrite_schedule(cut(get_son(schedule[0], List)))\n",
"step-5": "# # -*- coding:utf-8 -*-\nimport sys\nreload(sys)\nsys.setdefaultencoding( \"utf-8\" )\nimport urllib\nimport urllib2\nimport cookielib\nfrom excel import *\nfrom user import *\n\nList=[]\ncookie = cookielib.CookieJar()\nopener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookie))\npostdata = urllib.urlencode({'zjh':user(0),'mm':user(1)})\nloginUrl = 'http://zhjw.dlut.edu.cn/loginAction.do'\nresult = opener.open(loginUrl,postdata)\ngradeUrl = 'http://zhjw.dlut.edu.cn/xkAction.do?actionType=6'\nresult = opener.open(gradeUrl)\nhtml = etree.HTML(result.read().decode('gbk'))\nschedule = html.xpath('//td[@class=\"pageAlign\"]/table[@border=\"1\"]')\nwrite_schedule(cut(get_son(schedule[0],List)))",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
dependencies = [('cryptocurrency',
'0012_rename_cancel_exists_order_cancel_exist')]
operations = [migrations.AlterField(model_name='order', name=
'created_at', field=models.IntegerField(blank=True, null=True))]
<|reserved_special_token_1|>
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [('cryptocurrency',
'0012_rename_cancel_exists_order_cancel_exist')]
operations = [migrations.AlterField(model_name='order', name=
'created_at', field=models.IntegerField(blank=True, null=True))]
<|reserved_special_token_1|>
# Generated by Django 3.2.7 on 2021-09-11 19:38
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cryptocurrency', '0012_rename_cancel_exists_order_cancel_exist'),
]
operations = [
migrations.AlterField(
model_name='order',
name='created_at',
field=models.IntegerField(blank=True, null=True),
),
]
|
flexible
|
{
"blob_id": "de347b41cd88947690cb42e043880a80d81e2c5c",
"index": 436,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('cryptocurrency',\n '0012_rename_cancel_exists_order_cancel_exist')]\n operations = [migrations.AlterField(model_name='order', name=\n 'created_at', field=models.IntegerField(blank=True, null=True))]\n",
"step-4": "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('cryptocurrency',\n '0012_rename_cancel_exists_order_cancel_exist')]\n operations = [migrations.AlterField(model_name='order', name=\n 'created_at', field=models.IntegerField(blank=True, null=True))]\n",
"step-5": "# Generated by Django 3.2.7 on 2021-09-11 19:38\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('cryptocurrency', '0012_rename_cancel_exists_order_cancel_exist'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='order',\n name='created_at',\n field=models.IntegerField(blank=True, null=True),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
"""This module provides constants for locale-dependent providers."""
import typing as t
from mimesis.enums import Locale
from mimesis.exceptions import LocaleError
__all__ = ["Locale", "validate_locale"]
def validate_locale(locale: t.Union[Locale, str]) -> Locale:
if isinstance(locale, str):
try:
return Locale(locale)
except ValueError:
raise LocaleError(locale)
if not isinstance(locale, Locale):
raise LocaleError(locale)
return locale
|
normal
|
{
"blob_id": "779445aa22145d5076940ea5b214c25ad233dd0e",
"index": 3087,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef validate_locale(locale: t.Union[Locale, str]) ->Locale:\n if isinstance(locale, str):\n try:\n return Locale(locale)\n except ValueError:\n raise LocaleError(locale)\n if not isinstance(locale, Locale):\n raise LocaleError(locale)\n return locale\n",
"step-3": "<mask token>\n__all__ = ['Locale', 'validate_locale']\n\n\ndef validate_locale(locale: t.Union[Locale, str]) ->Locale:\n if isinstance(locale, str):\n try:\n return Locale(locale)\n except ValueError:\n raise LocaleError(locale)\n if not isinstance(locale, Locale):\n raise LocaleError(locale)\n return locale\n",
"step-4": "<mask token>\nimport typing as t\nfrom mimesis.enums import Locale\nfrom mimesis.exceptions import LocaleError\n__all__ = ['Locale', 'validate_locale']\n\n\ndef validate_locale(locale: t.Union[Locale, str]) ->Locale:\n if isinstance(locale, str):\n try:\n return Locale(locale)\n except ValueError:\n raise LocaleError(locale)\n if not isinstance(locale, Locale):\n raise LocaleError(locale)\n return locale\n",
"step-5": "\"\"\"This module provides constants for locale-dependent providers.\"\"\"\n\nimport typing as t\n\nfrom mimesis.enums import Locale\nfrom mimesis.exceptions import LocaleError\n\n__all__ = [\"Locale\", \"validate_locale\"]\n\n\ndef validate_locale(locale: t.Union[Locale, str]) -> Locale:\n if isinstance(locale, str):\n try:\n return Locale(locale)\n except ValueError:\n raise LocaleError(locale)\n\n if not isinstance(locale, Locale):\n raise LocaleError(locale)\n\n return locale\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/env python
# ------------------------------------------------------------------------------------------------------%
# Created by "Thieu Nguyen" at 19:47, 08/04/2020 %
# %
# Email: nguyenthieu2102@gmail.com %
# Homepage: https://www.researchgate.net/profile/Thieu_Nguyen6 %
# Github: https://github.com/thieunguyen5991 %
#-------------------------------------------------------------------------------------------------------%
from mealpy.evolutionary_based.FPA import BaseFPA
from opfunu.cec_basic.cec2014_nobias import *
## Setting parameters
objective_func = F1
problem_size = 30
domain_range = [-15, 15]
log = True
epoch = 100
pop_size = 50
p = 0.8
md1 = BaseFPA(objective_func, problem_size, domain_range, log, epoch, pop_size, p)
best_pos1, best_fit1, list_loss1 = md1._train__()
print(best_fit1)
|
normal
|
{
"blob_id": "93b12d1e936331c81522790f3f45faa3383d249e",
"index": 3515,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(best_fit1)\n",
"step-3": "<mask token>\nobjective_func = F1\nproblem_size = 30\ndomain_range = [-15, 15]\nlog = True\nepoch = 100\npop_size = 50\np = 0.8\nmd1 = BaseFPA(objective_func, problem_size, domain_range, log, epoch,\n pop_size, p)\nbest_pos1, best_fit1, list_loss1 = md1._train__()\nprint(best_fit1)\n",
"step-4": "from mealpy.evolutionary_based.FPA import BaseFPA\nfrom opfunu.cec_basic.cec2014_nobias import *\nobjective_func = F1\nproblem_size = 30\ndomain_range = [-15, 15]\nlog = True\nepoch = 100\npop_size = 50\np = 0.8\nmd1 = BaseFPA(objective_func, problem_size, domain_range, log, epoch,\n pop_size, p)\nbest_pos1, best_fit1, list_loss1 = md1._train__()\nprint(best_fit1)\n",
"step-5": "#!/usr/bin/env python\n# ------------------------------------------------------------------------------------------------------%\n# Created by \"Thieu Nguyen\" at 19:47, 08/04/2020 %\n# %\n# Email: nguyenthieu2102@gmail.com %\n# Homepage: https://www.researchgate.net/profile/Thieu_Nguyen6 %\n# Github: https://github.com/thieunguyen5991 %\n#-------------------------------------------------------------------------------------------------------%\n\nfrom mealpy.evolutionary_based.FPA import BaseFPA\nfrom opfunu.cec_basic.cec2014_nobias import *\n\n## Setting parameters\nobjective_func = F1\nproblem_size = 30\ndomain_range = [-15, 15]\nlog = True\n\nepoch = 100\npop_size = 50\np = 0.8\n\nmd1 = BaseFPA(objective_func, problem_size, domain_range, log, epoch, pop_size, p)\nbest_pos1, best_fit1, list_loss1 = md1._train__()\nprint(best_fit1)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
class MedianFinder:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class MedianFinder:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def findMedian(self) ->float:
if len(self.maxheap) == len(self.minheap):
return (self.minheap[0] + -self.maxheap[0]) / 2.0
return self.minheap[0]
<|reserved_special_token_1|>
class MedianFinder:
def __init__(self):
"""
initialize your data structure here.
"""
self.minheap = []
self.maxheap = []
<|reserved_special_token_0|>
def findMedian(self) ->float:
if len(self.maxheap) == len(self.minheap):
return (self.minheap[0] + -self.maxheap[0]) / 2.0
return self.minheap[0]
<|reserved_special_token_1|>
class MedianFinder:
def __init__(self):
"""
initialize your data structure here.
"""
self.minheap = []
self.maxheap = []
def addNum(self, num: int) ->None:
heapq.heappush(self.maxheap, -heapq.heappushpop(self.minheap, num))
if len(self.maxheap) > len(self.minheap):
heapq.heappush(self.minheap, -heapq.heappop(self.maxheap))
def findMedian(self) ->float:
if len(self.maxheap) == len(self.minheap):
return (self.minheap[0] + -self.maxheap[0]) / 2.0
return self.minheap[0]
<|reserved_special_token_1|>
class MedianFinder:
def __init__(self):
"""
initialize your data structure here.
"""
self.minheap = []
self.maxheap = []
def addNum(self, num: int) -> None:
heapq.heappush (self.maxheap ,-heapq.heappushpop(self.minheap , num) )
if len(self.maxheap) > len(self.minheap):
heapq.heappush( self.minheap, -heapq.heappop(self.maxheap))
def findMedian(self) -> float:
if len(self.maxheap) == len(self.minheap):
return (self.minheap[0] + -self.maxheap[0]) / 2.
return self.minheap[0]
# Your MedianFinder object will be instantiated and called as such:
# obj = MedianFinder()
# obj.addNum(num)
# param_2 = obj.findMedian()
|
flexible
|
{
"blob_id": "e7699bb3f6080c78517f11445e2c48a0e40f3332",
"index": 3209,
"step-1": "class MedianFinder:\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "class MedianFinder:\n <mask token>\n <mask token>\n\n def findMedian(self) ->float:\n if len(self.maxheap) == len(self.minheap):\n return (self.minheap[0] + -self.maxheap[0]) / 2.0\n return self.minheap[0]\n",
"step-3": "class MedianFinder:\n\n def __init__(self):\n \"\"\"\n initialize your data structure here.\n \"\"\"\n self.minheap = []\n self.maxheap = []\n <mask token>\n\n def findMedian(self) ->float:\n if len(self.maxheap) == len(self.minheap):\n return (self.minheap[0] + -self.maxheap[0]) / 2.0\n return self.minheap[0]\n",
"step-4": "class MedianFinder:\n\n def __init__(self):\n \"\"\"\n initialize your data structure here.\n \"\"\"\n self.minheap = []\n self.maxheap = []\n\n def addNum(self, num: int) ->None:\n heapq.heappush(self.maxheap, -heapq.heappushpop(self.minheap, num))\n if len(self.maxheap) > len(self.minheap):\n heapq.heappush(self.minheap, -heapq.heappop(self.maxheap))\n\n def findMedian(self) ->float:\n if len(self.maxheap) == len(self.minheap):\n return (self.minheap[0] + -self.maxheap[0]) / 2.0\n return self.minheap[0]\n",
"step-5": "class MedianFinder:\n\n def __init__(self):\n \"\"\"\n initialize your data structure here.\n \"\"\"\n self.minheap = []\n self.maxheap = []\n\n def addNum(self, num: int) -> None:\n heapq.heappush (self.maxheap ,-heapq.heappushpop(self.minheap , num) )\n if len(self.maxheap) > len(self.minheap):\n heapq.heappush( self.minheap, -heapq.heappop(self.maxheap))\n\n \n\n def findMedian(self) -> float:\n if len(self.maxheap) == len(self.minheap):\n return (self.minheap[0] + -self.maxheap[0]) / 2.\n return self.minheap[0]\n\n\n# Your MedianFinder object will be instantiated and called as such:\n# obj = MedianFinder()\n# obj.addNum(num)\n# param_2 = obj.findMedian()",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
class ZabbixItem:
def __init__(self, user, password, ext_group, ext_template, zabbix_host):
self.user = user
self.password = password
self.zabbix_host = zabbix_host
self.zabbix_api = f'http://{zabbix_host}'
self.connection = self.connection_init()
self.template_id = self.get_template(ext_template)
self.group_id = self.get_group(ext_group)
<|reserved_special_token_0|>
def get_template(self, template_name):
"""
Get template id by template name
:param template_name:
:return: template id as string
"""
ext_template = self.connection.do_request('template.get', {'filter':
{'host': [template_name]}, 'output': 'template_id'}).get('result')
if ext_template:
result = ext_template[0].get('templateid')
else:
result = False
return result
<|reserved_special_token_0|>
def clear_ping(self, value):
"""
clear ping value from text
:param value: raw data, 50 ms as example
:return: integer value
"""
try:
result = int(value[:value.find(' ')])
except IndexError:
result = False
except ValueError:
result = False
return result
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def send_data(self, data):
"""
Send data to server
:param data: data dict
:return:
"""
sender_data = []
host_id = data.get('ext')
zbx_sender = ZabbixSender(self.zabbix_host)
extension_ip = ZabbixMetric(host_id, 'extPhoneIpAddress', data.get(
'ip_address'))
sender_data.append(extension_ip)
extension_ping = ZabbixMetric(host_id, 'extPhonePing', self.
clear_ping(data.get('ping', 10000)))
sender_data.append(extension_ping)
extension_status = ZabbixMetric(host_id, 'extStatus', data.get(
'status', ''))
sender_data.append(extension_status)
extension_user = ZabbixMetric(host_id, 'extUser', data.get('user', ''))
sender_data.append(extension_user)
extension_useragent = ZabbixMetric(host_id, 'extUserAgent', data.
get('user_agent', ''))
sender_data.append(extension_useragent)
zbx_sender.send(sender_data)
def worker(self, data):
"""
Check host. If extension exists - send new data, otherwise - create extension's host in zabbix and send data.
:param data: dict with data
:return: host id
"""
print(data)
host_raw = self.connection.do_request('host.get', {'filter': {
'host': data['ext']}, 'output': ['hostid']}).get('result')
if host_raw:
host_id = host_raw[0].get('hostid')
else:
host_new = self.connection.do_request('host.create', {'host':
f"{data.get('ext')}", 'templates': [{'templateid': self.
template_id}], 'groups': [{'groupid': self.group_id}]})
host_id = host_new.get('result').get('hostids')[0]
self.send_data(data)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ZabbixItem:
def __init__(self, user, password, ext_group, ext_template, zabbix_host):
self.user = user
self.password = password
self.zabbix_host = zabbix_host
self.zabbix_api = f'http://{zabbix_host}'
self.connection = self.connection_init()
self.template_id = self.get_template(ext_template)
self.group_id = self.get_group(ext_group)
<|reserved_special_token_0|>
def get_template(self, template_name):
"""
Get template id by template name
:param template_name:
:return: template id as string
"""
ext_template = self.connection.do_request('template.get', {'filter':
{'host': [template_name]}, 'output': 'template_id'}).get('result')
if ext_template:
result = ext_template[0].get('templateid')
else:
result = False
return result
<|reserved_special_token_0|>
def clear_ping(self, value):
"""
clear ping value from text
:param value: raw data, 50 ms as example
:return: integer value
"""
try:
result = int(value[:value.find(' ')])
except IndexError:
result = False
except ValueError:
result = False
return result
def host_create(self, data):
"""
Create host item
:param host_params:
:return: host id
"""
return self.connection.do_request('host.create', data)[0].get('result')
def assign_template_to_host(self, host_id):
"""
Assign template to host
:param host_id: host id
:return:
"""
return self.connection.do_request('template.update', teamplateid=
self.template_id, hosts=[host_id])
def send_data(self, data):
"""
Send data to server
:param data: data dict
:return:
"""
sender_data = []
host_id = data.get('ext')
zbx_sender = ZabbixSender(self.zabbix_host)
extension_ip = ZabbixMetric(host_id, 'extPhoneIpAddress', data.get(
'ip_address'))
sender_data.append(extension_ip)
extension_ping = ZabbixMetric(host_id, 'extPhonePing', self.
clear_ping(data.get('ping', 10000)))
sender_data.append(extension_ping)
extension_status = ZabbixMetric(host_id, 'extStatus', data.get(
'status', ''))
sender_data.append(extension_status)
extension_user = ZabbixMetric(host_id, 'extUser', data.get('user', ''))
sender_data.append(extension_user)
extension_useragent = ZabbixMetric(host_id, 'extUserAgent', data.
get('user_agent', ''))
sender_data.append(extension_useragent)
zbx_sender.send(sender_data)
def worker(self, data):
"""
Check host. If extension exists - send new data, otherwise - create extension's host in zabbix and send data.
:param data: dict with data
:return: host id
"""
print(data)
host_raw = self.connection.do_request('host.get', {'filter': {
'host': data['ext']}, 'output': ['hostid']}).get('result')
if host_raw:
host_id = host_raw[0].get('hostid')
else:
host_new = self.connection.do_request('host.create', {'host':
f"{data.get('ext')}", 'templates': [{'templateid': self.
template_id}], 'groups': [{'groupid': self.group_id}]})
host_id = host_new.get('result').get('hostids')[0]
self.send_data(data)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ZabbixItem:
def __init__(self, user, password, ext_group, ext_template, zabbix_host):
self.user = user
self.password = password
self.zabbix_host = zabbix_host
self.zabbix_api = f'http://{zabbix_host}'
self.connection = self.connection_init()
self.template_id = self.get_template(ext_template)
self.group_id = self.get_group(ext_group)
<|reserved_special_token_0|>
def get_template(self, template_name):
"""
Get template id by template name
:param template_name:
:return: template id as string
"""
ext_template = self.connection.do_request('template.get', {'filter':
{'host': [template_name]}, 'output': 'template_id'}).get('result')
if ext_template:
result = ext_template[0].get('templateid')
else:
result = False
return result
def get_group(self, group_name):
"""
Get group Id
:param group_name:
:return: group ID
"""
group = self.connection.do_request('hostgroup.get', {'filter': {
'name': [group_name]}, 'output': 'extend'}).get('result')
if group:
result = group[0].get('groupid')
else:
result = False
return result
def clear_ping(self, value):
"""
clear ping value from text
:param value: raw data, 50 ms as example
:return: integer value
"""
try:
result = int(value[:value.find(' ')])
except IndexError:
result = False
except ValueError:
result = False
return result
def host_create(self, data):
"""
Create host item
:param host_params:
:return: host id
"""
return self.connection.do_request('host.create', data)[0].get('result')
def assign_template_to_host(self, host_id):
"""
Assign template to host
:param host_id: host id
:return:
"""
return self.connection.do_request('template.update', teamplateid=
self.template_id, hosts=[host_id])
def send_data(self, data):
"""
Send data to server
:param data: data dict
:return:
"""
sender_data = []
host_id = data.get('ext')
zbx_sender = ZabbixSender(self.zabbix_host)
extension_ip = ZabbixMetric(host_id, 'extPhoneIpAddress', data.get(
'ip_address'))
sender_data.append(extension_ip)
extension_ping = ZabbixMetric(host_id, 'extPhonePing', self.
clear_ping(data.get('ping', 10000)))
sender_data.append(extension_ping)
extension_status = ZabbixMetric(host_id, 'extStatus', data.get(
'status', ''))
sender_data.append(extension_status)
extension_user = ZabbixMetric(host_id, 'extUser', data.get('user', ''))
sender_data.append(extension_user)
extension_useragent = ZabbixMetric(host_id, 'extUserAgent', data.
get('user_agent', ''))
sender_data.append(extension_useragent)
zbx_sender.send(sender_data)
def worker(self, data):
"""
Check host. If extension exists - send new data, otherwise - create extension's host in zabbix and send data.
:param data: dict with data
:return: host id
"""
print(data)
host_raw = self.connection.do_request('host.get', {'filter': {
'host': data['ext']}, 'output': ['hostid']}).get('result')
if host_raw:
host_id = host_raw[0].get('hostid')
else:
host_new = self.connection.do_request('host.create', {'host':
f"{data.get('ext')}", 'templates': [{'templateid': self.
template_id}], 'groups': [{'groupid': self.group_id}]})
host_id = host_new.get('result').get('hostids')[0]
self.send_data(data)
<|reserved_special_token_1|>
from pyzabbix import ZabbixMetric, ZabbixSender, ZabbixAPI
from datetime import datetime
from re import findall
class ZabbixItem:
def __init__(self, user, password, ext_group, ext_template, zabbix_host):
self.user = user
self.password = password
self.zabbix_host = zabbix_host
self.zabbix_api = f'http://{zabbix_host}'
self.connection = self.connection_init()
self.template_id = self.get_template(ext_template)
self.group_id = self.get_group(ext_group)
def connection_init(self):
"""
Zabbix connection init
:return: connection
"""
return ZabbixAPI(f'http://{self.zabbix_host}', user=self.user,
password=self.password)
def get_template(self, template_name):
"""
Get template id by template name
:param template_name:
:return: template id as string
"""
ext_template = self.connection.do_request('template.get', {'filter':
{'host': [template_name]}, 'output': 'template_id'}).get('result')
if ext_template:
result = ext_template[0].get('templateid')
else:
result = False
return result
def get_group(self, group_name):
"""
Get group Id
:param group_name:
:return: group ID
"""
group = self.connection.do_request('hostgroup.get', {'filter': {
'name': [group_name]}, 'output': 'extend'}).get('result')
if group:
result = group[0].get('groupid')
else:
result = False
return result
def clear_ping(self, value):
"""
clear ping value from text
:param value: raw data, 50 ms as example
:return: integer value
"""
try:
result = int(value[:value.find(' ')])
except IndexError:
result = False
except ValueError:
result = False
return result
def host_create(self, data):
"""
Create host item
:param host_params:
:return: host id
"""
return self.connection.do_request('host.create', data)[0].get('result')
def assign_template_to_host(self, host_id):
"""
Assign template to host
:param host_id: host id
:return:
"""
return self.connection.do_request('template.update', teamplateid=
self.template_id, hosts=[host_id])
def send_data(self, data):
"""
Send data to server
:param data: data dict
:return:
"""
sender_data = []
host_id = data.get('ext')
zbx_sender = ZabbixSender(self.zabbix_host)
extension_ip = ZabbixMetric(host_id, 'extPhoneIpAddress', data.get(
'ip_address'))
sender_data.append(extension_ip)
extension_ping = ZabbixMetric(host_id, 'extPhonePing', self.
clear_ping(data.get('ping', 10000)))
sender_data.append(extension_ping)
extension_status = ZabbixMetric(host_id, 'extStatus', data.get(
'status', ''))
sender_data.append(extension_status)
extension_user = ZabbixMetric(host_id, 'extUser', data.get('user', ''))
sender_data.append(extension_user)
extension_useragent = ZabbixMetric(host_id, 'extUserAgent', data.
get('user_agent', ''))
sender_data.append(extension_useragent)
zbx_sender.send(sender_data)
def worker(self, data):
"""
Check host. If extension exists - send new data, otherwise - create extension's host in zabbix and send data.
:param data: dict with data
:return: host id
"""
print(data)
host_raw = self.connection.do_request('host.get', {'filter': {
'host': data['ext']}, 'output': ['hostid']}).get('result')
if host_raw:
host_id = host_raw[0].get('hostid')
else:
host_new = self.connection.do_request('host.create', {'host':
f"{data.get('ext')}", 'templates': [{'templateid': self.
template_id}], 'groups': [{'groupid': self.group_id}]})
host_id = host_new.get('result').get('hostids')[0]
self.send_data(data)
<|reserved_special_token_1|>
from pyzabbix import ZabbixMetric, ZabbixSender, ZabbixAPI
from datetime import datetime
from re import findall
# current_time = datetime.now().strftime("%H:%M:%S %d.%m.%Y")
class ZabbixItem():
def __init__(self, user, password, ext_group, ext_template, zabbix_host):
self.user = user
self.password = password
self.zabbix_host = zabbix_host
self.zabbix_api = f"http://{zabbix_host}"
self.connection = self.connection_init()
self.template_id = self.get_template(ext_template)
self.group_id = self.get_group(ext_group)
# print(self.get_group(EXT_GROUP))
def connection_init(self):
'''
Zabbix connection init
:return: connection
'''
return ZabbixAPI(f"http://{self.zabbix_host}", user=self.user, password=self.password)
def get_template(self, template_name):
'''
Get template id by template name
:param template_name:
:return: template id as string
'''
ext_template = self.connection.do_request("template.get", {
"filter": {"host": [template_name]},
"output": "template_id"
}).get("result")
if ext_template:
result = ext_template[0].get("templateid")
else:
result = False
return result
def get_group(self, group_name):
"""
Get group Id
:param group_name:
:return: group ID
"""
group = self.connection.do_request("hostgroup.get", {
"filter": {"name": [group_name]},
"output": "extend"
}).get("result")
if group:
result = group[0].get("groupid")
else:
# print("create Group")
result = False
return result
def clear_ping(self, value):
"""
clear ping value from text
:param value: raw data, 50 ms as example
:return: integer value
"""
try:
result = int(value[:value.find(" ")])
except IndexError:
result = False
except ValueError:
# print(value)
result = False
return result
def host_create(self, data):
'''
Create host item
:param host_params:
:return: host id
'''
return self.connection.do_request('host.create', data)[0].get("result")
def assign_template_to_host(self, host_id):
"""
Assign template to host
:param host_id: host id
:return:
"""
return self.connection.do_request("template.update", teamplateid=self.template_id, hosts=[host_id])
def send_data(self, data):
"""
Send data to server
:param data: data dict
:return:
"""
# test_dict = {'ext': '1105', 'ip_address': '192.168.10.55', 'status': 'OK', 'ping': '5 ms', 'user': 'Secretary',
# 'user_agent': 'Cisco/SPA508G-7.4.9a'}
sender_data = []
host_id = data.get("ext")
# print(ZABBIX_HOST)
zbx_sender = ZabbixSender(self.zabbix_host)
extension_ip = ZabbixMetric(host_id, 'extPhoneIpAddress', data.get("ip_address"))
sender_data.append(extension_ip)
extension_ping = ZabbixMetric(host_id, "extPhonePing", self.clear_ping(data.get("ping", 10000)))
sender_data.append(extension_ping)
extension_status = ZabbixMetric(host_id, "extStatus", data.get("status", ""))
sender_data.append(extension_status)
extension_user = ZabbixMetric(host_id, "extUser", data.get("user", ""))
sender_data.append(extension_user)
extension_useragent = ZabbixMetric(host_id, "extUserAgent", data.get("user_agent", ""))
sender_data.append(extension_useragent)
zbx_sender.send(sender_data)
def worker(self, data):
"""
Check host. If extension exists - send new data, otherwise - create extension's host in zabbix and send data.
:param data: dict with data
:return: host id
"""
print(data)
host_raw = self.connection.do_request('host.get', {
'filter': {'host': data["ext"]},
'output': ['hostid']
}).get("result")
# print("host_raw", host_raw)
if host_raw:
host_id = host_raw[0].get("hostid")
else:
host_new = self.connection.do_request('host.create', {"host" : f"{data.get('ext')}",
"templates": [
{"templateid" : self.template_id}
],
"groups": [
{"groupid": self.group_id}
]
})
host_id = host_new.get("result").get("hostids")[0]
self.send_data(data)
|
flexible
|
{
"blob_id": "14826b5b121ba2939519492c1e1d8700c32396d2",
"index": 8963,
"step-1": "<mask token>\n\n\nclass ZabbixItem:\n\n def __init__(self, user, password, ext_group, ext_template, zabbix_host):\n self.user = user\n self.password = password\n self.zabbix_host = zabbix_host\n self.zabbix_api = f'http://{zabbix_host}'\n self.connection = self.connection_init()\n self.template_id = self.get_template(ext_template)\n self.group_id = self.get_group(ext_group)\n <mask token>\n\n def get_template(self, template_name):\n \"\"\"\n Get template id by template name\n :param template_name:\n :return: template id as string\n \"\"\"\n ext_template = self.connection.do_request('template.get', {'filter':\n {'host': [template_name]}, 'output': 'template_id'}).get('result')\n if ext_template:\n result = ext_template[0].get('templateid')\n else:\n result = False\n return result\n <mask token>\n\n def clear_ping(self, value):\n \"\"\"\n clear ping value from text\n :param value: raw data, 50 ms as example\n :return: integer value\n \"\"\"\n try:\n result = int(value[:value.find(' ')])\n except IndexError:\n result = False\n except ValueError:\n result = False\n return result\n <mask token>\n <mask token>\n\n def send_data(self, data):\n \"\"\"\n Send data to server\n :param data: data dict\n :return:\n \"\"\"\n sender_data = []\n host_id = data.get('ext')\n zbx_sender = ZabbixSender(self.zabbix_host)\n extension_ip = ZabbixMetric(host_id, 'extPhoneIpAddress', data.get(\n 'ip_address'))\n sender_data.append(extension_ip)\n extension_ping = ZabbixMetric(host_id, 'extPhonePing', self.\n clear_ping(data.get('ping', 10000)))\n sender_data.append(extension_ping)\n extension_status = ZabbixMetric(host_id, 'extStatus', data.get(\n 'status', ''))\n sender_data.append(extension_status)\n extension_user = ZabbixMetric(host_id, 'extUser', data.get('user', ''))\n sender_data.append(extension_user)\n extension_useragent = ZabbixMetric(host_id, 'extUserAgent', data.\n get('user_agent', ''))\n sender_data.append(extension_useragent)\n zbx_sender.send(sender_data)\n\n def worker(self, data):\n \"\"\"\n Check host. If extension exists - send new data, otherwise - create extension's host in zabbix and send data.\n :param data: dict with data\n :return: host id\n \"\"\"\n print(data)\n host_raw = self.connection.do_request('host.get', {'filter': {\n 'host': data['ext']}, 'output': ['hostid']}).get('result')\n if host_raw:\n host_id = host_raw[0].get('hostid')\n else:\n host_new = self.connection.do_request('host.create', {'host':\n f\"{data.get('ext')}\", 'templates': [{'templateid': self.\n template_id}], 'groups': [{'groupid': self.group_id}]})\n host_id = host_new.get('result').get('hostids')[0]\n self.send_data(data)\n",
"step-2": "<mask token>\n\n\nclass ZabbixItem:\n\n def __init__(self, user, password, ext_group, ext_template, zabbix_host):\n self.user = user\n self.password = password\n self.zabbix_host = zabbix_host\n self.zabbix_api = f'http://{zabbix_host}'\n self.connection = self.connection_init()\n self.template_id = self.get_template(ext_template)\n self.group_id = self.get_group(ext_group)\n <mask token>\n\n def get_template(self, template_name):\n \"\"\"\n Get template id by template name\n :param template_name:\n :return: template id as string\n \"\"\"\n ext_template = self.connection.do_request('template.get', {'filter':\n {'host': [template_name]}, 'output': 'template_id'}).get('result')\n if ext_template:\n result = ext_template[0].get('templateid')\n else:\n result = False\n return result\n <mask token>\n\n def clear_ping(self, value):\n \"\"\"\n clear ping value from text\n :param value: raw data, 50 ms as example\n :return: integer value\n \"\"\"\n try:\n result = int(value[:value.find(' ')])\n except IndexError:\n result = False\n except ValueError:\n result = False\n return result\n\n def host_create(self, data):\n \"\"\"\n Create host item\n :param host_params:\n :return: host id\n \"\"\"\n return self.connection.do_request('host.create', data)[0].get('result')\n\n def assign_template_to_host(self, host_id):\n \"\"\"\n Assign template to host\n :param host_id: host id\n :return:\n \"\"\"\n return self.connection.do_request('template.update', teamplateid=\n self.template_id, hosts=[host_id])\n\n def send_data(self, data):\n \"\"\"\n Send data to server\n :param data: data dict\n :return:\n \"\"\"\n sender_data = []\n host_id = data.get('ext')\n zbx_sender = ZabbixSender(self.zabbix_host)\n extension_ip = ZabbixMetric(host_id, 'extPhoneIpAddress', data.get(\n 'ip_address'))\n sender_data.append(extension_ip)\n extension_ping = ZabbixMetric(host_id, 'extPhonePing', self.\n clear_ping(data.get('ping', 10000)))\n sender_data.append(extension_ping)\n extension_status = ZabbixMetric(host_id, 'extStatus', data.get(\n 'status', ''))\n sender_data.append(extension_status)\n extension_user = ZabbixMetric(host_id, 'extUser', data.get('user', ''))\n sender_data.append(extension_user)\n extension_useragent = ZabbixMetric(host_id, 'extUserAgent', data.\n get('user_agent', ''))\n sender_data.append(extension_useragent)\n zbx_sender.send(sender_data)\n\n def worker(self, data):\n \"\"\"\n Check host. If extension exists - send new data, otherwise - create extension's host in zabbix and send data.\n :param data: dict with data\n :return: host id\n \"\"\"\n print(data)\n host_raw = self.connection.do_request('host.get', {'filter': {\n 'host': data['ext']}, 'output': ['hostid']}).get('result')\n if host_raw:\n host_id = host_raw[0].get('hostid')\n else:\n host_new = self.connection.do_request('host.create', {'host':\n f\"{data.get('ext')}\", 'templates': [{'templateid': self.\n template_id}], 'groups': [{'groupid': self.group_id}]})\n host_id = host_new.get('result').get('hostids')[0]\n self.send_data(data)\n",
"step-3": "<mask token>\n\n\nclass ZabbixItem:\n\n def __init__(self, user, password, ext_group, ext_template, zabbix_host):\n self.user = user\n self.password = password\n self.zabbix_host = zabbix_host\n self.zabbix_api = f'http://{zabbix_host}'\n self.connection = self.connection_init()\n self.template_id = self.get_template(ext_template)\n self.group_id = self.get_group(ext_group)\n <mask token>\n\n def get_template(self, template_name):\n \"\"\"\n Get template id by template name\n :param template_name:\n :return: template id as string\n \"\"\"\n ext_template = self.connection.do_request('template.get', {'filter':\n {'host': [template_name]}, 'output': 'template_id'}).get('result')\n if ext_template:\n result = ext_template[0].get('templateid')\n else:\n result = False\n return result\n\n def get_group(self, group_name):\n \"\"\"\n Get group Id\n :param group_name:\n :return: group ID\n \"\"\"\n group = self.connection.do_request('hostgroup.get', {'filter': {\n 'name': [group_name]}, 'output': 'extend'}).get('result')\n if group:\n result = group[0].get('groupid')\n else:\n result = False\n return result\n\n def clear_ping(self, value):\n \"\"\"\n clear ping value from text\n :param value: raw data, 50 ms as example\n :return: integer value\n \"\"\"\n try:\n result = int(value[:value.find(' ')])\n except IndexError:\n result = False\n except ValueError:\n result = False\n return result\n\n def host_create(self, data):\n \"\"\"\n Create host item\n :param host_params:\n :return: host id\n \"\"\"\n return self.connection.do_request('host.create', data)[0].get('result')\n\n def assign_template_to_host(self, host_id):\n \"\"\"\n Assign template to host\n :param host_id: host id\n :return:\n \"\"\"\n return self.connection.do_request('template.update', teamplateid=\n self.template_id, hosts=[host_id])\n\n def send_data(self, data):\n \"\"\"\n Send data to server\n :param data: data dict\n :return:\n \"\"\"\n sender_data = []\n host_id = data.get('ext')\n zbx_sender = ZabbixSender(self.zabbix_host)\n extension_ip = ZabbixMetric(host_id, 'extPhoneIpAddress', data.get(\n 'ip_address'))\n sender_data.append(extension_ip)\n extension_ping = ZabbixMetric(host_id, 'extPhonePing', self.\n clear_ping(data.get('ping', 10000)))\n sender_data.append(extension_ping)\n extension_status = ZabbixMetric(host_id, 'extStatus', data.get(\n 'status', ''))\n sender_data.append(extension_status)\n extension_user = ZabbixMetric(host_id, 'extUser', data.get('user', ''))\n sender_data.append(extension_user)\n extension_useragent = ZabbixMetric(host_id, 'extUserAgent', data.\n get('user_agent', ''))\n sender_data.append(extension_useragent)\n zbx_sender.send(sender_data)\n\n def worker(self, data):\n \"\"\"\n Check host. If extension exists - send new data, otherwise - create extension's host in zabbix and send data.\n :param data: dict with data\n :return: host id\n \"\"\"\n print(data)\n host_raw = self.connection.do_request('host.get', {'filter': {\n 'host': data['ext']}, 'output': ['hostid']}).get('result')\n if host_raw:\n host_id = host_raw[0].get('hostid')\n else:\n host_new = self.connection.do_request('host.create', {'host':\n f\"{data.get('ext')}\", 'templates': [{'templateid': self.\n template_id}], 'groups': [{'groupid': self.group_id}]})\n host_id = host_new.get('result').get('hostids')[0]\n self.send_data(data)\n",
"step-4": "from pyzabbix import ZabbixMetric, ZabbixSender, ZabbixAPI\nfrom datetime import datetime\nfrom re import findall\n\n\nclass ZabbixItem:\n\n def __init__(self, user, password, ext_group, ext_template, zabbix_host):\n self.user = user\n self.password = password\n self.zabbix_host = zabbix_host\n self.zabbix_api = f'http://{zabbix_host}'\n self.connection = self.connection_init()\n self.template_id = self.get_template(ext_template)\n self.group_id = self.get_group(ext_group)\n\n def connection_init(self):\n \"\"\"\n Zabbix connection init\n :return: connection\n \"\"\"\n return ZabbixAPI(f'http://{self.zabbix_host}', user=self.user,\n password=self.password)\n\n def get_template(self, template_name):\n \"\"\"\n Get template id by template name\n :param template_name:\n :return: template id as string\n \"\"\"\n ext_template = self.connection.do_request('template.get', {'filter':\n {'host': [template_name]}, 'output': 'template_id'}).get('result')\n if ext_template:\n result = ext_template[0].get('templateid')\n else:\n result = False\n return result\n\n def get_group(self, group_name):\n \"\"\"\n Get group Id\n :param group_name:\n :return: group ID\n \"\"\"\n group = self.connection.do_request('hostgroup.get', {'filter': {\n 'name': [group_name]}, 'output': 'extend'}).get('result')\n if group:\n result = group[0].get('groupid')\n else:\n result = False\n return result\n\n def clear_ping(self, value):\n \"\"\"\n clear ping value from text\n :param value: raw data, 50 ms as example\n :return: integer value\n \"\"\"\n try:\n result = int(value[:value.find(' ')])\n except IndexError:\n result = False\n except ValueError:\n result = False\n return result\n\n def host_create(self, data):\n \"\"\"\n Create host item\n :param host_params:\n :return: host id\n \"\"\"\n return self.connection.do_request('host.create', data)[0].get('result')\n\n def assign_template_to_host(self, host_id):\n \"\"\"\n Assign template to host\n :param host_id: host id\n :return:\n \"\"\"\n return self.connection.do_request('template.update', teamplateid=\n self.template_id, hosts=[host_id])\n\n def send_data(self, data):\n \"\"\"\n Send data to server\n :param data: data dict\n :return:\n \"\"\"\n sender_data = []\n host_id = data.get('ext')\n zbx_sender = ZabbixSender(self.zabbix_host)\n extension_ip = ZabbixMetric(host_id, 'extPhoneIpAddress', data.get(\n 'ip_address'))\n sender_data.append(extension_ip)\n extension_ping = ZabbixMetric(host_id, 'extPhonePing', self.\n clear_ping(data.get('ping', 10000)))\n sender_data.append(extension_ping)\n extension_status = ZabbixMetric(host_id, 'extStatus', data.get(\n 'status', ''))\n sender_data.append(extension_status)\n extension_user = ZabbixMetric(host_id, 'extUser', data.get('user', ''))\n sender_data.append(extension_user)\n extension_useragent = ZabbixMetric(host_id, 'extUserAgent', data.\n get('user_agent', ''))\n sender_data.append(extension_useragent)\n zbx_sender.send(sender_data)\n\n def worker(self, data):\n \"\"\"\n Check host. If extension exists - send new data, otherwise - create extension's host in zabbix and send data.\n :param data: dict with data\n :return: host id\n \"\"\"\n print(data)\n host_raw = self.connection.do_request('host.get', {'filter': {\n 'host': data['ext']}, 'output': ['hostid']}).get('result')\n if host_raw:\n host_id = host_raw[0].get('hostid')\n else:\n host_new = self.connection.do_request('host.create', {'host':\n f\"{data.get('ext')}\", 'templates': [{'templateid': self.\n template_id}], 'groups': [{'groupid': self.group_id}]})\n host_id = host_new.get('result').get('hostids')[0]\n self.send_data(data)\n",
"step-5": "from pyzabbix import ZabbixMetric, ZabbixSender, ZabbixAPI\nfrom datetime import datetime\nfrom re import findall\n\n# current_time = datetime.now().strftime(\"%H:%M:%S %d.%m.%Y\")\n\nclass ZabbixItem():\n\n def __init__(self, user, password, ext_group, ext_template, zabbix_host):\n self.user = user\n self.password = password\n self.zabbix_host = zabbix_host\n self.zabbix_api = f\"http://{zabbix_host}\"\n\n self.connection = self.connection_init()\n self.template_id = self.get_template(ext_template)\n self.group_id = self.get_group(ext_group)\n # print(self.get_group(EXT_GROUP))\n\n def connection_init(self):\n '''\n Zabbix connection init\n :return: connection\n '''\n return ZabbixAPI(f\"http://{self.zabbix_host}\", user=self.user, password=self.password)\n\n def get_template(self, template_name):\n '''\n Get template id by template name\n :param template_name:\n :return: template id as string\n '''\n\n ext_template = self.connection.do_request(\"template.get\", {\n \"filter\": {\"host\": [template_name]},\n \"output\": \"template_id\"\n }).get(\"result\")\n\n if ext_template:\n result = ext_template[0].get(\"templateid\")\n else:\n result = False\n return result\n\n def get_group(self, group_name):\n \"\"\"\n Get group Id\n :param group_name:\n :return: group ID\n \"\"\"\n\n group = self.connection.do_request(\"hostgroup.get\", {\n \"filter\": {\"name\": [group_name]},\n \"output\": \"extend\"\n }).get(\"result\")\n\n if group:\n result = group[0].get(\"groupid\")\n else:\n # print(\"create Group\")\n result = False\n return result\n\n def clear_ping(self, value):\n \"\"\"\n clear ping value from text\n :param value: raw data, 50 ms as example\n :return: integer value\n \"\"\"\n\n try:\n result = int(value[:value.find(\" \")])\n except IndexError:\n result = False\n except ValueError:\n # print(value)\n result = False\n return result\n\n\n def host_create(self, data):\n '''\n Create host item\n :param host_params:\n :return: host id\n '''\n\n return self.connection.do_request('host.create', data)[0].get(\"result\")\n def assign_template_to_host(self, host_id):\n \"\"\"\n Assign template to host\n :param host_id: host id\n :return:\n \"\"\"\n\n return self.connection.do_request(\"template.update\", teamplateid=self.template_id, hosts=[host_id])\n\n def send_data(self, data):\n \"\"\"\n Send data to server\n :param data: data dict\n :return:\n \"\"\"\n # test_dict = {'ext': '1105', 'ip_address': '192.168.10.55', 'status': 'OK', 'ping': '5 ms', 'user': 'Secretary',\n # 'user_agent': 'Cisco/SPA508G-7.4.9a'}\n\n sender_data = []\n host_id = data.get(\"ext\")\n # print(ZABBIX_HOST)\n zbx_sender = ZabbixSender(self.zabbix_host)\n extension_ip = ZabbixMetric(host_id, 'extPhoneIpAddress', data.get(\"ip_address\"))\n sender_data.append(extension_ip)\n\n extension_ping = ZabbixMetric(host_id, \"extPhonePing\", self.clear_ping(data.get(\"ping\", 10000)))\n sender_data.append(extension_ping)\n\n extension_status = ZabbixMetric(host_id, \"extStatus\", data.get(\"status\", \"\"))\n sender_data.append(extension_status)\n\n extension_user = ZabbixMetric(host_id, \"extUser\", data.get(\"user\", \"\"))\n sender_data.append(extension_user)\n\n extension_useragent = ZabbixMetric(host_id, \"extUserAgent\", data.get(\"user_agent\", \"\"))\n sender_data.append(extension_useragent)\n zbx_sender.send(sender_data)\n\n def worker(self, data):\n \"\"\"\n Check host. If extension exists - send new data, otherwise - create extension's host in zabbix and send data.\n :param data: dict with data\n :return: host id\n \"\"\"\n print(data)\n host_raw = self.connection.do_request('host.get', {\n 'filter': {'host': data[\"ext\"]},\n 'output': ['hostid']\n }).get(\"result\")\n # print(\"host_raw\", host_raw)\n if host_raw:\n host_id = host_raw[0].get(\"hostid\")\n\n else:\n host_new = self.connection.do_request('host.create', {\"host\" : f\"{data.get('ext')}\",\n \"templates\": [\n {\"templateid\" : self.template_id}\n ],\n \"groups\": [\n {\"groupid\": self.group_id}\n ]\n\n })\n\n host_id = host_new.get(\"result\").get(\"hostids\")[0]\n self.send_data(data)\n",
"step-ids": [
6,
8,
9,
11,
12
]
}
|
[
6,
8,
9,
11,
12
] |
<|reserved_special_token_0|>
def calc_auc(subm):
preds = subm['target'].values
labels = subm['labels'].values
if len(set(labels)) == 1:
print('warning calc_auc with single label dataset, return 0')
return 0
return metrics.roc_auc_score(labels, preds)
def save_submission(df, name, do_submit=False):
df_submission = df[['image_name', 'target']]
df_submission.to_csv(name, index=False)
if do_submit:
name_with_quotes = '"' + name + '"'
os.system(
f'kaggle competitions submit -c siim-isic-melanoma-classification -f {name_with_quotes} -m {name_with_quotes}'
)
def main(nfolds, work_dir):
val_avg_tta_le_auc = None
val_avg_tta_auc = None
tta_type = 'tta_'
for le in ['', 'le']:
for m_type in ['', tta_type]:
a = []
for fold in range(nfolds):
if len(le) > 0:
name = f'val_le_{fold}_single_model_{m_type}submission.csv'
else:
name = f'val_{fold}_single_model_{m_type}submission.csv'
filename = os.path.join(work_dir, name)
if os.path.exists(filename):
sub = pd.read_csv(filename)
a.append(calc_auc(sub))
print(f'{le}_val_single_model_{m_type}metrics={a}')
print(f'{le}_val_single_model_{m_type}avg_metric={np.mean(a)}')
if m_type == tta_type:
if le == 'le':
val_avg_tta_le_auc = np.mean(a)
else:
val_avg_tta_auc = np.mean(a)
for le in ['', 'le']:
for m_type in ['', 'tta_']:
a = []
subs = []
for fold in range(nfolds):
if le == '':
name = f'test_{fold}_single_model_{m_type}submission.csv'
else:
name = (
f'test_{le}_{fold}_single_model_{m_type}submission.csv'
)
filename = os.path.join(work_dir, name)
if os.path.exists(filename):
sub = pd.read_csv(filename)
a.append(calc_auc(sub))
save_submission(sub, os.path.join(work_dir, 'kaggle_' +
name))
subs.append(sub)
if subs:
avg_sub = submission.aggregate_submissions(subs)
auc_avg_sub = calc_auc(avg_sub)
save_submission(avg_sub, os.path.join(work_dir, 'kaggle_' +
f'test_{le}_{m_type}.csv'))
else:
auc_avg_sub = None
print(f'{le}_test_single_model_{m_type}metrics={a}')
print(f'{le}_test_single_model_{m_type}avg_metric={np.mean(a)}')
print(f'{le}_test_avg_model_{m_type}_metric={auc_avg_sub}')
return val_avg_tta_le_auc, val_avg_tta_auc
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def calc_auc(subm):
preds = subm['target'].values
labels = subm['labels'].values
if len(set(labels)) == 1:
print('warning calc_auc with single label dataset, return 0')
return 0
return metrics.roc_auc_score(labels, preds)
def save_submission(df, name, do_submit=False):
df_submission = df[['image_name', 'target']]
df_submission.to_csv(name, index=False)
if do_submit:
name_with_quotes = '"' + name + '"'
os.system(
f'kaggle competitions submit -c siim-isic-melanoma-classification -f {name_with_quotes} -m {name_with_quotes}'
)
def main(nfolds, work_dir):
val_avg_tta_le_auc = None
val_avg_tta_auc = None
tta_type = 'tta_'
for le in ['', 'le']:
for m_type in ['', tta_type]:
a = []
for fold in range(nfolds):
if len(le) > 0:
name = f'val_le_{fold}_single_model_{m_type}submission.csv'
else:
name = f'val_{fold}_single_model_{m_type}submission.csv'
filename = os.path.join(work_dir, name)
if os.path.exists(filename):
sub = pd.read_csv(filename)
a.append(calc_auc(sub))
print(f'{le}_val_single_model_{m_type}metrics={a}')
print(f'{le}_val_single_model_{m_type}avg_metric={np.mean(a)}')
if m_type == tta_type:
if le == 'le':
val_avg_tta_le_auc = np.mean(a)
else:
val_avg_tta_auc = np.mean(a)
for le in ['', 'le']:
for m_type in ['', 'tta_']:
a = []
subs = []
for fold in range(nfolds):
if le == '':
name = f'test_{fold}_single_model_{m_type}submission.csv'
else:
name = (
f'test_{le}_{fold}_single_model_{m_type}submission.csv'
)
filename = os.path.join(work_dir, name)
if os.path.exists(filename):
sub = pd.read_csv(filename)
a.append(calc_auc(sub))
save_submission(sub, os.path.join(work_dir, 'kaggle_' +
name))
subs.append(sub)
if subs:
avg_sub = submission.aggregate_submissions(subs)
auc_avg_sub = calc_auc(avg_sub)
save_submission(avg_sub, os.path.join(work_dir, 'kaggle_' +
f'test_{le}_{m_type}.csv'))
else:
auc_avg_sub = None
print(f'{le}_test_single_model_{m_type}metrics={a}')
print(f'{le}_test_single_model_{m_type}avg_metric={np.mean(a)}')
print(f'{le}_test_avg_model_{m_type}_metric={auc_avg_sub}')
return val_avg_tta_le_auc, val_avg_tta_auc
<|reserved_special_token_0|>
parser.add_argument('--work_dir', type=str)
parser.add_argument('--folds', type=int, default=0)
if __name__ == '__main__':
args = parser.parse_args()
if args.folds == 0:
nfolds = len(glob.glob(os.path.join(args.work_dir, 'loss*.png')))
print(f' --folds not specified, will use {nfolds}')
else:
nfolds = args.folds
main(nfolds, args.work_dir)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def calc_auc(subm):
preds = subm['target'].values
labels = subm['labels'].values
if len(set(labels)) == 1:
print('warning calc_auc with single label dataset, return 0')
return 0
return metrics.roc_auc_score(labels, preds)
def save_submission(df, name, do_submit=False):
df_submission = df[['image_name', 'target']]
df_submission.to_csv(name, index=False)
if do_submit:
name_with_quotes = '"' + name + '"'
os.system(
f'kaggle competitions submit -c siim-isic-melanoma-classification -f {name_with_quotes} -m {name_with_quotes}'
)
def main(nfolds, work_dir):
val_avg_tta_le_auc = None
val_avg_tta_auc = None
tta_type = 'tta_'
for le in ['', 'le']:
for m_type in ['', tta_type]:
a = []
for fold in range(nfolds):
if len(le) > 0:
name = f'val_le_{fold}_single_model_{m_type}submission.csv'
else:
name = f'val_{fold}_single_model_{m_type}submission.csv'
filename = os.path.join(work_dir, name)
if os.path.exists(filename):
sub = pd.read_csv(filename)
a.append(calc_auc(sub))
print(f'{le}_val_single_model_{m_type}metrics={a}')
print(f'{le}_val_single_model_{m_type}avg_metric={np.mean(a)}')
if m_type == tta_type:
if le == 'le':
val_avg_tta_le_auc = np.mean(a)
else:
val_avg_tta_auc = np.mean(a)
for le in ['', 'le']:
for m_type in ['', 'tta_']:
a = []
subs = []
for fold in range(nfolds):
if le == '':
name = f'test_{fold}_single_model_{m_type}submission.csv'
else:
name = (
f'test_{le}_{fold}_single_model_{m_type}submission.csv'
)
filename = os.path.join(work_dir, name)
if os.path.exists(filename):
sub = pd.read_csv(filename)
a.append(calc_auc(sub))
save_submission(sub, os.path.join(work_dir, 'kaggle_' +
name))
subs.append(sub)
if subs:
avg_sub = submission.aggregate_submissions(subs)
auc_avg_sub = calc_auc(avg_sub)
save_submission(avg_sub, os.path.join(work_dir, 'kaggle_' +
f'test_{le}_{m_type}.csv'))
else:
auc_avg_sub = None
print(f'{le}_test_single_model_{m_type}metrics={a}')
print(f'{le}_test_single_model_{m_type}avg_metric={np.mean(a)}')
print(f'{le}_test_avg_model_{m_type}_metric={auc_avg_sub}')
return val_avg_tta_le_auc, val_avg_tta_auc
parser = argparse.ArgumentParser()
parser.add_argument('--work_dir', type=str)
parser.add_argument('--folds', type=int, default=0)
if __name__ == '__main__':
args = parser.parse_args()
if args.folds == 0:
nfolds = len(glob.glob(os.path.join(args.work_dir, 'loss*.png')))
print(f' --folds not specified, will use {nfolds}')
else:
nfolds = args.folds
main(nfolds, args.work_dir)
<|reserved_special_token_1|>
import os
from sklearn import metrics
import pandas as pd
import numpy as np
from submission import submission
import argparse
import glob
def calc_auc(subm):
preds = subm['target'].values
labels = subm['labels'].values
if len(set(labels)) == 1:
print('warning calc_auc with single label dataset, return 0')
return 0
return metrics.roc_auc_score(labels, preds)
def save_submission(df, name, do_submit=False):
df_submission = df[['image_name', 'target']]
df_submission.to_csv(name, index=False)
if do_submit:
name_with_quotes = '"' + name + '"'
os.system(
f'kaggle competitions submit -c siim-isic-melanoma-classification -f {name_with_quotes} -m {name_with_quotes}'
)
def main(nfolds, work_dir):
val_avg_tta_le_auc = None
val_avg_tta_auc = None
tta_type = 'tta_'
for le in ['', 'le']:
for m_type in ['', tta_type]:
a = []
for fold in range(nfolds):
if len(le) > 0:
name = f'val_le_{fold}_single_model_{m_type}submission.csv'
else:
name = f'val_{fold}_single_model_{m_type}submission.csv'
filename = os.path.join(work_dir, name)
if os.path.exists(filename):
sub = pd.read_csv(filename)
a.append(calc_auc(sub))
print(f'{le}_val_single_model_{m_type}metrics={a}')
print(f'{le}_val_single_model_{m_type}avg_metric={np.mean(a)}')
if m_type == tta_type:
if le == 'le':
val_avg_tta_le_auc = np.mean(a)
else:
val_avg_tta_auc = np.mean(a)
for le in ['', 'le']:
for m_type in ['', 'tta_']:
a = []
subs = []
for fold in range(nfolds):
if le == '':
name = f'test_{fold}_single_model_{m_type}submission.csv'
else:
name = (
f'test_{le}_{fold}_single_model_{m_type}submission.csv'
)
filename = os.path.join(work_dir, name)
if os.path.exists(filename):
sub = pd.read_csv(filename)
a.append(calc_auc(sub))
save_submission(sub, os.path.join(work_dir, 'kaggle_' +
name))
subs.append(sub)
if subs:
avg_sub = submission.aggregate_submissions(subs)
auc_avg_sub = calc_auc(avg_sub)
save_submission(avg_sub, os.path.join(work_dir, 'kaggle_' +
f'test_{le}_{m_type}.csv'))
else:
auc_avg_sub = None
print(f'{le}_test_single_model_{m_type}metrics={a}')
print(f'{le}_test_single_model_{m_type}avg_metric={np.mean(a)}')
print(f'{le}_test_avg_model_{m_type}_metric={auc_avg_sub}')
return val_avg_tta_le_auc, val_avg_tta_auc
parser = argparse.ArgumentParser()
parser.add_argument('--work_dir', type=str)
parser.add_argument('--folds', type=int, default=0)
if __name__ == '__main__':
args = parser.parse_args()
if args.folds == 0:
nfolds = len(glob.glob(os.path.join(args.work_dir, 'loss*.png')))
print(f' --folds not specified, will use {nfolds}')
else:
nfolds = args.folds
main(nfolds, args.work_dir)
<|reserved_special_token_1|>
import os
from sklearn import metrics
import pandas as pd
import numpy as np
from submission import submission
import argparse
import glob
def calc_auc(subm):
preds=subm['target'].values
labels=subm['labels'].values
if len(set(labels))==1:
print('warning calc_auc with single label dataset, return 0')
return 0
return metrics.roc_auc_score(labels, preds)
def save_submission(df, name, do_submit=False):
df_submission = df[['image_name', 'target']]
df_submission.to_csv(name, index=False)
if do_submit:
name_with_quotes='\"'+name+'\"'
os.system(f'kaggle competitions submit -c siim-isic-melanoma-classification -f {name_with_quotes} -m {name_with_quotes}')
def main(nfolds, work_dir):
val_avg_tta_le_auc=None
val_avg_tta_auc = None
tta_type='tta_'
for le in ['', 'le']:
for m_type in ['', tta_type]:
a = []
for fold in range(nfolds):
if len(le)>0:
name = f'val_le_{fold}_single_model_{m_type}submission.csv'
else:
name = f'val_{fold}_single_model_{m_type}submission.csv'
filename=os.path.join(work_dir, name)
if os.path.exists(filename):
sub = pd.read_csv(filename)
a.append(calc_auc(sub))
print(f'{le}_val_single_model_{m_type}metrics={a}')
print(f'{le}_val_single_model_{m_type}avg_metric={np.mean(a)}')
if m_type==tta_type:
if le=='le':
val_avg_tta_le_auc=np.mean(a)
else:
val_avg_tta_auc=np.mean(a)
for le in ['', 'le']:
for m_type in ['', 'tta_']:
a = []
subs = []
for fold in range(nfolds):
if le=='':
name = f'test_{fold}_single_model_{m_type}submission.csv'
else:
name = f'test_{le}_{fold}_single_model_{m_type}submission.csv'
filename=os.path.join(work_dir, name)
if os.path.exists(filename):
sub = pd.read_csv(filename)
a.append(calc_auc(sub))
save_submission(sub, os.path.join(work_dir, 'kaggle_' + name))
subs.append(sub)
if subs:
avg_sub = submission.aggregate_submissions(subs)
auc_avg_sub=calc_auc(avg_sub)
save_submission(avg_sub, os.path.join(work_dir, 'kaggle_' + f'test_{le}_{m_type}.csv'))
else:
auc_avg_sub=None
print(f'{le}_test_single_model_{m_type}metrics={a}')
print(f'{le}_test_single_model_{m_type}avg_metric={np.mean(a)}')
print(f'{le}_test_avg_model_{m_type}_metric={auc_avg_sub}')
return val_avg_tta_le_auc, val_avg_tta_auc
parser = argparse.ArgumentParser()
parser.add_argument('--work_dir',type=str)
parser.add_argument('--folds',type=int, default=0)
if __name__=="__main__":
args=parser.parse_args()
if args.folds==0:
nfolds = len(glob.glob(os.path.join(args.work_dir,'loss*.png')))
print(f' --folds not specified, will use {nfolds}')
else:
nfolds=args.folds
main(nfolds,args.work_dir)
|
flexible
|
{
"blob_id": "fe0b21deb2e48ad74449b264265729cb328090ea",
"index": 6380,
"step-1": "<mask token>\n\n\ndef calc_auc(subm):\n preds = subm['target'].values\n labels = subm['labels'].values\n if len(set(labels)) == 1:\n print('warning calc_auc with single label dataset, return 0')\n return 0\n return metrics.roc_auc_score(labels, preds)\n\n\ndef save_submission(df, name, do_submit=False):\n df_submission = df[['image_name', 'target']]\n df_submission.to_csv(name, index=False)\n if do_submit:\n name_with_quotes = '\"' + name + '\"'\n os.system(\n f'kaggle competitions submit -c siim-isic-melanoma-classification -f {name_with_quotes} -m {name_with_quotes}'\n )\n\n\ndef main(nfolds, work_dir):\n val_avg_tta_le_auc = None\n val_avg_tta_auc = None\n tta_type = 'tta_'\n for le in ['', 'le']:\n for m_type in ['', tta_type]:\n a = []\n for fold in range(nfolds):\n if len(le) > 0:\n name = f'val_le_{fold}_single_model_{m_type}submission.csv'\n else:\n name = f'val_{fold}_single_model_{m_type}submission.csv'\n filename = os.path.join(work_dir, name)\n if os.path.exists(filename):\n sub = pd.read_csv(filename)\n a.append(calc_auc(sub))\n print(f'{le}_val_single_model_{m_type}metrics={a}')\n print(f'{le}_val_single_model_{m_type}avg_metric={np.mean(a)}')\n if m_type == tta_type:\n if le == 'le':\n val_avg_tta_le_auc = np.mean(a)\n else:\n val_avg_tta_auc = np.mean(a)\n for le in ['', 'le']:\n for m_type in ['', 'tta_']:\n a = []\n subs = []\n for fold in range(nfolds):\n if le == '':\n name = f'test_{fold}_single_model_{m_type}submission.csv'\n else:\n name = (\n f'test_{le}_{fold}_single_model_{m_type}submission.csv'\n )\n filename = os.path.join(work_dir, name)\n if os.path.exists(filename):\n sub = pd.read_csv(filename)\n a.append(calc_auc(sub))\n save_submission(sub, os.path.join(work_dir, 'kaggle_' +\n name))\n subs.append(sub)\n if subs:\n avg_sub = submission.aggregate_submissions(subs)\n auc_avg_sub = calc_auc(avg_sub)\n save_submission(avg_sub, os.path.join(work_dir, 'kaggle_' +\n f'test_{le}_{m_type}.csv'))\n else:\n auc_avg_sub = None\n print(f'{le}_test_single_model_{m_type}metrics={a}')\n print(f'{le}_test_single_model_{m_type}avg_metric={np.mean(a)}')\n print(f'{le}_test_avg_model_{m_type}_metric={auc_avg_sub}')\n return val_avg_tta_le_auc, val_avg_tta_auc\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef calc_auc(subm):\n preds = subm['target'].values\n labels = subm['labels'].values\n if len(set(labels)) == 1:\n print('warning calc_auc with single label dataset, return 0')\n return 0\n return metrics.roc_auc_score(labels, preds)\n\n\ndef save_submission(df, name, do_submit=False):\n df_submission = df[['image_name', 'target']]\n df_submission.to_csv(name, index=False)\n if do_submit:\n name_with_quotes = '\"' + name + '\"'\n os.system(\n f'kaggle competitions submit -c siim-isic-melanoma-classification -f {name_with_quotes} -m {name_with_quotes}'\n )\n\n\ndef main(nfolds, work_dir):\n val_avg_tta_le_auc = None\n val_avg_tta_auc = None\n tta_type = 'tta_'\n for le in ['', 'le']:\n for m_type in ['', tta_type]:\n a = []\n for fold in range(nfolds):\n if len(le) > 0:\n name = f'val_le_{fold}_single_model_{m_type}submission.csv'\n else:\n name = f'val_{fold}_single_model_{m_type}submission.csv'\n filename = os.path.join(work_dir, name)\n if os.path.exists(filename):\n sub = pd.read_csv(filename)\n a.append(calc_auc(sub))\n print(f'{le}_val_single_model_{m_type}metrics={a}')\n print(f'{le}_val_single_model_{m_type}avg_metric={np.mean(a)}')\n if m_type == tta_type:\n if le == 'le':\n val_avg_tta_le_auc = np.mean(a)\n else:\n val_avg_tta_auc = np.mean(a)\n for le in ['', 'le']:\n for m_type in ['', 'tta_']:\n a = []\n subs = []\n for fold in range(nfolds):\n if le == '':\n name = f'test_{fold}_single_model_{m_type}submission.csv'\n else:\n name = (\n f'test_{le}_{fold}_single_model_{m_type}submission.csv'\n )\n filename = os.path.join(work_dir, name)\n if os.path.exists(filename):\n sub = pd.read_csv(filename)\n a.append(calc_auc(sub))\n save_submission(sub, os.path.join(work_dir, 'kaggle_' +\n name))\n subs.append(sub)\n if subs:\n avg_sub = submission.aggregate_submissions(subs)\n auc_avg_sub = calc_auc(avg_sub)\n save_submission(avg_sub, os.path.join(work_dir, 'kaggle_' +\n f'test_{le}_{m_type}.csv'))\n else:\n auc_avg_sub = None\n print(f'{le}_test_single_model_{m_type}metrics={a}')\n print(f'{le}_test_single_model_{m_type}avg_metric={np.mean(a)}')\n print(f'{le}_test_avg_model_{m_type}_metric={auc_avg_sub}')\n return val_avg_tta_le_auc, val_avg_tta_auc\n\n\n<mask token>\nparser.add_argument('--work_dir', type=str)\nparser.add_argument('--folds', type=int, default=0)\nif __name__ == '__main__':\n args = parser.parse_args()\n if args.folds == 0:\n nfolds = len(glob.glob(os.path.join(args.work_dir, 'loss*.png')))\n print(f' --folds not specified, will use {nfolds}')\n else:\n nfolds = args.folds\n main(nfolds, args.work_dir)\n",
"step-3": "<mask token>\n\n\ndef calc_auc(subm):\n preds = subm['target'].values\n labels = subm['labels'].values\n if len(set(labels)) == 1:\n print('warning calc_auc with single label dataset, return 0')\n return 0\n return metrics.roc_auc_score(labels, preds)\n\n\ndef save_submission(df, name, do_submit=False):\n df_submission = df[['image_name', 'target']]\n df_submission.to_csv(name, index=False)\n if do_submit:\n name_with_quotes = '\"' + name + '\"'\n os.system(\n f'kaggle competitions submit -c siim-isic-melanoma-classification -f {name_with_quotes} -m {name_with_quotes}'\n )\n\n\ndef main(nfolds, work_dir):\n val_avg_tta_le_auc = None\n val_avg_tta_auc = None\n tta_type = 'tta_'\n for le in ['', 'le']:\n for m_type in ['', tta_type]:\n a = []\n for fold in range(nfolds):\n if len(le) > 0:\n name = f'val_le_{fold}_single_model_{m_type}submission.csv'\n else:\n name = f'val_{fold}_single_model_{m_type}submission.csv'\n filename = os.path.join(work_dir, name)\n if os.path.exists(filename):\n sub = pd.read_csv(filename)\n a.append(calc_auc(sub))\n print(f'{le}_val_single_model_{m_type}metrics={a}')\n print(f'{le}_val_single_model_{m_type}avg_metric={np.mean(a)}')\n if m_type == tta_type:\n if le == 'le':\n val_avg_tta_le_auc = np.mean(a)\n else:\n val_avg_tta_auc = np.mean(a)\n for le in ['', 'le']:\n for m_type in ['', 'tta_']:\n a = []\n subs = []\n for fold in range(nfolds):\n if le == '':\n name = f'test_{fold}_single_model_{m_type}submission.csv'\n else:\n name = (\n f'test_{le}_{fold}_single_model_{m_type}submission.csv'\n )\n filename = os.path.join(work_dir, name)\n if os.path.exists(filename):\n sub = pd.read_csv(filename)\n a.append(calc_auc(sub))\n save_submission(sub, os.path.join(work_dir, 'kaggle_' +\n name))\n subs.append(sub)\n if subs:\n avg_sub = submission.aggregate_submissions(subs)\n auc_avg_sub = calc_auc(avg_sub)\n save_submission(avg_sub, os.path.join(work_dir, 'kaggle_' +\n f'test_{le}_{m_type}.csv'))\n else:\n auc_avg_sub = None\n print(f'{le}_test_single_model_{m_type}metrics={a}')\n print(f'{le}_test_single_model_{m_type}avg_metric={np.mean(a)}')\n print(f'{le}_test_avg_model_{m_type}_metric={auc_avg_sub}')\n return val_avg_tta_le_auc, val_avg_tta_auc\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--work_dir', type=str)\nparser.add_argument('--folds', type=int, default=0)\nif __name__ == '__main__':\n args = parser.parse_args()\n if args.folds == 0:\n nfolds = len(glob.glob(os.path.join(args.work_dir, 'loss*.png')))\n print(f' --folds not specified, will use {nfolds}')\n else:\n nfolds = args.folds\n main(nfolds, args.work_dir)\n",
"step-4": "import os\nfrom sklearn import metrics\nimport pandas as pd\nimport numpy as np\nfrom submission import submission\nimport argparse\nimport glob\n\n\ndef calc_auc(subm):\n preds = subm['target'].values\n labels = subm['labels'].values\n if len(set(labels)) == 1:\n print('warning calc_auc with single label dataset, return 0')\n return 0\n return metrics.roc_auc_score(labels, preds)\n\n\ndef save_submission(df, name, do_submit=False):\n df_submission = df[['image_name', 'target']]\n df_submission.to_csv(name, index=False)\n if do_submit:\n name_with_quotes = '\"' + name + '\"'\n os.system(\n f'kaggle competitions submit -c siim-isic-melanoma-classification -f {name_with_quotes} -m {name_with_quotes}'\n )\n\n\ndef main(nfolds, work_dir):\n val_avg_tta_le_auc = None\n val_avg_tta_auc = None\n tta_type = 'tta_'\n for le in ['', 'le']:\n for m_type in ['', tta_type]:\n a = []\n for fold in range(nfolds):\n if len(le) > 0:\n name = f'val_le_{fold}_single_model_{m_type}submission.csv'\n else:\n name = f'val_{fold}_single_model_{m_type}submission.csv'\n filename = os.path.join(work_dir, name)\n if os.path.exists(filename):\n sub = pd.read_csv(filename)\n a.append(calc_auc(sub))\n print(f'{le}_val_single_model_{m_type}metrics={a}')\n print(f'{le}_val_single_model_{m_type}avg_metric={np.mean(a)}')\n if m_type == tta_type:\n if le == 'le':\n val_avg_tta_le_auc = np.mean(a)\n else:\n val_avg_tta_auc = np.mean(a)\n for le in ['', 'le']:\n for m_type in ['', 'tta_']:\n a = []\n subs = []\n for fold in range(nfolds):\n if le == '':\n name = f'test_{fold}_single_model_{m_type}submission.csv'\n else:\n name = (\n f'test_{le}_{fold}_single_model_{m_type}submission.csv'\n )\n filename = os.path.join(work_dir, name)\n if os.path.exists(filename):\n sub = pd.read_csv(filename)\n a.append(calc_auc(sub))\n save_submission(sub, os.path.join(work_dir, 'kaggle_' +\n name))\n subs.append(sub)\n if subs:\n avg_sub = submission.aggregate_submissions(subs)\n auc_avg_sub = calc_auc(avg_sub)\n save_submission(avg_sub, os.path.join(work_dir, 'kaggle_' +\n f'test_{le}_{m_type}.csv'))\n else:\n auc_avg_sub = None\n print(f'{le}_test_single_model_{m_type}metrics={a}')\n print(f'{le}_test_single_model_{m_type}avg_metric={np.mean(a)}')\n print(f'{le}_test_avg_model_{m_type}_metric={auc_avg_sub}')\n return val_avg_tta_le_auc, val_avg_tta_auc\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--work_dir', type=str)\nparser.add_argument('--folds', type=int, default=0)\nif __name__ == '__main__':\n args = parser.parse_args()\n if args.folds == 0:\n nfolds = len(glob.glob(os.path.join(args.work_dir, 'loss*.png')))\n print(f' --folds not specified, will use {nfolds}')\n else:\n nfolds = args.folds\n main(nfolds, args.work_dir)\n",
"step-5": "import os\nfrom sklearn import metrics\nimport pandas as pd\nimport numpy as np\nfrom submission import submission\nimport argparse\nimport glob\n\n\n\ndef calc_auc(subm):\n preds=subm['target'].values\n labels=subm['labels'].values\n if len(set(labels))==1:\n print('warning calc_auc with single label dataset, return 0')\n return 0\n return metrics.roc_auc_score(labels, preds)\n\n\ndef save_submission(df, name, do_submit=False):\n df_submission = df[['image_name', 'target']]\n\n df_submission.to_csv(name, index=False)\n if do_submit:\n name_with_quotes='\\\"'+name+'\\\"'\n os.system(f'kaggle competitions submit -c siim-isic-melanoma-classification -f {name_with_quotes} -m {name_with_quotes}')\n\n\ndef main(nfolds, work_dir):\n val_avg_tta_le_auc=None\n val_avg_tta_auc = None\n tta_type='tta_'\n for le in ['', 'le']:\n for m_type in ['', tta_type]:\n a = []\n\n for fold in range(nfolds):\n if len(le)>0:\n name = f'val_le_{fold}_single_model_{m_type}submission.csv'\n else:\n name = f'val_{fold}_single_model_{m_type}submission.csv'\n filename=os.path.join(work_dir, name)\n if os.path.exists(filename):\n sub = pd.read_csv(filename)\n a.append(calc_auc(sub))\n print(f'{le}_val_single_model_{m_type}metrics={a}')\n print(f'{le}_val_single_model_{m_type}avg_metric={np.mean(a)}')\n if m_type==tta_type:\n if le=='le':\n val_avg_tta_le_auc=np.mean(a)\n else:\n val_avg_tta_auc=np.mean(a)\n\n for le in ['', 'le']:\n for m_type in ['', 'tta_']:\n a = []\n subs = []\n for fold in range(nfolds):\n if le=='':\n name = f'test_{fold}_single_model_{m_type}submission.csv'\n else:\n name = f'test_{le}_{fold}_single_model_{m_type}submission.csv'\n filename=os.path.join(work_dir, name)\n if os.path.exists(filename):\n sub = pd.read_csv(filename)\n a.append(calc_auc(sub))\n save_submission(sub, os.path.join(work_dir, 'kaggle_' + name))\n subs.append(sub)\n if subs:\n avg_sub = submission.aggregate_submissions(subs)\n auc_avg_sub=calc_auc(avg_sub)\n save_submission(avg_sub, os.path.join(work_dir, 'kaggle_' + f'test_{le}_{m_type}.csv'))\n else:\n auc_avg_sub=None\n\n print(f'{le}_test_single_model_{m_type}metrics={a}')\n print(f'{le}_test_single_model_{m_type}avg_metric={np.mean(a)}')\n print(f'{le}_test_avg_model_{m_type}_metric={auc_avg_sub}')\n return val_avg_tta_le_auc, val_avg_tta_auc\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--work_dir',type=str)\nparser.add_argument('--folds',type=int, default=0)\n\n\nif __name__==\"__main__\":\n\n args=parser.parse_args()\n\n if args.folds==0:\n nfolds = len(glob.glob(os.path.join(args.work_dir,'loss*.png')))\n print(f' --folds not specified, will use {nfolds}')\n else:\n nfolds=args.folds\n\n main(nfolds,args.work_dir)\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
#!/usr/bin/env python
# coding: utf-8
# In[2]:
from __future__ import absolute_import, division, print_function, unicode_literals
import tensorflow as tf
print("Num GPUs Available: ", len(tf.config.experimental.list_physical_devices('GPU')))
# In[1]:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf
#tf.config.allow_growth = True
#config.gpu_options.allow_growth = True
#session = tf.Session(config=config....)
from tensorflow import keras
# In[5]:
data = keras.datasets.fashion_mnist
(train_X, train_y), (test_X,test_y) = data.load_data()
class_names = ['t-shirt', 'trouser', 'pullover', 'dress'
,'coat', 'sandal', 'shirt', 'sneaker'
, 'bag', 'ankle boot']
train_X = train_X/255
test_X = test_X/255
# In[7]:
plt.imshow(train_X[7], cmap= 'binary')
# In[ ]:
def convolve(image,fltr):
r_p = 0
c_p = 0
conv_list = []
while (r_p+1) <= image.shape[0]-1 :
while (c_p+1) <= image.shape[1]-1 :
x = np.sum(np.multiply(image[r_p : r_p+2 , c_p : c_p+2],fltr))
conv_list.append(x)
c_p += 1
r_p += 1
c_p = 0
return conv_list
img_matrix = np.array(train.iloc[6,1:]).reshape(28,28)
flt = np.matrix([[1,1],[0,0]])
conv = np.array(convolve(img_matrix,flt)).reshape(27,27)
plt.imshow(img_matrix, cmap='gray')
plt.show()
plt.imshow(conv, cmap='gray')
plt.show()
# In[33]:
with tf.device('GPU:0'):
model = keras.Sequential([
#keras.layers.Conv2D(filters=32 ,kernel_size=3, activation='relu',input_shape=(28,28,1)),
keras.layers.Flatten(input_shape=(28,28)),
#keras.layers.Dense(128, activation='relu'),
keras.layers.Dense(2560, activation='relu'),
keras.layers.Dense(2560, activation='relu'),
#keras.layers.Dense(2560, activation='relu'),
keras.layers.Dense(10, activation='softmax')
])
print(model.summary())
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
import time
tic = time.time()
from warnings import filterwarnings
filterwarnings
model.fit(train_X, train_y,batch_size=1024, epochs=3)
toc = time.time()
print('time : {:0.1f} sec '.format(toc-tic))
# In[72]:
#predictions
train_loss, train_accuracy = model.evaluate(train_X, train_y,verbose=False )
test_loss, test_accuracy = model.evaluate(test_X, test_y, verbose = False )
# In[73]:
print('trin_accuracy : {}'.format(train_accuracy))
print('test_accuracy : {}'.format(test_accuracy))
# In[74]:
predictions = model.predict(test_X)
# In[76]:
plt.imshow(test_X[26], cmap='binary')
plt.title(class_names[test_y[26]])
|
normal
|
{
"blob_id": "aea92827753e12d2dc95d63ddd0fe4eb8ced5d14",
"index": 3815,
"step-1": "<mask token>\n\n\ndef convolve(image, fltr):\n r_p = 0\n c_p = 0\n conv_list = []\n while r_p + 1 <= image.shape[0] - 1:\n while c_p + 1 <= image.shape[1] - 1:\n x = np.sum(np.multiply(image[r_p:r_p + 2, c_p:c_p + 2], fltr))\n conv_list.append(x)\n c_p += 1\n r_p += 1\n c_p = 0\n return conv_list\n\n\n<mask token>\n",
"step-2": "<mask token>\nprint('Num GPUs Available: ', len(tf.config.experimental.\n list_physical_devices('GPU')))\n<mask token>\nplt.imshow(train_X[7], cmap='binary')\n\n\ndef convolve(image, fltr):\n r_p = 0\n c_p = 0\n conv_list = []\n while r_p + 1 <= image.shape[0] - 1:\n while c_p + 1 <= image.shape[1] - 1:\n x = np.sum(np.multiply(image[r_p:r_p + 2, c_p:c_p + 2], fltr))\n conv_list.append(x)\n c_p += 1\n r_p += 1\n c_p = 0\n return conv_list\n\n\n<mask token>\nplt.imshow(img_matrix, cmap='gray')\nplt.show()\nplt.imshow(conv, cmap='gray')\nplt.show()\nwith tf.device('GPU:0'):\n model = keras.Sequential([keras.layers.Flatten(input_shape=(28, 28)),\n keras.layers.Dense(2560, activation='relu'), keras.layers.Dense(\n 2560, activation='relu'), keras.layers.Dense(10, activation='softmax')]\n )\n print(model.summary())\n model.compile(optimizer='adam', loss='sparse_categorical_crossentropy',\n metrics=['accuracy'])\n import time\n tic = time.time()\n from warnings import filterwarnings\n filterwarnings\n model.fit(train_X, train_y, batch_size=1024, epochs=3)\n toc = time.time()\n print('time : {:0.1f} sec '.format(toc - tic))\n<mask token>\nprint('trin_accuracy : {}'.format(train_accuracy))\nprint('test_accuracy : {}'.format(test_accuracy))\n<mask token>\nplt.imshow(test_X[26], cmap='binary')\nplt.title(class_names[test_y[26]])\n",
"step-3": "<mask token>\nprint('Num GPUs Available: ', len(tf.config.experimental.\n list_physical_devices('GPU')))\n<mask token>\ndata = keras.datasets.fashion_mnist\n(train_X, train_y), (test_X, test_y) = data.load_data()\nclass_names = ['t-shirt', 'trouser', 'pullover', 'dress', 'coat', 'sandal',\n 'shirt', 'sneaker', 'bag', 'ankle boot']\ntrain_X = train_X / 255\ntest_X = test_X / 255\nplt.imshow(train_X[7], cmap='binary')\n\n\ndef convolve(image, fltr):\n r_p = 0\n c_p = 0\n conv_list = []\n while r_p + 1 <= image.shape[0] - 1:\n while c_p + 1 <= image.shape[1] - 1:\n x = np.sum(np.multiply(image[r_p:r_p + 2, c_p:c_p + 2], fltr))\n conv_list.append(x)\n c_p += 1\n r_p += 1\n c_p = 0\n return conv_list\n\n\nimg_matrix = np.array(train.iloc[6, 1:]).reshape(28, 28)\nflt = np.matrix([[1, 1], [0, 0]])\nconv = np.array(convolve(img_matrix, flt)).reshape(27, 27)\nplt.imshow(img_matrix, cmap='gray')\nplt.show()\nplt.imshow(conv, cmap='gray')\nplt.show()\nwith tf.device('GPU:0'):\n model = keras.Sequential([keras.layers.Flatten(input_shape=(28, 28)),\n keras.layers.Dense(2560, activation='relu'), keras.layers.Dense(\n 2560, activation='relu'), keras.layers.Dense(10, activation='softmax')]\n )\n print(model.summary())\n model.compile(optimizer='adam', loss='sparse_categorical_crossentropy',\n metrics=['accuracy'])\n import time\n tic = time.time()\n from warnings import filterwarnings\n filterwarnings\n model.fit(train_X, train_y, batch_size=1024, epochs=3)\n toc = time.time()\n print('time : {:0.1f} sec '.format(toc - tic))\ntrain_loss, train_accuracy = model.evaluate(train_X, train_y, verbose=False)\ntest_loss, test_accuracy = model.evaluate(test_X, test_y, verbose=False)\nprint('trin_accuracy : {}'.format(train_accuracy))\nprint('test_accuracy : {}'.format(test_accuracy))\npredictions = model.predict(test_X)\nplt.imshow(test_X[26], cmap='binary')\nplt.title(class_names[test_y[26]])\n",
"step-4": "from __future__ import absolute_import, division, print_function, unicode_literals\nimport tensorflow as tf\nprint('Num GPUs Available: ', len(tf.config.experimental.\n list_physical_devices('GPU')))\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nfrom tensorflow import keras\ndata = keras.datasets.fashion_mnist\n(train_X, train_y), (test_X, test_y) = data.load_data()\nclass_names = ['t-shirt', 'trouser', 'pullover', 'dress', 'coat', 'sandal',\n 'shirt', 'sneaker', 'bag', 'ankle boot']\ntrain_X = train_X / 255\ntest_X = test_X / 255\nplt.imshow(train_X[7], cmap='binary')\n\n\ndef convolve(image, fltr):\n r_p = 0\n c_p = 0\n conv_list = []\n while r_p + 1 <= image.shape[0] - 1:\n while c_p + 1 <= image.shape[1] - 1:\n x = np.sum(np.multiply(image[r_p:r_p + 2, c_p:c_p + 2], fltr))\n conv_list.append(x)\n c_p += 1\n r_p += 1\n c_p = 0\n return conv_list\n\n\nimg_matrix = np.array(train.iloc[6, 1:]).reshape(28, 28)\nflt = np.matrix([[1, 1], [0, 0]])\nconv = np.array(convolve(img_matrix, flt)).reshape(27, 27)\nplt.imshow(img_matrix, cmap='gray')\nplt.show()\nplt.imshow(conv, cmap='gray')\nplt.show()\nwith tf.device('GPU:0'):\n model = keras.Sequential([keras.layers.Flatten(input_shape=(28, 28)),\n keras.layers.Dense(2560, activation='relu'), keras.layers.Dense(\n 2560, activation='relu'), keras.layers.Dense(10, activation='softmax')]\n )\n print(model.summary())\n model.compile(optimizer='adam', loss='sparse_categorical_crossentropy',\n metrics=['accuracy'])\n import time\n tic = time.time()\n from warnings import filterwarnings\n filterwarnings\n model.fit(train_X, train_y, batch_size=1024, epochs=3)\n toc = time.time()\n print('time : {:0.1f} sec '.format(toc - tic))\ntrain_loss, train_accuracy = model.evaluate(train_X, train_y, verbose=False)\ntest_loss, test_accuracy = model.evaluate(test_X, test_y, verbose=False)\nprint('trin_accuracy : {}'.format(train_accuracy))\nprint('test_accuracy : {}'.format(test_accuracy))\npredictions = model.predict(test_X)\nplt.imshow(test_X[26], cmap='binary')\nplt.title(class_names[test_y[26]])\n",
"step-5": "#!/usr/bin/env python\n# coding: utf-8\n\n# In[2]:\n\n\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport tensorflow as tf\nprint(\"Num GPUs Available: \", len(tf.config.experimental.list_physical_devices('GPU')))\n\n\n# In[1]:\n\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\n#tf.config.allow_growth = True\n#config.gpu_options.allow_growth = True\n#session = tf.Session(config=config....)\nfrom tensorflow import keras\n\n\n# In[5]:\n\n\ndata = keras.datasets.fashion_mnist\n\n(train_X, train_y), (test_X,test_y) = data.load_data()\n\nclass_names = ['t-shirt', 'trouser', 'pullover', 'dress'\n ,'coat', 'sandal', 'shirt', 'sneaker'\n , 'bag', 'ankle boot']\n\ntrain_X = train_X/255\ntest_X = test_X/255\n\n\n# In[7]:\n\n\nplt.imshow(train_X[7], cmap= 'binary')\n\n\n# In[ ]:\n\n\ndef convolve(image,fltr):\n r_p = 0\n c_p = 0\n conv_list = []\n while (r_p+1) <= image.shape[0]-1 :\n while (c_p+1) <= image.shape[1]-1 :\n x = np.sum(np.multiply(image[r_p : r_p+2 , c_p : c_p+2],fltr))\n conv_list.append(x)\n c_p += 1\n r_p += 1\n c_p = 0\n return conv_list\nimg_matrix = np.array(train.iloc[6,1:]).reshape(28,28)\nflt = np.matrix([[1,1],[0,0]])\nconv = np.array(convolve(img_matrix,flt)).reshape(27,27)\nplt.imshow(img_matrix, cmap='gray')\nplt.show()\nplt.imshow(conv, cmap='gray')\nplt.show()\n\n\n# In[33]:\n\n\nwith tf.device('GPU:0'):\n model = keras.Sequential([ \n #keras.layers.Conv2D(filters=32 ,kernel_size=3, activation='relu',input_shape=(28,28,1)),\n keras.layers.Flatten(input_shape=(28,28)),\n #keras.layers.Dense(128, activation='relu'),\n keras.layers.Dense(2560, activation='relu'),\n keras.layers.Dense(2560, activation='relu'),\n #keras.layers.Dense(2560, activation='relu'),\n keras.layers.Dense(10, activation='softmax')\n ])\n print(model.summary())\n model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])\n\n import time\n tic = time.time()\n from warnings import filterwarnings\n filterwarnings\n model.fit(train_X, train_y,batch_size=1024, epochs=3)\n toc = time.time()\n print('time : {:0.1f} sec '.format(toc-tic))\n\n\n# In[72]:\n\n\n#predictions\ntrain_loss, train_accuracy = model.evaluate(train_X, train_y,verbose=False )\ntest_loss, test_accuracy = model.evaluate(test_X, test_y, verbose = False )\n\n\n# In[73]:\n\n\nprint('trin_accuracy : {}'.format(train_accuracy))\nprint('test_accuracy : {}'.format(test_accuracy))\n\n\n# In[74]:\n\n\npredictions = model.predict(test_X)\n\n\n# In[76]:\n\n\nplt.imshow(test_X[26], cmap='binary')\nplt.title(class_names[test_y[26]])\n\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import openpyxl # 适用于xlsx文件
'''
纯文本文件 student.txt为学生信息, 里面的内容(包括花括号)如下所示:
{
"1":["张三",150,120,100],
"2":["李四",90,99,95],
"3":["王五",60,66,68]
}
请将上述内容写到 student.xls 文件中
'''
def read_file():
words = []
with open('15.txt', 'r') as file:
content = file.read()
# print(content)
# print(type(content))
word = eval(content)
# print(word)
# print(word.keys())
# for each in word.keys():
# print(each)
# print(word[each])
# print(word.values())
# print(type(word))
for i, j in zip(word.keys(), word.values()):
# print(i, j)
words.append([i, j])
print(words)
return words
def write_list(list): # 写入excel文件
wb = openpyxl.Workbook()
sheet = wb.active
sheet.title = 'test'
value = list
for i in range(0, len(value)):
for j in range(0, len(value[i])):
sheet.cell(row=i + 1, column=j + 1, value=str(value[i][j]))
wb.save('city.xlsx')
print("写入数据成功!")
if __name__ == '__main__':
# read_file()
write_list(read_file())
|
normal
|
{
"blob_id": "f75e0ddf42cc9797cdf1c4a4477e3d16441af740",
"index": 5478,
"step-1": "<mask token>\n\n\ndef write_list(list):\n wb = openpyxl.Workbook()\n sheet = wb.active\n sheet.title = 'test'\n value = list\n for i in range(0, len(value)):\n for j in range(0, len(value[i])):\n sheet.cell(row=i + 1, column=j + 1, value=str(value[i][j]))\n wb.save('city.xlsx')\n print('写入数据成功!')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef read_file():\n words = []\n with open('15.txt', 'r') as file:\n content = file.read()\n word = eval(content)\n for i, j in zip(word.keys(), word.values()):\n words.append([i, j])\n print(words)\n return words\n\n\ndef write_list(list):\n wb = openpyxl.Workbook()\n sheet = wb.active\n sheet.title = 'test'\n value = list\n for i in range(0, len(value)):\n for j in range(0, len(value[i])):\n sheet.cell(row=i + 1, column=j + 1, value=str(value[i][j]))\n wb.save('city.xlsx')\n print('写入数据成功!')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef read_file():\n words = []\n with open('15.txt', 'r') as file:\n content = file.read()\n word = eval(content)\n for i, j in zip(word.keys(), word.values()):\n words.append([i, j])\n print(words)\n return words\n\n\ndef write_list(list):\n wb = openpyxl.Workbook()\n sheet = wb.active\n sheet.title = 'test'\n value = list\n for i in range(0, len(value)):\n for j in range(0, len(value[i])):\n sheet.cell(row=i + 1, column=j + 1, value=str(value[i][j]))\n wb.save('city.xlsx')\n print('写入数据成功!')\n\n\nif __name__ == '__main__':\n write_list(read_file())\n",
"step-4": "import openpyxl\n<mask token>\n\n\ndef read_file():\n words = []\n with open('15.txt', 'r') as file:\n content = file.read()\n word = eval(content)\n for i, j in zip(word.keys(), word.values()):\n words.append([i, j])\n print(words)\n return words\n\n\ndef write_list(list):\n wb = openpyxl.Workbook()\n sheet = wb.active\n sheet.title = 'test'\n value = list\n for i in range(0, len(value)):\n for j in range(0, len(value[i])):\n sheet.cell(row=i + 1, column=j + 1, value=str(value[i][j]))\n wb.save('city.xlsx')\n print('写入数据成功!')\n\n\nif __name__ == '__main__':\n write_list(read_file())\n",
"step-5": "import openpyxl # 适用于xlsx文件\n'''\n纯文本文件 student.txt为学生信息, 里面的内容(包括花括号)如下所示:\n\n{\n\t\"1\":[\"张三\",150,120,100],\n\t\"2\":[\"李四\",90,99,95],\n\t\"3\":[\"王五\",60,66,68]\n}\n请将上述内容写到 student.xls 文件中\n'''\n\n\ndef read_file():\n words = []\n with open('15.txt', 'r') as file:\n content = file.read()\n # print(content)\n # print(type(content))\n\n word = eval(content)\n # print(word)\n # print(word.keys())\n # for each in word.keys():\n # print(each)\n # print(word[each])\n # print(word.values())\n # print(type(word))\n for i, j in zip(word.keys(), word.values()):\n # print(i, j)\n words.append([i, j])\n print(words)\n return words\n\n\ndef write_list(list): # 写入excel文件\n wb = openpyxl.Workbook()\n sheet = wb.active\n sheet.title = 'test'\n value = list\n for i in range(0, len(value)):\n for j in range(0, len(value[i])):\n sheet.cell(row=i + 1, column=j + 1, value=str(value[i][j]))\n wb.save('city.xlsx')\n print(\"写入数据成功!\")\n\n\nif __name__ == '__main__':\n # read_file()\n write_list(read_file())\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
class Base_dfCleaner(LoggerMixIn, null_clean_methodMixIn, df_plotterMixIn):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __init__(self, df: DF, df_Xs_keys, df_Ys_key, silent=False, verbose=0):
LoggerMixIn.__init__(self, verbose)
null_clean_methodMixIn.__init__(self)
df_plotterMixIn.__init__(self)
self.df = df
self.silent = silent
self.df_Xs_keys = df_Xs_keys
self.df_Ys_key = df_Ys_key
self.plot = PlotTools()
def __method_template(self, df: DF, col_key: str, col: DF, series:
Series, Xs_key: list, Ys_key: list):
return df
@property
def method_template(self):
method_template = inspect.getsource(self.__method_template)
method_template = method_template.replace('__method_template',
'{col_name}')
return method_template
<|reserved_special_token_0|>
def clean(self) ->DF:
for key, val in self.__class__.__dict__.items():
if key in self.df.keys():
col = self.df[[key]]
series = self.df[key]
self.df = val(self, self.df, key, col, series, self.
df_Xs_keys, self.df_Ys_key)
return self.df
<|reserved_special_token_0|>
def null_cols_plot(self):
df_only_null = self._df_null_include(self.df)
self._df_cols_plot(df_only_null, list(df_only_null.keys()), self.
df_Ys_key)
@staticmethod
def _df_null_include(df: DF) ->DF:
null_column = df.columns[df.isna().any()].tolist()
return df.loc[:, null_column]
def _str_null_col_info(self, df: DF, key) ->str:
ret = []
col = df[[key]]
series = df[key]
na_count = series.isna().sum()
total = len(col)
ret += [
f'column : "{key}", null ratio:{float(na_count) / float(total):.4f}%, {na_count}/{total}(null/total)'
]
ret += [col.describe()]
ret += ['value_counts']
ret += [series.value_counts()]
groupby = df[[key, self.df_Ys_key]].groupby(key).agg(['mean', 'std',
'min', 'max', 'count'])
ret += [groupby]
return '\n'.join(map(str, ret))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class null_clean_methodMixIn:
@staticmethod
def drop_col(df: DF, key):
return df.drop(key, axis=1)
<|reserved_special_token_0|>
@staticmethod
def fill_random_value_cate(df: DF, key) ->DF:
values = df[key].value_counts().keys()
df[key] = df[key].transform(lambda x: x.fillna(np.random.choice(
values)))
return df
@staticmethod
def fill_rate_value_cate(df: DF, key) ->DF:
values, count = zip(*list(df[key].value_counts().items()))
p = np.array(count) / np.sum(count)
df[key] = df[key].transform(lambda x: x.fillna(np.random.choice(
values, p=p)))
return df
class Base_dfCleaner(LoggerMixIn, null_clean_methodMixIn, df_plotterMixIn):
import_code = """
import pandas as pd
import numpy as np
import random
from script.data_handler.Base_dfCleaner import Base_dfCleaner
DF = pd.DataFrame
Series = pd.Series
"""
class_template = '\nclass dfCleaner(Base_dfCleaner):\n'
def __init__(self, df: DF, df_Xs_keys, df_Ys_key, silent=False, verbose=0):
LoggerMixIn.__init__(self, verbose)
null_clean_methodMixIn.__init__(self)
df_plotterMixIn.__init__(self)
self.df = df
self.silent = silent
self.df_Xs_keys = df_Xs_keys
self.df_Ys_key = df_Ys_key
self.plot = PlotTools()
def __method_template(self, df: DF, col_key: str, col: DF, series:
Series, Xs_key: list, Ys_key: list):
return df
@property
def method_template(self):
method_template = inspect.getsource(self.__method_template)
method_template = method_template.replace('__method_template',
'{col_name}')
return method_template
def boilerplate_maker(self, path=None, encoding='UTF8'):
code = [self.import_code]
code += [self.class_template]
df_only_null = self._df_null_include(self.df)
for key in df_only_null.keys():
code += [self.method_template.format(col_name=key)]
code = '\n'.join(code)
if path is not None:
with open(path, mode='w', encoding=encoding) as f:
f.write(code)
return code
def clean(self) ->DF:
for key, val in self.__class__.__dict__.items():
if key in self.df.keys():
col = self.df[[key]]
series = self.df[key]
self.df = val(self, self.df, key, col, series, self.
df_Xs_keys, self.df_Ys_key)
return self.df
def null_cols_info(self) ->str:
ret = []
for key, val in list(self.__class__.__dict__.items()):
if key in self.df.keys():
info = self._str_null_col_info(self.df, key)
ret += [info]
return '\n\n'.join(ret)
def null_cols_plot(self):
df_only_null = self._df_null_include(self.df)
self._df_cols_plot(df_only_null, list(df_only_null.keys()), self.
df_Ys_key)
@staticmethod
def _df_null_include(df: DF) ->DF:
null_column = df.columns[df.isna().any()].tolist()
return df.loc[:, null_column]
def _str_null_col_info(self, df: DF, key) ->str:
ret = []
col = df[[key]]
series = df[key]
na_count = series.isna().sum()
total = len(col)
ret += [
f'column : "{key}", null ratio:{float(na_count) / float(total):.4f}%, {na_count}/{total}(null/total)'
]
ret += [col.describe()]
ret += ['value_counts']
ret += [series.value_counts()]
groupby = df[[key, self.df_Ys_key]].groupby(key).agg(['mean', 'std',
'min', 'max', 'count'])
ret += [groupby]
return '\n'.join(map(str, ret))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class null_clean_methodMixIn:
@staticmethod
def drop_col(df: DF, key):
return df.drop(key, axis=1)
@staticmethod
def fill_major_value_cate(df: DF, key) ->DF:
major_value = df[key].astype(str).describe()['top']
df[key] = df[key].fillna(major_value)
return df
@staticmethod
def fill_random_value_cate(df: DF, key) ->DF:
values = df[key].value_counts().keys()
df[key] = df[key].transform(lambda x: x.fillna(np.random.choice(
values)))
return df
@staticmethod
def fill_rate_value_cate(df: DF, key) ->DF:
values, count = zip(*list(df[key].value_counts().items()))
p = np.array(count) / np.sum(count)
df[key] = df[key].transform(lambda x: x.fillna(np.random.choice(
values, p=p)))
return df
class Base_dfCleaner(LoggerMixIn, null_clean_methodMixIn, df_plotterMixIn):
import_code = """
import pandas as pd
import numpy as np
import random
from script.data_handler.Base_dfCleaner import Base_dfCleaner
DF = pd.DataFrame
Series = pd.Series
"""
class_template = '\nclass dfCleaner(Base_dfCleaner):\n'
def __init__(self, df: DF, df_Xs_keys, df_Ys_key, silent=False, verbose=0):
LoggerMixIn.__init__(self, verbose)
null_clean_methodMixIn.__init__(self)
df_plotterMixIn.__init__(self)
self.df = df
self.silent = silent
self.df_Xs_keys = df_Xs_keys
self.df_Ys_key = df_Ys_key
self.plot = PlotTools()
def __method_template(self, df: DF, col_key: str, col: DF, series:
Series, Xs_key: list, Ys_key: list):
return df
@property
def method_template(self):
method_template = inspect.getsource(self.__method_template)
method_template = method_template.replace('__method_template',
'{col_name}')
return method_template
def boilerplate_maker(self, path=None, encoding='UTF8'):
code = [self.import_code]
code += [self.class_template]
df_only_null = self._df_null_include(self.df)
for key in df_only_null.keys():
code += [self.method_template.format(col_name=key)]
code = '\n'.join(code)
if path is not None:
with open(path, mode='w', encoding=encoding) as f:
f.write(code)
return code
def clean(self) ->DF:
for key, val in self.__class__.__dict__.items():
if key in self.df.keys():
col = self.df[[key]]
series = self.df[key]
self.df = val(self, self.df, key, col, series, self.
df_Xs_keys, self.df_Ys_key)
return self.df
def null_cols_info(self) ->str:
ret = []
for key, val in list(self.__class__.__dict__.items()):
if key in self.df.keys():
info = self._str_null_col_info(self.df, key)
ret += [info]
return '\n\n'.join(ret)
def null_cols_plot(self):
df_only_null = self._df_null_include(self.df)
self._df_cols_plot(df_only_null, list(df_only_null.keys()), self.
df_Ys_key)
@staticmethod
def _df_null_include(df: DF) ->DF:
null_column = df.columns[df.isna().any()].tolist()
return df.loc[:, null_column]
def _str_null_col_info(self, df: DF, key) ->str:
ret = []
col = df[[key]]
series = df[key]
na_count = series.isna().sum()
total = len(col)
ret += [
f'column : "{key}", null ratio:{float(na_count) / float(total):.4f}%, {na_count}/{total}(null/total)'
]
ret += [col.describe()]
ret += ['value_counts']
ret += [series.value_counts()]
groupby = df[[key, self.df_Ys_key]].groupby(key).agg(['mean', 'std',
'min', 'max', 'count'])
ret += [groupby]
return '\n'.join(map(str, ret))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
DF = pd.DataFrame
Series = pd.Series
class null_clean_methodMixIn:
@staticmethod
def drop_col(df: DF, key):
return df.drop(key, axis=1)
@staticmethod
def fill_major_value_cate(df: DF, key) ->DF:
major_value = df[key].astype(str).describe()['top']
df[key] = df[key].fillna(major_value)
return df
@staticmethod
def fill_random_value_cate(df: DF, key) ->DF:
values = df[key].value_counts().keys()
df[key] = df[key].transform(lambda x: x.fillna(np.random.choice(
values)))
return df
@staticmethod
def fill_rate_value_cate(df: DF, key) ->DF:
values, count = zip(*list(df[key].value_counts().items()))
p = np.array(count) / np.sum(count)
df[key] = df[key].transform(lambda x: x.fillna(np.random.choice(
values, p=p)))
return df
class Base_dfCleaner(LoggerMixIn, null_clean_methodMixIn, df_plotterMixIn):
import_code = """
import pandas as pd
import numpy as np
import random
from script.data_handler.Base_dfCleaner import Base_dfCleaner
DF = pd.DataFrame
Series = pd.Series
"""
class_template = '\nclass dfCleaner(Base_dfCleaner):\n'
def __init__(self, df: DF, df_Xs_keys, df_Ys_key, silent=False, verbose=0):
LoggerMixIn.__init__(self, verbose)
null_clean_methodMixIn.__init__(self)
df_plotterMixIn.__init__(self)
self.df = df
self.silent = silent
self.df_Xs_keys = df_Xs_keys
self.df_Ys_key = df_Ys_key
self.plot = PlotTools()
def __method_template(self, df: DF, col_key: str, col: DF, series:
Series, Xs_key: list, Ys_key: list):
return df
@property
def method_template(self):
method_template = inspect.getsource(self.__method_template)
method_template = method_template.replace('__method_template',
'{col_name}')
return method_template
def boilerplate_maker(self, path=None, encoding='UTF8'):
code = [self.import_code]
code += [self.class_template]
df_only_null = self._df_null_include(self.df)
for key in df_only_null.keys():
code += [self.method_template.format(col_name=key)]
code = '\n'.join(code)
if path is not None:
with open(path, mode='w', encoding=encoding) as f:
f.write(code)
return code
def clean(self) ->DF:
for key, val in self.__class__.__dict__.items():
if key in self.df.keys():
col = self.df[[key]]
series = self.df[key]
self.df = val(self, self.df, key, col, series, self.
df_Xs_keys, self.df_Ys_key)
return self.df
def null_cols_info(self) ->str:
ret = []
for key, val in list(self.__class__.__dict__.items()):
if key in self.df.keys():
info = self._str_null_col_info(self.df, key)
ret += [info]
return '\n\n'.join(ret)
def null_cols_plot(self):
df_only_null = self._df_null_include(self.df)
self._df_cols_plot(df_only_null, list(df_only_null.keys()), self.
df_Ys_key)
@staticmethod
def _df_null_include(df: DF) ->DF:
null_column = df.columns[df.isna().any()].tolist()
return df.loc[:, null_column]
def _str_null_col_info(self, df: DF, key) ->str:
ret = []
col = df[[key]]
series = df[key]
na_count = series.isna().sum()
total = len(col)
ret += [
f'column : "{key}", null ratio:{float(na_count) / float(total):.4f}%, {na_count}/{total}(null/total)'
]
ret += [col.describe()]
ret += ['value_counts']
ret += [series.value_counts()]
groupby = df[[key, self.df_Ys_key]].groupby(key).agg(['mean', 'std',
'min', 'max', 'count'])
ret += [groupby]
return '\n'.join(map(str, ret))
<|reserved_special_token_1|>
import pandas as pd
import numpy as np
import inspect
from script.data_handler.Base.df_plotterMixIn import df_plotterMixIn
from script.util.MixIn import LoggerMixIn
from script.util.PlotTools import PlotTools
DF = pd.DataFrame
Series = pd.Series
class null_clean_methodMixIn:
@staticmethod
def drop_col(df: DF, key):
return df.drop(key, axis=1)
@staticmethod
def fill_major_value_cate(df: DF, key) -> DF:
major_value = df[key].astype(str).describe()['top']
df[key] = df[key].fillna(major_value)
return df
@staticmethod
def fill_random_value_cate(df: DF, key) -> DF:
values = df[key].value_counts().keys()
df[key] = df[key].transform(lambda x: x.fillna(np.random.choice(values)))
# df[key] = df[key].fillna()
return df
@staticmethod
def fill_rate_value_cate(df: DF, key) -> DF:
values, count = zip(*list(df[key].value_counts().items()))
p = np.array(count) / np.sum(count)
df[key] = df[key].transform(lambda x: x.fillna(np.random.choice(values, p=p)))
return df
class Base_dfCleaner(LoggerMixIn, null_clean_methodMixIn, df_plotterMixIn):
import_code = """
import pandas as pd
import numpy as np
import random
from script.data_handler.Base_dfCleaner import Base_dfCleaner
DF = pd.DataFrame
Series = pd.Series
"""
class_template = """
class dfCleaner(Base_dfCleaner):
"""
def __init__(self, df: DF, df_Xs_keys, df_Ys_key, silent=False, verbose=0):
LoggerMixIn.__init__(self, verbose)
null_clean_methodMixIn.__init__(self)
df_plotterMixIn.__init__(self)
self.df = df
self.silent = silent
self.df_Xs_keys = df_Xs_keys
self.df_Ys_key = df_Ys_key
self.plot = PlotTools()
def __method_template(self, df: DF, col_key: str, col: DF, series: Series, Xs_key: list, Ys_key: list):
return df
@property
def method_template(self):
method_template = inspect.getsource(self.__method_template)
method_template = method_template.replace('__method_template', '{col_name}')
return method_template
def boilerplate_maker(self, path=None, encoding='UTF8'):
code = [self.import_code]
code += [self.class_template]
df_only_null = self._df_null_include(self.df)
for key in df_only_null.keys():
code += [self.method_template.format(col_name=key)]
code = "\n".join(code)
if path is not None:
with open(path, mode='w', encoding=encoding) as f:
f.write(code)
return code
def clean(self) -> DF:
for key, val in self.__class__.__dict__.items():
if key in self.df.keys():
col = self.df[[key]]
series = self.df[key]
self.df = val(self, self.df, key, col, series, self.df_Xs_keys, self.df_Ys_key)
return self.df
def null_cols_info(self) -> str:
ret = []
for key, val in list(self.__class__.__dict__.items()):
if key in self.df.keys():
info = self._str_null_col_info(self.df, key)
ret += [info]
return "\n\n".join(ret)
def null_cols_plot(self):
df_only_null = self._df_null_include(self.df)
self._df_cols_plot(df_only_null, list(df_only_null.keys()), self.df_Ys_key)
@staticmethod
def _df_null_include(df: DF) -> DF:
null_column = df.columns[df.isna().any()].tolist()
return df.loc[:, null_column]
def _str_null_col_info(self, df: DF, key) -> str:
ret = []
col = df[[key]]
series = df[key]
na_count = series.isna().sum()
total = len(col)
ret += [f'column : "{key}", null ratio:{float(na_count)/float(total):.4f}%, {na_count}/{total}(null/total)']
ret += [col.describe()]
ret += ['value_counts']
ret += [series.value_counts()]
groupby = df[[key, self.df_Ys_key]].groupby(key).agg(['mean', 'std', 'min', 'max', 'count'])
ret += [groupby]
return "\n".join(map(str, ret))
|
flexible
|
{
"blob_id": "198beb5a17575d781f7bce1ab36a6213ad7331b3",
"index": 5853,
"step-1": "<mask token>\n\n\nclass Base_dfCleaner(LoggerMixIn, null_clean_methodMixIn, df_plotterMixIn):\n <mask token>\n <mask token>\n\n def __init__(self, df: DF, df_Xs_keys, df_Ys_key, silent=False, verbose=0):\n LoggerMixIn.__init__(self, verbose)\n null_clean_methodMixIn.__init__(self)\n df_plotterMixIn.__init__(self)\n self.df = df\n self.silent = silent\n self.df_Xs_keys = df_Xs_keys\n self.df_Ys_key = df_Ys_key\n self.plot = PlotTools()\n\n def __method_template(self, df: DF, col_key: str, col: DF, series:\n Series, Xs_key: list, Ys_key: list):\n return df\n\n @property\n def method_template(self):\n method_template = inspect.getsource(self.__method_template)\n method_template = method_template.replace('__method_template',\n '{col_name}')\n return method_template\n <mask token>\n\n def clean(self) ->DF:\n for key, val in self.__class__.__dict__.items():\n if key in self.df.keys():\n col = self.df[[key]]\n series = self.df[key]\n self.df = val(self, self.df, key, col, series, self.\n df_Xs_keys, self.df_Ys_key)\n return self.df\n <mask token>\n\n def null_cols_plot(self):\n df_only_null = self._df_null_include(self.df)\n self._df_cols_plot(df_only_null, list(df_only_null.keys()), self.\n df_Ys_key)\n\n @staticmethod\n def _df_null_include(df: DF) ->DF:\n null_column = df.columns[df.isna().any()].tolist()\n return df.loc[:, null_column]\n\n def _str_null_col_info(self, df: DF, key) ->str:\n ret = []\n col = df[[key]]\n series = df[key]\n na_count = series.isna().sum()\n total = len(col)\n ret += [\n f'column : \"{key}\", null ratio:{float(na_count) / float(total):.4f}%, {na_count}/{total}(null/total)'\n ]\n ret += [col.describe()]\n ret += ['value_counts']\n ret += [series.value_counts()]\n groupby = df[[key, self.df_Ys_key]].groupby(key).agg(['mean', 'std',\n 'min', 'max', 'count'])\n ret += [groupby]\n return '\\n'.join(map(str, ret))\n",
"step-2": "<mask token>\n\n\nclass null_clean_methodMixIn:\n\n @staticmethod\n def drop_col(df: DF, key):\n return df.drop(key, axis=1)\n <mask token>\n\n @staticmethod\n def fill_random_value_cate(df: DF, key) ->DF:\n values = df[key].value_counts().keys()\n df[key] = df[key].transform(lambda x: x.fillna(np.random.choice(\n values)))\n return df\n\n @staticmethod\n def fill_rate_value_cate(df: DF, key) ->DF:\n values, count = zip(*list(df[key].value_counts().items()))\n p = np.array(count) / np.sum(count)\n df[key] = df[key].transform(lambda x: x.fillna(np.random.choice(\n values, p=p)))\n return df\n\n\nclass Base_dfCleaner(LoggerMixIn, null_clean_methodMixIn, df_plotterMixIn):\n import_code = \"\"\"\n import pandas as pd\n import numpy as np\n import random\n from script.data_handler.Base_dfCleaner import Base_dfCleaner \n\n DF = pd.DataFrame\n Series = pd.Series\n \n\"\"\"\n class_template = '\\nclass dfCleaner(Base_dfCleaner):\\n'\n\n def __init__(self, df: DF, df_Xs_keys, df_Ys_key, silent=False, verbose=0):\n LoggerMixIn.__init__(self, verbose)\n null_clean_methodMixIn.__init__(self)\n df_plotterMixIn.__init__(self)\n self.df = df\n self.silent = silent\n self.df_Xs_keys = df_Xs_keys\n self.df_Ys_key = df_Ys_key\n self.plot = PlotTools()\n\n def __method_template(self, df: DF, col_key: str, col: DF, series:\n Series, Xs_key: list, Ys_key: list):\n return df\n\n @property\n def method_template(self):\n method_template = inspect.getsource(self.__method_template)\n method_template = method_template.replace('__method_template',\n '{col_name}')\n return method_template\n\n def boilerplate_maker(self, path=None, encoding='UTF8'):\n code = [self.import_code]\n code += [self.class_template]\n df_only_null = self._df_null_include(self.df)\n for key in df_only_null.keys():\n code += [self.method_template.format(col_name=key)]\n code = '\\n'.join(code)\n if path is not None:\n with open(path, mode='w', encoding=encoding) as f:\n f.write(code)\n return code\n\n def clean(self) ->DF:\n for key, val in self.__class__.__dict__.items():\n if key in self.df.keys():\n col = self.df[[key]]\n series = self.df[key]\n self.df = val(self, self.df, key, col, series, self.\n df_Xs_keys, self.df_Ys_key)\n return self.df\n\n def null_cols_info(self) ->str:\n ret = []\n for key, val in list(self.__class__.__dict__.items()):\n if key in self.df.keys():\n info = self._str_null_col_info(self.df, key)\n ret += [info]\n return '\\n\\n'.join(ret)\n\n def null_cols_plot(self):\n df_only_null = self._df_null_include(self.df)\n self._df_cols_plot(df_only_null, list(df_only_null.keys()), self.\n df_Ys_key)\n\n @staticmethod\n def _df_null_include(df: DF) ->DF:\n null_column = df.columns[df.isna().any()].tolist()\n return df.loc[:, null_column]\n\n def _str_null_col_info(self, df: DF, key) ->str:\n ret = []\n col = df[[key]]\n series = df[key]\n na_count = series.isna().sum()\n total = len(col)\n ret += [\n f'column : \"{key}\", null ratio:{float(na_count) / float(total):.4f}%, {na_count}/{total}(null/total)'\n ]\n ret += [col.describe()]\n ret += ['value_counts']\n ret += [series.value_counts()]\n groupby = df[[key, self.df_Ys_key]].groupby(key).agg(['mean', 'std',\n 'min', 'max', 'count'])\n ret += [groupby]\n return '\\n'.join(map(str, ret))\n",
"step-3": "<mask token>\n\n\nclass null_clean_methodMixIn:\n\n @staticmethod\n def drop_col(df: DF, key):\n return df.drop(key, axis=1)\n\n @staticmethod\n def fill_major_value_cate(df: DF, key) ->DF:\n major_value = df[key].astype(str).describe()['top']\n df[key] = df[key].fillna(major_value)\n return df\n\n @staticmethod\n def fill_random_value_cate(df: DF, key) ->DF:\n values = df[key].value_counts().keys()\n df[key] = df[key].transform(lambda x: x.fillna(np.random.choice(\n values)))\n return df\n\n @staticmethod\n def fill_rate_value_cate(df: DF, key) ->DF:\n values, count = zip(*list(df[key].value_counts().items()))\n p = np.array(count) / np.sum(count)\n df[key] = df[key].transform(lambda x: x.fillna(np.random.choice(\n values, p=p)))\n return df\n\n\nclass Base_dfCleaner(LoggerMixIn, null_clean_methodMixIn, df_plotterMixIn):\n import_code = \"\"\"\n import pandas as pd\n import numpy as np\n import random\n from script.data_handler.Base_dfCleaner import Base_dfCleaner \n\n DF = pd.DataFrame\n Series = pd.Series\n \n\"\"\"\n class_template = '\\nclass dfCleaner(Base_dfCleaner):\\n'\n\n def __init__(self, df: DF, df_Xs_keys, df_Ys_key, silent=False, verbose=0):\n LoggerMixIn.__init__(self, verbose)\n null_clean_methodMixIn.__init__(self)\n df_plotterMixIn.__init__(self)\n self.df = df\n self.silent = silent\n self.df_Xs_keys = df_Xs_keys\n self.df_Ys_key = df_Ys_key\n self.plot = PlotTools()\n\n def __method_template(self, df: DF, col_key: str, col: DF, series:\n Series, Xs_key: list, Ys_key: list):\n return df\n\n @property\n def method_template(self):\n method_template = inspect.getsource(self.__method_template)\n method_template = method_template.replace('__method_template',\n '{col_name}')\n return method_template\n\n def boilerplate_maker(self, path=None, encoding='UTF8'):\n code = [self.import_code]\n code += [self.class_template]\n df_only_null = self._df_null_include(self.df)\n for key in df_only_null.keys():\n code += [self.method_template.format(col_name=key)]\n code = '\\n'.join(code)\n if path is not None:\n with open(path, mode='w', encoding=encoding) as f:\n f.write(code)\n return code\n\n def clean(self) ->DF:\n for key, val in self.__class__.__dict__.items():\n if key in self.df.keys():\n col = self.df[[key]]\n series = self.df[key]\n self.df = val(self, self.df, key, col, series, self.\n df_Xs_keys, self.df_Ys_key)\n return self.df\n\n def null_cols_info(self) ->str:\n ret = []\n for key, val in list(self.__class__.__dict__.items()):\n if key in self.df.keys():\n info = self._str_null_col_info(self.df, key)\n ret += [info]\n return '\\n\\n'.join(ret)\n\n def null_cols_plot(self):\n df_only_null = self._df_null_include(self.df)\n self._df_cols_plot(df_only_null, list(df_only_null.keys()), self.\n df_Ys_key)\n\n @staticmethod\n def _df_null_include(df: DF) ->DF:\n null_column = df.columns[df.isna().any()].tolist()\n return df.loc[:, null_column]\n\n def _str_null_col_info(self, df: DF, key) ->str:\n ret = []\n col = df[[key]]\n series = df[key]\n na_count = series.isna().sum()\n total = len(col)\n ret += [\n f'column : \"{key}\", null ratio:{float(na_count) / float(total):.4f}%, {na_count}/{total}(null/total)'\n ]\n ret += [col.describe()]\n ret += ['value_counts']\n ret += [series.value_counts()]\n groupby = df[[key, self.df_Ys_key]].groupby(key).agg(['mean', 'std',\n 'min', 'max', 'count'])\n ret += [groupby]\n return '\\n'.join(map(str, ret))\n",
"step-4": "<mask token>\nDF = pd.DataFrame\nSeries = pd.Series\n\n\nclass null_clean_methodMixIn:\n\n @staticmethod\n def drop_col(df: DF, key):\n return df.drop(key, axis=1)\n\n @staticmethod\n def fill_major_value_cate(df: DF, key) ->DF:\n major_value = df[key].astype(str).describe()['top']\n df[key] = df[key].fillna(major_value)\n return df\n\n @staticmethod\n def fill_random_value_cate(df: DF, key) ->DF:\n values = df[key].value_counts().keys()\n df[key] = df[key].transform(lambda x: x.fillna(np.random.choice(\n values)))\n return df\n\n @staticmethod\n def fill_rate_value_cate(df: DF, key) ->DF:\n values, count = zip(*list(df[key].value_counts().items()))\n p = np.array(count) / np.sum(count)\n df[key] = df[key].transform(lambda x: x.fillna(np.random.choice(\n values, p=p)))\n return df\n\n\nclass Base_dfCleaner(LoggerMixIn, null_clean_methodMixIn, df_plotterMixIn):\n import_code = \"\"\"\n import pandas as pd\n import numpy as np\n import random\n from script.data_handler.Base_dfCleaner import Base_dfCleaner \n\n DF = pd.DataFrame\n Series = pd.Series\n \n\"\"\"\n class_template = '\\nclass dfCleaner(Base_dfCleaner):\\n'\n\n def __init__(self, df: DF, df_Xs_keys, df_Ys_key, silent=False, verbose=0):\n LoggerMixIn.__init__(self, verbose)\n null_clean_methodMixIn.__init__(self)\n df_plotterMixIn.__init__(self)\n self.df = df\n self.silent = silent\n self.df_Xs_keys = df_Xs_keys\n self.df_Ys_key = df_Ys_key\n self.plot = PlotTools()\n\n def __method_template(self, df: DF, col_key: str, col: DF, series:\n Series, Xs_key: list, Ys_key: list):\n return df\n\n @property\n def method_template(self):\n method_template = inspect.getsource(self.__method_template)\n method_template = method_template.replace('__method_template',\n '{col_name}')\n return method_template\n\n def boilerplate_maker(self, path=None, encoding='UTF8'):\n code = [self.import_code]\n code += [self.class_template]\n df_only_null = self._df_null_include(self.df)\n for key in df_only_null.keys():\n code += [self.method_template.format(col_name=key)]\n code = '\\n'.join(code)\n if path is not None:\n with open(path, mode='w', encoding=encoding) as f:\n f.write(code)\n return code\n\n def clean(self) ->DF:\n for key, val in self.__class__.__dict__.items():\n if key in self.df.keys():\n col = self.df[[key]]\n series = self.df[key]\n self.df = val(self, self.df, key, col, series, self.\n df_Xs_keys, self.df_Ys_key)\n return self.df\n\n def null_cols_info(self) ->str:\n ret = []\n for key, val in list(self.__class__.__dict__.items()):\n if key in self.df.keys():\n info = self._str_null_col_info(self.df, key)\n ret += [info]\n return '\\n\\n'.join(ret)\n\n def null_cols_plot(self):\n df_only_null = self._df_null_include(self.df)\n self._df_cols_plot(df_only_null, list(df_only_null.keys()), self.\n df_Ys_key)\n\n @staticmethod\n def _df_null_include(df: DF) ->DF:\n null_column = df.columns[df.isna().any()].tolist()\n return df.loc[:, null_column]\n\n def _str_null_col_info(self, df: DF, key) ->str:\n ret = []\n col = df[[key]]\n series = df[key]\n na_count = series.isna().sum()\n total = len(col)\n ret += [\n f'column : \"{key}\", null ratio:{float(na_count) / float(total):.4f}%, {na_count}/{total}(null/total)'\n ]\n ret += [col.describe()]\n ret += ['value_counts']\n ret += [series.value_counts()]\n groupby = df[[key, self.df_Ys_key]].groupby(key).agg(['mean', 'std',\n 'min', 'max', 'count'])\n ret += [groupby]\n return '\\n'.join(map(str, ret))\n",
"step-5": "import pandas as pd\nimport numpy as np\nimport inspect\n\nfrom script.data_handler.Base.df_plotterMixIn import df_plotterMixIn\nfrom script.util.MixIn import LoggerMixIn\nfrom script.util.PlotTools import PlotTools\n\nDF = pd.DataFrame\nSeries = pd.Series\n\n\nclass null_clean_methodMixIn:\n @staticmethod\n def drop_col(df: DF, key):\n return df.drop(key, axis=1)\n\n @staticmethod\n def fill_major_value_cate(df: DF, key) -> DF:\n major_value = df[key].astype(str).describe()['top']\n df[key] = df[key].fillna(major_value)\n return df\n\n @staticmethod\n def fill_random_value_cate(df: DF, key) -> DF:\n values = df[key].value_counts().keys()\n df[key] = df[key].transform(lambda x: x.fillna(np.random.choice(values)))\n # df[key] = df[key].fillna()\n return df\n\n @staticmethod\n def fill_rate_value_cate(df: DF, key) -> DF:\n values, count = zip(*list(df[key].value_counts().items()))\n p = np.array(count) / np.sum(count)\n df[key] = df[key].transform(lambda x: x.fillna(np.random.choice(values, p=p)))\n return df\n\n\nclass Base_dfCleaner(LoggerMixIn, null_clean_methodMixIn, df_plotterMixIn):\n import_code = \"\"\"\n import pandas as pd\n import numpy as np\n import random\n from script.data_handler.Base_dfCleaner import Base_dfCleaner \n\n DF = pd.DataFrame\n Series = pd.Series\n \n\"\"\"\n\n class_template = \"\"\"\nclass dfCleaner(Base_dfCleaner):\n\"\"\"\n\n def __init__(self, df: DF, df_Xs_keys, df_Ys_key, silent=False, verbose=0):\n LoggerMixIn.__init__(self, verbose)\n null_clean_methodMixIn.__init__(self)\n df_plotterMixIn.__init__(self)\n\n self.df = df\n self.silent = silent\n self.df_Xs_keys = df_Xs_keys\n self.df_Ys_key = df_Ys_key\n self.plot = PlotTools()\n\n def __method_template(self, df: DF, col_key: str, col: DF, series: Series, Xs_key: list, Ys_key: list):\n return df\n\n @property\n def method_template(self):\n method_template = inspect.getsource(self.__method_template)\n method_template = method_template.replace('__method_template', '{col_name}')\n return method_template\n\n def boilerplate_maker(self, path=None, encoding='UTF8'):\n code = [self.import_code]\n code += [self.class_template]\n\n df_only_null = self._df_null_include(self.df)\n for key in df_only_null.keys():\n code += [self.method_template.format(col_name=key)]\n\n code = \"\\n\".join(code)\n if path is not None:\n with open(path, mode='w', encoding=encoding) as f:\n f.write(code)\n\n return code\n\n def clean(self) -> DF:\n for key, val in self.__class__.__dict__.items():\n if key in self.df.keys():\n col = self.df[[key]]\n series = self.df[key]\n\n self.df = val(self, self.df, key, col, series, self.df_Xs_keys, self.df_Ys_key)\n\n return self.df\n\n def null_cols_info(self) -> str:\n ret = []\n for key, val in list(self.__class__.__dict__.items()):\n if key in self.df.keys():\n info = self._str_null_col_info(self.df, key)\n ret += [info]\n\n return \"\\n\\n\".join(ret)\n\n def null_cols_plot(self):\n df_only_null = self._df_null_include(self.df)\n self._df_cols_plot(df_only_null, list(df_only_null.keys()), self.df_Ys_key)\n\n @staticmethod\n def _df_null_include(df: DF) -> DF:\n null_column = df.columns[df.isna().any()].tolist()\n return df.loc[:, null_column]\n\n def _str_null_col_info(self, df: DF, key) -> str:\n ret = []\n col = df[[key]]\n series = df[key]\n\n na_count = series.isna().sum()\n total = len(col)\n ret += [f'column : \"{key}\", null ratio:{float(na_count)/float(total):.4f}%, {na_count}/{total}(null/total)']\n ret += [col.describe()]\n ret += ['value_counts']\n ret += [series.value_counts()]\n groupby = df[[key, self.df_Ys_key]].groupby(key).agg(['mean', 'std', 'min', 'max', 'count'])\n ret += [groupby]\n\n return \"\\n\".join(map(str, ret))\n",
"step-ids": [
8,
15,
16,
17,
19
]
}
|
[
8,
15,
16,
17,
19
] |
import pandas as pd
triples = pd.read_csv("SollTripel.csv", sep=",", skip_blank_lines=True, skipinitialspace=True)
triples.columns = ["triple", "found"]
triples = triples["#" not in triples.triple]
print(triples)
|
normal
|
{
"blob_id": "97afa67cbe20900e2388994481abebe772e22818",
"index": 5301,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(triples)\n",
"step-3": "<mask token>\ntriples = pd.read_csv('SollTripel.csv', sep=',', skip_blank_lines=True,\n skipinitialspace=True)\ntriples.columns = ['triple', 'found']\ntriples = triples['#' not in triples.triple]\nprint(triples)\n",
"step-4": "import pandas as pd\ntriples = pd.read_csv('SollTripel.csv', sep=',', skip_blank_lines=True,\n skipinitialspace=True)\ntriples.columns = ['triple', 'found']\ntriples = triples['#' not in triples.triple]\nprint(triples)\n",
"step-5": "import pandas as pd\n\ntriples = pd.read_csv(\"SollTripel.csv\", sep=\",\", skip_blank_lines=True, skipinitialspace=True)\ntriples.columns = [\"triple\", \"found\"]\ntriples = triples[\"#\" not in triples.triple]\n\nprint(triples)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from __future__ import unicode_literals
import frappe, json
def execute():
for ps in frappe.get_all('Property Setter', filters={'property': '_idx'},
fields = ['doc_type', 'value']):
custom_fields = frappe.get_all('Custom Field',
filters = {'dt': ps.doc_type}, fields=['name', 'fieldname'])
if custom_fields:
_idx = json.loads(ps.value)
for custom_field in custom_fields:
if custom_field.fieldname in _idx:
custom_field_idx = _idx.index(custom_field.fieldname)
if custom_field_idx == 0:
prev_fieldname = ""
else:
prev_fieldname = _idx[custom_field_idx - 1]
else:
prev_fieldname = _idx[-1]
custom_field_idx = len(_idx)
frappe.db.set_value('Custom Field', custom_field.name, 'insert_after', prev_fieldname)
frappe.db.set_value('Custom Field', custom_field.name, 'idx', custom_field_idx)
|
normal
|
{
"blob_id": "6f951815d0edafb08e7734d0e95e6564ab1be1f7",
"index": 2375,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef execute():\n for ps in frappe.get_all('Property Setter', filters={'property': '_idx'\n }, fields=['doc_type', 'value']):\n custom_fields = frappe.get_all('Custom Field', filters={'dt': ps.\n doc_type}, fields=['name', 'fieldname'])\n if custom_fields:\n _idx = json.loads(ps.value)\n for custom_field in custom_fields:\n if custom_field.fieldname in _idx:\n custom_field_idx = _idx.index(custom_field.fieldname)\n if custom_field_idx == 0:\n prev_fieldname = ''\n else:\n prev_fieldname = _idx[custom_field_idx - 1]\n else:\n prev_fieldname = _idx[-1]\n custom_field_idx = len(_idx)\n frappe.db.set_value('Custom Field', custom_field.name,\n 'insert_after', prev_fieldname)\n frappe.db.set_value('Custom Field', custom_field.name,\n 'idx', custom_field_idx)\n",
"step-3": "from __future__ import unicode_literals\nimport frappe, json\n\n\ndef execute():\n for ps in frappe.get_all('Property Setter', filters={'property': '_idx'\n }, fields=['doc_type', 'value']):\n custom_fields = frappe.get_all('Custom Field', filters={'dt': ps.\n doc_type}, fields=['name', 'fieldname'])\n if custom_fields:\n _idx = json.loads(ps.value)\n for custom_field in custom_fields:\n if custom_field.fieldname in _idx:\n custom_field_idx = _idx.index(custom_field.fieldname)\n if custom_field_idx == 0:\n prev_fieldname = ''\n else:\n prev_fieldname = _idx[custom_field_idx - 1]\n else:\n prev_fieldname = _idx[-1]\n custom_field_idx = len(_idx)\n frappe.db.set_value('Custom Field', custom_field.name,\n 'insert_after', prev_fieldname)\n frappe.db.set_value('Custom Field', custom_field.name,\n 'idx', custom_field_idx)\n",
"step-4": "from __future__ import unicode_literals\nimport frappe, json\n\ndef execute():\n\tfor ps in frappe.get_all('Property Setter', filters={'property': '_idx'},\n\t\tfields = ['doc_type', 'value']):\n\t\tcustom_fields = frappe.get_all('Custom Field',\n\t\t\tfilters = {'dt': ps.doc_type}, fields=['name', 'fieldname'])\n\n\t\tif custom_fields:\n\t\t\t_idx = json.loads(ps.value)\n\n\t\t\tfor custom_field in custom_fields:\n\t\t\t\tif custom_field.fieldname in _idx:\n\t\t\t\t\tcustom_field_idx = _idx.index(custom_field.fieldname)\n\t\t\t\t\tif custom_field_idx == 0:\n\t\t\t\t\t\tprev_fieldname = \"\"\n\n\t\t\t\t\telse:\n\t\t\t\t\t\tprev_fieldname = _idx[custom_field_idx - 1]\n\n\t\t\t\telse:\n\t\t\t\t\tprev_fieldname = _idx[-1]\n\t\t\t\t\tcustom_field_idx = len(_idx)\n\n\t\t\t\tfrappe.db.set_value('Custom Field', custom_field.name, 'insert_after', prev_fieldname)\n\t\t\t\tfrappe.db.set_value('Custom Field', custom_field.name, 'idx', custom_field_idx)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class SolutionBest(object):
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class SolutionBest(object):
def nextGreatestLetter(self, letters, target):
"""
:type letters: List[str]
:type target: str
:rtype: str
"""
for i in letters:
if i > target:
return i
return letters[0]
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Solution(object):
<|reserved_special_token_0|>
class SolutionBest(object):
def nextGreatestLetter(self, letters, target):
"""
:type letters: List[str]
:type target: str
:rtype: str
"""
for i in letters:
if i > target:
return i
return letters[0]
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Solution(object):
def nextGreatestLetter(self, letters, target):
"""
:type letters: List[str]
:type target: str
:rtype: str
"""
list_a = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k',
'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x',
'y', 'z']
index_target = list_a.index(target)
for i in range(index_target + 1, len(list_a)):
if list_a[i] in letters:
return list_a[i]
return letters[0]
class SolutionBest(object):
def nextGreatestLetter(self, letters, target):
"""
:type letters: List[str]
:type target: str
:rtype: str
"""
for i in letters:
if i > target:
return i
return letters[0]
<|reserved_special_token_1|>
'''给定一个只包含小写字母的有序数组letters 和一个目标字母 target,寻找有序数组里面比目标字母大的最小字母。
数组里字母的顺序是循环的。举个例子,如果目标字母target = 'z' 并且有序数组为 letters = ['a', 'b'],则答案返回 'a'。输入:
示例:
letters = ["c", "f", "j"]
target = "a"
输出: "c"
'''
class Solution(object):
def nextGreatestLetter(self, letters, target):
"""
:type letters: List[str]
:type target: str
:rtype: str
"""
list_a = ['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z']
index_target = list_a.index(target)
for i in range(index_target + 1,len(list_a)):
if list_a[i] in letters:
return list_a[i]
return letters[0] #以上查询没找到以后,输出列表第一项
class SolutionBest(object):
def nextGreatestLetter(self, letters, target):
"""
:type letters: List[str]
:type target: str
:rtype: str
"""
for i in letters: #题目都说了,有序数组,直接迭代就好
if i > target:#惊不惊喜,字母之间在python是可以直接“比较大小”的
return i
return letters[0]
|
flexible
|
{
"blob_id": "9cb3d8bc7af0061047136d57abfe68cbb5ae0cd7",
"index": 3344,
"step-1": "<mask token>\n\n\nclass SolutionBest(object):\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass SolutionBest(object):\n\n def nextGreatestLetter(self, letters, target):\n \"\"\"\n :type letters: List[str]\n :type target: str\n :rtype: str\n \"\"\"\n for i in letters:\n if i > target:\n return i\n return letters[0]\n",
"step-3": "<mask token>\n\n\nclass Solution(object):\n <mask token>\n\n\nclass SolutionBest(object):\n\n def nextGreatestLetter(self, letters, target):\n \"\"\"\n :type letters: List[str]\n :type target: str\n :rtype: str\n \"\"\"\n for i in letters:\n if i > target:\n return i\n return letters[0]\n",
"step-4": "<mask token>\n\n\nclass Solution(object):\n\n def nextGreatestLetter(self, letters, target):\n \"\"\"\n :type letters: List[str]\n :type target: str\n :rtype: str\n \"\"\"\n list_a = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k',\n 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x',\n 'y', 'z']\n index_target = list_a.index(target)\n for i in range(index_target + 1, len(list_a)):\n if list_a[i] in letters:\n return list_a[i]\n return letters[0]\n\n\nclass SolutionBest(object):\n\n def nextGreatestLetter(self, letters, target):\n \"\"\"\n :type letters: List[str]\n :type target: str\n :rtype: str\n \"\"\"\n for i in letters:\n if i > target:\n return i\n return letters[0]\n",
"step-5": "'''给定一个只包含小写字母的有序数组letters 和一个目标字母 target,寻找有序数组里面比目标字母大的最小字母。\n\n数组里字母的顺序是循环的。举个例子,如果目标字母target = 'z' 并且有序数组为 letters = ['a', 'b'],则答案返回 'a'。输入:\n\n示例:\nletters = [\"c\", \"f\", \"j\"]\ntarget = \"a\"\n输出: \"c\"\n'''\nclass Solution(object):\n def nextGreatestLetter(self, letters, target):\n \"\"\"\n :type letters: List[str]\n :type target: str\n :rtype: str\n \"\"\"\n list_a = ['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z']\n index_target = list_a.index(target)\n for i in range(index_target + 1,len(list_a)):\n if list_a[i] in letters:\n return list_a[i]\n return letters[0] #以上查询没找到以后,输出列表第一项\n\nclass SolutionBest(object):\n def nextGreatestLetter(self, letters, target):\n \"\"\"\n :type letters: List[str]\n :type target: str\n :rtype: str\n \"\"\"\n for i in letters: #题目都说了,有序数组,直接迭代就好\n if i > target:#惊不惊喜,字母之间在python是可以直接“比较大小”的\n return i\n return letters[0]",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
def user_defined_shoot():
global variable_flag
global variable_i
global list_angle_list
variable_i = 1
for count in range(3):
gimbal_ctrl.angle_ctrl(list_angle_list[1], list_angle_list[2])
gun_ctrl.fire_once()
variable_i = variable_i + 2
time.sleep(0.2)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def user_defined_shoot():
global variable_flag
global variable_i
global list_angle_list
variable_i = 1
for count in range(3):
gimbal_ctrl.angle_ctrl(list_angle_list[1], list_angle_list[2])
gun_ctrl.fire_once()
variable_i = variable_i + 2
time.sleep(0.2)
def user_defined_storage_angle():
global variable_flag
global variable_i
global list_angle_list
led_ctrl.gun_led_on()
list_angle_list.append(gimbal_ctrl.get_axis_angle(rm_define.
gimbal_axis_yaw))
list_angle_list.append(gimbal_ctrl.get_axis_angle(rm_define.
gimbal_axis_pitch))
time.sleep(5)
led_ctrl.gun_led_off()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def user_defined_shoot():
global variable_flag
global variable_i
global list_angle_list
variable_i = 1
for count in range(3):
gimbal_ctrl.angle_ctrl(list_angle_list[1], list_angle_list[2])
gun_ctrl.fire_once()
variable_i = variable_i + 2
time.sleep(0.2)
def user_defined_storage_angle():
global variable_flag
global variable_i
global list_angle_list
led_ctrl.gun_led_on()
list_angle_list.append(gimbal_ctrl.get_axis_angle(rm_define.
gimbal_axis_yaw))
list_angle_list.append(gimbal_ctrl.get_axis_angle(rm_define.
gimbal_axis_pitch))
time.sleep(5)
led_ctrl.gun_led_off()
def start():
global variable_flag
global variable_i
global list_angle_list
robot_ctrl.set_mode(rm_define.robot_mode_free)
gimbal_ctrl.set_rotate_speed(180)
vision_ctrl.enable_detection(rm_define.vision_detection_marker)
vision_ctrl.detect_marker_and_aim(rm_define.marker_trans_red_heart)
time.sleep(5)
user_defined_storage_angle()
vision_ctrl.detect_marker_and_aim(rm_define.marker_number_three)
time.sleep(3)
user_defined_storage_angle()
user_defined_shoot()
<|reserved_special_token_1|>
list_angle_list = RmList()
variable_flag = 0
variable_i = 0
def user_defined_shoot():
global variable_flag
global variable_i
global list_angle_list
variable_i = 1
for count in range(3):
gimbal_ctrl.angle_ctrl(list_angle_list[1], list_angle_list[2])
gun_ctrl.fire_once()
variable_i = variable_i + 2
time.sleep(0.2)
def user_defined_storage_angle():
global variable_flag
global variable_i
global list_angle_list
led_ctrl.gun_led_on()
list_angle_list.append(gimbal_ctrl.get_axis_angle(rm_define.
gimbal_axis_yaw))
list_angle_list.append(gimbal_ctrl.get_axis_angle(rm_define.
gimbal_axis_pitch))
time.sleep(5)
led_ctrl.gun_led_off()
def start():
global variable_flag
global variable_i
global list_angle_list
robot_ctrl.set_mode(rm_define.robot_mode_free)
gimbal_ctrl.set_rotate_speed(180)
vision_ctrl.enable_detection(rm_define.vision_detection_marker)
vision_ctrl.detect_marker_and_aim(rm_define.marker_trans_red_heart)
time.sleep(5)
user_defined_storage_angle()
vision_ctrl.detect_marker_and_aim(rm_define.marker_number_three)
time.sleep(3)
user_defined_storage_angle()
user_defined_shoot()
|
flexible
|
{
"blob_id": "012e4112970a07559f27fa2127cdffcc557a1566",
"index": 4638,
"step-1": "<mask token>\n\n\ndef user_defined_shoot():\n global variable_flag\n global variable_i\n global list_angle_list\n variable_i = 1\n for count in range(3):\n gimbal_ctrl.angle_ctrl(list_angle_list[1], list_angle_list[2])\n gun_ctrl.fire_once()\n variable_i = variable_i + 2\n time.sleep(0.2)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef user_defined_shoot():\n global variable_flag\n global variable_i\n global list_angle_list\n variable_i = 1\n for count in range(3):\n gimbal_ctrl.angle_ctrl(list_angle_list[1], list_angle_list[2])\n gun_ctrl.fire_once()\n variable_i = variable_i + 2\n time.sleep(0.2)\n\n\ndef user_defined_storage_angle():\n global variable_flag\n global variable_i\n global list_angle_list\n led_ctrl.gun_led_on()\n list_angle_list.append(gimbal_ctrl.get_axis_angle(rm_define.\n gimbal_axis_yaw))\n list_angle_list.append(gimbal_ctrl.get_axis_angle(rm_define.\n gimbal_axis_pitch))\n time.sleep(5)\n led_ctrl.gun_led_off()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef user_defined_shoot():\n global variable_flag\n global variable_i\n global list_angle_list\n variable_i = 1\n for count in range(3):\n gimbal_ctrl.angle_ctrl(list_angle_list[1], list_angle_list[2])\n gun_ctrl.fire_once()\n variable_i = variable_i + 2\n time.sleep(0.2)\n\n\ndef user_defined_storage_angle():\n global variable_flag\n global variable_i\n global list_angle_list\n led_ctrl.gun_led_on()\n list_angle_list.append(gimbal_ctrl.get_axis_angle(rm_define.\n gimbal_axis_yaw))\n list_angle_list.append(gimbal_ctrl.get_axis_angle(rm_define.\n gimbal_axis_pitch))\n time.sleep(5)\n led_ctrl.gun_led_off()\n\n\ndef start():\n global variable_flag\n global variable_i\n global list_angle_list\n robot_ctrl.set_mode(rm_define.robot_mode_free)\n gimbal_ctrl.set_rotate_speed(180)\n vision_ctrl.enable_detection(rm_define.vision_detection_marker)\n vision_ctrl.detect_marker_and_aim(rm_define.marker_trans_red_heart)\n time.sleep(5)\n user_defined_storage_angle()\n vision_ctrl.detect_marker_and_aim(rm_define.marker_number_three)\n time.sleep(3)\n user_defined_storage_angle()\n user_defined_shoot()\n",
"step-4": "list_angle_list = RmList()\nvariable_flag = 0\nvariable_i = 0\n\n\ndef user_defined_shoot():\n global variable_flag\n global variable_i\n global list_angle_list\n variable_i = 1\n for count in range(3):\n gimbal_ctrl.angle_ctrl(list_angle_list[1], list_angle_list[2])\n gun_ctrl.fire_once()\n variable_i = variable_i + 2\n time.sleep(0.2)\n\n\ndef user_defined_storage_angle():\n global variable_flag\n global variable_i\n global list_angle_list\n led_ctrl.gun_led_on()\n list_angle_list.append(gimbal_ctrl.get_axis_angle(rm_define.\n gimbal_axis_yaw))\n list_angle_list.append(gimbal_ctrl.get_axis_angle(rm_define.\n gimbal_axis_pitch))\n time.sleep(5)\n led_ctrl.gun_led_off()\n\n\ndef start():\n global variable_flag\n global variable_i\n global list_angle_list\n robot_ctrl.set_mode(rm_define.robot_mode_free)\n gimbal_ctrl.set_rotate_speed(180)\n vision_ctrl.enable_detection(rm_define.vision_detection_marker)\n vision_ctrl.detect_marker_and_aim(rm_define.marker_trans_red_heart)\n time.sleep(5)\n user_defined_storage_angle()\n vision_ctrl.detect_marker_and_aim(rm_define.marker_number_three)\n time.sleep(3)\n user_defined_storage_angle()\n user_defined_shoot()\n",
"step-5": null,
"step-ids": [
1,
2,
3,
4
]
}
|
[
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class Agent:
def __init__(self):
self.model = torch.load(__file__[:-8] + '/agent.pkl')
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Agent:
def __init__(self):
self.model = torch.load(__file__[:-8] + '/agent.pkl')
def act(self, state):
state = torch.tensor(state)
with torch.no_grad():
return self.model(state.unsqueeze(0)).max(1)[1].item()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Agent:
def __init__(self):
self.model = torch.load(__file__[:-8] + '/agent.pkl')
def act(self, state):
state = torch.tensor(state)
with torch.no_grad():
return self.model(state.unsqueeze(0)).max(1)[1].item()
def reset(self):
pass
<|reserved_special_token_1|>
import random
import numpy as np
import os
import torch
class Agent:
def __init__(self):
self.model = torch.load(__file__[:-8] + '/agent.pkl')
def act(self, state):
state = torch.tensor(state)
with torch.no_grad():
return self.model(state.unsqueeze(0)).max(1)[1].item()
def reset(self):
pass
<|reserved_special_token_1|>
import random
import numpy as np
import os
import torch
class Agent:
def __init__(self):
self.model = torch.load(__file__[:-8] + "/agent.pkl")
def act(self, state):
state = torch.tensor(state)
with torch.no_grad():
return self.model(state.unsqueeze(0)).max(1)[1].item()
def reset(self):
pass
|
flexible
|
{
"blob_id": "50a4084dd3028acc2e6788e77794c100efcb3fac",
"index": 132,
"step-1": "<mask token>\n\n\nclass Agent:\n\n def __init__(self):\n self.model = torch.load(__file__[:-8] + '/agent.pkl')\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Agent:\n\n def __init__(self):\n self.model = torch.load(__file__[:-8] + '/agent.pkl')\n\n def act(self, state):\n state = torch.tensor(state)\n with torch.no_grad():\n return self.model(state.unsqueeze(0)).max(1)[1].item()\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Agent:\n\n def __init__(self):\n self.model = torch.load(__file__[:-8] + '/agent.pkl')\n\n def act(self, state):\n state = torch.tensor(state)\n with torch.no_grad():\n return self.model(state.unsqueeze(0)).max(1)[1].item()\n\n def reset(self):\n pass\n",
"step-4": "import random\nimport numpy as np\nimport os\nimport torch\n\n\nclass Agent:\n\n def __init__(self):\n self.model = torch.load(__file__[:-8] + '/agent.pkl')\n\n def act(self, state):\n state = torch.tensor(state)\n with torch.no_grad():\n return self.model(state.unsqueeze(0)).max(1)[1].item()\n\n def reset(self):\n pass\n",
"step-5": "import random\nimport numpy as np\nimport os\nimport torch\n\n\nclass Agent:\n def __init__(self):\n self.model = torch.load(__file__[:-8] + \"/agent.pkl\")\n \n def act(self, state):\n state = torch.tensor(state)\n with torch.no_grad():\n return self.model(state.unsqueeze(0)).max(1)[1].item()\n\n def reset(self):\n pass\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
#!/bin/env python
from boincvm_common.stomp.StompProtocol import StompProtocolFactory
from stomp.HostStompEngine import HostStompEngine
from boincvm_host.xmlrpc.HostXMLRPCService import HostXMLRPCService
from twisted.internet import reactor
from ConfigParser import SafeConfigParser
import coilmq.start
import logging
import multiprocessing
import time
import pdb
logging.basicConfig(level=logging.DEBUG, \
format='%(asctime)s - %(name)s - %(levelname)s: %(message)s', )
logger = logging.getLogger(__name__)
def startSTOMPBroker(config, serverUpEvent, tries=-1, delay=1, backoff=1.5):
"""
@param tries number of times to retry starting the broker. < 0 means infinitely many.
@param delay number of seconds to wait after the first failed attempt
@param backoff factor by which the delay will be incremented after a failure.
"""
#stomp broker
mtries = tries
mdelay = delay
coilserver = None
from coilmq.config import config as coilconfig
if config.has_section('coilmq'):
for k,v in config.items('coilmq'):
coilconfig.set('coilmq', k, v)
logger.debug("Set %s to %s for coilmq config." % (k,v))
while True:
try:
coilserver = coilmq.start.server_from_config(coilconfig)
logger.info("Stomp server listening on %s:%s" % \
coilserver.server_address)
serverUpEvent.set()
coilserver.serve_forever()
except IOError as ex:
logger.error("Exception while starting coilmq broker: '%s'", ex)
if mtries != 0:
logger.debug("Retrying coilmq startup in %.1f seconds...", mdelay)
time.sleep(mdelay)
mdelay *= backoff
mtries -= 1
else:
logger.debug("Ran out of trials (tried %d times) for coilmq startup. Giving up.", tries)
break
finally:
if coilserver: coilserver.server_close()
def start(config, brokerTimeout = 60.0):
"""
Start twisted event loop and the fun should begin...
@param brokerTimeout how long to wait for a broker
@return a negative number upon failure. Otherwise, it never returns.
"""
manager = multiprocessing.Manager()
serverUpEvent = manager.Event()
broker = multiprocessing.Process(target=startSTOMPBroker, args=(config,serverUpEvent))
broker.daemon = True
broker.name = 'STOMP-Broker'
broker.start()
serverUpEvent.wait(brokerTimeout)
if not serverUpEvent.is_set():
logger.fatal("Broker not available after %.1f seconds. Giving up", brokerTimeout)
return -1
#host side logic
host = config.get('Broker', 'host')
port = int(config.get('Broker', 'port'))
username = config.get('Broker', 'username')
password = config.get('Broker', 'password')
hostEngine = HostStompEngine(config)
stompProtocolFactory = StompProtocolFactory(hostEngine, username, password)
HostXMLRPCService(config).makeEngineAccesible(hostEngine)
reactor.connectTCP(host, port, stompProtocolFactory)
reactor.run()
if __name__ == '__main__':
from sys import argv, exit
if len(argv) < 2:
print "Usage: %s <config-file>" % argv[0]
exit(-1)
else:
configFile = argv[1]
config = SafeConfigParser()
config.read(configFile)
exit(start(config))
|
normal
|
{
"blob_id": "e533b7aadd1cd7137301af8862dd2987622e499e",
"index": 3357,
"step-1": "#!/bin/env python\n\nfrom boincvm_common.stomp.StompProtocol import StompProtocolFactory\nfrom stomp.HostStompEngine import HostStompEngine\n\nfrom boincvm_host.xmlrpc.HostXMLRPCService import HostXMLRPCService\n\nfrom twisted.internet import reactor\nfrom ConfigParser import SafeConfigParser\n\nimport coilmq.start\n\nimport logging\nimport multiprocessing\nimport time \nimport pdb\n\nlogging.basicConfig(level=logging.DEBUG, \\\n format='%(asctime)s - %(name)s - %(levelname)s: %(message)s', )\n\nlogger = logging.getLogger(__name__)\n\ndef startSTOMPBroker(config, serverUpEvent, tries=-1, delay=1, backoff=1.5):\n \"\"\"\n\n @param tries number of times to retry starting the broker. < 0 means infinitely many.\n @param delay number of seconds to wait after the first failed attempt\n @param backoff factor by which the delay will be incremented after a failure.\n \"\"\"\n #stomp broker\n mtries = tries\n mdelay = delay\n coilserver = None\n from coilmq.config import config as coilconfig\n if config.has_section('coilmq'):\n for k,v in config.items('coilmq'):\n coilconfig.set('coilmq', k, v)\n logger.debug(\"Set %s to %s for coilmq config.\" % (k,v))\n while True:\n try:\n coilserver = coilmq.start.server_from_config(coilconfig)\n logger.info(\"Stomp server listening on %s:%s\" % \\\n coilserver.server_address)\n serverUpEvent.set()\n coilserver.serve_forever()\n except IOError as ex:\n logger.error(\"Exception while starting coilmq broker: '%s'\", ex)\n if mtries != 0: \n logger.debug(\"Retrying coilmq startup in %.1f seconds...\", mdelay)\n time.sleep(mdelay)\n mdelay *= backoff\n mtries -= 1\n else:\n logger.debug(\"Ran out of trials (tried %d times) for coilmq startup. Giving up.\", tries)\n break\n finally:\n if coilserver: coilserver.server_close()\n\n\ndef start(config, brokerTimeout = 60.0):\n \"\"\"\n Start twisted event loop and the fun should begin...\n\n @param brokerTimeout how long to wait for a broker \n \n @return a negative number upon failure. Otherwise, it never returns.\n \"\"\"\n \n manager = multiprocessing.Manager()\n serverUpEvent = manager.Event()\n broker = multiprocessing.Process(target=startSTOMPBroker, args=(config,serverUpEvent))\n broker.daemon = True\n broker.name = 'STOMP-Broker'\n broker.start()\n\n serverUpEvent.wait(brokerTimeout)\n if not serverUpEvent.is_set():\n logger.fatal(\"Broker not available after %.1f seconds. Giving up\", brokerTimeout)\n return -1\n #host side logic\n host = config.get('Broker', 'host') \n port = int(config.get('Broker', 'port'))\n username = config.get('Broker', 'username')\n password = config.get('Broker', 'password')\n\n hostEngine = HostStompEngine(config)\n stompProtocolFactory = StompProtocolFactory(hostEngine, username, password)\n \n HostXMLRPCService(config).makeEngineAccesible(hostEngine)\n\n\n reactor.connectTCP(host, port, stompProtocolFactory)\n reactor.run()\n\n\n\nif __name__ == '__main__':\n from sys import argv, exit\n if len(argv) < 2:\n print \"Usage: %s <config-file>\" % argv[0]\n exit(-1)\n else:\n configFile = argv[1]\n\n config = SafeConfigParser()\n config.read(configFile)\n\n exit(start(config))\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
class SEMGaussianMixture(MyGMM):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def _draw_conditionnal_Z(self, Y):
"""
Tire un échantillon de loi Z sachant Y
:param Y: Observations (n_samples, n_features)
:return: Z (n_samples,n_components) Zik = 1 ssi Zi vaut ek
"""
M = self._compute_Z_conditionnal_density(Y)
s = M.cumsum(axis=1)
r = np.random.rand(M.shape[0])[:, np.newaxis]
zi = (s < r).sum(axis=1)[:, np.newaxis]
I = np.empty(M.shape)
I[:] = np.arange(M.shape[1])
return (I == zi).astype(float)
def threshold(self, Z, n_features):
pik = Z.sum(axis=0)
return (pik >= n_features + 1).prod()
def _m_step(self, Y, log_resp):
"""M step.
Parameters
----------
Y : array-like, shape (n_samples, n_features)
log_resp : array-like, shape (n_samples, n_components)
Logarithm of the posterior probabilities (or responsibilities) of
the point of each sample in Y.
"""
Z = self._draw_conditionnal_Z(Y)
while not self.threshold(Z, Y.shape[1]):
Z = self._draw_conditionnal_Z(Y)
print('Ajustement au seuil')
n_samples, _ = Y.shape
self.weights_, self.means_, self.covariances_ = (
_estimate_gaussian_parameters(Y, Z, self.reg_covar, self.
covariance_type))
self.weights_ /= n_samples
self.precisions_cholesky_ = _compute_precision_cholesky(self.
covariances_, self.covariance_type)
self._m_step_callback(Y)
class SAEMGaussianMixture(SEMGaussianMixture):
def _print_verbose_msg_iter_end(self, n_iter, diff_ll):
super()._print_verbose_msg_iter_end(n_iter, diff_ll)
self.current_iter = n_iter + 1
def _m_step(self, Y, log_resp):
"""M step.
Parameters
----------
Y : array-like, shape (n_samples, n_features)
log_resp : array-like, shape (n_samples, n_components)
Logarithm of the posterior probabilities (or responsibilities) of
the point of each sample in Y.
"""
Z = self._draw_conditionnal_Z(Y)
i = 0
while i < 10 and not self.threshold(Z, Y.shape[1]):
Z = self._draw_conditionnal_Z(Y)
i += 1
print('Ajustement au seuil')
n_samples, _ = Y.shape
SEMweights_, SEMmeans_, SEMcovariances_ = (
_estimate_gaussian_parameters(Y, Z, self.reg_covar, self.
covariance_type))
SEMweights_ /= n_samples
EMweights_, EMmeans_, EMcovariances_ = _estimate_gaussian_parameters(Y,
np.exp(log_resp), self.reg_covar, self.covariance_type)
EMweights_ /= n_samples
r = self.current_iter
gr = self.gamma(r)
self.means_ = (1 - gr) * EMmeans_ + gr * SEMmeans_
self.weights_ = (1 - gr) * EMweights_ + gr * SEMweights_
self.covariances_ = (1 - gr) * EMcovariances_ + gr * SEMcovariances_
self.precisions_cholesky_ = _compute_precision_cholesky(self.
covariances_, self.covariance_type)
self._m_step_callback(Y)
@staticmethod
def gamma(r):
return 1 / np.sqrt(r + 1)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class SEMGaussianMixture(MyGMM):
<|reserved_special_token_0|>
def _compute_Z_conditionnal_density(self, Y):
"""
Calcule les proba conditionnelles de Z_i sachant Y_i
:param Y: Observations (n_samples,n_features)
:return: matrice stochastique (en ligne) (n_samples,n_components)
"""
proba_cond = np.exp(self._estimate_weighted_log_prob(Y))
s = proba_cond.sum(axis=1)[:, np.newaxis]
return proba_cond / s
def _draw_conditionnal_Z(self, Y):
"""
Tire un échantillon de loi Z sachant Y
:param Y: Observations (n_samples, n_features)
:return: Z (n_samples,n_components) Zik = 1 ssi Zi vaut ek
"""
M = self._compute_Z_conditionnal_density(Y)
s = M.cumsum(axis=1)
r = np.random.rand(M.shape[0])[:, np.newaxis]
zi = (s < r).sum(axis=1)[:, np.newaxis]
I = np.empty(M.shape)
I[:] = np.arange(M.shape[1])
return (I == zi).astype(float)
def threshold(self, Z, n_features):
pik = Z.sum(axis=0)
return (pik >= n_features + 1).prod()
def _m_step(self, Y, log_resp):
"""M step.
Parameters
----------
Y : array-like, shape (n_samples, n_features)
log_resp : array-like, shape (n_samples, n_components)
Logarithm of the posterior probabilities (or responsibilities) of
the point of each sample in Y.
"""
Z = self._draw_conditionnal_Z(Y)
while not self.threshold(Z, Y.shape[1]):
Z = self._draw_conditionnal_Z(Y)
print('Ajustement au seuil')
n_samples, _ = Y.shape
self.weights_, self.means_, self.covariances_ = (
_estimate_gaussian_parameters(Y, Z, self.reg_covar, self.
covariance_type))
self.weights_ /= n_samples
self.precisions_cholesky_ = _compute_precision_cholesky(self.
covariances_, self.covariance_type)
self._m_step_callback(Y)
class SAEMGaussianMixture(SEMGaussianMixture):
def _print_verbose_msg_iter_end(self, n_iter, diff_ll):
super()._print_verbose_msg_iter_end(n_iter, diff_ll)
self.current_iter = n_iter + 1
def _m_step(self, Y, log_resp):
"""M step.
Parameters
----------
Y : array-like, shape (n_samples, n_features)
log_resp : array-like, shape (n_samples, n_components)
Logarithm of the posterior probabilities (or responsibilities) of
the point of each sample in Y.
"""
Z = self._draw_conditionnal_Z(Y)
i = 0
while i < 10 and not self.threshold(Z, Y.shape[1]):
Z = self._draw_conditionnal_Z(Y)
i += 1
print('Ajustement au seuil')
n_samples, _ = Y.shape
SEMweights_, SEMmeans_, SEMcovariances_ = (
_estimate_gaussian_parameters(Y, Z, self.reg_covar, self.
covariance_type))
SEMweights_ /= n_samples
EMweights_, EMmeans_, EMcovariances_ = _estimate_gaussian_parameters(Y,
np.exp(log_resp), self.reg_covar, self.covariance_type)
EMweights_ /= n_samples
r = self.current_iter
gr = self.gamma(r)
self.means_ = (1 - gr) * EMmeans_ + gr * SEMmeans_
self.weights_ = (1 - gr) * EMweights_ + gr * SEMweights_
self.covariances_ = (1 - gr) * EMcovariances_ + gr * SEMcovariances_
self.precisions_cholesky_ = _compute_precision_cholesky(self.
covariances_, self.covariance_type)
self._m_step_callback(Y)
@staticmethod
def gamma(r):
return 1 / np.sqrt(r + 1)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class SEMGaussianMixture(MyGMM):
"""Remarque : on utilise la variable Y pour les observations, au lieu de X dans la classe parente."""
def _compute_Z_conditionnal_density(self, Y):
"""
Calcule les proba conditionnelles de Z_i sachant Y_i
:param Y: Observations (n_samples,n_features)
:return: matrice stochastique (en ligne) (n_samples,n_components)
"""
proba_cond = np.exp(self._estimate_weighted_log_prob(Y))
s = proba_cond.sum(axis=1)[:, np.newaxis]
return proba_cond / s
def _draw_conditionnal_Z(self, Y):
"""
Tire un échantillon de loi Z sachant Y
:param Y: Observations (n_samples, n_features)
:return: Z (n_samples,n_components) Zik = 1 ssi Zi vaut ek
"""
M = self._compute_Z_conditionnal_density(Y)
s = M.cumsum(axis=1)
r = np.random.rand(M.shape[0])[:, np.newaxis]
zi = (s < r).sum(axis=1)[:, np.newaxis]
I = np.empty(M.shape)
I[:] = np.arange(M.shape[1])
return (I == zi).astype(float)
def threshold(self, Z, n_features):
pik = Z.sum(axis=0)
return (pik >= n_features + 1).prod()
def _m_step(self, Y, log_resp):
"""M step.
Parameters
----------
Y : array-like, shape (n_samples, n_features)
log_resp : array-like, shape (n_samples, n_components)
Logarithm of the posterior probabilities (or responsibilities) of
the point of each sample in Y.
"""
Z = self._draw_conditionnal_Z(Y)
while not self.threshold(Z, Y.shape[1]):
Z = self._draw_conditionnal_Z(Y)
print('Ajustement au seuil')
n_samples, _ = Y.shape
self.weights_, self.means_, self.covariances_ = (
_estimate_gaussian_parameters(Y, Z, self.reg_covar, self.
covariance_type))
self.weights_ /= n_samples
self.precisions_cholesky_ = _compute_precision_cholesky(self.
covariances_, self.covariance_type)
self._m_step_callback(Y)
class SAEMGaussianMixture(SEMGaussianMixture):
def _print_verbose_msg_iter_end(self, n_iter, diff_ll):
super()._print_verbose_msg_iter_end(n_iter, diff_ll)
self.current_iter = n_iter + 1
def _m_step(self, Y, log_resp):
"""M step.
Parameters
----------
Y : array-like, shape (n_samples, n_features)
log_resp : array-like, shape (n_samples, n_components)
Logarithm of the posterior probabilities (or responsibilities) of
the point of each sample in Y.
"""
Z = self._draw_conditionnal_Z(Y)
i = 0
while i < 10 and not self.threshold(Z, Y.shape[1]):
Z = self._draw_conditionnal_Z(Y)
i += 1
print('Ajustement au seuil')
n_samples, _ = Y.shape
SEMweights_, SEMmeans_, SEMcovariances_ = (
_estimate_gaussian_parameters(Y, Z, self.reg_covar, self.
covariance_type))
SEMweights_ /= n_samples
EMweights_, EMmeans_, EMcovariances_ = _estimate_gaussian_parameters(Y,
np.exp(log_resp), self.reg_covar, self.covariance_type)
EMweights_ /= n_samples
r = self.current_iter
gr = self.gamma(r)
self.means_ = (1 - gr) * EMmeans_ + gr * SEMmeans_
self.weights_ = (1 - gr) * EMweights_ + gr * SEMweights_
self.covariances_ = (1 - gr) * EMcovariances_ + gr * SEMcovariances_
self.precisions_cholesky_ = _compute_precision_cholesky(self.
covariances_, self.covariance_type)
self._m_step_callback(Y)
@staticmethod
def gamma(r):
return 1 / np.sqrt(r + 1)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import numpy as np
from sklearn.mixture.gaussian_mixture import _estimate_gaussian_parameters, _compute_precision_cholesky
from Core.gllim import MyGMM
class SEMGaussianMixture(MyGMM):
"""Remarque : on utilise la variable Y pour les observations, au lieu de X dans la classe parente."""
def _compute_Z_conditionnal_density(self, Y):
"""
Calcule les proba conditionnelles de Z_i sachant Y_i
:param Y: Observations (n_samples,n_features)
:return: matrice stochastique (en ligne) (n_samples,n_components)
"""
proba_cond = np.exp(self._estimate_weighted_log_prob(Y))
s = proba_cond.sum(axis=1)[:, np.newaxis]
return proba_cond / s
def _draw_conditionnal_Z(self, Y):
"""
Tire un échantillon de loi Z sachant Y
:param Y: Observations (n_samples, n_features)
:return: Z (n_samples,n_components) Zik = 1 ssi Zi vaut ek
"""
M = self._compute_Z_conditionnal_density(Y)
s = M.cumsum(axis=1)
r = np.random.rand(M.shape[0])[:, np.newaxis]
zi = (s < r).sum(axis=1)[:, np.newaxis]
I = np.empty(M.shape)
I[:] = np.arange(M.shape[1])
return (I == zi).astype(float)
def threshold(self, Z, n_features):
pik = Z.sum(axis=0)
return (pik >= n_features + 1).prod()
def _m_step(self, Y, log_resp):
"""M step.
Parameters
----------
Y : array-like, shape (n_samples, n_features)
log_resp : array-like, shape (n_samples, n_components)
Logarithm of the posterior probabilities (or responsibilities) of
the point of each sample in Y.
"""
Z = self._draw_conditionnal_Z(Y)
while not self.threshold(Z, Y.shape[1]):
Z = self._draw_conditionnal_Z(Y)
print('Ajustement au seuil')
n_samples, _ = Y.shape
self.weights_, self.means_, self.covariances_ = (
_estimate_gaussian_parameters(Y, Z, self.reg_covar, self.
covariance_type))
self.weights_ /= n_samples
self.precisions_cholesky_ = _compute_precision_cholesky(self.
covariances_, self.covariance_type)
self._m_step_callback(Y)
class SAEMGaussianMixture(SEMGaussianMixture):
def _print_verbose_msg_iter_end(self, n_iter, diff_ll):
super()._print_verbose_msg_iter_end(n_iter, diff_ll)
self.current_iter = n_iter + 1
def _m_step(self, Y, log_resp):
"""M step.
Parameters
----------
Y : array-like, shape (n_samples, n_features)
log_resp : array-like, shape (n_samples, n_components)
Logarithm of the posterior probabilities (or responsibilities) of
the point of each sample in Y.
"""
Z = self._draw_conditionnal_Z(Y)
i = 0
while i < 10 and not self.threshold(Z, Y.shape[1]):
Z = self._draw_conditionnal_Z(Y)
i += 1
print('Ajustement au seuil')
n_samples, _ = Y.shape
SEMweights_, SEMmeans_, SEMcovariances_ = (
_estimate_gaussian_parameters(Y, Z, self.reg_covar, self.
covariance_type))
SEMweights_ /= n_samples
EMweights_, EMmeans_, EMcovariances_ = _estimate_gaussian_parameters(Y,
np.exp(log_resp), self.reg_covar, self.covariance_type)
EMweights_ /= n_samples
r = self.current_iter
gr = self.gamma(r)
self.means_ = (1 - gr) * EMmeans_ + gr * SEMmeans_
self.weights_ = (1 - gr) * EMweights_ + gr * SEMweights_
self.covariances_ = (1 - gr) * EMcovariances_ + gr * SEMcovariances_
self.precisions_cholesky_ = _compute_precision_cholesky(self.
covariances_, self.covariance_type)
self._m_step_callback(Y)
@staticmethod
def gamma(r):
return 1 / np.sqrt(r + 1)
<|reserved_special_token_1|>
"""Gaussian mixture model, with Stochastic EM algorithm."""
import numpy as np
from sklearn.mixture.gaussian_mixture import _estimate_gaussian_parameters, _compute_precision_cholesky
from Core.gllim import MyGMM
class SEMGaussianMixture(MyGMM):
"""Remarque : on utilise la variable Y pour les observations, au lieu de X dans la classe parente."""
def _compute_Z_conditionnal_density(self,Y):
"""
Calcule les proba conditionnelles de Z_i sachant Y_i
:param Y: Observations (n_samples,n_features)
:return: matrice stochastique (en ligne) (n_samples,n_components)
"""
proba_cond = np.exp(self._estimate_weighted_log_prob(Y)) # Pi_k * g_k(yi)
s = proba_cond.sum(axis=1)[:,np.newaxis] # sum_k (Pi_k * g_k(yi))
return proba_cond / s #On normalise
def _draw_conditionnal_Z(self,Y):
"""
Tire un échantillon de loi Z sachant Y
:param Y: Observations (n_samples, n_features)
:return: Z (n_samples,n_components) Zik = 1 ssi Zi vaut ek
"""
M = self._compute_Z_conditionnal_density(Y)
s = M.cumsum(axis=1)
r = np.random.rand(M.shape[0])[:,np.newaxis]
zi = (s < r).sum(axis=1)[:,np.newaxis]
I = np.empty(M.shape)
I[:] = np.arange(M.shape[1])
return (I == zi).astype(float)
def threshold(self,Z,n_features):
pik = Z.sum(axis=0)
return (pik >= (n_features + 1)).prod()
def _m_step(self, Y, log_resp):
"""M step.
Parameters
----------
Y : array-like, shape (n_samples, n_features)
log_resp : array-like, shape (n_samples, n_components)
Logarithm of the posterior probabilities (or responsibilities) of
the point of each sample in Y.
"""
Z = self._draw_conditionnal_Z(Y)
while not self.threshold(Z,Y.shape[1]): #Condition de seuil
Z = self._draw_conditionnal_Z(Y)
print("Ajustement au seuil")
n_samples, _ = Y.shape
self.weights_, self.means_, self.covariances_ = (
_estimate_gaussian_parameters(Y, Z, self.reg_covar,
self.covariance_type))
self.weights_ /= n_samples
self.precisions_cholesky_ = _compute_precision_cholesky(
self.covariances_, self.covariance_type)
self._m_step_callback(Y)
class SAEMGaussianMixture(SEMGaussianMixture):
def _print_verbose_msg_iter_end(self, n_iter, diff_ll):
super()._print_verbose_msg_iter_end(n_iter,diff_ll)
self.current_iter = n_iter + 1 #Prochaine itération
def _m_step(self, Y, log_resp):
"""M step.
Parameters
----------
Y : array-like, shape (n_samples, n_features)
log_resp : array-like, shape (n_samples, n_components)
Logarithm of the posterior probabilities (or responsibilities) of
the point of each sample in Y.
"""
Z = self._draw_conditionnal_Z(Y)
i = 0
while i < 10 and not self.threshold(Z, Y.shape[1]): # Condition de seuil
Z = self._draw_conditionnal_Z(Y)
i += 1
print("Ajustement au seuil")
n_samples, _ = Y.shape
SEMweights_, SEMmeans_, SEMcovariances_ = (
_estimate_gaussian_parameters(Y, Z, self.reg_covar,
self.covariance_type))
SEMweights_ /= n_samples
EMweights_, EMmeans_, EMcovariances_ = (
_estimate_gaussian_parameters(Y, np.exp(log_resp), self.reg_covar,
self.covariance_type))
EMweights_ /= n_samples
r = self.current_iter
gr = self.gamma(r)
self.means_ = (1 - gr) * EMmeans_ + gr * SEMmeans_
self.weights_ = (1 - gr) * EMweights_ + gr * SEMweights_
self.covariances_ = (1 - gr) * EMcovariances_ + gr * SEMcovariances_
self.precisions_cholesky_ = _compute_precision_cholesky(
self.covariances_, self.covariance_type)
self._m_step_callback(Y)
@staticmethod
def gamma(r):
return 1 / np.sqrt( r + 1)
|
flexible
|
{
"blob_id": "39475626b7e3e0f4c8143b300c002a2eb50cc23a",
"index": 9341,
"step-1": "<mask token>\n\n\nclass SEMGaussianMixture(MyGMM):\n <mask token>\n <mask token>\n\n def _draw_conditionnal_Z(self, Y):\n \"\"\"\n Tire un échantillon de loi Z sachant Y\n\n :param Y: Observations (n_samples, n_features)\n :return: Z (n_samples,n_components) Zik = 1 ssi Zi vaut ek\n \"\"\"\n M = self._compute_Z_conditionnal_density(Y)\n s = M.cumsum(axis=1)\n r = np.random.rand(M.shape[0])[:, np.newaxis]\n zi = (s < r).sum(axis=1)[:, np.newaxis]\n I = np.empty(M.shape)\n I[:] = np.arange(M.shape[1])\n return (I == zi).astype(float)\n\n def threshold(self, Z, n_features):\n pik = Z.sum(axis=0)\n return (pik >= n_features + 1).prod()\n\n def _m_step(self, Y, log_resp):\n \"\"\"M step.\n\n Parameters\n ----------\n Y : array-like, shape (n_samples, n_features)\n\n log_resp : array-like, shape (n_samples, n_components)\n Logarithm of the posterior probabilities (or responsibilities) of\n the point of each sample in Y.\n \"\"\"\n Z = self._draw_conditionnal_Z(Y)\n while not self.threshold(Z, Y.shape[1]):\n Z = self._draw_conditionnal_Z(Y)\n print('Ajustement au seuil')\n n_samples, _ = Y.shape\n self.weights_, self.means_, self.covariances_ = (\n _estimate_gaussian_parameters(Y, Z, self.reg_covar, self.\n covariance_type))\n self.weights_ /= n_samples\n self.precisions_cholesky_ = _compute_precision_cholesky(self.\n covariances_, self.covariance_type)\n self._m_step_callback(Y)\n\n\nclass SAEMGaussianMixture(SEMGaussianMixture):\n\n def _print_verbose_msg_iter_end(self, n_iter, diff_ll):\n super()._print_verbose_msg_iter_end(n_iter, diff_ll)\n self.current_iter = n_iter + 1\n\n def _m_step(self, Y, log_resp):\n \"\"\"M step.\n\n Parameters\n ----------\n Y : array-like, shape (n_samples, n_features)\n\n log_resp : array-like, shape (n_samples, n_components)\n Logarithm of the posterior probabilities (or responsibilities) of\n the point of each sample in Y.\n \"\"\"\n Z = self._draw_conditionnal_Z(Y)\n i = 0\n while i < 10 and not self.threshold(Z, Y.shape[1]):\n Z = self._draw_conditionnal_Z(Y)\n i += 1\n print('Ajustement au seuil')\n n_samples, _ = Y.shape\n SEMweights_, SEMmeans_, SEMcovariances_ = (\n _estimate_gaussian_parameters(Y, Z, self.reg_covar, self.\n covariance_type))\n SEMweights_ /= n_samples\n EMweights_, EMmeans_, EMcovariances_ = _estimate_gaussian_parameters(Y,\n np.exp(log_resp), self.reg_covar, self.covariance_type)\n EMweights_ /= n_samples\n r = self.current_iter\n gr = self.gamma(r)\n self.means_ = (1 - gr) * EMmeans_ + gr * SEMmeans_\n self.weights_ = (1 - gr) * EMweights_ + gr * SEMweights_\n self.covariances_ = (1 - gr) * EMcovariances_ + gr * SEMcovariances_\n self.precisions_cholesky_ = _compute_precision_cholesky(self.\n covariances_, self.covariance_type)\n self._m_step_callback(Y)\n\n @staticmethod\n def gamma(r):\n return 1 / np.sqrt(r + 1)\n",
"step-2": "<mask token>\n\n\nclass SEMGaussianMixture(MyGMM):\n <mask token>\n\n def _compute_Z_conditionnal_density(self, Y):\n \"\"\"\n Calcule les proba conditionnelles de Z_i sachant Y_i\n :param Y: Observations (n_samples,n_features)\n :return: matrice stochastique (en ligne) (n_samples,n_components)\n \"\"\"\n proba_cond = np.exp(self._estimate_weighted_log_prob(Y))\n s = proba_cond.sum(axis=1)[:, np.newaxis]\n return proba_cond / s\n\n def _draw_conditionnal_Z(self, Y):\n \"\"\"\n Tire un échantillon de loi Z sachant Y\n\n :param Y: Observations (n_samples, n_features)\n :return: Z (n_samples,n_components) Zik = 1 ssi Zi vaut ek\n \"\"\"\n M = self._compute_Z_conditionnal_density(Y)\n s = M.cumsum(axis=1)\n r = np.random.rand(M.shape[0])[:, np.newaxis]\n zi = (s < r).sum(axis=1)[:, np.newaxis]\n I = np.empty(M.shape)\n I[:] = np.arange(M.shape[1])\n return (I == zi).astype(float)\n\n def threshold(self, Z, n_features):\n pik = Z.sum(axis=0)\n return (pik >= n_features + 1).prod()\n\n def _m_step(self, Y, log_resp):\n \"\"\"M step.\n\n Parameters\n ----------\n Y : array-like, shape (n_samples, n_features)\n\n log_resp : array-like, shape (n_samples, n_components)\n Logarithm of the posterior probabilities (or responsibilities) of\n the point of each sample in Y.\n \"\"\"\n Z = self._draw_conditionnal_Z(Y)\n while not self.threshold(Z, Y.shape[1]):\n Z = self._draw_conditionnal_Z(Y)\n print('Ajustement au seuil')\n n_samples, _ = Y.shape\n self.weights_, self.means_, self.covariances_ = (\n _estimate_gaussian_parameters(Y, Z, self.reg_covar, self.\n covariance_type))\n self.weights_ /= n_samples\n self.precisions_cholesky_ = _compute_precision_cholesky(self.\n covariances_, self.covariance_type)\n self._m_step_callback(Y)\n\n\nclass SAEMGaussianMixture(SEMGaussianMixture):\n\n def _print_verbose_msg_iter_end(self, n_iter, diff_ll):\n super()._print_verbose_msg_iter_end(n_iter, diff_ll)\n self.current_iter = n_iter + 1\n\n def _m_step(self, Y, log_resp):\n \"\"\"M step.\n\n Parameters\n ----------\n Y : array-like, shape (n_samples, n_features)\n\n log_resp : array-like, shape (n_samples, n_components)\n Logarithm of the posterior probabilities (or responsibilities) of\n the point of each sample in Y.\n \"\"\"\n Z = self._draw_conditionnal_Z(Y)\n i = 0\n while i < 10 and not self.threshold(Z, Y.shape[1]):\n Z = self._draw_conditionnal_Z(Y)\n i += 1\n print('Ajustement au seuil')\n n_samples, _ = Y.shape\n SEMweights_, SEMmeans_, SEMcovariances_ = (\n _estimate_gaussian_parameters(Y, Z, self.reg_covar, self.\n covariance_type))\n SEMweights_ /= n_samples\n EMweights_, EMmeans_, EMcovariances_ = _estimate_gaussian_parameters(Y,\n np.exp(log_resp), self.reg_covar, self.covariance_type)\n EMweights_ /= n_samples\n r = self.current_iter\n gr = self.gamma(r)\n self.means_ = (1 - gr) * EMmeans_ + gr * SEMmeans_\n self.weights_ = (1 - gr) * EMweights_ + gr * SEMweights_\n self.covariances_ = (1 - gr) * EMcovariances_ + gr * SEMcovariances_\n self.precisions_cholesky_ = _compute_precision_cholesky(self.\n covariances_, self.covariance_type)\n self._m_step_callback(Y)\n\n @staticmethod\n def gamma(r):\n return 1 / np.sqrt(r + 1)\n",
"step-3": "<mask token>\n\n\nclass SEMGaussianMixture(MyGMM):\n \"\"\"Remarque : on utilise la variable Y pour les observations, au lieu de X dans la classe parente.\"\"\"\n\n def _compute_Z_conditionnal_density(self, Y):\n \"\"\"\n Calcule les proba conditionnelles de Z_i sachant Y_i\n :param Y: Observations (n_samples,n_features)\n :return: matrice stochastique (en ligne) (n_samples,n_components)\n \"\"\"\n proba_cond = np.exp(self._estimate_weighted_log_prob(Y))\n s = proba_cond.sum(axis=1)[:, np.newaxis]\n return proba_cond / s\n\n def _draw_conditionnal_Z(self, Y):\n \"\"\"\n Tire un échantillon de loi Z sachant Y\n\n :param Y: Observations (n_samples, n_features)\n :return: Z (n_samples,n_components) Zik = 1 ssi Zi vaut ek\n \"\"\"\n M = self._compute_Z_conditionnal_density(Y)\n s = M.cumsum(axis=1)\n r = np.random.rand(M.shape[0])[:, np.newaxis]\n zi = (s < r).sum(axis=1)[:, np.newaxis]\n I = np.empty(M.shape)\n I[:] = np.arange(M.shape[1])\n return (I == zi).astype(float)\n\n def threshold(self, Z, n_features):\n pik = Z.sum(axis=0)\n return (pik >= n_features + 1).prod()\n\n def _m_step(self, Y, log_resp):\n \"\"\"M step.\n\n Parameters\n ----------\n Y : array-like, shape (n_samples, n_features)\n\n log_resp : array-like, shape (n_samples, n_components)\n Logarithm of the posterior probabilities (or responsibilities) of\n the point of each sample in Y.\n \"\"\"\n Z = self._draw_conditionnal_Z(Y)\n while not self.threshold(Z, Y.shape[1]):\n Z = self._draw_conditionnal_Z(Y)\n print('Ajustement au seuil')\n n_samples, _ = Y.shape\n self.weights_, self.means_, self.covariances_ = (\n _estimate_gaussian_parameters(Y, Z, self.reg_covar, self.\n covariance_type))\n self.weights_ /= n_samples\n self.precisions_cholesky_ = _compute_precision_cholesky(self.\n covariances_, self.covariance_type)\n self._m_step_callback(Y)\n\n\nclass SAEMGaussianMixture(SEMGaussianMixture):\n\n def _print_verbose_msg_iter_end(self, n_iter, diff_ll):\n super()._print_verbose_msg_iter_end(n_iter, diff_ll)\n self.current_iter = n_iter + 1\n\n def _m_step(self, Y, log_resp):\n \"\"\"M step.\n\n Parameters\n ----------\n Y : array-like, shape (n_samples, n_features)\n\n log_resp : array-like, shape (n_samples, n_components)\n Logarithm of the posterior probabilities (or responsibilities) of\n the point of each sample in Y.\n \"\"\"\n Z = self._draw_conditionnal_Z(Y)\n i = 0\n while i < 10 and not self.threshold(Z, Y.shape[1]):\n Z = self._draw_conditionnal_Z(Y)\n i += 1\n print('Ajustement au seuil')\n n_samples, _ = Y.shape\n SEMweights_, SEMmeans_, SEMcovariances_ = (\n _estimate_gaussian_parameters(Y, Z, self.reg_covar, self.\n covariance_type))\n SEMweights_ /= n_samples\n EMweights_, EMmeans_, EMcovariances_ = _estimate_gaussian_parameters(Y,\n np.exp(log_resp), self.reg_covar, self.covariance_type)\n EMweights_ /= n_samples\n r = self.current_iter\n gr = self.gamma(r)\n self.means_ = (1 - gr) * EMmeans_ + gr * SEMmeans_\n self.weights_ = (1 - gr) * EMweights_ + gr * SEMweights_\n self.covariances_ = (1 - gr) * EMcovariances_ + gr * SEMcovariances_\n self.precisions_cholesky_ = _compute_precision_cholesky(self.\n covariances_, self.covariance_type)\n self._m_step_callback(Y)\n\n @staticmethod\n def gamma(r):\n return 1 / np.sqrt(r + 1)\n",
"step-4": "<mask token>\nimport numpy as np\nfrom sklearn.mixture.gaussian_mixture import _estimate_gaussian_parameters, _compute_precision_cholesky\nfrom Core.gllim import MyGMM\n\n\nclass SEMGaussianMixture(MyGMM):\n \"\"\"Remarque : on utilise la variable Y pour les observations, au lieu de X dans la classe parente.\"\"\"\n\n def _compute_Z_conditionnal_density(self, Y):\n \"\"\"\n Calcule les proba conditionnelles de Z_i sachant Y_i\n :param Y: Observations (n_samples,n_features)\n :return: matrice stochastique (en ligne) (n_samples,n_components)\n \"\"\"\n proba_cond = np.exp(self._estimate_weighted_log_prob(Y))\n s = proba_cond.sum(axis=1)[:, np.newaxis]\n return proba_cond / s\n\n def _draw_conditionnal_Z(self, Y):\n \"\"\"\n Tire un échantillon de loi Z sachant Y\n\n :param Y: Observations (n_samples, n_features)\n :return: Z (n_samples,n_components) Zik = 1 ssi Zi vaut ek\n \"\"\"\n M = self._compute_Z_conditionnal_density(Y)\n s = M.cumsum(axis=1)\n r = np.random.rand(M.shape[0])[:, np.newaxis]\n zi = (s < r).sum(axis=1)[:, np.newaxis]\n I = np.empty(M.shape)\n I[:] = np.arange(M.shape[1])\n return (I == zi).astype(float)\n\n def threshold(self, Z, n_features):\n pik = Z.sum(axis=0)\n return (pik >= n_features + 1).prod()\n\n def _m_step(self, Y, log_resp):\n \"\"\"M step.\n\n Parameters\n ----------\n Y : array-like, shape (n_samples, n_features)\n\n log_resp : array-like, shape (n_samples, n_components)\n Logarithm of the posterior probabilities (or responsibilities) of\n the point of each sample in Y.\n \"\"\"\n Z = self._draw_conditionnal_Z(Y)\n while not self.threshold(Z, Y.shape[1]):\n Z = self._draw_conditionnal_Z(Y)\n print('Ajustement au seuil')\n n_samples, _ = Y.shape\n self.weights_, self.means_, self.covariances_ = (\n _estimate_gaussian_parameters(Y, Z, self.reg_covar, self.\n covariance_type))\n self.weights_ /= n_samples\n self.precisions_cholesky_ = _compute_precision_cholesky(self.\n covariances_, self.covariance_type)\n self._m_step_callback(Y)\n\n\nclass SAEMGaussianMixture(SEMGaussianMixture):\n\n def _print_verbose_msg_iter_end(self, n_iter, diff_ll):\n super()._print_verbose_msg_iter_end(n_iter, diff_ll)\n self.current_iter = n_iter + 1\n\n def _m_step(self, Y, log_resp):\n \"\"\"M step.\n\n Parameters\n ----------\n Y : array-like, shape (n_samples, n_features)\n\n log_resp : array-like, shape (n_samples, n_components)\n Logarithm of the posterior probabilities (or responsibilities) of\n the point of each sample in Y.\n \"\"\"\n Z = self._draw_conditionnal_Z(Y)\n i = 0\n while i < 10 and not self.threshold(Z, Y.shape[1]):\n Z = self._draw_conditionnal_Z(Y)\n i += 1\n print('Ajustement au seuil')\n n_samples, _ = Y.shape\n SEMweights_, SEMmeans_, SEMcovariances_ = (\n _estimate_gaussian_parameters(Y, Z, self.reg_covar, self.\n covariance_type))\n SEMweights_ /= n_samples\n EMweights_, EMmeans_, EMcovariances_ = _estimate_gaussian_parameters(Y,\n np.exp(log_resp), self.reg_covar, self.covariance_type)\n EMweights_ /= n_samples\n r = self.current_iter\n gr = self.gamma(r)\n self.means_ = (1 - gr) * EMmeans_ + gr * SEMmeans_\n self.weights_ = (1 - gr) * EMweights_ + gr * SEMweights_\n self.covariances_ = (1 - gr) * EMcovariances_ + gr * SEMcovariances_\n self.precisions_cholesky_ = _compute_precision_cholesky(self.\n covariances_, self.covariance_type)\n self._m_step_callback(Y)\n\n @staticmethod\n def gamma(r):\n return 1 / np.sqrt(r + 1)\n",
"step-5": "\"\"\"Gaussian mixture model, with Stochastic EM algorithm.\"\"\"\n\nimport numpy as np\nfrom sklearn.mixture.gaussian_mixture import _estimate_gaussian_parameters, _compute_precision_cholesky\n\nfrom Core.gllim import MyGMM\n\n\nclass SEMGaussianMixture(MyGMM):\n \"\"\"Remarque : on utilise la variable Y pour les observations, au lieu de X dans la classe parente.\"\"\"\n\n def _compute_Z_conditionnal_density(self,Y):\n \"\"\"\n Calcule les proba conditionnelles de Z_i sachant Y_i\n :param Y: Observations (n_samples,n_features)\n :return: matrice stochastique (en ligne) (n_samples,n_components)\n \"\"\"\n proba_cond = np.exp(self._estimate_weighted_log_prob(Y)) # Pi_k * g_k(yi)\n s = proba_cond.sum(axis=1)[:,np.newaxis] # sum_k (Pi_k * g_k(yi))\n return proba_cond / s #On normalise\n\n def _draw_conditionnal_Z(self,Y):\n \"\"\"\n Tire un échantillon de loi Z sachant Y\n\n :param Y: Observations (n_samples, n_features)\n :return: Z (n_samples,n_components) Zik = 1 ssi Zi vaut ek\n \"\"\"\n M = self._compute_Z_conditionnal_density(Y)\n s = M.cumsum(axis=1)\n r = np.random.rand(M.shape[0])[:,np.newaxis]\n zi = (s < r).sum(axis=1)[:,np.newaxis]\n I = np.empty(M.shape)\n I[:] = np.arange(M.shape[1])\n return (I == zi).astype(float)\n\n def threshold(self,Z,n_features):\n pik = Z.sum(axis=0)\n return (pik >= (n_features + 1)).prod()\n\n def _m_step(self, Y, log_resp):\n \"\"\"M step.\n\n Parameters\n ----------\n Y : array-like, shape (n_samples, n_features)\n\n log_resp : array-like, shape (n_samples, n_components)\n Logarithm of the posterior probabilities (or responsibilities) of\n the point of each sample in Y.\n \"\"\"\n Z = self._draw_conditionnal_Z(Y)\n while not self.threshold(Z,Y.shape[1]): #Condition de seuil\n Z = self._draw_conditionnal_Z(Y)\n print(\"Ajustement au seuil\")\n\n n_samples, _ = Y.shape\n self.weights_, self.means_, self.covariances_ = (\n _estimate_gaussian_parameters(Y, Z, self.reg_covar,\n self.covariance_type))\n self.weights_ /= n_samples\n self.precisions_cholesky_ = _compute_precision_cholesky(\n self.covariances_, self.covariance_type)\n\n self._m_step_callback(Y)\n\nclass SAEMGaussianMixture(SEMGaussianMixture):\n\n def _print_verbose_msg_iter_end(self, n_iter, diff_ll):\n super()._print_verbose_msg_iter_end(n_iter,diff_ll)\n self.current_iter = n_iter + 1 #Prochaine itération\n\n def _m_step(self, Y, log_resp):\n \"\"\"M step.\n\n Parameters\n ----------\n Y : array-like, shape (n_samples, n_features)\n\n log_resp : array-like, shape (n_samples, n_components)\n Logarithm of the posterior probabilities (or responsibilities) of\n the point of each sample in Y.\n \"\"\"\n Z = self._draw_conditionnal_Z(Y)\n i = 0\n while i < 10 and not self.threshold(Z, Y.shape[1]): # Condition de seuil\n Z = self._draw_conditionnal_Z(Y)\n i += 1\n print(\"Ajustement au seuil\")\n\n n_samples, _ = Y.shape\n SEMweights_, SEMmeans_, SEMcovariances_ = (\n _estimate_gaussian_parameters(Y, Z, self.reg_covar,\n self.covariance_type))\n SEMweights_ /= n_samples\n\n EMweights_, EMmeans_, EMcovariances_ = (\n _estimate_gaussian_parameters(Y, np.exp(log_resp), self.reg_covar,\n self.covariance_type))\n EMweights_ /= n_samples\n\n r = self.current_iter\n gr = self.gamma(r)\n self.means_ = (1 - gr) * EMmeans_ + gr * SEMmeans_\n self.weights_ = (1 - gr) * EMweights_ + gr * SEMweights_\n self.covariances_ = (1 - gr) * EMcovariances_ + gr * SEMcovariances_\n\n self.precisions_cholesky_ = _compute_precision_cholesky(\n self.covariances_, self.covariance_type)\n\n self._m_step_callback(Y)\n\n @staticmethod\n def gamma(r):\n return 1 / np.sqrt( r + 1)\n\n",
"step-ids": [
8,
9,
10,
11,
12
]
}
|
[
8,
9,
10,
11,
12
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
start_operator >> stage_events_to_redshift >> load_songplays_table
start_operator >> stage_songs_to_redshift >> load_songplays_table
load_songplays_table >> load_song_dimension_table >> run_quality_checks
load_songplays_table >> load_user_dimension_table >> run_quality_checks
load_songplays_table >> load_artist_dimension_table >> run_quality_checks
load_songplays_table >> load_time_dimension_table >> run_quality_checks
run_quality_checks >> end_operator
<|reserved_special_token_1|>
<|reserved_special_token_0|>
default_args = {'owner': 'shreyak', 'start_date': datetime(2020, 12, 1),
'end_date': datetime(2020, 12, 1), 'depends_on_past': False, 'retries':
3, 'retry_delay': timedelta(minutes=5), 'catchup': False,
'email_on_retry': False}
dag = DAG('udac_sparkify_dag', default_args=default_args, description=
'Load and transform data in Redshift with Airflow', schedule_interval=
'0 * * * *')
start_operator = DummyOperator(task_id='Begin_execution', dag=dag)
stage_events_to_redshift = StageToRedshiftOperator(redshift_id='redshift',
aws_id='aws_credentials', schema='staging_events', s3_path=
'udacity-dend', s3_key='log_data/', query_end=
"format as json 's3://udacity-dend/log_json_path.json'", task_id=
'Stage_events', dag=dag)
stage_songs_to_redshift = StageToRedshiftOperator(task_id='Stage_songs',
redshift_id='redshift', aws_id='aws_credentials', schema=
'staging_songs', s3_path='udacity-dend', s3_key='song_data', query_end=
"json 'auto' compupdate off region 'us-west-2'", dag=dag)
load_songplays_table = LoadFactOperator(task_id='Load_songplays_fact_table',
redshift_id='redshift', schema='songplays', query=
'songplay_table_insert', dag=dag, append_only=False)
load_user_dimension_table = LoadDimensionOperator(task_id=
'Load_user_dim_table', redshift_id='redshift', schema='users', query=
'user_table_insert', dag=dag, append_only=False)
load_song_dimension_table = LoadDimensionOperator(task_id=
'Load_song_dim_table', redshift_id='redshift', schema='song', query=
'song_table_insert', dag=dag, append_only=False)
load_artist_dimension_table = LoadDimensionOperator(task_id=
'Load_artist_dim_table', redshift_id='redshift', schema='artist', query
='artist_table_insert', dag=dag, append_only=False)
load_time_dimension_table = LoadDimensionOperator(task_id=
'Load_time_dim_table', redshift_id='redshift', schema='time', query=
'time_table_insert', dag=dag, append_only=False)
run_quality_checks = DataQualityOperator(task_id='Run_data_quality_checks',
redshift_id='redshift', tables=['songplay', 'users', 'song', 'artist',
'time'], dag=dag)
end_operator = DummyOperator(task_id='Stop_execution', dag=dag)
start_operator >> stage_events_to_redshift >> load_songplays_table
start_operator >> stage_songs_to_redshift >> load_songplays_table
load_songplays_table >> load_song_dimension_table >> run_quality_checks
load_songplays_table >> load_user_dimension_table >> run_quality_checks
load_songplays_table >> load_artist_dimension_table >> run_quality_checks
load_songplays_table >> load_time_dimension_table >> run_quality_checks
run_quality_checks >> end_operator
<|reserved_special_token_1|>
from datetime import datetime, timedelta
import os
from airflow import DAG
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators import StageToRedshiftOperator, LoadFactOperator, LoadDimensionOperator, DataQualityOperator
from helpers import SqlQueries
default_args = {'owner': 'shreyak', 'start_date': datetime(2020, 12, 1),
'end_date': datetime(2020, 12, 1), 'depends_on_past': False, 'retries':
3, 'retry_delay': timedelta(minutes=5), 'catchup': False,
'email_on_retry': False}
dag = DAG('udac_sparkify_dag', default_args=default_args, description=
'Load and transform data in Redshift with Airflow', schedule_interval=
'0 * * * *')
start_operator = DummyOperator(task_id='Begin_execution', dag=dag)
stage_events_to_redshift = StageToRedshiftOperator(redshift_id='redshift',
aws_id='aws_credentials', schema='staging_events', s3_path=
'udacity-dend', s3_key='log_data/', query_end=
"format as json 's3://udacity-dend/log_json_path.json'", task_id=
'Stage_events', dag=dag)
stage_songs_to_redshift = StageToRedshiftOperator(task_id='Stage_songs',
redshift_id='redshift', aws_id='aws_credentials', schema=
'staging_songs', s3_path='udacity-dend', s3_key='song_data', query_end=
"json 'auto' compupdate off region 'us-west-2'", dag=dag)
load_songplays_table = LoadFactOperator(task_id='Load_songplays_fact_table',
redshift_id='redshift', schema='songplays', query=
'songplay_table_insert', dag=dag, append_only=False)
load_user_dimension_table = LoadDimensionOperator(task_id=
'Load_user_dim_table', redshift_id='redshift', schema='users', query=
'user_table_insert', dag=dag, append_only=False)
load_song_dimension_table = LoadDimensionOperator(task_id=
'Load_song_dim_table', redshift_id='redshift', schema='song', query=
'song_table_insert', dag=dag, append_only=False)
load_artist_dimension_table = LoadDimensionOperator(task_id=
'Load_artist_dim_table', redshift_id='redshift', schema='artist', query
='artist_table_insert', dag=dag, append_only=False)
load_time_dimension_table = LoadDimensionOperator(task_id=
'Load_time_dim_table', redshift_id='redshift', schema='time', query=
'time_table_insert', dag=dag, append_only=False)
run_quality_checks = DataQualityOperator(task_id='Run_data_quality_checks',
redshift_id='redshift', tables=['songplay', 'users', 'song', 'artist',
'time'], dag=dag)
end_operator = DummyOperator(task_id='Stop_execution', dag=dag)
start_operator >> stage_events_to_redshift >> load_songplays_table
start_operator >> stage_songs_to_redshift >> load_songplays_table
load_songplays_table >> load_song_dimension_table >> run_quality_checks
load_songplays_table >> load_user_dimension_table >> run_quality_checks
load_songplays_table >> load_artist_dimension_table >> run_quality_checks
load_songplays_table >> load_time_dimension_table >> run_quality_checks
run_quality_checks >> end_operator
<|reserved_special_token_1|>
from datetime import datetime, timedelta
import os
from airflow import DAG
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators import (StageToRedshiftOperator, LoadFactOperator,
LoadDimensionOperator, DataQualityOperator)
from helpers import SqlQueries
# AWS_KEY= os.environ.get('AWS_KEY')
# AWS_SECRET = os.environ.get('AWS_SECRET')
# Default arguments
default_args = {
'owner': 'shreyak',
'start_date': datetime(2020, 12, 1),
'end_date': datetime(2020, 12, 1),
'depends_on_past': False,
'retries': 3,
'retry_delay': timedelta(minutes=5),
'catchup': False,
'email_on_retry': False,
}
# Defining DAG
dag = DAG('udac_sparkify_dag',
default_args=default_args,
description='Load and transform data in Redshift with Airflow',
schedule_interval='0 * * * *'
)
# Starting operator
start_operator = DummyOperator(task_id='Begin_execution', dag=dag)
# Operators to create Staging tables on Redshift
stage_events_to_redshift = StageToRedshiftOperator(
redshift_id = "redshift",
aws_id = "aws_credentials",
schema = "staging_events",
s3_path="udacity-dend",
s3_key = "log_data/",
query_end = "format as json 's3://udacity-dend/log_json_path.json'",
task_id='Stage_events',
dag=dag,
)
stage_songs_to_redshift = StageToRedshiftOperator(
task_id='Stage_songs',
redshift_id = "redshift",
aws_id = "aws_credentials",
schema = "staging_songs",
s3_path="udacity-dend",
s3_key = "song_data",
query_end = "json 'auto' compupdate off region 'us-west-2'",
dag=dag
)
# Operator to load fact table
load_songplays_table = LoadFactOperator(
task_id='Load_songplays_fact_table',
redshift_id = "redshift",
schema = "songplays",
query = "songplay_table_insert",
dag=dag,
append_only = False
)
# Operators to load dimension tables
load_user_dimension_table = LoadDimensionOperator(
task_id='Load_user_dim_table',
redshift_id = "redshift",
schema = "users",
query = "user_table_insert",
dag=dag,
append_only = False
)
load_song_dimension_table = LoadDimensionOperator(
task_id='Load_song_dim_table',
redshift_id = "redshift",
schema = "song",
query = "song_table_insert",
dag=dag,
append_only = False
)
load_artist_dimension_table = LoadDimensionOperator(
task_id='Load_artist_dim_table',
redshift_id = "redshift",
schema = "artist",
query = "artist_table_insert",
dag=dag,
append_only = False
)
load_time_dimension_table = LoadDimensionOperator(
task_id='Load_time_dim_table',
redshift_id = "redshift",
schema = "time",
query = "time_table_insert",
dag=dag,
append_only = False
)
# Operator for quality checks
run_quality_checks = DataQualityOperator(
task_id='Run_data_quality_checks',
redshift_id = "redshift",
tables = ["songplay", "users", "song", "artist", "time"],
dag=dag
)
# Ending operator
end_operator = DummyOperator(task_id='Stop_execution', dag=dag)
# Defining dependencies
start_operator >> stage_events_to_redshift >> load_songplays_table
start_operator >> stage_songs_to_redshift >> load_songplays_table
load_songplays_table >> load_song_dimension_table >> run_quality_checks
load_songplays_table >> load_user_dimension_table >> run_quality_checks
load_songplays_table >> load_artist_dimension_table >> run_quality_checks
load_songplays_table >> load_time_dimension_table >> run_quality_checks
run_quality_checks >> end_operator
|
flexible
|
{
"blob_id": "7994d9605c8654053c9a85f8d37983da04f8003a",
"index": 2674,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nstart_operator >> stage_events_to_redshift >> load_songplays_table\nstart_operator >> stage_songs_to_redshift >> load_songplays_table\nload_songplays_table >> load_song_dimension_table >> run_quality_checks\nload_songplays_table >> load_user_dimension_table >> run_quality_checks\nload_songplays_table >> load_artist_dimension_table >> run_quality_checks\nload_songplays_table >> load_time_dimension_table >> run_quality_checks\nrun_quality_checks >> end_operator\n",
"step-3": "<mask token>\ndefault_args = {'owner': 'shreyak', 'start_date': datetime(2020, 12, 1),\n 'end_date': datetime(2020, 12, 1), 'depends_on_past': False, 'retries':\n 3, 'retry_delay': timedelta(minutes=5), 'catchup': False,\n 'email_on_retry': False}\ndag = DAG('udac_sparkify_dag', default_args=default_args, description=\n 'Load and transform data in Redshift with Airflow', schedule_interval=\n '0 * * * *')\nstart_operator = DummyOperator(task_id='Begin_execution', dag=dag)\nstage_events_to_redshift = StageToRedshiftOperator(redshift_id='redshift',\n aws_id='aws_credentials', schema='staging_events', s3_path=\n 'udacity-dend', s3_key='log_data/', query_end=\n \"format as json 's3://udacity-dend/log_json_path.json'\", task_id=\n 'Stage_events', dag=dag)\nstage_songs_to_redshift = StageToRedshiftOperator(task_id='Stage_songs',\n redshift_id='redshift', aws_id='aws_credentials', schema=\n 'staging_songs', s3_path='udacity-dend', s3_key='song_data', query_end=\n \"json 'auto' compupdate off region 'us-west-2'\", dag=dag)\nload_songplays_table = LoadFactOperator(task_id='Load_songplays_fact_table',\n redshift_id='redshift', schema='songplays', query=\n 'songplay_table_insert', dag=dag, append_only=False)\nload_user_dimension_table = LoadDimensionOperator(task_id=\n 'Load_user_dim_table', redshift_id='redshift', schema='users', query=\n 'user_table_insert', dag=dag, append_only=False)\nload_song_dimension_table = LoadDimensionOperator(task_id=\n 'Load_song_dim_table', redshift_id='redshift', schema='song', query=\n 'song_table_insert', dag=dag, append_only=False)\nload_artist_dimension_table = LoadDimensionOperator(task_id=\n 'Load_artist_dim_table', redshift_id='redshift', schema='artist', query\n ='artist_table_insert', dag=dag, append_only=False)\nload_time_dimension_table = LoadDimensionOperator(task_id=\n 'Load_time_dim_table', redshift_id='redshift', schema='time', query=\n 'time_table_insert', dag=dag, append_only=False)\nrun_quality_checks = DataQualityOperator(task_id='Run_data_quality_checks',\n redshift_id='redshift', tables=['songplay', 'users', 'song', 'artist',\n 'time'], dag=dag)\nend_operator = DummyOperator(task_id='Stop_execution', dag=dag)\nstart_operator >> stage_events_to_redshift >> load_songplays_table\nstart_operator >> stage_songs_to_redshift >> load_songplays_table\nload_songplays_table >> load_song_dimension_table >> run_quality_checks\nload_songplays_table >> load_user_dimension_table >> run_quality_checks\nload_songplays_table >> load_artist_dimension_table >> run_quality_checks\nload_songplays_table >> load_time_dimension_table >> run_quality_checks\nrun_quality_checks >> end_operator\n",
"step-4": "from datetime import datetime, timedelta\nimport os\nfrom airflow import DAG\nfrom airflow.operators.dummy_operator import DummyOperator\nfrom airflow.operators import StageToRedshiftOperator, LoadFactOperator, LoadDimensionOperator, DataQualityOperator\nfrom helpers import SqlQueries\ndefault_args = {'owner': 'shreyak', 'start_date': datetime(2020, 12, 1),\n 'end_date': datetime(2020, 12, 1), 'depends_on_past': False, 'retries':\n 3, 'retry_delay': timedelta(minutes=5), 'catchup': False,\n 'email_on_retry': False}\ndag = DAG('udac_sparkify_dag', default_args=default_args, description=\n 'Load and transform data in Redshift with Airflow', schedule_interval=\n '0 * * * *')\nstart_operator = DummyOperator(task_id='Begin_execution', dag=dag)\nstage_events_to_redshift = StageToRedshiftOperator(redshift_id='redshift',\n aws_id='aws_credentials', schema='staging_events', s3_path=\n 'udacity-dend', s3_key='log_data/', query_end=\n \"format as json 's3://udacity-dend/log_json_path.json'\", task_id=\n 'Stage_events', dag=dag)\nstage_songs_to_redshift = StageToRedshiftOperator(task_id='Stage_songs',\n redshift_id='redshift', aws_id='aws_credentials', schema=\n 'staging_songs', s3_path='udacity-dend', s3_key='song_data', query_end=\n \"json 'auto' compupdate off region 'us-west-2'\", dag=dag)\nload_songplays_table = LoadFactOperator(task_id='Load_songplays_fact_table',\n redshift_id='redshift', schema='songplays', query=\n 'songplay_table_insert', dag=dag, append_only=False)\nload_user_dimension_table = LoadDimensionOperator(task_id=\n 'Load_user_dim_table', redshift_id='redshift', schema='users', query=\n 'user_table_insert', dag=dag, append_only=False)\nload_song_dimension_table = LoadDimensionOperator(task_id=\n 'Load_song_dim_table', redshift_id='redshift', schema='song', query=\n 'song_table_insert', dag=dag, append_only=False)\nload_artist_dimension_table = LoadDimensionOperator(task_id=\n 'Load_artist_dim_table', redshift_id='redshift', schema='artist', query\n ='artist_table_insert', dag=dag, append_only=False)\nload_time_dimension_table = LoadDimensionOperator(task_id=\n 'Load_time_dim_table', redshift_id='redshift', schema='time', query=\n 'time_table_insert', dag=dag, append_only=False)\nrun_quality_checks = DataQualityOperator(task_id='Run_data_quality_checks',\n redshift_id='redshift', tables=['songplay', 'users', 'song', 'artist',\n 'time'], dag=dag)\nend_operator = DummyOperator(task_id='Stop_execution', dag=dag)\nstart_operator >> stage_events_to_redshift >> load_songplays_table\nstart_operator >> stage_songs_to_redshift >> load_songplays_table\nload_songplays_table >> load_song_dimension_table >> run_quality_checks\nload_songplays_table >> load_user_dimension_table >> run_quality_checks\nload_songplays_table >> load_artist_dimension_table >> run_quality_checks\nload_songplays_table >> load_time_dimension_table >> run_quality_checks\nrun_quality_checks >> end_operator\n",
"step-5": "from datetime import datetime, timedelta\nimport os\nfrom airflow import DAG\nfrom airflow.operators.dummy_operator import DummyOperator\nfrom airflow.operators import (StageToRedshiftOperator, LoadFactOperator,\n LoadDimensionOperator, DataQualityOperator)\nfrom helpers import SqlQueries\n\n# AWS_KEY= os.environ.get('AWS_KEY')\n# AWS_SECRET = os.environ.get('AWS_SECRET')\n\n# Default arguments\ndefault_args = {\n 'owner': 'shreyak',\n 'start_date': datetime(2020, 12, 1),\n 'end_date': datetime(2020, 12, 1),\n 'depends_on_past': False,\n 'retries': 3,\n 'retry_delay': timedelta(minutes=5),\n 'catchup': False,\n 'email_on_retry': False,\n \n}\n\n# Defining DAG\ndag = DAG('udac_sparkify_dag',\n default_args=default_args,\n description='Load and transform data in Redshift with Airflow',\n schedule_interval='0 * * * *'\n )\n\n# Starting operator\n\nstart_operator = DummyOperator(task_id='Begin_execution', dag=dag)\n\n# Operators to create Staging tables on Redshift\n\nstage_events_to_redshift = StageToRedshiftOperator(\n redshift_id = \"redshift\",\n aws_id = \"aws_credentials\",\n schema = \"staging_events\",\n s3_path=\"udacity-dend\",\n s3_key = \"log_data/\",\n query_end = \"format as json 's3://udacity-dend/log_json_path.json'\",\n task_id='Stage_events',\n dag=dag,\n)\n\nstage_songs_to_redshift = StageToRedshiftOperator(\n task_id='Stage_songs',\n redshift_id = \"redshift\",\n aws_id = \"aws_credentials\",\n schema = \"staging_songs\",\n s3_path=\"udacity-dend\",\n s3_key = \"song_data\",\n query_end = \"json 'auto' compupdate off region 'us-west-2'\",\n dag=dag\n)\n\n# Operator to load fact table\n\nload_songplays_table = LoadFactOperator(\n task_id='Load_songplays_fact_table',\n redshift_id = \"redshift\",\n schema = \"songplays\",\n query = \"songplay_table_insert\",\n dag=dag,\n append_only = False\n)\n\n# Operators to load dimension tables\n\nload_user_dimension_table = LoadDimensionOperator(\n task_id='Load_user_dim_table', \n redshift_id = \"redshift\",\n schema = \"users\",\n query = \"user_table_insert\",\n dag=dag,\n append_only = False\n)\n\nload_song_dimension_table = LoadDimensionOperator(\n task_id='Load_song_dim_table',\n redshift_id = \"redshift\",\n schema = \"song\",\n query = \"song_table_insert\",\n dag=dag,\n append_only = False\n)\n\nload_artist_dimension_table = LoadDimensionOperator(\n task_id='Load_artist_dim_table',\n redshift_id = \"redshift\",\n schema = \"artist\",\n query = \"artist_table_insert\",\n dag=dag,\n append_only = False\n)\n\nload_time_dimension_table = LoadDimensionOperator(\n task_id='Load_time_dim_table',\n redshift_id = \"redshift\",\n schema = \"time\",\n query = \"time_table_insert\",\n dag=dag,\n append_only = False\n)\n\n# Operator for quality checks \n\nrun_quality_checks = DataQualityOperator(\n task_id='Run_data_quality_checks',\n redshift_id = \"redshift\",\n tables = [\"songplay\", \"users\", \"song\", \"artist\", \"time\"],\n dag=dag\n)\n\n# Ending operator\n\nend_operator = DummyOperator(task_id='Stop_execution', dag=dag)\n\n# Defining dependencies\n\nstart_operator >> stage_events_to_redshift >> load_songplays_table\nstart_operator >> stage_songs_to_redshift >> load_songplays_table\n\nload_songplays_table >> load_song_dimension_table >> run_quality_checks\nload_songplays_table >> load_user_dimension_table >> run_quality_checks\nload_songplays_table >> load_artist_dimension_table >> run_quality_checks\nload_songplays_table >> load_time_dimension_table >> run_quality_checks\n\nrun_quality_checks >> end_operator",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class Statistics(models.Model):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class StatisticsCategory(models.Model):
"""
StatisticsCategory model class. This represents a single tuple in the
statitics_generator_statisticscategory table in the database.
"""
statistics_id = models.IntegerField()
category = models.CharField(max_length=30)
survey_count = models.IntegerField()
buyer_count = models.IntegerField()
seller_count = models.IntegerField()
amount = models.IntegerField()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Statistics(models.Model):
<|reserved_special_token_0|>
number_surveys = models.IntegerField()
number_listings = models.IntegerField()
number_buyer_surveys = models.IntegerField()
number_seller_surveys = models.IntegerField()
number_buyer_listings = models.IntegerField()
number_seller_listings = models.IntegerField()
average_transaction_amount = models.FloatField()
buyer_transaction_amount = models.FloatField()
seller_transaction_amount = models.FloatField()
successful_transaction_amount = models.FloatField()
average_transaction_time = models.IntegerField()
buyer_transaction_success_rate = models.FloatField()
seller_transaction_success_rate = models.FloatField()
total_transaction_success_rate = models.FloatField()
class StatisticsCategory(models.Model):
"""
StatisticsCategory model class. This represents a single tuple in the
statitics_generator_statisticscategory table in the database.
"""
statistics_id = models.IntegerField()
category = models.CharField(max_length=30)
survey_count = models.IntegerField()
buyer_count = models.IntegerField()
seller_count = models.IntegerField()
amount = models.IntegerField()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Statistics(models.Model):
"""
Statistics model class. This represents a single tuple in the
statitics_generator_statistics table in the database.
"""
number_surveys = models.IntegerField()
number_listings = models.IntegerField()
number_buyer_surveys = models.IntegerField()
number_seller_surveys = models.IntegerField()
number_buyer_listings = models.IntegerField()
number_seller_listings = models.IntegerField()
average_transaction_amount = models.FloatField()
buyer_transaction_amount = models.FloatField()
seller_transaction_amount = models.FloatField()
successful_transaction_amount = models.FloatField()
average_transaction_time = models.IntegerField()
buyer_transaction_success_rate = models.FloatField()
seller_transaction_success_rate = models.FloatField()
total_transaction_success_rate = models.FloatField()
class StatisticsCategory(models.Model):
"""
StatisticsCategory model class. This represents a single tuple in the
statitics_generator_statisticscategory table in the database.
"""
statistics_id = models.IntegerField()
category = models.CharField(max_length=30)
survey_count = models.IntegerField()
buyer_count = models.IntegerField()
seller_count = models.IntegerField()
amount = models.IntegerField()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from django.db import models
class Statistics(models.Model):
"""
Statistics model class. This represents a single tuple in the
statitics_generator_statistics table in the database.
"""
number_surveys = models.IntegerField()
number_listings = models.IntegerField()
number_buyer_surveys = models.IntegerField()
number_seller_surveys = models.IntegerField()
number_buyer_listings = models.IntegerField()
number_seller_listings = models.IntegerField()
average_transaction_amount = models.FloatField()
buyer_transaction_amount = models.FloatField()
seller_transaction_amount = models.FloatField()
successful_transaction_amount = models.FloatField()
average_transaction_time = models.IntegerField()
buyer_transaction_success_rate = models.FloatField()
seller_transaction_success_rate = models.FloatField()
total_transaction_success_rate = models.FloatField()
class StatisticsCategory(models.Model):
"""
StatisticsCategory model class. This represents a single tuple in the
statitics_generator_statisticscategory table in the database.
"""
statistics_id = models.IntegerField()
category = models.CharField(max_length=30)
survey_count = models.IntegerField()
buyer_count = models.IntegerField()
seller_count = models.IntegerField()
amount = models.IntegerField()
<|reserved_special_token_1|>
'''
Statistics models module. This module contains the database models for the
Statistics class and the StatisticsCategory class.
@author Hubert Ngu
@author Jason Hou
'''
from django.db import models
class Statistics(models.Model):
'''
Statistics model class. This represents a single tuple in the
statitics_generator_statistics table in the database.
'''
number_surveys = models.IntegerField()
number_listings = models.IntegerField()
number_buyer_surveys = models.IntegerField()
number_seller_surveys = models.IntegerField()
number_buyer_listings = models.IntegerField()
number_seller_listings = models.IntegerField()
average_transaction_amount = models.FloatField()
buyer_transaction_amount = models.FloatField()
seller_transaction_amount = models.FloatField()
successful_transaction_amount = models.FloatField()
average_transaction_time = models.IntegerField()
buyer_transaction_success_rate = models.FloatField()
seller_transaction_success_rate = models.FloatField()
total_transaction_success_rate = models.FloatField()
class StatisticsCategory(models.Model):
'''
StatisticsCategory model class. This represents a single tuple in the
statitics_generator_statisticscategory table in the database.
'''
statistics_id = models.IntegerField()
category = models.CharField(max_length=30)
survey_count = models.IntegerField()
buyer_count = models.IntegerField()
seller_count = models.IntegerField()
amount = models.IntegerField()
|
flexible
|
{
"blob_id": "728f9402b3ce4b297be82b3ba1a17c4180ac7c0d",
"index": 8839,
"step-1": "<mask token>\n\n\nclass Statistics(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass StatisticsCategory(models.Model):\n \"\"\"\n\tStatisticsCategory model class. This represents a single tuple in the\n\tstatitics_generator_statisticscategory table in the database.\n\t\"\"\"\n statistics_id = models.IntegerField()\n category = models.CharField(max_length=30)\n survey_count = models.IntegerField()\n buyer_count = models.IntegerField()\n seller_count = models.IntegerField()\n amount = models.IntegerField()\n",
"step-2": "<mask token>\n\n\nclass Statistics(models.Model):\n <mask token>\n number_surveys = models.IntegerField()\n number_listings = models.IntegerField()\n number_buyer_surveys = models.IntegerField()\n number_seller_surveys = models.IntegerField()\n number_buyer_listings = models.IntegerField()\n number_seller_listings = models.IntegerField()\n average_transaction_amount = models.FloatField()\n buyer_transaction_amount = models.FloatField()\n seller_transaction_amount = models.FloatField()\n successful_transaction_amount = models.FloatField()\n average_transaction_time = models.IntegerField()\n buyer_transaction_success_rate = models.FloatField()\n seller_transaction_success_rate = models.FloatField()\n total_transaction_success_rate = models.FloatField()\n\n\nclass StatisticsCategory(models.Model):\n \"\"\"\n\tStatisticsCategory model class. This represents a single tuple in the\n\tstatitics_generator_statisticscategory table in the database.\n\t\"\"\"\n statistics_id = models.IntegerField()\n category = models.CharField(max_length=30)\n survey_count = models.IntegerField()\n buyer_count = models.IntegerField()\n seller_count = models.IntegerField()\n amount = models.IntegerField()\n",
"step-3": "<mask token>\n\n\nclass Statistics(models.Model):\n \"\"\"\n\tStatistics model class. This represents a single tuple in the\n\tstatitics_generator_statistics table in the database.\n\t\"\"\"\n number_surveys = models.IntegerField()\n number_listings = models.IntegerField()\n number_buyer_surveys = models.IntegerField()\n number_seller_surveys = models.IntegerField()\n number_buyer_listings = models.IntegerField()\n number_seller_listings = models.IntegerField()\n average_transaction_amount = models.FloatField()\n buyer_transaction_amount = models.FloatField()\n seller_transaction_amount = models.FloatField()\n successful_transaction_amount = models.FloatField()\n average_transaction_time = models.IntegerField()\n buyer_transaction_success_rate = models.FloatField()\n seller_transaction_success_rate = models.FloatField()\n total_transaction_success_rate = models.FloatField()\n\n\nclass StatisticsCategory(models.Model):\n \"\"\"\n\tStatisticsCategory model class. This represents a single tuple in the\n\tstatitics_generator_statisticscategory table in the database.\n\t\"\"\"\n statistics_id = models.IntegerField()\n category = models.CharField(max_length=30)\n survey_count = models.IntegerField()\n buyer_count = models.IntegerField()\n seller_count = models.IntegerField()\n amount = models.IntegerField()\n",
"step-4": "<mask token>\nfrom django.db import models\n\n\nclass Statistics(models.Model):\n \"\"\"\n\tStatistics model class. This represents a single tuple in the\n\tstatitics_generator_statistics table in the database.\n\t\"\"\"\n number_surveys = models.IntegerField()\n number_listings = models.IntegerField()\n number_buyer_surveys = models.IntegerField()\n number_seller_surveys = models.IntegerField()\n number_buyer_listings = models.IntegerField()\n number_seller_listings = models.IntegerField()\n average_transaction_amount = models.FloatField()\n buyer_transaction_amount = models.FloatField()\n seller_transaction_amount = models.FloatField()\n successful_transaction_amount = models.FloatField()\n average_transaction_time = models.IntegerField()\n buyer_transaction_success_rate = models.FloatField()\n seller_transaction_success_rate = models.FloatField()\n total_transaction_success_rate = models.FloatField()\n\n\nclass StatisticsCategory(models.Model):\n \"\"\"\n\tStatisticsCategory model class. This represents a single tuple in the\n\tstatitics_generator_statisticscategory table in the database.\n\t\"\"\"\n statistics_id = models.IntegerField()\n category = models.CharField(max_length=30)\n survey_count = models.IntegerField()\n buyer_count = models.IntegerField()\n seller_count = models.IntegerField()\n amount = models.IntegerField()\n",
"step-5": "'''\r\n Statistics models module. This module contains the database models for the\r\n Statistics class and the StatisticsCategory class.\r\n\r\n @author Hubert Ngu\r\n @author Jason Hou\r\n'''\r\n\r\nfrom django.db import models\r\n\r\nclass Statistics(models.Model):\r\n\t'''\r\n\tStatistics model class. This represents a single tuple in the\r\n\tstatitics_generator_statistics table in the database.\r\n\t'''\r\n\tnumber_surveys = models.IntegerField()\r\n\tnumber_listings = models.IntegerField()\r\n\tnumber_buyer_surveys = models.IntegerField()\r\n\tnumber_seller_surveys = models.IntegerField()\r\n\tnumber_buyer_listings = models.IntegerField()\r\n\tnumber_seller_listings = models.IntegerField()\r\n\taverage_transaction_amount = models.FloatField()\r\n\tbuyer_transaction_amount = models.FloatField()\r\n\tseller_transaction_amount = models.FloatField()\r\n\tsuccessful_transaction_amount = models.FloatField()\r\n\taverage_transaction_time = models.IntegerField()\r\n\tbuyer_transaction_success_rate = models.FloatField()\r\n\tseller_transaction_success_rate = models.FloatField()\r\n\ttotal_transaction_success_rate = models.FloatField()\r\n\r\nclass StatisticsCategory(models.Model):\r\n\t'''\r\n\tStatisticsCategory model class. This represents a single tuple in the\r\n\tstatitics_generator_statisticscategory table in the database.\r\n\t'''\r\n\tstatistics_id = models.IntegerField()\r\n\tcategory = models.CharField(max_length=30)\r\n\tsurvey_count = models.IntegerField()\r\n\tbuyer_count = models.IntegerField()\r\n\tseller_count = models.IntegerField()\r\n\tamount = models.IntegerField()",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
#!/usr/bin/env python
#coding=utf-8
from datetime import *
import unittest
def getSnapshot(historyData, id):
data = historyData.split('\n')
lines = len(data)
if lines < 2 :
return 'Input is too short!'
index = 0
curid = ''
idlist = dict()
recordtime = ''
animal_pos = dict()
for i in range(lines):
if len(data[i]) > 0:#id行
if index == 0: #可能有id为空的情况未处理
curid = data[i]
index += 1
if curid in idlist:#检查id是否冲突
return 'Conflict found at ' + curid
else:
idlist[curid] = 1#保存新id
elif index == 1:#日期行
if len(data[i]) == 0:#检查日期是否为空
return 'Conflict found at ' + curid
recordtime = data[i]
index += 1
try:#检查日期格式是否正确
t1 = datetime.strptime(recordtime, '%Y/%m/%d %H:%M:%S')
except:
return 'Invalid format.'
else:#数据行
animal = data[i].split(' ')
if len(animal) == 3:#如果是新动物,检查以前是否出现过
if animal[0] in animal_pos:
return 'Conflict found at ' + curid
else:
animal_pos[animal[0]] = [animal[1], animal[2]]
elif len(animal) == 5:#如果是旧动物,检查以前是否出现过
if animal[0] not in animal_pos:
return 'Conflict found at ' + curid
else:#如果确实是旧动物,校验位置信息
ox = animal_pos[animal[0]][0]
oy = animal_pos[animal[0]][1]
if (ox != animal[1]) or (oy != animal[2]):#校验失败
return 'Conflict found at ' + curid
else:#校验成功,更新位置信息
animal_pos[animal[0]][0] = str(int(animal_pos[animal[0]][0]) + int(animal[3]))
animal_pos[animal[0]][1] = str(int(animal_pos[animal[0]][1]) + int(animal[4]))
else:#一个快照数据读入完毕
index = 0
if(id == curid) and (index == 0 or i == lines-1):#查询的id就是当前快照点
res = ''
for k, v in animal_pos.iteritems():#拼接字符串
tmp = k + ' ' + v[0] + ' ' + v[1]
res += (tmp + '\n')
return res
|
normal
|
{
"blob_id": "ddbcc8e768f93a0b4f8776b19e752c57feb5bbf9",
"index": 6362,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef getSnapshot(historyData, id):\n data = historyData.split('\\n')\n lines = len(data)\n if lines < 2:\n return 'Input is too short!'\n index = 0\n curid = ''\n idlist = dict()\n recordtime = ''\n animal_pos = dict()\n for i in range(lines):\n if len(data[i]) > 0:\n if index == 0:\n curid = data[i]\n index += 1\n if curid in idlist:\n return 'Conflict found at ' + curid\n else:\n idlist[curid] = 1\n elif index == 1:\n if len(data[i]) == 0:\n return 'Conflict found at ' + curid\n recordtime = data[i]\n index += 1\n try:\n t1 = datetime.strptime(recordtime, '%Y/%m/%d %H:%M:%S')\n except:\n return 'Invalid format.'\n else:\n animal = data[i].split(' ')\n if len(animal) == 3:\n if animal[0] in animal_pos:\n return 'Conflict found at ' + curid\n else:\n animal_pos[animal[0]] = [animal[1], animal[2]]\n elif len(animal) == 5:\n if animal[0] not in animal_pos:\n return 'Conflict found at ' + curid\n else:\n ox = animal_pos[animal[0]][0]\n oy = animal_pos[animal[0]][1]\n if ox != animal[1] or oy != animal[2]:\n return 'Conflict found at ' + curid\n else:\n animal_pos[animal[0]][0] = str(int(animal_pos[\n animal[0]][0]) + int(animal[3]))\n animal_pos[animal[0]][1] = str(int(animal_pos[\n animal[0]][1]) + int(animal[4]))\n else:\n index = 0\n if id == curid and (index == 0 or i == lines - 1):\n res = ''\n for k, v in animal_pos.iteritems():\n tmp = k + ' ' + v[0] + ' ' + v[1]\n res += tmp + '\\n'\n return res\n",
"step-3": "from datetime import *\nimport unittest\n\n\ndef getSnapshot(historyData, id):\n data = historyData.split('\\n')\n lines = len(data)\n if lines < 2:\n return 'Input is too short!'\n index = 0\n curid = ''\n idlist = dict()\n recordtime = ''\n animal_pos = dict()\n for i in range(lines):\n if len(data[i]) > 0:\n if index == 0:\n curid = data[i]\n index += 1\n if curid in idlist:\n return 'Conflict found at ' + curid\n else:\n idlist[curid] = 1\n elif index == 1:\n if len(data[i]) == 0:\n return 'Conflict found at ' + curid\n recordtime = data[i]\n index += 1\n try:\n t1 = datetime.strptime(recordtime, '%Y/%m/%d %H:%M:%S')\n except:\n return 'Invalid format.'\n else:\n animal = data[i].split(' ')\n if len(animal) == 3:\n if animal[0] in animal_pos:\n return 'Conflict found at ' + curid\n else:\n animal_pos[animal[0]] = [animal[1], animal[2]]\n elif len(animal) == 5:\n if animal[0] not in animal_pos:\n return 'Conflict found at ' + curid\n else:\n ox = animal_pos[animal[0]][0]\n oy = animal_pos[animal[0]][1]\n if ox != animal[1] or oy != animal[2]:\n return 'Conflict found at ' + curid\n else:\n animal_pos[animal[0]][0] = str(int(animal_pos[\n animal[0]][0]) + int(animal[3]))\n animal_pos[animal[0]][1] = str(int(animal_pos[\n animal[0]][1]) + int(animal[4]))\n else:\n index = 0\n if id == curid and (index == 0 or i == lines - 1):\n res = ''\n for k, v in animal_pos.iteritems():\n tmp = k + ' ' + v[0] + ' ' + v[1]\n res += tmp + '\\n'\n return res\n",
"step-4": "#!/usr/bin/env python\n#coding=utf-8\nfrom datetime import *\nimport unittest\ndef getSnapshot(historyData, id):\n data = historyData.split('\\n')\n lines = len(data)\n if lines < 2 :\n return 'Input is too short!'\n index = 0\n curid = ''\n idlist = dict()\n recordtime = ''\n animal_pos = dict()\n for i in range(lines):\n if len(data[i]) > 0:#id行\n if index == 0: #可能有id为空的情况未处理\n curid = data[i]\n index += 1\n if curid in idlist:#检查id是否冲突\n return 'Conflict found at ' + curid\n else:\n idlist[curid] = 1#保存新id\n elif index == 1:#日期行\n if len(data[i]) == 0:#检查日期是否为空\n return 'Conflict found at ' + curid\n recordtime = data[i]\n index += 1\n try:#检查日期格式是否正确\n t1 = datetime.strptime(recordtime, '%Y/%m/%d %H:%M:%S')\n except:\n return 'Invalid format.'\n else:#数据行\n animal = data[i].split(' ')\n if len(animal) == 3:#如果是新动物,检查以前是否出现过\n if animal[0] in animal_pos:\n return 'Conflict found at ' + curid\n else:\n animal_pos[animal[0]] = [animal[1], animal[2]]\n elif len(animal) == 5:#如果是旧动物,检查以前是否出现过\n if animal[0] not in animal_pos:\n return 'Conflict found at ' + curid\n else:#如果确实是旧动物,校验位置信息\n ox = animal_pos[animal[0]][0]\n oy = animal_pos[animal[0]][1]\n if (ox != animal[1]) or (oy != animal[2]):#校验失败\n return 'Conflict found at ' + curid\n else:#校验成功,更新位置信息\n animal_pos[animal[0]][0] = str(int(animal_pos[animal[0]][0]) + int(animal[3]))\n animal_pos[animal[0]][1] = str(int(animal_pos[animal[0]][1]) + int(animal[4]))\n else:#一个快照数据读入完毕\n index = 0\n if(id == curid) and (index == 0 or i == lines-1):#查询的id就是当前快照点\n res = ''\n for k, v in animal_pos.iteritems():#拼接字符串\n tmp = k + ' ' + v[0] + ' ' + v[1]\n res += (tmp + '\\n')\n return res\n\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def v_measure(cluster_labels, true_labels):
h_score = homogeneity_score(true_labels, cluster_labels)
c_score = completeness_score(true_labels, cluster_labels)
v_score = v_measure_score(true_labels, cluster_labels)
print('Homogeneity Score: %.6f' % h_score)
print('Completeness Score: %.6f' % c_score)
print('V Measure Score: %.6f' % v_score)
return h_score, c_score, v_score
def silhouette_analysis(X, cluster_labels, n_clusters, figname):
plt.xlim([-0.1, 1])
plt.ylim([0, len(X) + (n_clusters + 1) * 10])
silhouette_avg = silhouette_score(X, cluster_labels)
print('For n_clusters =', n_clusters,
'The average silhouette_score is :', silhouette_avg)
sample_silhouette_values = silhouette_samples(X, cluster_labels)
y_lower = 10
for i in range(n_clusters):
ith_cluster_silhouette_values = sample_silhouette_values[
cluster_labels == i]
ith_cluster_silhouette_values.sort()
size_cluster_i = ith_cluster_silhouette_values.shape[0]
y_upper = y_lower + size_cluster_i
color = cm.nipy_spectral(float(i) / n_clusters)
plt.fill_betweenx(np.arange(y_lower, y_upper), 0,
ith_cluster_silhouette_values, facecolor=color, edgecolor=color,
alpha=0.7)
plt.text(-0.05, y_lower + 0.5 * size_cluster_i, str(i))
y_lower = y_upper + 10
plt.title('The silhouette plot for the various clusters.')
plt.xlabel('The silhouette coefficient values')
plt.ylabel('Cluster label')
plt.axvline(x=silhouette_avg, color='red', linestyle='--')
plt.yticks([])
plt.xticks([-0.1, 0, 0.2, 0.4, 0.6, 0.8, 1])
plt.savefig(figname, format='png')
plt.clf()
def visualize_cluster(X, cluster_labels, n_clusters, centers, figname):
if X.shape[1] < 2:
print('Invalid shape for X: ', X.shape)
return
colors = cm.nipy_spectral(cluster_labels.astype(float) / n_clusters)
plt.scatter(X[:, 0], X[:, 1], marker='.', s=30, lw=0, alpha=0.7, c=
colors, edgecolor='k')
if len(centers) == n_clusters:
plt.scatter(centers[:, 0], centers[:, 1], marker='o', c='white',
alpha=1, s=200, edgecolor='k')
for i, c in enumerate(centers):
plt.scatter(c[0], c[1], marker='$%d$' % i, alpha=1, s=50,
edgecolor='k')
plt.title('The visualization of the clustered data.')
plt.xlabel('Feature space for the 1st feature')
plt.ylabel('Feature space for the 2nd feature')
plt.savefig(figname, format='png')
plt.clf()
def plot_gallery(title, images, figname, n_col=3, n_row=2, shape=(28, 28),
cmap=plt.cm.gray):
plt.figure(figsize=(2.0 * n_col, 2.26 * n_row))
plt.suptitle(title, size=16)
for i, comp in enumerate(images):
plt.subplot(n_row, n_col, i + 1)
vmax = max(comp.max(), -comp.min())
plt.imshow(comp.reshape(shape), cmap=cmap, interpolation='nearest',
vmin=-vmax, vmax=vmax)
plt.xticks(())
plt.yticks(())
plt.savefig(figname, format='png')
plt.clf()
def create_path(*arg, filename=None):
path = os.getcwd()
for directory in arg:
path = os.path.join(path, directory)
if not os.path.exists(path):
print("%s doesn't exist, creating..." % path)
os.mkdir(path)
if filename:
path = os.path.join(path, filename)
return path
def load_data(data_path, split_prop=0.2, is_shuffle=False, is_split=True):
pos_X, neg_X = [], []
with open(data_path, 'r') as f:
for line in f:
instance = list(map(float, line.strip().split(',')))
if instance[-1] == 1.0:
pos_X.append(instance[:-1])
else:
neg_X.append(instance[:-1])
if not is_split:
X, y = np.array(pos_X + neg_X), np.array([1] * len(pos_X) + [0] *
len(neg_X))
if is_shuffle:
indices = list(range(X.shape[0]))
shuffle(indices)
X, y = X[indices], y[indices]
return X, y, [], []
pos_test_size, neg_test_size = int(split_prop * len(pos_X)), int(
split_prop * len(neg_X))
pos_train_size, neg_train_size = len(pos_X) - pos_test_size, len(neg_X
) - neg_test_size
X_test, y_test = pos_X[:pos_test_size] + neg_X[:neg_test_size], [1
] * pos_test_size + [0] * neg_test_size
X_train, y_train = pos_X[pos_test_size:] + neg_X[neg_test_size:], [1
] * pos_train_size + [0] * neg_train_size
assert len(X_train) == len(y_train) and len(X_test) == len(y_test
), 'Dimention of X and y must be the same.'
X_train, X_test, y_train, y_test = np.array(X_train), np.array(X_test
), np.array(y_train), np.array(y_test)
if is_shuffle:
train_indices = list(range(X_train.shape[0]))
shuffle(train_indices)
test_indices = list(range(X_test.shape[0]))
shuffle(test_indices)
X_train, X_test, y_train, y_test = X_train[train_indices], X_test[
test_indices], y_train[train_indices], y_test[test_indices]
return X_train, X_test, y_train, y_test
<|reserved_special_token_0|>
def plot_learning_curve(train_scores_mean, train_scores_std,
val_scores_mean, val_scores_std, train_sizes, ylim=None, title='test',
fig_path='fig', format='png'):
plt.figure()
plt.title(title)
if ylim is not None:
plt.ylim(*ylim)
plt.xlabel('Training examples')
plt.ylabel('Score')
plt.grid(True, linestyle='-.', color='0.3')
plt.fill_between(train_sizes, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.1, color='r')
plt.fill_between(train_sizes, val_scores_mean - val_scores_std,
val_scores_mean + val_scores_std, alpha=0.1, color='g')
plt.plot(train_sizes, train_scores_mean, 'o-', color='r', label=
'Training score')
plt.plot(train_sizes, val_scores_mean, 'o-', color='g', label=
'Cross-validation score')
plt.legend(loc='best')
plt.savefig(fig_path + '/' + title + '.' + format, format=format)
plt.clf()
def plot_and_save(x, ys, labels, title, x_axis, y_axis, axis_range='auto',
ylim=None, fig_path='fig', format='png'):
if axis_range is None:
plt.axis([x[0], x[-1], 0, 1])
elif type(axis_range) == type(list()):
plt.axis(axis_range)
elif axis_range == 'auto':
pass
if ylim is not None:
plt.ylim(*ylim)
plt.xlabel(x_axis)
plt.ylabel(y_axis)
plt.title(title)
lines = []
for y in ys:
l, = plt.plot(x, y)
lines.append(l)
if len(labels) == len(ys):
plt.legend(lines, labels, loc='best')
plt.grid(True, linestyle='-.', color='0.3')
plt.savefig(fig_path + '.' + format, format=format)
plt.clf()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def v_measure(cluster_labels, true_labels):
h_score = homogeneity_score(true_labels, cluster_labels)
c_score = completeness_score(true_labels, cluster_labels)
v_score = v_measure_score(true_labels, cluster_labels)
print('Homogeneity Score: %.6f' % h_score)
print('Completeness Score: %.6f' % c_score)
print('V Measure Score: %.6f' % v_score)
return h_score, c_score, v_score
def silhouette_analysis(X, cluster_labels, n_clusters, figname):
plt.xlim([-0.1, 1])
plt.ylim([0, len(X) + (n_clusters + 1) * 10])
silhouette_avg = silhouette_score(X, cluster_labels)
print('For n_clusters =', n_clusters,
'The average silhouette_score is :', silhouette_avg)
sample_silhouette_values = silhouette_samples(X, cluster_labels)
y_lower = 10
for i in range(n_clusters):
ith_cluster_silhouette_values = sample_silhouette_values[
cluster_labels == i]
ith_cluster_silhouette_values.sort()
size_cluster_i = ith_cluster_silhouette_values.shape[0]
y_upper = y_lower + size_cluster_i
color = cm.nipy_spectral(float(i) / n_clusters)
plt.fill_betweenx(np.arange(y_lower, y_upper), 0,
ith_cluster_silhouette_values, facecolor=color, edgecolor=color,
alpha=0.7)
plt.text(-0.05, y_lower + 0.5 * size_cluster_i, str(i))
y_lower = y_upper + 10
plt.title('The silhouette plot for the various clusters.')
plt.xlabel('The silhouette coefficient values')
plt.ylabel('Cluster label')
plt.axvline(x=silhouette_avg, color='red', linestyle='--')
plt.yticks([])
plt.xticks([-0.1, 0, 0.2, 0.4, 0.6, 0.8, 1])
plt.savefig(figname, format='png')
plt.clf()
def visualize_cluster(X, cluster_labels, n_clusters, centers, figname):
if X.shape[1] < 2:
print('Invalid shape for X: ', X.shape)
return
colors = cm.nipy_spectral(cluster_labels.astype(float) / n_clusters)
plt.scatter(X[:, 0], X[:, 1], marker='.', s=30, lw=0, alpha=0.7, c=
colors, edgecolor='k')
if len(centers) == n_clusters:
plt.scatter(centers[:, 0], centers[:, 1], marker='o', c='white',
alpha=1, s=200, edgecolor='k')
for i, c in enumerate(centers):
plt.scatter(c[0], c[1], marker='$%d$' % i, alpha=1, s=50,
edgecolor='k')
plt.title('The visualization of the clustered data.')
plt.xlabel('Feature space for the 1st feature')
plt.ylabel('Feature space for the 2nd feature')
plt.savefig(figname, format='png')
plt.clf()
def plot_gallery(title, images, figname, n_col=3, n_row=2, shape=(28, 28),
cmap=plt.cm.gray):
plt.figure(figsize=(2.0 * n_col, 2.26 * n_row))
plt.suptitle(title, size=16)
for i, comp in enumerate(images):
plt.subplot(n_row, n_col, i + 1)
vmax = max(comp.max(), -comp.min())
plt.imshow(comp.reshape(shape), cmap=cmap, interpolation='nearest',
vmin=-vmax, vmax=vmax)
plt.xticks(())
plt.yticks(())
plt.savefig(figname, format='png')
plt.clf()
def create_path(*arg, filename=None):
path = os.getcwd()
for directory in arg:
path = os.path.join(path, directory)
if not os.path.exists(path):
print("%s doesn't exist, creating..." % path)
os.mkdir(path)
if filename:
path = os.path.join(path, filename)
return path
def load_data(data_path, split_prop=0.2, is_shuffle=False, is_split=True):
pos_X, neg_X = [], []
with open(data_path, 'r') as f:
for line in f:
instance = list(map(float, line.strip().split(',')))
if instance[-1] == 1.0:
pos_X.append(instance[:-1])
else:
neg_X.append(instance[:-1])
if not is_split:
X, y = np.array(pos_X + neg_X), np.array([1] * len(pos_X) + [0] *
len(neg_X))
if is_shuffle:
indices = list(range(X.shape[0]))
shuffle(indices)
X, y = X[indices], y[indices]
return X, y, [], []
pos_test_size, neg_test_size = int(split_prop * len(pos_X)), int(
split_prop * len(neg_X))
pos_train_size, neg_train_size = len(pos_X) - pos_test_size, len(neg_X
) - neg_test_size
X_test, y_test = pos_X[:pos_test_size] + neg_X[:neg_test_size], [1
] * pos_test_size + [0] * neg_test_size
X_train, y_train = pos_X[pos_test_size:] + neg_X[neg_test_size:], [1
] * pos_train_size + [0] * neg_train_size
assert len(X_train) == len(y_train) and len(X_test) == len(y_test
), 'Dimention of X and y must be the same.'
X_train, X_test, y_train, y_test = np.array(X_train), np.array(X_test
), np.array(y_train), np.array(y_test)
if is_shuffle:
train_indices = list(range(X_train.shape[0]))
shuffle(train_indices)
test_indices = list(range(X_test.shape[0]))
shuffle(test_indices)
X_train, X_test, y_train, y_test = X_train[train_indices], X_test[
test_indices], y_train[train_indices], y_test[test_indices]
return X_train, X_test, y_train, y_test
def dump_data():
pass
<|reserved_special_token_0|>
def plot_learning_curve(train_scores_mean, train_scores_std,
val_scores_mean, val_scores_std, train_sizes, ylim=None, title='test',
fig_path='fig', format='png'):
plt.figure()
plt.title(title)
if ylim is not None:
plt.ylim(*ylim)
plt.xlabel('Training examples')
plt.ylabel('Score')
plt.grid(True, linestyle='-.', color='0.3')
plt.fill_between(train_sizes, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.1, color='r')
plt.fill_between(train_sizes, val_scores_mean - val_scores_std,
val_scores_mean + val_scores_std, alpha=0.1, color='g')
plt.plot(train_sizes, train_scores_mean, 'o-', color='r', label=
'Training score')
plt.plot(train_sizes, val_scores_mean, 'o-', color='g', label=
'Cross-validation score')
plt.legend(loc='best')
plt.savefig(fig_path + '/' + title + '.' + format, format=format)
plt.clf()
def plot_and_save(x, ys, labels, title, x_axis, y_axis, axis_range='auto',
ylim=None, fig_path='fig', format='png'):
if axis_range is None:
plt.axis([x[0], x[-1], 0, 1])
elif type(axis_range) == type(list()):
plt.axis(axis_range)
elif axis_range == 'auto':
pass
if ylim is not None:
plt.ylim(*ylim)
plt.xlabel(x_axis)
plt.ylabel(y_axis)
plt.title(title)
lines = []
for y in ys:
l, = plt.plot(x, y)
lines.append(l)
if len(labels) == len(ys):
plt.legend(lines, labels, loc='best')
plt.grid(True, linestyle='-.', color='0.3')
plt.savefig(fig_path + '.' + format, format=format)
plt.clf()
def print_score(scores, scoring, train=False):
if type(scoring) != type([]):
if train:
print('Train: %0.2f (+/- %0.2f)' % (np.mean(scores[
'train_score']), np.std(scores['train_score']) * 2))
print('Cross validation: %0.2f (+/- %0.2f)' % (np.mean(scores[
'test_score']), np.std(scores['test_score']) * 2))
return
for s_method in scoring:
if train:
print('Train: %0.2f (+/- %0.2f)' % (np.mean(scores['train_' +
s_method]), np.std(scores['train_' + s_method]) * 2))
print('Cross validation: %0.2f (+/- %0.2f)' % (np.mean(scores[
'test_' + s_method]), np.std(scores['test_' + s_method]) * 2))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
matplotlib.use('Agg')
<|reserved_special_token_0|>
warnings.simplefilter('ignore')
def v_measure(cluster_labels, true_labels):
h_score = homogeneity_score(true_labels, cluster_labels)
c_score = completeness_score(true_labels, cluster_labels)
v_score = v_measure_score(true_labels, cluster_labels)
print('Homogeneity Score: %.6f' % h_score)
print('Completeness Score: %.6f' % c_score)
print('V Measure Score: %.6f' % v_score)
return h_score, c_score, v_score
def silhouette_analysis(X, cluster_labels, n_clusters, figname):
plt.xlim([-0.1, 1])
plt.ylim([0, len(X) + (n_clusters + 1) * 10])
silhouette_avg = silhouette_score(X, cluster_labels)
print('For n_clusters =', n_clusters,
'The average silhouette_score is :', silhouette_avg)
sample_silhouette_values = silhouette_samples(X, cluster_labels)
y_lower = 10
for i in range(n_clusters):
ith_cluster_silhouette_values = sample_silhouette_values[
cluster_labels == i]
ith_cluster_silhouette_values.sort()
size_cluster_i = ith_cluster_silhouette_values.shape[0]
y_upper = y_lower + size_cluster_i
color = cm.nipy_spectral(float(i) / n_clusters)
plt.fill_betweenx(np.arange(y_lower, y_upper), 0,
ith_cluster_silhouette_values, facecolor=color, edgecolor=color,
alpha=0.7)
plt.text(-0.05, y_lower + 0.5 * size_cluster_i, str(i))
y_lower = y_upper + 10
plt.title('The silhouette plot for the various clusters.')
plt.xlabel('The silhouette coefficient values')
plt.ylabel('Cluster label')
plt.axvline(x=silhouette_avg, color='red', linestyle='--')
plt.yticks([])
plt.xticks([-0.1, 0, 0.2, 0.4, 0.6, 0.8, 1])
plt.savefig(figname, format='png')
plt.clf()
def visualize_cluster(X, cluster_labels, n_clusters, centers, figname):
if X.shape[1] < 2:
print('Invalid shape for X: ', X.shape)
return
colors = cm.nipy_spectral(cluster_labels.astype(float) / n_clusters)
plt.scatter(X[:, 0], X[:, 1], marker='.', s=30, lw=0, alpha=0.7, c=
colors, edgecolor='k')
if len(centers) == n_clusters:
plt.scatter(centers[:, 0], centers[:, 1], marker='o', c='white',
alpha=1, s=200, edgecolor='k')
for i, c in enumerate(centers):
plt.scatter(c[0], c[1], marker='$%d$' % i, alpha=1, s=50,
edgecolor='k')
plt.title('The visualization of the clustered data.')
plt.xlabel('Feature space for the 1st feature')
plt.ylabel('Feature space for the 2nd feature')
plt.savefig(figname, format='png')
plt.clf()
def plot_gallery(title, images, figname, n_col=3, n_row=2, shape=(28, 28),
cmap=plt.cm.gray):
plt.figure(figsize=(2.0 * n_col, 2.26 * n_row))
plt.suptitle(title, size=16)
for i, comp in enumerate(images):
plt.subplot(n_row, n_col, i + 1)
vmax = max(comp.max(), -comp.min())
plt.imshow(comp.reshape(shape), cmap=cmap, interpolation='nearest',
vmin=-vmax, vmax=vmax)
plt.xticks(())
plt.yticks(())
plt.savefig(figname, format='png')
plt.clf()
def create_path(*arg, filename=None):
path = os.getcwd()
for directory in arg:
path = os.path.join(path, directory)
if not os.path.exists(path):
print("%s doesn't exist, creating..." % path)
os.mkdir(path)
if filename:
path = os.path.join(path, filename)
return path
def load_data(data_path, split_prop=0.2, is_shuffle=False, is_split=True):
pos_X, neg_X = [], []
with open(data_path, 'r') as f:
for line in f:
instance = list(map(float, line.strip().split(',')))
if instance[-1] == 1.0:
pos_X.append(instance[:-1])
else:
neg_X.append(instance[:-1])
if not is_split:
X, y = np.array(pos_X + neg_X), np.array([1] * len(pos_X) + [0] *
len(neg_X))
if is_shuffle:
indices = list(range(X.shape[0]))
shuffle(indices)
X, y = X[indices], y[indices]
return X, y, [], []
pos_test_size, neg_test_size = int(split_prop * len(pos_X)), int(
split_prop * len(neg_X))
pos_train_size, neg_train_size = len(pos_X) - pos_test_size, len(neg_X
) - neg_test_size
X_test, y_test = pos_X[:pos_test_size] + neg_X[:neg_test_size], [1
] * pos_test_size + [0] * neg_test_size
X_train, y_train = pos_X[pos_test_size:] + neg_X[neg_test_size:], [1
] * pos_train_size + [0] * neg_train_size
assert len(X_train) == len(y_train) and len(X_test) == len(y_test
), 'Dimention of X and y must be the same.'
X_train, X_test, y_train, y_test = np.array(X_train), np.array(X_test
), np.array(y_train), np.array(y_test)
if is_shuffle:
train_indices = list(range(X_train.shape[0]))
shuffle(train_indices)
test_indices = list(range(X_test.shape[0]))
shuffle(test_indices)
X_train, X_test, y_train, y_test = X_train[train_indices], X_test[
test_indices], y_train[train_indices], y_test[test_indices]
return X_train, X_test, y_train, y_test
def dump_data():
pass
def analyze_data(data_path, threshold=50):
data = []
with open(data_path, 'r') as f:
for line in f:
instance = list(map(float, line.strip().split(',')))
data.append(instance)
count = [0] * len(data[0])
for instance in data:
for i in range(len(instance)):
if instance[i] != 0.0:
count[i] += 1
total = 0
for c in count:
if c >= threshold:
total += 1
return count, total
def plot_learning_curve(train_scores_mean, train_scores_std,
val_scores_mean, val_scores_std, train_sizes, ylim=None, title='test',
fig_path='fig', format='png'):
plt.figure()
plt.title(title)
if ylim is not None:
plt.ylim(*ylim)
plt.xlabel('Training examples')
plt.ylabel('Score')
plt.grid(True, linestyle='-.', color='0.3')
plt.fill_between(train_sizes, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.1, color='r')
plt.fill_between(train_sizes, val_scores_mean - val_scores_std,
val_scores_mean + val_scores_std, alpha=0.1, color='g')
plt.plot(train_sizes, train_scores_mean, 'o-', color='r', label=
'Training score')
plt.plot(train_sizes, val_scores_mean, 'o-', color='g', label=
'Cross-validation score')
plt.legend(loc='best')
plt.savefig(fig_path + '/' + title + '.' + format, format=format)
plt.clf()
def plot_and_save(x, ys, labels, title, x_axis, y_axis, axis_range='auto',
ylim=None, fig_path='fig', format='png'):
if axis_range is None:
plt.axis([x[0], x[-1], 0, 1])
elif type(axis_range) == type(list()):
plt.axis(axis_range)
elif axis_range == 'auto':
pass
if ylim is not None:
plt.ylim(*ylim)
plt.xlabel(x_axis)
plt.ylabel(y_axis)
plt.title(title)
lines = []
for y in ys:
l, = plt.plot(x, y)
lines.append(l)
if len(labels) == len(ys):
plt.legend(lines, labels, loc='best')
plt.grid(True, linestyle='-.', color='0.3')
plt.savefig(fig_path + '.' + format, format=format)
plt.clf()
def print_score(scores, scoring, train=False):
if type(scoring) != type([]):
if train:
print('Train: %0.2f (+/- %0.2f)' % (np.mean(scores[
'train_score']), np.std(scores['train_score']) * 2))
print('Cross validation: %0.2f (+/- %0.2f)' % (np.mean(scores[
'test_score']), np.std(scores['test_score']) * 2))
return
for s_method in scoring:
if train:
print('Train: %0.2f (+/- %0.2f)' % (np.mean(scores['train_' +
s_method]), np.std(scores['train_' + s_method]) * 2))
print('Cross validation: %0.2f (+/- %0.2f)' % (np.mean(scores[
'test_' + s_method]), np.std(scores['test_' + s_method]) * 2))
<|reserved_special_token_1|>
from sklearn.model_selection import train_test_split
from sklearn.metrics import silhouette_samples, silhouette_score
from sklearn.metrics.cluster import homogeneity_score, completeness_score, v_measure_score
from sklearn import datasets
from random import shuffle
import os
import matplotlib
matplotlib.use('Agg')
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import warnings
warnings.simplefilter('ignore')
def v_measure(cluster_labels, true_labels):
h_score = homogeneity_score(true_labels, cluster_labels)
c_score = completeness_score(true_labels, cluster_labels)
v_score = v_measure_score(true_labels, cluster_labels)
print('Homogeneity Score: %.6f' % h_score)
print('Completeness Score: %.6f' % c_score)
print('V Measure Score: %.6f' % v_score)
return h_score, c_score, v_score
def silhouette_analysis(X, cluster_labels, n_clusters, figname):
plt.xlim([-0.1, 1])
plt.ylim([0, len(X) + (n_clusters + 1) * 10])
silhouette_avg = silhouette_score(X, cluster_labels)
print('For n_clusters =', n_clusters,
'The average silhouette_score is :', silhouette_avg)
sample_silhouette_values = silhouette_samples(X, cluster_labels)
y_lower = 10
for i in range(n_clusters):
ith_cluster_silhouette_values = sample_silhouette_values[
cluster_labels == i]
ith_cluster_silhouette_values.sort()
size_cluster_i = ith_cluster_silhouette_values.shape[0]
y_upper = y_lower + size_cluster_i
color = cm.nipy_spectral(float(i) / n_clusters)
plt.fill_betweenx(np.arange(y_lower, y_upper), 0,
ith_cluster_silhouette_values, facecolor=color, edgecolor=color,
alpha=0.7)
plt.text(-0.05, y_lower + 0.5 * size_cluster_i, str(i))
y_lower = y_upper + 10
plt.title('The silhouette plot for the various clusters.')
plt.xlabel('The silhouette coefficient values')
plt.ylabel('Cluster label')
plt.axvline(x=silhouette_avg, color='red', linestyle='--')
plt.yticks([])
plt.xticks([-0.1, 0, 0.2, 0.4, 0.6, 0.8, 1])
plt.savefig(figname, format='png')
plt.clf()
def visualize_cluster(X, cluster_labels, n_clusters, centers, figname):
if X.shape[1] < 2:
print('Invalid shape for X: ', X.shape)
return
colors = cm.nipy_spectral(cluster_labels.astype(float) / n_clusters)
plt.scatter(X[:, 0], X[:, 1], marker='.', s=30, lw=0, alpha=0.7, c=
colors, edgecolor='k')
if len(centers) == n_clusters:
plt.scatter(centers[:, 0], centers[:, 1], marker='o', c='white',
alpha=1, s=200, edgecolor='k')
for i, c in enumerate(centers):
plt.scatter(c[0], c[1], marker='$%d$' % i, alpha=1, s=50,
edgecolor='k')
plt.title('The visualization of the clustered data.')
plt.xlabel('Feature space for the 1st feature')
plt.ylabel('Feature space for the 2nd feature')
plt.savefig(figname, format='png')
plt.clf()
def plot_gallery(title, images, figname, n_col=3, n_row=2, shape=(28, 28),
cmap=plt.cm.gray):
plt.figure(figsize=(2.0 * n_col, 2.26 * n_row))
plt.suptitle(title, size=16)
for i, comp in enumerate(images):
plt.subplot(n_row, n_col, i + 1)
vmax = max(comp.max(), -comp.min())
plt.imshow(comp.reshape(shape), cmap=cmap, interpolation='nearest',
vmin=-vmax, vmax=vmax)
plt.xticks(())
plt.yticks(())
plt.savefig(figname, format='png')
plt.clf()
def create_path(*arg, filename=None):
path = os.getcwd()
for directory in arg:
path = os.path.join(path, directory)
if not os.path.exists(path):
print("%s doesn't exist, creating..." % path)
os.mkdir(path)
if filename:
path = os.path.join(path, filename)
return path
def load_data(data_path, split_prop=0.2, is_shuffle=False, is_split=True):
pos_X, neg_X = [], []
with open(data_path, 'r') as f:
for line in f:
instance = list(map(float, line.strip().split(',')))
if instance[-1] == 1.0:
pos_X.append(instance[:-1])
else:
neg_X.append(instance[:-1])
if not is_split:
X, y = np.array(pos_X + neg_X), np.array([1] * len(pos_X) + [0] *
len(neg_X))
if is_shuffle:
indices = list(range(X.shape[0]))
shuffle(indices)
X, y = X[indices], y[indices]
return X, y, [], []
pos_test_size, neg_test_size = int(split_prop * len(pos_X)), int(
split_prop * len(neg_X))
pos_train_size, neg_train_size = len(pos_X) - pos_test_size, len(neg_X
) - neg_test_size
X_test, y_test = pos_X[:pos_test_size] + neg_X[:neg_test_size], [1
] * pos_test_size + [0] * neg_test_size
X_train, y_train = pos_X[pos_test_size:] + neg_X[neg_test_size:], [1
] * pos_train_size + [0] * neg_train_size
assert len(X_train) == len(y_train) and len(X_test) == len(y_test
), 'Dimention of X and y must be the same.'
X_train, X_test, y_train, y_test = np.array(X_train), np.array(X_test
), np.array(y_train), np.array(y_test)
if is_shuffle:
train_indices = list(range(X_train.shape[0]))
shuffle(train_indices)
test_indices = list(range(X_test.shape[0]))
shuffle(test_indices)
X_train, X_test, y_train, y_test = X_train[train_indices], X_test[
test_indices], y_train[train_indices], y_test[test_indices]
return X_train, X_test, y_train, y_test
def dump_data():
pass
def analyze_data(data_path, threshold=50):
data = []
with open(data_path, 'r') as f:
for line in f:
instance = list(map(float, line.strip().split(',')))
data.append(instance)
count = [0] * len(data[0])
for instance in data:
for i in range(len(instance)):
if instance[i] != 0.0:
count[i] += 1
total = 0
for c in count:
if c >= threshold:
total += 1
return count, total
def plot_learning_curve(train_scores_mean, train_scores_std,
val_scores_mean, val_scores_std, train_sizes, ylim=None, title='test',
fig_path='fig', format='png'):
plt.figure()
plt.title(title)
if ylim is not None:
plt.ylim(*ylim)
plt.xlabel('Training examples')
plt.ylabel('Score')
plt.grid(True, linestyle='-.', color='0.3')
plt.fill_between(train_sizes, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.1, color='r')
plt.fill_between(train_sizes, val_scores_mean - val_scores_std,
val_scores_mean + val_scores_std, alpha=0.1, color='g')
plt.plot(train_sizes, train_scores_mean, 'o-', color='r', label=
'Training score')
plt.plot(train_sizes, val_scores_mean, 'o-', color='g', label=
'Cross-validation score')
plt.legend(loc='best')
plt.savefig(fig_path + '/' + title + '.' + format, format=format)
plt.clf()
def plot_and_save(x, ys, labels, title, x_axis, y_axis, axis_range='auto',
ylim=None, fig_path='fig', format='png'):
if axis_range is None:
plt.axis([x[0], x[-1], 0, 1])
elif type(axis_range) == type(list()):
plt.axis(axis_range)
elif axis_range == 'auto':
pass
if ylim is not None:
plt.ylim(*ylim)
plt.xlabel(x_axis)
plt.ylabel(y_axis)
plt.title(title)
lines = []
for y in ys:
l, = plt.plot(x, y)
lines.append(l)
if len(labels) == len(ys):
plt.legend(lines, labels, loc='best')
plt.grid(True, linestyle='-.', color='0.3')
plt.savefig(fig_path + '.' + format, format=format)
plt.clf()
def print_score(scores, scoring, train=False):
if type(scoring) != type([]):
if train:
print('Train: %0.2f (+/- %0.2f)' % (np.mean(scores[
'train_score']), np.std(scores['train_score']) * 2))
print('Cross validation: %0.2f (+/- %0.2f)' % (np.mean(scores[
'test_score']), np.std(scores['test_score']) * 2))
return
for s_method in scoring:
if train:
print('Train: %0.2f (+/- %0.2f)' % (np.mean(scores['train_' +
s_method]), np.std(scores['train_' + s_method]) * 2))
print('Cross validation: %0.2f (+/- %0.2f)' % (np.mean(scores[
'test_' + s_method]), np.std(scores['test_' + s_method]) * 2))
<|reserved_special_token_1|>
from sklearn.model_selection import train_test_split
from sklearn.metrics import silhouette_samples, silhouette_score
from sklearn.metrics.cluster import homogeneity_score, completeness_score, v_measure_score
from sklearn import datasets
from random import shuffle
import os
import matplotlib
matplotlib.use('Agg')
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import warnings
warnings.simplefilter("ignore")
def v_measure(cluster_labels, true_labels):
h_score = homogeneity_score(true_labels, cluster_labels)
c_score = completeness_score(true_labels, cluster_labels)
v_score = v_measure_score(true_labels, cluster_labels)
print("Homogeneity Score: %.6f" % h_score)
print("Completeness Score: %.6f" % c_score)
print("V Measure Score: %.6f" % v_score)
return h_score, c_score, v_score
def silhouette_analysis(X, cluster_labels, n_clusters, figname):
plt.xlim([-0.1, 1])
plt.ylim([0, len(X) + (n_clusters + 1) * 10])
silhouette_avg = silhouette_score(X, cluster_labels)
print("For n_clusters =", n_clusters,
"The average silhouette_score is :", silhouette_avg)
# Compute the silhouette scores for each sample
sample_silhouette_values = silhouette_samples(X, cluster_labels)
y_lower = 10
for i in range(n_clusters):
# Aggregate the silhouette scores for samples belonging to
# cluster i, and sort them
ith_cluster_silhouette_values = sample_silhouette_values[cluster_labels == i]
ith_cluster_silhouette_values.sort()
size_cluster_i = ith_cluster_silhouette_values.shape[0]
y_upper = y_lower + size_cluster_i
color = cm.nipy_spectral(float(i) / n_clusters)
plt.fill_betweenx(np.arange(y_lower, y_upper),
0, ith_cluster_silhouette_values,
facecolor=color, edgecolor=color, alpha=0.7)
# Label the silhouette plots with their cluster numbers at the middle
plt.text(-0.05, y_lower + 0.5 * size_cluster_i, str(i))
# Compute the new y_lower for next plot
y_lower = y_upper + 10 # 10 for the 0 samples
plt.title("The silhouette plot for the various clusters.")
plt.xlabel("The silhouette coefficient values")
plt.ylabel("Cluster label")
# The vertical line for average silhouette score of all the values
plt.axvline(x=silhouette_avg, color="red", linestyle="--")
plt.yticks([]) # Clear the yaxis labels / ticks
plt.xticks([-0.1, 0, 0.2, 0.4, 0.6, 0.8, 1])
plt.savefig(figname, format='png')
plt.clf()
def visualize_cluster(X, cluster_labels, n_clusters, centers, figname):
if X.shape[1] < 2:
print ("Invalid shape for X: ", X.shape)
return
colors = cm.nipy_spectral(cluster_labels.astype(float) / n_clusters)
plt.scatter(X[:, 0], X[:, 1], marker='.', s=30, lw=0, alpha=0.7,
c=colors, edgecolor='k')
# Draw white circles at cluster centers
if len(centers) == n_clusters:
plt.scatter(centers[:, 0], centers[:, 1], marker='o',
c="white", alpha=1, s=200, edgecolor='k')
for i, c in enumerate(centers):
plt.scatter(c[0], c[1], marker='$%d$' % i, alpha=1,
s=50, edgecolor='k')
plt.title("The visualization of the clustered data.")
plt.xlabel("Feature space for the 1st feature")
plt.ylabel("Feature space for the 2nd feature")
plt.savefig(figname, format='png')
plt.clf()
def plot_gallery(title, images, figname, n_col=3, n_row=2, shape=(28, 28), cmap=plt.cm.gray):
plt.figure(figsize=(2. * n_col, 2.26 * n_row))
plt.suptitle(title, size=16)
for i, comp in enumerate(images):
plt.subplot(n_row, n_col, i + 1)
vmax = max(comp.max(), -comp.min())
plt.imshow(comp.reshape(shape), cmap=cmap,
interpolation='nearest',
vmin=-vmax, vmax=vmax)
plt.xticks(())
plt.yticks(())
#plt.subplots_adjust(0.01, 0.05, 0.99, 0.93, 0.04, 0.)
plt.savefig(figname, format='png')
plt.clf()
#plt.subplots_adjust()
def create_path(*arg, filename=None):
path = os.getcwd()
for directory in arg:
path = os.path.join(path, directory)
if not os.path.exists(path):
print('%s doesn\'t exist, creating...' % path)
os.mkdir(path)
if filename:
path = os.path.join(path, filename)
return path
def load_data(data_path, split_prop=0.2, is_shuffle=False, is_split=True):
pos_X, neg_X = [], []
with open(data_path, 'r') as f:
for line in f:
instance = list(map(float, line.strip().split(',')))
if instance[-1] == 1.0:
pos_X.append(instance[:-1])
else:
neg_X.append(instance[:-1])
if not is_split:
X, y = np.array(pos_X + neg_X), np.array([1] * len(pos_X) + [0] * len(neg_X))
if is_shuffle:
indices = list(range(X.shape[0]))
shuffle(indices)
X, y = X[indices], y[indices]
return X, y, [], []
pos_test_size, neg_test_size = int(split_prop * len(pos_X)), int(split_prop * len(neg_X))
pos_train_size, neg_train_size = len(pos_X) - pos_test_size, len(neg_X) - neg_test_size
X_test, y_test = pos_X[:pos_test_size] + neg_X[:neg_test_size], [1] * pos_test_size + [0] * neg_test_size
X_train, y_train = pos_X[pos_test_size:] + neg_X[neg_test_size:], [1] * pos_train_size + [0] * neg_train_size
assert len(X_train) == len(y_train) and len(X_test) == len(y_test), "Dimention of X and y must be the same."
X_train, X_test, y_train, y_test = np.array(X_train), np.array(X_test), np.array(y_train), np.array(y_test)
if is_shuffle:
train_indices = list(range(X_train.shape[0]))
shuffle(train_indices)
test_indices = list(range(X_test.shape[0]))
shuffle(test_indices)
X_train, X_test, y_train, y_test = X_train[train_indices], X_test[test_indices], y_train[train_indices], y_test[test_indices]
return X_train, X_test, y_train, y_test
def dump_data():
#Need implement
pass
def analyze_data(data_path, threshold=50):
data = []
with open(data_path, 'r') as f:
for line in f:
instance = list(map(float, line.strip().split(',')))
data.append(instance)
count = [0] * len(data[0])
for instance in data:
for i in range(len(instance)):
if instance[i] != 0.0:
count[i] += 1
total = 0
for c in count:
if c >= threshold:
total += 1
return count, total
def plot_learning_curve(train_scores_mean,
train_scores_std,
val_scores_mean,
val_scores_std,
train_sizes,
ylim=None,
title='test',
fig_path='fig',
format='png'):
plt.figure()
plt.title(title)
if ylim is not None:
plt.ylim(*ylim)
plt.xlabel("Training examples")
plt.ylabel("Score")
plt.grid(True, linestyle = "-.", color = '0.3')
plt.fill_between(train_sizes, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.1,
color="r")
plt.fill_between(train_sizes, val_scores_mean - val_scores_std,
val_scores_mean + val_scores_std, alpha=0.1, color="g")
plt.plot(train_sizes, train_scores_mean, 'o-', color="r",
label="Training score")
plt.plot(train_sizes, val_scores_mean, 'o-', color="g",
label="Cross-validation score")
plt.legend(loc="best")
plt.savefig(fig_path + '/' + title + '.' + format, format=format)
plt.clf()
def plot_and_save(x, ys, labels, title, x_axis, y_axis, axis_range='auto', ylim=None, fig_path='fig', format='png'):
if axis_range is None:
plt.axis([x[0], x[-1], 0, 1])
elif type(axis_range) == type(list()):
plt.axis(axis_range)
elif axis_range == 'auto':
pass
if ylim is not None:
plt.ylim(*ylim)
plt.xlabel(x_axis)
plt.ylabel(y_axis)
plt.title(title)
lines = []
for y in ys:
l, = plt.plot(x, y)
lines.append(l)
if len(labels) == len(ys):
plt.legend(lines, labels, loc="best")
plt.grid(True, linestyle = "-.", color = '0.3')
plt.savefig(fig_path + '.' + format, format=format)
plt.clf()
def print_score(scores, scoring, train=False):
if type(scoring) != type([]):
if train:
print("Train: %0.2f (+/- %0.2f)" % (np.mean(scores['train_score']), np.std(scores['train_score']) * 2))
print("Cross validation: %0.2f (+/- %0.2f)" % (np.mean(scores['test_score']), np.std(scores['test_score']) * 2))
return
for s_method in scoring:
if train:
print("Train: %0.2f (+/- %0.2f)" % (np.mean(scores['train_' + s_method]), np.std(scores['train_' + s_method]) * 2))
print("Cross validation: %0.2f (+/- %0.2f)" % (np.mean(scores['test_' + s_method]), np.std(scores['test_' + s_method]) * 2))
|
flexible
|
{
"blob_id": "fe63d9b0939bc91d2da14e4d966b33575eab5394",
"index": 2531,
"step-1": "<mask token>\n\n\ndef v_measure(cluster_labels, true_labels):\n h_score = homogeneity_score(true_labels, cluster_labels)\n c_score = completeness_score(true_labels, cluster_labels)\n v_score = v_measure_score(true_labels, cluster_labels)\n print('Homogeneity Score: %.6f' % h_score)\n print('Completeness Score: %.6f' % c_score)\n print('V Measure Score: %.6f' % v_score)\n return h_score, c_score, v_score\n\n\ndef silhouette_analysis(X, cluster_labels, n_clusters, figname):\n plt.xlim([-0.1, 1])\n plt.ylim([0, len(X) + (n_clusters + 1) * 10])\n silhouette_avg = silhouette_score(X, cluster_labels)\n print('For n_clusters =', n_clusters,\n 'The average silhouette_score is :', silhouette_avg)\n sample_silhouette_values = silhouette_samples(X, cluster_labels)\n y_lower = 10\n for i in range(n_clusters):\n ith_cluster_silhouette_values = sample_silhouette_values[\n cluster_labels == i]\n ith_cluster_silhouette_values.sort()\n size_cluster_i = ith_cluster_silhouette_values.shape[0]\n y_upper = y_lower + size_cluster_i\n color = cm.nipy_spectral(float(i) / n_clusters)\n plt.fill_betweenx(np.arange(y_lower, y_upper), 0,\n ith_cluster_silhouette_values, facecolor=color, edgecolor=color,\n alpha=0.7)\n plt.text(-0.05, y_lower + 0.5 * size_cluster_i, str(i))\n y_lower = y_upper + 10\n plt.title('The silhouette plot for the various clusters.')\n plt.xlabel('The silhouette coefficient values')\n plt.ylabel('Cluster label')\n plt.axvline(x=silhouette_avg, color='red', linestyle='--')\n plt.yticks([])\n plt.xticks([-0.1, 0, 0.2, 0.4, 0.6, 0.8, 1])\n plt.savefig(figname, format='png')\n plt.clf()\n\n\ndef visualize_cluster(X, cluster_labels, n_clusters, centers, figname):\n if X.shape[1] < 2:\n print('Invalid shape for X: ', X.shape)\n return\n colors = cm.nipy_spectral(cluster_labels.astype(float) / n_clusters)\n plt.scatter(X[:, 0], X[:, 1], marker='.', s=30, lw=0, alpha=0.7, c=\n colors, edgecolor='k')\n if len(centers) == n_clusters:\n plt.scatter(centers[:, 0], centers[:, 1], marker='o', c='white',\n alpha=1, s=200, edgecolor='k')\n for i, c in enumerate(centers):\n plt.scatter(c[0], c[1], marker='$%d$' % i, alpha=1, s=50,\n edgecolor='k')\n plt.title('The visualization of the clustered data.')\n plt.xlabel('Feature space for the 1st feature')\n plt.ylabel('Feature space for the 2nd feature')\n plt.savefig(figname, format='png')\n plt.clf()\n\n\ndef plot_gallery(title, images, figname, n_col=3, n_row=2, shape=(28, 28),\n cmap=plt.cm.gray):\n plt.figure(figsize=(2.0 * n_col, 2.26 * n_row))\n plt.suptitle(title, size=16)\n for i, comp in enumerate(images):\n plt.subplot(n_row, n_col, i + 1)\n vmax = max(comp.max(), -comp.min())\n plt.imshow(comp.reshape(shape), cmap=cmap, interpolation='nearest',\n vmin=-vmax, vmax=vmax)\n plt.xticks(())\n plt.yticks(())\n plt.savefig(figname, format='png')\n plt.clf()\n\n\ndef create_path(*arg, filename=None):\n path = os.getcwd()\n for directory in arg:\n path = os.path.join(path, directory)\n if not os.path.exists(path):\n print(\"%s doesn't exist, creating...\" % path)\n os.mkdir(path)\n if filename:\n path = os.path.join(path, filename)\n return path\n\n\ndef load_data(data_path, split_prop=0.2, is_shuffle=False, is_split=True):\n pos_X, neg_X = [], []\n with open(data_path, 'r') as f:\n for line in f:\n instance = list(map(float, line.strip().split(',')))\n if instance[-1] == 1.0:\n pos_X.append(instance[:-1])\n else:\n neg_X.append(instance[:-1])\n if not is_split:\n X, y = np.array(pos_X + neg_X), np.array([1] * len(pos_X) + [0] *\n len(neg_X))\n if is_shuffle:\n indices = list(range(X.shape[0]))\n shuffle(indices)\n X, y = X[indices], y[indices]\n return X, y, [], []\n pos_test_size, neg_test_size = int(split_prop * len(pos_X)), int(\n split_prop * len(neg_X))\n pos_train_size, neg_train_size = len(pos_X) - pos_test_size, len(neg_X\n ) - neg_test_size\n X_test, y_test = pos_X[:pos_test_size] + neg_X[:neg_test_size], [1\n ] * pos_test_size + [0] * neg_test_size\n X_train, y_train = pos_X[pos_test_size:] + neg_X[neg_test_size:], [1\n ] * pos_train_size + [0] * neg_train_size\n assert len(X_train) == len(y_train) and len(X_test) == len(y_test\n ), 'Dimention of X and y must be the same.'\n X_train, X_test, y_train, y_test = np.array(X_train), np.array(X_test\n ), np.array(y_train), np.array(y_test)\n if is_shuffle:\n train_indices = list(range(X_train.shape[0]))\n shuffle(train_indices)\n test_indices = list(range(X_test.shape[0]))\n shuffle(test_indices)\n X_train, X_test, y_train, y_test = X_train[train_indices], X_test[\n test_indices], y_train[train_indices], y_test[test_indices]\n return X_train, X_test, y_train, y_test\n\n\n<mask token>\n\n\ndef plot_learning_curve(train_scores_mean, train_scores_std,\n val_scores_mean, val_scores_std, train_sizes, ylim=None, title='test',\n fig_path='fig', format='png'):\n plt.figure()\n plt.title(title)\n if ylim is not None:\n plt.ylim(*ylim)\n plt.xlabel('Training examples')\n plt.ylabel('Score')\n plt.grid(True, linestyle='-.', color='0.3')\n plt.fill_between(train_sizes, train_scores_mean - train_scores_std, \n train_scores_mean + train_scores_std, alpha=0.1, color='r')\n plt.fill_between(train_sizes, val_scores_mean - val_scores_std, \n val_scores_mean + val_scores_std, alpha=0.1, color='g')\n plt.plot(train_sizes, train_scores_mean, 'o-', color='r', label=\n 'Training score')\n plt.plot(train_sizes, val_scores_mean, 'o-', color='g', label=\n 'Cross-validation score')\n plt.legend(loc='best')\n plt.savefig(fig_path + '/' + title + '.' + format, format=format)\n plt.clf()\n\n\ndef plot_and_save(x, ys, labels, title, x_axis, y_axis, axis_range='auto',\n ylim=None, fig_path='fig', format='png'):\n if axis_range is None:\n plt.axis([x[0], x[-1], 0, 1])\n elif type(axis_range) == type(list()):\n plt.axis(axis_range)\n elif axis_range == 'auto':\n pass\n if ylim is not None:\n plt.ylim(*ylim)\n plt.xlabel(x_axis)\n plt.ylabel(y_axis)\n plt.title(title)\n lines = []\n for y in ys:\n l, = plt.plot(x, y)\n lines.append(l)\n if len(labels) == len(ys):\n plt.legend(lines, labels, loc='best')\n plt.grid(True, linestyle='-.', color='0.3')\n plt.savefig(fig_path + '.' + format, format=format)\n plt.clf()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef v_measure(cluster_labels, true_labels):\n h_score = homogeneity_score(true_labels, cluster_labels)\n c_score = completeness_score(true_labels, cluster_labels)\n v_score = v_measure_score(true_labels, cluster_labels)\n print('Homogeneity Score: %.6f' % h_score)\n print('Completeness Score: %.6f' % c_score)\n print('V Measure Score: %.6f' % v_score)\n return h_score, c_score, v_score\n\n\ndef silhouette_analysis(X, cluster_labels, n_clusters, figname):\n plt.xlim([-0.1, 1])\n plt.ylim([0, len(X) + (n_clusters + 1) * 10])\n silhouette_avg = silhouette_score(X, cluster_labels)\n print('For n_clusters =', n_clusters,\n 'The average silhouette_score is :', silhouette_avg)\n sample_silhouette_values = silhouette_samples(X, cluster_labels)\n y_lower = 10\n for i in range(n_clusters):\n ith_cluster_silhouette_values = sample_silhouette_values[\n cluster_labels == i]\n ith_cluster_silhouette_values.sort()\n size_cluster_i = ith_cluster_silhouette_values.shape[0]\n y_upper = y_lower + size_cluster_i\n color = cm.nipy_spectral(float(i) / n_clusters)\n plt.fill_betweenx(np.arange(y_lower, y_upper), 0,\n ith_cluster_silhouette_values, facecolor=color, edgecolor=color,\n alpha=0.7)\n plt.text(-0.05, y_lower + 0.5 * size_cluster_i, str(i))\n y_lower = y_upper + 10\n plt.title('The silhouette plot for the various clusters.')\n plt.xlabel('The silhouette coefficient values')\n plt.ylabel('Cluster label')\n plt.axvline(x=silhouette_avg, color='red', linestyle='--')\n plt.yticks([])\n plt.xticks([-0.1, 0, 0.2, 0.4, 0.6, 0.8, 1])\n plt.savefig(figname, format='png')\n plt.clf()\n\n\ndef visualize_cluster(X, cluster_labels, n_clusters, centers, figname):\n if X.shape[1] < 2:\n print('Invalid shape for X: ', X.shape)\n return\n colors = cm.nipy_spectral(cluster_labels.astype(float) / n_clusters)\n plt.scatter(X[:, 0], X[:, 1], marker='.', s=30, lw=0, alpha=0.7, c=\n colors, edgecolor='k')\n if len(centers) == n_clusters:\n plt.scatter(centers[:, 0], centers[:, 1], marker='o', c='white',\n alpha=1, s=200, edgecolor='k')\n for i, c in enumerate(centers):\n plt.scatter(c[0], c[1], marker='$%d$' % i, alpha=1, s=50,\n edgecolor='k')\n plt.title('The visualization of the clustered data.')\n plt.xlabel('Feature space for the 1st feature')\n plt.ylabel('Feature space for the 2nd feature')\n plt.savefig(figname, format='png')\n plt.clf()\n\n\ndef plot_gallery(title, images, figname, n_col=3, n_row=2, shape=(28, 28),\n cmap=plt.cm.gray):\n plt.figure(figsize=(2.0 * n_col, 2.26 * n_row))\n plt.suptitle(title, size=16)\n for i, comp in enumerate(images):\n plt.subplot(n_row, n_col, i + 1)\n vmax = max(comp.max(), -comp.min())\n plt.imshow(comp.reshape(shape), cmap=cmap, interpolation='nearest',\n vmin=-vmax, vmax=vmax)\n plt.xticks(())\n plt.yticks(())\n plt.savefig(figname, format='png')\n plt.clf()\n\n\ndef create_path(*arg, filename=None):\n path = os.getcwd()\n for directory in arg:\n path = os.path.join(path, directory)\n if not os.path.exists(path):\n print(\"%s doesn't exist, creating...\" % path)\n os.mkdir(path)\n if filename:\n path = os.path.join(path, filename)\n return path\n\n\ndef load_data(data_path, split_prop=0.2, is_shuffle=False, is_split=True):\n pos_X, neg_X = [], []\n with open(data_path, 'r') as f:\n for line in f:\n instance = list(map(float, line.strip().split(',')))\n if instance[-1] == 1.0:\n pos_X.append(instance[:-1])\n else:\n neg_X.append(instance[:-1])\n if not is_split:\n X, y = np.array(pos_X + neg_X), np.array([1] * len(pos_X) + [0] *\n len(neg_X))\n if is_shuffle:\n indices = list(range(X.shape[0]))\n shuffle(indices)\n X, y = X[indices], y[indices]\n return X, y, [], []\n pos_test_size, neg_test_size = int(split_prop * len(pos_X)), int(\n split_prop * len(neg_X))\n pos_train_size, neg_train_size = len(pos_X) - pos_test_size, len(neg_X\n ) - neg_test_size\n X_test, y_test = pos_X[:pos_test_size] + neg_X[:neg_test_size], [1\n ] * pos_test_size + [0] * neg_test_size\n X_train, y_train = pos_X[pos_test_size:] + neg_X[neg_test_size:], [1\n ] * pos_train_size + [0] * neg_train_size\n assert len(X_train) == len(y_train) and len(X_test) == len(y_test\n ), 'Dimention of X and y must be the same.'\n X_train, X_test, y_train, y_test = np.array(X_train), np.array(X_test\n ), np.array(y_train), np.array(y_test)\n if is_shuffle:\n train_indices = list(range(X_train.shape[0]))\n shuffle(train_indices)\n test_indices = list(range(X_test.shape[0]))\n shuffle(test_indices)\n X_train, X_test, y_train, y_test = X_train[train_indices], X_test[\n test_indices], y_train[train_indices], y_test[test_indices]\n return X_train, X_test, y_train, y_test\n\n\ndef dump_data():\n pass\n\n\n<mask token>\n\n\ndef plot_learning_curve(train_scores_mean, train_scores_std,\n val_scores_mean, val_scores_std, train_sizes, ylim=None, title='test',\n fig_path='fig', format='png'):\n plt.figure()\n plt.title(title)\n if ylim is not None:\n plt.ylim(*ylim)\n plt.xlabel('Training examples')\n plt.ylabel('Score')\n plt.grid(True, linestyle='-.', color='0.3')\n plt.fill_between(train_sizes, train_scores_mean - train_scores_std, \n train_scores_mean + train_scores_std, alpha=0.1, color='r')\n plt.fill_between(train_sizes, val_scores_mean - val_scores_std, \n val_scores_mean + val_scores_std, alpha=0.1, color='g')\n plt.plot(train_sizes, train_scores_mean, 'o-', color='r', label=\n 'Training score')\n plt.plot(train_sizes, val_scores_mean, 'o-', color='g', label=\n 'Cross-validation score')\n plt.legend(loc='best')\n plt.savefig(fig_path + '/' + title + '.' + format, format=format)\n plt.clf()\n\n\ndef plot_and_save(x, ys, labels, title, x_axis, y_axis, axis_range='auto',\n ylim=None, fig_path='fig', format='png'):\n if axis_range is None:\n plt.axis([x[0], x[-1], 0, 1])\n elif type(axis_range) == type(list()):\n plt.axis(axis_range)\n elif axis_range == 'auto':\n pass\n if ylim is not None:\n plt.ylim(*ylim)\n plt.xlabel(x_axis)\n plt.ylabel(y_axis)\n plt.title(title)\n lines = []\n for y in ys:\n l, = plt.plot(x, y)\n lines.append(l)\n if len(labels) == len(ys):\n plt.legend(lines, labels, loc='best')\n plt.grid(True, linestyle='-.', color='0.3')\n plt.savefig(fig_path + '.' + format, format=format)\n plt.clf()\n\n\ndef print_score(scores, scoring, train=False):\n if type(scoring) != type([]):\n if train:\n print('Train: %0.2f (+/- %0.2f)' % (np.mean(scores[\n 'train_score']), np.std(scores['train_score']) * 2))\n print('Cross validation: %0.2f (+/- %0.2f)' % (np.mean(scores[\n 'test_score']), np.std(scores['test_score']) * 2))\n return\n for s_method in scoring:\n if train:\n print('Train: %0.2f (+/- %0.2f)' % (np.mean(scores['train_' +\n s_method]), np.std(scores['train_' + s_method]) * 2))\n print('Cross validation: %0.2f (+/- %0.2f)' % (np.mean(scores[\n 'test_' + s_method]), np.std(scores['test_' + s_method]) * 2))\n",
"step-3": "<mask token>\nmatplotlib.use('Agg')\n<mask token>\nwarnings.simplefilter('ignore')\n\n\ndef v_measure(cluster_labels, true_labels):\n h_score = homogeneity_score(true_labels, cluster_labels)\n c_score = completeness_score(true_labels, cluster_labels)\n v_score = v_measure_score(true_labels, cluster_labels)\n print('Homogeneity Score: %.6f' % h_score)\n print('Completeness Score: %.6f' % c_score)\n print('V Measure Score: %.6f' % v_score)\n return h_score, c_score, v_score\n\n\ndef silhouette_analysis(X, cluster_labels, n_clusters, figname):\n plt.xlim([-0.1, 1])\n plt.ylim([0, len(X) + (n_clusters + 1) * 10])\n silhouette_avg = silhouette_score(X, cluster_labels)\n print('For n_clusters =', n_clusters,\n 'The average silhouette_score is :', silhouette_avg)\n sample_silhouette_values = silhouette_samples(X, cluster_labels)\n y_lower = 10\n for i in range(n_clusters):\n ith_cluster_silhouette_values = sample_silhouette_values[\n cluster_labels == i]\n ith_cluster_silhouette_values.sort()\n size_cluster_i = ith_cluster_silhouette_values.shape[0]\n y_upper = y_lower + size_cluster_i\n color = cm.nipy_spectral(float(i) / n_clusters)\n plt.fill_betweenx(np.arange(y_lower, y_upper), 0,\n ith_cluster_silhouette_values, facecolor=color, edgecolor=color,\n alpha=0.7)\n plt.text(-0.05, y_lower + 0.5 * size_cluster_i, str(i))\n y_lower = y_upper + 10\n plt.title('The silhouette plot for the various clusters.')\n plt.xlabel('The silhouette coefficient values')\n plt.ylabel('Cluster label')\n plt.axvline(x=silhouette_avg, color='red', linestyle='--')\n plt.yticks([])\n plt.xticks([-0.1, 0, 0.2, 0.4, 0.6, 0.8, 1])\n plt.savefig(figname, format='png')\n plt.clf()\n\n\ndef visualize_cluster(X, cluster_labels, n_clusters, centers, figname):\n if X.shape[1] < 2:\n print('Invalid shape for X: ', X.shape)\n return\n colors = cm.nipy_spectral(cluster_labels.astype(float) / n_clusters)\n plt.scatter(X[:, 0], X[:, 1], marker='.', s=30, lw=0, alpha=0.7, c=\n colors, edgecolor='k')\n if len(centers) == n_clusters:\n plt.scatter(centers[:, 0], centers[:, 1], marker='o', c='white',\n alpha=1, s=200, edgecolor='k')\n for i, c in enumerate(centers):\n plt.scatter(c[0], c[1], marker='$%d$' % i, alpha=1, s=50,\n edgecolor='k')\n plt.title('The visualization of the clustered data.')\n plt.xlabel('Feature space for the 1st feature')\n plt.ylabel('Feature space for the 2nd feature')\n plt.savefig(figname, format='png')\n plt.clf()\n\n\ndef plot_gallery(title, images, figname, n_col=3, n_row=2, shape=(28, 28),\n cmap=plt.cm.gray):\n plt.figure(figsize=(2.0 * n_col, 2.26 * n_row))\n plt.suptitle(title, size=16)\n for i, comp in enumerate(images):\n plt.subplot(n_row, n_col, i + 1)\n vmax = max(comp.max(), -comp.min())\n plt.imshow(comp.reshape(shape), cmap=cmap, interpolation='nearest',\n vmin=-vmax, vmax=vmax)\n plt.xticks(())\n plt.yticks(())\n plt.savefig(figname, format='png')\n plt.clf()\n\n\ndef create_path(*arg, filename=None):\n path = os.getcwd()\n for directory in arg:\n path = os.path.join(path, directory)\n if not os.path.exists(path):\n print(\"%s doesn't exist, creating...\" % path)\n os.mkdir(path)\n if filename:\n path = os.path.join(path, filename)\n return path\n\n\ndef load_data(data_path, split_prop=0.2, is_shuffle=False, is_split=True):\n pos_X, neg_X = [], []\n with open(data_path, 'r') as f:\n for line in f:\n instance = list(map(float, line.strip().split(',')))\n if instance[-1] == 1.0:\n pos_X.append(instance[:-1])\n else:\n neg_X.append(instance[:-1])\n if not is_split:\n X, y = np.array(pos_X + neg_X), np.array([1] * len(pos_X) + [0] *\n len(neg_X))\n if is_shuffle:\n indices = list(range(X.shape[0]))\n shuffle(indices)\n X, y = X[indices], y[indices]\n return X, y, [], []\n pos_test_size, neg_test_size = int(split_prop * len(pos_X)), int(\n split_prop * len(neg_X))\n pos_train_size, neg_train_size = len(pos_X) - pos_test_size, len(neg_X\n ) - neg_test_size\n X_test, y_test = pos_X[:pos_test_size] + neg_X[:neg_test_size], [1\n ] * pos_test_size + [0] * neg_test_size\n X_train, y_train = pos_X[pos_test_size:] + neg_X[neg_test_size:], [1\n ] * pos_train_size + [0] * neg_train_size\n assert len(X_train) == len(y_train) and len(X_test) == len(y_test\n ), 'Dimention of X and y must be the same.'\n X_train, X_test, y_train, y_test = np.array(X_train), np.array(X_test\n ), np.array(y_train), np.array(y_test)\n if is_shuffle:\n train_indices = list(range(X_train.shape[0]))\n shuffle(train_indices)\n test_indices = list(range(X_test.shape[0]))\n shuffle(test_indices)\n X_train, X_test, y_train, y_test = X_train[train_indices], X_test[\n test_indices], y_train[train_indices], y_test[test_indices]\n return X_train, X_test, y_train, y_test\n\n\ndef dump_data():\n pass\n\n\ndef analyze_data(data_path, threshold=50):\n data = []\n with open(data_path, 'r') as f:\n for line in f:\n instance = list(map(float, line.strip().split(',')))\n data.append(instance)\n count = [0] * len(data[0])\n for instance in data:\n for i in range(len(instance)):\n if instance[i] != 0.0:\n count[i] += 1\n total = 0\n for c in count:\n if c >= threshold:\n total += 1\n return count, total\n\n\ndef plot_learning_curve(train_scores_mean, train_scores_std,\n val_scores_mean, val_scores_std, train_sizes, ylim=None, title='test',\n fig_path='fig', format='png'):\n plt.figure()\n plt.title(title)\n if ylim is not None:\n plt.ylim(*ylim)\n plt.xlabel('Training examples')\n plt.ylabel('Score')\n plt.grid(True, linestyle='-.', color='0.3')\n plt.fill_between(train_sizes, train_scores_mean - train_scores_std, \n train_scores_mean + train_scores_std, alpha=0.1, color='r')\n plt.fill_between(train_sizes, val_scores_mean - val_scores_std, \n val_scores_mean + val_scores_std, alpha=0.1, color='g')\n plt.plot(train_sizes, train_scores_mean, 'o-', color='r', label=\n 'Training score')\n plt.plot(train_sizes, val_scores_mean, 'o-', color='g', label=\n 'Cross-validation score')\n plt.legend(loc='best')\n plt.savefig(fig_path + '/' + title + '.' + format, format=format)\n plt.clf()\n\n\ndef plot_and_save(x, ys, labels, title, x_axis, y_axis, axis_range='auto',\n ylim=None, fig_path='fig', format='png'):\n if axis_range is None:\n plt.axis([x[0], x[-1], 0, 1])\n elif type(axis_range) == type(list()):\n plt.axis(axis_range)\n elif axis_range == 'auto':\n pass\n if ylim is not None:\n plt.ylim(*ylim)\n plt.xlabel(x_axis)\n plt.ylabel(y_axis)\n plt.title(title)\n lines = []\n for y in ys:\n l, = plt.plot(x, y)\n lines.append(l)\n if len(labels) == len(ys):\n plt.legend(lines, labels, loc='best')\n plt.grid(True, linestyle='-.', color='0.3')\n plt.savefig(fig_path + '.' + format, format=format)\n plt.clf()\n\n\ndef print_score(scores, scoring, train=False):\n if type(scoring) != type([]):\n if train:\n print('Train: %0.2f (+/- %0.2f)' % (np.mean(scores[\n 'train_score']), np.std(scores['train_score']) * 2))\n print('Cross validation: %0.2f (+/- %0.2f)' % (np.mean(scores[\n 'test_score']), np.std(scores['test_score']) * 2))\n return\n for s_method in scoring:\n if train:\n print('Train: %0.2f (+/- %0.2f)' % (np.mean(scores['train_' +\n s_method]), np.std(scores['train_' + s_method]) * 2))\n print('Cross validation: %0.2f (+/- %0.2f)' % (np.mean(scores[\n 'test_' + s_method]), np.std(scores['test_' + s_method]) * 2))\n",
"step-4": "from sklearn.model_selection import train_test_split\nfrom sklearn.metrics import silhouette_samples, silhouette_score\nfrom sklearn.metrics.cluster import homogeneity_score, completeness_score, v_measure_score\nfrom sklearn import datasets\nfrom random import shuffle\nimport os\nimport matplotlib\nmatplotlib.use('Agg')\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\nimport warnings\nwarnings.simplefilter('ignore')\n\n\ndef v_measure(cluster_labels, true_labels):\n h_score = homogeneity_score(true_labels, cluster_labels)\n c_score = completeness_score(true_labels, cluster_labels)\n v_score = v_measure_score(true_labels, cluster_labels)\n print('Homogeneity Score: %.6f' % h_score)\n print('Completeness Score: %.6f' % c_score)\n print('V Measure Score: %.6f' % v_score)\n return h_score, c_score, v_score\n\n\ndef silhouette_analysis(X, cluster_labels, n_clusters, figname):\n plt.xlim([-0.1, 1])\n plt.ylim([0, len(X) + (n_clusters + 1) * 10])\n silhouette_avg = silhouette_score(X, cluster_labels)\n print('For n_clusters =', n_clusters,\n 'The average silhouette_score is :', silhouette_avg)\n sample_silhouette_values = silhouette_samples(X, cluster_labels)\n y_lower = 10\n for i in range(n_clusters):\n ith_cluster_silhouette_values = sample_silhouette_values[\n cluster_labels == i]\n ith_cluster_silhouette_values.sort()\n size_cluster_i = ith_cluster_silhouette_values.shape[0]\n y_upper = y_lower + size_cluster_i\n color = cm.nipy_spectral(float(i) / n_clusters)\n plt.fill_betweenx(np.arange(y_lower, y_upper), 0,\n ith_cluster_silhouette_values, facecolor=color, edgecolor=color,\n alpha=0.7)\n plt.text(-0.05, y_lower + 0.5 * size_cluster_i, str(i))\n y_lower = y_upper + 10\n plt.title('The silhouette plot for the various clusters.')\n plt.xlabel('The silhouette coefficient values')\n plt.ylabel('Cluster label')\n plt.axvline(x=silhouette_avg, color='red', linestyle='--')\n plt.yticks([])\n plt.xticks([-0.1, 0, 0.2, 0.4, 0.6, 0.8, 1])\n plt.savefig(figname, format='png')\n plt.clf()\n\n\ndef visualize_cluster(X, cluster_labels, n_clusters, centers, figname):\n if X.shape[1] < 2:\n print('Invalid shape for X: ', X.shape)\n return\n colors = cm.nipy_spectral(cluster_labels.astype(float) / n_clusters)\n plt.scatter(X[:, 0], X[:, 1], marker='.', s=30, lw=0, alpha=0.7, c=\n colors, edgecolor='k')\n if len(centers) == n_clusters:\n plt.scatter(centers[:, 0], centers[:, 1], marker='o', c='white',\n alpha=1, s=200, edgecolor='k')\n for i, c in enumerate(centers):\n plt.scatter(c[0], c[1], marker='$%d$' % i, alpha=1, s=50,\n edgecolor='k')\n plt.title('The visualization of the clustered data.')\n plt.xlabel('Feature space for the 1st feature')\n plt.ylabel('Feature space for the 2nd feature')\n plt.savefig(figname, format='png')\n plt.clf()\n\n\ndef plot_gallery(title, images, figname, n_col=3, n_row=2, shape=(28, 28),\n cmap=plt.cm.gray):\n plt.figure(figsize=(2.0 * n_col, 2.26 * n_row))\n plt.suptitle(title, size=16)\n for i, comp in enumerate(images):\n plt.subplot(n_row, n_col, i + 1)\n vmax = max(comp.max(), -comp.min())\n plt.imshow(comp.reshape(shape), cmap=cmap, interpolation='nearest',\n vmin=-vmax, vmax=vmax)\n plt.xticks(())\n plt.yticks(())\n plt.savefig(figname, format='png')\n plt.clf()\n\n\ndef create_path(*arg, filename=None):\n path = os.getcwd()\n for directory in arg:\n path = os.path.join(path, directory)\n if not os.path.exists(path):\n print(\"%s doesn't exist, creating...\" % path)\n os.mkdir(path)\n if filename:\n path = os.path.join(path, filename)\n return path\n\n\ndef load_data(data_path, split_prop=0.2, is_shuffle=False, is_split=True):\n pos_X, neg_X = [], []\n with open(data_path, 'r') as f:\n for line in f:\n instance = list(map(float, line.strip().split(',')))\n if instance[-1] == 1.0:\n pos_X.append(instance[:-1])\n else:\n neg_X.append(instance[:-1])\n if not is_split:\n X, y = np.array(pos_X + neg_X), np.array([1] * len(pos_X) + [0] *\n len(neg_X))\n if is_shuffle:\n indices = list(range(X.shape[0]))\n shuffle(indices)\n X, y = X[indices], y[indices]\n return X, y, [], []\n pos_test_size, neg_test_size = int(split_prop * len(pos_X)), int(\n split_prop * len(neg_X))\n pos_train_size, neg_train_size = len(pos_X) - pos_test_size, len(neg_X\n ) - neg_test_size\n X_test, y_test = pos_X[:pos_test_size] + neg_X[:neg_test_size], [1\n ] * pos_test_size + [0] * neg_test_size\n X_train, y_train = pos_X[pos_test_size:] + neg_X[neg_test_size:], [1\n ] * pos_train_size + [0] * neg_train_size\n assert len(X_train) == len(y_train) and len(X_test) == len(y_test\n ), 'Dimention of X and y must be the same.'\n X_train, X_test, y_train, y_test = np.array(X_train), np.array(X_test\n ), np.array(y_train), np.array(y_test)\n if is_shuffle:\n train_indices = list(range(X_train.shape[0]))\n shuffle(train_indices)\n test_indices = list(range(X_test.shape[0]))\n shuffle(test_indices)\n X_train, X_test, y_train, y_test = X_train[train_indices], X_test[\n test_indices], y_train[train_indices], y_test[test_indices]\n return X_train, X_test, y_train, y_test\n\n\ndef dump_data():\n pass\n\n\ndef analyze_data(data_path, threshold=50):\n data = []\n with open(data_path, 'r') as f:\n for line in f:\n instance = list(map(float, line.strip().split(',')))\n data.append(instance)\n count = [0] * len(data[0])\n for instance in data:\n for i in range(len(instance)):\n if instance[i] != 0.0:\n count[i] += 1\n total = 0\n for c in count:\n if c >= threshold:\n total += 1\n return count, total\n\n\ndef plot_learning_curve(train_scores_mean, train_scores_std,\n val_scores_mean, val_scores_std, train_sizes, ylim=None, title='test',\n fig_path='fig', format='png'):\n plt.figure()\n plt.title(title)\n if ylim is not None:\n plt.ylim(*ylim)\n plt.xlabel('Training examples')\n plt.ylabel('Score')\n plt.grid(True, linestyle='-.', color='0.3')\n plt.fill_between(train_sizes, train_scores_mean - train_scores_std, \n train_scores_mean + train_scores_std, alpha=0.1, color='r')\n plt.fill_between(train_sizes, val_scores_mean - val_scores_std, \n val_scores_mean + val_scores_std, alpha=0.1, color='g')\n plt.plot(train_sizes, train_scores_mean, 'o-', color='r', label=\n 'Training score')\n plt.plot(train_sizes, val_scores_mean, 'o-', color='g', label=\n 'Cross-validation score')\n plt.legend(loc='best')\n plt.savefig(fig_path + '/' + title + '.' + format, format=format)\n plt.clf()\n\n\ndef plot_and_save(x, ys, labels, title, x_axis, y_axis, axis_range='auto',\n ylim=None, fig_path='fig', format='png'):\n if axis_range is None:\n plt.axis([x[0], x[-1], 0, 1])\n elif type(axis_range) == type(list()):\n plt.axis(axis_range)\n elif axis_range == 'auto':\n pass\n if ylim is not None:\n plt.ylim(*ylim)\n plt.xlabel(x_axis)\n plt.ylabel(y_axis)\n plt.title(title)\n lines = []\n for y in ys:\n l, = plt.plot(x, y)\n lines.append(l)\n if len(labels) == len(ys):\n plt.legend(lines, labels, loc='best')\n plt.grid(True, linestyle='-.', color='0.3')\n plt.savefig(fig_path + '.' + format, format=format)\n plt.clf()\n\n\ndef print_score(scores, scoring, train=False):\n if type(scoring) != type([]):\n if train:\n print('Train: %0.2f (+/- %0.2f)' % (np.mean(scores[\n 'train_score']), np.std(scores['train_score']) * 2))\n print('Cross validation: %0.2f (+/- %0.2f)' % (np.mean(scores[\n 'test_score']), np.std(scores['test_score']) * 2))\n return\n for s_method in scoring:\n if train:\n print('Train: %0.2f (+/- %0.2f)' % (np.mean(scores['train_' +\n s_method]), np.std(scores['train_' + s_method]) * 2))\n print('Cross validation: %0.2f (+/- %0.2f)' % (np.mean(scores[\n 'test_' + s_method]), np.std(scores['test_' + s_method]) * 2))\n",
"step-5": "from sklearn.model_selection import train_test_split\nfrom sklearn.metrics import silhouette_samples, silhouette_score\nfrom sklearn.metrics.cluster import homogeneity_score, completeness_score, v_measure_score\nfrom sklearn import datasets\nfrom random import shuffle\n\nimport os\nimport matplotlib\nmatplotlib.use('Agg')\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\n\nimport warnings\nwarnings.simplefilter(\"ignore\")\n\ndef v_measure(cluster_labels, true_labels):\n h_score = homogeneity_score(true_labels, cluster_labels)\n c_score = completeness_score(true_labels, cluster_labels)\n v_score = v_measure_score(true_labels, cluster_labels)\n\n print(\"Homogeneity Score: %.6f\" % h_score)\n print(\"Completeness Score: %.6f\" % c_score)\n print(\"V Measure Score: %.6f\" % v_score)\n return h_score, c_score, v_score\n\ndef silhouette_analysis(X, cluster_labels, n_clusters, figname):\n plt.xlim([-0.1, 1])\n plt.ylim([0, len(X) + (n_clusters + 1) * 10])\n silhouette_avg = silhouette_score(X, cluster_labels)\n print(\"For n_clusters =\", n_clusters,\n \"The average silhouette_score is :\", silhouette_avg)\n\n # Compute the silhouette scores for each sample\n sample_silhouette_values = silhouette_samples(X, cluster_labels)\n\n y_lower = 10\n for i in range(n_clusters):\n # Aggregate the silhouette scores for samples belonging to\n # cluster i, and sort them\n ith_cluster_silhouette_values = sample_silhouette_values[cluster_labels == i]\n\n ith_cluster_silhouette_values.sort()\n\n size_cluster_i = ith_cluster_silhouette_values.shape[0]\n y_upper = y_lower + size_cluster_i\n\n color = cm.nipy_spectral(float(i) / n_clusters)\n plt.fill_betweenx(np.arange(y_lower, y_upper),\n 0, ith_cluster_silhouette_values,\n facecolor=color, edgecolor=color, alpha=0.7)\n\n # Label the silhouette plots with their cluster numbers at the middle\n plt.text(-0.05, y_lower + 0.5 * size_cluster_i, str(i))\n\n # Compute the new y_lower for next plot\n y_lower = y_upper + 10 # 10 for the 0 samples\n\n plt.title(\"The silhouette plot for the various clusters.\")\n plt.xlabel(\"The silhouette coefficient values\")\n plt.ylabel(\"Cluster label\")\n\n # The vertical line for average silhouette score of all the values\n plt.axvline(x=silhouette_avg, color=\"red\", linestyle=\"--\")\n\n plt.yticks([]) # Clear the yaxis labels / ticks\n plt.xticks([-0.1, 0, 0.2, 0.4, 0.6, 0.8, 1])\n plt.savefig(figname, format='png')\n plt.clf()\n\ndef visualize_cluster(X, cluster_labels, n_clusters, centers, figname):\n if X.shape[1] < 2:\n print (\"Invalid shape for X: \", X.shape)\n return\n\n colors = cm.nipy_spectral(cluster_labels.astype(float) / n_clusters)\n plt.scatter(X[:, 0], X[:, 1], marker='.', s=30, lw=0, alpha=0.7,\n c=colors, edgecolor='k')\n\n # Draw white circles at cluster centers\n if len(centers) == n_clusters:\n plt.scatter(centers[:, 0], centers[:, 1], marker='o',\n c=\"white\", alpha=1, s=200, edgecolor='k')\n\n for i, c in enumerate(centers):\n plt.scatter(c[0], c[1], marker='$%d$' % i, alpha=1,\n s=50, edgecolor='k')\n\n plt.title(\"The visualization of the clustered data.\")\n plt.xlabel(\"Feature space for the 1st feature\")\n plt.ylabel(\"Feature space for the 2nd feature\")\n plt.savefig(figname, format='png')\n plt.clf()\n\ndef plot_gallery(title, images, figname, n_col=3, n_row=2, shape=(28, 28), cmap=plt.cm.gray):\n plt.figure(figsize=(2. * n_col, 2.26 * n_row))\n plt.suptitle(title, size=16)\n for i, comp in enumerate(images):\n plt.subplot(n_row, n_col, i + 1)\n vmax = max(comp.max(), -comp.min())\n plt.imshow(comp.reshape(shape), cmap=cmap,\n interpolation='nearest',\n vmin=-vmax, vmax=vmax)\n plt.xticks(())\n plt.yticks(())\n #plt.subplots_adjust(0.01, 0.05, 0.99, 0.93, 0.04, 0.)\n plt.savefig(figname, format='png')\n plt.clf()\n #plt.subplots_adjust()\n\ndef create_path(*arg, filename=None):\n path = os.getcwd()\n for directory in arg:\n path = os.path.join(path, directory)\n if not os.path.exists(path):\n print('%s doesn\\'t exist, creating...' % path)\n os.mkdir(path)\n\n if filename:\n path = os.path.join(path, filename)\n return path\n\ndef load_data(data_path, split_prop=0.2, is_shuffle=False, is_split=True):\n pos_X, neg_X = [], []\n with open(data_path, 'r') as f:\n for line in f:\n instance = list(map(float, line.strip().split(',')))\n if instance[-1] == 1.0:\n pos_X.append(instance[:-1])\n else:\n neg_X.append(instance[:-1])\n\n if not is_split:\n X, y = np.array(pos_X + neg_X), np.array([1] * len(pos_X) + [0] * len(neg_X))\n if is_shuffle:\n indices = list(range(X.shape[0]))\n shuffle(indices)\n X, y = X[indices], y[indices]\n return X, y, [], []\n\n pos_test_size, neg_test_size = int(split_prop * len(pos_X)), int(split_prop * len(neg_X))\n pos_train_size, neg_train_size = len(pos_X) - pos_test_size, len(neg_X) - neg_test_size\n \n X_test, y_test = pos_X[:pos_test_size] + neg_X[:neg_test_size], [1] * pos_test_size + [0] * neg_test_size\n X_train, y_train = pos_X[pos_test_size:] + neg_X[neg_test_size:], [1] * pos_train_size + [0] * neg_train_size\n\n assert len(X_train) == len(y_train) and len(X_test) == len(y_test), \"Dimention of X and y must be the same.\"\n\n X_train, X_test, y_train, y_test = np.array(X_train), np.array(X_test), np.array(y_train), np.array(y_test)\n if is_shuffle:\n train_indices = list(range(X_train.shape[0]))\n shuffle(train_indices)\n test_indices = list(range(X_test.shape[0]))\n shuffle(test_indices)\n X_train, X_test, y_train, y_test = X_train[train_indices], X_test[test_indices], y_train[train_indices], y_test[test_indices]\n\n return X_train, X_test, y_train, y_test\n\ndef dump_data():\n #Need implement\n pass\n\ndef analyze_data(data_path, threshold=50):\n data = []\n with open(data_path, 'r') as f:\n for line in f:\n instance = list(map(float, line.strip().split(',')))\n data.append(instance)\n\n count = [0] * len(data[0])\n for instance in data:\n for i in range(len(instance)):\n if instance[i] != 0.0:\n count[i] += 1\n\n total = 0\n for c in count:\n if c >= threshold:\n total += 1\n\n\n return count, total\n\ndef plot_learning_curve(train_scores_mean,\n train_scores_std,\n val_scores_mean,\n val_scores_std,\n train_sizes,\n ylim=None,\n title='test',\n fig_path='fig',\n format='png'):\n plt.figure()\n plt.title(title)\n if ylim is not None:\n plt.ylim(*ylim)\n plt.xlabel(\"Training examples\")\n plt.ylabel(\"Score\")\n plt.grid(True, linestyle = \"-.\", color = '0.3')\n plt.fill_between(train_sizes, train_scores_mean - train_scores_std,\n train_scores_mean + train_scores_std, alpha=0.1,\n color=\"r\")\n plt.fill_between(train_sizes, val_scores_mean - val_scores_std,\n val_scores_mean + val_scores_std, alpha=0.1, color=\"g\")\n plt.plot(train_sizes, train_scores_mean, 'o-', color=\"r\",\n label=\"Training score\")\n plt.plot(train_sizes, val_scores_mean, 'o-', color=\"g\",\n label=\"Cross-validation score\")\n\n plt.legend(loc=\"best\")\n plt.savefig(fig_path + '/' + title + '.' + format, format=format)\n plt.clf()\n\ndef plot_and_save(x, ys, labels, title, x_axis, y_axis, axis_range='auto', ylim=None, fig_path='fig', format='png'):\n if axis_range is None:\n plt.axis([x[0], x[-1], 0, 1])\n elif type(axis_range) == type(list()):\n plt.axis(axis_range)\n elif axis_range == 'auto':\n pass\n\n if ylim is not None:\n plt.ylim(*ylim)\n\n plt.xlabel(x_axis)\n plt.ylabel(y_axis)\n plt.title(title)\n\n lines = []\n for y in ys:\n l, = plt.plot(x, y)\n lines.append(l)\n if len(labels) == len(ys):\n plt.legend(lines, labels, loc=\"best\")\n plt.grid(True, linestyle = \"-.\", color = '0.3')\n\n plt.savefig(fig_path + '.' + format, format=format)\n plt.clf()\n\ndef print_score(scores, scoring, train=False):\n if type(scoring) != type([]):\n if train:\n print(\"Train: %0.2f (+/- %0.2f)\" % (np.mean(scores['train_score']), np.std(scores['train_score']) * 2))\n\n print(\"Cross validation: %0.2f (+/- %0.2f)\" % (np.mean(scores['test_score']), np.std(scores['test_score']) * 2))\n return\n\n for s_method in scoring:\n if train:\n print(\"Train: %0.2f (+/- %0.2f)\" % (np.mean(scores['train_' + s_method]), np.std(scores['train_' + s_method]) * 2))\n\n print(\"Cross validation: %0.2f (+/- %0.2f)\" % (np.mean(scores['test_' + s_method]), np.std(scores['test_' + s_method]) * 2))\n\n\n ",
"step-ids": [
8,
10,
12,
13,
14
]
}
|
[
8,
10,
12,
13,
14
] |
from genericentity import GenericEntity as GEntity
import random as ran
class GenericBreeder(object):
"""description of class: its a classy class"""
def __init__(self,nlifesize,nparentsize,nlowestscore):
self.Reset(nlifesize,nparentsize,nlowestscore)
def Reset(self,nlifesize,nparentsize,nlowestscore):
self.life=[self.CreateLife() for i in range(0,nlifesize)]
self.lifesize=nlifesize
self.parentsize=nparentsize
self.parents = self.life[0:self.parentsize]
self.lastscore = nlowestscore
self.hardtimebuffer = 0
def CopulateALot(self,parents,howmuch,list=[]):
if(len(list) == 0):
list = ([0] * howmuch)
for index in range(0,howmuch):
par1 = int(ran.random() * len(parents))
par2 = int(ran.random() * len(parents))
ob= list[index] if(index < len(list)) else 0
tmpent = self.Copulate(parents[par1],parents[par2],obj=ob)
list[index] = tmpent
return list
def Copulate(self,mom,dad,obj=0):
finfac=(mom.GetScore() + dad.GetScore()) / 2
if(obj != 0):
nextadn= self.CopulateSub(obj.adn,mom,dad)
obj.reset(nextadn,finfac)
return obj
else:
nextadn= self.CopulateSub([0]*GEntity.adnsize,mom,dad)
return self.MakeNewborn(nextadn,finfac)
def MakeNewborn(self,nadn,mutsmo):
raise NotImplementedError("MakeNewborn()")
def CopulateSub(self,nextadn,mom,dad):
raise NotImplementedError("CopulateSub()")
@staticmethod
def CreateLife():
raise NotImplementedError("CreateLife()")
def IsMaximal(self):
raise NotImplementedError("IsMaximal()")
def LetTimeFlow(self):
gencount = 0
while(True):
gencount+=1
self.life = self.CopulateALot(self.parents,self.lifesize)
self.life.sort(key=SortByFitness)
score = life[0].GetScore()
print("\r[running] score: ",score,"\t size: ",self.lifesize,"\t gen: ",gencount,end="")
self.PrintInfo(life[0])
print(" ",end="")
self.parents = self.life[0:self.parentsize]
if(self.lastscore <= score):
self.hardtimebuffer+=1
else:
self.hardtimebuffer-=1
if(self.hardtimebuffer < 0):
self.hardtimebuffer = 0
elif(self.hardtimebuffer > 3):
self.lifesize = int(self.lifesize * 1.1)
self.Struggle()
lastperfactor = perfactor
if(self.IsMaximal()):
break
print("\n[ended] score: ",score,"\t size: ",self.lifesize,"\t gen: ",gencount,end="")
self.PrintInfo(life[0])
def PrintInfo(self,best):
raise NotImplementedError("PrintInfo()")
def Struggle(self):
raise NotImplementedError("Struggle()")
|
normal
|
{
"blob_id": "753617c189a88adee8430e994aa597c9db9410fe",
"index": 6143,
"step-1": "<mask token>\n\n\nclass GenericBreeder(object):\n <mask token>\n\n def __init__(self, nlifesize, nparentsize, nlowestscore):\n self.Reset(nlifesize, nparentsize, nlowestscore)\n\n def Reset(self, nlifesize, nparentsize, nlowestscore):\n self.life = [self.CreateLife() for i in range(0, nlifesize)]\n self.lifesize = nlifesize\n self.parentsize = nparentsize\n self.parents = self.life[0:self.parentsize]\n self.lastscore = nlowestscore\n self.hardtimebuffer = 0\n\n def CopulateALot(self, parents, howmuch, list=[]):\n if len(list) == 0:\n list = [0] * howmuch\n for index in range(0, howmuch):\n par1 = int(ran.random() * len(parents))\n par2 = int(ran.random() * len(parents))\n ob = list[index] if index < len(list) else 0\n tmpent = self.Copulate(parents[par1], parents[par2], obj=ob)\n list[index] = tmpent\n return list\n <mask token>\n <mask token>\n <mask token>\n\n @staticmethod\n def CreateLife():\n raise NotImplementedError('CreateLife()')\n\n def IsMaximal(self):\n raise NotImplementedError('IsMaximal()')\n <mask token>\n\n def PrintInfo(self, best):\n raise NotImplementedError('PrintInfo()')\n\n def Struggle(self):\n raise NotImplementedError('Struggle()')\n",
"step-2": "<mask token>\n\n\nclass GenericBreeder(object):\n <mask token>\n\n def __init__(self, nlifesize, nparentsize, nlowestscore):\n self.Reset(nlifesize, nparentsize, nlowestscore)\n\n def Reset(self, nlifesize, nparentsize, nlowestscore):\n self.life = [self.CreateLife() for i in range(0, nlifesize)]\n self.lifesize = nlifesize\n self.parentsize = nparentsize\n self.parents = self.life[0:self.parentsize]\n self.lastscore = nlowestscore\n self.hardtimebuffer = 0\n\n def CopulateALot(self, parents, howmuch, list=[]):\n if len(list) == 0:\n list = [0] * howmuch\n for index in range(0, howmuch):\n par1 = int(ran.random() * len(parents))\n par2 = int(ran.random() * len(parents))\n ob = list[index] if index < len(list) else 0\n tmpent = self.Copulate(parents[par1], parents[par2], obj=ob)\n list[index] = tmpent\n return list\n\n def Copulate(self, mom, dad, obj=0):\n finfac = (mom.GetScore() + dad.GetScore()) / 2\n if obj != 0:\n nextadn = self.CopulateSub(obj.adn, mom, dad)\n obj.reset(nextadn, finfac)\n return obj\n else:\n nextadn = self.CopulateSub([0] * GEntity.adnsize, mom, dad)\n return self.MakeNewborn(nextadn, finfac)\n\n def MakeNewborn(self, nadn, mutsmo):\n raise NotImplementedError('MakeNewborn()')\n <mask token>\n\n @staticmethod\n def CreateLife():\n raise NotImplementedError('CreateLife()')\n\n def IsMaximal(self):\n raise NotImplementedError('IsMaximal()')\n <mask token>\n\n def PrintInfo(self, best):\n raise NotImplementedError('PrintInfo()')\n\n def Struggle(self):\n raise NotImplementedError('Struggle()')\n",
"step-3": "<mask token>\n\n\nclass GenericBreeder(object):\n \"\"\"description of class: its a classy class\"\"\"\n\n def __init__(self, nlifesize, nparentsize, nlowestscore):\n self.Reset(nlifesize, nparentsize, nlowestscore)\n\n def Reset(self, nlifesize, nparentsize, nlowestscore):\n self.life = [self.CreateLife() for i in range(0, nlifesize)]\n self.lifesize = nlifesize\n self.parentsize = nparentsize\n self.parents = self.life[0:self.parentsize]\n self.lastscore = nlowestscore\n self.hardtimebuffer = 0\n\n def CopulateALot(self, parents, howmuch, list=[]):\n if len(list) == 0:\n list = [0] * howmuch\n for index in range(0, howmuch):\n par1 = int(ran.random() * len(parents))\n par2 = int(ran.random() * len(parents))\n ob = list[index] if index < len(list) else 0\n tmpent = self.Copulate(parents[par1], parents[par2], obj=ob)\n list[index] = tmpent\n return list\n\n def Copulate(self, mom, dad, obj=0):\n finfac = (mom.GetScore() + dad.GetScore()) / 2\n if obj != 0:\n nextadn = self.CopulateSub(obj.adn, mom, dad)\n obj.reset(nextadn, finfac)\n return obj\n else:\n nextadn = self.CopulateSub([0] * GEntity.adnsize, mom, dad)\n return self.MakeNewborn(nextadn, finfac)\n\n def MakeNewborn(self, nadn, mutsmo):\n raise NotImplementedError('MakeNewborn()')\n\n def CopulateSub(self, nextadn, mom, dad):\n raise NotImplementedError('CopulateSub()')\n\n @staticmethod\n def CreateLife():\n raise NotImplementedError('CreateLife()')\n\n def IsMaximal(self):\n raise NotImplementedError('IsMaximal()')\n\n def LetTimeFlow(self):\n gencount = 0\n while True:\n gencount += 1\n self.life = self.CopulateALot(self.parents, self.lifesize)\n self.life.sort(key=SortByFitness)\n score = life[0].GetScore()\n print('\\r[running] score: ', score, '\\t size: ', self.lifesize,\n '\\t gen: ', gencount, end='')\n self.PrintInfo(life[0])\n print(' ', end='')\n self.parents = self.life[0:self.parentsize]\n if self.lastscore <= score:\n self.hardtimebuffer += 1\n else:\n self.hardtimebuffer -= 1\n if self.hardtimebuffer < 0:\n self.hardtimebuffer = 0\n elif self.hardtimebuffer > 3:\n self.lifesize = int(self.lifesize * 1.1)\n self.Struggle()\n lastperfactor = perfactor\n if self.IsMaximal():\n break\n print('\\n[ended] score: ', score, '\\t size: ', self.lifesize,\n '\\t gen: ', gencount, end='')\n self.PrintInfo(life[0])\n\n def PrintInfo(self, best):\n raise NotImplementedError('PrintInfo()')\n\n def Struggle(self):\n raise NotImplementedError('Struggle()')\n",
"step-4": "from genericentity import GenericEntity as GEntity\nimport random as ran\n\n\nclass GenericBreeder(object):\n \"\"\"description of class: its a classy class\"\"\"\n\n def __init__(self, nlifesize, nparentsize, nlowestscore):\n self.Reset(nlifesize, nparentsize, nlowestscore)\n\n def Reset(self, nlifesize, nparentsize, nlowestscore):\n self.life = [self.CreateLife() for i in range(0, nlifesize)]\n self.lifesize = nlifesize\n self.parentsize = nparentsize\n self.parents = self.life[0:self.parentsize]\n self.lastscore = nlowestscore\n self.hardtimebuffer = 0\n\n def CopulateALot(self, parents, howmuch, list=[]):\n if len(list) == 0:\n list = [0] * howmuch\n for index in range(0, howmuch):\n par1 = int(ran.random() * len(parents))\n par2 = int(ran.random() * len(parents))\n ob = list[index] if index < len(list) else 0\n tmpent = self.Copulate(parents[par1], parents[par2], obj=ob)\n list[index] = tmpent\n return list\n\n def Copulate(self, mom, dad, obj=0):\n finfac = (mom.GetScore() + dad.GetScore()) / 2\n if obj != 0:\n nextadn = self.CopulateSub(obj.adn, mom, dad)\n obj.reset(nextadn, finfac)\n return obj\n else:\n nextadn = self.CopulateSub([0] * GEntity.adnsize, mom, dad)\n return self.MakeNewborn(nextadn, finfac)\n\n def MakeNewborn(self, nadn, mutsmo):\n raise NotImplementedError('MakeNewborn()')\n\n def CopulateSub(self, nextadn, mom, dad):\n raise NotImplementedError('CopulateSub()')\n\n @staticmethod\n def CreateLife():\n raise NotImplementedError('CreateLife()')\n\n def IsMaximal(self):\n raise NotImplementedError('IsMaximal()')\n\n def LetTimeFlow(self):\n gencount = 0\n while True:\n gencount += 1\n self.life = self.CopulateALot(self.parents, self.lifesize)\n self.life.sort(key=SortByFitness)\n score = life[0].GetScore()\n print('\\r[running] score: ', score, '\\t size: ', self.lifesize,\n '\\t gen: ', gencount, end='')\n self.PrintInfo(life[0])\n print(' ', end='')\n self.parents = self.life[0:self.parentsize]\n if self.lastscore <= score:\n self.hardtimebuffer += 1\n else:\n self.hardtimebuffer -= 1\n if self.hardtimebuffer < 0:\n self.hardtimebuffer = 0\n elif self.hardtimebuffer > 3:\n self.lifesize = int(self.lifesize * 1.1)\n self.Struggle()\n lastperfactor = perfactor\n if self.IsMaximal():\n break\n print('\\n[ended] score: ', score, '\\t size: ', self.lifesize,\n '\\t gen: ', gencount, end='')\n self.PrintInfo(life[0])\n\n def PrintInfo(self, best):\n raise NotImplementedError('PrintInfo()')\n\n def Struggle(self):\n raise NotImplementedError('Struggle()')\n",
"step-5": "from genericentity import GenericEntity as GEntity\nimport random as ran\n\nclass GenericBreeder(object):\n \"\"\"description of class: its a classy class\"\"\"\n \n\n\n def __init__(self,nlifesize,nparentsize,nlowestscore):\n self.Reset(nlifesize,nparentsize,nlowestscore)\n\n def Reset(self,nlifesize,nparentsize,nlowestscore):\n self.life=[self.CreateLife() for i in range(0,nlifesize)]\n self.lifesize=nlifesize\n self.parentsize=nparentsize\n self.parents = self.life[0:self.parentsize]\n self.lastscore = nlowestscore\n self.hardtimebuffer = 0\n \n def CopulateALot(self,parents,howmuch,list=[]):\n if(len(list) == 0):\n list = ([0] * howmuch)\n for index in range(0,howmuch):\n par1 = int(ran.random() * len(parents))\n par2 = int(ran.random() * len(parents))\n\n ob= list[index] if(index < len(list)) else 0 \n\n tmpent = self.Copulate(parents[par1],parents[par2],obj=ob)\n list[index] = tmpent\n \n return list\n\n def Copulate(self,mom,dad,obj=0):\n \n finfac=(mom.GetScore() + dad.GetScore()) / 2\n if(obj != 0):\n nextadn= self.CopulateSub(obj.adn,mom,dad)\n obj.reset(nextadn,finfac)\n return obj\n else:\n nextadn= self.CopulateSub([0]*GEntity.adnsize,mom,dad)\n return self.MakeNewborn(nextadn,finfac)\n \n def MakeNewborn(self,nadn,mutsmo):\n raise NotImplementedError(\"MakeNewborn()\")\n\n def CopulateSub(self,nextadn,mom,dad): \n raise NotImplementedError(\"CopulateSub()\")\n\n @staticmethod\n def CreateLife():\n raise NotImplementedError(\"CreateLife()\")\n\n def IsMaximal(self):\n raise NotImplementedError(\"IsMaximal()\")\n\n def LetTimeFlow(self):\n \n gencount = 0\n while(True):\n gencount+=1\n self.life = self.CopulateALot(self.parents,self.lifesize)\n self.life.sort(key=SortByFitness)\n score = life[0].GetScore()\n print(\"\\r[running] score: \",score,\"\\t size: \",self.lifesize,\"\\t gen: \",gencount,end=\"\")\n self.PrintInfo(life[0])\n print(\" \",end=\"\")\n self.parents = self.life[0:self.parentsize]\n\n if(self.lastscore <= score):\n self.hardtimebuffer+=1\n else:\n self.hardtimebuffer-=1\n\n if(self.hardtimebuffer < 0):\n self.hardtimebuffer = 0\n elif(self.hardtimebuffer > 3):\n self.lifesize = int(self.lifesize * 1.1)\n self.Struggle()\n\n lastperfactor = perfactor\n if(self.IsMaximal()):\n break\n\n print(\"\\n[ended] score: \",score,\"\\t size: \",self.lifesize,\"\\t gen: \",gencount,end=\"\")\n self.PrintInfo(life[0])\n\n def PrintInfo(self,best):\n raise NotImplementedError(\"PrintInfo()\")\n\n def Struggle(self):\n raise NotImplementedError(\"Struggle()\") ",
"step-ids": [
8,
10,
13,
14,
15
]
}
|
[
8,
10,
13,
14,
15
] |
<|reserved_special_token_0|>
def get_server_ip(device_ip):
import socket
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect((device_ip, 80))
return s.getsockname()[0]
def transfer_file(from_ip, to_ip, remote_file_path, cmd='ftpput'):
"""
Transfer file from from_ip to to_ip via telnet.
Use ftpput and ftpget.
"""
try:
import pyftpdlib
except ImportError:
import pip
pip.main('install pyftpdlib'.split())
ftp_server = spr.Popen([sys.executable, '-m', 'pyftpdlib', '-w'])
print('Server started')
filename = os.path.basename(remote_file_path)
s = telnetlib.Telnet(DEVICE_IP)
print(s.read_until(b'login: ').decode())
s.write(b'root \n')
print(s.read_until(b'Password: ').decode())
s.write(b'solokey\n')
if s.read_until(b'#'):
s.write(bytes('ls %s\n' % DB_PATH, 'utf-8'))
files = s.read_until(b'#').decode()
if filename in files:
while True:
if cmd == 'ftpput':
command = bytes('%s -P 2121 %s %s %s\n' % (cmd,
server_ip, filename, remote_file_path), 'utf-8')
elif cmd == 'ftpget':
command = bytes('%s -P 2121 %s %s %s\n' % (cmd,
server_ip, remote_file_path, filename), 'utf-8')
else:
raise ValueError('cmd must be `ftpput` or `ftpget`')
s.write(command)
ret = s.read_until(b'#').decode()
if 'refused' not in ret:
print(ret)
break
ftp_server.kill()
print('Server killed')
<|reserved_special_token_0|>
def print_log(*log_row):
"""
Pretty print a log row
log row format: (ID, User_PIN, Verify_Type, Verify_Time, Status)
"""
id, uid, verify_type, verify_time, status = log_row
if status == 1:
status = 'Check out'
elif status == 0:
status = 'Check in'
print('{}. {} {} at {}'.format(id, uid, status, verify_time))
<|reserved_special_token_0|>
def check_log_by_date(uid, date):
pass
def fix_logs(uid, start_date, end_date):
"""
Fix logs of uid from start_date to end_date
A normalized log contains 2 logs per day
One check in log before 8:00
One check out log after 17:00
"""
start_date = '{:%d/%m/%Y}'.format(start_date)
end_date = '{:%d/%m/%Y}'.format(end_date)
day_count = end_date - start_date + 1
for date in (start_date + datetime.timedelta(i) for i in range(day_count)):
date = '{:%d/%m/%Y}'.format(date.date)
logs = get_logs_by_date(uid, date)
if len(logs) == 2:
if not check_log_row(logs[0]) or not check_log_row(logs[1]):
delete_log(logs[0][0])
delete_log(logs[1][0])
add_log(uid, date, 'in')
add_log(uid, date, 'out')
elif len(logs) == 0:
add_log(uid, date, 'in')
add_log(uid, date, 'out')
else:
for log in logs:
delete_log(log[0])
add_log(uid, date, 'in')
add_log(uid, date, 'out')
def main():
today = '{:%d/%m/%Y}'.format(datetime.date.today())
parser = argparse.ArgumentParser()
parser.add_argument('action', help=
'`get`, `checkin`, `checkout`, `add` or `fix` logs', default='get')
parser.add_argument('uids', help='User PINs', type=int, nargs='*')
parser.add_argument('-d', '--date', help='Date', default=today)
parser.add_argument('-r', '--range', help=
'Range of date, ex. 01/01/2017-02/01/2017')
parser.add_argument('--log', help='log id to delete')
parser.add_argument('--late', help='Checkin late or not', action=
'store_true')
args = parser.parse_args()
uids = args.uids
date = args.date or today
if not args.range:
start, end = date, date
else:
start, end = args.range.split('-')
transfer_file(DEVICE_IP, server_ip, DB_PATH, cmd='ftpput')
for uid in uids:
if args.action == 'get':
logs = get_logs(uid, start, end)
for log in logs:
print_log(*log)
elif args.action == 'checkin':
add_logs(uid, start, end, 'in', late=args.late)
elif args.action == 'checkout':
add_logs(uid, start, end, 'out')
elif args.action == 'add':
add_log(uid, start, end)
elif args.action == 'fix':
fix_logs(uid, start, end)
elif args.action == 'delete':
delete_log(args.log)
else:
raise ValueError(
'Action must be `get`, `checkin`, `checkout`, `fix` or `delete`'
)
transfer_file(server_ip, DEVICE_IP, DB_PATH, cmd='ftpget')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_server_ip(device_ip):
import socket
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect((device_ip, 80))
return s.getsockname()[0]
def transfer_file(from_ip, to_ip, remote_file_path, cmd='ftpput'):
"""
Transfer file from from_ip to to_ip via telnet.
Use ftpput and ftpget.
"""
try:
import pyftpdlib
except ImportError:
import pip
pip.main('install pyftpdlib'.split())
ftp_server = spr.Popen([sys.executable, '-m', 'pyftpdlib', '-w'])
print('Server started')
filename = os.path.basename(remote_file_path)
s = telnetlib.Telnet(DEVICE_IP)
print(s.read_until(b'login: ').decode())
s.write(b'root \n')
print(s.read_until(b'Password: ').decode())
s.write(b'solokey\n')
if s.read_until(b'#'):
s.write(bytes('ls %s\n' % DB_PATH, 'utf-8'))
files = s.read_until(b'#').decode()
if filename in files:
while True:
if cmd == 'ftpput':
command = bytes('%s -P 2121 %s %s %s\n' % (cmd,
server_ip, filename, remote_file_path), 'utf-8')
elif cmd == 'ftpget':
command = bytes('%s -P 2121 %s %s %s\n' % (cmd,
server_ip, remote_file_path, filename), 'utf-8')
else:
raise ValueError('cmd must be `ftpput` or `ftpget`')
s.write(command)
ret = s.read_until(b'#').decode()
if 'refused' not in ret:
print(ret)
break
ftp_server.kill()
print('Server killed')
def generate_verify_time(status='in', late=False):
"""
Generate normal verify time based on status `in` or `out`
`in` time will be random 10 mins before 8:00
`out` time will be random 10 mins after 17:00
"""
if status == 'in':
status = 0
if not late:
hour = 7
minute = random.randint(50, 59)
else:
hour = 8
minute = random.randint(15, 20)
elif status == 'out':
status = 1
hour = 17
minute = random.randint(0, 10)
else:
raise ValueError('status must be `in` or `out`')
second = random.randint(0, 59)
time = datetime.time(hour, minute, second)
return time
def add_log(uid, date, status, late=False):
"""
Edit ZKDB.db file, ATT_LOG table,
insert a row which represents a check in/out log
uid: User PIN
date: follow format: dd/mm/yyyy - 14/01/2017
status: 'in' is checking in, 'out' is checking out
"""
verify_type = 1
if status == 'in':
status = 0
time = generate_verify_time('in', late=late)
elif status == 'out':
status = 1
time = generate_verify_time('out')
else:
raise ValueError('status must be `in` or `out`')
date = datetime.datetime.strptime(date, '%d/%m/%Y')
combined = datetime.datetime.combine(date, time)
verify_time = '{:%Y-%m-%dT%H:%M:%S}'.format(combined)
with sqlite3.connect(DB) as conn:
query = (
'INSERT INTO ATT_LOG (User_PIN, Verify_Type, Verify_Time, Status, Work_Code_ID, SEND_FLAG) VALUES ({}, {}, "{}", {}, 0, 0)'
.format(uid, verify_type, verify_time, status, 0, 0))
cur = conn.execute(query)
cur = conn.execute('SELECT last_insert_rowid() FROM ATT_LOG')
r = cur.fetchone()
print_log(r, uid, verify_type, verify_time, status)
def add_logs(uid, start, end, status, late=False):
start_date = datetime.datetime.strptime(start, '%d/%m/%Y')
end_date = datetime.datetime.strptime(end, '%d/%m/%Y')
day_count = end_date - start_date
day_count = day_count.days + 1
for date in (start_date + datetime.timedelta(i) for i in range(day_count)):
date = '{:%d/%m/%Y}'.format(date)
add_log(uid, date, status, late)
def delete_log(log_id):
"""
Delete a log row with ID=log_id
"""
with sqlite3.connect(DB) as conn:
query = 'DELETE FROM ATT_LOG WHERE ID={}'.format(log_id)
conn.execute(query)
print('Deleted log {}'.format(log_id))
<|reserved_special_token_0|>
def get_logs_by_date(uid, date):
return get_logs(uid, date, date)
def print_log(*log_row):
"""
Pretty print a log row
log row format: (ID, User_PIN, Verify_Type, Verify_Time, Status)
"""
id, uid, verify_type, verify_time, status = log_row
if status == 1:
status = 'Check out'
elif status == 0:
status = 'Check in'
print('{}. {} {} at {}'.format(id, uid, status, verify_time))
def check_log_row(log_row):
"""
Each day must have exactly 2 logs.
One for checking in, before 8:00:00
One for checking out, after 17:00:00
Return True if satisfies all conditions, else False
"""
in_time = datetime.time(8, 0, 0)
out_time = datetime.time(17, 0, 0)
log_date = datetime.datetime.strptime(log_row[2], '%Y-%m-%dT%H:%M:%S')
status = log_row[-1]
if status == 1 and log_date.time() < out_time:
print('Early log on {}: {}'.format(log_date.date(), log_date))
return False
elif status == 0 and log_date.time() > in_time:
print('Late log on {}: {}'.format(log_date.date(), log_date))
return False
else:
return True
def check_log_by_date(uid, date):
pass
def fix_logs(uid, start_date, end_date):
"""
Fix logs of uid from start_date to end_date
A normalized log contains 2 logs per day
One check in log before 8:00
One check out log after 17:00
"""
start_date = '{:%d/%m/%Y}'.format(start_date)
end_date = '{:%d/%m/%Y}'.format(end_date)
day_count = end_date - start_date + 1
for date in (start_date + datetime.timedelta(i) for i in range(day_count)):
date = '{:%d/%m/%Y}'.format(date.date)
logs = get_logs_by_date(uid, date)
if len(logs) == 2:
if not check_log_row(logs[0]) or not check_log_row(logs[1]):
delete_log(logs[0][0])
delete_log(logs[1][0])
add_log(uid, date, 'in')
add_log(uid, date, 'out')
elif len(logs) == 0:
add_log(uid, date, 'in')
add_log(uid, date, 'out')
else:
for log in logs:
delete_log(log[0])
add_log(uid, date, 'in')
add_log(uid, date, 'out')
def main():
today = '{:%d/%m/%Y}'.format(datetime.date.today())
parser = argparse.ArgumentParser()
parser.add_argument('action', help=
'`get`, `checkin`, `checkout`, `add` or `fix` logs', default='get')
parser.add_argument('uids', help='User PINs', type=int, nargs='*')
parser.add_argument('-d', '--date', help='Date', default=today)
parser.add_argument('-r', '--range', help=
'Range of date, ex. 01/01/2017-02/01/2017')
parser.add_argument('--log', help='log id to delete')
parser.add_argument('--late', help='Checkin late or not', action=
'store_true')
args = parser.parse_args()
uids = args.uids
date = args.date or today
if not args.range:
start, end = date, date
else:
start, end = args.range.split('-')
transfer_file(DEVICE_IP, server_ip, DB_PATH, cmd='ftpput')
for uid in uids:
if args.action == 'get':
logs = get_logs(uid, start, end)
for log in logs:
print_log(*log)
elif args.action == 'checkin':
add_logs(uid, start, end, 'in', late=args.late)
elif args.action == 'checkout':
add_logs(uid, start, end, 'out')
elif args.action == 'add':
add_log(uid, start, end)
elif args.action == 'fix':
fix_logs(uid, start, end)
elif args.action == 'delete':
delete_log(args.log)
else:
raise ValueError(
'Action must be `get`, `checkin`, `checkout`, `fix` or `delete`'
)
transfer_file(server_ip, DEVICE_IP, DB_PATH, cmd='ftpget')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_server_ip(device_ip):
import socket
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect((device_ip, 80))
return s.getsockname()[0]
def transfer_file(from_ip, to_ip, remote_file_path, cmd='ftpput'):
"""
Transfer file from from_ip to to_ip via telnet.
Use ftpput and ftpget.
"""
try:
import pyftpdlib
except ImportError:
import pip
pip.main('install pyftpdlib'.split())
ftp_server = spr.Popen([sys.executable, '-m', 'pyftpdlib', '-w'])
print('Server started')
filename = os.path.basename(remote_file_path)
s = telnetlib.Telnet(DEVICE_IP)
print(s.read_until(b'login: ').decode())
s.write(b'root \n')
print(s.read_until(b'Password: ').decode())
s.write(b'solokey\n')
if s.read_until(b'#'):
s.write(bytes('ls %s\n' % DB_PATH, 'utf-8'))
files = s.read_until(b'#').decode()
if filename in files:
while True:
if cmd == 'ftpput':
command = bytes('%s -P 2121 %s %s %s\n' % (cmd,
server_ip, filename, remote_file_path), 'utf-8')
elif cmd == 'ftpget':
command = bytes('%s -P 2121 %s %s %s\n' % (cmd,
server_ip, remote_file_path, filename), 'utf-8')
else:
raise ValueError('cmd must be `ftpput` or `ftpget`')
s.write(command)
ret = s.read_until(b'#').decode()
if 'refused' not in ret:
print(ret)
break
ftp_server.kill()
print('Server killed')
def generate_verify_time(status='in', late=False):
"""
Generate normal verify time based on status `in` or `out`
`in` time will be random 10 mins before 8:00
`out` time will be random 10 mins after 17:00
"""
if status == 'in':
status = 0
if not late:
hour = 7
minute = random.randint(50, 59)
else:
hour = 8
minute = random.randint(15, 20)
elif status == 'out':
status = 1
hour = 17
minute = random.randint(0, 10)
else:
raise ValueError('status must be `in` or `out`')
second = random.randint(0, 59)
time = datetime.time(hour, minute, second)
return time
def add_log(uid, date, status, late=False):
"""
Edit ZKDB.db file, ATT_LOG table,
insert a row which represents a check in/out log
uid: User PIN
date: follow format: dd/mm/yyyy - 14/01/2017
status: 'in' is checking in, 'out' is checking out
"""
verify_type = 1
if status == 'in':
status = 0
time = generate_verify_time('in', late=late)
elif status == 'out':
status = 1
time = generate_verify_time('out')
else:
raise ValueError('status must be `in` or `out`')
date = datetime.datetime.strptime(date, '%d/%m/%Y')
combined = datetime.datetime.combine(date, time)
verify_time = '{:%Y-%m-%dT%H:%M:%S}'.format(combined)
with sqlite3.connect(DB) as conn:
query = (
'INSERT INTO ATT_LOG (User_PIN, Verify_Type, Verify_Time, Status, Work_Code_ID, SEND_FLAG) VALUES ({}, {}, "{}", {}, 0, 0)'
.format(uid, verify_type, verify_time, status, 0, 0))
cur = conn.execute(query)
cur = conn.execute('SELECT last_insert_rowid() FROM ATT_LOG')
r = cur.fetchone()
print_log(r, uid, verify_type, verify_time, status)
def add_logs(uid, start, end, status, late=False):
start_date = datetime.datetime.strptime(start, '%d/%m/%Y')
end_date = datetime.datetime.strptime(end, '%d/%m/%Y')
day_count = end_date - start_date
day_count = day_count.days + 1
for date in (start_date + datetime.timedelta(i) for i in range(day_count)):
date = '{:%d/%m/%Y}'.format(date)
add_log(uid, date, status, late)
def delete_log(log_id):
"""
Delete a log row with ID=log_id
"""
with sqlite3.connect(DB) as conn:
query = 'DELETE FROM ATT_LOG WHERE ID={}'.format(log_id)
conn.execute(query)
print('Deleted log {}'.format(log_id))
def get_logs(uid, start_date, end_date):
"""
Returns logs of 'uid' from 'start_date' to 'end_date'
uid: User PIN
start_date: follow format 14/01/2017
end_date: follow format 15/01/2017
Return format: list of (ID, User_PIN, Verify_Type, Verify_Time, Status)
"""
start_date = datetime.datetime.strptime(start_date, '%d/%m/%Y')
end_date = datetime.datetime.strptime(end_date, '%d/%m/%Y')
with sqlite3.connect(DB) as conn:
query = (
'SELECT ID, User_PIN, Verify_Type, Verify_Time, Status FROM ATT_LOG WHERE User_PIN = {}'
.format(uid))
cur = conn.execute(query)
rows = cur.fetchall()
ret = []
for row in rows:
log_date = datetime.datetime.strptime(row[-2], '%Y-%m-%dT%H:%M:%S')
if (log_date >= start_date and log_date <= end_date + datetime.
timedelta(days=1)):
ret.append(row)
return ret
def get_logs_by_date(uid, date):
return get_logs(uid, date, date)
def print_log(*log_row):
"""
Pretty print a log row
log row format: (ID, User_PIN, Verify_Type, Verify_Time, Status)
"""
id, uid, verify_type, verify_time, status = log_row
if status == 1:
status = 'Check out'
elif status == 0:
status = 'Check in'
print('{}. {} {} at {}'.format(id, uid, status, verify_time))
def check_log_row(log_row):
"""
Each day must have exactly 2 logs.
One for checking in, before 8:00:00
One for checking out, after 17:00:00
Return True if satisfies all conditions, else False
"""
in_time = datetime.time(8, 0, 0)
out_time = datetime.time(17, 0, 0)
log_date = datetime.datetime.strptime(log_row[2], '%Y-%m-%dT%H:%M:%S')
status = log_row[-1]
if status == 1 and log_date.time() < out_time:
print('Early log on {}: {}'.format(log_date.date(), log_date))
return False
elif status == 0 and log_date.time() > in_time:
print('Late log on {}: {}'.format(log_date.date(), log_date))
return False
else:
return True
def check_log_by_date(uid, date):
pass
def fix_logs(uid, start_date, end_date):
"""
Fix logs of uid from start_date to end_date
A normalized log contains 2 logs per day
One check in log before 8:00
One check out log after 17:00
"""
start_date = '{:%d/%m/%Y}'.format(start_date)
end_date = '{:%d/%m/%Y}'.format(end_date)
day_count = end_date - start_date + 1
for date in (start_date + datetime.timedelta(i) for i in range(day_count)):
date = '{:%d/%m/%Y}'.format(date.date)
logs = get_logs_by_date(uid, date)
if len(logs) == 2:
if not check_log_row(logs[0]) or not check_log_row(logs[1]):
delete_log(logs[0][0])
delete_log(logs[1][0])
add_log(uid, date, 'in')
add_log(uid, date, 'out')
elif len(logs) == 0:
add_log(uid, date, 'in')
add_log(uid, date, 'out')
else:
for log in logs:
delete_log(log[0])
add_log(uid, date, 'in')
add_log(uid, date, 'out')
def main():
today = '{:%d/%m/%Y}'.format(datetime.date.today())
parser = argparse.ArgumentParser()
parser.add_argument('action', help=
'`get`, `checkin`, `checkout`, `add` or `fix` logs', default='get')
parser.add_argument('uids', help='User PINs', type=int, nargs='*')
parser.add_argument('-d', '--date', help='Date', default=today)
parser.add_argument('-r', '--range', help=
'Range of date, ex. 01/01/2017-02/01/2017')
parser.add_argument('--log', help='log id to delete')
parser.add_argument('--late', help='Checkin late or not', action=
'store_true')
args = parser.parse_args()
uids = args.uids
date = args.date or today
if not args.range:
start, end = date, date
else:
start, end = args.range.split('-')
transfer_file(DEVICE_IP, server_ip, DB_PATH, cmd='ftpput')
for uid in uids:
if args.action == 'get':
logs = get_logs(uid, start, end)
for log in logs:
print_log(*log)
elif args.action == 'checkin':
add_logs(uid, start, end, 'in', late=args.late)
elif args.action == 'checkout':
add_logs(uid, start, end, 'out')
elif args.action == 'add':
add_log(uid, start, end)
elif args.action == 'fix':
fix_logs(uid, start, end)
elif args.action == 'delete':
delete_log(args.log)
else:
raise ValueError(
'Action must be `get`, `checkin`, `checkout`, `fix` or `delete`'
)
transfer_file(server_ip, DEVICE_IP, DB_PATH, cmd='ftpget')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_server_ip(device_ip):
import socket
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect((device_ip, 80))
return s.getsockname()[0]
def transfer_file(from_ip, to_ip, remote_file_path, cmd='ftpput'):
"""
Transfer file from from_ip to to_ip via telnet.
Use ftpput and ftpget.
"""
try:
import pyftpdlib
except ImportError:
import pip
pip.main('install pyftpdlib'.split())
ftp_server = spr.Popen([sys.executable, '-m', 'pyftpdlib', '-w'])
print('Server started')
filename = os.path.basename(remote_file_path)
s = telnetlib.Telnet(DEVICE_IP)
print(s.read_until(b'login: ').decode())
s.write(b'root \n')
print(s.read_until(b'Password: ').decode())
s.write(b'solokey\n')
if s.read_until(b'#'):
s.write(bytes('ls %s\n' % DB_PATH, 'utf-8'))
files = s.read_until(b'#').decode()
if filename in files:
while True:
if cmd == 'ftpput':
command = bytes('%s -P 2121 %s %s %s\n' % (cmd,
server_ip, filename, remote_file_path), 'utf-8')
elif cmd == 'ftpget':
command = bytes('%s -P 2121 %s %s %s\n' % (cmd,
server_ip, remote_file_path, filename), 'utf-8')
else:
raise ValueError('cmd must be `ftpput` or `ftpget`')
s.write(command)
ret = s.read_until(b'#').decode()
if 'refused' not in ret:
print(ret)
break
ftp_server.kill()
print('Server killed')
def generate_verify_time(status='in', late=False):
"""
Generate normal verify time based on status `in` or `out`
`in` time will be random 10 mins before 8:00
`out` time will be random 10 mins after 17:00
"""
if status == 'in':
status = 0
if not late:
hour = 7
minute = random.randint(50, 59)
else:
hour = 8
minute = random.randint(15, 20)
elif status == 'out':
status = 1
hour = 17
minute = random.randint(0, 10)
else:
raise ValueError('status must be `in` or `out`')
second = random.randint(0, 59)
time = datetime.time(hour, minute, second)
return time
def add_log(uid, date, status, late=False):
"""
Edit ZKDB.db file, ATT_LOG table,
insert a row which represents a check in/out log
uid: User PIN
date: follow format: dd/mm/yyyy - 14/01/2017
status: 'in' is checking in, 'out' is checking out
"""
verify_type = 1
if status == 'in':
status = 0
time = generate_verify_time('in', late=late)
elif status == 'out':
status = 1
time = generate_verify_time('out')
else:
raise ValueError('status must be `in` or `out`')
date = datetime.datetime.strptime(date, '%d/%m/%Y')
combined = datetime.datetime.combine(date, time)
verify_time = '{:%Y-%m-%dT%H:%M:%S}'.format(combined)
with sqlite3.connect(DB) as conn:
query = (
'INSERT INTO ATT_LOG (User_PIN, Verify_Type, Verify_Time, Status, Work_Code_ID, SEND_FLAG) VALUES ({}, {}, "{}", {}, 0, 0)'
.format(uid, verify_type, verify_time, status, 0, 0))
cur = conn.execute(query)
cur = conn.execute('SELECT last_insert_rowid() FROM ATT_LOG')
r = cur.fetchone()
print_log(r, uid, verify_type, verify_time, status)
def add_logs(uid, start, end, status, late=False):
start_date = datetime.datetime.strptime(start, '%d/%m/%Y')
end_date = datetime.datetime.strptime(end, '%d/%m/%Y')
day_count = end_date - start_date
day_count = day_count.days + 1
for date in (start_date + datetime.timedelta(i) for i in range(day_count)):
date = '{:%d/%m/%Y}'.format(date)
add_log(uid, date, status, late)
def delete_log(log_id):
"""
Delete a log row with ID=log_id
"""
with sqlite3.connect(DB) as conn:
query = 'DELETE FROM ATT_LOG WHERE ID={}'.format(log_id)
conn.execute(query)
print('Deleted log {}'.format(log_id))
def get_logs(uid, start_date, end_date):
"""
Returns logs of 'uid' from 'start_date' to 'end_date'
uid: User PIN
start_date: follow format 14/01/2017
end_date: follow format 15/01/2017
Return format: list of (ID, User_PIN, Verify_Type, Verify_Time, Status)
"""
start_date = datetime.datetime.strptime(start_date, '%d/%m/%Y')
end_date = datetime.datetime.strptime(end_date, '%d/%m/%Y')
with sqlite3.connect(DB) as conn:
query = (
'SELECT ID, User_PIN, Verify_Type, Verify_Time, Status FROM ATT_LOG WHERE User_PIN = {}'
.format(uid))
cur = conn.execute(query)
rows = cur.fetchall()
ret = []
for row in rows:
log_date = datetime.datetime.strptime(row[-2], '%Y-%m-%dT%H:%M:%S')
if (log_date >= start_date and log_date <= end_date + datetime.
timedelta(days=1)):
ret.append(row)
return ret
def get_logs_by_date(uid, date):
return get_logs(uid, date, date)
def print_log(*log_row):
"""
Pretty print a log row
log row format: (ID, User_PIN, Verify_Type, Verify_Time, Status)
"""
id, uid, verify_type, verify_time, status = log_row
if status == 1:
status = 'Check out'
elif status == 0:
status = 'Check in'
print('{}. {} {} at {}'.format(id, uid, status, verify_time))
def check_log_row(log_row):
"""
Each day must have exactly 2 logs.
One for checking in, before 8:00:00
One for checking out, after 17:00:00
Return True if satisfies all conditions, else False
"""
in_time = datetime.time(8, 0, 0)
out_time = datetime.time(17, 0, 0)
log_date = datetime.datetime.strptime(log_row[2], '%Y-%m-%dT%H:%M:%S')
status = log_row[-1]
if status == 1 and log_date.time() < out_time:
print('Early log on {}: {}'.format(log_date.date(), log_date))
return False
elif status == 0 and log_date.time() > in_time:
print('Late log on {}: {}'.format(log_date.date(), log_date))
return False
else:
return True
def check_log_by_date(uid, date):
pass
def fix_logs(uid, start_date, end_date):
"""
Fix logs of uid from start_date to end_date
A normalized log contains 2 logs per day
One check in log before 8:00
One check out log after 17:00
"""
start_date = '{:%d/%m/%Y}'.format(start_date)
end_date = '{:%d/%m/%Y}'.format(end_date)
day_count = end_date - start_date + 1
for date in (start_date + datetime.timedelta(i) for i in range(day_count)):
date = '{:%d/%m/%Y}'.format(date.date)
logs = get_logs_by_date(uid, date)
if len(logs) == 2:
if not check_log_row(logs[0]) or not check_log_row(logs[1]):
delete_log(logs[0][0])
delete_log(logs[1][0])
add_log(uid, date, 'in')
add_log(uid, date, 'out')
elif len(logs) == 0:
add_log(uid, date, 'in')
add_log(uid, date, 'out')
else:
for log in logs:
delete_log(log[0])
add_log(uid, date, 'in')
add_log(uid, date, 'out')
def main():
today = '{:%d/%m/%Y}'.format(datetime.date.today())
parser = argparse.ArgumentParser()
parser.add_argument('action', help=
'`get`, `checkin`, `checkout`, `add` or `fix` logs', default='get')
parser.add_argument('uids', help='User PINs', type=int, nargs='*')
parser.add_argument('-d', '--date', help='Date', default=today)
parser.add_argument('-r', '--range', help=
'Range of date, ex. 01/01/2017-02/01/2017')
parser.add_argument('--log', help='log id to delete')
parser.add_argument('--late', help='Checkin late or not', action=
'store_true')
args = parser.parse_args()
uids = args.uids
date = args.date or today
if not args.range:
start, end = date, date
else:
start, end = args.range.split('-')
transfer_file(DEVICE_IP, server_ip, DB_PATH, cmd='ftpput')
for uid in uids:
if args.action == 'get':
logs = get_logs(uid, start, end)
for log in logs:
print_log(*log)
elif args.action == 'checkin':
add_logs(uid, start, end, 'in', late=args.late)
elif args.action == 'checkout':
add_logs(uid, start, end, 'out')
elif args.action == 'add':
add_log(uid, start, end)
elif args.action == 'fix':
fix_logs(uid, start, end)
elif args.action == 'delete':
delete_log(args.log)
else:
raise ValueError(
'Action must be `get`, `checkin`, `checkout`, `fix` or `delete`'
)
transfer_file(server_ip, DEVICE_IP, DB_PATH, cmd='ftpget')
if __name__ == '__main__':
DEVICE_IP = '10.0.0.204'
DB_PATH = '/mnt/mtdblock/data/ZKDB.db'
DB = os.path.basename(DB_PATH)
server_ip = get_server_ip(DEVICE_IP)
main()
<|reserved_special_token_1|>
'''
This script will do auto-check in/out for ZMM100 fingerprint access control
device by ZKSoftware.
At my office, the manager uses an application to load data from the
fingerprint device. After he loads data, log in device's database is cleared.
So in my case, I write this script to automate checking in/out everyday.
Device is running linux with busybox, so I have access to ftpput, ftpget and
wget commands (ftpd is missing). Data is stored in /mnt/mtdblock/data/ZKDB.db.
This is a sqlite3 database file. User info is in USER_INFO, user transactions
are in ATT_LOG table.
Procedure:
- telnet into the device
- ftpput database file at /mnt/mtdblock/data/ZKDB.db to a temporary FTP server
- edit ZKDB.db file on server
- ftpget ZKDB.db from FTP server
'''
import argparse
import datetime
import os
import random
import sqlite3
import subprocess as spr
import sys
import telnetlib
def get_server_ip(device_ip):
import socket
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect((device_ip, 80))
return s.getsockname()[0]
def transfer_file(from_ip, to_ip, remote_file_path, cmd='ftpput'):
'''
Transfer file from from_ip to to_ip via telnet.
Use ftpput and ftpget.
'''
# ====FTP Server====
try:
import pyftpdlib
except ImportError:
import pip
pip.main('install pyftpdlib'.split())
# start pyftpdlib FTP server: anonymous with write permission, port 2121
ftp_server = spr.Popen([sys.executable, '-m', 'pyftpdlib', '-w'])
print('Server started')
filename = os.path.basename(remote_file_path)
s = telnetlib.Telnet(DEVICE_IP)
print(s.read_until(b'login: ').decode())
s.write(b'root \n')
print(s.read_until(b'Password: ').decode())
s.write(b'solokey\n')
if s.read_until(b'#'):
s.write(bytes('ls %s\n' % DB_PATH, 'utf-8'))
files = s.read_until(b'#').decode()
if filename in files:
while True:
if cmd == 'ftpput':
command = bytes('%s -P 2121 %s %s %s\n' % (cmd, server_ip,
filename,
remote_file_path),
'utf-8')
elif cmd == 'ftpget':
command = bytes('%s -P 2121 %s %s %s\n' % (cmd, server_ip, remote_file_path, filename), 'utf-8')
else:
raise ValueError('cmd must be `ftpput` or `ftpget`')
s.write(command)
ret = s.read_until(b'#').decode()
if 'refused' not in ret:
print(ret)
break
# stop pyftpdlib FTP server
ftp_server.kill()
print('Server killed')
def generate_verify_time(status='in', late=False):
'''
Generate normal verify time based on status `in` or `out`
`in` time will be random 10 mins before 8:00
`out` time will be random 10 mins after 17:00
'''
if status == 'in':
status = 0
if not late:
hour = 7
minute = random.randint(50, 59)
else:
hour = 8
minute = random.randint(15, 20)
elif status == 'out':
status = 1
hour = 17
minute = random.randint(0, 10)
else:
raise ValueError('status must be `in` or `out`')
second = random.randint(0, 59)
time = datetime.time(hour, minute, second)
return time
def add_log(uid, date, status, late=False):
'''
Edit ZKDB.db file, ATT_LOG table,
insert a row which represents a check in/out log
uid: User PIN
date: follow format: dd/mm/yyyy - 14/01/2017
status: 'in' is checking in, 'out' is checking out
'''
# verify_type: 0 is password, 1 is fingerprint
verify_type = 1
if status == 'in':
status = 0
time = generate_verify_time('in', late=late)
elif status == 'out':
status = 1
time = generate_verify_time('out')
else:
raise ValueError('status must be `in` or `out`')
date = datetime.datetime.strptime(date, '%d/%m/%Y')
combined = datetime.datetime.combine(date, time)
verify_time = '{:%Y-%m-%dT%H:%M:%S}'.format(combined)
with sqlite3.connect(DB) as conn:
query = ('INSERT INTO ATT_LOG (User_PIN, Verify_Type, Verify_Time, '
'Status, Work_Code_ID, SEND_FLAG) '
'VALUES ({}, {}, "{}", {}, 0, 0)').format(uid, verify_type,
verify_time, status,
0, 0)
cur = conn.execute(query)
cur = conn.execute('SELECT last_insert_rowid() FROM ATT_LOG')
r = cur.fetchone()
print_log(r, uid, verify_type, verify_time, status)
def add_logs(uid, start, end, status, late=False):
start_date = datetime.datetime.strptime(start, '%d/%m/%Y')
end_date = datetime.datetime.strptime(end, '%d/%m/%Y')
day_count = end_date - start_date
day_count = day_count.days + 1
for date in (start_date + datetime.timedelta(i) for i in range(day_count)):
date = '{:%d/%m/%Y}'.format(date)
add_log(uid, date, status, late)
def delete_log(log_id):
'''
Delete a log row with ID=log_id
'''
with sqlite3.connect(DB) as conn:
query = ('DELETE FROM ATT_LOG WHERE ID={}'.format(log_id))
conn.execute(query)
print('Deleted log {}'.format(log_id))
def get_logs(uid, start_date, end_date):
'''
Returns logs of 'uid' from 'start_date' to 'end_date'
uid: User PIN
start_date: follow format 14/01/2017
end_date: follow format 15/01/2017
Return format: list of (ID, User_PIN, Verify_Type, Verify_Time, Status)
'''
start_date = datetime.datetime.strptime(start_date, '%d/%m/%Y')
end_date = datetime.datetime.strptime(end_date, '%d/%m/%Y')
with sqlite3.connect(DB) as conn:
query = ('SELECT ID, User_PIN, Verify_Type, Verify_Time, Status '
'FROM ATT_LOG WHERE User_PIN = {}'.format(uid))
cur = conn.execute(query)
rows = cur.fetchall()
ret = []
for row in rows:
log_date = datetime.datetime.strptime(row[-2], '%Y-%m-%dT%H:%M:%S')
if log_date >= start_date and log_date <= end_date + datetime.timedelta(days=1):
ret.append(row)
return ret
def get_logs_by_date(uid, date):
return get_logs(uid, date, date)
def print_log(*log_row):
'''
Pretty print a log row
log row format: (ID, User_PIN, Verify_Type, Verify_Time, Status)
'''
id, uid, verify_type, verify_time, status = log_row
if status == 1:
status = 'Check out'
elif status == 0:
status = 'Check in'
print('{}. {} {} at {}'.format(id, uid, status, verify_time))
def check_log_row(log_row):
'''
Each day must have exactly 2 logs.
One for checking in, before 8:00:00
One for checking out, after 17:00:00
Return True if satisfies all conditions, else False
'''
in_time = datetime.time(8, 0, 0)
out_time = datetime.time(17, 0, 0)
log_date = datetime.datetime.strptime(log_row[2], '%Y-%m-%dT%H:%M:%S')
status = log_row[-1]
if status == 1 and log_date.time() < out_time:
print('Early log on {}: {}'.format(log_date.date(), log_date))
return False
elif status == 0 and log_date.time() > in_time:
print('Late log on {}: {}'.format(log_date.date(), log_date))
return False
else:
return True
def check_log_by_date(uid, date):
pass
def fix_logs(uid, start_date, end_date):
'''
Fix logs of uid from start_date to end_date
A normalized log contains 2 logs per day
One check in log before 8:00
One check out log after 17:00
'''
start_date = '{:%d/%m/%Y}'.format(start_date)
end_date = '{:%d/%m/%Y}'.format(end_date)
day_count = (end_date - start_date) + 1
for date in (start_date + datetime.timedelta(i) for i in range(day_count)):
date = '{:%d/%m/%Y}'.format(date.date)
logs = get_logs_by_date(uid, date)
if len(logs) == 2:
if not check_log_row(logs[0]) or not check_log_row(logs[1]):
delete_log(logs[0][0])
delete_log(logs[1][0])
add_log(uid, date, 'in')
add_log(uid, date, 'out')
elif len(logs) == 0:
add_log(uid, date, 'in')
add_log(uid, date, 'out')
else:
for log in logs:
delete_log(log[0])
add_log(uid, date, 'in')
add_log(uid, date, 'out')
def main():
today = '{:%d/%m/%Y}'.format(datetime.date.today())
parser = argparse.ArgumentParser()
parser.add_argument('action', help='`get`, `checkin`, `checkout`, '
'`add` or `fix` logs', default='get')
parser.add_argument('uids', help='User PINs', type=int, nargs='*')
parser.add_argument('-d', '--date', help='Date', default=today)
parser.add_argument('-r', '--range',
help='Range of date, ex. 01/01/2017-02/01/2017')
parser.add_argument('--log', help='log id to delete')
parser.add_argument('--late', help='Checkin late or not',
action='store_true')
args = parser.parse_args()
uids = args.uids
date = args.date or today
if not args.range:
start, end = date, date
else:
start, end = args.range.split('-')
transfer_file(DEVICE_IP, server_ip, DB_PATH, cmd='ftpput')
for uid in uids:
if args.action == 'get':
logs = get_logs(uid, start, end)
for log in logs:
print_log(*log)
elif args.action == 'checkin':
add_logs(uid, start, end, 'in', late=args.late)
elif args.action == 'checkout':
add_logs(uid, start, end, 'out')
elif args.action == 'add':
add_log(uid, start, end)
elif args.action == 'fix':
fix_logs(uid, start, end)
elif args.action == 'delete':
delete_log(args.log)
else:
raise ValueError('Action must be `get`, `checkin`, `checkout`, '
'`fix` or `delete`')
transfer_file(server_ip, DEVICE_IP, DB_PATH, cmd='ftpget')
if __name__ == '__main__':
# ====config====
DEVICE_IP = '10.0.0.204' # todo: find IP, input IP
DB_PATH = '/mnt/mtdblock/data/ZKDB.db'
DB = os.path.basename(DB_PATH)
server_ip = get_server_ip(DEVICE_IP)
main()
|
flexible
|
{
"blob_id": "3d1e6be71f92910cdc9eb2bf60ea7f8f1187f706",
"index": 3698,
"step-1": "<mask token>\n\n\ndef get_server_ip(device_ip):\n import socket\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.connect((device_ip, 80))\n return s.getsockname()[0]\n\n\ndef transfer_file(from_ip, to_ip, remote_file_path, cmd='ftpput'):\n \"\"\"\n Transfer file from from_ip to to_ip via telnet.\n Use ftpput and ftpget.\n\n \"\"\"\n try:\n import pyftpdlib\n except ImportError:\n import pip\n pip.main('install pyftpdlib'.split())\n ftp_server = spr.Popen([sys.executable, '-m', 'pyftpdlib', '-w'])\n print('Server started')\n filename = os.path.basename(remote_file_path)\n s = telnetlib.Telnet(DEVICE_IP)\n print(s.read_until(b'login: ').decode())\n s.write(b'root \\n')\n print(s.read_until(b'Password: ').decode())\n s.write(b'solokey\\n')\n if s.read_until(b'#'):\n s.write(bytes('ls %s\\n' % DB_PATH, 'utf-8'))\n files = s.read_until(b'#').decode()\n if filename in files:\n while True:\n if cmd == 'ftpput':\n command = bytes('%s -P 2121 %s %s %s\\n' % (cmd,\n server_ip, filename, remote_file_path), 'utf-8')\n elif cmd == 'ftpget':\n command = bytes('%s -P 2121 %s %s %s\\n' % (cmd,\n server_ip, remote_file_path, filename), 'utf-8')\n else:\n raise ValueError('cmd must be `ftpput` or `ftpget`')\n s.write(command)\n ret = s.read_until(b'#').decode()\n if 'refused' not in ret:\n print(ret)\n break\n ftp_server.kill()\n print('Server killed')\n\n\n<mask token>\n\n\ndef print_log(*log_row):\n \"\"\"\n Pretty print a log row\n log row format: (ID, User_PIN, Verify_Type, Verify_Time, Status)\n \"\"\"\n id, uid, verify_type, verify_time, status = log_row\n if status == 1:\n status = 'Check out'\n elif status == 0:\n status = 'Check in'\n print('{}. {} {} at {}'.format(id, uid, status, verify_time))\n\n\n<mask token>\n\n\ndef check_log_by_date(uid, date):\n pass\n\n\ndef fix_logs(uid, start_date, end_date):\n \"\"\"\n Fix logs of uid from start_date to end_date\n A normalized log contains 2 logs per day\n One check in log before 8:00\n One check out log after 17:00\n \"\"\"\n start_date = '{:%d/%m/%Y}'.format(start_date)\n end_date = '{:%d/%m/%Y}'.format(end_date)\n day_count = end_date - start_date + 1\n for date in (start_date + datetime.timedelta(i) for i in range(day_count)):\n date = '{:%d/%m/%Y}'.format(date.date)\n logs = get_logs_by_date(uid, date)\n if len(logs) == 2:\n if not check_log_row(logs[0]) or not check_log_row(logs[1]):\n delete_log(logs[0][0])\n delete_log(logs[1][0])\n add_log(uid, date, 'in')\n add_log(uid, date, 'out')\n elif len(logs) == 0:\n add_log(uid, date, 'in')\n add_log(uid, date, 'out')\n else:\n for log in logs:\n delete_log(log[0])\n add_log(uid, date, 'in')\n add_log(uid, date, 'out')\n\n\ndef main():\n today = '{:%d/%m/%Y}'.format(datetime.date.today())\n parser = argparse.ArgumentParser()\n parser.add_argument('action', help=\n '`get`, `checkin`, `checkout`, `add` or `fix` logs', default='get')\n parser.add_argument('uids', help='User PINs', type=int, nargs='*')\n parser.add_argument('-d', '--date', help='Date', default=today)\n parser.add_argument('-r', '--range', help=\n 'Range of date, ex. 01/01/2017-02/01/2017')\n parser.add_argument('--log', help='log id to delete')\n parser.add_argument('--late', help='Checkin late or not', action=\n 'store_true')\n args = parser.parse_args()\n uids = args.uids\n date = args.date or today\n if not args.range:\n start, end = date, date\n else:\n start, end = args.range.split('-')\n transfer_file(DEVICE_IP, server_ip, DB_PATH, cmd='ftpput')\n for uid in uids:\n if args.action == 'get':\n logs = get_logs(uid, start, end)\n for log in logs:\n print_log(*log)\n elif args.action == 'checkin':\n add_logs(uid, start, end, 'in', late=args.late)\n elif args.action == 'checkout':\n add_logs(uid, start, end, 'out')\n elif args.action == 'add':\n add_log(uid, start, end)\n elif args.action == 'fix':\n fix_logs(uid, start, end)\n elif args.action == 'delete':\n delete_log(args.log)\n else:\n raise ValueError(\n 'Action must be `get`, `checkin`, `checkout`, `fix` or `delete`'\n )\n transfer_file(server_ip, DEVICE_IP, DB_PATH, cmd='ftpget')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_server_ip(device_ip):\n import socket\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.connect((device_ip, 80))\n return s.getsockname()[0]\n\n\ndef transfer_file(from_ip, to_ip, remote_file_path, cmd='ftpput'):\n \"\"\"\n Transfer file from from_ip to to_ip via telnet.\n Use ftpput and ftpget.\n\n \"\"\"\n try:\n import pyftpdlib\n except ImportError:\n import pip\n pip.main('install pyftpdlib'.split())\n ftp_server = spr.Popen([sys.executable, '-m', 'pyftpdlib', '-w'])\n print('Server started')\n filename = os.path.basename(remote_file_path)\n s = telnetlib.Telnet(DEVICE_IP)\n print(s.read_until(b'login: ').decode())\n s.write(b'root \\n')\n print(s.read_until(b'Password: ').decode())\n s.write(b'solokey\\n')\n if s.read_until(b'#'):\n s.write(bytes('ls %s\\n' % DB_PATH, 'utf-8'))\n files = s.read_until(b'#').decode()\n if filename in files:\n while True:\n if cmd == 'ftpput':\n command = bytes('%s -P 2121 %s %s %s\\n' % (cmd,\n server_ip, filename, remote_file_path), 'utf-8')\n elif cmd == 'ftpget':\n command = bytes('%s -P 2121 %s %s %s\\n' % (cmd,\n server_ip, remote_file_path, filename), 'utf-8')\n else:\n raise ValueError('cmd must be `ftpput` or `ftpget`')\n s.write(command)\n ret = s.read_until(b'#').decode()\n if 'refused' not in ret:\n print(ret)\n break\n ftp_server.kill()\n print('Server killed')\n\n\ndef generate_verify_time(status='in', late=False):\n \"\"\"\n Generate normal verify time based on status `in` or `out`\n `in` time will be random 10 mins before 8:00\n `out` time will be random 10 mins after 17:00\n \"\"\"\n if status == 'in':\n status = 0\n if not late:\n hour = 7\n minute = random.randint(50, 59)\n else:\n hour = 8\n minute = random.randint(15, 20)\n elif status == 'out':\n status = 1\n hour = 17\n minute = random.randint(0, 10)\n else:\n raise ValueError('status must be `in` or `out`')\n second = random.randint(0, 59)\n time = datetime.time(hour, minute, second)\n return time\n\n\ndef add_log(uid, date, status, late=False):\n \"\"\"\n Edit ZKDB.db file, ATT_LOG table,\n insert a row which represents a check in/out log\n uid: User PIN\n date: follow format: dd/mm/yyyy - 14/01/2017\n status: 'in' is checking in, 'out' is checking out\n \"\"\"\n verify_type = 1\n if status == 'in':\n status = 0\n time = generate_verify_time('in', late=late)\n elif status == 'out':\n status = 1\n time = generate_verify_time('out')\n else:\n raise ValueError('status must be `in` or `out`')\n date = datetime.datetime.strptime(date, '%d/%m/%Y')\n combined = datetime.datetime.combine(date, time)\n verify_time = '{:%Y-%m-%dT%H:%M:%S}'.format(combined)\n with sqlite3.connect(DB) as conn:\n query = (\n 'INSERT INTO ATT_LOG (User_PIN, Verify_Type, Verify_Time, Status, Work_Code_ID, SEND_FLAG) VALUES ({}, {}, \"{}\", {}, 0, 0)'\n .format(uid, verify_type, verify_time, status, 0, 0))\n cur = conn.execute(query)\n cur = conn.execute('SELECT last_insert_rowid() FROM ATT_LOG')\n r = cur.fetchone()\n print_log(r, uid, verify_type, verify_time, status)\n\n\ndef add_logs(uid, start, end, status, late=False):\n start_date = datetime.datetime.strptime(start, '%d/%m/%Y')\n end_date = datetime.datetime.strptime(end, '%d/%m/%Y')\n day_count = end_date - start_date\n day_count = day_count.days + 1\n for date in (start_date + datetime.timedelta(i) for i in range(day_count)):\n date = '{:%d/%m/%Y}'.format(date)\n add_log(uid, date, status, late)\n\n\ndef delete_log(log_id):\n \"\"\"\n Delete a log row with ID=log_id\n \"\"\"\n with sqlite3.connect(DB) as conn:\n query = 'DELETE FROM ATT_LOG WHERE ID={}'.format(log_id)\n conn.execute(query)\n print('Deleted log {}'.format(log_id))\n\n\n<mask token>\n\n\ndef get_logs_by_date(uid, date):\n return get_logs(uid, date, date)\n\n\ndef print_log(*log_row):\n \"\"\"\n Pretty print a log row\n log row format: (ID, User_PIN, Verify_Type, Verify_Time, Status)\n \"\"\"\n id, uid, verify_type, verify_time, status = log_row\n if status == 1:\n status = 'Check out'\n elif status == 0:\n status = 'Check in'\n print('{}. {} {} at {}'.format(id, uid, status, verify_time))\n\n\ndef check_log_row(log_row):\n \"\"\"\n Each day must have exactly 2 logs.\n One for checking in, before 8:00:00\n One for checking out, after 17:00:00\n Return True if satisfies all conditions, else False\n \"\"\"\n in_time = datetime.time(8, 0, 0)\n out_time = datetime.time(17, 0, 0)\n log_date = datetime.datetime.strptime(log_row[2], '%Y-%m-%dT%H:%M:%S')\n status = log_row[-1]\n if status == 1 and log_date.time() < out_time:\n print('Early log on {}: {}'.format(log_date.date(), log_date))\n return False\n elif status == 0 and log_date.time() > in_time:\n print('Late log on {}: {}'.format(log_date.date(), log_date))\n return False\n else:\n return True\n\n\ndef check_log_by_date(uid, date):\n pass\n\n\ndef fix_logs(uid, start_date, end_date):\n \"\"\"\n Fix logs of uid from start_date to end_date\n A normalized log contains 2 logs per day\n One check in log before 8:00\n One check out log after 17:00\n \"\"\"\n start_date = '{:%d/%m/%Y}'.format(start_date)\n end_date = '{:%d/%m/%Y}'.format(end_date)\n day_count = end_date - start_date + 1\n for date in (start_date + datetime.timedelta(i) for i in range(day_count)):\n date = '{:%d/%m/%Y}'.format(date.date)\n logs = get_logs_by_date(uid, date)\n if len(logs) == 2:\n if not check_log_row(logs[0]) or not check_log_row(logs[1]):\n delete_log(logs[0][0])\n delete_log(logs[1][0])\n add_log(uid, date, 'in')\n add_log(uid, date, 'out')\n elif len(logs) == 0:\n add_log(uid, date, 'in')\n add_log(uid, date, 'out')\n else:\n for log in logs:\n delete_log(log[0])\n add_log(uid, date, 'in')\n add_log(uid, date, 'out')\n\n\ndef main():\n today = '{:%d/%m/%Y}'.format(datetime.date.today())\n parser = argparse.ArgumentParser()\n parser.add_argument('action', help=\n '`get`, `checkin`, `checkout`, `add` or `fix` logs', default='get')\n parser.add_argument('uids', help='User PINs', type=int, nargs='*')\n parser.add_argument('-d', '--date', help='Date', default=today)\n parser.add_argument('-r', '--range', help=\n 'Range of date, ex. 01/01/2017-02/01/2017')\n parser.add_argument('--log', help='log id to delete')\n parser.add_argument('--late', help='Checkin late or not', action=\n 'store_true')\n args = parser.parse_args()\n uids = args.uids\n date = args.date or today\n if not args.range:\n start, end = date, date\n else:\n start, end = args.range.split('-')\n transfer_file(DEVICE_IP, server_ip, DB_PATH, cmd='ftpput')\n for uid in uids:\n if args.action == 'get':\n logs = get_logs(uid, start, end)\n for log in logs:\n print_log(*log)\n elif args.action == 'checkin':\n add_logs(uid, start, end, 'in', late=args.late)\n elif args.action == 'checkout':\n add_logs(uid, start, end, 'out')\n elif args.action == 'add':\n add_log(uid, start, end)\n elif args.action == 'fix':\n fix_logs(uid, start, end)\n elif args.action == 'delete':\n delete_log(args.log)\n else:\n raise ValueError(\n 'Action must be `get`, `checkin`, `checkout`, `fix` or `delete`'\n )\n transfer_file(server_ip, DEVICE_IP, DB_PATH, cmd='ftpget')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef get_server_ip(device_ip):\n import socket\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.connect((device_ip, 80))\n return s.getsockname()[0]\n\n\ndef transfer_file(from_ip, to_ip, remote_file_path, cmd='ftpput'):\n \"\"\"\n Transfer file from from_ip to to_ip via telnet.\n Use ftpput and ftpget.\n\n \"\"\"\n try:\n import pyftpdlib\n except ImportError:\n import pip\n pip.main('install pyftpdlib'.split())\n ftp_server = spr.Popen([sys.executable, '-m', 'pyftpdlib', '-w'])\n print('Server started')\n filename = os.path.basename(remote_file_path)\n s = telnetlib.Telnet(DEVICE_IP)\n print(s.read_until(b'login: ').decode())\n s.write(b'root \\n')\n print(s.read_until(b'Password: ').decode())\n s.write(b'solokey\\n')\n if s.read_until(b'#'):\n s.write(bytes('ls %s\\n' % DB_PATH, 'utf-8'))\n files = s.read_until(b'#').decode()\n if filename in files:\n while True:\n if cmd == 'ftpput':\n command = bytes('%s -P 2121 %s %s %s\\n' % (cmd,\n server_ip, filename, remote_file_path), 'utf-8')\n elif cmd == 'ftpget':\n command = bytes('%s -P 2121 %s %s %s\\n' % (cmd,\n server_ip, remote_file_path, filename), 'utf-8')\n else:\n raise ValueError('cmd must be `ftpput` or `ftpget`')\n s.write(command)\n ret = s.read_until(b'#').decode()\n if 'refused' not in ret:\n print(ret)\n break\n ftp_server.kill()\n print('Server killed')\n\n\ndef generate_verify_time(status='in', late=False):\n \"\"\"\n Generate normal verify time based on status `in` or `out`\n `in` time will be random 10 mins before 8:00\n `out` time will be random 10 mins after 17:00\n \"\"\"\n if status == 'in':\n status = 0\n if not late:\n hour = 7\n minute = random.randint(50, 59)\n else:\n hour = 8\n minute = random.randint(15, 20)\n elif status == 'out':\n status = 1\n hour = 17\n minute = random.randint(0, 10)\n else:\n raise ValueError('status must be `in` or `out`')\n second = random.randint(0, 59)\n time = datetime.time(hour, minute, second)\n return time\n\n\ndef add_log(uid, date, status, late=False):\n \"\"\"\n Edit ZKDB.db file, ATT_LOG table,\n insert a row which represents a check in/out log\n uid: User PIN\n date: follow format: dd/mm/yyyy - 14/01/2017\n status: 'in' is checking in, 'out' is checking out\n \"\"\"\n verify_type = 1\n if status == 'in':\n status = 0\n time = generate_verify_time('in', late=late)\n elif status == 'out':\n status = 1\n time = generate_verify_time('out')\n else:\n raise ValueError('status must be `in` or `out`')\n date = datetime.datetime.strptime(date, '%d/%m/%Y')\n combined = datetime.datetime.combine(date, time)\n verify_time = '{:%Y-%m-%dT%H:%M:%S}'.format(combined)\n with sqlite3.connect(DB) as conn:\n query = (\n 'INSERT INTO ATT_LOG (User_PIN, Verify_Type, Verify_Time, Status, Work_Code_ID, SEND_FLAG) VALUES ({}, {}, \"{}\", {}, 0, 0)'\n .format(uid, verify_type, verify_time, status, 0, 0))\n cur = conn.execute(query)\n cur = conn.execute('SELECT last_insert_rowid() FROM ATT_LOG')\n r = cur.fetchone()\n print_log(r, uid, verify_type, verify_time, status)\n\n\ndef add_logs(uid, start, end, status, late=False):\n start_date = datetime.datetime.strptime(start, '%d/%m/%Y')\n end_date = datetime.datetime.strptime(end, '%d/%m/%Y')\n day_count = end_date - start_date\n day_count = day_count.days + 1\n for date in (start_date + datetime.timedelta(i) for i in range(day_count)):\n date = '{:%d/%m/%Y}'.format(date)\n add_log(uid, date, status, late)\n\n\ndef delete_log(log_id):\n \"\"\"\n Delete a log row with ID=log_id\n \"\"\"\n with sqlite3.connect(DB) as conn:\n query = 'DELETE FROM ATT_LOG WHERE ID={}'.format(log_id)\n conn.execute(query)\n print('Deleted log {}'.format(log_id))\n\n\ndef get_logs(uid, start_date, end_date):\n \"\"\"\n Returns logs of 'uid' from 'start_date' to 'end_date'\n uid: User PIN\n start_date: follow format 14/01/2017\n end_date: follow format 15/01/2017\n Return format: list of (ID, User_PIN, Verify_Type, Verify_Time, Status)\n \"\"\"\n start_date = datetime.datetime.strptime(start_date, '%d/%m/%Y')\n end_date = datetime.datetime.strptime(end_date, '%d/%m/%Y')\n with sqlite3.connect(DB) as conn:\n query = (\n 'SELECT ID, User_PIN, Verify_Type, Verify_Time, Status FROM ATT_LOG WHERE User_PIN = {}'\n .format(uid))\n cur = conn.execute(query)\n rows = cur.fetchall()\n ret = []\n for row in rows:\n log_date = datetime.datetime.strptime(row[-2], '%Y-%m-%dT%H:%M:%S')\n if (log_date >= start_date and log_date <= end_date + datetime.\n timedelta(days=1)):\n ret.append(row)\n return ret\n\n\ndef get_logs_by_date(uid, date):\n return get_logs(uid, date, date)\n\n\ndef print_log(*log_row):\n \"\"\"\n Pretty print a log row\n log row format: (ID, User_PIN, Verify_Type, Verify_Time, Status)\n \"\"\"\n id, uid, verify_type, verify_time, status = log_row\n if status == 1:\n status = 'Check out'\n elif status == 0:\n status = 'Check in'\n print('{}. {} {} at {}'.format(id, uid, status, verify_time))\n\n\ndef check_log_row(log_row):\n \"\"\"\n Each day must have exactly 2 logs.\n One for checking in, before 8:00:00\n One for checking out, after 17:00:00\n Return True if satisfies all conditions, else False\n \"\"\"\n in_time = datetime.time(8, 0, 0)\n out_time = datetime.time(17, 0, 0)\n log_date = datetime.datetime.strptime(log_row[2], '%Y-%m-%dT%H:%M:%S')\n status = log_row[-1]\n if status == 1 and log_date.time() < out_time:\n print('Early log on {}: {}'.format(log_date.date(), log_date))\n return False\n elif status == 0 and log_date.time() > in_time:\n print('Late log on {}: {}'.format(log_date.date(), log_date))\n return False\n else:\n return True\n\n\ndef check_log_by_date(uid, date):\n pass\n\n\ndef fix_logs(uid, start_date, end_date):\n \"\"\"\n Fix logs of uid from start_date to end_date\n A normalized log contains 2 logs per day\n One check in log before 8:00\n One check out log after 17:00\n \"\"\"\n start_date = '{:%d/%m/%Y}'.format(start_date)\n end_date = '{:%d/%m/%Y}'.format(end_date)\n day_count = end_date - start_date + 1\n for date in (start_date + datetime.timedelta(i) for i in range(day_count)):\n date = '{:%d/%m/%Y}'.format(date.date)\n logs = get_logs_by_date(uid, date)\n if len(logs) == 2:\n if not check_log_row(logs[0]) or not check_log_row(logs[1]):\n delete_log(logs[0][0])\n delete_log(logs[1][0])\n add_log(uid, date, 'in')\n add_log(uid, date, 'out')\n elif len(logs) == 0:\n add_log(uid, date, 'in')\n add_log(uid, date, 'out')\n else:\n for log in logs:\n delete_log(log[0])\n add_log(uid, date, 'in')\n add_log(uid, date, 'out')\n\n\ndef main():\n today = '{:%d/%m/%Y}'.format(datetime.date.today())\n parser = argparse.ArgumentParser()\n parser.add_argument('action', help=\n '`get`, `checkin`, `checkout`, `add` or `fix` logs', default='get')\n parser.add_argument('uids', help='User PINs', type=int, nargs='*')\n parser.add_argument('-d', '--date', help='Date', default=today)\n parser.add_argument('-r', '--range', help=\n 'Range of date, ex. 01/01/2017-02/01/2017')\n parser.add_argument('--log', help='log id to delete')\n parser.add_argument('--late', help='Checkin late or not', action=\n 'store_true')\n args = parser.parse_args()\n uids = args.uids\n date = args.date or today\n if not args.range:\n start, end = date, date\n else:\n start, end = args.range.split('-')\n transfer_file(DEVICE_IP, server_ip, DB_PATH, cmd='ftpput')\n for uid in uids:\n if args.action == 'get':\n logs = get_logs(uid, start, end)\n for log in logs:\n print_log(*log)\n elif args.action == 'checkin':\n add_logs(uid, start, end, 'in', late=args.late)\n elif args.action == 'checkout':\n add_logs(uid, start, end, 'out')\n elif args.action == 'add':\n add_log(uid, start, end)\n elif args.action == 'fix':\n fix_logs(uid, start, end)\n elif args.action == 'delete':\n delete_log(args.log)\n else:\n raise ValueError(\n 'Action must be `get`, `checkin`, `checkout`, `fix` or `delete`'\n )\n transfer_file(server_ip, DEVICE_IP, DB_PATH, cmd='ftpget')\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef get_server_ip(device_ip):\n import socket\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.connect((device_ip, 80))\n return s.getsockname()[0]\n\n\ndef transfer_file(from_ip, to_ip, remote_file_path, cmd='ftpput'):\n \"\"\"\n Transfer file from from_ip to to_ip via telnet.\n Use ftpput and ftpget.\n\n \"\"\"\n try:\n import pyftpdlib\n except ImportError:\n import pip\n pip.main('install pyftpdlib'.split())\n ftp_server = spr.Popen([sys.executable, '-m', 'pyftpdlib', '-w'])\n print('Server started')\n filename = os.path.basename(remote_file_path)\n s = telnetlib.Telnet(DEVICE_IP)\n print(s.read_until(b'login: ').decode())\n s.write(b'root \\n')\n print(s.read_until(b'Password: ').decode())\n s.write(b'solokey\\n')\n if s.read_until(b'#'):\n s.write(bytes('ls %s\\n' % DB_PATH, 'utf-8'))\n files = s.read_until(b'#').decode()\n if filename in files:\n while True:\n if cmd == 'ftpput':\n command = bytes('%s -P 2121 %s %s %s\\n' % (cmd,\n server_ip, filename, remote_file_path), 'utf-8')\n elif cmd == 'ftpget':\n command = bytes('%s -P 2121 %s %s %s\\n' % (cmd,\n server_ip, remote_file_path, filename), 'utf-8')\n else:\n raise ValueError('cmd must be `ftpput` or `ftpget`')\n s.write(command)\n ret = s.read_until(b'#').decode()\n if 'refused' not in ret:\n print(ret)\n break\n ftp_server.kill()\n print('Server killed')\n\n\ndef generate_verify_time(status='in', late=False):\n \"\"\"\n Generate normal verify time based on status `in` or `out`\n `in` time will be random 10 mins before 8:00\n `out` time will be random 10 mins after 17:00\n \"\"\"\n if status == 'in':\n status = 0\n if not late:\n hour = 7\n minute = random.randint(50, 59)\n else:\n hour = 8\n minute = random.randint(15, 20)\n elif status == 'out':\n status = 1\n hour = 17\n minute = random.randint(0, 10)\n else:\n raise ValueError('status must be `in` or `out`')\n second = random.randint(0, 59)\n time = datetime.time(hour, minute, second)\n return time\n\n\ndef add_log(uid, date, status, late=False):\n \"\"\"\n Edit ZKDB.db file, ATT_LOG table,\n insert a row which represents a check in/out log\n uid: User PIN\n date: follow format: dd/mm/yyyy - 14/01/2017\n status: 'in' is checking in, 'out' is checking out\n \"\"\"\n verify_type = 1\n if status == 'in':\n status = 0\n time = generate_verify_time('in', late=late)\n elif status == 'out':\n status = 1\n time = generate_verify_time('out')\n else:\n raise ValueError('status must be `in` or `out`')\n date = datetime.datetime.strptime(date, '%d/%m/%Y')\n combined = datetime.datetime.combine(date, time)\n verify_time = '{:%Y-%m-%dT%H:%M:%S}'.format(combined)\n with sqlite3.connect(DB) as conn:\n query = (\n 'INSERT INTO ATT_LOG (User_PIN, Verify_Type, Verify_Time, Status, Work_Code_ID, SEND_FLAG) VALUES ({}, {}, \"{}\", {}, 0, 0)'\n .format(uid, verify_type, verify_time, status, 0, 0))\n cur = conn.execute(query)\n cur = conn.execute('SELECT last_insert_rowid() FROM ATT_LOG')\n r = cur.fetchone()\n print_log(r, uid, verify_type, verify_time, status)\n\n\ndef add_logs(uid, start, end, status, late=False):\n start_date = datetime.datetime.strptime(start, '%d/%m/%Y')\n end_date = datetime.datetime.strptime(end, '%d/%m/%Y')\n day_count = end_date - start_date\n day_count = day_count.days + 1\n for date in (start_date + datetime.timedelta(i) for i in range(day_count)):\n date = '{:%d/%m/%Y}'.format(date)\n add_log(uid, date, status, late)\n\n\ndef delete_log(log_id):\n \"\"\"\n Delete a log row with ID=log_id\n \"\"\"\n with sqlite3.connect(DB) as conn:\n query = 'DELETE FROM ATT_LOG WHERE ID={}'.format(log_id)\n conn.execute(query)\n print('Deleted log {}'.format(log_id))\n\n\ndef get_logs(uid, start_date, end_date):\n \"\"\"\n Returns logs of 'uid' from 'start_date' to 'end_date'\n uid: User PIN\n start_date: follow format 14/01/2017\n end_date: follow format 15/01/2017\n Return format: list of (ID, User_PIN, Verify_Type, Verify_Time, Status)\n \"\"\"\n start_date = datetime.datetime.strptime(start_date, '%d/%m/%Y')\n end_date = datetime.datetime.strptime(end_date, '%d/%m/%Y')\n with sqlite3.connect(DB) as conn:\n query = (\n 'SELECT ID, User_PIN, Verify_Type, Verify_Time, Status FROM ATT_LOG WHERE User_PIN = {}'\n .format(uid))\n cur = conn.execute(query)\n rows = cur.fetchall()\n ret = []\n for row in rows:\n log_date = datetime.datetime.strptime(row[-2], '%Y-%m-%dT%H:%M:%S')\n if (log_date >= start_date and log_date <= end_date + datetime.\n timedelta(days=1)):\n ret.append(row)\n return ret\n\n\ndef get_logs_by_date(uid, date):\n return get_logs(uid, date, date)\n\n\ndef print_log(*log_row):\n \"\"\"\n Pretty print a log row\n log row format: (ID, User_PIN, Verify_Type, Verify_Time, Status)\n \"\"\"\n id, uid, verify_type, verify_time, status = log_row\n if status == 1:\n status = 'Check out'\n elif status == 0:\n status = 'Check in'\n print('{}. {} {} at {}'.format(id, uid, status, verify_time))\n\n\ndef check_log_row(log_row):\n \"\"\"\n Each day must have exactly 2 logs.\n One for checking in, before 8:00:00\n One for checking out, after 17:00:00\n Return True if satisfies all conditions, else False\n \"\"\"\n in_time = datetime.time(8, 0, 0)\n out_time = datetime.time(17, 0, 0)\n log_date = datetime.datetime.strptime(log_row[2], '%Y-%m-%dT%H:%M:%S')\n status = log_row[-1]\n if status == 1 and log_date.time() < out_time:\n print('Early log on {}: {}'.format(log_date.date(), log_date))\n return False\n elif status == 0 and log_date.time() > in_time:\n print('Late log on {}: {}'.format(log_date.date(), log_date))\n return False\n else:\n return True\n\n\ndef check_log_by_date(uid, date):\n pass\n\n\ndef fix_logs(uid, start_date, end_date):\n \"\"\"\n Fix logs of uid from start_date to end_date\n A normalized log contains 2 logs per day\n One check in log before 8:00\n One check out log after 17:00\n \"\"\"\n start_date = '{:%d/%m/%Y}'.format(start_date)\n end_date = '{:%d/%m/%Y}'.format(end_date)\n day_count = end_date - start_date + 1\n for date in (start_date + datetime.timedelta(i) for i in range(day_count)):\n date = '{:%d/%m/%Y}'.format(date.date)\n logs = get_logs_by_date(uid, date)\n if len(logs) == 2:\n if not check_log_row(logs[0]) or not check_log_row(logs[1]):\n delete_log(logs[0][0])\n delete_log(logs[1][0])\n add_log(uid, date, 'in')\n add_log(uid, date, 'out')\n elif len(logs) == 0:\n add_log(uid, date, 'in')\n add_log(uid, date, 'out')\n else:\n for log in logs:\n delete_log(log[0])\n add_log(uid, date, 'in')\n add_log(uid, date, 'out')\n\n\ndef main():\n today = '{:%d/%m/%Y}'.format(datetime.date.today())\n parser = argparse.ArgumentParser()\n parser.add_argument('action', help=\n '`get`, `checkin`, `checkout`, `add` or `fix` logs', default='get')\n parser.add_argument('uids', help='User PINs', type=int, nargs='*')\n parser.add_argument('-d', '--date', help='Date', default=today)\n parser.add_argument('-r', '--range', help=\n 'Range of date, ex. 01/01/2017-02/01/2017')\n parser.add_argument('--log', help='log id to delete')\n parser.add_argument('--late', help='Checkin late or not', action=\n 'store_true')\n args = parser.parse_args()\n uids = args.uids\n date = args.date or today\n if not args.range:\n start, end = date, date\n else:\n start, end = args.range.split('-')\n transfer_file(DEVICE_IP, server_ip, DB_PATH, cmd='ftpput')\n for uid in uids:\n if args.action == 'get':\n logs = get_logs(uid, start, end)\n for log in logs:\n print_log(*log)\n elif args.action == 'checkin':\n add_logs(uid, start, end, 'in', late=args.late)\n elif args.action == 'checkout':\n add_logs(uid, start, end, 'out')\n elif args.action == 'add':\n add_log(uid, start, end)\n elif args.action == 'fix':\n fix_logs(uid, start, end)\n elif args.action == 'delete':\n delete_log(args.log)\n else:\n raise ValueError(\n 'Action must be `get`, `checkin`, `checkout`, `fix` or `delete`'\n )\n transfer_file(server_ip, DEVICE_IP, DB_PATH, cmd='ftpget')\n\n\nif __name__ == '__main__':\n DEVICE_IP = '10.0.0.204'\n DB_PATH = '/mnt/mtdblock/data/ZKDB.db'\n DB = os.path.basename(DB_PATH)\n server_ip = get_server_ip(DEVICE_IP)\n main()\n",
"step-5": "'''\nThis script will do auto-check in/out for ZMM100 fingerprint access control\ndevice by ZKSoftware.\n\nAt my office, the manager uses an application to load data from the\nfingerprint device. After he loads data, log in device's database is cleared.\nSo in my case, I write this script to automate checking in/out everyday.\n\nDevice is running linux with busybox, so I have access to ftpput, ftpget and\nwget commands (ftpd is missing). Data is stored in /mnt/mtdblock/data/ZKDB.db.\nThis is a sqlite3 database file. User info is in USER_INFO, user transactions\nare in ATT_LOG table.\n\nProcedure:\n- telnet into the device\n- ftpput database file at /mnt/mtdblock/data/ZKDB.db to a temporary FTP server\n- edit ZKDB.db file on server\n- ftpget ZKDB.db from FTP server\n'''\nimport argparse\nimport datetime\nimport os\nimport random\nimport sqlite3\nimport subprocess as spr\nimport sys\nimport telnetlib\n\n\ndef get_server_ip(device_ip):\n import socket\n\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.connect((device_ip, 80))\n return s.getsockname()[0]\n\n\ndef transfer_file(from_ip, to_ip, remote_file_path, cmd='ftpput'):\n '''\n Transfer file from from_ip to to_ip via telnet.\n Use ftpput and ftpget.\n\n '''\n\n # ====FTP Server====\n try:\n import pyftpdlib\n except ImportError:\n import pip\n pip.main('install pyftpdlib'.split())\n\n # start pyftpdlib FTP server: anonymous with write permission, port 2121\n ftp_server = spr.Popen([sys.executable, '-m', 'pyftpdlib', '-w'])\n print('Server started')\n filename = os.path.basename(remote_file_path)\n\n s = telnetlib.Telnet(DEVICE_IP)\n print(s.read_until(b'login: ').decode())\n s.write(b'root \\n')\n print(s.read_until(b'Password: ').decode())\n s.write(b'solokey\\n')\n if s.read_until(b'#'):\n s.write(bytes('ls %s\\n' % DB_PATH, 'utf-8'))\n files = s.read_until(b'#').decode()\n\n if filename in files:\n while True:\n if cmd == 'ftpput':\n command = bytes('%s -P 2121 %s %s %s\\n' % (cmd, server_ip,\n filename,\n remote_file_path),\n 'utf-8')\n elif cmd == 'ftpget':\n command = bytes('%s -P 2121 %s %s %s\\n' % (cmd, server_ip, remote_file_path, filename), 'utf-8')\n else:\n raise ValueError('cmd must be `ftpput` or `ftpget`')\n s.write(command)\n ret = s.read_until(b'#').decode()\n if 'refused' not in ret:\n print(ret)\n break\n\n # stop pyftpdlib FTP server\n ftp_server.kill()\n print('Server killed')\n\n\ndef generate_verify_time(status='in', late=False):\n '''\n Generate normal verify time based on status `in` or `out`\n `in` time will be random 10 mins before 8:00\n `out` time will be random 10 mins after 17:00\n '''\n if status == 'in':\n status = 0\n if not late:\n hour = 7\n minute = random.randint(50, 59)\n else:\n hour = 8\n minute = random.randint(15, 20)\n elif status == 'out':\n status = 1\n hour = 17\n minute = random.randint(0, 10)\n else:\n raise ValueError('status must be `in` or `out`')\n\n second = random.randint(0, 59)\n time = datetime.time(hour, minute, second)\n\n return time\n\n\ndef add_log(uid, date, status, late=False):\n '''\n Edit ZKDB.db file, ATT_LOG table,\n insert a row which represents a check in/out log\n uid: User PIN\n date: follow format: dd/mm/yyyy - 14/01/2017\n status: 'in' is checking in, 'out' is checking out\n '''\n # verify_type: 0 is password, 1 is fingerprint\n verify_type = 1\n\n if status == 'in':\n status = 0\n time = generate_verify_time('in', late=late)\n\n elif status == 'out':\n status = 1\n time = generate_verify_time('out')\n else:\n raise ValueError('status must be `in` or `out`')\n\n date = datetime.datetime.strptime(date, '%d/%m/%Y')\n combined = datetime.datetime.combine(date, time)\n verify_time = '{:%Y-%m-%dT%H:%M:%S}'.format(combined)\n\n with sqlite3.connect(DB) as conn:\n query = ('INSERT INTO ATT_LOG (User_PIN, Verify_Type, Verify_Time, '\n 'Status, Work_Code_ID, SEND_FLAG) '\n 'VALUES ({}, {}, \"{}\", {}, 0, 0)').format(uid, verify_type,\n verify_time, status,\n 0, 0)\n cur = conn.execute(query)\n cur = conn.execute('SELECT last_insert_rowid() FROM ATT_LOG')\n r = cur.fetchone()\n\n print_log(r, uid, verify_type, verify_time, status)\n\ndef add_logs(uid, start, end, status, late=False):\n start_date = datetime.datetime.strptime(start, '%d/%m/%Y')\n end_date = datetime.datetime.strptime(end, '%d/%m/%Y')\n day_count = end_date - start_date\n day_count = day_count.days + 1\n for date in (start_date + datetime.timedelta(i) for i in range(day_count)):\n date = '{:%d/%m/%Y}'.format(date)\n add_log(uid, date, status, late)\n\n\ndef delete_log(log_id):\n '''\n Delete a log row with ID=log_id\n '''\n with sqlite3.connect(DB) as conn:\n query = ('DELETE FROM ATT_LOG WHERE ID={}'.format(log_id))\n conn.execute(query)\n print('Deleted log {}'.format(log_id))\n\n\ndef get_logs(uid, start_date, end_date):\n '''\n Returns logs of 'uid' from 'start_date' to 'end_date'\n uid: User PIN\n start_date: follow format 14/01/2017\n end_date: follow format 15/01/2017\n Return format: list of (ID, User_PIN, Verify_Type, Verify_Time, Status)\n '''\n start_date = datetime.datetime.strptime(start_date, '%d/%m/%Y')\n end_date = datetime.datetime.strptime(end_date, '%d/%m/%Y')\n\n with sqlite3.connect(DB) as conn:\n query = ('SELECT ID, User_PIN, Verify_Type, Verify_Time, Status '\n 'FROM ATT_LOG WHERE User_PIN = {}'.format(uid))\n cur = conn.execute(query)\n rows = cur.fetchall()\n\n ret = []\n for row in rows:\n log_date = datetime.datetime.strptime(row[-2], '%Y-%m-%dT%H:%M:%S')\n if log_date >= start_date and log_date <= end_date + datetime.timedelta(days=1):\n ret.append(row)\n return ret\n\n\ndef get_logs_by_date(uid, date):\n return get_logs(uid, date, date)\n\n\ndef print_log(*log_row):\n '''\n Pretty print a log row\n log row format: (ID, User_PIN, Verify_Type, Verify_Time, Status)\n '''\n id, uid, verify_type, verify_time, status = log_row\n\n if status == 1:\n status = 'Check out'\n elif status == 0:\n status = 'Check in'\n print('{}. {} {} at {}'.format(id, uid, status, verify_time))\n\n\ndef check_log_row(log_row):\n '''\n Each day must have exactly 2 logs.\n One for checking in, before 8:00:00\n One for checking out, after 17:00:00\n Return True if satisfies all conditions, else False\n '''\n in_time = datetime.time(8, 0, 0)\n out_time = datetime.time(17, 0, 0)\n\n log_date = datetime.datetime.strptime(log_row[2], '%Y-%m-%dT%H:%M:%S')\n status = log_row[-1]\n\n if status == 1 and log_date.time() < out_time:\n print('Early log on {}: {}'.format(log_date.date(), log_date))\n return False\n elif status == 0 and log_date.time() > in_time:\n print('Late log on {}: {}'.format(log_date.date(), log_date))\n return False\n else:\n return True\n\n\ndef check_log_by_date(uid, date):\n pass\n\n\ndef fix_logs(uid, start_date, end_date):\n '''\n Fix logs of uid from start_date to end_date\n A normalized log contains 2 logs per day\n One check in log before 8:00\n One check out log after 17:00\n '''\n\n start_date = '{:%d/%m/%Y}'.format(start_date)\n end_date = '{:%d/%m/%Y}'.format(end_date)\n day_count = (end_date - start_date) + 1\n\n for date in (start_date + datetime.timedelta(i) for i in range(day_count)):\n date = '{:%d/%m/%Y}'.format(date.date)\n logs = get_logs_by_date(uid, date)\n if len(logs) == 2:\n if not check_log_row(logs[0]) or not check_log_row(logs[1]):\n delete_log(logs[0][0])\n delete_log(logs[1][0])\n add_log(uid, date, 'in')\n add_log(uid, date, 'out')\n elif len(logs) == 0:\n add_log(uid, date, 'in')\n add_log(uid, date, 'out')\n else:\n for log in logs:\n delete_log(log[0])\n add_log(uid, date, 'in')\n add_log(uid, date, 'out')\n\n\ndef main():\n\n today = '{:%d/%m/%Y}'.format(datetime.date.today())\n\n parser = argparse.ArgumentParser()\n parser.add_argument('action', help='`get`, `checkin`, `checkout`, '\n '`add` or `fix` logs', default='get')\n parser.add_argument('uids', help='User PINs', type=int, nargs='*')\n parser.add_argument('-d', '--date', help='Date', default=today)\n parser.add_argument('-r', '--range',\n help='Range of date, ex. 01/01/2017-02/01/2017')\n parser.add_argument('--log', help='log id to delete')\n parser.add_argument('--late', help='Checkin late or not',\n action='store_true')\n\n args = parser.parse_args()\n uids = args.uids\n date = args.date or today\n if not args.range:\n start, end = date, date\n else:\n start, end = args.range.split('-')\n\n transfer_file(DEVICE_IP, server_ip, DB_PATH, cmd='ftpput')\n\n for uid in uids:\n if args.action == 'get':\n logs = get_logs(uid, start, end)\n for log in logs:\n print_log(*log)\n elif args.action == 'checkin':\n add_logs(uid, start, end, 'in', late=args.late)\n elif args.action == 'checkout':\n add_logs(uid, start, end, 'out')\n elif args.action == 'add':\n add_log(uid, start, end)\n elif args.action == 'fix':\n fix_logs(uid, start, end)\n elif args.action == 'delete':\n delete_log(args.log)\n else:\n raise ValueError('Action must be `get`, `checkin`, `checkout`, '\n '`fix` or `delete`')\n\n transfer_file(server_ip, DEVICE_IP, DB_PATH, cmd='ftpget')\n\n\nif __name__ == '__main__':\n # ====config====\n DEVICE_IP = '10.0.0.204' # todo: find IP, input IP\n DB_PATH = '/mnt/mtdblock/data/ZKDB.db'\n DB = os.path.basename(DB_PATH)\n server_ip = get_server_ip(DEVICE_IP)\n\n main()\n",
"step-ids": [
6,
12,
13,
14,
16
]
}
|
[
6,
12,
13,
14,
16
] |
def correctLineup1(athletes: list) ->list:
return [(athletes[i + 1] if i % 2 == 0 else athletes[i - 1]) for i in
range(len(athletes))]
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def correctLineup1(athletes: list) ->list:
return [(athletes[i + 1] if i % 2 == 0 else athletes[i - 1]) for i in
range(len(athletes))]
def correctLineup1(athletes: list) ->list:
return [athletes[i ^ 1] for i in range(len(athletes))]
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def correctLineup1(athletes: list) ->list:
return [(athletes[i + 1] if i % 2 == 0 else athletes[i - 1]) for i in
range(len(athletes))]
def correctLineup1(athletes: list) ->list:
return [athletes[i ^ 1] for i in range(len(athletes))]
<|reserved_special_token_0|>
print(r1)
<|reserved_special_token_1|>
def correctLineup1(athletes: list) ->list:
return [(athletes[i + 1] if i % 2 == 0 else athletes[i - 1]) for i in
range(len(athletes))]
def correctLineup1(athletes: list) ->list:
return [athletes[i ^ 1] for i in range(len(athletes))]
a1 = [1, 2, 3, 4, 5, 6]
r1 = correctLineup1(a1)
print(r1)
<|reserved_special_token_1|>
#
# * Python 57, Correct Lineup
# * Easy
# * For the opening ceremony of the upcoming sports event an even number of
# * athletes were picked. They formed a correct lineup, i.e. such a lineup in
# * which no two boys or two girls stand together. The first person in the lineup
# * was a girl. As a part of the performance, adjacent pairs of athletes (i.e.
# * the first one together with the second one, the third one together with the
# * fourth one, etc.) had to swap positions with each other.
# * Given a list of athletes, return the list of athletes after the changes, i.e.
# * after each adjacent pair of athletes is swapped.
# * Example
# For athletes = [1, 2, 3, 4, 5, 6], the output should be
# correctLineup(athletes) = [2, 1, 4, 3, 6, 5].
# * Input/Output
# [execution time limit] 4 seconds (py3)
# [input] array.integer athletes
# A list of even length representing the athletes, where each athlete is given
# by the number written on their back.
# Guaranteed constraints:
# 2 ≤ athletes.length ≤ 20,
# 1 ≤ athletes[i] ≤ 100.
# [output] array.integer
# Array of athletes with each pair of adjacent elements swapped.
#%%
# * Solution 1
def correctLineup1(athletes:list)-> list:
return [athletes[i+1] if i%2==0 else athletes[i-1] for i in range(len(athletes))]
# * Solution 2
# ! bitwise operator ^.
def correctLineup1(athletes:list)-> list:
return [athletes[i^1] for i in range(len(athletes))]
a1 = [1, 2, 3, 4, 5, 6]
r1 = correctLineup1(a1)
print(r1)
# %%
|
flexible
|
{
"blob_id": "6c5f60e7a122e3da5e6705bfacf73a361f6c1362",
"index": 1120,
"step-1": "def correctLineup1(athletes: list) ->list:\n return [(athletes[i + 1] if i % 2 == 0 else athletes[i - 1]) for i in\n range(len(athletes))]\n\n\n<mask token>\n",
"step-2": "def correctLineup1(athletes: list) ->list:\n return [(athletes[i + 1] if i % 2 == 0 else athletes[i - 1]) for i in\n range(len(athletes))]\n\n\ndef correctLineup1(athletes: list) ->list:\n return [athletes[i ^ 1] for i in range(len(athletes))]\n\n\n<mask token>\n",
"step-3": "def correctLineup1(athletes: list) ->list:\n return [(athletes[i + 1] if i % 2 == 0 else athletes[i - 1]) for i in\n range(len(athletes))]\n\n\ndef correctLineup1(athletes: list) ->list:\n return [athletes[i ^ 1] for i in range(len(athletes))]\n\n\n<mask token>\nprint(r1)\n",
"step-4": "def correctLineup1(athletes: list) ->list:\n return [(athletes[i + 1] if i % 2 == 0 else athletes[i - 1]) for i in\n range(len(athletes))]\n\n\ndef correctLineup1(athletes: list) ->list:\n return [athletes[i ^ 1] for i in range(len(athletes))]\n\n\na1 = [1, 2, 3, 4, 5, 6]\nr1 = correctLineup1(a1)\nprint(r1)\n",
"step-5": "#\n# * Python 57, Correct Lineup\n# * Easy\n\n# * For the opening ceremony of the upcoming sports event an even number of \n# * athletes were picked. They formed a correct lineup, i.e. such a lineup in \n# * which no two boys or two girls stand together. The first person in the lineup \n# * was a girl. As a part of the performance, adjacent pairs of athletes (i.e. \n# * the first one together with the second one, the third one together with the \n# * fourth one, etc.) had to swap positions with each other.\n\n# * Given a list of athletes, return the list of athletes after the changes, i.e. \n# * after each adjacent pair of athletes is swapped.\n\n# * Example\n\n# For athletes = [1, 2, 3, 4, 5, 6], the output should be\n# correctLineup(athletes) = [2, 1, 4, 3, 6, 5].\n\n# * Input/Output\n\n# [execution time limit] 4 seconds (py3)\n\n# [input] array.integer athletes\n\n# A list of even length representing the athletes, where each athlete is given \n# by the number written on their back.\n\n# Guaranteed constraints:\n# 2 ≤ athletes.length ≤ 20,\n# 1 ≤ athletes[i] ≤ 100.\n\n# [output] array.integer\n\n# Array of athletes with each pair of adjacent elements swapped.\n\n#%%\n\n# * Solution 1\ndef correctLineup1(athletes:list)-> list:\n return [athletes[i+1] if i%2==0 else athletes[i-1] for i in range(len(athletes))]\n\n\n# * Solution 2\n# ! bitwise operator ^. \ndef correctLineup1(athletes:list)-> list:\n return [athletes[i^1] for i in range(len(athletes))]\n\n\na1 = [1, 2, 3, 4, 5, 6]\nr1 = correctLineup1(a1)\nprint(r1)\n\n\n# %%\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
DEFAULT_LL_URL = "https://ll.thespacedevs.com"
DEFAULT_VERSION = "2.0.0"
DEFAULT_API_URL = "/".join([DEFAULT_LL_URL, DEFAULT_VERSION])
|
normal
|
{
"blob_id": "1a72da7f436e6c5e73e396b771f8ce1a3affba1a",
"index": 3010,
"step-1": "<mask token>\n",
"step-2": "DEFAULT_LL_URL = 'https://ll.thespacedevs.com'\nDEFAULT_VERSION = '2.0.0'\nDEFAULT_API_URL = '/'.join([DEFAULT_LL_URL, DEFAULT_VERSION])\n",
"step-3": "DEFAULT_LL_URL = \"https://ll.thespacedevs.com\"\nDEFAULT_VERSION = \"2.0.0\"\nDEFAULT_API_URL = \"/\".join([DEFAULT_LL_URL, DEFAULT_VERSION])\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
class Node:
def __init__(self, dataVal=None):
self.dataVal = dataVal
self.nextVal = None
class LinkedList:
def __init__(self):
self.headVal = None
def atBeginning(self, data):
NewNode = Node(data)
NewNode.nextVal = self.headVal
self.headVal = NewNode
return NewNode
def atEnd(self, data):
NewNode = Node(data)
NewNode.nextVal = None
if self.headVal is None:
self.headVal = NewNode
return NewNode
last = self.headVal
while(last.nextVal):
last = last.nextVal
last.nextVal = NewNode
return NewNode
def inBetween(self, n1, n2, data):
NewNode = Node(data)
n1.nextVal = NewNode
NewNode.nextVal = n2
return NewNode
def deleteNode(self,node):
last = self.headVal
if self.headVal == node:
self.headVal = node.nextVal
return
else:
while(last):
if (last.nextVal.dataVal) == (node.dataVal):
if last.nextVal is not None:
last.nextVal = node.nextVal
return
else:
self.headVal.nextVal = None
last = last.nextVal
def printList(self):
self.printVal = self.headVal
while self.printVal is not None:
# print(self.printVal.dataVal)
print(self.printVal.dataVal)
self.printVal = self.printVal.nextVal
# def isPalindrome(self):
def deleteNthNode(self, n):
last = self.headVal
i=2
while(last):
if i == n:
prevNode = last
nextNode = last.nextVal.nextVal
prevNode.nextVal = nextNode
return
i+=1
last = last.nextVal
def getNthNode(self, n):
curr = self.headVal
i=1
while(curr):
if i == n:
print (curr.dataVal)
return
i+=1
curr = curr.nextVal
def reverse(self, node):
print("2:", node.dataVal)
if node.nextVal == None:
self.headVal = node
return
print("3:", node.dataVal)
self.reverse(node.nextVal)
tmp = node.nextVal
tmp.nextVal = node
node.nextVal = None
def reverseLinkedList(self):
curr = self.headVal
print("1:", curr.dataVal)
while(curr):
self.reverse(curr)
curr = curr.nextVal
def checkPalindrome(self):
curr = self.headVal
firstNode = self.headVal
nextNode = curr.nextVal
ans = False
while(curr.dataVal is None):
if curr.dataVal == firstNode.dataVal:
ans = True
list1 = LinkedList()
list1.headVal = Node(1)
e2 = Node(2)
e3 = Node(3)
e4 = Node(4)
list1.headVal.nextVal = e2
e2.nextVal = e3
e3.nextVal = e4
e4.nextVal = None
# list1.headVal = Node('Sunday')
# e2 = Node('Monday')
# e3 = Node('Tuesday')
# e4 = Node('Wednesday')
# e5 = Node('Thursday')
# e6 = Node('Friday')
# e7 = Node('Saturday')
# list1.headVal.nextVal = e2
# e2.nextVal = e3
# e3.nextVal = e4
# e4.nextVal = e5
# e5.nextVal = e6
# e6.nextVal = e7
# e8 = list1.atBeginning('MyTestJanuary')
# e9 = list1.atEnd('MyTestDecember')
# e10 = list1.inBetween(list1.headVal, e2, 'I hate this')
# e11 = list1.inBetween(e6, e7, 'I love this')
# list1.deleteNode(e2)
# list1.printList()
# list1.deleteNode(list1.headVal)
# list1.printList()
# print(">>>>>>>>>",type(e6), type(e8), type(e10))
# list1.deleteNode(e9)
# print("Deleting the last node>>>>>>")
# list1.deleteNthNode(3)
# list1.getNthNode(3)
# list1.printList()
list1.reverseLinkedList()
# list1.printList()
# e10 = list1.atBeginning('1')
# e8.nextVal = None
|
normal
|
{
"blob_id": "00260e23614a7b0a11ff3649e71392e4892de423",
"index": 4511,
"step-1": "<mask token>\n\n\nclass LinkedList:\n <mask token>\n <mask token>\n\n def atEnd(self, data):\n NewNode = Node(data)\n NewNode.nextVal = None\n if self.headVal is None:\n self.headVal = NewNode\n return NewNode\n last = self.headVal\n while last.nextVal:\n last = last.nextVal\n last.nextVal = NewNode\n return NewNode\n\n def inBetween(self, n1, n2, data):\n NewNode = Node(data)\n n1.nextVal = NewNode\n NewNode.nextVal = n2\n return NewNode\n <mask token>\n <mask token>\n <mask token>\n\n def getNthNode(self, n):\n curr = self.headVal\n i = 1\n while curr:\n if i == n:\n print(curr.dataVal)\n return\n i += 1\n curr = curr.nextVal\n <mask token>\n\n def reverseLinkedList(self):\n curr = self.headVal\n print('1:', curr.dataVal)\n while curr:\n self.reverse(curr)\n curr = curr.nextVal\n\n def checkPalindrome(self):\n curr = self.headVal\n firstNode = self.headVal\n nextNode = curr.nextVal\n ans = False\n while curr.dataVal is None:\n if curr.dataVal == firstNode.dataVal:\n ans = True\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass LinkedList:\n\n def __init__(self):\n self.headVal = None\n <mask token>\n\n def atEnd(self, data):\n NewNode = Node(data)\n NewNode.nextVal = None\n if self.headVal is None:\n self.headVal = NewNode\n return NewNode\n last = self.headVal\n while last.nextVal:\n last = last.nextVal\n last.nextVal = NewNode\n return NewNode\n\n def inBetween(self, n1, n2, data):\n NewNode = Node(data)\n n1.nextVal = NewNode\n NewNode.nextVal = n2\n return NewNode\n <mask token>\n <mask token>\n\n def deleteNthNode(self, n):\n last = self.headVal\n i = 2\n while last:\n if i == n:\n prevNode = last\n nextNode = last.nextVal.nextVal\n prevNode.nextVal = nextNode\n return\n i += 1\n last = last.nextVal\n\n def getNthNode(self, n):\n curr = self.headVal\n i = 1\n while curr:\n if i == n:\n print(curr.dataVal)\n return\n i += 1\n curr = curr.nextVal\n\n def reverse(self, node):\n print('2:', node.dataVal)\n if node.nextVal == None:\n self.headVal = node\n return\n print('3:', node.dataVal)\n self.reverse(node.nextVal)\n tmp = node.nextVal\n tmp.nextVal = node\n node.nextVal = None\n\n def reverseLinkedList(self):\n curr = self.headVal\n print('1:', curr.dataVal)\n while curr:\n self.reverse(curr)\n curr = curr.nextVal\n\n def checkPalindrome(self):\n curr = self.headVal\n firstNode = self.headVal\n nextNode = curr.nextVal\n ans = False\n while curr.dataVal is None:\n if curr.dataVal == firstNode.dataVal:\n ans = True\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass LinkedList:\n\n def __init__(self):\n self.headVal = None\n\n def atBeginning(self, data):\n NewNode = Node(data)\n NewNode.nextVal = self.headVal\n self.headVal = NewNode\n return NewNode\n\n def atEnd(self, data):\n NewNode = Node(data)\n NewNode.nextVal = None\n if self.headVal is None:\n self.headVal = NewNode\n return NewNode\n last = self.headVal\n while last.nextVal:\n last = last.nextVal\n last.nextVal = NewNode\n return NewNode\n\n def inBetween(self, n1, n2, data):\n NewNode = Node(data)\n n1.nextVal = NewNode\n NewNode.nextVal = n2\n return NewNode\n\n def deleteNode(self, node):\n last = self.headVal\n if self.headVal == node:\n self.headVal = node.nextVal\n return\n else:\n while last:\n if last.nextVal.dataVal == node.dataVal:\n if last.nextVal is not None:\n last.nextVal = node.nextVal\n return\n else:\n self.headVal.nextVal = None\n last = last.nextVal\n\n def printList(self):\n self.printVal = self.headVal\n while self.printVal is not None:\n print(self.printVal.dataVal)\n self.printVal = self.printVal.nextVal\n\n def deleteNthNode(self, n):\n last = self.headVal\n i = 2\n while last:\n if i == n:\n prevNode = last\n nextNode = last.nextVal.nextVal\n prevNode.nextVal = nextNode\n return\n i += 1\n last = last.nextVal\n\n def getNthNode(self, n):\n curr = self.headVal\n i = 1\n while curr:\n if i == n:\n print(curr.dataVal)\n return\n i += 1\n curr = curr.nextVal\n\n def reverse(self, node):\n print('2:', node.dataVal)\n if node.nextVal == None:\n self.headVal = node\n return\n print('3:', node.dataVal)\n self.reverse(node.nextVal)\n tmp = node.nextVal\n tmp.nextVal = node\n node.nextVal = None\n\n def reverseLinkedList(self):\n curr = self.headVal\n print('1:', curr.dataVal)\n while curr:\n self.reverse(curr)\n curr = curr.nextVal\n\n def checkPalindrome(self):\n curr = self.headVal\n firstNode = self.headVal\n nextNode = curr.nextVal\n ans = False\n while curr.dataVal is None:\n if curr.dataVal == firstNode.dataVal:\n ans = True\n\n\n<mask token>\n",
"step-4": "class Node:\n\n def __init__(self, dataVal=None):\n self.dataVal = dataVal\n self.nextVal = None\n\n\nclass LinkedList:\n\n def __init__(self):\n self.headVal = None\n\n def atBeginning(self, data):\n NewNode = Node(data)\n NewNode.nextVal = self.headVal\n self.headVal = NewNode\n return NewNode\n\n def atEnd(self, data):\n NewNode = Node(data)\n NewNode.nextVal = None\n if self.headVal is None:\n self.headVal = NewNode\n return NewNode\n last = self.headVal\n while last.nextVal:\n last = last.nextVal\n last.nextVal = NewNode\n return NewNode\n\n def inBetween(self, n1, n2, data):\n NewNode = Node(data)\n n1.nextVal = NewNode\n NewNode.nextVal = n2\n return NewNode\n\n def deleteNode(self, node):\n last = self.headVal\n if self.headVal == node:\n self.headVal = node.nextVal\n return\n else:\n while last:\n if last.nextVal.dataVal == node.dataVal:\n if last.nextVal is not None:\n last.nextVal = node.nextVal\n return\n else:\n self.headVal.nextVal = None\n last = last.nextVal\n\n def printList(self):\n self.printVal = self.headVal\n while self.printVal is not None:\n print(self.printVal.dataVal)\n self.printVal = self.printVal.nextVal\n\n def deleteNthNode(self, n):\n last = self.headVal\n i = 2\n while last:\n if i == n:\n prevNode = last\n nextNode = last.nextVal.nextVal\n prevNode.nextVal = nextNode\n return\n i += 1\n last = last.nextVal\n\n def getNthNode(self, n):\n curr = self.headVal\n i = 1\n while curr:\n if i == n:\n print(curr.dataVal)\n return\n i += 1\n curr = curr.nextVal\n\n def reverse(self, node):\n print('2:', node.dataVal)\n if node.nextVal == None:\n self.headVal = node\n return\n print('3:', node.dataVal)\n self.reverse(node.nextVal)\n tmp = node.nextVal\n tmp.nextVal = node\n node.nextVal = None\n\n def reverseLinkedList(self):\n curr = self.headVal\n print('1:', curr.dataVal)\n while curr:\n self.reverse(curr)\n curr = curr.nextVal\n\n def checkPalindrome(self):\n curr = self.headVal\n firstNode = self.headVal\n nextNode = curr.nextVal\n ans = False\n while curr.dataVal is None:\n if curr.dataVal == firstNode.dataVal:\n ans = True\n\n\n<mask token>\nlist1.reverseLinkedList()\n",
"step-5": "class Node:\n def __init__(self, dataVal=None):\n self.dataVal = dataVal\n self.nextVal = None\n\nclass LinkedList:\n def __init__(self):\n self.headVal = None\n def atBeginning(self, data):\n NewNode = Node(data)\n NewNode.nextVal = self.headVal\n self.headVal = NewNode\n return NewNode\n def atEnd(self, data):\n NewNode = Node(data)\n NewNode.nextVal = None\n if self.headVal is None:\n self.headVal = NewNode\n return NewNode\n last = self.headVal\n while(last.nextVal):\n last = last.nextVal\n last.nextVal = NewNode\n return NewNode\n \n def inBetween(self, n1, n2, data):\n NewNode = Node(data)\n n1.nextVal = NewNode\n NewNode.nextVal = n2\n return NewNode\n\n def deleteNode(self,node):\n last = self.headVal\n if self.headVal == node:\n self.headVal = node.nextVal \n return\n else:\n while(last):\n if (last.nextVal.dataVal) == (node.dataVal):\n if last.nextVal is not None:\n last.nextVal = node.nextVal\n return\n else:\n self.headVal.nextVal = None\n\n last = last.nextVal\n \n def printList(self):\n self.printVal = self.headVal\n while self.printVal is not None:\n # print(self.printVal.dataVal)\n print(self.printVal.dataVal)\n self.printVal = self.printVal.nextVal\n # def isPalindrome(self):\n def deleteNthNode(self, n):\n last = self.headVal\n i=2\n while(last):\n if i == n:\n prevNode = last\n nextNode = last.nextVal.nextVal\n prevNode.nextVal = nextNode\n return\n i+=1\n last = last.nextVal\n def getNthNode(self, n):\n curr = self.headVal\n i=1\n while(curr):\n if i == n:\n print (curr.dataVal)\n return\n i+=1\n curr = curr.nextVal\n \n def reverse(self, node):\n print(\"2:\", node.dataVal)\n if node.nextVal == None:\n self.headVal = node\n return\n print(\"3:\", node.dataVal)\n self.reverse(node.nextVal)\n tmp = node.nextVal\n tmp.nextVal = node\n node.nextVal = None\n\n def reverseLinkedList(self):\n curr = self.headVal\n print(\"1:\", curr.dataVal)\n while(curr):\n self.reverse(curr)\n curr = curr.nextVal\n \n def checkPalindrome(self):\n curr = self.headVal\n firstNode = self.headVal\n nextNode = curr.nextVal\n ans = False\n while(curr.dataVal is None):\n if curr.dataVal == firstNode.dataVal:\n ans = True\n \n\n\n\n \n\n \n\nlist1 = LinkedList()\nlist1.headVal = Node(1)\ne2 = Node(2)\ne3 = Node(3)\ne4 = Node(4)\nlist1.headVal.nextVal = e2\ne2.nextVal = e3\ne3.nextVal = e4\ne4.nextVal = None\n# list1.headVal = Node('Sunday')\n# e2 = Node('Monday')\n# e3 = Node('Tuesday')\n# e4 = Node('Wednesday')\n# e5 = Node('Thursday')\n# e6 = Node('Friday')\n# e7 = Node('Saturday')\n\n# list1.headVal.nextVal = e2\n# e2.nextVal = e3\n# e3.nextVal = e4\n# e4.nextVal = e5\n# e5.nextVal = e6\n# e6.nextVal = e7\n# e8 = list1.atBeginning('MyTestJanuary')\n# e9 = list1.atEnd('MyTestDecember')\n# e10 = list1.inBetween(list1.headVal, e2, 'I hate this')\n# e11 = list1.inBetween(e6, e7, 'I love this')\n# list1.deleteNode(e2)\n# list1.printList()\n\n# list1.deleteNode(list1.headVal)\n# list1.printList()\n\n\n# print(\">>>>>>>>>\",type(e6), type(e8), type(e10))\n# list1.deleteNode(e9)\n# print(\"Deleting the last node>>>>>>\")\n\n# list1.deleteNthNode(3)\n# list1.getNthNode(3)\n# list1.printList()\nlist1.reverseLinkedList()\n# list1.printList()\n# e10 = list1.atBeginning('1')\n# e8.nextVal = None\n\n\n\n\n\n",
"step-ids": [
6,
9,
12,
15,
17
]
}
|
[
6,
9,
12,
15,
17
] |
import os
import sys
import glob
import shutil
import json
import codecs
from collections import OrderedDict
def getRegionClass(image_path, data_id, imgName):
region_class = ['nosmoke_background', 'nosmoke_face', 'nosmoke_suspect', 'nosmoke_cover', 'smoke_hand', 'smoke_nohand', 'smoke_hard']
label_class = ['nosmoke_bg', 'nosmoke_face', 'nosmoke_susp', 'nosmoke_cover', 'smoke_hand', 'smoke_nohand', 'smoke_hard']
select_class = None
for class_id in range(len(region_class)):
cur_class = region_class[class_id]
cur_label_class = label_class[class_id]
check_file_name = os.path.join(image_path, data_id, cur_class, imgName)
if os.path.isfile(check_file_name):
select_class = cur_label_class
#print check_file_name
break
return select_class
def add_common_box_smoke_region(org_json_dir, dst_json_dir, done_root_dir):
if not os.path.exists(dst_json_dir):
os.makedirs(dst_json_dir)
smoke_hand_num, smoke_nohand_num, smoke_hard_num = 0, 0, 0
nosmoke_bg_num, nosmoke_face_num, nosmoke_susp_num, nosmoke_cover_num = 0, 0, 0, 0
for json_file_name in glob.glob(org_json_dir + '/*.json'):
json_file = open(json_file_name, 'r')
base_file_id = os.path.basename(json_file_name)[:-5]
print(base_file_id + '.json')
json_lines = json_file.read().splitlines()
dst_json_lines = []
new_json_file = codecs.open(dst_json_dir + '/' + base_file_id + '.json', "w", "utf-8")
new_json_file.close()
new_json_file = codecs.open(dst_json_dir + '/' + base_file_id + '.json', "a+", 'utf-8')
for line in json_lines:
if line[0] == '#':
new_json_file.write(line + '\n')
continue
js = json.loads(line, object_pairs_hook=OrderedDict)
#new_js_line = json.dumps(js) + "\n"
#new_json_file.write(new_js_line)
#continue
imgName = js["image_key"]
select_class = getRegionClass(done_root_dir, base_file_id, imgName)
if select_class == None:
new_json_file.write(line + '\n') #
#print('Not Found: ', done_root_dir, base_file_id, imgName)
continue
#print select_class
new_common_box = {}
new_attrs = {}
new_attrs['ignore'] = 'no'
new_attrs['type'] = 'smoke_region'
new_attrs['class'] = select_class
new_common_box['attrs'] = new_attrs
if select_class == 'smoke_hard':
new_attrs['ignore'] = 'yes'
# statistic
if select_class == 'smoke_hand':
smoke_hand_num += 1
elif select_class == 'smoke_nohand':
smoke_nohand_num += 1
elif select_class == 'smoke_hard':
smoke_hard_num += 1
elif select_class == 'nosmoke_bg':
nosmoke_bg_num += 1
elif select_class == 'nosmoke_face':
nosmoke_face_num += 1
elif select_class == 'nosmoke_susp':
nosmoke_susp_num += 1
elif select_class == 'nosmoke_cover':
nosmoke_cover_num += 1
else:
print('Invalid smoke class.', select_class)
# common box, like phone, hand
if 'common_box' in js:
js['common_box'].append(new_common_box)
else:
js['common_box'] = [new_common_box]
new_js_line = json.dumps(js) + "\n"
new_json_file.write(new_js_line)
new_json_file.close()
print('write ' + base_file_id + '.json')
print('add_common_box_smoke_region done.')
print('smoke_hand:%d, smoke_nohand:%d, smoke_hard:%d'%(smoke_hand_num, smoke_nohand_num, smoke_hard_num))
print('nosmoke_bg:%d, nosmoke_face:%d, nosmoke_susp:%d, nosmoke_cover:%d'%(nosmoke_bg_num, nosmoke_face_num, nosmoke_susp_num, nosmoke_cover_num))
if __name__ == '__main__':
if len(sys.argv) < 2:
print('useage: add_common_box_smoke_region.py org_json_dir dst_json_dir done_root_dir')
exit()
org_json_dir = sys.argv[1]
dst_json_dir = sys.argv[2]
done_root_dir = sys.argv[3]
add_common_box_smoke_region(org_json_dir, dst_json_dir, done_root_dir)
|
normal
|
{
"blob_id": "75833617996549167fa157ff78cc1a11f870784f",
"index": 8639,
"step-1": "<mask token>\n\n\ndef getRegionClass(image_path, data_id, imgName):\n region_class = ['nosmoke_background', 'nosmoke_face', 'nosmoke_suspect',\n 'nosmoke_cover', 'smoke_hand', 'smoke_nohand', 'smoke_hard']\n label_class = ['nosmoke_bg', 'nosmoke_face', 'nosmoke_susp',\n 'nosmoke_cover', 'smoke_hand', 'smoke_nohand', 'smoke_hard']\n select_class = None\n for class_id in range(len(region_class)):\n cur_class = region_class[class_id]\n cur_label_class = label_class[class_id]\n check_file_name = os.path.join(image_path, data_id, cur_class, imgName)\n if os.path.isfile(check_file_name):\n select_class = cur_label_class\n break\n return select_class\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef getRegionClass(image_path, data_id, imgName):\n region_class = ['nosmoke_background', 'nosmoke_face', 'nosmoke_suspect',\n 'nosmoke_cover', 'smoke_hand', 'smoke_nohand', 'smoke_hard']\n label_class = ['nosmoke_bg', 'nosmoke_face', 'nosmoke_susp',\n 'nosmoke_cover', 'smoke_hand', 'smoke_nohand', 'smoke_hard']\n select_class = None\n for class_id in range(len(region_class)):\n cur_class = region_class[class_id]\n cur_label_class = label_class[class_id]\n check_file_name = os.path.join(image_path, data_id, cur_class, imgName)\n if os.path.isfile(check_file_name):\n select_class = cur_label_class\n break\n return select_class\n\n\ndef add_common_box_smoke_region(org_json_dir, dst_json_dir, done_root_dir):\n if not os.path.exists(dst_json_dir):\n os.makedirs(dst_json_dir)\n smoke_hand_num, smoke_nohand_num, smoke_hard_num = 0, 0, 0\n (nosmoke_bg_num, nosmoke_face_num, nosmoke_susp_num, nosmoke_cover_num\n ) = 0, 0, 0, 0\n for json_file_name in glob.glob(org_json_dir + '/*.json'):\n json_file = open(json_file_name, 'r')\n base_file_id = os.path.basename(json_file_name)[:-5]\n print(base_file_id + '.json')\n json_lines = json_file.read().splitlines()\n dst_json_lines = []\n new_json_file = codecs.open(dst_json_dir + '/' + base_file_id +\n '.json', 'w', 'utf-8')\n new_json_file.close()\n new_json_file = codecs.open(dst_json_dir + '/' + base_file_id +\n '.json', 'a+', 'utf-8')\n for line in json_lines:\n if line[0] == '#':\n new_json_file.write(line + '\\n')\n continue\n js = json.loads(line, object_pairs_hook=OrderedDict)\n imgName = js['image_key']\n select_class = getRegionClass(done_root_dir, base_file_id, imgName)\n if select_class == None:\n new_json_file.write(line + '\\n')\n continue\n new_common_box = {}\n new_attrs = {}\n new_attrs['ignore'] = 'no'\n new_attrs['type'] = 'smoke_region'\n new_attrs['class'] = select_class\n new_common_box['attrs'] = new_attrs\n if select_class == 'smoke_hard':\n new_attrs['ignore'] = 'yes'\n if select_class == 'smoke_hand':\n smoke_hand_num += 1\n elif select_class == 'smoke_nohand':\n smoke_nohand_num += 1\n elif select_class == 'smoke_hard':\n smoke_hard_num += 1\n elif select_class == 'nosmoke_bg':\n nosmoke_bg_num += 1\n elif select_class == 'nosmoke_face':\n nosmoke_face_num += 1\n elif select_class == 'nosmoke_susp':\n nosmoke_susp_num += 1\n elif select_class == 'nosmoke_cover':\n nosmoke_cover_num += 1\n else:\n print('Invalid smoke class.', select_class)\n if 'common_box' in js:\n js['common_box'].append(new_common_box)\n else:\n js['common_box'] = [new_common_box]\n new_js_line = json.dumps(js) + '\\n'\n new_json_file.write(new_js_line)\n new_json_file.close()\n print('write ' + base_file_id + '.json')\n print('add_common_box_smoke_region done.')\n print('smoke_hand:%d, smoke_nohand:%d, smoke_hard:%d' % (smoke_hand_num,\n smoke_nohand_num, smoke_hard_num))\n print(\n 'nosmoke_bg:%d, nosmoke_face:%d, nosmoke_susp:%d, nosmoke_cover:%d' %\n (nosmoke_bg_num, nosmoke_face_num, nosmoke_susp_num, nosmoke_cover_num)\n )\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef getRegionClass(image_path, data_id, imgName):\n region_class = ['nosmoke_background', 'nosmoke_face', 'nosmoke_suspect',\n 'nosmoke_cover', 'smoke_hand', 'smoke_nohand', 'smoke_hard']\n label_class = ['nosmoke_bg', 'nosmoke_face', 'nosmoke_susp',\n 'nosmoke_cover', 'smoke_hand', 'smoke_nohand', 'smoke_hard']\n select_class = None\n for class_id in range(len(region_class)):\n cur_class = region_class[class_id]\n cur_label_class = label_class[class_id]\n check_file_name = os.path.join(image_path, data_id, cur_class, imgName)\n if os.path.isfile(check_file_name):\n select_class = cur_label_class\n break\n return select_class\n\n\ndef add_common_box_smoke_region(org_json_dir, dst_json_dir, done_root_dir):\n if not os.path.exists(dst_json_dir):\n os.makedirs(dst_json_dir)\n smoke_hand_num, smoke_nohand_num, smoke_hard_num = 0, 0, 0\n (nosmoke_bg_num, nosmoke_face_num, nosmoke_susp_num, nosmoke_cover_num\n ) = 0, 0, 0, 0\n for json_file_name in glob.glob(org_json_dir + '/*.json'):\n json_file = open(json_file_name, 'r')\n base_file_id = os.path.basename(json_file_name)[:-5]\n print(base_file_id + '.json')\n json_lines = json_file.read().splitlines()\n dst_json_lines = []\n new_json_file = codecs.open(dst_json_dir + '/' + base_file_id +\n '.json', 'w', 'utf-8')\n new_json_file.close()\n new_json_file = codecs.open(dst_json_dir + '/' + base_file_id +\n '.json', 'a+', 'utf-8')\n for line in json_lines:\n if line[0] == '#':\n new_json_file.write(line + '\\n')\n continue\n js = json.loads(line, object_pairs_hook=OrderedDict)\n imgName = js['image_key']\n select_class = getRegionClass(done_root_dir, base_file_id, imgName)\n if select_class == None:\n new_json_file.write(line + '\\n')\n continue\n new_common_box = {}\n new_attrs = {}\n new_attrs['ignore'] = 'no'\n new_attrs['type'] = 'smoke_region'\n new_attrs['class'] = select_class\n new_common_box['attrs'] = new_attrs\n if select_class == 'smoke_hard':\n new_attrs['ignore'] = 'yes'\n if select_class == 'smoke_hand':\n smoke_hand_num += 1\n elif select_class == 'smoke_nohand':\n smoke_nohand_num += 1\n elif select_class == 'smoke_hard':\n smoke_hard_num += 1\n elif select_class == 'nosmoke_bg':\n nosmoke_bg_num += 1\n elif select_class == 'nosmoke_face':\n nosmoke_face_num += 1\n elif select_class == 'nosmoke_susp':\n nosmoke_susp_num += 1\n elif select_class == 'nosmoke_cover':\n nosmoke_cover_num += 1\n else:\n print('Invalid smoke class.', select_class)\n if 'common_box' in js:\n js['common_box'].append(new_common_box)\n else:\n js['common_box'] = [new_common_box]\n new_js_line = json.dumps(js) + '\\n'\n new_json_file.write(new_js_line)\n new_json_file.close()\n print('write ' + base_file_id + '.json')\n print('add_common_box_smoke_region done.')\n print('smoke_hand:%d, smoke_nohand:%d, smoke_hard:%d' % (smoke_hand_num,\n smoke_nohand_num, smoke_hard_num))\n print(\n 'nosmoke_bg:%d, nosmoke_face:%d, nosmoke_susp:%d, nosmoke_cover:%d' %\n (nosmoke_bg_num, nosmoke_face_num, nosmoke_susp_num, nosmoke_cover_num)\n )\n\n\nif __name__ == '__main__':\n if len(sys.argv) < 2:\n print(\n 'useage: add_common_box_smoke_region.py org_json_dir dst_json_dir done_root_dir'\n )\n exit()\n org_json_dir = sys.argv[1]\n dst_json_dir = sys.argv[2]\n done_root_dir = sys.argv[3]\n add_common_box_smoke_region(org_json_dir, dst_json_dir, done_root_dir)\n",
"step-4": "import os\nimport sys\nimport glob\nimport shutil\nimport json\nimport codecs\nfrom collections import OrderedDict\n\n\ndef getRegionClass(image_path, data_id, imgName):\n region_class = ['nosmoke_background', 'nosmoke_face', 'nosmoke_suspect',\n 'nosmoke_cover', 'smoke_hand', 'smoke_nohand', 'smoke_hard']\n label_class = ['nosmoke_bg', 'nosmoke_face', 'nosmoke_susp',\n 'nosmoke_cover', 'smoke_hand', 'smoke_nohand', 'smoke_hard']\n select_class = None\n for class_id in range(len(region_class)):\n cur_class = region_class[class_id]\n cur_label_class = label_class[class_id]\n check_file_name = os.path.join(image_path, data_id, cur_class, imgName)\n if os.path.isfile(check_file_name):\n select_class = cur_label_class\n break\n return select_class\n\n\ndef add_common_box_smoke_region(org_json_dir, dst_json_dir, done_root_dir):\n if not os.path.exists(dst_json_dir):\n os.makedirs(dst_json_dir)\n smoke_hand_num, smoke_nohand_num, smoke_hard_num = 0, 0, 0\n (nosmoke_bg_num, nosmoke_face_num, nosmoke_susp_num, nosmoke_cover_num\n ) = 0, 0, 0, 0\n for json_file_name in glob.glob(org_json_dir + '/*.json'):\n json_file = open(json_file_name, 'r')\n base_file_id = os.path.basename(json_file_name)[:-5]\n print(base_file_id + '.json')\n json_lines = json_file.read().splitlines()\n dst_json_lines = []\n new_json_file = codecs.open(dst_json_dir + '/' + base_file_id +\n '.json', 'w', 'utf-8')\n new_json_file.close()\n new_json_file = codecs.open(dst_json_dir + '/' + base_file_id +\n '.json', 'a+', 'utf-8')\n for line in json_lines:\n if line[0] == '#':\n new_json_file.write(line + '\\n')\n continue\n js = json.loads(line, object_pairs_hook=OrderedDict)\n imgName = js['image_key']\n select_class = getRegionClass(done_root_dir, base_file_id, imgName)\n if select_class == None:\n new_json_file.write(line + '\\n')\n continue\n new_common_box = {}\n new_attrs = {}\n new_attrs['ignore'] = 'no'\n new_attrs['type'] = 'smoke_region'\n new_attrs['class'] = select_class\n new_common_box['attrs'] = new_attrs\n if select_class == 'smoke_hard':\n new_attrs['ignore'] = 'yes'\n if select_class == 'smoke_hand':\n smoke_hand_num += 1\n elif select_class == 'smoke_nohand':\n smoke_nohand_num += 1\n elif select_class == 'smoke_hard':\n smoke_hard_num += 1\n elif select_class == 'nosmoke_bg':\n nosmoke_bg_num += 1\n elif select_class == 'nosmoke_face':\n nosmoke_face_num += 1\n elif select_class == 'nosmoke_susp':\n nosmoke_susp_num += 1\n elif select_class == 'nosmoke_cover':\n nosmoke_cover_num += 1\n else:\n print('Invalid smoke class.', select_class)\n if 'common_box' in js:\n js['common_box'].append(new_common_box)\n else:\n js['common_box'] = [new_common_box]\n new_js_line = json.dumps(js) + '\\n'\n new_json_file.write(new_js_line)\n new_json_file.close()\n print('write ' + base_file_id + '.json')\n print('add_common_box_smoke_region done.')\n print('smoke_hand:%d, smoke_nohand:%d, smoke_hard:%d' % (smoke_hand_num,\n smoke_nohand_num, smoke_hard_num))\n print(\n 'nosmoke_bg:%d, nosmoke_face:%d, nosmoke_susp:%d, nosmoke_cover:%d' %\n (nosmoke_bg_num, nosmoke_face_num, nosmoke_susp_num, nosmoke_cover_num)\n )\n\n\nif __name__ == '__main__':\n if len(sys.argv) < 2:\n print(\n 'useage: add_common_box_smoke_region.py org_json_dir dst_json_dir done_root_dir'\n )\n exit()\n org_json_dir = sys.argv[1]\n dst_json_dir = sys.argv[2]\n done_root_dir = sys.argv[3]\n add_common_box_smoke_region(org_json_dir, dst_json_dir, done_root_dir)\n",
"step-5": "import os\nimport sys\nimport glob\nimport shutil\nimport json\nimport codecs\nfrom collections import OrderedDict\n\ndef getRegionClass(image_path, data_id, imgName):\n region_class = ['nosmoke_background', 'nosmoke_face', 'nosmoke_suspect', 'nosmoke_cover', 'smoke_hand', 'smoke_nohand', 'smoke_hard']\n label_class = ['nosmoke_bg', 'nosmoke_face', 'nosmoke_susp', 'nosmoke_cover', 'smoke_hand', 'smoke_nohand', 'smoke_hard']\n select_class = None\n for class_id in range(len(region_class)):\n cur_class = region_class[class_id]\n cur_label_class = label_class[class_id]\n check_file_name = os.path.join(image_path, data_id, cur_class, imgName)\n if os.path.isfile(check_file_name):\n select_class = cur_label_class\n #print check_file_name\n break\n return select_class\n\ndef add_common_box_smoke_region(org_json_dir, dst_json_dir, done_root_dir):\n if not os.path.exists(dst_json_dir):\n os.makedirs(dst_json_dir)\n \n smoke_hand_num, smoke_nohand_num, smoke_hard_num = 0, 0, 0\n nosmoke_bg_num, nosmoke_face_num, nosmoke_susp_num, nosmoke_cover_num = 0, 0, 0, 0\n for json_file_name in glob.glob(org_json_dir + '/*.json'):\n json_file = open(json_file_name, 'r')\n base_file_id = os.path.basename(json_file_name)[:-5]\n print(base_file_id + '.json')\n \n json_lines = json_file.read().splitlines()\n dst_json_lines = []\n \n new_json_file = codecs.open(dst_json_dir + '/' + base_file_id + '.json', \"w\", \"utf-8\")\n new_json_file.close()\n new_json_file = codecs.open(dst_json_dir + '/' + base_file_id + '.json', \"a+\", 'utf-8')\n for line in json_lines:\n if line[0] == '#':\n new_json_file.write(line + '\\n')\n continue\n js = json.loads(line, object_pairs_hook=OrderedDict)\n \n #new_js_line = json.dumps(js) + \"\\n\"\n #new_json_file.write(new_js_line)\n #continue\n \n imgName = js[\"image_key\"]\n select_class = getRegionClass(done_root_dir, base_file_id, imgName)\n if select_class == None:\n new_json_file.write(line + '\\n') #\n #print('Not Found: ', done_root_dir, base_file_id, imgName)\n continue\n #print select_class\n new_common_box = {}\n new_attrs = {}\n new_attrs['ignore'] = 'no'\n new_attrs['type'] = 'smoke_region'\n new_attrs['class'] = select_class\n new_common_box['attrs'] = new_attrs\n if select_class == 'smoke_hard':\n new_attrs['ignore'] = 'yes'\n \n # statistic\n if select_class == 'smoke_hand':\n smoke_hand_num += 1\n elif select_class == 'smoke_nohand':\n smoke_nohand_num += 1\n elif select_class == 'smoke_hard':\n smoke_hard_num += 1\n elif select_class == 'nosmoke_bg':\n nosmoke_bg_num += 1\n elif select_class == 'nosmoke_face':\n nosmoke_face_num += 1\n elif select_class == 'nosmoke_susp':\n nosmoke_susp_num += 1\n elif select_class == 'nosmoke_cover':\n nosmoke_cover_num += 1\n else:\n print('Invalid smoke class.', select_class)\n \n # common box, like phone, hand\n if 'common_box' in js:\n js['common_box'].append(new_common_box)\n else:\n js['common_box'] = [new_common_box]\n new_js_line = json.dumps(js) + \"\\n\"\n new_json_file.write(new_js_line)\n new_json_file.close()\n print('write ' + base_file_id + '.json')\n print('add_common_box_smoke_region done.')\n print('smoke_hand:%d, smoke_nohand:%d, smoke_hard:%d'%(smoke_hand_num, smoke_nohand_num, smoke_hard_num))\n print('nosmoke_bg:%d, nosmoke_face:%d, nosmoke_susp:%d, nosmoke_cover:%d'%(nosmoke_bg_num, nosmoke_face_num, nosmoke_susp_num, nosmoke_cover_num))\n \nif __name__ == '__main__':\n if len(sys.argv) < 2:\n print('useage: add_common_box_smoke_region.py org_json_dir dst_json_dir done_root_dir')\n exit()\n org_json_dir = sys.argv[1]\n dst_json_dir = sys.argv[2]\n done_root_dir = sys.argv[3]\n add_common_box_smoke_region(org_json_dir, dst_json_dir, done_root_dir)\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import datetime
from datetime import datetime, timedelta
import time
import json
import base64
import requests
from bson.objectid import ObjectId
import urllib
isinpackage = not __name__ in ['google_api', '__main__']
if isinpackage:
from .settings import settings
from . import util
from .util import Just
from .db import get_collection
from .import certificate
else:
from settings import settings
# import util
from util import Just
from db import get_collection
# import certificate
users_db = get_collection('users')
client_id = settings.google.client_id()
redirect_uri = f'{settings.url_prefix()}/api/v1/oauth/google/redirect'
scope = urllib.parse.quote(settings.google.scope(), safe='')
access_type = settings.google.access_type()
prompt = settings.google.prompt()
response_type = settings.google.response_type()
def get_certs_keys(kid):
url = 'https://www.googleapis.com/oauth2/v3/certs'
data = requests.get(url).json()['keys']
return next(filter(lambda e: kid == e['kid']), None)
def get_redirect_link(realid=None):
state = util.generate_id(50)
certificate.register_state(state, "google_oauth", {"realid": realid})
return 'https://accounts.google.com/o/oauth2/v2/auth?' \
+ f"client_id={client_id}&" \
+ f"include_granted_scopes={'true'}&" \
+ f"redirect_uri={redirect_uri}&" \
+ f"scope={scope}&" \
+ f"access_type={access_type}&" \
+ f"state={state}&" \
+ f"prompt={prompt}&" \
+ f"response_type={response_type}"
def code_to_refresh_token(code):
endpoint = 'https://oauth2.googleapis.com/token'
tokens = requests.post(endpoint, {
'code': code,
'client_id': client_id,
'client_secret': settings.google.google_client_secret(),
'redirect_uri': redirect_uri,
'grant_type': 'authorization_code'
}).json()
header, profile = decode_id_token(tokens['id_token'])
return profile, tokens
def decode_base64_padding(s):
return base64.urlsafe_b64decode(s + '=' * (-len(s) % 4)).decode()
def decode_id_token(id_token):
s = id_token.split('.')
header = json.loads(decode_base64_padding(s[0]))
payload = json.loads(decode_base64_padding(s[1]))
# key = get_certs_keys(header['kid'])
return header, payload
def register(profile, tokens, realid=None):
profile.update(tokens)
user = users_db.find_one({'_id': ObjectId(realid), 'connections.google.sub': profile['sub']})
if realid:
users_db.update_one({'_id': ObjectId(realid)}, {
'$set': {
'connections.google': profile,
},
'$inc': {
'connections.length': 0 if user else 1
}
})
print('add google info')
else:
users_db.insert_one({
'connections': {
'google': profile,
'length': 1
}
})
print('connect with google')
def refresh_token(refresh_token):
endpoint = 'https://oauth2.googleapis.com/token'
return requests.post(endpoint, {
'client_id': client_id,
'client_secret': settings.google.google_client_secret(),
'refresh_token': refresh_token,
'grant_type': 'refresh_token'
}).json()
def verify_access_token(access_token):
url = f'https://oauth2.googleapis.com/tokeninfo?access_token={access_token}'
return requests.get(url).status_code == 200
def get_access_token(google_user_id):
data = Just(users_db.find_one({'connections.google.sub': google_user_id}))
access_token = data.connections.google.access_token()
_refresh_token = data.connections.google.refresh_token()
assert _refresh_token
if access_token and verify_access_token(access_token):
return access_token
else:
return Just(refresh_token(_refresh_token)).access_token()
def get_real_user_id(user_id):
return str(users_db.find_one({"connections.google.sub": user_id})["_id"])
def get_google_user_id(real_user_id):
data = Just(users_db.find_one({"_id": ObjectId(real_user_id)}))
if data() and ('line' in data.connections()):
return data.connections.google.sub()
else:
raise RuntimeError
def add_event(real_user_id, start, end, options={
'summary': '',
'description': ''
}):
endpoint = 'https://www.googleapis.com/calendar/v3/calendars/primary/events'
d = {
'end': {
'dateTime': end,
'timeZone': 'Asia/Tokyo'
},
'start': {
'dateTime': start,
'timeZone': 'Asia/Tokyo'
},
}
d.update(options)
res = requests.post(endpoint, json=d, headers={
'content-type': 'application/json',
'authorization': f'Bearer {get_access_token(get_google_user_id(real_user_id))}'
})
r = res.status_code == 200
if not r:
print(res.text)
return r
|
normal
|
{
"blob_id": "c75c69b006734e476352de1913fd4a58021bffd6",
"index": 2704,
"step-1": "<mask token>\n\n\ndef get_certs_keys(kid):\n url = 'https://www.googleapis.com/oauth2/v3/certs'\n data = requests.get(url).json()['keys']\n return next(filter(lambda e: kid == e['kid']), None)\n\n\ndef get_redirect_link(realid=None):\n state = util.generate_id(50)\n certificate.register_state(state, 'google_oauth', {'realid': realid})\n return ('https://accounts.google.com/o/oauth2/v2/auth?' +\n f'client_id={client_id}&' + f\"include_granted_scopes={'true'}&\" +\n f'redirect_uri={redirect_uri}&' + f'scope={scope}&' +\n f'access_type={access_type}&' + f'state={state}&' +\n f'prompt={prompt}&' + f'response_type={response_type}')\n\n\ndef code_to_refresh_token(code):\n endpoint = 'https://oauth2.googleapis.com/token'\n tokens = requests.post(endpoint, {'code': code, 'client_id': client_id,\n 'client_secret': settings.google.google_client_secret(),\n 'redirect_uri': redirect_uri, 'grant_type': 'authorization_code'}\n ).json()\n header, profile = decode_id_token(tokens['id_token'])\n return profile, tokens\n\n\ndef decode_base64_padding(s):\n return base64.urlsafe_b64decode(s + '=' * (-len(s) % 4)).decode()\n\n\n<mask token>\n\n\ndef register(profile, tokens, realid=None):\n profile.update(tokens)\n user = users_db.find_one({'_id': ObjectId(realid),\n 'connections.google.sub': profile['sub']})\n if realid:\n users_db.update_one({'_id': ObjectId(realid)}, {'$set': {\n 'connections.google': profile}, '$inc': {'connections.length': \n 0 if user else 1}})\n print('add google info')\n else:\n users_db.insert_one({'connections': {'google': profile, 'length': 1}})\n print('connect with google')\n\n\ndef refresh_token(refresh_token):\n endpoint = 'https://oauth2.googleapis.com/token'\n return requests.post(endpoint, {'client_id': client_id, 'client_secret':\n settings.google.google_client_secret(), 'refresh_token':\n refresh_token, 'grant_type': 'refresh_token'}).json()\n\n\ndef verify_access_token(access_token):\n url = (\n f'https://oauth2.googleapis.com/tokeninfo?access_token={access_token}')\n return requests.get(url).status_code == 200\n\n\ndef get_access_token(google_user_id):\n data = Just(users_db.find_one({'connections.google.sub': google_user_id}))\n access_token = data.connections.google.access_token()\n _refresh_token = data.connections.google.refresh_token()\n assert _refresh_token\n if access_token and verify_access_token(access_token):\n return access_token\n else:\n return Just(refresh_token(_refresh_token)).access_token()\n\n\ndef get_real_user_id(user_id):\n return str(users_db.find_one({'connections.google.sub': user_id})['_id'])\n\n\ndef get_google_user_id(real_user_id):\n data = Just(users_db.find_one({'_id': ObjectId(real_user_id)}))\n if data() and 'line' in data.connections():\n return data.connections.google.sub()\n else:\n raise RuntimeError\n\n\ndef add_event(real_user_id, start, end, options={'summary': '',\n 'description': ''}):\n endpoint = (\n 'https://www.googleapis.com/calendar/v3/calendars/primary/events')\n d = {'end': {'dateTime': end, 'timeZone': 'Asia/Tokyo'}, 'start': {\n 'dateTime': start, 'timeZone': 'Asia/Tokyo'}}\n d.update(options)\n res = requests.post(endpoint, json=d, headers={'content-type':\n 'application/json', 'authorization':\n f'Bearer {get_access_token(get_google_user_id(real_user_id))}'})\n r = res.status_code == 200\n if not r:\n print(res.text)\n return r\n",
"step-2": "<mask token>\n\n\ndef get_certs_keys(kid):\n url = 'https://www.googleapis.com/oauth2/v3/certs'\n data = requests.get(url).json()['keys']\n return next(filter(lambda e: kid == e['kid']), None)\n\n\ndef get_redirect_link(realid=None):\n state = util.generate_id(50)\n certificate.register_state(state, 'google_oauth', {'realid': realid})\n return ('https://accounts.google.com/o/oauth2/v2/auth?' +\n f'client_id={client_id}&' + f\"include_granted_scopes={'true'}&\" +\n f'redirect_uri={redirect_uri}&' + f'scope={scope}&' +\n f'access_type={access_type}&' + f'state={state}&' +\n f'prompt={prompt}&' + f'response_type={response_type}')\n\n\ndef code_to_refresh_token(code):\n endpoint = 'https://oauth2.googleapis.com/token'\n tokens = requests.post(endpoint, {'code': code, 'client_id': client_id,\n 'client_secret': settings.google.google_client_secret(),\n 'redirect_uri': redirect_uri, 'grant_type': 'authorization_code'}\n ).json()\n header, profile = decode_id_token(tokens['id_token'])\n return profile, tokens\n\n\ndef decode_base64_padding(s):\n return base64.urlsafe_b64decode(s + '=' * (-len(s) % 4)).decode()\n\n\ndef decode_id_token(id_token):\n s = id_token.split('.')\n header = json.loads(decode_base64_padding(s[0]))\n payload = json.loads(decode_base64_padding(s[1]))\n return header, payload\n\n\ndef register(profile, tokens, realid=None):\n profile.update(tokens)\n user = users_db.find_one({'_id': ObjectId(realid),\n 'connections.google.sub': profile['sub']})\n if realid:\n users_db.update_one({'_id': ObjectId(realid)}, {'$set': {\n 'connections.google': profile}, '$inc': {'connections.length': \n 0 if user else 1}})\n print('add google info')\n else:\n users_db.insert_one({'connections': {'google': profile, 'length': 1}})\n print('connect with google')\n\n\ndef refresh_token(refresh_token):\n endpoint = 'https://oauth2.googleapis.com/token'\n return requests.post(endpoint, {'client_id': client_id, 'client_secret':\n settings.google.google_client_secret(), 'refresh_token':\n refresh_token, 'grant_type': 'refresh_token'}).json()\n\n\ndef verify_access_token(access_token):\n url = (\n f'https://oauth2.googleapis.com/tokeninfo?access_token={access_token}')\n return requests.get(url).status_code == 200\n\n\ndef get_access_token(google_user_id):\n data = Just(users_db.find_one({'connections.google.sub': google_user_id}))\n access_token = data.connections.google.access_token()\n _refresh_token = data.connections.google.refresh_token()\n assert _refresh_token\n if access_token and verify_access_token(access_token):\n return access_token\n else:\n return Just(refresh_token(_refresh_token)).access_token()\n\n\ndef get_real_user_id(user_id):\n return str(users_db.find_one({'connections.google.sub': user_id})['_id'])\n\n\ndef get_google_user_id(real_user_id):\n data = Just(users_db.find_one({'_id': ObjectId(real_user_id)}))\n if data() and 'line' in data.connections():\n return data.connections.google.sub()\n else:\n raise RuntimeError\n\n\ndef add_event(real_user_id, start, end, options={'summary': '',\n 'description': ''}):\n endpoint = (\n 'https://www.googleapis.com/calendar/v3/calendars/primary/events')\n d = {'end': {'dateTime': end, 'timeZone': 'Asia/Tokyo'}, 'start': {\n 'dateTime': start, 'timeZone': 'Asia/Tokyo'}}\n d.update(options)\n res = requests.post(endpoint, json=d, headers={'content-type':\n 'application/json', 'authorization':\n f'Bearer {get_access_token(get_google_user_id(real_user_id))}'})\n r = res.status_code == 200\n if not r:\n print(res.text)\n return r\n",
"step-3": "<mask token>\nisinpackage = not __name__ in ['google_api', '__main__']\nif isinpackage:\n from .settings import settings\n from . import util\n from .util import Just\n from .db import get_collection\n from . import certificate\nelse:\n from settings import settings\n from util import Just\n from db import get_collection\nusers_db = get_collection('users')\nclient_id = settings.google.client_id()\nredirect_uri = f'{settings.url_prefix()}/api/v1/oauth/google/redirect'\nscope = urllib.parse.quote(settings.google.scope(), safe='')\naccess_type = settings.google.access_type()\nprompt = settings.google.prompt()\nresponse_type = settings.google.response_type()\n\n\ndef get_certs_keys(kid):\n url = 'https://www.googleapis.com/oauth2/v3/certs'\n data = requests.get(url).json()['keys']\n return next(filter(lambda e: kid == e['kid']), None)\n\n\ndef get_redirect_link(realid=None):\n state = util.generate_id(50)\n certificate.register_state(state, 'google_oauth', {'realid': realid})\n return ('https://accounts.google.com/o/oauth2/v2/auth?' +\n f'client_id={client_id}&' + f\"include_granted_scopes={'true'}&\" +\n f'redirect_uri={redirect_uri}&' + f'scope={scope}&' +\n f'access_type={access_type}&' + f'state={state}&' +\n f'prompt={prompt}&' + f'response_type={response_type}')\n\n\ndef code_to_refresh_token(code):\n endpoint = 'https://oauth2.googleapis.com/token'\n tokens = requests.post(endpoint, {'code': code, 'client_id': client_id,\n 'client_secret': settings.google.google_client_secret(),\n 'redirect_uri': redirect_uri, 'grant_type': 'authorization_code'}\n ).json()\n header, profile = decode_id_token(tokens['id_token'])\n return profile, tokens\n\n\ndef decode_base64_padding(s):\n return base64.urlsafe_b64decode(s + '=' * (-len(s) % 4)).decode()\n\n\ndef decode_id_token(id_token):\n s = id_token.split('.')\n header = json.loads(decode_base64_padding(s[0]))\n payload = json.loads(decode_base64_padding(s[1]))\n return header, payload\n\n\ndef register(profile, tokens, realid=None):\n profile.update(tokens)\n user = users_db.find_one({'_id': ObjectId(realid),\n 'connections.google.sub': profile['sub']})\n if realid:\n users_db.update_one({'_id': ObjectId(realid)}, {'$set': {\n 'connections.google': profile}, '$inc': {'connections.length': \n 0 if user else 1}})\n print('add google info')\n else:\n users_db.insert_one({'connections': {'google': profile, 'length': 1}})\n print('connect with google')\n\n\ndef refresh_token(refresh_token):\n endpoint = 'https://oauth2.googleapis.com/token'\n return requests.post(endpoint, {'client_id': client_id, 'client_secret':\n settings.google.google_client_secret(), 'refresh_token':\n refresh_token, 'grant_type': 'refresh_token'}).json()\n\n\ndef verify_access_token(access_token):\n url = (\n f'https://oauth2.googleapis.com/tokeninfo?access_token={access_token}')\n return requests.get(url).status_code == 200\n\n\ndef get_access_token(google_user_id):\n data = Just(users_db.find_one({'connections.google.sub': google_user_id}))\n access_token = data.connections.google.access_token()\n _refresh_token = data.connections.google.refresh_token()\n assert _refresh_token\n if access_token and verify_access_token(access_token):\n return access_token\n else:\n return Just(refresh_token(_refresh_token)).access_token()\n\n\ndef get_real_user_id(user_id):\n return str(users_db.find_one({'connections.google.sub': user_id})['_id'])\n\n\ndef get_google_user_id(real_user_id):\n data = Just(users_db.find_one({'_id': ObjectId(real_user_id)}))\n if data() and 'line' in data.connections():\n return data.connections.google.sub()\n else:\n raise RuntimeError\n\n\ndef add_event(real_user_id, start, end, options={'summary': '',\n 'description': ''}):\n endpoint = (\n 'https://www.googleapis.com/calendar/v3/calendars/primary/events')\n d = {'end': {'dateTime': end, 'timeZone': 'Asia/Tokyo'}, 'start': {\n 'dateTime': start, 'timeZone': 'Asia/Tokyo'}}\n d.update(options)\n res = requests.post(endpoint, json=d, headers={'content-type':\n 'application/json', 'authorization':\n f'Bearer {get_access_token(get_google_user_id(real_user_id))}'})\n r = res.status_code == 200\n if not r:\n print(res.text)\n return r\n",
"step-4": "import datetime\nfrom datetime import datetime, timedelta\nimport time\nimport json\nimport base64\nimport requests\nfrom bson.objectid import ObjectId\nimport urllib\nisinpackage = not __name__ in ['google_api', '__main__']\nif isinpackage:\n from .settings import settings\n from . import util\n from .util import Just\n from .db import get_collection\n from . import certificate\nelse:\n from settings import settings\n from util import Just\n from db import get_collection\nusers_db = get_collection('users')\nclient_id = settings.google.client_id()\nredirect_uri = f'{settings.url_prefix()}/api/v1/oauth/google/redirect'\nscope = urllib.parse.quote(settings.google.scope(), safe='')\naccess_type = settings.google.access_type()\nprompt = settings.google.prompt()\nresponse_type = settings.google.response_type()\n\n\ndef get_certs_keys(kid):\n url = 'https://www.googleapis.com/oauth2/v3/certs'\n data = requests.get(url).json()['keys']\n return next(filter(lambda e: kid == e['kid']), None)\n\n\ndef get_redirect_link(realid=None):\n state = util.generate_id(50)\n certificate.register_state(state, 'google_oauth', {'realid': realid})\n return ('https://accounts.google.com/o/oauth2/v2/auth?' +\n f'client_id={client_id}&' + f\"include_granted_scopes={'true'}&\" +\n f'redirect_uri={redirect_uri}&' + f'scope={scope}&' +\n f'access_type={access_type}&' + f'state={state}&' +\n f'prompt={prompt}&' + f'response_type={response_type}')\n\n\ndef code_to_refresh_token(code):\n endpoint = 'https://oauth2.googleapis.com/token'\n tokens = requests.post(endpoint, {'code': code, 'client_id': client_id,\n 'client_secret': settings.google.google_client_secret(),\n 'redirect_uri': redirect_uri, 'grant_type': 'authorization_code'}\n ).json()\n header, profile = decode_id_token(tokens['id_token'])\n return profile, tokens\n\n\ndef decode_base64_padding(s):\n return base64.urlsafe_b64decode(s + '=' * (-len(s) % 4)).decode()\n\n\ndef decode_id_token(id_token):\n s = id_token.split('.')\n header = json.loads(decode_base64_padding(s[0]))\n payload = json.loads(decode_base64_padding(s[1]))\n return header, payload\n\n\ndef register(profile, tokens, realid=None):\n profile.update(tokens)\n user = users_db.find_one({'_id': ObjectId(realid),\n 'connections.google.sub': profile['sub']})\n if realid:\n users_db.update_one({'_id': ObjectId(realid)}, {'$set': {\n 'connections.google': profile}, '$inc': {'connections.length': \n 0 if user else 1}})\n print('add google info')\n else:\n users_db.insert_one({'connections': {'google': profile, 'length': 1}})\n print('connect with google')\n\n\ndef refresh_token(refresh_token):\n endpoint = 'https://oauth2.googleapis.com/token'\n return requests.post(endpoint, {'client_id': client_id, 'client_secret':\n settings.google.google_client_secret(), 'refresh_token':\n refresh_token, 'grant_type': 'refresh_token'}).json()\n\n\ndef verify_access_token(access_token):\n url = (\n f'https://oauth2.googleapis.com/tokeninfo?access_token={access_token}')\n return requests.get(url).status_code == 200\n\n\ndef get_access_token(google_user_id):\n data = Just(users_db.find_one({'connections.google.sub': google_user_id}))\n access_token = data.connections.google.access_token()\n _refresh_token = data.connections.google.refresh_token()\n assert _refresh_token\n if access_token and verify_access_token(access_token):\n return access_token\n else:\n return Just(refresh_token(_refresh_token)).access_token()\n\n\ndef get_real_user_id(user_id):\n return str(users_db.find_one({'connections.google.sub': user_id})['_id'])\n\n\ndef get_google_user_id(real_user_id):\n data = Just(users_db.find_one({'_id': ObjectId(real_user_id)}))\n if data() and 'line' in data.connections():\n return data.connections.google.sub()\n else:\n raise RuntimeError\n\n\ndef add_event(real_user_id, start, end, options={'summary': '',\n 'description': ''}):\n endpoint = (\n 'https://www.googleapis.com/calendar/v3/calendars/primary/events')\n d = {'end': {'dateTime': end, 'timeZone': 'Asia/Tokyo'}, 'start': {\n 'dateTime': start, 'timeZone': 'Asia/Tokyo'}}\n d.update(options)\n res = requests.post(endpoint, json=d, headers={'content-type':\n 'application/json', 'authorization':\n f'Bearer {get_access_token(get_google_user_id(real_user_id))}'})\n r = res.status_code == 200\n if not r:\n print(res.text)\n return r\n",
"step-5": "import datetime\nfrom datetime import datetime, timedelta\nimport time\nimport json\nimport base64\nimport requests\nfrom bson.objectid import ObjectId\nimport urllib\nisinpackage = not __name__ in ['google_api', '__main__']\nif isinpackage:\n from .settings import settings\n from . import util\n from .util import Just\n from .db import get_collection\n from .import certificate\nelse:\n from settings import settings\n # import util\n from util import Just\n from db import get_collection\n # import certificate\n\n\nusers_db = get_collection('users')\n\n\nclient_id = settings.google.client_id()\nredirect_uri = f'{settings.url_prefix()}/api/v1/oauth/google/redirect'\nscope = urllib.parse.quote(settings.google.scope(), safe='')\naccess_type = settings.google.access_type()\nprompt = settings.google.prompt()\nresponse_type = settings.google.response_type()\n\n\ndef get_certs_keys(kid):\n url = 'https://www.googleapis.com/oauth2/v3/certs'\n data = requests.get(url).json()['keys']\n return next(filter(lambda e: kid == e['kid']), None)\n\n\ndef get_redirect_link(realid=None):\n state = util.generate_id(50)\n certificate.register_state(state, \"google_oauth\", {\"realid\": realid})\n return 'https://accounts.google.com/o/oauth2/v2/auth?' \\\n + f\"client_id={client_id}&\" \\\n + f\"include_granted_scopes={'true'}&\" \\\n + f\"redirect_uri={redirect_uri}&\" \\\n + f\"scope={scope}&\" \\\n + f\"access_type={access_type}&\" \\\n + f\"state={state}&\" \\\n + f\"prompt={prompt}&\" \\\n + f\"response_type={response_type}\"\n\n\ndef code_to_refresh_token(code):\n endpoint = 'https://oauth2.googleapis.com/token'\n tokens = requests.post(endpoint, {\n 'code': code,\n 'client_id': client_id,\n 'client_secret': settings.google.google_client_secret(),\n 'redirect_uri': redirect_uri,\n 'grant_type': 'authorization_code'\n }).json()\n header, profile = decode_id_token(tokens['id_token'])\n return profile, tokens\n\n\ndef decode_base64_padding(s):\n return base64.urlsafe_b64decode(s + '=' * (-len(s) % 4)).decode()\n\n\ndef decode_id_token(id_token):\n s = id_token.split('.')\n header = json.loads(decode_base64_padding(s[0]))\n payload = json.loads(decode_base64_padding(s[1]))\n # key = get_certs_keys(header['kid'])\n return header, payload\n\n\ndef register(profile, tokens, realid=None):\n profile.update(tokens)\n user = users_db.find_one({'_id': ObjectId(realid), 'connections.google.sub': profile['sub']})\n if realid:\n users_db.update_one({'_id': ObjectId(realid)}, {\n '$set': {\n 'connections.google': profile,\n },\n '$inc': {\n 'connections.length': 0 if user else 1\n }\n })\n print('add google info')\n else:\n users_db.insert_one({\n 'connections': {\n 'google': profile,\n 'length': 1\n }\n })\n print('connect with google')\n\n\ndef refresh_token(refresh_token):\n endpoint = 'https://oauth2.googleapis.com/token'\n return requests.post(endpoint, {\n 'client_id': client_id,\n 'client_secret': settings.google.google_client_secret(),\n 'refresh_token': refresh_token,\n 'grant_type': 'refresh_token'\n }).json()\n\n\ndef verify_access_token(access_token):\n url = f'https://oauth2.googleapis.com/tokeninfo?access_token={access_token}'\n return requests.get(url).status_code == 200\n\n\ndef get_access_token(google_user_id):\n data = Just(users_db.find_one({'connections.google.sub': google_user_id}))\n access_token = data.connections.google.access_token()\n _refresh_token = data.connections.google.refresh_token()\n assert _refresh_token\n if access_token and verify_access_token(access_token):\n return access_token\n else:\n return Just(refresh_token(_refresh_token)).access_token()\n\n\ndef get_real_user_id(user_id):\n return str(users_db.find_one({\"connections.google.sub\": user_id})[\"_id\"])\n\n\ndef get_google_user_id(real_user_id):\n data = Just(users_db.find_one({\"_id\": ObjectId(real_user_id)}))\n if data() and ('line' in data.connections()):\n return data.connections.google.sub()\n else:\n raise RuntimeError\n\n\ndef add_event(real_user_id, start, end, options={\n 'summary': '',\n 'description': ''\n}):\n endpoint = 'https://www.googleapis.com/calendar/v3/calendars/primary/events'\n d = {\n 'end': {\n 'dateTime': end,\n 'timeZone': 'Asia/Tokyo'\n },\n 'start': {\n 'dateTime': start,\n 'timeZone': 'Asia/Tokyo'\n },\n }\n d.update(options)\n res = requests.post(endpoint, json=d, headers={\n 'content-type': 'application/json',\n 'authorization': f'Bearer {get_access_token(get_google_user_id(real_user_id))}'\n })\n r = res.status_code == 200\n if not r:\n print(res.text)\n return r\n",
"step-ids": [
11,
12,
14,
15,
16
]
}
|
[
11,
12,
14,
15,
16
] |
class Solution:
def countLetters(self, S: str) ->int:
ans = 0
for _, g in itertools.groupby(S):
cnt = len(list(g))
ans += (1 + cnt) * cnt // 2
return ans
|
normal
|
{
"blob_id": "f9cee552dde5ecf229fda559122b4b0e780c3b88",
"index": 7350,
"step-1": "<mask token>\n",
"step-2": "class Solution:\n <mask token>\n",
"step-3": "class Solution:\n\n def countLetters(self, S: str) ->int:\n ans = 0\n for _, g in itertools.groupby(S):\n cnt = len(list(g))\n ans += (1 + cnt) * cnt // 2\n return ans\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
#!/usr/bin/python3
"Places module"
from flask import jsonify, request, Response, abort
from api.v1.views import app_views
from models import storage
from models.place import Place
@app_views.route('cities/<city_id>/places', strict_slashes=False,
methods=['GET'])
def get_all_places(city_id):
''' gets all places in a city '''
city = storage.get("City", city_id)
if not city:
abort(404)
return jsonify([place.to_dict() for place in city.places]), 200
@app_views.route('/places/<place_id>', strict_slashes=False, methods=['GET'])
def get_place(place_id):
"Gets a place by place id"
place = storage.get("Place", place_id)
if not place:
abort(404)
return jsonify(place.to_dict()), 200
@app_views.route('/places/<place_id>', strict_slashes=False,
methods=['DELETE'])
def delete_place(place_id):
''' deletes places'''
place = storage.get("Place", place_id)
if not place:
abort(404)
storage.delete(place)
storage.save()
return jsonify({}), 200
@app_views.route('/cities/<city_id>/places', strict_slashes=False,
methods=['POST'])
def post_place(city_id):
'''posts a new place to city'''
kwargs = request.get_json()
if not kwargs:
abort(400, 'Not a JSON')
elif 'name' not in kwargs:
abort(400, 'Missing name')
elif 'user_id' not in kwargs:
abort(400, 'Missing user_id')
else:
city = storage.get("City", city_id)
user = storage.get("User", kwargs['user_id'])
if not city:
abort(404)
if not user:
abort(404)
# overwrites or adds w/ valid state_id in case they provide in post
kwargs['city_id'] = city_id
kwargs['user_id'] = user.id
new_place = Place(**kwargs)
storage.save()
return jsonify(new_place.to_dict()), 201
@app_views.route('/places/<place_id>', strict_slashes=False, methods=['PUT'])
def update_place(place_id):
''' updates place '''
params = request.get_json()
if not params:
abort(400, 'Not a JSON')
place = storage.get('Place', place_id)
if not place:
abort(404)
for k, v in params.items():
if k not in ['id', 'user_id', 'city_id', 'create_at', 'updated_at']:
setattr(place, k, v)
storage.save()
return jsonify(place.to_dict()), 200
|
normal
|
{
"blob_id": "d67a2eca4e2fde443b99f5133c2657cdf4ac00de",
"index": 4173,
"step-1": "<mask token>\n\n\n@app_views.route('cities/<city_id>/places', strict_slashes=False, methods=[\n 'GET'])\ndef get_all_places(city_id):\n \"\"\" gets all places in a city \"\"\"\n city = storage.get('City', city_id)\n if not city:\n abort(404)\n return jsonify([place.to_dict() for place in city.places]), 200\n\n\n@app_views.route('/places/<place_id>', strict_slashes=False, methods=['GET'])\ndef get_place(place_id):\n \"\"\"Gets a place by place id\"\"\"\n place = storage.get('Place', place_id)\n if not place:\n abort(404)\n return jsonify(place.to_dict()), 200\n\n\n@app_views.route('/places/<place_id>', strict_slashes=False, methods=['DELETE']\n )\ndef delete_place(place_id):\n \"\"\" deletes places\"\"\"\n place = storage.get('Place', place_id)\n if not place:\n abort(404)\n storage.delete(place)\n storage.save()\n return jsonify({}), 200\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\n@app_views.route('cities/<city_id>/places', strict_slashes=False, methods=[\n 'GET'])\ndef get_all_places(city_id):\n \"\"\" gets all places in a city \"\"\"\n city = storage.get('City', city_id)\n if not city:\n abort(404)\n return jsonify([place.to_dict() for place in city.places]), 200\n\n\n@app_views.route('/places/<place_id>', strict_slashes=False, methods=['GET'])\ndef get_place(place_id):\n \"\"\"Gets a place by place id\"\"\"\n place = storage.get('Place', place_id)\n if not place:\n abort(404)\n return jsonify(place.to_dict()), 200\n\n\n@app_views.route('/places/<place_id>', strict_slashes=False, methods=['DELETE']\n )\ndef delete_place(place_id):\n \"\"\" deletes places\"\"\"\n place = storage.get('Place', place_id)\n if not place:\n abort(404)\n storage.delete(place)\n storage.save()\n return jsonify({}), 200\n\n\n<mask token>\n\n\n@app_views.route('/places/<place_id>', strict_slashes=False, methods=['PUT'])\ndef update_place(place_id):\n \"\"\" updates place \"\"\"\n params = request.get_json()\n if not params:\n abort(400, 'Not a JSON')\n place = storage.get('Place', place_id)\n if not place:\n abort(404)\n for k, v in params.items():\n if k not in ['id', 'user_id', 'city_id', 'create_at', 'updated_at']:\n setattr(place, k, v)\n storage.save()\n return jsonify(place.to_dict()), 200\n",
"step-3": "<mask token>\n\n\n@app_views.route('cities/<city_id>/places', strict_slashes=False, methods=[\n 'GET'])\ndef get_all_places(city_id):\n \"\"\" gets all places in a city \"\"\"\n city = storage.get('City', city_id)\n if not city:\n abort(404)\n return jsonify([place.to_dict() for place in city.places]), 200\n\n\n@app_views.route('/places/<place_id>', strict_slashes=False, methods=['GET'])\ndef get_place(place_id):\n \"\"\"Gets a place by place id\"\"\"\n place = storage.get('Place', place_id)\n if not place:\n abort(404)\n return jsonify(place.to_dict()), 200\n\n\n@app_views.route('/places/<place_id>', strict_slashes=False, methods=['DELETE']\n )\ndef delete_place(place_id):\n \"\"\" deletes places\"\"\"\n place = storage.get('Place', place_id)\n if not place:\n abort(404)\n storage.delete(place)\n storage.save()\n return jsonify({}), 200\n\n\n@app_views.route('/cities/<city_id>/places', strict_slashes=False, methods=\n ['POST'])\ndef post_place(city_id):\n \"\"\"posts a new place to city\"\"\"\n kwargs = request.get_json()\n if not kwargs:\n abort(400, 'Not a JSON')\n elif 'name' not in kwargs:\n abort(400, 'Missing name')\n elif 'user_id' not in kwargs:\n abort(400, 'Missing user_id')\n else:\n city = storage.get('City', city_id)\n user = storage.get('User', kwargs['user_id'])\n if not city:\n abort(404)\n if not user:\n abort(404)\n kwargs['city_id'] = city_id\n kwargs['user_id'] = user.id\n new_place = Place(**kwargs)\n storage.save()\n return jsonify(new_place.to_dict()), 201\n\n\n@app_views.route('/places/<place_id>', strict_slashes=False, methods=['PUT'])\ndef update_place(place_id):\n \"\"\" updates place \"\"\"\n params = request.get_json()\n if not params:\n abort(400, 'Not a JSON')\n place = storage.get('Place', place_id)\n if not place:\n abort(404)\n for k, v in params.items():\n if k not in ['id', 'user_id', 'city_id', 'create_at', 'updated_at']:\n setattr(place, k, v)\n storage.save()\n return jsonify(place.to_dict()), 200\n",
"step-4": "<mask token>\nfrom flask import jsonify, request, Response, abort\nfrom api.v1.views import app_views\nfrom models import storage\nfrom models.place import Place\n\n\n@app_views.route('cities/<city_id>/places', strict_slashes=False, methods=[\n 'GET'])\ndef get_all_places(city_id):\n \"\"\" gets all places in a city \"\"\"\n city = storage.get('City', city_id)\n if not city:\n abort(404)\n return jsonify([place.to_dict() for place in city.places]), 200\n\n\n@app_views.route('/places/<place_id>', strict_slashes=False, methods=['GET'])\ndef get_place(place_id):\n \"\"\"Gets a place by place id\"\"\"\n place = storage.get('Place', place_id)\n if not place:\n abort(404)\n return jsonify(place.to_dict()), 200\n\n\n@app_views.route('/places/<place_id>', strict_slashes=False, methods=['DELETE']\n )\ndef delete_place(place_id):\n \"\"\" deletes places\"\"\"\n place = storage.get('Place', place_id)\n if not place:\n abort(404)\n storage.delete(place)\n storage.save()\n return jsonify({}), 200\n\n\n@app_views.route('/cities/<city_id>/places', strict_slashes=False, methods=\n ['POST'])\ndef post_place(city_id):\n \"\"\"posts a new place to city\"\"\"\n kwargs = request.get_json()\n if not kwargs:\n abort(400, 'Not a JSON')\n elif 'name' not in kwargs:\n abort(400, 'Missing name')\n elif 'user_id' not in kwargs:\n abort(400, 'Missing user_id')\n else:\n city = storage.get('City', city_id)\n user = storage.get('User', kwargs['user_id'])\n if not city:\n abort(404)\n if not user:\n abort(404)\n kwargs['city_id'] = city_id\n kwargs['user_id'] = user.id\n new_place = Place(**kwargs)\n storage.save()\n return jsonify(new_place.to_dict()), 201\n\n\n@app_views.route('/places/<place_id>', strict_slashes=False, methods=['PUT'])\ndef update_place(place_id):\n \"\"\" updates place \"\"\"\n params = request.get_json()\n if not params:\n abort(400, 'Not a JSON')\n place = storage.get('Place', place_id)\n if not place:\n abort(404)\n for k, v in params.items():\n if k not in ['id', 'user_id', 'city_id', 'create_at', 'updated_at']:\n setattr(place, k, v)\n storage.save()\n return jsonify(place.to_dict()), 200\n",
"step-5": "#!/usr/bin/python3\n\"Places module\"\nfrom flask import jsonify, request, Response, abort\nfrom api.v1.views import app_views\nfrom models import storage\nfrom models.place import Place\n\n\n@app_views.route('cities/<city_id>/places', strict_slashes=False,\n methods=['GET'])\ndef get_all_places(city_id):\n ''' gets all places in a city '''\n city = storage.get(\"City\", city_id)\n if not city:\n abort(404)\n\n return jsonify([place.to_dict() for place in city.places]), 200\n\n\n@app_views.route('/places/<place_id>', strict_slashes=False, methods=['GET'])\ndef get_place(place_id):\n \"Gets a place by place id\"\n place = storage.get(\"Place\", place_id)\n if not place:\n abort(404)\n return jsonify(place.to_dict()), 200\n\n\n@app_views.route('/places/<place_id>', strict_slashes=False,\n methods=['DELETE'])\ndef delete_place(place_id):\n ''' deletes places'''\n place = storage.get(\"Place\", place_id)\n if not place:\n abort(404)\n storage.delete(place)\n storage.save()\n return jsonify({}), 200\n\n\n@app_views.route('/cities/<city_id>/places', strict_slashes=False,\n methods=['POST'])\ndef post_place(city_id):\n '''posts a new place to city'''\n kwargs = request.get_json()\n\n if not kwargs:\n abort(400, 'Not a JSON')\n elif 'name' not in kwargs:\n abort(400, 'Missing name')\n elif 'user_id' not in kwargs:\n abort(400, 'Missing user_id')\n else:\n city = storage.get(\"City\", city_id)\n user = storage.get(\"User\", kwargs['user_id'])\n if not city:\n abort(404)\n if not user:\n abort(404)\n\n # overwrites or adds w/ valid state_id in case they provide in post\n kwargs['city_id'] = city_id\n kwargs['user_id'] = user.id\n\n new_place = Place(**kwargs)\n storage.save()\n\n return jsonify(new_place.to_dict()), 201\n\n\n@app_views.route('/places/<place_id>', strict_slashes=False, methods=['PUT'])\ndef update_place(place_id):\n ''' updates place '''\n params = request.get_json()\n if not params:\n abort(400, 'Not a JSON')\n place = storage.get('Place', place_id)\n if not place:\n abort(404)\n for k, v in params.items():\n if k not in ['id', 'user_id', 'city_id', 'create_at', 'updated_at']:\n setattr(place, k, v)\n storage.save()\n return jsonify(place.to_dict()), 200\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
from operator import itemgetter
import math
def get_tf_idf_map(document, max_freq, n_docs, index):
tf_idf_map = {}
for term in document:
tf = 0
idf = math.log(n_docs)
if term in index and term not in tf_idf_map:
posting_list = index[term]
freq_term = sum([post[1] for post in posting_list])
tf = 0.5 + 0.5*(freq_term/max_freq)
idf = math.log(1 + (n_docs/len(posting_list)))
if term not in tf_idf_map:
tf_idf_map[term] = tf * idf
return tf_idf_map
def get_cosinus_simularity(tf_idf_map, key_words):
sum_common_terms = 0
sum_tf_idf_terms = 0
for term in tf_idf_map:
if term in key_words:
sum_common_terms += tf_idf_map[term]
sum_tf_idf_terms += math.pow(tf_idf_map[term],2)
cosinus_similarity = sum_common_terms/(math.sqrt(sum_tf_idf_terms)+math.sqrt(len(key_words)))
return cosinus_similarity
def get_cosinus_ranked_documents(category, tf_idf_map, reference_words, context_words):
ranked_documents = []
for document in tf_idf_map:
referens_simularity = get_cosinus_simularity(tf_idf_map[document],reference_words)
context_simularity = 0
if not referens_simularity == 0:
context_simularity = get_cosinus_simularity(tf_idf_map[document], context_words)
simularity = context_simularity*referens_simularity
if(simularity != 0):
ranked_documents.append((document,simularity))
ranked_documents = sorted(ranked_documents, key=itemgetter(1), reverse=True)
return ranked_documents
|
normal
|
{
"blob_id": "39197b3f9f85d94457584d7e488ca376e52207f1",
"index": 5832,
"step-1": "<mask token>\n\n\ndef get_cosinus_simularity(tf_idf_map, key_words):\n sum_common_terms = 0\n sum_tf_idf_terms = 0\n for term in tf_idf_map:\n if term in key_words:\n sum_common_terms += tf_idf_map[term]\n sum_tf_idf_terms += math.pow(tf_idf_map[term], 2)\n cosinus_similarity = sum_common_terms / (math.sqrt(sum_tf_idf_terms) +\n math.sqrt(len(key_words)))\n return cosinus_similarity\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_cosinus_simularity(tf_idf_map, key_words):\n sum_common_terms = 0\n sum_tf_idf_terms = 0\n for term in tf_idf_map:\n if term in key_words:\n sum_common_terms += tf_idf_map[term]\n sum_tf_idf_terms += math.pow(tf_idf_map[term], 2)\n cosinus_similarity = sum_common_terms / (math.sqrt(sum_tf_idf_terms) +\n math.sqrt(len(key_words)))\n return cosinus_similarity\n\n\ndef get_cosinus_ranked_documents(category, tf_idf_map, reference_words,\n context_words):\n ranked_documents = []\n for document in tf_idf_map:\n referens_simularity = get_cosinus_simularity(tf_idf_map[document],\n reference_words)\n context_simularity = 0\n if not referens_simularity == 0:\n context_simularity = get_cosinus_simularity(tf_idf_map[document\n ], context_words)\n simularity = context_simularity * referens_simularity\n if simularity != 0:\n ranked_documents.append((document, simularity))\n ranked_documents = sorted(ranked_documents, key=itemgetter(1), reverse=True\n )\n return ranked_documents\n",
"step-3": "<mask token>\n\n\ndef get_tf_idf_map(document, max_freq, n_docs, index):\n tf_idf_map = {}\n for term in document:\n tf = 0\n idf = math.log(n_docs)\n if term in index and term not in tf_idf_map:\n posting_list = index[term]\n freq_term = sum([post[1] for post in posting_list])\n tf = 0.5 + 0.5 * (freq_term / max_freq)\n idf = math.log(1 + n_docs / len(posting_list))\n if term not in tf_idf_map:\n tf_idf_map[term] = tf * idf\n return tf_idf_map\n\n\ndef get_cosinus_simularity(tf_idf_map, key_words):\n sum_common_terms = 0\n sum_tf_idf_terms = 0\n for term in tf_idf_map:\n if term in key_words:\n sum_common_terms += tf_idf_map[term]\n sum_tf_idf_terms += math.pow(tf_idf_map[term], 2)\n cosinus_similarity = sum_common_terms / (math.sqrt(sum_tf_idf_terms) +\n math.sqrt(len(key_words)))\n return cosinus_similarity\n\n\ndef get_cosinus_ranked_documents(category, tf_idf_map, reference_words,\n context_words):\n ranked_documents = []\n for document in tf_idf_map:\n referens_simularity = get_cosinus_simularity(tf_idf_map[document],\n reference_words)\n context_simularity = 0\n if not referens_simularity == 0:\n context_simularity = get_cosinus_simularity(tf_idf_map[document\n ], context_words)\n simularity = context_simularity * referens_simularity\n if simularity != 0:\n ranked_documents.append((document, simularity))\n ranked_documents = sorted(ranked_documents, key=itemgetter(1), reverse=True\n )\n return ranked_documents\n",
"step-4": "from operator import itemgetter\nimport math\n\n\ndef get_tf_idf_map(document, max_freq, n_docs, index):\n tf_idf_map = {}\n for term in document:\n tf = 0\n idf = math.log(n_docs)\n if term in index and term not in tf_idf_map:\n posting_list = index[term]\n freq_term = sum([post[1] for post in posting_list])\n tf = 0.5 + 0.5 * (freq_term / max_freq)\n idf = math.log(1 + n_docs / len(posting_list))\n if term not in tf_idf_map:\n tf_idf_map[term] = tf * idf\n return tf_idf_map\n\n\ndef get_cosinus_simularity(tf_idf_map, key_words):\n sum_common_terms = 0\n sum_tf_idf_terms = 0\n for term in tf_idf_map:\n if term in key_words:\n sum_common_terms += tf_idf_map[term]\n sum_tf_idf_terms += math.pow(tf_idf_map[term], 2)\n cosinus_similarity = sum_common_terms / (math.sqrt(sum_tf_idf_terms) +\n math.sqrt(len(key_words)))\n return cosinus_similarity\n\n\ndef get_cosinus_ranked_documents(category, tf_idf_map, reference_words,\n context_words):\n ranked_documents = []\n for document in tf_idf_map:\n referens_simularity = get_cosinus_simularity(tf_idf_map[document],\n reference_words)\n context_simularity = 0\n if not referens_simularity == 0:\n context_simularity = get_cosinus_simularity(tf_idf_map[document\n ], context_words)\n simularity = context_simularity * referens_simularity\n if simularity != 0:\n ranked_documents.append((document, simularity))\n ranked_documents = sorted(ranked_documents, key=itemgetter(1), reverse=True\n )\n return ranked_documents\n",
"step-5": "from operator import itemgetter\nimport math\n\ndef get_tf_idf_map(document, max_freq, n_docs, index):\n tf_idf_map = {}\n \n for term in document:\n tf = 0\n idf = math.log(n_docs)\n if term in index and term not in tf_idf_map: \n posting_list = index[term]\n freq_term = sum([post[1] for post in posting_list]) \n tf = 0.5 + 0.5*(freq_term/max_freq)\n idf = math.log(1 + (n_docs/len(posting_list)))\n if term not in tf_idf_map:\n tf_idf_map[term] = tf * idf\n\n return tf_idf_map\n\ndef get_cosinus_simularity(tf_idf_map, key_words):\n sum_common_terms = 0\n sum_tf_idf_terms = 0\n for term in tf_idf_map:\n if term in key_words:\n sum_common_terms += tf_idf_map[term]\n sum_tf_idf_terms += math.pow(tf_idf_map[term],2)\n cosinus_similarity = sum_common_terms/(math.sqrt(sum_tf_idf_terms)+math.sqrt(len(key_words)))\n return cosinus_similarity \n\ndef get_cosinus_ranked_documents(category, tf_idf_map, reference_words, context_words):\n ranked_documents = [] \n for document in tf_idf_map:\n referens_simularity = get_cosinus_simularity(tf_idf_map[document],reference_words)\n context_simularity = 0\n if not referens_simularity == 0:\n context_simularity = get_cosinus_simularity(tf_idf_map[document], context_words)\n simularity = context_simularity*referens_simularity\n if(simularity != 0):\n ranked_documents.append((document,simularity)) \n ranked_documents = sorted(ranked_documents, key=itemgetter(1), reverse=True)\n return ranked_documents",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import pywikibot
from pywikibot import pagegenerators
import re
from pywikibot import xmlreader
import datetime
import collections
from klasa import *
def fraz(data):
data_slownie = data[6:8] + '.' + data[4:6] + '.' + data[0:4]
lista_stron = getListFromXML(data)
site = pywikibot.Site('pl', 'wiktionary')
outputPage = pywikibot.Page(site, 'Wikipedysta:AlkamidBot/listy/związki_frazeologiczne')
logfile = 'log/fraz.txt'
tempLangs = []
notFound = []
text = 'Hasła, które określone zostały jako związek frazeologiczny, lecz nie widnieją w indeksie związków frazeologicznych odpowiednim dla danego języka. Ostatnia aktualizacja: %s\n' % (data_slownie)
phraseList = {}
notFoundList = collections.defaultdict(list)
LangsMediaWiki = getAllLanguages()
# prepare a dictionary of phrase indexes. If an index page doesn't exist
# assign a blank page to it
for a in LangsMediaWiki:
#print a.shortName
indexPageName = 'Indeks:{0}_-_Związki_frazeologiczne'.format(a.upperName)
try: phraseList[a.shortName] = pywikibot.Page(site, indexPageName).get()
except pywikibot.NoPage:
phraseList['%s' % a.shortName] = ''
except pywikibot.IsRedirectPage:
print('redirect')
for a in lista_stron:
try: word = Haslo(a)
except notFromMainNamespace:
continue
except sectionsNotFound:
continue
except WrongHeader:
continue
else:
if word.type == 3:
for lang in word.listLangs:
if lang.type != 2:
lang.pola()
try: lang.subSections['znaczenia'].text
except AttributeError:
pass
except KeyError:
with open(logfile, 'a+', encoding='utf-8') as lf:
lf.write('\n"znaczenia" not found; word: {0}; lang: {1}'.format(word.title, lang.lang))
else:
if lang.type != 2 and 'związek frazeologiczny' in lang.subSections['znaczenia'].text and '[[{0}]]'.format(word.title) not in phraseList[lang.lang]:
notFoundList['%s' % lang.lang].append(word.title)
for a in LangsMediaWiki:
if notFoundList['%s' % a.shortName]:
text += '== [[Indeks:%s_-_Związki_frazeologiczne|%s]] ==' % (a.upperName, a.longName)
for b in notFoundList['%s' % a.shortName]:
text += '\n*[[%s]] <nowiki>| *[[%s]]</nowiki> →' % (b, b)
text += '\n'
with open('output/fraz.txt', encoding='utf-8', mode='w') as f:
f.write(text)
outputPage.text = text
outputPage.save(comment="Aktualizacja listy", botflag=False)
|
normal
|
{
"blob_id": "2b928dad60bfb0ba863e9039a5462faa885644f3",
"index": 4643,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef fraz(data):\n data_slownie = data[6:8] + '.' + data[4:6] + '.' + data[0:4]\n lista_stron = getListFromXML(data)\n site = pywikibot.Site('pl', 'wiktionary')\n outputPage = pywikibot.Page(site,\n 'Wikipedysta:AlkamidBot/listy/związki_frazeologiczne')\n logfile = 'log/fraz.txt'\n tempLangs = []\n notFound = []\n text = (\n \"\"\"Hasła, które określone zostały jako związek frazeologiczny, lecz nie widnieją w indeksie związków frazeologicznych odpowiednim dla danego języka. Ostatnia aktualizacja: %s\n\"\"\"\n % data_slownie)\n phraseList = {}\n notFoundList = collections.defaultdict(list)\n LangsMediaWiki = getAllLanguages()\n for a in LangsMediaWiki:\n indexPageName = 'Indeks:{0}_-_Związki_frazeologiczne'.format(a.\n upperName)\n try:\n phraseList[a.shortName] = pywikibot.Page(site, indexPageName).get()\n except pywikibot.NoPage:\n phraseList['%s' % a.shortName] = ''\n except pywikibot.IsRedirectPage:\n print('redirect')\n for a in lista_stron:\n try:\n word = Haslo(a)\n except notFromMainNamespace:\n continue\n except sectionsNotFound:\n continue\n except WrongHeader:\n continue\n else:\n if word.type == 3:\n for lang in word.listLangs:\n if lang.type != 2:\n lang.pola()\n try:\n lang.subSections['znaczenia'].text\n except AttributeError:\n pass\n except KeyError:\n with open(logfile, 'a+', encoding='utf-8') as lf:\n lf.write(\n '\\n\"znaczenia\" not found; word: {0}; lang: {1}'\n .format(word.title, lang.lang))\n else:\n if (lang.type != 2 and 'związek frazeologiczny' in\n lang.subSections['znaczenia'].text and \n '[[{0}]]'.format(word.title) not in phraseList[\n lang.lang]):\n notFoundList['%s' % lang.lang].append(word.title)\n for a in LangsMediaWiki:\n if notFoundList['%s' % a.shortName]:\n text += '== [[Indeks:%s_-_Związki_frazeologiczne|%s]] ==' % (a.\n upperName, a.longName)\n for b in notFoundList['%s' % a.shortName]:\n text += '\\n*[[%s]] <nowiki>| *[[%s]]</nowiki> →' % (b, b)\n text += '\\n'\n with open('output/fraz.txt', encoding='utf-8', mode='w') as f:\n f.write(text)\n outputPage.text = text\n outputPage.save(comment='Aktualizacja listy', botflag=False)\n",
"step-3": "import pywikibot\nfrom pywikibot import pagegenerators\nimport re\nfrom pywikibot import xmlreader\nimport datetime\nimport collections\nfrom klasa import *\n\n\ndef fraz(data):\n data_slownie = data[6:8] + '.' + data[4:6] + '.' + data[0:4]\n lista_stron = getListFromXML(data)\n site = pywikibot.Site('pl', 'wiktionary')\n outputPage = pywikibot.Page(site,\n 'Wikipedysta:AlkamidBot/listy/związki_frazeologiczne')\n logfile = 'log/fraz.txt'\n tempLangs = []\n notFound = []\n text = (\n \"\"\"Hasła, które określone zostały jako związek frazeologiczny, lecz nie widnieją w indeksie związków frazeologicznych odpowiednim dla danego języka. Ostatnia aktualizacja: %s\n\"\"\"\n % data_slownie)\n phraseList = {}\n notFoundList = collections.defaultdict(list)\n LangsMediaWiki = getAllLanguages()\n for a in LangsMediaWiki:\n indexPageName = 'Indeks:{0}_-_Związki_frazeologiczne'.format(a.\n upperName)\n try:\n phraseList[a.shortName] = pywikibot.Page(site, indexPageName).get()\n except pywikibot.NoPage:\n phraseList['%s' % a.shortName] = ''\n except pywikibot.IsRedirectPage:\n print('redirect')\n for a in lista_stron:\n try:\n word = Haslo(a)\n except notFromMainNamespace:\n continue\n except sectionsNotFound:\n continue\n except WrongHeader:\n continue\n else:\n if word.type == 3:\n for lang in word.listLangs:\n if lang.type != 2:\n lang.pola()\n try:\n lang.subSections['znaczenia'].text\n except AttributeError:\n pass\n except KeyError:\n with open(logfile, 'a+', encoding='utf-8') as lf:\n lf.write(\n '\\n\"znaczenia\" not found; word: {0}; lang: {1}'\n .format(word.title, lang.lang))\n else:\n if (lang.type != 2 and 'związek frazeologiczny' in\n lang.subSections['znaczenia'].text and \n '[[{0}]]'.format(word.title) not in phraseList[\n lang.lang]):\n notFoundList['%s' % lang.lang].append(word.title)\n for a in LangsMediaWiki:\n if notFoundList['%s' % a.shortName]:\n text += '== [[Indeks:%s_-_Związki_frazeologiczne|%s]] ==' % (a.\n upperName, a.longName)\n for b in notFoundList['%s' % a.shortName]:\n text += '\\n*[[%s]] <nowiki>| *[[%s]]</nowiki> →' % (b, b)\n text += '\\n'\n with open('output/fraz.txt', encoding='utf-8', mode='w') as f:\n f.write(text)\n outputPage.text = text\n outputPage.save(comment='Aktualizacja listy', botflag=False)\n",
"step-4": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport pywikibot\nfrom pywikibot import pagegenerators\nimport re\nfrom pywikibot import xmlreader\nimport datetime\nimport collections\nfrom klasa import *\n\ndef fraz(data):\n\n data_slownie = data[6:8] + '.' + data[4:6] + '.' + data[0:4]\n\n lista_stron = getListFromXML(data)\n site = pywikibot.Site('pl', 'wiktionary')\n outputPage = pywikibot.Page(site, 'Wikipedysta:AlkamidBot/listy/związki_frazeologiczne')\n logfile = 'log/fraz.txt'\n\n tempLangs = []\n\n notFound = []\n text = 'Hasła, które określone zostały jako związek frazeologiczny, lecz nie widnieją w indeksie związków frazeologicznych odpowiednim dla danego języka. Ostatnia aktualizacja: %s\\n' % (data_slownie)\n phraseList = {}\n notFoundList = collections.defaultdict(list)\n\n LangsMediaWiki = getAllLanguages()\n\n # prepare a dictionary of phrase indexes. If an index page doesn't exist\n # assign a blank page to it\n\n for a in LangsMediaWiki:\n #print a.shortName\n indexPageName = 'Indeks:{0}_-_Związki_frazeologiczne'.format(a.upperName)\n \n try: phraseList[a.shortName] = pywikibot.Page(site, indexPageName).get()\n except pywikibot.NoPage:\n phraseList['%s' % a.shortName] = ''\n except pywikibot.IsRedirectPage:\n print('redirect')\n\n\n for a in lista_stron:\n try: word = Haslo(a)\n except notFromMainNamespace:\n continue\n except sectionsNotFound:\n continue\n except WrongHeader:\n continue\n else:\n if word.type == 3:\n for lang in word.listLangs:\n if lang.type != 2:\n lang.pola()\n try: lang.subSections['znaczenia'].text\n except AttributeError:\n pass\n except KeyError:\n with open(logfile, 'a+', encoding='utf-8') as lf:\n lf.write('\\n\"znaczenia\" not found; word: {0}; lang: {1}'.format(word.title, lang.lang))\n else:\n if lang.type != 2 and 'związek frazeologiczny' in lang.subSections['znaczenia'].text and '[[{0}]]'.format(word.title) not in phraseList[lang.lang]:\n notFoundList['%s' % lang.lang].append(word.title)\n\n for a in LangsMediaWiki:\n if notFoundList['%s' % a.shortName]:\n text += '== [[Indeks:%s_-_Związki_frazeologiczne|%s]] ==' % (a.upperName, a.longName)\n for b in notFoundList['%s' % a.shortName]:\n text += '\\n*[[%s]] <nowiki>| *[[%s]]</nowiki> →' % (b, b)\n text += '\\n'\n\n with open('output/fraz.txt', encoding='utf-8', mode='w') as f:\n f.write(text)\n\n outputPage.text = text\n outputPage.save(comment=\"Aktualizacja listy\", botflag=False)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class BubbleTypes(Enum):
USER = auto()
SYSTEM = auto()
STATUS = auto()
INFO = auto()
def __str__(self):
return str(self.value)
class Relations(Enum):
UNDERMINE = 'undermine'
UNDERCUT = 'undercut'
REBUT = 'rebut'
SUPPORT = 'support'
def __str__(self):
return str(self.value)
class Attitudes(Enum):
AGREE = 'agree'
DISAGREE = 'disagree'
DONT_KNOW = 'dontknow'
def __str__(self):
return str(self.value)
<|reserved_special_token_0|>
def escape_string(text):
"""
Escapes all html special chars.
:param text: string
:return: html.escape(text)
"""
return escape(text)
def get_discussion_language(matchdict, params, session, current_issue_uid=None
):
"""
Returns Language.ui_locales
CALL AFTER issue_handler.get_id_of_slug(..)!
:param matchdict: matchdict of the current request
:param params: params of the current request
:param session: session of the current request
:param current_issue_uid: uid
:return:
"""
if not current_issue_uid:
current_issue = DBDiscussionSession.query(Issue).filter(Issue.
is_disabled == False, Issue.is_private == False).first()
current_issue_uid = current_issue.uid if current_issue else None
issue = matchdict['issue'] if 'issue' in matchdict else params['issue'
] if 'issue' in params else session['issue'
] if 'issue' in session else current_issue_uid
db_issue = DBDiscussionSession.query(Issue).get(issue)
return db_issue.lang if db_issue else 'en'
def python_datetime_pretty_print(ts, lang):
"""
Pretty print of a locale
:param ts: Timestamp
:param lang: ui_locales
:return: String
"""
formatter = '%b. %d.'
if lang == 'de':
try:
locale.setlocale(locale.LC_TIME, 'de_DE.UTF-8')
formatter = '%d. %b.'
except locale.Error:
locale.setlocale(locale.LC_TIME, 'en_US.UTF8')
return datetime.strptime(str(ts), '%Y-%m-%d').strftime(formatter)
<|reserved_special_token_0|>
def __get_undercuts_of_argument(argument_uid, include_disabled):
"""
Returns all undercuts fo the given argument
:param argument_uid: Argument.uid
:param include_disabled: boolean
:return: list of Arguments
"""
db_undercuts = DBDiscussionSession.query(Argument).filter_by(argument_uid
=argument_uid)
if not include_disabled:
db_undercuts = db_undercuts.filter_by(is_disabled=False)
return db_undercuts.all() if db_undercuts else []
<|reserved_special_token_0|>
def get_all_arguments_with_text_by_statement_id(statement_uid):
"""
Given a statement_uid, it returns all arguments, which use this statement and adds
the corresponding text to it, which normally appears in the bubbles. The resulting
text depends on the provided language.
:param statement_uid: uid to a statement, which should be analyzed
:return: list of dictionaries containing some properties of these arguments
:rtype: list
"""
logger('DBAS.LIB', 'main ' + str(statement_uid))
arguments = get_all_arguments_by_statement(statement_uid)
results = []
if arguments:
results = [{'uid': arg.uid, 'text': get_text_for_argument_uid(arg.
uid)} for arg in arguments]
return results
<|reserved_special_token_0|>
def get_slug_by_statement_uid(uid):
"""
Returns slug for the given Issue.uid
:param uid: Issue.uid
:return: String
"""
db_statement = DBDiscussionSession.query(Statement).get(uid)
return resolve_issue_uid_to_slug(db_statement.issue_uid)
def get_text_for_argument_uid(uid, nickname=None, with_html_tag=False,
start_with_intro=False, first_arg_by_user=False, user_changed_opinion=
False, rearrange_intro=False, colored_position=False, attack_type=None,
minimize_on_undercut=False, is_users_opinion=True, anonymous_style=
False, support_counter_argument=False):
"""
Returns current argument as string like "conclusion, because premise1 and premise2"
:param uid: Integer
:param with_html_tag: Boolean
:param start_with_intro: Boolean
:param first_arg_by_user: Boolean
:param user_changed_opinion: Boolean
:param rearrange_intro: Boolean
:param colored_position: Boolean
:param attack_type: String
:param minimize_on_undercut: Boolean
:param anonymous_style: Boolean
:param support_counter_argument: Boolean
:return: String
"""
logger('DBAS.LIB', 'main {}'.format(uid))
db_argument = DBDiscussionSession.query(Argument).get(uid)
if not db_argument:
return None
lang = db_argument.lang
_t = Translator(lang)
premisegroup_by_user = False
author_uid = None
db_user = DBDiscussionSession.query(User).filter_by(nickname=str(nickname)
).first()
if db_user:
author_uid = db_user.uid
pgroup = DBDiscussionSession.query(PremiseGroup).get(db_argument.
premisegroup_uid)
marked_argument = DBDiscussionSession.query(MarkedArgument).filter_by(
argument_uid=uid, author_uid=db_user.uid).first()
premisegroup_by_user = (pgroup.author_uid == db_user.uid or
marked_argument is not None)
arg_array = [db_argument]
while db_argument.argument_uid:
db_argument = DBDiscussionSession.query(Argument).get(db_argument.
argument_uid)
arg_array.append(db_argument)
if attack_type == 'jump':
return __build_argument_for_jump(arg_array, with_html_tag)
if len(arg_array) == 1:
return __build_single_argument(arg_array[0], rearrange_intro,
with_html_tag, colored_position, attack_type, _t,
start_with_intro, is_users_opinion, anonymous_style,
support_counter_argument, author_uid)
else:
return __build_nested_argument(arg_array, first_arg_by_user,
user_changed_opinion, with_html_tag, start_with_intro,
minimize_on_undercut, anonymous_style, premisegroup_by_user, _t)
<|reserved_special_token_0|>
def __build_val_for_jump(db_argument, tag_premise, tag_conclusion, tag_end, _t
):
premises = db_argument.get_premisegroup_text()
if premises[-1] != '.':
premises += '.'
conclusion = db_argument.get_conclusion_text()
because = _t.get(_.because).lower()
conclusion = tag_conclusion + conclusion + tag_end
premises = tag_premise + premises + tag_end
intro = start_con + _t.get(_.isNotRight).lower(
) + end_tag if not db_argument.is_supportive else ''
ret_value = '{} {} {} {}'.format(conclusion, intro, because, premises)
if _t.get_lang() == 'de':
intro = _t.get(_.itIsTrueThatAnonymous
) if db_argument.is_supportive else _t.get(_.itIsFalseThatAnonymous
)
intro = intro[0:1].upper() + intro[1:]
intro = (start_pro if db_argument.is_supportive else start_con
) + intro + end_tag
ret_value = '{} {}, {} {}'.format(intro, conclusion, because, premises)
return ret_value
<|reserved_special_token_0|>
def __build_nested_argument(arg_array: List[Argument], first_arg_by_user,
user_changed_opinion, with_html_tag, start_with_intro,
minimize_on_undercut, anonymous_style, premisegroup_by_user, _t):
"""
:param arg_array:
:param first_arg_by_user:
:param user_changed_opinion:
:param with_html_tag:
:param start_with_intro:
:param minimize_on_undercut:
:param anonymous_style:
:param premisegroup_by_user:
:param _t:
:return:
"""
pgroups = []
supportive = []
arg_array = arg_array[::-1]
local_lang = arg_array[0].lang
for db_argument in arg_array:
text = db_argument.get_premisegroup_text()
pgroups.append(text)
supportive.append(db_argument.is_supportive)
conclusion = arg_array[0].get_conclusion_text()
sb = start_position if with_html_tag else ''
se = end_tag if with_html_tag else ''
because = (', ' if local_lang == 'de' else ' ') + _t.get(_.because).lower(
) + ' '
if len(arg_array
) % 2 is 0 and not first_arg_by_user and not anonymous_style:
ret_value = _t.get(_.earlierYouArguedThat if user_changed_opinion else
_.otherUsersSaidThat) + ' '
tmp_users_opinion = True
elif not anonymous_style:
ret_value = _t.get(_.soYourOpinionIsThat
) + ': ' if start_with_intro else ''
tmp_users_opinion = False
conclusion = se + conclusion[0:1].upper() + conclusion[1:]
else:
ret_value = _t.get(_.someoneArgued) + ' '
tmp_users_opinion = False
tmp = _t.get(_.itFalseIsThat) + ' ' if not supportive[0] else ''
ret_value += tmp + conclusion + because + pgroups[0] + '.'
del pgroups[0]
if minimize_on_undercut and not user_changed_opinion and len(pgroups) > 2:
return _t.get(_.butYouCounteredWith).strip() + ' ' + sb + pgroups[
len(pgroups) - 1] + se + '.'
for i, pgroup in enumerate(pgroups):
ret_value += ' '
if tmp_users_opinion and not anonymous_style:
tmp = (_.butYouCounteredWithArgument if premisegroup_by_user else
_.butYouCounteredWithInterest)
ret_value += _t.get(_.otherParticipantsConvincedYouThat if
user_changed_opinion else tmp)
elif not anonymous_style:
ret_value += _t.get(_.youAgreeWithThatNow)
else:
ret_value += _t.get(_.otherUsersSaidThat) if i == 0 else _t.get(_
.thenOtherUsersSaidThat)
ret_value += sb + ' ' + pgroups[i] + '.'
tmp_users_opinion = not tmp_users_opinion
return ret_value.replace(' ', ' ')
def get_text_for_premisegroup_uid(uid):
"""
Returns joined text of the premise group and the premise ids
:param uid: premisegroup_uid
:return: text, uids
"""
warnings.warn('Use PremiseGroup.get_text() instead.', DeprecationWarning)
db_premises = DBDiscussionSession.query(Premise).filter_by(premisegroup_uid
=uid).join(Statement).all()
if len(db_premises) == 0:
return ''
texts = [premise.get_text() for premise in db_premises]
lang = DBDiscussionSession.query(Statement).get(db_premises[0].
statements.uid).lang
_t = Translator(lang)
return ' {} '.format(_t.get(_.aand)).join(texts)
<|reserved_special_token_0|>
def get_text_for_premise(uid: int, colored_position: bool=False):
"""
Returns text of premise with given uid
:param uid: Statement.uid
:param colored_position: Boolean
:return: String
"""
db_premise = DBDiscussionSession.query(Premise).get(uid)
if db_premise:
return db_premise.get_text(html=colored_position)
else:
return None
def get_text_for_conclusion(argument, start_with_intro=False,
rearrange_intro=False, is_users_opinion=True):
"""
Check the arguments conclusion whether it is an statement or an argument and returns the text
:param argument: Argument
:param start_with_intro: Boolean
:param rearrange_intro: Boolean
:return: String
"""
if argument.argument_uid:
return get_text_for_argument_uid(argument.argument_uid,
start_with_intro, rearrange_intro=rearrange_intro,
is_users_opinion=is_users_opinion)
else:
return argument.get_conclusion_text()
<|reserved_special_token_0|>
def get_user_by_private_or_public_nickname(nickname):
"""
Gets the user by his (public) nickname, based on the option, whether his nickname is public or not
:param nickname: Nickname of the user
:return: Current user or None
"""
db_user = get_user_by_case_insensitive_nickname(nickname)
db_public_user = get_user_by_case_insensitive_public_nickname(nickname)
uid = 0
if db_user:
uid = db_user.uid
elif db_public_user:
uid = db_public_user.uid
db_settings = DBDiscussionSession.query(Settings).filter_by(author_uid=uid
).first()
if not db_settings:
return None
if db_settings.should_show_public_nickname and db_user:
return db_user
elif not db_settings.should_show_public_nickname and db_public_user:
return db_public_user
return None
def get_user_by_case_insensitive_nickname(nickname):
"""
Returns user with given nickname
:param nickname: String
:return: User or None
"""
return DBDiscussionSession.query(User).filter(func.lower(User.nickname) ==
func.lower(nickname)).first()
<|reserved_special_token_0|>
def __get_text_for_click_and_mark_count(nickname, is_user, argument_uid,
statement_uid, speech, lang):
"""
Build text for a bubble, how many other participants have the same interest?
:param nickname: User.nickname
:param is_user: boolean
:param argument_uid: Argument.uid
:param statement_uid: Statement.uid
:param speech: dict()
:param lang: ui_locales
:return: [String]
"""
if not nickname:
nickname = 'anonymous'
db_user = DBDiscussionSession.query(User).filter_by(nickname=nickname
).first()
if not db_user:
db_user = DBDiscussionSession.query(User).filter_by(nickname=
'anonymous').first()
db_clicks, db_marks = __get_clicks_and_marks(argument_uid,
statement_uid, db_user)
_t = Translator(lang)
speech['votecounts'] = len(db_clicks) if db_clicks else 0
if db_marks:
speech['votecounts'] += len(db_marks)
votecount_keys = defaultdict(lambda : '{} {}.'.format(speech[
'votecounts'], _t.get(_.voteCountTextMore)))
if is_user and db_user.gender == 'm':
gender_key = _.voteCountTextFirstM
elif is_user and db_user.gender == 'f':
gender_key = _.voteCountTextFirstF
else:
gender_key = _.voteCountTextFirst
votecount_keys[0] = '{}.'.format(_t.get(gender_key))
votecount_keys[1] = _t.get(_.voteCountTextOneOther) + '.'
return votecount_keys
def __get_clicks_and_marks(argument_uid, statement_uid, db_user):
db_clicks = None
db_marks = None
if argument_uid:
db_clicks = DBDiscussionSession.query(ClickedArgument).filter(
ClickedArgument.argument_uid == argument_uid, ClickedArgument.
is_up_vote == True, ClickedArgument.is_valid, ClickedArgument.
author_uid != db_user.uid).all()
db_marks = DBDiscussionSession.query(MarkedArgument).filter(
MarkedArgument.argument_uid == argument_uid, MarkedArgument.
author_uid != db_user.uid).all()
elif statement_uid:
db_clicks = DBDiscussionSession.query(ClickedStatement).filter(
ClickedStatement.statement_uid == statement_uid,
ClickedStatement.is_up_vote == True, ClickedStatement.is_valid,
ClickedStatement.author_uid != db_user.uid).all()
db_marks = DBDiscussionSession.query(MarkedStatement).filter(
MarkedStatement.statement_uid == statement_uid, MarkedStatement
.author_uid != db_user.uid).all()
return db_clicks, db_marks
def is_argument_disabled_due_to_disabled_statements(argument):
"""
Returns true if any involved statement is disabled.
:param argument: Argument
:return: Boolean
"""
if argument.conclusion_uid is None:
db_argument = DBDiscussionSession.query(Argument).get(argument.
argument_uid)
conclusion = DBDiscussionSession(Statement).get(db_argument.
conclusion_uid)
if conclusion.is_disabled:
return True
premises = __get_all_premises_of_argument(db_argument)
for premise in premises:
if premise.statements.is_disabled:
return True
else:
print(argument.conclusion_uid)
conclusion = DBDiscussionSession.query(Statement).get(argument.
conclusion_uid)
if conclusion.is_disabled:
return True
premises = __get_all_premises_of_argument(argument)
for premise in premises:
if premise.statements.is_disabled:
return True
return False
def is_author_of_statement(db_user: User, statement_uid: int) ->bool:
"""
Is the user with given nickname author of the statement?
:param db_user: User
:param statement_uid: Statement.uid
:return: Boolean
"""
db_user = (db_user if db_user and db_user.nickname !=
nick_of_anonymous_user else None)
if not db_user:
return False
db_textversion = DBDiscussionSession.query(TextVersion).filter_by(
statement_uid=statement_uid).order_by(TextVersion.uid.asc()).first()
if not db_textversion:
return False
return db_textversion.author_uid == db_user.uid
<|reserved_special_token_0|>
def get_profile_picture(user: User, size: int=80, ignore_privacy_settings:
bool=False):
"""
Returns the url to a https://secure.gravatar.com picture, with the option wavatar and size of 80px
:param user: User
:param size: Integer, default 80
:param ignore_privacy_settings:
:return: String
"""
additional_id = ''
if user and isinstance(user, User):
additional_id = ('' if user.settings.should_show_public_nickname or
ignore_privacy_settings else 'x')
return __get_gravatar(user, additional_id, size)
<|reserved_special_token_0|>
def get_author_data(uid, gravatar_on_right_side=True,
linked_with_users_page=True, profile_picture_size=20):
"""
Returns a-tag with gravatar of current author and users page as href
:param uid: Uid of the author
:param gravatar_on_right_side: True, if the gravatar is on the right of authors name
:param linked_with_users_page: True, if the text is a link to the authors site
:param profile_picture_size: Integer
:return: HTML-String
"""
db_user = DBDiscussionSession.query(User).get(int(uid))
if not db_user:
return None, 'Missing author with uid ' + str(uid), False
nick = db_user.global_nickname
img_src = get_profile_picture(db_user, profile_picture_size)
link_begin = ''
link_end = ''
if linked_with_users_page:
link_begin = '<a href="/user/{}" title="{}">'.format(db_user.uid, nick)
link_end = '</a>'
side = 'left' if gravatar_on_right_side else 'right'
img = '<img class="img-circle" src="{}" style="padding-{}: 0.3em">'.format(
img_src, side)
if gravatar_on_right_side:
return db_user, '{}{}{}{}'.format(link_begin, nick, img, link_end
), True
else:
return db_user, '{}{}{}{}'.format(link_begin, img, nick, link_end
), True
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class BubbleTypes(Enum):
USER = auto()
SYSTEM = auto()
STATUS = auto()
INFO = auto()
def __str__(self):
return str(self.value)
class Relations(Enum):
UNDERMINE = 'undermine'
UNDERCUT = 'undercut'
REBUT = 'rebut'
SUPPORT = 'support'
def __str__(self):
return str(self.value)
class Attitudes(Enum):
AGREE = 'agree'
DISAGREE = 'disagree'
DONT_KNOW = 'dontknow'
def __str__(self):
return str(self.value)
<|reserved_special_token_0|>
def escape_string(text):
"""
Escapes all html special chars.
:param text: string
:return: html.escape(text)
"""
return escape(text)
def get_discussion_language(matchdict, params, session, current_issue_uid=None
):
"""
Returns Language.ui_locales
CALL AFTER issue_handler.get_id_of_slug(..)!
:param matchdict: matchdict of the current request
:param params: params of the current request
:param session: session of the current request
:param current_issue_uid: uid
:return:
"""
if not current_issue_uid:
current_issue = DBDiscussionSession.query(Issue).filter(Issue.
is_disabled == False, Issue.is_private == False).first()
current_issue_uid = current_issue.uid if current_issue else None
issue = matchdict['issue'] if 'issue' in matchdict else params['issue'
] if 'issue' in params else session['issue'
] if 'issue' in session else current_issue_uid
db_issue = DBDiscussionSession.query(Issue).get(issue)
return db_issue.lang if db_issue else 'en'
def python_datetime_pretty_print(ts, lang):
"""
Pretty print of a locale
:param ts: Timestamp
:param lang: ui_locales
:return: String
"""
formatter = '%b. %d.'
if lang == 'de':
try:
locale.setlocale(locale.LC_TIME, 'de_DE.UTF-8')
formatter = '%d. %b.'
except locale.Error:
locale.setlocale(locale.LC_TIME, 'en_US.UTF8')
return datetime.strptime(str(ts), '%Y-%m-%d').strftime(formatter)
<|reserved_special_token_0|>
def __get_undercuts_of_argument(argument_uid, include_disabled):
"""
Returns all undercuts fo the given argument
:param argument_uid: Argument.uid
:param include_disabled: boolean
:return: list of Arguments
"""
db_undercuts = DBDiscussionSession.query(Argument).filter_by(argument_uid
=argument_uid)
if not include_disabled:
db_undercuts = db_undercuts.filter_by(is_disabled=False)
return db_undercuts.all() if db_undercuts else []
<|reserved_special_token_0|>
def get_all_arguments_with_text_by_statement_id(statement_uid):
"""
Given a statement_uid, it returns all arguments, which use this statement and adds
the corresponding text to it, which normally appears in the bubbles. The resulting
text depends on the provided language.
:param statement_uid: uid to a statement, which should be analyzed
:return: list of dictionaries containing some properties of these arguments
:rtype: list
"""
logger('DBAS.LIB', 'main ' + str(statement_uid))
arguments = get_all_arguments_by_statement(statement_uid)
results = []
if arguments:
results = [{'uid': arg.uid, 'text': get_text_for_argument_uid(arg.
uid)} for arg in arguments]
return results
<|reserved_special_token_0|>
def get_slug_by_statement_uid(uid):
"""
Returns slug for the given Issue.uid
:param uid: Issue.uid
:return: String
"""
db_statement = DBDiscussionSession.query(Statement).get(uid)
return resolve_issue_uid_to_slug(db_statement.issue_uid)
def get_text_for_argument_uid(uid, nickname=None, with_html_tag=False,
start_with_intro=False, first_arg_by_user=False, user_changed_opinion=
False, rearrange_intro=False, colored_position=False, attack_type=None,
minimize_on_undercut=False, is_users_opinion=True, anonymous_style=
False, support_counter_argument=False):
"""
Returns current argument as string like "conclusion, because premise1 and premise2"
:param uid: Integer
:param with_html_tag: Boolean
:param start_with_intro: Boolean
:param first_arg_by_user: Boolean
:param user_changed_opinion: Boolean
:param rearrange_intro: Boolean
:param colored_position: Boolean
:param attack_type: String
:param minimize_on_undercut: Boolean
:param anonymous_style: Boolean
:param support_counter_argument: Boolean
:return: String
"""
logger('DBAS.LIB', 'main {}'.format(uid))
db_argument = DBDiscussionSession.query(Argument).get(uid)
if not db_argument:
return None
lang = db_argument.lang
_t = Translator(lang)
premisegroup_by_user = False
author_uid = None
db_user = DBDiscussionSession.query(User).filter_by(nickname=str(nickname)
).first()
if db_user:
author_uid = db_user.uid
pgroup = DBDiscussionSession.query(PremiseGroup).get(db_argument.
premisegroup_uid)
marked_argument = DBDiscussionSession.query(MarkedArgument).filter_by(
argument_uid=uid, author_uid=db_user.uid).first()
premisegroup_by_user = (pgroup.author_uid == db_user.uid or
marked_argument is not None)
arg_array = [db_argument]
while db_argument.argument_uid:
db_argument = DBDiscussionSession.query(Argument).get(db_argument.
argument_uid)
arg_array.append(db_argument)
if attack_type == 'jump':
return __build_argument_for_jump(arg_array, with_html_tag)
if len(arg_array) == 1:
return __build_single_argument(arg_array[0], rearrange_intro,
with_html_tag, colored_position, attack_type, _t,
start_with_intro, is_users_opinion, anonymous_style,
support_counter_argument, author_uid)
else:
return __build_nested_argument(arg_array, first_arg_by_user,
user_changed_opinion, with_html_tag, start_with_intro,
minimize_on_undercut, anonymous_style, premisegroup_by_user, _t)
<|reserved_special_token_0|>
def __build_val_for_jump(db_argument, tag_premise, tag_conclusion, tag_end, _t
):
premises = db_argument.get_premisegroup_text()
if premises[-1] != '.':
premises += '.'
conclusion = db_argument.get_conclusion_text()
because = _t.get(_.because).lower()
conclusion = tag_conclusion + conclusion + tag_end
premises = tag_premise + premises + tag_end
intro = start_con + _t.get(_.isNotRight).lower(
) + end_tag if not db_argument.is_supportive else ''
ret_value = '{} {} {} {}'.format(conclusion, intro, because, premises)
if _t.get_lang() == 'de':
intro = _t.get(_.itIsTrueThatAnonymous
) if db_argument.is_supportive else _t.get(_.itIsFalseThatAnonymous
)
intro = intro[0:1].upper() + intro[1:]
intro = (start_pro if db_argument.is_supportive else start_con
) + intro + end_tag
ret_value = '{} {}, {} {}'.format(intro, conclusion, because, premises)
return ret_value
<|reserved_special_token_0|>
def __get_tags_for_building_single_argument(with_html_tag, attack_type,
colored_position, premises, conclusion):
sb_none = start_tag if with_html_tag else ''
se = end_tag if with_html_tag else ''
if attack_type not in ['dont_know', 'jump']:
sb = start_tag if with_html_tag else ''
if colored_position:
sb = start_position if with_html_tag else ''
if attack_type == Relations.UNDERMINE:
premises = sb + premises + se
else:
conclusion = sb + conclusion + se
else:
sb = start_argument if with_html_tag else ''
sb_tmp = start_attack if with_html_tag else ''
premises = sb + premises + se
conclusion = sb_tmp + conclusion + se
return premises, conclusion, sb, sb_none, se
<|reserved_special_token_0|>
def __build_nested_argument(arg_array: List[Argument], first_arg_by_user,
user_changed_opinion, with_html_tag, start_with_intro,
minimize_on_undercut, anonymous_style, premisegroup_by_user, _t):
"""
:param arg_array:
:param first_arg_by_user:
:param user_changed_opinion:
:param with_html_tag:
:param start_with_intro:
:param minimize_on_undercut:
:param anonymous_style:
:param premisegroup_by_user:
:param _t:
:return:
"""
pgroups = []
supportive = []
arg_array = arg_array[::-1]
local_lang = arg_array[0].lang
for db_argument in arg_array:
text = db_argument.get_premisegroup_text()
pgroups.append(text)
supportive.append(db_argument.is_supportive)
conclusion = arg_array[0].get_conclusion_text()
sb = start_position if with_html_tag else ''
se = end_tag if with_html_tag else ''
because = (', ' if local_lang == 'de' else ' ') + _t.get(_.because).lower(
) + ' '
if len(arg_array
) % 2 is 0 and not first_arg_by_user and not anonymous_style:
ret_value = _t.get(_.earlierYouArguedThat if user_changed_opinion else
_.otherUsersSaidThat) + ' '
tmp_users_opinion = True
elif not anonymous_style:
ret_value = _t.get(_.soYourOpinionIsThat
) + ': ' if start_with_intro else ''
tmp_users_opinion = False
conclusion = se + conclusion[0:1].upper() + conclusion[1:]
else:
ret_value = _t.get(_.someoneArgued) + ' '
tmp_users_opinion = False
tmp = _t.get(_.itFalseIsThat) + ' ' if not supportive[0] else ''
ret_value += tmp + conclusion + because + pgroups[0] + '.'
del pgroups[0]
if minimize_on_undercut and not user_changed_opinion and len(pgroups) > 2:
return _t.get(_.butYouCounteredWith).strip() + ' ' + sb + pgroups[
len(pgroups) - 1] + se + '.'
for i, pgroup in enumerate(pgroups):
ret_value += ' '
if tmp_users_opinion and not anonymous_style:
tmp = (_.butYouCounteredWithArgument if premisegroup_by_user else
_.butYouCounteredWithInterest)
ret_value += _t.get(_.otherParticipantsConvincedYouThat if
user_changed_opinion else tmp)
elif not anonymous_style:
ret_value += _t.get(_.youAgreeWithThatNow)
else:
ret_value += _t.get(_.otherUsersSaidThat) if i == 0 else _t.get(_
.thenOtherUsersSaidThat)
ret_value += sb + ' ' + pgroups[i] + '.'
tmp_users_opinion = not tmp_users_opinion
return ret_value.replace(' ', ' ')
def get_text_for_premisegroup_uid(uid):
"""
Returns joined text of the premise group and the premise ids
:param uid: premisegroup_uid
:return: text, uids
"""
warnings.warn('Use PremiseGroup.get_text() instead.', DeprecationWarning)
db_premises = DBDiscussionSession.query(Premise).filter_by(premisegroup_uid
=uid).join(Statement).all()
if len(db_premises) == 0:
return ''
texts = [premise.get_text() for premise in db_premises]
lang = DBDiscussionSession.query(Statement).get(db_premises[0].
statements.uid).lang
_t = Translator(lang)
return ' {} '.format(_t.get(_.aand)).join(texts)
<|reserved_special_token_0|>
def get_text_for_premise(uid: int, colored_position: bool=False):
"""
Returns text of premise with given uid
:param uid: Statement.uid
:param colored_position: Boolean
:return: String
"""
db_premise = DBDiscussionSession.query(Premise).get(uid)
if db_premise:
return db_premise.get_text(html=colored_position)
else:
return None
def get_text_for_conclusion(argument, start_with_intro=False,
rearrange_intro=False, is_users_opinion=True):
"""
Check the arguments conclusion whether it is an statement or an argument and returns the text
:param argument: Argument
:param start_with_intro: Boolean
:param rearrange_intro: Boolean
:return: String
"""
if argument.argument_uid:
return get_text_for_argument_uid(argument.argument_uid,
start_with_intro, rearrange_intro=rearrange_intro,
is_users_opinion=is_users_opinion)
else:
return argument.get_conclusion_text()
<|reserved_special_token_0|>
def get_user_by_private_or_public_nickname(nickname):
"""
Gets the user by his (public) nickname, based on the option, whether his nickname is public or not
:param nickname: Nickname of the user
:return: Current user or None
"""
db_user = get_user_by_case_insensitive_nickname(nickname)
db_public_user = get_user_by_case_insensitive_public_nickname(nickname)
uid = 0
if db_user:
uid = db_user.uid
elif db_public_user:
uid = db_public_user.uid
db_settings = DBDiscussionSession.query(Settings).filter_by(author_uid=uid
).first()
if not db_settings:
return None
if db_settings.should_show_public_nickname and db_user:
return db_user
elif not db_settings.should_show_public_nickname and db_public_user:
return db_public_user
return None
def get_user_by_case_insensitive_nickname(nickname):
"""
Returns user with given nickname
:param nickname: String
:return: User or None
"""
return DBDiscussionSession.query(User).filter(func.lower(User.nickname) ==
func.lower(nickname)).first()
<|reserved_special_token_0|>
def __get_text_for_click_and_mark_count(nickname, is_user, argument_uid,
statement_uid, speech, lang):
"""
Build text for a bubble, how many other participants have the same interest?
:param nickname: User.nickname
:param is_user: boolean
:param argument_uid: Argument.uid
:param statement_uid: Statement.uid
:param speech: dict()
:param lang: ui_locales
:return: [String]
"""
if not nickname:
nickname = 'anonymous'
db_user = DBDiscussionSession.query(User).filter_by(nickname=nickname
).first()
if not db_user:
db_user = DBDiscussionSession.query(User).filter_by(nickname=
'anonymous').first()
db_clicks, db_marks = __get_clicks_and_marks(argument_uid,
statement_uid, db_user)
_t = Translator(lang)
speech['votecounts'] = len(db_clicks) if db_clicks else 0
if db_marks:
speech['votecounts'] += len(db_marks)
votecount_keys = defaultdict(lambda : '{} {}.'.format(speech[
'votecounts'], _t.get(_.voteCountTextMore)))
if is_user and db_user.gender == 'm':
gender_key = _.voteCountTextFirstM
elif is_user and db_user.gender == 'f':
gender_key = _.voteCountTextFirstF
else:
gender_key = _.voteCountTextFirst
votecount_keys[0] = '{}.'.format(_t.get(gender_key))
votecount_keys[1] = _t.get(_.voteCountTextOneOther) + '.'
return votecount_keys
def __get_clicks_and_marks(argument_uid, statement_uid, db_user):
db_clicks = None
db_marks = None
if argument_uid:
db_clicks = DBDiscussionSession.query(ClickedArgument).filter(
ClickedArgument.argument_uid == argument_uid, ClickedArgument.
is_up_vote == True, ClickedArgument.is_valid, ClickedArgument.
author_uid != db_user.uid).all()
db_marks = DBDiscussionSession.query(MarkedArgument).filter(
MarkedArgument.argument_uid == argument_uid, MarkedArgument.
author_uid != db_user.uid).all()
elif statement_uid:
db_clicks = DBDiscussionSession.query(ClickedStatement).filter(
ClickedStatement.statement_uid == statement_uid,
ClickedStatement.is_up_vote == True, ClickedStatement.is_valid,
ClickedStatement.author_uid != db_user.uid).all()
db_marks = DBDiscussionSession.query(MarkedStatement).filter(
MarkedStatement.statement_uid == statement_uid, MarkedStatement
.author_uid != db_user.uid).all()
return db_clicks, db_marks
def is_argument_disabled_due_to_disabled_statements(argument):
"""
Returns true if any involved statement is disabled.
:param argument: Argument
:return: Boolean
"""
if argument.conclusion_uid is None:
db_argument = DBDiscussionSession.query(Argument).get(argument.
argument_uid)
conclusion = DBDiscussionSession(Statement).get(db_argument.
conclusion_uid)
if conclusion.is_disabled:
return True
premises = __get_all_premises_of_argument(db_argument)
for premise in premises:
if premise.statements.is_disabled:
return True
else:
print(argument.conclusion_uid)
conclusion = DBDiscussionSession.query(Statement).get(argument.
conclusion_uid)
if conclusion.is_disabled:
return True
premises = __get_all_premises_of_argument(argument)
for premise in premises:
if premise.statements.is_disabled:
return True
return False
def is_author_of_statement(db_user: User, statement_uid: int) ->bool:
"""
Is the user with given nickname author of the statement?
:param db_user: User
:param statement_uid: Statement.uid
:return: Boolean
"""
db_user = (db_user if db_user and db_user.nickname !=
nick_of_anonymous_user else None)
if not db_user:
return False
db_textversion = DBDiscussionSession.query(TextVersion).filter_by(
statement_uid=statement_uid).order_by(TextVersion.uid.asc()).first()
if not db_textversion:
return False
return db_textversion.author_uid == db_user.uid
<|reserved_special_token_0|>
def get_profile_picture(user: User, size: int=80, ignore_privacy_settings:
bool=False):
"""
Returns the url to a https://secure.gravatar.com picture, with the option wavatar and size of 80px
:param user: User
:param size: Integer, default 80
:param ignore_privacy_settings:
:return: String
"""
additional_id = ''
if user and isinstance(user, User):
additional_id = ('' if user.settings.should_show_public_nickname or
ignore_privacy_settings else 'x')
return __get_gravatar(user, additional_id, size)
<|reserved_special_token_0|>
def __get_gravatar(user, additional_id, size):
if user:
if str(user.email) == 'None':
email = (user.nickname + additional_id).encode('utf-8')
else:
email = (user.email + additional_id).encode('utf-8')
else:
email = 'unknown'.encode('utf-8')
gravatar_url = 'https://secure.gravatar.com/avatar/{}?'.format(hashlib.
md5(email.lower()).hexdigest())
gravatar_url += parse.urlencode({'d': 'wavatar', 's': str(size)})
return gravatar_url
def get_author_data(uid, gravatar_on_right_side=True,
linked_with_users_page=True, profile_picture_size=20):
"""
Returns a-tag with gravatar of current author and users page as href
:param uid: Uid of the author
:param gravatar_on_right_side: True, if the gravatar is on the right of authors name
:param linked_with_users_page: True, if the text is a link to the authors site
:param profile_picture_size: Integer
:return: HTML-String
"""
db_user = DBDiscussionSession.query(User).get(int(uid))
if not db_user:
return None, 'Missing author with uid ' + str(uid), False
nick = db_user.global_nickname
img_src = get_profile_picture(db_user, profile_picture_size)
link_begin = ''
link_end = ''
if linked_with_users_page:
link_begin = '<a href="/user/{}" title="{}">'.format(db_user.uid, nick)
link_end = '</a>'
side = 'left' if gravatar_on_right_side else 'right'
img = '<img class="img-circle" src="{}" style="padding-{}: 0.3em">'.format(
img_src, side)
if gravatar_on_right_side:
return db_user, '{}{}{}{}'.format(link_begin, nick, img, link_end
), True
else:
return db_user, '{}{}{}{}'.format(link_begin, img, nick, link_end
), True
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class BubbleTypes(Enum):
USER = auto()
SYSTEM = auto()
STATUS = auto()
INFO = auto()
def __str__(self):
return str(self.value)
class Relations(Enum):
UNDERMINE = 'undermine'
UNDERCUT = 'undercut'
REBUT = 'rebut'
SUPPORT = 'support'
def __str__(self):
return str(self.value)
class Attitudes(Enum):
AGREE = 'agree'
DISAGREE = 'disagree'
DONT_KNOW = 'dontknow'
def __str__(self):
return str(self.value)
<|reserved_special_token_0|>
def get_global_url():
"""
Returns the global url of the project, based on the ENV
:return: String
"""
return os.environ.get('URL', '')
def get_changelog(no):
"""
Returns the 'no' last entries from the changelog
:param no: int
:return: list
"""
path = str(os.path.realpath(__file__ + '/../../CHANGELOG.md'))
lines = [line.rstrip('\n').strip() for line in open(path) if len(line.
rstrip('\n').strip()) > 0]
changelog = []
title = ''
body = []
for l in lines:
if l.startswith('#'):
if len(title) > 0:
changelog.append({'title': title, 'body': body})
body = []
title = l.replace('### ', '')
else:
body.append(l.replace('- ', ''))
return changelog[0:no]
<|reserved_special_token_0|>
def usage_of_matomo(registry):
"""
Returns true, if matomo is set in the current ini file.
:param registry: request.registry
:return: Boolean
"""
if 'mode' in registry.settings:
return registry.settings['usage_of_matomo'].lower() == 'true'
return False
def escape_string(text):
"""
Escapes all html special chars.
:param text: string
:return: html.escape(text)
"""
return escape(text)
def get_discussion_language(matchdict, params, session, current_issue_uid=None
):
"""
Returns Language.ui_locales
CALL AFTER issue_handler.get_id_of_slug(..)!
:param matchdict: matchdict of the current request
:param params: params of the current request
:param session: session of the current request
:param current_issue_uid: uid
:return:
"""
if not current_issue_uid:
current_issue = DBDiscussionSession.query(Issue).filter(Issue.
is_disabled == False, Issue.is_private == False).first()
current_issue_uid = current_issue.uid if current_issue else None
issue = matchdict['issue'] if 'issue' in matchdict else params['issue'
] if 'issue' in params else session['issue'
] if 'issue' in session else current_issue_uid
db_issue = DBDiscussionSession.query(Issue).get(issue)
return db_issue.lang if db_issue else 'en'
def python_datetime_pretty_print(ts, lang):
"""
Pretty print of a locale
:param ts: Timestamp
:param lang: ui_locales
:return: String
"""
formatter = '%b. %d.'
if lang == 'de':
try:
locale.setlocale(locale.LC_TIME, 'de_DE.UTF-8')
formatter = '%d. %b.'
except locale.Error:
locale.setlocale(locale.LC_TIME, 'en_US.UTF8')
return datetime.strptime(str(ts), '%Y-%m-%d').strftime(formatter)
def get_all_arguments_by_statement(statement_uid, include_disabled=False):
"""
Returns a list of all arguments where the statement is a conclusion or member of the premisegroup
:param statement_uid: Statement.uid
:param include_disabled: Boolean
:return: [Arguments]
"""
logger('DBAS.LIB', 'main {}, include_disabled {}'.format(statement_uid,
include_disabled))
db_arguments = __get_arguments_of_conclusion(statement_uid,
include_disabled)
arg_array = [arg for arg in db_arguments] if db_arguments else []
premises = DBDiscussionSession.query(Premise).filter_by(statement_uid=
statement_uid)
if not include_disabled:
premises = premises.filter_by(is_disabled=False)
premises = premises.all()
for premise in premises:
arg_array += __get_argument_of_premisegroup(premise.
premisegroup_uid, include_disabled)
db_undercuts = []
for arg in arg_array:
db_undercuts += __get_undercuts_of_argument(arg.uid, include_disabled)
db_undercutted_undercuts = []
for arg in db_undercuts:
db_undercutted_undercuts += __get_undercuts_of_argument(arg.uid,
include_disabled)
arg_array = list(set(arg_array + db_undercuts + db_undercutted_undercuts))
logger('DBAS.LIB', 'returning arguments {}'.format([arg.uid for arg in
arg_array]))
return arg_array if len(arg_array) > 0 else None
def __get_argument_of_premisegroup(premisegroup_uid, include_disabled):
"""
Returns all arguments with the given premisegroup
:param premisegroup_uid: PremisgGroup.uid
:param include_disabled: Boolean
:return: list of Arguments
"""
db_arguments = DBDiscussionSession.query(Argument).filter_by(
premisegroup_uid=premisegroup_uid)
if not include_disabled:
db_arguments = db_arguments.filter_by(is_disabled=False)
return db_arguments.all() if db_arguments else []
def __get_undercuts_of_argument(argument_uid, include_disabled):
"""
Returns all undercuts fo the given argument
:param argument_uid: Argument.uid
:param include_disabled: boolean
:return: list of Arguments
"""
db_undercuts = DBDiscussionSession.query(Argument).filter_by(argument_uid
=argument_uid)
if not include_disabled:
db_undercuts = db_undercuts.filter_by(is_disabled=False)
return db_undercuts.all() if db_undercuts else []
def __get_arguments_of_conclusion(statement_uid, include_disabled):
"""
Returns all arguments, where the statement is set as conclusion
:param statement_uid: Statement.uid
:param include_disabled: Boolean
:return: list of arguments
"""
db_arguments = DBDiscussionSession.query(Argument).filter_by(conclusion_uid
=statement_uid)
if not include_disabled:
db_arguments = db_arguments.filter_by(is_disabled=False)
return db_arguments.all() if db_arguments else []
def get_all_arguments_with_text_by_statement_id(statement_uid):
"""
Given a statement_uid, it returns all arguments, which use this statement and adds
the corresponding text to it, which normally appears in the bubbles. The resulting
text depends on the provided language.
:param statement_uid: uid to a statement, which should be analyzed
:return: list of dictionaries containing some properties of these arguments
:rtype: list
"""
logger('DBAS.LIB', 'main ' + str(statement_uid))
arguments = get_all_arguments_by_statement(statement_uid)
results = []
if arguments:
results = [{'uid': arg.uid, 'text': get_text_for_argument_uid(arg.
uid)} for arg in arguments]
return results
def get_all_arguments_with_text_and_url_by_statement_id(db_statement,
urlmanager, color_statement=False, is_jump=False):
"""
Given a statement_uid, it returns all arguments, which use this statement and adds
the corresponding text to it, which normally appears in the bubbles. The resulting
text depends on the provided language.
:param db_statement: Statement
:param urlmanager:
:param color_statement: True, if the statement (specified by the ID) should be colored
:return: list of dictionaries containing some properties of these arguments
:rtype: list
"""
logger('DBAS.LIB', 'main ' + str(db_statement.uid))
arguments = get_all_arguments_by_statement(db_statement.uid)
uids = [arg.uid for arg in arguments] if arguments else None
results = list()
sb = '<{} data-argumentation-type="position">'.format(tag_type
) if color_statement else ''
se = '</{}>'.format(tag_type) if color_statement else ''
if not uids:
return []
uids.sort()
for uid in uids:
statement_text = db_statement.get_text()
attack_type = 'jump' if is_jump else ''
argument_text = get_text_for_argument_uid(uid, anonymous_style=True,
attack_type=attack_type)
pos = argument_text.lower().find(statement_text.lower())
argument_text = argument_text[:pos] + sb + argument_text[pos:]
pos += len(statement_text) + len(sb)
argument_text = argument_text[:pos] + se + argument_text[pos:]
results.append({'uid': uid, 'text': argument_text, 'url':
urlmanager.get_url_for_jump(uid)})
return results
def get_slug_by_statement_uid(uid):
"""
Returns slug for the given Issue.uid
:param uid: Issue.uid
:return: String
"""
db_statement = DBDiscussionSession.query(Statement).get(uid)
return resolve_issue_uid_to_slug(db_statement.issue_uid)
def get_text_for_argument_uid(uid, nickname=None, with_html_tag=False,
start_with_intro=False, first_arg_by_user=False, user_changed_opinion=
False, rearrange_intro=False, colored_position=False, attack_type=None,
minimize_on_undercut=False, is_users_opinion=True, anonymous_style=
False, support_counter_argument=False):
"""
Returns current argument as string like "conclusion, because premise1 and premise2"
:param uid: Integer
:param with_html_tag: Boolean
:param start_with_intro: Boolean
:param first_arg_by_user: Boolean
:param user_changed_opinion: Boolean
:param rearrange_intro: Boolean
:param colored_position: Boolean
:param attack_type: String
:param minimize_on_undercut: Boolean
:param anonymous_style: Boolean
:param support_counter_argument: Boolean
:return: String
"""
logger('DBAS.LIB', 'main {}'.format(uid))
db_argument = DBDiscussionSession.query(Argument).get(uid)
if not db_argument:
return None
lang = db_argument.lang
_t = Translator(lang)
premisegroup_by_user = False
author_uid = None
db_user = DBDiscussionSession.query(User).filter_by(nickname=str(nickname)
).first()
if db_user:
author_uid = db_user.uid
pgroup = DBDiscussionSession.query(PremiseGroup).get(db_argument.
premisegroup_uid)
marked_argument = DBDiscussionSession.query(MarkedArgument).filter_by(
argument_uid=uid, author_uid=db_user.uid).first()
premisegroup_by_user = (pgroup.author_uid == db_user.uid or
marked_argument is not None)
arg_array = [db_argument]
while db_argument.argument_uid:
db_argument = DBDiscussionSession.query(Argument).get(db_argument.
argument_uid)
arg_array.append(db_argument)
if attack_type == 'jump':
return __build_argument_for_jump(arg_array, with_html_tag)
if len(arg_array) == 1:
return __build_single_argument(arg_array[0], rearrange_intro,
with_html_tag, colored_position, attack_type, _t,
start_with_intro, is_users_opinion, anonymous_style,
support_counter_argument, author_uid)
else:
return __build_nested_argument(arg_array, first_arg_by_user,
user_changed_opinion, with_html_tag, start_with_intro,
minimize_on_undercut, anonymous_style, premisegroup_by_user, _t)
<|reserved_special_token_0|>
def __build_val_for_jump(db_argument, tag_premise, tag_conclusion, tag_end, _t
):
premises = db_argument.get_premisegroup_text()
if premises[-1] != '.':
premises += '.'
conclusion = db_argument.get_conclusion_text()
because = _t.get(_.because).lower()
conclusion = tag_conclusion + conclusion + tag_end
premises = tag_premise + premises + tag_end
intro = start_con + _t.get(_.isNotRight).lower(
) + end_tag if not db_argument.is_supportive else ''
ret_value = '{} {} {} {}'.format(conclusion, intro, because, premises)
if _t.get_lang() == 'de':
intro = _t.get(_.itIsTrueThatAnonymous
) if db_argument.is_supportive else _t.get(_.itIsFalseThatAnonymous
)
intro = intro[0:1].upper() + intro[1:]
intro = (start_pro if db_argument.is_supportive else start_con
) + intro + end_tag
ret_value = '{} {}, {} {}'.format(intro, conclusion, because, premises)
return ret_value
<|reserved_special_token_0|>
def __build_val_for_undercutted_undercut(arg_array: List[Argument],
tag_premise, tag_conclusion, tag_end, _t):
premise1 = arg_array[0].get_premisegroup_text()
premise2 = arg_array[1].get_premisegroup_text()
premise3 = arg_array[2].get_premisegroup_text()
conclusion = arg_array[2].get_conclusion_text()
bind = start_con + _t.get(_.isNotAGoodReasonAgainstArgument) + end_tag
because = _t.get(_.because)
seperator = ',' if _t.get_lang() == 'de' else ''
premise1 = tag_premise + premise1 + tag_end
premise2 = tag_conclusion + premise2 + tag_end
argument = '{}{} {} {}'.format(conclusion, seperator, because.lower(),
premise3)
argument = tag_conclusion + argument + tag_end
ret_value = '{} {} {}. {} {}'.format(premise2, bind, argument, because,
premise1)
return ret_value
def __build_single_argument(db_argument: Argument, rearrange_intro: bool,
with_html_tag: bool, colored_position: bool, attack_type: str, _t:
Translator, start_with_intro: bool, is_users_opinion: bool,
anonymous_style: bool, support_counter_argument: bool=False, author_uid
=None):
"""
Build up argument text for a single argument
Please, do not touch this!
:param uid: Argument.uid
:param rearrange_intro: Boolean
:param with_html_tag: Boolean
:param colored_position: Boolean
:param attack_type: String
:param _t: Translator
:param start_with_intro: Boolean
:param is_users_opinion: Boolean
:param anonymous_style: Boolean
:param support_counter_argument: Boolean
:param author_uid: User.uid
:return: String
"""
premises_text = db_argument.get_premisegroup_text()
conclusion_text = db_argument.get_conclusion_text()
lang = db_argument.lang
if lang != 'de':
premises_text = premises_text[0:1].lower() + premises_text[1:]
premises_text, conclusion_text, sb, sb_none, se = (
__get_tags_for_building_single_argument(with_html_tag, attack_type,
colored_position, premises_text, conclusion_text))
marked_element = False
if author_uid:
db_marked = DBDiscussionSession.query(MarkedArgument).filter(
MarkedArgument.argument_uid == db_argument.uid, MarkedArgument.
author_uid == author_uid).first()
marked_element = db_marked is not None
you_have_the_opinion_that = _t.get(_.youHaveTheOpinionThat).format(''
).strip()
if lang == 'de':
ret_value = __build_single_argument_for_de(_t, sb, se,
you_have_the_opinion_that, start_with_intro, anonymous_style,
rearrange_intro, db_argument, attack_type, sb_none,
marked_element, lang, premises_text, conclusion_text,
is_users_opinion, support_counter_argument)
else:
ret_value = __build_single_argument_for_en(_t, sb, se,
you_have_the_opinion_that, marked_element, conclusion_text,
premises_text, db_argument)
return ret_value.replace(' ', ' ')
def __get_tags_for_building_single_argument(with_html_tag, attack_type,
colored_position, premises, conclusion):
sb_none = start_tag if with_html_tag else ''
se = end_tag if with_html_tag else ''
if attack_type not in ['dont_know', 'jump']:
sb = start_tag if with_html_tag else ''
if colored_position:
sb = start_position if with_html_tag else ''
if attack_type == Relations.UNDERMINE:
premises = sb + premises + se
else:
conclusion = sb + conclusion + se
else:
sb = start_argument if with_html_tag else ''
sb_tmp = start_attack if with_html_tag else ''
premises = sb + premises + se
conclusion = sb_tmp + conclusion + se
return premises, conclusion, sb, sb_none, se
def __build_single_argument_for_de(_t, sb, se, you_have_the_opinion_that,
start_with_intro, anonymous_style, rearrange_intro, db_argument,
attack_type, sb_none, marked_element, lang, premises, conclusion,
is_users_opinion, support_counter_argument):
if start_with_intro and not anonymous_style:
intro = _t.get(_.itIsTrueThat
) if db_argument.is_supportive else _t.get(_.itIsFalseThat)
if rearrange_intro:
intro = _t.get(_.itTrueIsThat
) if db_argument.is_supportive else _t.get(_.itFalseIsThat)
ret_value = (sb_none if attack_type in ['dont_know'] else sb
) + intro + se + ' '
elif is_users_opinion and not anonymous_style:
ret_value = sb_none
if support_counter_argument:
ret_value += _t.get(_.youAgreeWithThecounterargument)
elif marked_element:
ret_value += you_have_the_opinion_that
else:
ret_value += _t.get(_.youArgue)
ret_value += se + ' '
else:
tmp = _t.get(_.itIsTrueThatAnonymous if db_argument.is_supportive else
_.itIsFalseThatAnonymous)
ret_value = sb_none + sb + tmp + se + ' '
ret_value += ' {}{}{} '.format(sb, _t.get(_.itIsNotRight), se
) if not db_argument.is_supportive else ''
ret_value += conclusion
ret_value += ', ' if lang == 'de' else ' '
ret_value += sb_none + _t.get(_.because).lower() + se + ' ' + premises
return ret_value
<|reserved_special_token_0|>
def __build_nested_argument(arg_array: List[Argument], first_arg_by_user,
user_changed_opinion, with_html_tag, start_with_intro,
minimize_on_undercut, anonymous_style, premisegroup_by_user, _t):
"""
:param arg_array:
:param first_arg_by_user:
:param user_changed_opinion:
:param with_html_tag:
:param start_with_intro:
:param minimize_on_undercut:
:param anonymous_style:
:param premisegroup_by_user:
:param _t:
:return:
"""
pgroups = []
supportive = []
arg_array = arg_array[::-1]
local_lang = arg_array[0].lang
for db_argument in arg_array:
text = db_argument.get_premisegroup_text()
pgroups.append(text)
supportive.append(db_argument.is_supportive)
conclusion = arg_array[0].get_conclusion_text()
sb = start_position if with_html_tag else ''
se = end_tag if with_html_tag else ''
because = (', ' if local_lang == 'de' else ' ') + _t.get(_.because).lower(
) + ' '
if len(arg_array
) % 2 is 0 and not first_arg_by_user and not anonymous_style:
ret_value = _t.get(_.earlierYouArguedThat if user_changed_opinion else
_.otherUsersSaidThat) + ' '
tmp_users_opinion = True
elif not anonymous_style:
ret_value = _t.get(_.soYourOpinionIsThat
) + ': ' if start_with_intro else ''
tmp_users_opinion = False
conclusion = se + conclusion[0:1].upper() + conclusion[1:]
else:
ret_value = _t.get(_.someoneArgued) + ' '
tmp_users_opinion = False
tmp = _t.get(_.itFalseIsThat) + ' ' if not supportive[0] else ''
ret_value += tmp + conclusion + because + pgroups[0] + '.'
del pgroups[0]
if minimize_on_undercut and not user_changed_opinion and len(pgroups) > 2:
return _t.get(_.butYouCounteredWith).strip() + ' ' + sb + pgroups[
len(pgroups) - 1] + se + '.'
for i, pgroup in enumerate(pgroups):
ret_value += ' '
if tmp_users_opinion and not anonymous_style:
tmp = (_.butYouCounteredWithArgument if premisegroup_by_user else
_.butYouCounteredWithInterest)
ret_value += _t.get(_.otherParticipantsConvincedYouThat if
user_changed_opinion else tmp)
elif not anonymous_style:
ret_value += _t.get(_.youAgreeWithThatNow)
else:
ret_value += _t.get(_.otherUsersSaidThat) if i == 0 else _t.get(_
.thenOtherUsersSaidThat)
ret_value += sb + ' ' + pgroups[i] + '.'
tmp_users_opinion = not tmp_users_opinion
return ret_value.replace(' ', ' ')
def get_text_for_premisegroup_uid(uid):
"""
Returns joined text of the premise group and the premise ids
:param uid: premisegroup_uid
:return: text, uids
"""
warnings.warn('Use PremiseGroup.get_text() instead.', DeprecationWarning)
db_premises = DBDiscussionSession.query(Premise).filter_by(premisegroup_uid
=uid).join(Statement).all()
if len(db_premises) == 0:
return ''
texts = [premise.get_text() for premise in db_premises]
lang = DBDiscussionSession.query(Statement).get(db_premises[0].
statements.uid).lang
_t = Translator(lang)
return ' {} '.format(_t.get(_.aand)).join(texts)
<|reserved_special_token_0|>
def get_text_for_premise(uid: int, colored_position: bool=False):
"""
Returns text of premise with given uid
:param uid: Statement.uid
:param colored_position: Boolean
:return: String
"""
db_premise = DBDiscussionSession.query(Premise).get(uid)
if db_premise:
return db_premise.get_text(html=colored_position)
else:
return None
def get_text_for_conclusion(argument, start_with_intro=False,
rearrange_intro=False, is_users_opinion=True):
"""
Check the arguments conclusion whether it is an statement or an argument and returns the text
:param argument: Argument
:param start_with_intro: Boolean
:param rearrange_intro: Boolean
:return: String
"""
if argument.argument_uid:
return get_text_for_argument_uid(argument.argument_uid,
start_with_intro, rearrange_intro=rearrange_intro,
is_users_opinion=is_users_opinion)
else:
return argument.get_conclusion_text()
def resolve_issue_uid_to_slug(uid):
"""
Given the issue uid query database and return the correct slug of the issue.
:param uid: issue_uid
:type uid: int
:return: Slug of issue
:rtype: str
"""
issue = DBDiscussionSession.query(Issue).get(uid)
return issue.slug if issue else None
<|reserved_special_token_0|>
def get_user_by_private_or_public_nickname(nickname):
"""
Gets the user by his (public) nickname, based on the option, whether his nickname is public or not
:param nickname: Nickname of the user
:return: Current user or None
"""
db_user = get_user_by_case_insensitive_nickname(nickname)
db_public_user = get_user_by_case_insensitive_public_nickname(nickname)
uid = 0
if db_user:
uid = db_user.uid
elif db_public_user:
uid = db_public_user.uid
db_settings = DBDiscussionSession.query(Settings).filter_by(author_uid=uid
).first()
if not db_settings:
return None
if db_settings.should_show_public_nickname and db_user:
return db_user
elif not db_settings.should_show_public_nickname and db_public_user:
return db_public_user
return None
def get_user_by_case_insensitive_nickname(nickname):
"""
Returns user with given nickname
:param nickname: String
:return: User or None
"""
return DBDiscussionSession.query(User).filter(func.lower(User.nickname) ==
func.lower(nickname)).first()
def get_user_by_case_insensitive_public_nickname(public_nickname):
"""
Returns user with given public nickname
:param public_nickname: String
:return: User or None
"""
return DBDiscussionSession.query(User).filter(func.lower(User.
public_nickname) == func.lower(public_nickname)).first()
def pretty_print_options(message):
"""
Some modifications for pretty printing.
Use uppercase for first letter in text and a single dot for the end if there isn't one already.
:param message: String
:return: String
"""
if message[0:1] == '<':
pos = message.index('>')
message = message[0:pos + 1] + message[pos + 1:pos + 2].upper(
) + message[pos + 2:]
else:
message = message[0:1].upper() + message[1:]
if message[-1] == '>':
pos = message.rfind('<')
if message[pos - 1:pos] not in ['.', '?', '!']:
message = message[0:pos] + '.' + message[pos:]
elif not message.endswith(tuple(['.', '?', '!'])) and id is not 'now':
message += '.'
return message
def create_speechbubble_dict(bubble_type: BubbleTypes, is_markable: bool=
False, is_author: bool=False, uid: str='', bubble_url: str='', content:
str='', omit_bubble_url: bool=False, omit_vote_info: bool=False,
argument_uid: int=None, statement_uid: int=None, is_supportive: bool=
False, nickname: str='anonymous', lang: str='en', is_users_opinion:
bool=False, other_author: User=None):
"""
Creates an dictionary which includes every information needed for a bubble.
:param bubble_type: BubbleTypes
:param is_markable: True if the content itself could be flagged
:param is_author: True if the current user is author of the content
:param uid: Identifier for the bubble
:param bubble_url: URL for the click event of the bubble
:param content: Text of the bubble
:param omit_bubble_url: True if the bubble should have a link
:param omit_vote_info: True if the bubble have the little, grey information text
:param argument_uid: Argument.uid
:param statement_uid: Statement.uid
:param is_supportive: Boolean
:param nickname: String
:param omit_bubble_url: Boolean
:param lang: is_users_opinion
:param is_users_opinion: Boolean
:return: dict()
"""
gravatar_link = get_global_url() + '/static/images/icon.png'
profile = None
if uid is not 'now':
content = pretty_print_options(content)
if bubble_type is BubbleTypes.SYSTEM and other_author is not None:
gravatar_link = get_profile_picture(other_author, 25)
profile = '/user/{}'.format(other_author.uid),
if bubble_type is BubbleTypes.USER and nickname != 'anonymous':
db_user = DBDiscussionSession.query(User).filter_by(nickname=nickname
).first()
db_marked = None
gravatar_link = get_profile_picture(db_user, 25)
if argument_uid is not None and db_user is not None:
db_marked = DBDiscussionSession.query(MarkedArgument).filter(
MarkedArgument.argument_uid == argument_uid, MarkedArgument
.author_uid == db_user.uid).first()
if statement_uid is not None and db_user is not None:
db_marked = DBDiscussionSession.query(MarkedStatement).filter(
MarkedStatement.statement_uid == statement_uid,
MarkedStatement.author_uid == db_user.uid).first()
is_users_opinion = db_marked is not None
speech = {'is_user': bubble_type is BubbleTypes.USER, 'is_system':
bubble_type is BubbleTypes.SYSTEM, 'is_status': bubble_type is
BubbleTypes.STATUS, 'is_info': bubble_type is BubbleTypes.INFO,
'is_markable': is_markable, 'is_author': is_author, 'id': uid if
len(str(uid)) > 0 else uuid4().hex, 'bubble_url': bubble_url,
'message': content, 'omit_bubble_url': omit_bubble_url,
'omit_vote_info': omit_vote_info, 'data_type': 'argument' if
argument_uid else 'statement' if statement_uid else 'None',
'data_argument_uid': argument_uid, 'data_statement_uid':
statement_uid, 'data_is_supportive': is_supportive,
'is_users_opinion': is_users_opinion, 'enemy': {'avatar':
gravatar_link, 'profile': profile, 'available': profile is not None}}
votecount_keys = __get_text_for_click_and_mark_count(nickname,
bubble_type is BubbleTypes.USER, argument_uid, statement_uid,
speech, lang)
speech['votecounts_message'] = votecount_keys[speech['votecounts']]
return speech
def __get_text_for_click_and_mark_count(nickname, is_user, argument_uid,
statement_uid, speech, lang):
"""
Build text for a bubble, how many other participants have the same interest?
:param nickname: User.nickname
:param is_user: boolean
:param argument_uid: Argument.uid
:param statement_uid: Statement.uid
:param speech: dict()
:param lang: ui_locales
:return: [String]
"""
if not nickname:
nickname = 'anonymous'
db_user = DBDiscussionSession.query(User).filter_by(nickname=nickname
).first()
if not db_user:
db_user = DBDiscussionSession.query(User).filter_by(nickname=
'anonymous').first()
db_clicks, db_marks = __get_clicks_and_marks(argument_uid,
statement_uid, db_user)
_t = Translator(lang)
speech['votecounts'] = len(db_clicks) if db_clicks else 0
if db_marks:
speech['votecounts'] += len(db_marks)
votecount_keys = defaultdict(lambda : '{} {}.'.format(speech[
'votecounts'], _t.get(_.voteCountTextMore)))
if is_user and db_user.gender == 'm':
gender_key = _.voteCountTextFirstM
elif is_user and db_user.gender == 'f':
gender_key = _.voteCountTextFirstF
else:
gender_key = _.voteCountTextFirst
votecount_keys[0] = '{}.'.format(_t.get(gender_key))
votecount_keys[1] = _t.get(_.voteCountTextOneOther) + '.'
return votecount_keys
def __get_clicks_and_marks(argument_uid, statement_uid, db_user):
db_clicks = None
db_marks = None
if argument_uid:
db_clicks = DBDiscussionSession.query(ClickedArgument).filter(
ClickedArgument.argument_uid == argument_uid, ClickedArgument.
is_up_vote == True, ClickedArgument.is_valid, ClickedArgument.
author_uid != db_user.uid).all()
db_marks = DBDiscussionSession.query(MarkedArgument).filter(
MarkedArgument.argument_uid == argument_uid, MarkedArgument.
author_uid != db_user.uid).all()
elif statement_uid:
db_clicks = DBDiscussionSession.query(ClickedStatement).filter(
ClickedStatement.statement_uid == statement_uid,
ClickedStatement.is_up_vote == True, ClickedStatement.is_valid,
ClickedStatement.author_uid != db_user.uid).all()
db_marks = DBDiscussionSession.query(MarkedStatement).filter(
MarkedStatement.statement_uid == statement_uid, MarkedStatement
.author_uid != db_user.uid).all()
return db_clicks, db_marks
def is_argument_disabled_due_to_disabled_statements(argument):
"""
Returns true if any involved statement is disabled.
:param argument: Argument
:return: Boolean
"""
if argument.conclusion_uid is None:
db_argument = DBDiscussionSession.query(Argument).get(argument.
argument_uid)
conclusion = DBDiscussionSession(Statement).get(db_argument.
conclusion_uid)
if conclusion.is_disabled:
return True
premises = __get_all_premises_of_argument(db_argument)
for premise in premises:
if premise.statements.is_disabled:
return True
else:
print(argument.conclusion_uid)
conclusion = DBDiscussionSession.query(Statement).get(argument.
conclusion_uid)
if conclusion.is_disabled:
return True
premises = __get_all_premises_of_argument(argument)
for premise in premises:
if premise.statements.is_disabled:
return True
return False
def is_author_of_statement(db_user: User, statement_uid: int) ->bool:
"""
Is the user with given nickname author of the statement?
:param db_user: User
:param statement_uid: Statement.uid
:return: Boolean
"""
db_user = (db_user if db_user and db_user.nickname !=
nick_of_anonymous_user else None)
if not db_user:
return False
db_textversion = DBDiscussionSession.query(TextVersion).filter_by(
statement_uid=statement_uid).order_by(TextVersion.uid.asc()).first()
if not db_textversion:
return False
return db_textversion.author_uid == db_user.uid
def is_author_of_argument(db_user: User, argument_uid: int) ->bool:
"""
Is the user with given nickname author of the argument?
:param db_user: User
:param argument_uid: Argument.uid
:return: Boolean
"""
db_user = (db_user if db_user and db_user.nickname !=
nick_of_anonymous_user else None)
if not db_user:
return False
db_argument = DBDiscussionSession.query(Argument).filter(Argument.uid ==
argument_uid, Argument.author_uid == db_user.uid).first()
return True if db_argument else False
<|reserved_special_token_0|>
def get_profile_picture(user: User, size: int=80, ignore_privacy_settings:
bool=False):
"""
Returns the url to a https://secure.gravatar.com picture, with the option wavatar and size of 80px
:param user: User
:param size: Integer, default 80
:param ignore_privacy_settings:
:return: String
"""
additional_id = ''
if user and isinstance(user, User):
additional_id = ('' if user.settings.should_show_public_nickname or
ignore_privacy_settings else 'x')
return __get_gravatar(user, additional_id, size)
def get_public_profile_picture(user: User, size: int=80):
"""
Returns the url to a https://secure.gravatar.com picture, with the option wavatar and size of 80px
If the user doesn't want an public profile, an anonymous image will be returned
:param user: User
:param size: Integer, default 80
:return: String
"""
additional_id = ''
if user.settings.should_show_public_nickname:
additional_id = 'x'
if len(str(user.oauth_provider)) > 0:
additional_id = '{}{}'.format(user.oauth_provider, user.
oauth_provider_id)
return __get_gravatar(user, additional_id, size)
def __get_gravatar(user, additional_id, size):
if user:
if str(user.email) == 'None':
email = (user.nickname + additional_id).encode('utf-8')
else:
email = (user.email + additional_id).encode('utf-8')
else:
email = 'unknown'.encode('utf-8')
gravatar_url = 'https://secure.gravatar.com/avatar/{}?'.format(hashlib.
md5(email.lower()).hexdigest())
gravatar_url += parse.urlencode({'d': 'wavatar', 's': str(size)})
return gravatar_url
def get_author_data(uid, gravatar_on_right_side=True,
linked_with_users_page=True, profile_picture_size=20):
"""
Returns a-tag with gravatar of current author and users page as href
:param uid: Uid of the author
:param gravatar_on_right_side: True, if the gravatar is on the right of authors name
:param linked_with_users_page: True, if the text is a link to the authors site
:param profile_picture_size: Integer
:return: HTML-String
"""
db_user = DBDiscussionSession.query(User).get(int(uid))
if not db_user:
return None, 'Missing author with uid ' + str(uid), False
nick = db_user.global_nickname
img_src = get_profile_picture(db_user, profile_picture_size)
link_begin = ''
link_end = ''
if linked_with_users_page:
link_begin = '<a href="/user/{}" title="{}">'.format(db_user.uid, nick)
link_end = '</a>'
side = 'left' if gravatar_on_right_side else 'right'
img = '<img class="img-circle" src="{}" style="padding-{}: 0.3em">'.format(
img_src, side)
if gravatar_on_right_side:
return db_user, '{}{}{}{}'.format(link_begin, nick, img, link_end
), True
else:
return db_user, '{}{}{}{}'.format(link_begin, img, nick, link_end
), True
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class BubbleTypes(Enum):
USER = auto()
SYSTEM = auto()
STATUS = auto()
INFO = auto()
def __str__(self):
return str(self.value)
class Relations(Enum):
UNDERMINE = 'undermine'
UNDERCUT = 'undercut'
REBUT = 'rebut'
SUPPORT = 'support'
def __str__(self):
return str(self.value)
class Attitudes(Enum):
AGREE = 'agree'
DISAGREE = 'disagree'
DONT_KNOW = 'dontknow'
def __str__(self):
return str(self.value)
<|reserved_special_token_0|>
def get_global_url():
"""
Returns the global url of the project, based on the ENV
:return: String
"""
return os.environ.get('URL', '')
def get_changelog(no):
"""
Returns the 'no' last entries from the changelog
:param no: int
:return: list
"""
path = str(os.path.realpath(__file__ + '/../../CHANGELOG.md'))
lines = [line.rstrip('\n').strip() for line in open(path) if len(line.
rstrip('\n').strip()) > 0]
changelog = []
title = ''
body = []
for l in lines:
if l.startswith('#'):
if len(title) > 0:
changelog.append({'title': title, 'body': body})
body = []
title = l.replace('### ', '')
else:
body.append(l.replace('- ', ''))
return changelog[0:no]
def is_development_mode(registry):
"""
Returns true, if mode is set to development in current ini file.
:param registry: request.registry
:return: Boolean
"""
if 'mode' in registry.settings:
return registry.settings['mode'].lower() == 'development'
return False
def usage_of_modern_bubbles(registry):
"""
Returns true, if modern bubbles are set in the current ini file.
:param registry: request.registry
:return: Boolean
"""
if 'modern_bubbles' in registry.settings:
return registry.settings['modern_bubbles'].lower() == 'true'
return False
def usage_of_matomo(registry):
"""
Returns true, if matomo is set in the current ini file.
:param registry: request.registry
:return: Boolean
"""
if 'mode' in registry.settings:
return registry.settings['usage_of_matomo'].lower() == 'true'
return False
def escape_string(text):
"""
Escapes all html special chars.
:param text: string
:return: html.escape(text)
"""
return escape(text)
def get_discussion_language(matchdict, params, session, current_issue_uid=None
):
"""
Returns Language.ui_locales
CALL AFTER issue_handler.get_id_of_slug(..)!
:param matchdict: matchdict of the current request
:param params: params of the current request
:param session: session of the current request
:param current_issue_uid: uid
:return:
"""
if not current_issue_uid:
current_issue = DBDiscussionSession.query(Issue).filter(Issue.
is_disabled == False, Issue.is_private == False).first()
current_issue_uid = current_issue.uid if current_issue else None
issue = matchdict['issue'] if 'issue' in matchdict else params['issue'
] if 'issue' in params else session['issue'
] if 'issue' in session else current_issue_uid
db_issue = DBDiscussionSession.query(Issue).get(issue)
return db_issue.lang if db_issue else 'en'
def python_datetime_pretty_print(ts, lang):
"""
Pretty print of a locale
:param ts: Timestamp
:param lang: ui_locales
:return: String
"""
formatter = '%b. %d.'
if lang == 'de':
try:
locale.setlocale(locale.LC_TIME, 'de_DE.UTF-8')
formatter = '%d. %b.'
except locale.Error:
locale.setlocale(locale.LC_TIME, 'en_US.UTF8')
return datetime.strptime(str(ts), '%Y-%m-%d').strftime(formatter)
def get_all_arguments_by_statement(statement_uid, include_disabled=False):
"""
Returns a list of all arguments where the statement is a conclusion or member of the premisegroup
:param statement_uid: Statement.uid
:param include_disabled: Boolean
:return: [Arguments]
"""
logger('DBAS.LIB', 'main {}, include_disabled {}'.format(statement_uid,
include_disabled))
db_arguments = __get_arguments_of_conclusion(statement_uid,
include_disabled)
arg_array = [arg for arg in db_arguments] if db_arguments else []
premises = DBDiscussionSession.query(Premise).filter_by(statement_uid=
statement_uid)
if not include_disabled:
premises = premises.filter_by(is_disabled=False)
premises = premises.all()
for premise in premises:
arg_array += __get_argument_of_premisegroup(premise.
premisegroup_uid, include_disabled)
db_undercuts = []
for arg in arg_array:
db_undercuts += __get_undercuts_of_argument(arg.uid, include_disabled)
db_undercutted_undercuts = []
for arg in db_undercuts:
db_undercutted_undercuts += __get_undercuts_of_argument(arg.uid,
include_disabled)
arg_array = list(set(arg_array + db_undercuts + db_undercutted_undercuts))
logger('DBAS.LIB', 'returning arguments {}'.format([arg.uid for arg in
arg_array]))
return arg_array if len(arg_array) > 0 else None
def __get_argument_of_premisegroup(premisegroup_uid, include_disabled):
"""
Returns all arguments with the given premisegroup
:param premisegroup_uid: PremisgGroup.uid
:param include_disabled: Boolean
:return: list of Arguments
"""
db_arguments = DBDiscussionSession.query(Argument).filter_by(
premisegroup_uid=premisegroup_uid)
if not include_disabled:
db_arguments = db_arguments.filter_by(is_disabled=False)
return db_arguments.all() if db_arguments else []
def __get_undercuts_of_argument(argument_uid, include_disabled):
"""
Returns all undercuts fo the given argument
:param argument_uid: Argument.uid
:param include_disabled: boolean
:return: list of Arguments
"""
db_undercuts = DBDiscussionSession.query(Argument).filter_by(argument_uid
=argument_uid)
if not include_disabled:
db_undercuts = db_undercuts.filter_by(is_disabled=False)
return db_undercuts.all() if db_undercuts else []
def __get_arguments_of_conclusion(statement_uid, include_disabled):
"""
Returns all arguments, where the statement is set as conclusion
:param statement_uid: Statement.uid
:param include_disabled: Boolean
:return: list of arguments
"""
db_arguments = DBDiscussionSession.query(Argument).filter_by(conclusion_uid
=statement_uid)
if not include_disabled:
db_arguments = db_arguments.filter_by(is_disabled=False)
return db_arguments.all() if db_arguments else []
def get_all_arguments_with_text_by_statement_id(statement_uid):
"""
Given a statement_uid, it returns all arguments, which use this statement and adds
the corresponding text to it, which normally appears in the bubbles. The resulting
text depends on the provided language.
:param statement_uid: uid to a statement, which should be analyzed
:return: list of dictionaries containing some properties of these arguments
:rtype: list
"""
logger('DBAS.LIB', 'main ' + str(statement_uid))
arguments = get_all_arguments_by_statement(statement_uid)
results = []
if arguments:
results = [{'uid': arg.uid, 'text': get_text_for_argument_uid(arg.
uid)} for arg in arguments]
return results
def get_all_arguments_with_text_and_url_by_statement_id(db_statement,
urlmanager, color_statement=False, is_jump=False):
"""
Given a statement_uid, it returns all arguments, which use this statement and adds
the corresponding text to it, which normally appears in the bubbles. The resulting
text depends on the provided language.
:param db_statement: Statement
:param urlmanager:
:param color_statement: True, if the statement (specified by the ID) should be colored
:return: list of dictionaries containing some properties of these arguments
:rtype: list
"""
logger('DBAS.LIB', 'main ' + str(db_statement.uid))
arguments = get_all_arguments_by_statement(db_statement.uid)
uids = [arg.uid for arg in arguments] if arguments else None
results = list()
sb = '<{} data-argumentation-type="position">'.format(tag_type
) if color_statement else ''
se = '</{}>'.format(tag_type) if color_statement else ''
if not uids:
return []
uids.sort()
for uid in uids:
statement_text = db_statement.get_text()
attack_type = 'jump' if is_jump else ''
argument_text = get_text_for_argument_uid(uid, anonymous_style=True,
attack_type=attack_type)
pos = argument_text.lower().find(statement_text.lower())
argument_text = argument_text[:pos] + sb + argument_text[pos:]
pos += len(statement_text) + len(sb)
argument_text = argument_text[:pos] + se + argument_text[pos:]
results.append({'uid': uid, 'text': argument_text, 'url':
urlmanager.get_url_for_jump(uid)})
return results
def get_slug_by_statement_uid(uid):
"""
Returns slug for the given Issue.uid
:param uid: Issue.uid
:return: String
"""
db_statement = DBDiscussionSession.query(Statement).get(uid)
return resolve_issue_uid_to_slug(db_statement.issue_uid)
def get_text_for_argument_uid(uid, nickname=None, with_html_tag=False,
start_with_intro=False, first_arg_by_user=False, user_changed_opinion=
False, rearrange_intro=False, colored_position=False, attack_type=None,
minimize_on_undercut=False, is_users_opinion=True, anonymous_style=
False, support_counter_argument=False):
"""
Returns current argument as string like "conclusion, because premise1 and premise2"
:param uid: Integer
:param with_html_tag: Boolean
:param start_with_intro: Boolean
:param first_arg_by_user: Boolean
:param user_changed_opinion: Boolean
:param rearrange_intro: Boolean
:param colored_position: Boolean
:param attack_type: String
:param minimize_on_undercut: Boolean
:param anonymous_style: Boolean
:param support_counter_argument: Boolean
:return: String
"""
logger('DBAS.LIB', 'main {}'.format(uid))
db_argument = DBDiscussionSession.query(Argument).get(uid)
if not db_argument:
return None
lang = db_argument.lang
_t = Translator(lang)
premisegroup_by_user = False
author_uid = None
db_user = DBDiscussionSession.query(User).filter_by(nickname=str(nickname)
).first()
if db_user:
author_uid = db_user.uid
pgroup = DBDiscussionSession.query(PremiseGroup).get(db_argument.
premisegroup_uid)
marked_argument = DBDiscussionSession.query(MarkedArgument).filter_by(
argument_uid=uid, author_uid=db_user.uid).first()
premisegroup_by_user = (pgroup.author_uid == db_user.uid or
marked_argument is not None)
arg_array = [db_argument]
while db_argument.argument_uid:
db_argument = DBDiscussionSession.query(Argument).get(db_argument.
argument_uid)
arg_array.append(db_argument)
if attack_type == 'jump':
return __build_argument_for_jump(arg_array, with_html_tag)
if len(arg_array) == 1:
return __build_single_argument(arg_array[0], rearrange_intro,
with_html_tag, colored_position, attack_type, _t,
start_with_intro, is_users_opinion, anonymous_style,
support_counter_argument, author_uid)
else:
return __build_nested_argument(arg_array, first_arg_by_user,
user_changed_opinion, with_html_tag, start_with_intro,
minimize_on_undercut, anonymous_style, premisegroup_by_user, _t)
def __build_argument_for_jump(arg_array: List[Argument], with_html_tag):
"""
Build tet for an argument, if we jump to this argument
:param arg_array: [Argument]
:param with_html_tag: Boolean
:return: String
"""
tag_premise = ('<' + tag_type + ' data-argumentation-type="attack">' if
with_html_tag else '')
tag_conclusion = ('<' + tag_type +
' data-argumentation-type="argument">' if with_html_tag else '')
tag_end = '</' + tag_type + '>' if with_html_tag else ''
lang = arg_array[0].lang
_t = Translator(lang)
if len(arg_array) == 1:
ret_value = __build_val_for_jump(arg_array[0], tag_premise,
tag_conclusion, tag_end, _t)
elif len(arg_array) == 2:
ret_value = __build_val_for_undercut(arg_array, tag_premise,
tag_conclusion, tag_end, _t)
else:
ret_value = __build_val_for_undercutted_undercut(arg_array,
tag_premise, tag_conclusion, tag_end, _t)
return ret_value.replace(' ', ' ')
def __build_val_for_jump(db_argument, tag_premise, tag_conclusion, tag_end, _t
):
premises = db_argument.get_premisegroup_text()
if premises[-1] != '.':
premises += '.'
conclusion = db_argument.get_conclusion_text()
because = _t.get(_.because).lower()
conclusion = tag_conclusion + conclusion + tag_end
premises = tag_premise + premises + tag_end
intro = start_con + _t.get(_.isNotRight).lower(
) + end_tag if not db_argument.is_supportive else ''
ret_value = '{} {} {} {}'.format(conclusion, intro, because, premises)
if _t.get_lang() == 'de':
intro = _t.get(_.itIsTrueThatAnonymous
) if db_argument.is_supportive else _t.get(_.itIsFalseThatAnonymous
)
intro = intro[0:1].upper() + intro[1:]
intro = (start_pro if db_argument.is_supportive else start_con
) + intro + end_tag
ret_value = '{} {}, {} {}'.format(intro, conclusion, because, premises)
return ret_value
def __build_val_for_undercut(arg_array: List[Argument], tag_premise,
tag_conclusion, tag_end, _t):
db_undercut = arg_array[0]
db_conclusion_argument = arg_array[1]
premise = db_undercut.get_premisegroup_text()
conclusion_premise = db_conclusion_argument.get_premisegroup_text()
conclusion_conclusion = db_conclusion_argument.get_conclusion_text()
premise = tag_premise + premise + tag_end
conclusion_premise = tag_conclusion + conclusion_premise + tag_end
conclusion_conclusion = tag_conclusion + conclusion_conclusion + tag_end
intro = _t.get(_.statementAbout) + ' ' if _t.get_lang() == 'de' else ''
bind = start_con + _t.get(_.isNotAGoodReasonFor) + end_tag
because = _t.get(_.because)
ret_value = '{}{} {} {}. {} {}.'.format(intro, conclusion_premise, bind,
conclusion_conclusion, because, premise)
return ret_value
def __build_val_for_undercutted_undercut(arg_array: List[Argument],
tag_premise, tag_conclusion, tag_end, _t):
premise1 = arg_array[0].get_premisegroup_text()
premise2 = arg_array[1].get_premisegroup_text()
premise3 = arg_array[2].get_premisegroup_text()
conclusion = arg_array[2].get_conclusion_text()
bind = start_con + _t.get(_.isNotAGoodReasonAgainstArgument) + end_tag
because = _t.get(_.because)
seperator = ',' if _t.get_lang() == 'de' else ''
premise1 = tag_premise + premise1 + tag_end
premise2 = tag_conclusion + premise2 + tag_end
argument = '{}{} {} {}'.format(conclusion, seperator, because.lower(),
premise3)
argument = tag_conclusion + argument + tag_end
ret_value = '{} {} {}. {} {}'.format(premise2, bind, argument, because,
premise1)
return ret_value
def __build_single_argument(db_argument: Argument, rearrange_intro: bool,
with_html_tag: bool, colored_position: bool, attack_type: str, _t:
Translator, start_with_intro: bool, is_users_opinion: bool,
anonymous_style: bool, support_counter_argument: bool=False, author_uid
=None):
"""
Build up argument text for a single argument
Please, do not touch this!
:param uid: Argument.uid
:param rearrange_intro: Boolean
:param with_html_tag: Boolean
:param colored_position: Boolean
:param attack_type: String
:param _t: Translator
:param start_with_intro: Boolean
:param is_users_opinion: Boolean
:param anonymous_style: Boolean
:param support_counter_argument: Boolean
:param author_uid: User.uid
:return: String
"""
premises_text = db_argument.get_premisegroup_text()
conclusion_text = db_argument.get_conclusion_text()
lang = db_argument.lang
if lang != 'de':
premises_text = premises_text[0:1].lower() + premises_text[1:]
premises_text, conclusion_text, sb, sb_none, se = (
__get_tags_for_building_single_argument(with_html_tag, attack_type,
colored_position, premises_text, conclusion_text))
marked_element = False
if author_uid:
db_marked = DBDiscussionSession.query(MarkedArgument).filter(
MarkedArgument.argument_uid == db_argument.uid, MarkedArgument.
author_uid == author_uid).first()
marked_element = db_marked is not None
you_have_the_opinion_that = _t.get(_.youHaveTheOpinionThat).format(''
).strip()
if lang == 'de':
ret_value = __build_single_argument_for_de(_t, sb, se,
you_have_the_opinion_that, start_with_intro, anonymous_style,
rearrange_intro, db_argument, attack_type, sb_none,
marked_element, lang, premises_text, conclusion_text,
is_users_opinion, support_counter_argument)
else:
ret_value = __build_single_argument_for_en(_t, sb, se,
you_have_the_opinion_that, marked_element, conclusion_text,
premises_text, db_argument)
return ret_value.replace(' ', ' ')
def __get_tags_for_building_single_argument(with_html_tag, attack_type,
colored_position, premises, conclusion):
sb_none = start_tag if with_html_tag else ''
se = end_tag if with_html_tag else ''
if attack_type not in ['dont_know', 'jump']:
sb = start_tag if with_html_tag else ''
if colored_position:
sb = start_position if with_html_tag else ''
if attack_type == Relations.UNDERMINE:
premises = sb + premises + se
else:
conclusion = sb + conclusion + se
else:
sb = start_argument if with_html_tag else ''
sb_tmp = start_attack if with_html_tag else ''
premises = sb + premises + se
conclusion = sb_tmp + conclusion + se
return premises, conclusion, sb, sb_none, se
def __build_single_argument_for_de(_t, sb, se, you_have_the_opinion_that,
start_with_intro, anonymous_style, rearrange_intro, db_argument,
attack_type, sb_none, marked_element, lang, premises, conclusion,
is_users_opinion, support_counter_argument):
if start_with_intro and not anonymous_style:
intro = _t.get(_.itIsTrueThat
) if db_argument.is_supportive else _t.get(_.itIsFalseThat)
if rearrange_intro:
intro = _t.get(_.itTrueIsThat
) if db_argument.is_supportive else _t.get(_.itFalseIsThat)
ret_value = (sb_none if attack_type in ['dont_know'] else sb
) + intro + se + ' '
elif is_users_opinion and not anonymous_style:
ret_value = sb_none
if support_counter_argument:
ret_value += _t.get(_.youAgreeWithThecounterargument)
elif marked_element:
ret_value += you_have_the_opinion_that
else:
ret_value += _t.get(_.youArgue)
ret_value += se + ' '
else:
tmp = _t.get(_.itIsTrueThatAnonymous if db_argument.is_supportive else
_.itIsFalseThatAnonymous)
ret_value = sb_none + sb + tmp + se + ' '
ret_value += ' {}{}{} '.format(sb, _t.get(_.itIsNotRight), se
) if not db_argument.is_supportive else ''
ret_value += conclusion
ret_value += ', ' if lang == 'de' else ' '
ret_value += sb_none + _t.get(_.because).lower() + se + ' ' + premises
return ret_value
<|reserved_special_token_0|>
def __build_nested_argument(arg_array: List[Argument], first_arg_by_user,
user_changed_opinion, with_html_tag, start_with_intro,
minimize_on_undercut, anonymous_style, premisegroup_by_user, _t):
"""
:param arg_array:
:param first_arg_by_user:
:param user_changed_opinion:
:param with_html_tag:
:param start_with_intro:
:param minimize_on_undercut:
:param anonymous_style:
:param premisegroup_by_user:
:param _t:
:return:
"""
pgroups = []
supportive = []
arg_array = arg_array[::-1]
local_lang = arg_array[0].lang
for db_argument in arg_array:
text = db_argument.get_premisegroup_text()
pgroups.append(text)
supportive.append(db_argument.is_supportive)
conclusion = arg_array[0].get_conclusion_text()
sb = start_position if with_html_tag else ''
se = end_tag if with_html_tag else ''
because = (', ' if local_lang == 'de' else ' ') + _t.get(_.because).lower(
) + ' '
if len(arg_array
) % 2 is 0 and not first_arg_by_user and not anonymous_style:
ret_value = _t.get(_.earlierYouArguedThat if user_changed_opinion else
_.otherUsersSaidThat) + ' '
tmp_users_opinion = True
elif not anonymous_style:
ret_value = _t.get(_.soYourOpinionIsThat
) + ': ' if start_with_intro else ''
tmp_users_opinion = False
conclusion = se + conclusion[0:1].upper() + conclusion[1:]
else:
ret_value = _t.get(_.someoneArgued) + ' '
tmp_users_opinion = False
tmp = _t.get(_.itFalseIsThat) + ' ' if not supportive[0] else ''
ret_value += tmp + conclusion + because + pgroups[0] + '.'
del pgroups[0]
if minimize_on_undercut and not user_changed_opinion and len(pgroups) > 2:
return _t.get(_.butYouCounteredWith).strip() + ' ' + sb + pgroups[
len(pgroups) - 1] + se + '.'
for i, pgroup in enumerate(pgroups):
ret_value += ' '
if tmp_users_opinion and not anonymous_style:
tmp = (_.butYouCounteredWithArgument if premisegroup_by_user else
_.butYouCounteredWithInterest)
ret_value += _t.get(_.otherParticipantsConvincedYouThat if
user_changed_opinion else tmp)
elif not anonymous_style:
ret_value += _t.get(_.youAgreeWithThatNow)
else:
ret_value += _t.get(_.otherUsersSaidThat) if i == 0 else _t.get(_
.thenOtherUsersSaidThat)
ret_value += sb + ' ' + pgroups[i] + '.'
tmp_users_opinion = not tmp_users_opinion
return ret_value.replace(' ', ' ')
def get_text_for_premisegroup_uid(uid):
"""
Returns joined text of the premise group and the premise ids
:param uid: premisegroup_uid
:return: text, uids
"""
warnings.warn('Use PremiseGroup.get_text() instead.', DeprecationWarning)
db_premises = DBDiscussionSession.query(Premise).filter_by(premisegroup_uid
=uid).join(Statement).all()
if len(db_premises) == 0:
return ''
texts = [premise.get_text() for premise in db_premises]
lang = DBDiscussionSession.query(Statement).get(db_premises[0].
statements.uid).lang
_t = Translator(lang)
return ' {} '.format(_t.get(_.aand)).join(texts)
<|reserved_special_token_0|>
def get_text_for_premise(uid: int, colored_position: bool=False):
"""
Returns text of premise with given uid
:param uid: Statement.uid
:param colored_position: Boolean
:return: String
"""
db_premise = DBDiscussionSession.query(Premise).get(uid)
if db_premise:
return db_premise.get_text(html=colored_position)
else:
return None
def get_text_for_conclusion(argument, start_with_intro=False,
rearrange_intro=False, is_users_opinion=True):
"""
Check the arguments conclusion whether it is an statement or an argument and returns the text
:param argument: Argument
:param start_with_intro: Boolean
:param rearrange_intro: Boolean
:return: String
"""
if argument.argument_uid:
return get_text_for_argument_uid(argument.argument_uid,
start_with_intro, rearrange_intro=rearrange_intro,
is_users_opinion=is_users_opinion)
else:
return argument.get_conclusion_text()
def resolve_issue_uid_to_slug(uid):
"""
Given the issue uid query database and return the correct slug of the issue.
:param uid: issue_uid
:type uid: int
:return: Slug of issue
:rtype: str
"""
issue = DBDiscussionSession.query(Issue).get(uid)
return issue.slug if issue else None
def get_all_attacking_arg_uids_from_history(history):
"""
Returns all arguments of the history, which attacked the user
:param history: String
:return: [Arguments.uid]
:rtype: list
"""
try:
splitted_history = history.split('-')
uids = []
for part in splitted_history:
if 'reaction' in part:
parts = part.split('/')
pos = parts.index('reaction')
uids.append(part.split('/')[pos + 3])
return uids
except AttributeError:
return []
def get_user_by_private_or_public_nickname(nickname):
"""
Gets the user by his (public) nickname, based on the option, whether his nickname is public or not
:param nickname: Nickname of the user
:return: Current user or None
"""
db_user = get_user_by_case_insensitive_nickname(nickname)
db_public_user = get_user_by_case_insensitive_public_nickname(nickname)
uid = 0
if db_user:
uid = db_user.uid
elif db_public_user:
uid = db_public_user.uid
db_settings = DBDiscussionSession.query(Settings).filter_by(author_uid=uid
).first()
if not db_settings:
return None
if db_settings.should_show_public_nickname and db_user:
return db_user
elif not db_settings.should_show_public_nickname and db_public_user:
return db_public_user
return None
def get_user_by_case_insensitive_nickname(nickname):
"""
Returns user with given nickname
:param nickname: String
:return: User or None
"""
return DBDiscussionSession.query(User).filter(func.lower(User.nickname) ==
func.lower(nickname)).first()
def get_user_by_case_insensitive_public_nickname(public_nickname):
"""
Returns user with given public nickname
:param public_nickname: String
:return: User or None
"""
return DBDiscussionSession.query(User).filter(func.lower(User.
public_nickname) == func.lower(public_nickname)).first()
def pretty_print_options(message):
"""
Some modifications for pretty printing.
Use uppercase for first letter in text and a single dot for the end if there isn't one already.
:param message: String
:return: String
"""
if message[0:1] == '<':
pos = message.index('>')
message = message[0:pos + 1] + message[pos + 1:pos + 2].upper(
) + message[pos + 2:]
else:
message = message[0:1].upper() + message[1:]
if message[-1] == '>':
pos = message.rfind('<')
if message[pos - 1:pos] not in ['.', '?', '!']:
message = message[0:pos] + '.' + message[pos:]
elif not message.endswith(tuple(['.', '?', '!'])) and id is not 'now':
message += '.'
return message
def create_speechbubble_dict(bubble_type: BubbleTypes, is_markable: bool=
False, is_author: bool=False, uid: str='', bubble_url: str='', content:
str='', omit_bubble_url: bool=False, omit_vote_info: bool=False,
argument_uid: int=None, statement_uid: int=None, is_supportive: bool=
False, nickname: str='anonymous', lang: str='en', is_users_opinion:
bool=False, other_author: User=None):
"""
Creates an dictionary which includes every information needed for a bubble.
:param bubble_type: BubbleTypes
:param is_markable: True if the content itself could be flagged
:param is_author: True if the current user is author of the content
:param uid: Identifier for the bubble
:param bubble_url: URL for the click event of the bubble
:param content: Text of the bubble
:param omit_bubble_url: True if the bubble should have a link
:param omit_vote_info: True if the bubble have the little, grey information text
:param argument_uid: Argument.uid
:param statement_uid: Statement.uid
:param is_supportive: Boolean
:param nickname: String
:param omit_bubble_url: Boolean
:param lang: is_users_opinion
:param is_users_opinion: Boolean
:return: dict()
"""
gravatar_link = get_global_url() + '/static/images/icon.png'
profile = None
if uid is not 'now':
content = pretty_print_options(content)
if bubble_type is BubbleTypes.SYSTEM and other_author is not None:
gravatar_link = get_profile_picture(other_author, 25)
profile = '/user/{}'.format(other_author.uid),
if bubble_type is BubbleTypes.USER and nickname != 'anonymous':
db_user = DBDiscussionSession.query(User).filter_by(nickname=nickname
).first()
db_marked = None
gravatar_link = get_profile_picture(db_user, 25)
if argument_uid is not None and db_user is not None:
db_marked = DBDiscussionSession.query(MarkedArgument).filter(
MarkedArgument.argument_uid == argument_uid, MarkedArgument
.author_uid == db_user.uid).first()
if statement_uid is not None and db_user is not None:
db_marked = DBDiscussionSession.query(MarkedStatement).filter(
MarkedStatement.statement_uid == statement_uid,
MarkedStatement.author_uid == db_user.uid).first()
is_users_opinion = db_marked is not None
speech = {'is_user': bubble_type is BubbleTypes.USER, 'is_system':
bubble_type is BubbleTypes.SYSTEM, 'is_status': bubble_type is
BubbleTypes.STATUS, 'is_info': bubble_type is BubbleTypes.INFO,
'is_markable': is_markable, 'is_author': is_author, 'id': uid if
len(str(uid)) > 0 else uuid4().hex, 'bubble_url': bubble_url,
'message': content, 'omit_bubble_url': omit_bubble_url,
'omit_vote_info': omit_vote_info, 'data_type': 'argument' if
argument_uid else 'statement' if statement_uid else 'None',
'data_argument_uid': argument_uid, 'data_statement_uid':
statement_uid, 'data_is_supportive': is_supportive,
'is_users_opinion': is_users_opinion, 'enemy': {'avatar':
gravatar_link, 'profile': profile, 'available': profile is not None}}
votecount_keys = __get_text_for_click_and_mark_count(nickname,
bubble_type is BubbleTypes.USER, argument_uid, statement_uid,
speech, lang)
speech['votecounts_message'] = votecount_keys[speech['votecounts']]
return speech
def __get_text_for_click_and_mark_count(nickname, is_user, argument_uid,
statement_uid, speech, lang):
"""
Build text for a bubble, how many other participants have the same interest?
:param nickname: User.nickname
:param is_user: boolean
:param argument_uid: Argument.uid
:param statement_uid: Statement.uid
:param speech: dict()
:param lang: ui_locales
:return: [String]
"""
if not nickname:
nickname = 'anonymous'
db_user = DBDiscussionSession.query(User).filter_by(nickname=nickname
).first()
if not db_user:
db_user = DBDiscussionSession.query(User).filter_by(nickname=
'anonymous').first()
db_clicks, db_marks = __get_clicks_and_marks(argument_uid,
statement_uid, db_user)
_t = Translator(lang)
speech['votecounts'] = len(db_clicks) if db_clicks else 0
if db_marks:
speech['votecounts'] += len(db_marks)
votecount_keys = defaultdict(lambda : '{} {}.'.format(speech[
'votecounts'], _t.get(_.voteCountTextMore)))
if is_user and db_user.gender == 'm':
gender_key = _.voteCountTextFirstM
elif is_user and db_user.gender == 'f':
gender_key = _.voteCountTextFirstF
else:
gender_key = _.voteCountTextFirst
votecount_keys[0] = '{}.'.format(_t.get(gender_key))
votecount_keys[1] = _t.get(_.voteCountTextOneOther) + '.'
return votecount_keys
def __get_clicks_and_marks(argument_uid, statement_uid, db_user):
db_clicks = None
db_marks = None
if argument_uid:
db_clicks = DBDiscussionSession.query(ClickedArgument).filter(
ClickedArgument.argument_uid == argument_uid, ClickedArgument.
is_up_vote == True, ClickedArgument.is_valid, ClickedArgument.
author_uid != db_user.uid).all()
db_marks = DBDiscussionSession.query(MarkedArgument).filter(
MarkedArgument.argument_uid == argument_uid, MarkedArgument.
author_uid != db_user.uid).all()
elif statement_uid:
db_clicks = DBDiscussionSession.query(ClickedStatement).filter(
ClickedStatement.statement_uid == statement_uid,
ClickedStatement.is_up_vote == True, ClickedStatement.is_valid,
ClickedStatement.author_uid != db_user.uid).all()
db_marks = DBDiscussionSession.query(MarkedStatement).filter(
MarkedStatement.statement_uid == statement_uid, MarkedStatement
.author_uid != db_user.uid).all()
return db_clicks, db_marks
def is_argument_disabled_due_to_disabled_statements(argument):
"""
Returns true if any involved statement is disabled.
:param argument: Argument
:return: Boolean
"""
if argument.conclusion_uid is None:
db_argument = DBDiscussionSession.query(Argument).get(argument.
argument_uid)
conclusion = DBDiscussionSession(Statement).get(db_argument.
conclusion_uid)
if conclusion.is_disabled:
return True
premises = __get_all_premises_of_argument(db_argument)
for premise in premises:
if premise.statements.is_disabled:
return True
else:
print(argument.conclusion_uid)
conclusion = DBDiscussionSession.query(Statement).get(argument.
conclusion_uid)
if conclusion.is_disabled:
return True
premises = __get_all_premises_of_argument(argument)
for premise in premises:
if premise.statements.is_disabled:
return True
return False
def is_author_of_statement(db_user: User, statement_uid: int) ->bool:
"""
Is the user with given nickname author of the statement?
:param db_user: User
:param statement_uid: Statement.uid
:return: Boolean
"""
db_user = (db_user if db_user and db_user.nickname !=
nick_of_anonymous_user else None)
if not db_user:
return False
db_textversion = DBDiscussionSession.query(TextVersion).filter_by(
statement_uid=statement_uid).order_by(TextVersion.uid.asc()).first()
if not db_textversion:
return False
return db_textversion.author_uid == db_user.uid
def is_author_of_argument(db_user: User, argument_uid: int) ->bool:
"""
Is the user with given nickname author of the argument?
:param db_user: User
:param argument_uid: Argument.uid
:return: Boolean
"""
db_user = (db_user if db_user and db_user.nickname !=
nick_of_anonymous_user else None)
if not db_user:
return False
db_argument = DBDiscussionSession.query(Argument).filter(Argument.uid ==
argument_uid, Argument.author_uid == db_user.uid).first()
return True if db_argument else False
def __get_all_premises_of_argument(argument):
"""
Returns list with all premises of the argument.
:param argument: Argument
:return: list()
"""
ret_list = []
db_premises = DBDiscussionSession.query(Premise).filter_by(premisegroup_uid
=argument.premisegroup_uid).join(Statement).all()
for premise in db_premises:
ret_list.append(premise)
return ret_list
def get_profile_picture(user: User, size: int=80, ignore_privacy_settings:
bool=False):
"""
Returns the url to a https://secure.gravatar.com picture, with the option wavatar and size of 80px
:param user: User
:param size: Integer, default 80
:param ignore_privacy_settings:
:return: String
"""
additional_id = ''
if user and isinstance(user, User):
additional_id = ('' if user.settings.should_show_public_nickname or
ignore_privacy_settings else 'x')
return __get_gravatar(user, additional_id, size)
def get_public_profile_picture(user: User, size: int=80):
"""
Returns the url to a https://secure.gravatar.com picture, with the option wavatar and size of 80px
If the user doesn't want an public profile, an anonymous image will be returned
:param user: User
:param size: Integer, default 80
:return: String
"""
additional_id = ''
if user.settings.should_show_public_nickname:
additional_id = 'x'
if len(str(user.oauth_provider)) > 0:
additional_id = '{}{}'.format(user.oauth_provider, user.
oauth_provider_id)
return __get_gravatar(user, additional_id, size)
def __get_gravatar(user, additional_id, size):
if user:
if str(user.email) == 'None':
email = (user.nickname + additional_id).encode('utf-8')
else:
email = (user.email + additional_id).encode('utf-8')
else:
email = 'unknown'.encode('utf-8')
gravatar_url = 'https://secure.gravatar.com/avatar/{}?'.format(hashlib.
md5(email.lower()).hexdigest())
gravatar_url += parse.urlencode({'d': 'wavatar', 's': str(size)})
return gravatar_url
def get_author_data(uid, gravatar_on_right_side=True,
linked_with_users_page=True, profile_picture_size=20):
"""
Returns a-tag with gravatar of current author and users page as href
:param uid: Uid of the author
:param gravatar_on_right_side: True, if the gravatar is on the right of authors name
:param linked_with_users_page: True, if the text is a link to the authors site
:param profile_picture_size: Integer
:return: HTML-String
"""
db_user = DBDiscussionSession.query(User).get(int(uid))
if not db_user:
return None, 'Missing author with uid ' + str(uid), False
nick = db_user.global_nickname
img_src = get_profile_picture(db_user, profile_picture_size)
link_begin = ''
link_end = ''
if linked_with_users_page:
link_begin = '<a href="/user/{}" title="{}">'.format(db_user.uid, nick)
link_end = '</a>'
side = 'left' if gravatar_on_right_side else 'right'
img = '<img class="img-circle" src="{}" style="padding-{}: 0.3em">'.format(
img_src, side)
if gravatar_on_right_side:
return db_user, '{}{}{}{}'.format(link_begin, nick, img, link_end
), True
else:
return db_user, '{}{}{}{}'.format(link_begin, img, nick, link_end
), True
def bubbles_already_last_in_list(bubble_list, bubbles):
"""
Are the given bubbles already at the end of the bubble list
:param bubble_list: list of Bubbles
:param bubbles: list of bubbles
:return: Boolean
"""
if isinstance(bubbles, list):
length = len(bubbles)
else:
length = 1
bubbles = [bubbles]
if len(bubble_list) < length:
return False
for bubble in bubbles:
if 'message' not in bubble:
return False
start_index = -length
is_already_in = False
for bubble in bubbles:
last = bubble_list[start_index]
if 'message' not in last or 'message' not in bubble:
return False
text1 = unhtmlify(last['message'].lower()).strip()
text2 = unhtmlify(bubble['message'].lower()).strip()
is_already_in = is_already_in or text1 == text2
start_index += 1
return is_already_in
def unhtmlify(html):
"""
Remove html-tags and unescape encoded html-entities.
:param html: Evil-string containing html
:return:
"""
return unescape(re.sub('<.*?>', '', html))
<|reserved_special_token_1|>
"""
Common, pure functions used by the D-BAS.
.. codeauthor:: Tobias Krauthoff <krauthoff@cs.uni-duesseldorf.de
"""
import hashlib
import locale
import os
import re
import warnings
from collections import defaultdict
from datetime import datetime
from enum import Enum, auto
from html import escape, unescape
from typing import List
from urllib import parse
from uuid import uuid4
from sqlalchemy import func
from dbas.database import DBDiscussionSession
from dbas.database.discussion_model import Argument, Premise, Statement, TextVersion, Issue, User, Settings, \
ClickedArgument, ClickedStatement, MarkedArgument, MarkedStatement, PremiseGroup
from dbas.logger import logger
from dbas.strings.keywords import Keywords as _
from dbas.strings.translator import Translator
nick_of_anonymous_user = 'anonymous'
fallback_lang = 'en'
tag_type = 'span'
start_attack = '<{} data-argumentation-type="attack">'.format(tag_type)
start_argument = '<{} data-argumentation-type="argument">'.format(tag_type)
start_position = '<{} data-argumentation-type="position">'.format(tag_type)
start_content = '<{} class="triangle-content-text">'.format(tag_type)
start_pro = '<{} data-attitude="pro">'.format(tag_type)
start_con = '<{} data-attitude="con">'.format(tag_type)
start_tag = '<{}>'.format(tag_type)
end_tag = '</{}>'.format(tag_type)
class BubbleTypes(Enum):
USER = auto()
SYSTEM = auto()
STATUS = auto()
INFO = auto()
def __str__(self):
return str(self.value)
class Relations(Enum):
UNDERMINE = 'undermine'
UNDERCUT = 'undercut'
REBUT = 'rebut'
SUPPORT = 'support'
def __str__(self):
return str(self.value)
class Attitudes(Enum):
AGREE = 'agree'
DISAGREE = 'disagree'
DONT_KNOW = 'dontknow'
def __str__(self):
return str(self.value)
relation_mapper = {relation.value: relation for relation in Relations}
attitude_mapper = {attitude.value: attitude for attitude in Attitudes}
def get_global_url():
"""
Returns the global url of the project, based on the ENV
:return: String
"""
return os.environ.get('URL', '')
def get_changelog(no):
"""
Returns the 'no' last entries from the changelog
:param no: int
:return: list
"""
path = str(os.path.realpath(__file__ + '/../../CHANGELOG.md'))
lines = [line.rstrip('\n').strip() for line in open(path) if len(line.rstrip('\n').strip()) > 0]
changelog = []
title = ''
body = []
for l in lines:
if l.startswith('#'):
if len(title) > 0:
changelog.append({'title': title, 'body': body})
body = []
title = l.replace('### ', '')
else:
body.append(l.replace('- ', ''))
return changelog[0:no]
def is_development_mode(registry):
"""
Returns true, if mode is set to development in current ini file.
:param registry: request.registry
:return: Boolean
"""
if 'mode' in registry.settings:
return registry.settings['mode'].lower() == 'development'
return False
def usage_of_modern_bubbles(registry):
"""
Returns true, if modern bubbles are set in the current ini file.
:param registry: request.registry
:return: Boolean
"""
if 'modern_bubbles' in registry.settings:
return registry.settings['modern_bubbles'].lower() == 'true'
return False
def usage_of_matomo(registry):
"""
Returns true, if matomo is set in the current ini file.
:param registry: request.registry
:return: Boolean
"""
if 'mode' in registry.settings:
return registry.settings['usage_of_matomo'].lower() == 'true'
return False
def escape_string(text):
"""
Escapes all html special chars.
:param text: string
:return: html.escape(text)
"""
return escape(text)
def get_discussion_language(matchdict, params, session, current_issue_uid=None):
"""
Returns Language.ui_locales
CALL AFTER issue_handler.get_id_of_slug(..)!
:param matchdict: matchdict of the current request
:param params: params of the current request
:param session: session of the current request
:param current_issue_uid: uid
:return:
"""
if not current_issue_uid:
current_issue = DBDiscussionSession.query(Issue).filter(Issue.is_disabled == False,
Issue.is_private == False).first()
current_issue_uid = current_issue.uid if current_issue else None
# first matchdict, then params, then session, afterwards fallback
issue = matchdict['issue'] if 'issue' in matchdict \
else params['issue'] if 'issue' in params \
else session['issue'] if 'issue' in session \
else current_issue_uid
db_issue = DBDiscussionSession.query(Issue).get(issue)
return db_issue.lang if db_issue else 'en'
def python_datetime_pretty_print(ts, lang):
"""
Pretty print of a locale
:param ts: Timestamp
:param lang: ui_locales
:return: String
"""
formatter = '%b. %d.'
if lang == 'de':
try:
locale.setlocale(locale.LC_TIME, 'de_DE.UTF-8')
formatter = '%d. %b.'
except locale.Error:
locale.setlocale(locale.LC_TIME, 'en_US.UTF8')
return datetime.strptime(str(ts), '%Y-%m-%d').strftime(formatter)
def get_all_arguments_by_statement(statement_uid, include_disabled=False):
"""
Returns a list of all arguments where the statement is a conclusion or member of the premisegroup
:param statement_uid: Statement.uid
:param include_disabled: Boolean
:return: [Arguments]
"""
logger('DBAS.LIB', 'main {}, include_disabled {}'.format(statement_uid, include_disabled))
db_arguments = __get_arguments_of_conclusion(statement_uid, include_disabled)
arg_array = [arg for arg in db_arguments] if db_arguments else []
premises = DBDiscussionSession.query(Premise).filter_by(statement_uid=statement_uid)
if not include_disabled:
premises = premises.filter_by(is_disabled=False)
premises = premises.all()
for premise in premises:
arg_array += __get_argument_of_premisegroup(premise.premisegroup_uid, include_disabled)
db_undercuts = []
for arg in arg_array:
db_undercuts += __get_undercuts_of_argument(arg.uid, include_disabled)
db_undercutted_undercuts = []
for arg in db_undercuts:
db_undercutted_undercuts += __get_undercuts_of_argument(arg.uid, include_disabled)
arg_array = list(set(arg_array + db_undercuts + db_undercutted_undercuts))
logger('DBAS.LIB', 'returning arguments {}'.format([arg.uid for arg in arg_array]))
return arg_array if len(arg_array) > 0 else None
def __get_argument_of_premisegroup(premisegroup_uid, include_disabled):
"""
Returns all arguments with the given premisegroup
:param premisegroup_uid: PremisgGroup.uid
:param include_disabled: Boolean
:return: list of Arguments
"""
db_arguments = DBDiscussionSession.query(Argument).filter_by(premisegroup_uid=premisegroup_uid)
if not include_disabled:
db_arguments = db_arguments.filter_by(is_disabled=False)
return db_arguments.all() if db_arguments else []
def __get_undercuts_of_argument(argument_uid, include_disabled):
"""
Returns all undercuts fo the given argument
:param argument_uid: Argument.uid
:param include_disabled: boolean
:return: list of Arguments
"""
db_undercuts = DBDiscussionSession.query(Argument).filter_by(argument_uid=argument_uid)
if not include_disabled:
db_undercuts = db_undercuts.filter_by(is_disabled=False)
return db_undercuts.all() if db_undercuts else []
def __get_arguments_of_conclusion(statement_uid, include_disabled):
"""
Returns all arguments, where the statement is set as conclusion
:param statement_uid: Statement.uid
:param include_disabled: Boolean
:return: list of arguments
"""
db_arguments = DBDiscussionSession.query(Argument).filter_by(conclusion_uid=statement_uid)
if not include_disabled:
db_arguments = db_arguments.filter_by(is_disabled=False)
return db_arguments.all() if db_arguments else []
def get_all_arguments_with_text_by_statement_id(statement_uid):
"""
Given a statement_uid, it returns all arguments, which use this statement and adds
the corresponding text to it, which normally appears in the bubbles. The resulting
text depends on the provided language.
:param statement_uid: uid to a statement, which should be analyzed
:return: list of dictionaries containing some properties of these arguments
:rtype: list
"""
logger('DBAS.LIB', 'main ' + str(statement_uid))
arguments = get_all_arguments_by_statement(statement_uid)
results = []
if arguments:
results = [{'uid': arg.uid, 'text': get_text_for_argument_uid(arg.uid)} for arg in arguments]
return results
def get_all_arguments_with_text_and_url_by_statement_id(db_statement, urlmanager, color_statement=False,
is_jump=False):
"""
Given a statement_uid, it returns all arguments, which use this statement and adds
the corresponding text to it, which normally appears in the bubbles. The resulting
text depends on the provided language.
:param db_statement: Statement
:param urlmanager:
:param color_statement: True, if the statement (specified by the ID) should be colored
:return: list of dictionaries containing some properties of these arguments
:rtype: list
"""
logger('DBAS.LIB', 'main ' + str(db_statement.uid))
arguments = get_all_arguments_by_statement(db_statement.uid)
uids = [arg.uid for arg in arguments] if arguments else None
results = list()
sb = '<{} data-argumentation-type="position">'.format(tag_type) if color_statement else ''
se = '</{}>'.format(tag_type) if color_statement else ''
if not uids:
return []
uids.sort()
for uid in uids:
statement_text = db_statement.get_text()
attack_type = 'jump' if is_jump else ''
argument_text = get_text_for_argument_uid(uid, anonymous_style=True, attack_type=attack_type)
pos = argument_text.lower().find(statement_text.lower())
argument_text = argument_text[:pos] + sb + argument_text[pos:]
pos += len(statement_text) + len(sb)
argument_text = argument_text[:pos] + se + argument_text[pos:]
results.append({
'uid': uid,
'text': argument_text,
'url': urlmanager.get_url_for_jump(uid)
})
return results
def get_slug_by_statement_uid(uid):
"""
Returns slug for the given Issue.uid
:param uid: Issue.uid
:return: String
"""
db_statement = DBDiscussionSession.query(Statement).get(uid)
return resolve_issue_uid_to_slug(db_statement.issue_uid)
def get_text_for_argument_uid(uid, nickname=None, with_html_tag=False, start_with_intro=False, first_arg_by_user=False,
user_changed_opinion=False, rearrange_intro=False, colored_position=False,
attack_type=None, minimize_on_undercut=False, is_users_opinion=True,
anonymous_style=False, support_counter_argument=False):
"""
Returns current argument as string like "conclusion, because premise1 and premise2"
:param uid: Integer
:param with_html_tag: Boolean
:param start_with_intro: Boolean
:param first_arg_by_user: Boolean
:param user_changed_opinion: Boolean
:param rearrange_intro: Boolean
:param colored_position: Boolean
:param attack_type: String
:param minimize_on_undercut: Boolean
:param anonymous_style: Boolean
:param support_counter_argument: Boolean
:return: String
"""
logger('DBAS.LIB', 'main {}'.format(uid))
db_argument = DBDiscussionSession.query(Argument).get(uid)
if not db_argument:
return None
lang = db_argument.lang
_t = Translator(lang)
premisegroup_by_user = False
author_uid = None
db_user = DBDiscussionSession.query(User).filter_by(nickname=str(nickname)).first()
if db_user:
author_uid = db_user.uid
pgroup = DBDiscussionSession.query(PremiseGroup).get(db_argument.premisegroup_uid)
marked_argument = DBDiscussionSession.query(MarkedArgument).filter_by(
argument_uid=uid,
author_uid=db_user.uid).first()
premisegroup_by_user = pgroup.author_uid == db_user.uid or marked_argument is not None
# getting all argument id
arg_array = [db_argument]
while db_argument.argument_uid:
db_argument = DBDiscussionSession.query(Argument).get(db_argument.argument_uid)
arg_array.append(db_argument)
if attack_type == 'jump':
return __build_argument_for_jump(arg_array, with_html_tag)
if len(arg_array) == 1:
# build one argument only
return __build_single_argument(arg_array[0], rearrange_intro, with_html_tag, colored_position, attack_type, _t,
start_with_intro, is_users_opinion, anonymous_style, support_counter_argument,
author_uid)
else:
# get all pgroups and at last, the conclusion
return __build_nested_argument(arg_array, first_arg_by_user, user_changed_opinion, with_html_tag,
start_with_intro, minimize_on_undercut, anonymous_style, premisegroup_by_user,
_t)
def __build_argument_for_jump(arg_array: List[Argument], with_html_tag):
"""
Build tet for an argument, if we jump to this argument
:param arg_array: [Argument]
:param with_html_tag: Boolean
:return: String
"""
tag_premise = ('<' + tag_type + ' data-argumentation-type="attack">') if with_html_tag else ''
tag_conclusion = ('<' + tag_type + ' data-argumentation-type="argument">') if with_html_tag else ''
tag_end = ('</' + tag_type + '>') if with_html_tag else ''
lang = arg_array[0].lang
_t = Translator(lang)
if len(arg_array) == 1:
ret_value = __build_val_for_jump(arg_array[0], tag_premise, tag_conclusion, tag_end, _t)
elif len(arg_array) == 2:
ret_value = __build_val_for_undercut(arg_array, tag_premise, tag_conclusion, tag_end, _t)
else:
ret_value = __build_val_for_undercutted_undercut(arg_array, tag_premise, tag_conclusion, tag_end, _t)
return ret_value.replace(' ', ' ')
def __build_val_for_jump(db_argument, tag_premise, tag_conclusion, tag_end, _t):
premises = db_argument.get_premisegroup_text()
if premises[-1] != '.':
premises += '.'
conclusion = db_argument.get_conclusion_text()
because = _t.get(_.because).lower()
conclusion = tag_conclusion + conclusion + tag_end
premises = tag_premise + premises + tag_end
intro = (start_con + _t.get(_.isNotRight).lower() + end_tag) if not db_argument.is_supportive else ''
ret_value = '{} {} {} {}'.format(conclusion, intro, because, premises)
if _t.get_lang() == 'de':
intro = _t.get(_.itIsTrueThatAnonymous) if db_argument.is_supportive else _t.get(_.itIsFalseThatAnonymous)
intro = intro[0:1].upper() + intro[1:]
intro = (start_pro if db_argument.is_supportive else start_con) + intro + end_tag
ret_value = '{} {}, {} {}'.format(intro, conclusion, because, premises)
return ret_value
def __build_val_for_undercut(arg_array: List[Argument], tag_premise, tag_conclusion, tag_end, _t):
db_undercut = arg_array[0]
db_conclusion_argument = arg_array[1]
premise = db_undercut.get_premisegroup_text()
conclusion_premise = db_conclusion_argument.get_premisegroup_text()
conclusion_conclusion = db_conclusion_argument.get_conclusion_text()
premise = tag_premise + premise + tag_end
conclusion_premise = tag_conclusion + conclusion_premise + tag_end
conclusion_conclusion = tag_conclusion + conclusion_conclusion + tag_end
intro = (_t.get(_.statementAbout) + ' ') if _t.get_lang() == 'de' else ''
bind = start_con + _t.get(_.isNotAGoodReasonFor) + end_tag
because = _t.get(_.because)
ret_value = '{}{} {} {}. {} {}.'.format(intro, conclusion_premise, bind, conclusion_conclusion, because, premise)
return ret_value
def __build_val_for_undercutted_undercut(arg_array: List[Argument], tag_premise, tag_conclusion, tag_end, _t):
premise1 = arg_array[0].get_premisegroup_text()
premise2 = arg_array[1].get_premisegroup_text()
premise3 = arg_array[2].get_premisegroup_text()
conclusion = arg_array[2].get_conclusion_text()
bind = start_con + _t.get(_.isNotAGoodReasonAgainstArgument) + end_tag
because = _t.get(_.because)
seperator = ',' if _t.get_lang() == 'de' else ''
premise1 = tag_premise + premise1 + tag_end
premise2 = tag_conclusion + premise2 + tag_end
argument = '{}{} {} {}'.format(conclusion, seperator, because.lower(), premise3)
argument = tag_conclusion + argument + tag_end
# P2 ist kein guter Grund gegen das Argument, dass C weil P3. Weil P1
ret_value = '{} {} {}. {} {}'.format(premise2, bind, argument, because, premise1)
return ret_value
def __build_single_argument(db_argument: Argument, rearrange_intro: bool, with_html_tag: bool, colored_position: bool,
attack_type: str, _t: Translator, start_with_intro: bool, is_users_opinion: bool,
anonymous_style: bool, support_counter_argument: bool=False, author_uid=None):
"""
Build up argument text for a single argument
Please, do not touch this!
:param uid: Argument.uid
:param rearrange_intro: Boolean
:param with_html_tag: Boolean
:param colored_position: Boolean
:param attack_type: String
:param _t: Translator
:param start_with_intro: Boolean
:param is_users_opinion: Boolean
:param anonymous_style: Boolean
:param support_counter_argument: Boolean
:param author_uid: User.uid
:return: String
"""
premises_text = db_argument.get_premisegroup_text()
conclusion_text = db_argument.get_conclusion_text()
lang = db_argument.lang
if lang != 'de':
premises_text = premises_text[0:1].lower() + premises_text[1:] # pretty print
premises_text, conclusion_text, sb, sb_none, se = __get_tags_for_building_single_argument(with_html_tag,
attack_type,
colored_position,
premises_text,
conclusion_text)
marked_element = False
if author_uid:
db_marked = DBDiscussionSession.query(MarkedArgument).filter(MarkedArgument.argument_uid == db_argument.uid,
MarkedArgument.author_uid == author_uid).first()
marked_element = db_marked is not None
you_have_the_opinion_that = _t.get(_.youHaveTheOpinionThat).format('').strip()
if lang == 'de':
ret_value = __build_single_argument_for_de(_t, sb, se, you_have_the_opinion_that, start_with_intro,
anonymous_style, rearrange_intro, db_argument, attack_type, sb_none,
marked_element, lang, premises_text, conclusion_text,
is_users_opinion,
support_counter_argument)
else:
ret_value = __build_single_argument_for_en(_t, sb, se, you_have_the_opinion_that, marked_element,
conclusion_text,
premises_text, db_argument)
return ret_value.replace(' ', ' ')
def __get_tags_for_building_single_argument(with_html_tag, attack_type, colored_position, premises, conclusion):
sb_none = start_tag if with_html_tag else ''
se = end_tag if with_html_tag else ''
if attack_type not in ['dont_know', 'jump']:
sb = start_tag if with_html_tag else ''
if colored_position:
sb = start_position if with_html_tag else ''
if attack_type == Relations.UNDERMINE:
premises = sb + premises + se
else:
conclusion = sb + conclusion + se
else:
sb = start_argument if with_html_tag else ''
sb_tmp = start_attack if with_html_tag else ''
premises = sb + premises + se
conclusion = sb_tmp + conclusion + se
return premises, conclusion, sb, sb_none, se
def __build_single_argument_for_de(_t, sb, se, you_have_the_opinion_that, start_with_intro, anonymous_style,
rearrange_intro, db_argument, attack_type, sb_none, marked_element, lang,
premises, conclusion, is_users_opinion, support_counter_argument):
if start_with_intro and not anonymous_style:
intro = _t.get(_.itIsTrueThat) if db_argument.is_supportive else _t.get(_.itIsFalseThat)
if rearrange_intro:
intro = _t.get(_.itTrueIsThat) if db_argument.is_supportive else _t.get(_.itFalseIsThat)
ret_value = (sb_none if attack_type in ['dont_know'] else sb) + intro + se + ' '
elif is_users_opinion and not anonymous_style:
ret_value = sb_none
if support_counter_argument:
ret_value += _t.get(_.youAgreeWithThecounterargument)
elif marked_element:
ret_value += you_have_the_opinion_that
else:
ret_value += _t.get(_.youArgue)
ret_value += se + ' '
else:
tmp = _t.get(_.itIsTrueThatAnonymous if db_argument.is_supportive else _.itIsFalseThatAnonymous)
ret_value = sb_none + sb + tmp + se + ' '
ret_value += ' {}{}{} '.format(sb, _t.get(_.itIsNotRight), se) if not db_argument.is_supportive else ''
ret_value += conclusion
ret_value += ', ' if lang == 'de' else ' '
ret_value += sb_none + _t.get(_.because).lower() + se + ' ' + premises
return ret_value
def __build_single_argument_for_en(_t, sb, se, you_have_the_opinion_that, marked_element, conclusion, premises, db_arg):
tmp = sb + ' ' + _t.get(_.isNotRight).lower() + se + ', ' + _t.get(_.because).lower() + ' '
ret_value = (you_have_the_opinion_that + ' ' if marked_element else '') + conclusion + ' '
ret_value += _t.get(_.because).lower() if db_arg.is_supportive else tmp
ret_value += ' ' + premises
return ret_value
def __build_nested_argument(arg_array: List[Argument], first_arg_by_user, user_changed_opinion, with_html_tag,
start_with_intro, minimize_on_undercut, anonymous_style, premisegroup_by_user, _t):
"""
:param arg_array:
:param first_arg_by_user:
:param user_changed_opinion:
:param with_html_tag:
:param start_with_intro:
:param minimize_on_undercut:
:param anonymous_style:
:param premisegroup_by_user:
:param _t:
:return:
"""
# get all pgroups and at last, the conclusion
pgroups = []
supportive = []
arg_array = arg_array[::-1]
local_lang = arg_array[0].lang
# grepping all arguments in the chain
for db_argument in arg_array:
text = db_argument.get_premisegroup_text()
pgroups.append(text)
supportive.append(db_argument.is_supportive)
conclusion = arg_array[0].get_conclusion_text()
# html tags for framing
sb = start_position if with_html_tag else ''
se = end_tag if with_html_tag else ''
because = (', ' if local_lang == 'de' else ' ') + _t.get(_.because).lower() + ' '
if len(arg_array) % 2 is 0 and not first_arg_by_user and not anonymous_style: # system starts
ret_value = _t.get(_.earlierYouArguedThat if user_changed_opinion else _.otherUsersSaidThat) + ' '
tmp_users_opinion = True # user after system
elif not anonymous_style: # user starts
ret_value = (_t.get(_.soYourOpinionIsThat) + ': ') if start_with_intro else ''
tmp_users_opinion = False # system after user
conclusion = se + conclusion[0:1].upper() + conclusion[1:] # pretty print
else:
ret_value = _t.get(_.someoneArgued) + ' '
tmp_users_opinion = False
tmp = _t.get(_.itFalseIsThat) + ' ' if not supportive[0] else ''
ret_value += tmp + conclusion + because + pgroups[0] + '.'
del pgroups[0]
# just display the last premise group on undercuts, because the story is always saved in all bubbles
if minimize_on_undercut and not user_changed_opinion and len(pgroups) > 2:
return _t.get(_.butYouCounteredWith).strip() + ' ' + sb + pgroups[len(pgroups) - 1] + se + '.'
for i, pgroup in enumerate(pgroups):
ret_value += ' '
if tmp_users_opinion and not anonymous_style:
tmp = _.butYouCounteredWithArgument if premisegroup_by_user else _.butYouCounteredWithInterest
ret_value += _t.get(_.otherParticipantsConvincedYouThat if user_changed_opinion else tmp)
elif not anonymous_style:
ret_value += _t.get(_.youAgreeWithThatNow)
else:
ret_value += _t.get(_.otherUsersSaidThat) if i == 0 else _t.get(_.thenOtherUsersSaidThat)
ret_value += sb + ' ' + pgroups[i] + '.'
tmp_users_opinion = not tmp_users_opinion
return ret_value.replace(' ', ' ')
def get_text_for_premisegroup_uid(uid):
"""
Returns joined text of the premise group and the premise ids
:param uid: premisegroup_uid
:return: text, uids
"""
warnings.warn("Use PremiseGroup.get_text() instead.", DeprecationWarning)
db_premises = DBDiscussionSession.query(Premise).filter_by(premisegroup_uid=uid).join(Statement).all()
if len(db_premises) == 0:
return ''
texts = [premise.get_text() for premise in db_premises]
lang = DBDiscussionSession.query(Statement).get(db_premises[0].statements.uid).lang
_t = Translator(lang)
return ' {} '.format(_t.get(_.aand)).join(texts)
def get_text_for_statement_uid(uid: int, colored_position=False):
"""
Returns text of statement with given uid
:param uid: Statement.uid
:param colored_position: Boolean
:return: String
"""
warnings.warn("Use Statement.get_text() or Statement.get_html() instead.", DeprecationWarning)
if not isinstance(uid, int):
return None
db_statement = DBDiscussionSession.query(Statement).get(uid)
if not db_statement:
return None
db_textversion = DBDiscussionSession.query(TextVersion).order_by(TextVersion.uid.desc()).get(
db_statement.textversion_uid)
content = db_textversion.content
while content.endswith(('.', '?', '!')):
content = content[:-1]
sb, se = '', ''
if colored_position:
sb = '<{} data-argumentation-type="position">'.format(tag_type)
se = '</{}>'.format(tag_type)
return sb + content + se
def get_text_for_premise(uid: int, colored_position: bool = False):
"""
Returns text of premise with given uid
:param uid: Statement.uid
:param colored_position: Boolean
:return: String
"""
db_premise = DBDiscussionSession.query(Premise).get(uid)
if db_premise:
return db_premise.get_text(html=colored_position)
else:
return None
def get_text_for_conclusion(argument, start_with_intro=False, rearrange_intro=False, is_users_opinion=True):
"""
Check the arguments conclusion whether it is an statement or an argument and returns the text
:param argument: Argument
:param start_with_intro: Boolean
:param rearrange_intro: Boolean
:return: String
"""
if argument.argument_uid:
return get_text_for_argument_uid(argument.argument_uid, start_with_intro, rearrange_intro=rearrange_intro,
is_users_opinion=is_users_opinion)
else:
return argument.get_conclusion_text()
def resolve_issue_uid_to_slug(uid):
"""
Given the issue uid query database and return the correct slug of the issue.
:param uid: issue_uid
:type uid: int
:return: Slug of issue
:rtype: str
"""
issue = DBDiscussionSession.query(Issue).get(uid)
return issue.slug if issue else None
def get_all_attacking_arg_uids_from_history(history):
"""
Returns all arguments of the history, which attacked the user
:param history: String
:return: [Arguments.uid]
:rtype: list
"""
try:
splitted_history = history.split('-')
uids = []
for part in splitted_history:
if 'reaction' in part:
parts = part.split('/')
pos = parts.index('reaction')
uids.append(part.split('/')[pos + 3])
return uids
except AttributeError:
return []
def get_user_by_private_or_public_nickname(nickname):
"""
Gets the user by his (public) nickname, based on the option, whether his nickname is public or not
:param nickname: Nickname of the user
:return: Current user or None
"""
db_user = get_user_by_case_insensitive_nickname(nickname)
db_public_user = get_user_by_case_insensitive_public_nickname(nickname)
uid = 0
if db_user:
uid = db_user.uid
elif db_public_user:
uid = db_public_user.uid
db_settings = DBDiscussionSession.query(Settings).filter_by(author_uid=uid).first()
if not db_settings:
return None
if db_settings.should_show_public_nickname and db_user:
return db_user
elif not db_settings.should_show_public_nickname and db_public_user:
return db_public_user
return None
def get_user_by_case_insensitive_nickname(nickname):
"""
Returns user with given nickname
:param nickname: String
:return: User or None
"""
return DBDiscussionSession.query(User).filter(func.lower(User.nickname) == func.lower(nickname)).first()
def get_user_by_case_insensitive_public_nickname(public_nickname):
"""
Returns user with given public nickname
:param public_nickname: String
:return: User or None
"""
return DBDiscussionSession.query(User).filter(
func.lower(User.public_nickname) == func.lower(public_nickname)).first()
def pretty_print_options(message):
"""
Some modifications for pretty printing.
Use uppercase for first letter in text and a single dot for the end if there isn't one already.
:param message: String
:return: String
"""
# check for html
if message[0:1] == '<':
pos = message.index('>')
message = message[0:pos + 1] + message[pos + 1:pos + 2].upper() + message[pos + 2:]
else:
message = message[0:1].upper() + message[1:]
# check for html
if message[-1] == '>':
pos = message.rfind('<')
if message[pos - 1:pos] not in ['.', '?', '!']:
message = message[0:pos] + '.' + message[pos:]
elif not message.endswith(tuple(['.', '?', '!'])) and id is not 'now':
message += '.'
return message
def create_speechbubble_dict(bubble_type: BubbleTypes, is_markable: bool=False, is_author: bool=False, uid: str='',
bubble_url: str= '', content: str= '', omit_bubble_url: bool=False, omit_vote_info: bool=False,
argument_uid: int=None, statement_uid: int=None, is_supportive: bool=False,
nickname: str='anonymous', lang: str='en', is_users_opinion: bool=False, other_author: User=None):
"""
Creates an dictionary which includes every information needed for a bubble.
:param bubble_type: BubbleTypes
:param is_markable: True if the content itself could be flagged
:param is_author: True if the current user is author of the content
:param uid: Identifier for the bubble
:param bubble_url: URL for the click event of the bubble
:param content: Text of the bubble
:param omit_bubble_url: True if the bubble should have a link
:param omit_vote_info: True if the bubble have the little, grey information text
:param argument_uid: Argument.uid
:param statement_uid: Statement.uid
:param is_supportive: Boolean
:param nickname: String
:param omit_bubble_url: Boolean
:param lang: is_users_opinion
:param is_users_opinion: Boolean
:return: dict()
"""
gravatar_link = get_global_url() + '/static/images/icon.png'
profile = None
if uid is not 'now':
content = pretty_print_options(content)
if bubble_type is BubbleTypes.SYSTEM and other_author is not None:
gravatar_link = get_profile_picture(other_author, 25)
profile = '/user/{}'.format(other_author.uid),
# check for users opinion
if bubble_type is BubbleTypes.USER and nickname != 'anonymous':
db_user = DBDiscussionSession.query(User).filter_by(nickname=nickname).first()
db_marked = None
gravatar_link = get_profile_picture(db_user, 25)
if argument_uid is not None and db_user is not None:
db_marked = DBDiscussionSession.query(MarkedArgument).filter(
MarkedArgument.argument_uid == argument_uid,
MarkedArgument.author_uid == db_user.uid).first()
if statement_uid is not None and db_user is not None:
db_marked = DBDiscussionSession.query(MarkedStatement).filter(
MarkedStatement.statement_uid == statement_uid,
MarkedStatement.author_uid == db_user.uid).first()
is_users_opinion = db_marked is not None
speech = {
'is_user': bubble_type is BubbleTypes.USER,
'is_system': bubble_type is BubbleTypes.SYSTEM,
'is_status': bubble_type is BubbleTypes.STATUS,
'is_info': bubble_type is BubbleTypes.INFO,
'is_markable': is_markable,
'is_author': is_author,
'id': uid if len(str(uid)) > 0 else uuid4().hex,
'bubble_url': bubble_url,
'message': content,
'omit_bubble_url': omit_bubble_url,
'omit_vote_info': omit_vote_info,
'data_type': 'argument' if argument_uid else 'statement' if statement_uid else 'None',
'data_argument_uid': argument_uid,
'data_statement_uid': statement_uid,
'data_is_supportive': is_supportive,
'is_users_opinion': is_users_opinion,
'enemy': {
'avatar': gravatar_link,
'profile': profile,
'available': profile is not None
}
}
votecount_keys = __get_text_for_click_and_mark_count(nickname, bubble_type is BubbleTypes.USER, argument_uid,
statement_uid, speech, lang)
speech['votecounts_message'] = votecount_keys[speech['votecounts']]
return speech
def __get_text_for_click_and_mark_count(nickname, is_user, argument_uid, statement_uid, speech, lang):
"""
Build text for a bubble, how many other participants have the same interest?
:param nickname: User.nickname
:param is_user: boolean
:param argument_uid: Argument.uid
:param statement_uid: Statement.uid
:param speech: dict()
:param lang: ui_locales
:return: [String]
"""
if not nickname:
nickname = 'anonymous'
db_user = DBDiscussionSession.query(User).filter_by(nickname=nickname).first()
if not db_user:
db_user = DBDiscussionSession.query(User).filter_by(nickname='anonymous').first()
db_clicks, db_marks = __get_clicks_and_marks(argument_uid, statement_uid, db_user)
_t = Translator(lang)
speech['votecounts'] = len(db_clicks) if db_clicks else 0
if db_marks:
speech['votecounts'] += len(db_marks)
votecount_keys = defaultdict(lambda: "{} {}.".format(speech['votecounts'], _t.get(_.voteCountTextMore)))
if is_user and db_user.gender == 'm':
gender_key = _.voteCountTextFirstM
elif is_user and db_user.gender == 'f':
gender_key = _.voteCountTextFirstF
else:
gender_key = _.voteCountTextFirst
votecount_keys[0] = '{}.'.format(_t.get(gender_key))
votecount_keys[1] = _t.get(_.voteCountTextOneOther) + '.'
return votecount_keys
def __get_clicks_and_marks(argument_uid, statement_uid, db_user):
db_clicks = None
db_marks = None
if argument_uid:
db_clicks = DBDiscussionSession.query(ClickedArgument). \
filter(ClickedArgument.argument_uid == argument_uid,
ClickedArgument.is_up_vote == True,
ClickedArgument.is_valid,
ClickedArgument.author_uid != db_user.uid).all()
db_marks = DBDiscussionSession.query(MarkedArgument). \
filter(MarkedArgument.argument_uid == argument_uid,
MarkedArgument.author_uid != db_user.uid).all()
elif statement_uid:
db_clicks = DBDiscussionSession.query(ClickedStatement). \
filter(ClickedStatement.statement_uid == statement_uid,
ClickedStatement.is_up_vote == True,
ClickedStatement.is_valid,
ClickedStatement.author_uid != db_user.uid).all()
db_marks = DBDiscussionSession.query(MarkedStatement). \
filter(MarkedStatement.statement_uid == statement_uid,
MarkedStatement.author_uid != db_user.uid).all()
return db_clicks, db_marks
def is_argument_disabled_due_to_disabled_statements(argument):
"""
Returns true if any involved statement is disabled.
:param argument: Argument
:return: Boolean
"""
if argument.conclusion_uid is None:
# check conclusion of given arguments conclusion
db_argument = DBDiscussionSession.query(Argument).get(argument.argument_uid)
conclusion = DBDiscussionSession(Statement).get(db_argument.conclusion_uid)
if conclusion.is_disabled:
return True
# check premisegroup of given arguments conclusion
premises = __get_all_premises_of_argument(db_argument)
for premise in premises:
if premise.statements.is_disabled:
return True
else:
# check conclusion of given argument
print(argument.conclusion_uid)
conclusion = DBDiscussionSession.query(Statement).get(argument.conclusion_uid)
if conclusion.is_disabled:
return True
# check premisegroup of given argument
premises = __get_all_premises_of_argument(argument)
for premise in premises:
if premise.statements.is_disabled:
return True
return False
def is_author_of_statement(db_user: User, statement_uid: int) -> bool:
"""
Is the user with given nickname author of the statement?
:param db_user: User
:param statement_uid: Statement.uid
:return: Boolean
"""
db_user = db_user if db_user and db_user.nickname != nick_of_anonymous_user else None
if not db_user:
return False
db_textversion = DBDiscussionSession.query(TextVersion).filter_by(statement_uid=statement_uid).order_by(
TextVersion.uid.asc()).first() # TODO #432
if not db_textversion:
return False
return db_textversion.author_uid == db_user.uid
def is_author_of_argument(db_user: User, argument_uid: int) -> bool:
"""
Is the user with given nickname author of the argument?
:param db_user: User
:param argument_uid: Argument.uid
:return: Boolean
"""
db_user = db_user if db_user and db_user.nickname != nick_of_anonymous_user else None
if not db_user:
return False
db_argument = DBDiscussionSession.query(Argument).filter(Argument.uid == argument_uid,
Argument.author_uid == db_user.uid).first()
return True if db_argument else False
def __get_all_premises_of_argument(argument):
"""
Returns list with all premises of the argument.
:param argument: Argument
:return: list()
"""
ret_list = []
db_premises = DBDiscussionSession.query(Premise).filter_by(premisegroup_uid=argument.premisegroup_uid).join(
Statement).all()
for premise in db_premises:
ret_list.append(premise)
return ret_list
def get_profile_picture(user: User, size: int = 80, ignore_privacy_settings: bool = False):
"""
Returns the url to a https://secure.gravatar.com picture, with the option wavatar and size of 80px
:param user: User
:param size: Integer, default 80
:param ignore_privacy_settings:
:return: String
"""
additional_id = ''
if user and isinstance(user, User):
additional_id = '' if user.settings.should_show_public_nickname or ignore_privacy_settings else 'x'
return __get_gravatar(user, additional_id, size)
def get_public_profile_picture(user: User, size: int = 80):
"""
Returns the url to a https://secure.gravatar.com picture, with the option wavatar and size of 80px
If the user doesn't want an public profile, an anonymous image will be returned
:param user: User
:param size: Integer, default 80
:return: String
"""
additional_id = ''
if user.settings.should_show_public_nickname:
additional_id = 'x'
if len(str(user.oauth_provider)) > 0:
additional_id = '{}{}'.format(user.oauth_provider, user.oauth_provider_id)
return __get_gravatar(user, additional_id, size)
def __get_gravatar(user, additional_id, size):
if user:
if str(user.email) == 'None':
email = (user.nickname + additional_id).encode('utf-8')
else:
email = (user.email + additional_id).encode('utf-8')
else:
email = 'unknown'.encode('utf-8')
gravatar_url = 'https://secure.gravatar.com/avatar/{}?'.format(hashlib.md5(email.lower()).hexdigest())
gravatar_url += parse.urlencode({'d': 'wavatar', 's': str(size)})
return gravatar_url
def get_author_data(uid, gravatar_on_right_side=True, linked_with_users_page=True, profile_picture_size=20):
"""
Returns a-tag with gravatar of current author and users page as href
:param uid: Uid of the author
:param gravatar_on_right_side: True, if the gravatar is on the right of authors name
:param linked_with_users_page: True, if the text is a link to the authors site
:param profile_picture_size: Integer
:return: HTML-String
"""
db_user = DBDiscussionSession.query(User).get(int(uid))
if not db_user:
return None, 'Missing author with uid ' + str(uid), False
nick = db_user.global_nickname
img_src = get_profile_picture(db_user, profile_picture_size)
link_begin = ''
link_end = ''
if linked_with_users_page:
link_begin = '<a href="/user/{}" title="{}">'.format(db_user.uid, nick)
link_end = '</a>'
side = 'left' if gravatar_on_right_side else 'right'
img = '<img class="img-circle" src="{}" style="padding-{}: 0.3em">'.format(img_src, side)
if gravatar_on_right_side:
return db_user, '{}{}{}{}'.format(link_begin, nick, img, link_end), True
else:
return db_user, '{}{}{}{}'.format(link_begin, img, nick, link_end), True
def bubbles_already_last_in_list(bubble_list, bubbles):
"""
Are the given bubbles already at the end of the bubble list
:param bubble_list: list of Bubbles
:param bubbles: list of bubbles
:return: Boolean
"""
if isinstance(bubbles, list):
length = len(bubbles)
else:
length = 1
bubbles = [bubbles]
if len(bubble_list) < length:
return False
for bubble in bubbles:
if 'message' not in bubble:
return False
start_index = - length
is_already_in = False
for bubble in bubbles:
last = bubble_list[start_index]
if 'message' not in last or 'message' not in bubble:
return False
text1 = unhtmlify(last['message'].lower()).strip()
text2 = unhtmlify(bubble['message'].lower()).strip()
is_already_in = is_already_in or (text1 == text2)
start_index += 1
return is_already_in
def unhtmlify(html):
"""
Remove html-tags and unescape encoded html-entities.
:param html: Evil-string containing html
:return:
"""
return unescape(re.sub(r'<.*?>', '', html))
|
flexible
|
{
"blob_id": "10a9437453371bd7472e93af1026c778b7983cf8",
"index": 1137,
"step-1": "<mask token>\n\n\nclass BubbleTypes(Enum):\n USER = auto()\n SYSTEM = auto()\n STATUS = auto()\n INFO = auto()\n\n def __str__(self):\n return str(self.value)\n\n\nclass Relations(Enum):\n UNDERMINE = 'undermine'\n UNDERCUT = 'undercut'\n REBUT = 'rebut'\n SUPPORT = 'support'\n\n def __str__(self):\n return str(self.value)\n\n\nclass Attitudes(Enum):\n AGREE = 'agree'\n DISAGREE = 'disagree'\n DONT_KNOW = 'dontknow'\n\n def __str__(self):\n return str(self.value)\n\n\n<mask token>\n\n\ndef escape_string(text):\n \"\"\"\n Escapes all html special chars.\n\n :param text: string\n :return: html.escape(text)\n \"\"\"\n return escape(text)\n\n\ndef get_discussion_language(matchdict, params, session, current_issue_uid=None\n ):\n \"\"\"\n Returns Language.ui_locales\n CALL AFTER issue_handler.get_id_of_slug(..)!\n\n :param matchdict: matchdict of the current request\n :param params: params of the current request\n :param session: session of the current request\n :param current_issue_uid: uid\n :return:\n \"\"\"\n if not current_issue_uid:\n current_issue = DBDiscussionSession.query(Issue).filter(Issue.\n is_disabled == False, Issue.is_private == False).first()\n current_issue_uid = current_issue.uid if current_issue else None\n issue = matchdict['issue'] if 'issue' in matchdict else params['issue'\n ] if 'issue' in params else session['issue'\n ] if 'issue' in session else current_issue_uid\n db_issue = DBDiscussionSession.query(Issue).get(issue)\n return db_issue.lang if db_issue else 'en'\n\n\ndef python_datetime_pretty_print(ts, lang):\n \"\"\"\n Pretty print of a locale\n\n :param ts: Timestamp\n :param lang: ui_locales\n :return: String\n \"\"\"\n formatter = '%b. %d.'\n if lang == 'de':\n try:\n locale.setlocale(locale.LC_TIME, 'de_DE.UTF-8')\n formatter = '%d. %b.'\n except locale.Error:\n locale.setlocale(locale.LC_TIME, 'en_US.UTF8')\n return datetime.strptime(str(ts), '%Y-%m-%d').strftime(formatter)\n\n\n<mask token>\n\n\ndef __get_undercuts_of_argument(argument_uid, include_disabled):\n \"\"\"\n Returns all undercuts fo the given argument\n\n :param argument_uid: Argument.uid\n :param include_disabled: boolean\n :return: list of Arguments\n \"\"\"\n db_undercuts = DBDiscussionSession.query(Argument).filter_by(argument_uid\n =argument_uid)\n if not include_disabled:\n db_undercuts = db_undercuts.filter_by(is_disabled=False)\n return db_undercuts.all() if db_undercuts else []\n\n\n<mask token>\n\n\ndef get_all_arguments_with_text_by_statement_id(statement_uid):\n \"\"\"\n Given a statement_uid, it returns all arguments, which use this statement and adds\n the corresponding text to it, which normally appears in the bubbles. The resulting\n text depends on the provided language.\n\n :param statement_uid: uid to a statement, which should be analyzed\n :return: list of dictionaries containing some properties of these arguments\n :rtype: list\n \"\"\"\n logger('DBAS.LIB', 'main ' + str(statement_uid))\n arguments = get_all_arguments_by_statement(statement_uid)\n results = []\n if arguments:\n results = [{'uid': arg.uid, 'text': get_text_for_argument_uid(arg.\n uid)} for arg in arguments]\n return results\n\n\n<mask token>\n\n\ndef get_slug_by_statement_uid(uid):\n \"\"\"\n Returns slug for the given Issue.uid\n\n :param uid: Issue.uid\n :return: String\n \"\"\"\n db_statement = DBDiscussionSession.query(Statement).get(uid)\n return resolve_issue_uid_to_slug(db_statement.issue_uid)\n\n\ndef get_text_for_argument_uid(uid, nickname=None, with_html_tag=False,\n start_with_intro=False, first_arg_by_user=False, user_changed_opinion=\n False, rearrange_intro=False, colored_position=False, attack_type=None,\n minimize_on_undercut=False, is_users_opinion=True, anonymous_style=\n False, support_counter_argument=False):\n \"\"\"\n Returns current argument as string like \"conclusion, because premise1 and premise2\"\n\n :param uid: Integer\n :param with_html_tag: Boolean\n :param start_with_intro: Boolean\n :param first_arg_by_user: Boolean\n :param user_changed_opinion: Boolean\n :param rearrange_intro: Boolean\n :param colored_position: Boolean\n :param attack_type: String\n :param minimize_on_undercut: Boolean\n :param anonymous_style: Boolean\n :param support_counter_argument: Boolean\n :return: String\n \"\"\"\n logger('DBAS.LIB', 'main {}'.format(uid))\n db_argument = DBDiscussionSession.query(Argument).get(uid)\n if not db_argument:\n return None\n lang = db_argument.lang\n _t = Translator(lang)\n premisegroup_by_user = False\n author_uid = None\n db_user = DBDiscussionSession.query(User).filter_by(nickname=str(nickname)\n ).first()\n if db_user:\n author_uid = db_user.uid\n pgroup = DBDiscussionSession.query(PremiseGroup).get(db_argument.\n premisegroup_uid)\n marked_argument = DBDiscussionSession.query(MarkedArgument).filter_by(\n argument_uid=uid, author_uid=db_user.uid).first()\n premisegroup_by_user = (pgroup.author_uid == db_user.uid or \n marked_argument is not None)\n arg_array = [db_argument]\n while db_argument.argument_uid:\n db_argument = DBDiscussionSession.query(Argument).get(db_argument.\n argument_uid)\n arg_array.append(db_argument)\n if attack_type == 'jump':\n return __build_argument_for_jump(arg_array, with_html_tag)\n if len(arg_array) == 1:\n return __build_single_argument(arg_array[0], rearrange_intro,\n with_html_tag, colored_position, attack_type, _t,\n start_with_intro, is_users_opinion, anonymous_style,\n support_counter_argument, author_uid)\n else:\n return __build_nested_argument(arg_array, first_arg_by_user,\n user_changed_opinion, with_html_tag, start_with_intro,\n minimize_on_undercut, anonymous_style, premisegroup_by_user, _t)\n\n\n<mask token>\n\n\ndef __build_val_for_jump(db_argument, tag_premise, tag_conclusion, tag_end, _t\n ):\n premises = db_argument.get_premisegroup_text()\n if premises[-1] != '.':\n premises += '.'\n conclusion = db_argument.get_conclusion_text()\n because = _t.get(_.because).lower()\n conclusion = tag_conclusion + conclusion + tag_end\n premises = tag_premise + premises + tag_end\n intro = start_con + _t.get(_.isNotRight).lower(\n ) + end_tag if not db_argument.is_supportive else ''\n ret_value = '{} {} {} {}'.format(conclusion, intro, because, premises)\n if _t.get_lang() == 'de':\n intro = _t.get(_.itIsTrueThatAnonymous\n ) if db_argument.is_supportive else _t.get(_.itIsFalseThatAnonymous\n )\n intro = intro[0:1].upper() + intro[1:]\n intro = (start_pro if db_argument.is_supportive else start_con\n ) + intro + end_tag\n ret_value = '{} {}, {} {}'.format(intro, conclusion, because, premises)\n return ret_value\n\n\n<mask token>\n\n\ndef __build_nested_argument(arg_array: List[Argument], first_arg_by_user,\n user_changed_opinion, with_html_tag, start_with_intro,\n minimize_on_undercut, anonymous_style, premisegroup_by_user, _t):\n \"\"\"\n\n :param arg_array:\n :param first_arg_by_user:\n :param user_changed_opinion:\n :param with_html_tag:\n :param start_with_intro:\n :param minimize_on_undercut:\n :param anonymous_style:\n :param premisegroup_by_user:\n :param _t:\n :return:\n \"\"\"\n pgroups = []\n supportive = []\n arg_array = arg_array[::-1]\n local_lang = arg_array[0].lang\n for db_argument in arg_array:\n text = db_argument.get_premisegroup_text()\n pgroups.append(text)\n supportive.append(db_argument.is_supportive)\n conclusion = arg_array[0].get_conclusion_text()\n sb = start_position if with_html_tag else ''\n se = end_tag if with_html_tag else ''\n because = (', ' if local_lang == 'de' else ' ') + _t.get(_.because).lower(\n ) + ' '\n if len(arg_array\n ) % 2 is 0 and not first_arg_by_user and not anonymous_style:\n ret_value = _t.get(_.earlierYouArguedThat if user_changed_opinion else\n _.otherUsersSaidThat) + ' '\n tmp_users_opinion = True\n elif not anonymous_style:\n ret_value = _t.get(_.soYourOpinionIsThat\n ) + ': ' if start_with_intro else ''\n tmp_users_opinion = False\n conclusion = se + conclusion[0:1].upper() + conclusion[1:]\n else:\n ret_value = _t.get(_.someoneArgued) + ' '\n tmp_users_opinion = False\n tmp = _t.get(_.itFalseIsThat) + ' ' if not supportive[0] else ''\n ret_value += tmp + conclusion + because + pgroups[0] + '.'\n del pgroups[0]\n if minimize_on_undercut and not user_changed_opinion and len(pgroups) > 2:\n return _t.get(_.butYouCounteredWith).strip() + ' ' + sb + pgroups[\n len(pgroups) - 1] + se + '.'\n for i, pgroup in enumerate(pgroups):\n ret_value += ' '\n if tmp_users_opinion and not anonymous_style:\n tmp = (_.butYouCounteredWithArgument if premisegroup_by_user else\n _.butYouCounteredWithInterest)\n ret_value += _t.get(_.otherParticipantsConvincedYouThat if\n user_changed_opinion else tmp)\n elif not anonymous_style:\n ret_value += _t.get(_.youAgreeWithThatNow)\n else:\n ret_value += _t.get(_.otherUsersSaidThat) if i == 0 else _t.get(_\n .thenOtherUsersSaidThat)\n ret_value += sb + ' ' + pgroups[i] + '.'\n tmp_users_opinion = not tmp_users_opinion\n return ret_value.replace(' ', ' ')\n\n\ndef get_text_for_premisegroup_uid(uid):\n \"\"\"\n Returns joined text of the premise group and the premise ids\n\n :param uid: premisegroup_uid\n :return: text, uids\n \"\"\"\n warnings.warn('Use PremiseGroup.get_text() instead.', DeprecationWarning)\n db_premises = DBDiscussionSession.query(Premise).filter_by(premisegroup_uid\n =uid).join(Statement).all()\n if len(db_premises) == 0:\n return ''\n texts = [premise.get_text() for premise in db_premises]\n lang = DBDiscussionSession.query(Statement).get(db_premises[0].\n statements.uid).lang\n _t = Translator(lang)\n return ' {} '.format(_t.get(_.aand)).join(texts)\n\n\n<mask token>\n\n\ndef get_text_for_premise(uid: int, colored_position: bool=False):\n \"\"\"\n Returns text of premise with given uid\n\n :param uid: Statement.uid\n :param colored_position: Boolean\n :return: String\n \"\"\"\n db_premise = DBDiscussionSession.query(Premise).get(uid)\n if db_premise:\n return db_premise.get_text(html=colored_position)\n else:\n return None\n\n\ndef get_text_for_conclusion(argument, start_with_intro=False,\n rearrange_intro=False, is_users_opinion=True):\n \"\"\"\n Check the arguments conclusion whether it is an statement or an argument and returns the text\n\n :param argument: Argument\n :param start_with_intro: Boolean\n :param rearrange_intro: Boolean\n :return: String\n \"\"\"\n if argument.argument_uid:\n return get_text_for_argument_uid(argument.argument_uid,\n start_with_intro, rearrange_intro=rearrange_intro,\n is_users_opinion=is_users_opinion)\n else:\n return argument.get_conclusion_text()\n\n\n<mask token>\n\n\ndef get_user_by_private_or_public_nickname(nickname):\n \"\"\"\n Gets the user by his (public) nickname, based on the option, whether his nickname is public or not\n\n :param nickname: Nickname of the user\n :return: Current user or None\n \"\"\"\n db_user = get_user_by_case_insensitive_nickname(nickname)\n db_public_user = get_user_by_case_insensitive_public_nickname(nickname)\n uid = 0\n if db_user:\n uid = db_user.uid\n elif db_public_user:\n uid = db_public_user.uid\n db_settings = DBDiscussionSession.query(Settings).filter_by(author_uid=uid\n ).first()\n if not db_settings:\n return None\n if db_settings.should_show_public_nickname and db_user:\n return db_user\n elif not db_settings.should_show_public_nickname and db_public_user:\n return db_public_user\n return None\n\n\ndef get_user_by_case_insensitive_nickname(nickname):\n \"\"\"\n Returns user with given nickname\n\n :param nickname: String\n :return: User or None\n \"\"\"\n return DBDiscussionSession.query(User).filter(func.lower(User.nickname) ==\n func.lower(nickname)).first()\n\n\n<mask token>\n\n\ndef __get_text_for_click_and_mark_count(nickname, is_user, argument_uid,\n statement_uid, speech, lang):\n \"\"\"\n Build text for a bubble, how many other participants have the same interest?\n\n :param nickname: User.nickname\n :param is_user: boolean\n :param argument_uid: Argument.uid\n :param statement_uid: Statement.uid\n :param speech: dict()\n :param lang: ui_locales\n :return: [String]\n \"\"\"\n if not nickname:\n nickname = 'anonymous'\n db_user = DBDiscussionSession.query(User).filter_by(nickname=nickname\n ).first()\n if not db_user:\n db_user = DBDiscussionSession.query(User).filter_by(nickname=\n 'anonymous').first()\n db_clicks, db_marks = __get_clicks_and_marks(argument_uid,\n statement_uid, db_user)\n _t = Translator(lang)\n speech['votecounts'] = len(db_clicks) if db_clicks else 0\n if db_marks:\n speech['votecounts'] += len(db_marks)\n votecount_keys = defaultdict(lambda : '{} {}.'.format(speech[\n 'votecounts'], _t.get(_.voteCountTextMore)))\n if is_user and db_user.gender == 'm':\n gender_key = _.voteCountTextFirstM\n elif is_user and db_user.gender == 'f':\n gender_key = _.voteCountTextFirstF\n else:\n gender_key = _.voteCountTextFirst\n votecount_keys[0] = '{}.'.format(_t.get(gender_key))\n votecount_keys[1] = _t.get(_.voteCountTextOneOther) + '.'\n return votecount_keys\n\n\ndef __get_clicks_and_marks(argument_uid, statement_uid, db_user):\n db_clicks = None\n db_marks = None\n if argument_uid:\n db_clicks = DBDiscussionSession.query(ClickedArgument).filter(\n ClickedArgument.argument_uid == argument_uid, ClickedArgument.\n is_up_vote == True, ClickedArgument.is_valid, ClickedArgument.\n author_uid != db_user.uid).all()\n db_marks = DBDiscussionSession.query(MarkedArgument).filter(\n MarkedArgument.argument_uid == argument_uid, MarkedArgument.\n author_uid != db_user.uid).all()\n elif statement_uid:\n db_clicks = DBDiscussionSession.query(ClickedStatement).filter(\n ClickedStatement.statement_uid == statement_uid, \n ClickedStatement.is_up_vote == True, ClickedStatement.is_valid,\n ClickedStatement.author_uid != db_user.uid).all()\n db_marks = DBDiscussionSession.query(MarkedStatement).filter(\n MarkedStatement.statement_uid == statement_uid, MarkedStatement\n .author_uid != db_user.uid).all()\n return db_clicks, db_marks\n\n\ndef is_argument_disabled_due_to_disabled_statements(argument):\n \"\"\"\n Returns true if any involved statement is disabled.\n\n :param argument: Argument\n :return: Boolean\n \"\"\"\n if argument.conclusion_uid is None:\n db_argument = DBDiscussionSession.query(Argument).get(argument.\n argument_uid)\n conclusion = DBDiscussionSession(Statement).get(db_argument.\n conclusion_uid)\n if conclusion.is_disabled:\n return True\n premises = __get_all_premises_of_argument(db_argument)\n for premise in premises:\n if premise.statements.is_disabled:\n return True\n else:\n print(argument.conclusion_uid)\n conclusion = DBDiscussionSession.query(Statement).get(argument.\n conclusion_uid)\n if conclusion.is_disabled:\n return True\n premises = __get_all_premises_of_argument(argument)\n for premise in premises:\n if premise.statements.is_disabled:\n return True\n return False\n\n\ndef is_author_of_statement(db_user: User, statement_uid: int) ->bool:\n \"\"\"\n Is the user with given nickname author of the statement?\n\n :param db_user: User\n :param statement_uid: Statement.uid\n :return: Boolean\n \"\"\"\n db_user = (db_user if db_user and db_user.nickname !=\n nick_of_anonymous_user else None)\n if not db_user:\n return False\n db_textversion = DBDiscussionSession.query(TextVersion).filter_by(\n statement_uid=statement_uid).order_by(TextVersion.uid.asc()).first()\n if not db_textversion:\n return False\n return db_textversion.author_uid == db_user.uid\n\n\n<mask token>\n\n\ndef get_profile_picture(user: User, size: int=80, ignore_privacy_settings:\n bool=False):\n \"\"\"\n Returns the url to a https://secure.gravatar.com picture, with the option wavatar and size of 80px\n\n :param user: User\n :param size: Integer, default 80\n :param ignore_privacy_settings:\n :return: String\n \"\"\"\n additional_id = ''\n if user and isinstance(user, User):\n additional_id = ('' if user.settings.should_show_public_nickname or\n ignore_privacy_settings else 'x')\n return __get_gravatar(user, additional_id, size)\n\n\n<mask token>\n\n\ndef get_author_data(uid, gravatar_on_right_side=True,\n linked_with_users_page=True, profile_picture_size=20):\n \"\"\"\n Returns a-tag with gravatar of current author and users page as href\n\n :param uid: Uid of the author\n :param gravatar_on_right_side: True, if the gravatar is on the right of authors name\n :param linked_with_users_page: True, if the text is a link to the authors site\n :param profile_picture_size: Integer\n :return: HTML-String\n \"\"\"\n db_user = DBDiscussionSession.query(User).get(int(uid))\n if not db_user:\n return None, 'Missing author with uid ' + str(uid), False\n nick = db_user.global_nickname\n img_src = get_profile_picture(db_user, profile_picture_size)\n link_begin = ''\n link_end = ''\n if linked_with_users_page:\n link_begin = '<a href=\"/user/{}\" title=\"{}\">'.format(db_user.uid, nick)\n link_end = '</a>'\n side = 'left' if gravatar_on_right_side else 'right'\n img = '<img class=\"img-circle\" src=\"{}\" style=\"padding-{}: 0.3em\">'.format(\n img_src, side)\n if gravatar_on_right_side:\n return db_user, '{}{}{}{}'.format(link_begin, nick, img, link_end\n ), True\n else:\n return db_user, '{}{}{}{}'.format(link_begin, img, nick, link_end\n ), True\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass BubbleTypes(Enum):\n USER = auto()\n SYSTEM = auto()\n STATUS = auto()\n INFO = auto()\n\n def __str__(self):\n return str(self.value)\n\n\nclass Relations(Enum):\n UNDERMINE = 'undermine'\n UNDERCUT = 'undercut'\n REBUT = 'rebut'\n SUPPORT = 'support'\n\n def __str__(self):\n return str(self.value)\n\n\nclass Attitudes(Enum):\n AGREE = 'agree'\n DISAGREE = 'disagree'\n DONT_KNOW = 'dontknow'\n\n def __str__(self):\n return str(self.value)\n\n\n<mask token>\n\n\ndef escape_string(text):\n \"\"\"\n Escapes all html special chars.\n\n :param text: string\n :return: html.escape(text)\n \"\"\"\n return escape(text)\n\n\ndef get_discussion_language(matchdict, params, session, current_issue_uid=None\n ):\n \"\"\"\n Returns Language.ui_locales\n CALL AFTER issue_handler.get_id_of_slug(..)!\n\n :param matchdict: matchdict of the current request\n :param params: params of the current request\n :param session: session of the current request\n :param current_issue_uid: uid\n :return:\n \"\"\"\n if not current_issue_uid:\n current_issue = DBDiscussionSession.query(Issue).filter(Issue.\n is_disabled == False, Issue.is_private == False).first()\n current_issue_uid = current_issue.uid if current_issue else None\n issue = matchdict['issue'] if 'issue' in matchdict else params['issue'\n ] if 'issue' in params else session['issue'\n ] if 'issue' in session else current_issue_uid\n db_issue = DBDiscussionSession.query(Issue).get(issue)\n return db_issue.lang if db_issue else 'en'\n\n\ndef python_datetime_pretty_print(ts, lang):\n \"\"\"\n Pretty print of a locale\n\n :param ts: Timestamp\n :param lang: ui_locales\n :return: String\n \"\"\"\n formatter = '%b. %d.'\n if lang == 'de':\n try:\n locale.setlocale(locale.LC_TIME, 'de_DE.UTF-8')\n formatter = '%d. %b.'\n except locale.Error:\n locale.setlocale(locale.LC_TIME, 'en_US.UTF8')\n return datetime.strptime(str(ts), '%Y-%m-%d').strftime(formatter)\n\n\n<mask token>\n\n\ndef __get_undercuts_of_argument(argument_uid, include_disabled):\n \"\"\"\n Returns all undercuts fo the given argument\n\n :param argument_uid: Argument.uid\n :param include_disabled: boolean\n :return: list of Arguments\n \"\"\"\n db_undercuts = DBDiscussionSession.query(Argument).filter_by(argument_uid\n =argument_uid)\n if not include_disabled:\n db_undercuts = db_undercuts.filter_by(is_disabled=False)\n return db_undercuts.all() if db_undercuts else []\n\n\n<mask token>\n\n\ndef get_all_arguments_with_text_by_statement_id(statement_uid):\n \"\"\"\n Given a statement_uid, it returns all arguments, which use this statement and adds\n the corresponding text to it, which normally appears in the bubbles. The resulting\n text depends on the provided language.\n\n :param statement_uid: uid to a statement, which should be analyzed\n :return: list of dictionaries containing some properties of these arguments\n :rtype: list\n \"\"\"\n logger('DBAS.LIB', 'main ' + str(statement_uid))\n arguments = get_all_arguments_by_statement(statement_uid)\n results = []\n if arguments:\n results = [{'uid': arg.uid, 'text': get_text_for_argument_uid(arg.\n uid)} for arg in arguments]\n return results\n\n\n<mask token>\n\n\ndef get_slug_by_statement_uid(uid):\n \"\"\"\n Returns slug for the given Issue.uid\n\n :param uid: Issue.uid\n :return: String\n \"\"\"\n db_statement = DBDiscussionSession.query(Statement).get(uid)\n return resolve_issue_uid_to_slug(db_statement.issue_uid)\n\n\ndef get_text_for_argument_uid(uid, nickname=None, with_html_tag=False,\n start_with_intro=False, first_arg_by_user=False, user_changed_opinion=\n False, rearrange_intro=False, colored_position=False, attack_type=None,\n minimize_on_undercut=False, is_users_opinion=True, anonymous_style=\n False, support_counter_argument=False):\n \"\"\"\n Returns current argument as string like \"conclusion, because premise1 and premise2\"\n\n :param uid: Integer\n :param with_html_tag: Boolean\n :param start_with_intro: Boolean\n :param first_arg_by_user: Boolean\n :param user_changed_opinion: Boolean\n :param rearrange_intro: Boolean\n :param colored_position: Boolean\n :param attack_type: String\n :param minimize_on_undercut: Boolean\n :param anonymous_style: Boolean\n :param support_counter_argument: Boolean\n :return: String\n \"\"\"\n logger('DBAS.LIB', 'main {}'.format(uid))\n db_argument = DBDiscussionSession.query(Argument).get(uid)\n if not db_argument:\n return None\n lang = db_argument.lang\n _t = Translator(lang)\n premisegroup_by_user = False\n author_uid = None\n db_user = DBDiscussionSession.query(User).filter_by(nickname=str(nickname)\n ).first()\n if db_user:\n author_uid = db_user.uid\n pgroup = DBDiscussionSession.query(PremiseGroup).get(db_argument.\n premisegroup_uid)\n marked_argument = DBDiscussionSession.query(MarkedArgument).filter_by(\n argument_uid=uid, author_uid=db_user.uid).first()\n premisegroup_by_user = (pgroup.author_uid == db_user.uid or \n marked_argument is not None)\n arg_array = [db_argument]\n while db_argument.argument_uid:\n db_argument = DBDiscussionSession.query(Argument).get(db_argument.\n argument_uid)\n arg_array.append(db_argument)\n if attack_type == 'jump':\n return __build_argument_for_jump(arg_array, with_html_tag)\n if len(arg_array) == 1:\n return __build_single_argument(arg_array[0], rearrange_intro,\n with_html_tag, colored_position, attack_type, _t,\n start_with_intro, is_users_opinion, anonymous_style,\n support_counter_argument, author_uid)\n else:\n return __build_nested_argument(arg_array, first_arg_by_user,\n user_changed_opinion, with_html_tag, start_with_intro,\n minimize_on_undercut, anonymous_style, premisegroup_by_user, _t)\n\n\n<mask token>\n\n\ndef __build_val_for_jump(db_argument, tag_premise, tag_conclusion, tag_end, _t\n ):\n premises = db_argument.get_premisegroup_text()\n if premises[-1] != '.':\n premises += '.'\n conclusion = db_argument.get_conclusion_text()\n because = _t.get(_.because).lower()\n conclusion = tag_conclusion + conclusion + tag_end\n premises = tag_premise + premises + tag_end\n intro = start_con + _t.get(_.isNotRight).lower(\n ) + end_tag if not db_argument.is_supportive else ''\n ret_value = '{} {} {} {}'.format(conclusion, intro, because, premises)\n if _t.get_lang() == 'de':\n intro = _t.get(_.itIsTrueThatAnonymous\n ) if db_argument.is_supportive else _t.get(_.itIsFalseThatAnonymous\n )\n intro = intro[0:1].upper() + intro[1:]\n intro = (start_pro if db_argument.is_supportive else start_con\n ) + intro + end_tag\n ret_value = '{} {}, {} {}'.format(intro, conclusion, because, premises)\n return ret_value\n\n\n<mask token>\n\n\ndef __get_tags_for_building_single_argument(with_html_tag, attack_type,\n colored_position, premises, conclusion):\n sb_none = start_tag if with_html_tag else ''\n se = end_tag if with_html_tag else ''\n if attack_type not in ['dont_know', 'jump']:\n sb = start_tag if with_html_tag else ''\n if colored_position:\n sb = start_position if with_html_tag else ''\n if attack_type == Relations.UNDERMINE:\n premises = sb + premises + se\n else:\n conclusion = sb + conclusion + se\n else:\n sb = start_argument if with_html_tag else ''\n sb_tmp = start_attack if with_html_tag else ''\n premises = sb + premises + se\n conclusion = sb_tmp + conclusion + se\n return premises, conclusion, sb, sb_none, se\n\n\n<mask token>\n\n\ndef __build_nested_argument(arg_array: List[Argument], first_arg_by_user,\n user_changed_opinion, with_html_tag, start_with_intro,\n minimize_on_undercut, anonymous_style, premisegroup_by_user, _t):\n \"\"\"\n\n :param arg_array:\n :param first_arg_by_user:\n :param user_changed_opinion:\n :param with_html_tag:\n :param start_with_intro:\n :param minimize_on_undercut:\n :param anonymous_style:\n :param premisegroup_by_user:\n :param _t:\n :return:\n \"\"\"\n pgroups = []\n supportive = []\n arg_array = arg_array[::-1]\n local_lang = arg_array[0].lang\n for db_argument in arg_array:\n text = db_argument.get_premisegroup_text()\n pgroups.append(text)\n supportive.append(db_argument.is_supportive)\n conclusion = arg_array[0].get_conclusion_text()\n sb = start_position if with_html_tag else ''\n se = end_tag if with_html_tag else ''\n because = (', ' if local_lang == 'de' else ' ') + _t.get(_.because).lower(\n ) + ' '\n if len(arg_array\n ) % 2 is 0 and not first_arg_by_user and not anonymous_style:\n ret_value = _t.get(_.earlierYouArguedThat if user_changed_opinion else\n _.otherUsersSaidThat) + ' '\n tmp_users_opinion = True\n elif not anonymous_style:\n ret_value = _t.get(_.soYourOpinionIsThat\n ) + ': ' if start_with_intro else ''\n tmp_users_opinion = False\n conclusion = se + conclusion[0:1].upper() + conclusion[1:]\n else:\n ret_value = _t.get(_.someoneArgued) + ' '\n tmp_users_opinion = False\n tmp = _t.get(_.itFalseIsThat) + ' ' if not supportive[0] else ''\n ret_value += tmp + conclusion + because + pgroups[0] + '.'\n del pgroups[0]\n if minimize_on_undercut and not user_changed_opinion and len(pgroups) > 2:\n return _t.get(_.butYouCounteredWith).strip() + ' ' + sb + pgroups[\n len(pgroups) - 1] + se + '.'\n for i, pgroup in enumerate(pgroups):\n ret_value += ' '\n if tmp_users_opinion and not anonymous_style:\n tmp = (_.butYouCounteredWithArgument if premisegroup_by_user else\n _.butYouCounteredWithInterest)\n ret_value += _t.get(_.otherParticipantsConvincedYouThat if\n user_changed_opinion else tmp)\n elif not anonymous_style:\n ret_value += _t.get(_.youAgreeWithThatNow)\n else:\n ret_value += _t.get(_.otherUsersSaidThat) if i == 0 else _t.get(_\n .thenOtherUsersSaidThat)\n ret_value += sb + ' ' + pgroups[i] + '.'\n tmp_users_opinion = not tmp_users_opinion\n return ret_value.replace(' ', ' ')\n\n\ndef get_text_for_premisegroup_uid(uid):\n \"\"\"\n Returns joined text of the premise group and the premise ids\n\n :param uid: premisegroup_uid\n :return: text, uids\n \"\"\"\n warnings.warn('Use PremiseGroup.get_text() instead.', DeprecationWarning)\n db_premises = DBDiscussionSession.query(Premise).filter_by(premisegroup_uid\n =uid).join(Statement).all()\n if len(db_premises) == 0:\n return ''\n texts = [premise.get_text() for premise in db_premises]\n lang = DBDiscussionSession.query(Statement).get(db_premises[0].\n statements.uid).lang\n _t = Translator(lang)\n return ' {} '.format(_t.get(_.aand)).join(texts)\n\n\n<mask token>\n\n\ndef get_text_for_premise(uid: int, colored_position: bool=False):\n \"\"\"\n Returns text of premise with given uid\n\n :param uid: Statement.uid\n :param colored_position: Boolean\n :return: String\n \"\"\"\n db_premise = DBDiscussionSession.query(Premise).get(uid)\n if db_premise:\n return db_premise.get_text(html=colored_position)\n else:\n return None\n\n\ndef get_text_for_conclusion(argument, start_with_intro=False,\n rearrange_intro=False, is_users_opinion=True):\n \"\"\"\n Check the arguments conclusion whether it is an statement or an argument and returns the text\n\n :param argument: Argument\n :param start_with_intro: Boolean\n :param rearrange_intro: Boolean\n :return: String\n \"\"\"\n if argument.argument_uid:\n return get_text_for_argument_uid(argument.argument_uid,\n start_with_intro, rearrange_intro=rearrange_intro,\n is_users_opinion=is_users_opinion)\n else:\n return argument.get_conclusion_text()\n\n\n<mask token>\n\n\ndef get_user_by_private_or_public_nickname(nickname):\n \"\"\"\n Gets the user by his (public) nickname, based on the option, whether his nickname is public or not\n\n :param nickname: Nickname of the user\n :return: Current user or None\n \"\"\"\n db_user = get_user_by_case_insensitive_nickname(nickname)\n db_public_user = get_user_by_case_insensitive_public_nickname(nickname)\n uid = 0\n if db_user:\n uid = db_user.uid\n elif db_public_user:\n uid = db_public_user.uid\n db_settings = DBDiscussionSession.query(Settings).filter_by(author_uid=uid\n ).first()\n if not db_settings:\n return None\n if db_settings.should_show_public_nickname and db_user:\n return db_user\n elif not db_settings.should_show_public_nickname and db_public_user:\n return db_public_user\n return None\n\n\ndef get_user_by_case_insensitive_nickname(nickname):\n \"\"\"\n Returns user with given nickname\n\n :param nickname: String\n :return: User or None\n \"\"\"\n return DBDiscussionSession.query(User).filter(func.lower(User.nickname) ==\n func.lower(nickname)).first()\n\n\n<mask token>\n\n\ndef __get_text_for_click_and_mark_count(nickname, is_user, argument_uid,\n statement_uid, speech, lang):\n \"\"\"\n Build text for a bubble, how many other participants have the same interest?\n\n :param nickname: User.nickname\n :param is_user: boolean\n :param argument_uid: Argument.uid\n :param statement_uid: Statement.uid\n :param speech: dict()\n :param lang: ui_locales\n :return: [String]\n \"\"\"\n if not nickname:\n nickname = 'anonymous'\n db_user = DBDiscussionSession.query(User).filter_by(nickname=nickname\n ).first()\n if not db_user:\n db_user = DBDiscussionSession.query(User).filter_by(nickname=\n 'anonymous').first()\n db_clicks, db_marks = __get_clicks_and_marks(argument_uid,\n statement_uid, db_user)\n _t = Translator(lang)\n speech['votecounts'] = len(db_clicks) if db_clicks else 0\n if db_marks:\n speech['votecounts'] += len(db_marks)\n votecount_keys = defaultdict(lambda : '{} {}.'.format(speech[\n 'votecounts'], _t.get(_.voteCountTextMore)))\n if is_user and db_user.gender == 'm':\n gender_key = _.voteCountTextFirstM\n elif is_user and db_user.gender == 'f':\n gender_key = _.voteCountTextFirstF\n else:\n gender_key = _.voteCountTextFirst\n votecount_keys[0] = '{}.'.format(_t.get(gender_key))\n votecount_keys[1] = _t.get(_.voteCountTextOneOther) + '.'\n return votecount_keys\n\n\ndef __get_clicks_and_marks(argument_uid, statement_uid, db_user):\n db_clicks = None\n db_marks = None\n if argument_uid:\n db_clicks = DBDiscussionSession.query(ClickedArgument).filter(\n ClickedArgument.argument_uid == argument_uid, ClickedArgument.\n is_up_vote == True, ClickedArgument.is_valid, ClickedArgument.\n author_uid != db_user.uid).all()\n db_marks = DBDiscussionSession.query(MarkedArgument).filter(\n MarkedArgument.argument_uid == argument_uid, MarkedArgument.\n author_uid != db_user.uid).all()\n elif statement_uid:\n db_clicks = DBDiscussionSession.query(ClickedStatement).filter(\n ClickedStatement.statement_uid == statement_uid, \n ClickedStatement.is_up_vote == True, ClickedStatement.is_valid,\n ClickedStatement.author_uid != db_user.uid).all()\n db_marks = DBDiscussionSession.query(MarkedStatement).filter(\n MarkedStatement.statement_uid == statement_uid, MarkedStatement\n .author_uid != db_user.uid).all()\n return db_clicks, db_marks\n\n\ndef is_argument_disabled_due_to_disabled_statements(argument):\n \"\"\"\n Returns true if any involved statement is disabled.\n\n :param argument: Argument\n :return: Boolean\n \"\"\"\n if argument.conclusion_uid is None:\n db_argument = DBDiscussionSession.query(Argument).get(argument.\n argument_uid)\n conclusion = DBDiscussionSession(Statement).get(db_argument.\n conclusion_uid)\n if conclusion.is_disabled:\n return True\n premises = __get_all_premises_of_argument(db_argument)\n for premise in premises:\n if premise.statements.is_disabled:\n return True\n else:\n print(argument.conclusion_uid)\n conclusion = DBDiscussionSession.query(Statement).get(argument.\n conclusion_uid)\n if conclusion.is_disabled:\n return True\n premises = __get_all_premises_of_argument(argument)\n for premise in premises:\n if premise.statements.is_disabled:\n return True\n return False\n\n\ndef is_author_of_statement(db_user: User, statement_uid: int) ->bool:\n \"\"\"\n Is the user with given nickname author of the statement?\n\n :param db_user: User\n :param statement_uid: Statement.uid\n :return: Boolean\n \"\"\"\n db_user = (db_user if db_user and db_user.nickname !=\n nick_of_anonymous_user else None)\n if not db_user:\n return False\n db_textversion = DBDiscussionSession.query(TextVersion).filter_by(\n statement_uid=statement_uid).order_by(TextVersion.uid.asc()).first()\n if not db_textversion:\n return False\n return db_textversion.author_uid == db_user.uid\n\n\n<mask token>\n\n\ndef get_profile_picture(user: User, size: int=80, ignore_privacy_settings:\n bool=False):\n \"\"\"\n Returns the url to a https://secure.gravatar.com picture, with the option wavatar and size of 80px\n\n :param user: User\n :param size: Integer, default 80\n :param ignore_privacy_settings:\n :return: String\n \"\"\"\n additional_id = ''\n if user and isinstance(user, User):\n additional_id = ('' if user.settings.should_show_public_nickname or\n ignore_privacy_settings else 'x')\n return __get_gravatar(user, additional_id, size)\n\n\n<mask token>\n\n\ndef __get_gravatar(user, additional_id, size):\n if user:\n if str(user.email) == 'None':\n email = (user.nickname + additional_id).encode('utf-8')\n else:\n email = (user.email + additional_id).encode('utf-8')\n else:\n email = 'unknown'.encode('utf-8')\n gravatar_url = 'https://secure.gravatar.com/avatar/{}?'.format(hashlib.\n md5(email.lower()).hexdigest())\n gravatar_url += parse.urlencode({'d': 'wavatar', 's': str(size)})\n return gravatar_url\n\n\ndef get_author_data(uid, gravatar_on_right_side=True,\n linked_with_users_page=True, profile_picture_size=20):\n \"\"\"\n Returns a-tag with gravatar of current author and users page as href\n\n :param uid: Uid of the author\n :param gravatar_on_right_side: True, if the gravatar is on the right of authors name\n :param linked_with_users_page: True, if the text is a link to the authors site\n :param profile_picture_size: Integer\n :return: HTML-String\n \"\"\"\n db_user = DBDiscussionSession.query(User).get(int(uid))\n if not db_user:\n return None, 'Missing author with uid ' + str(uid), False\n nick = db_user.global_nickname\n img_src = get_profile_picture(db_user, profile_picture_size)\n link_begin = ''\n link_end = ''\n if linked_with_users_page:\n link_begin = '<a href=\"/user/{}\" title=\"{}\">'.format(db_user.uid, nick)\n link_end = '</a>'\n side = 'left' if gravatar_on_right_side else 'right'\n img = '<img class=\"img-circle\" src=\"{}\" style=\"padding-{}: 0.3em\">'.format(\n img_src, side)\n if gravatar_on_right_side:\n return db_user, '{}{}{}{}'.format(link_begin, nick, img, link_end\n ), True\n else:\n return db_user, '{}{}{}{}'.format(link_begin, img, nick, link_end\n ), True\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass BubbleTypes(Enum):\n USER = auto()\n SYSTEM = auto()\n STATUS = auto()\n INFO = auto()\n\n def __str__(self):\n return str(self.value)\n\n\nclass Relations(Enum):\n UNDERMINE = 'undermine'\n UNDERCUT = 'undercut'\n REBUT = 'rebut'\n SUPPORT = 'support'\n\n def __str__(self):\n return str(self.value)\n\n\nclass Attitudes(Enum):\n AGREE = 'agree'\n DISAGREE = 'disagree'\n DONT_KNOW = 'dontknow'\n\n def __str__(self):\n return str(self.value)\n\n\n<mask token>\n\n\ndef get_global_url():\n \"\"\"\n Returns the global url of the project, based on the ENV\n\n :return: String\n \"\"\"\n return os.environ.get('URL', '')\n\n\ndef get_changelog(no):\n \"\"\"\n Returns the 'no' last entries from the changelog\n\n :param no: int\n :return: list\n \"\"\"\n path = str(os.path.realpath(__file__ + '/../../CHANGELOG.md'))\n lines = [line.rstrip('\\n').strip() for line in open(path) if len(line.\n rstrip('\\n').strip()) > 0]\n changelog = []\n title = ''\n body = []\n for l in lines:\n if l.startswith('#'):\n if len(title) > 0:\n changelog.append({'title': title, 'body': body})\n body = []\n title = l.replace('### ', '')\n else:\n body.append(l.replace('- ', ''))\n return changelog[0:no]\n\n\n<mask token>\n\n\ndef usage_of_matomo(registry):\n \"\"\"\n Returns true, if matomo is set in the current ini file.\n\n :param registry: request.registry\n :return: Boolean\n \"\"\"\n if 'mode' in registry.settings:\n return registry.settings['usage_of_matomo'].lower() == 'true'\n return False\n\n\ndef escape_string(text):\n \"\"\"\n Escapes all html special chars.\n\n :param text: string\n :return: html.escape(text)\n \"\"\"\n return escape(text)\n\n\ndef get_discussion_language(matchdict, params, session, current_issue_uid=None\n ):\n \"\"\"\n Returns Language.ui_locales\n CALL AFTER issue_handler.get_id_of_slug(..)!\n\n :param matchdict: matchdict of the current request\n :param params: params of the current request\n :param session: session of the current request\n :param current_issue_uid: uid\n :return:\n \"\"\"\n if not current_issue_uid:\n current_issue = DBDiscussionSession.query(Issue).filter(Issue.\n is_disabled == False, Issue.is_private == False).first()\n current_issue_uid = current_issue.uid if current_issue else None\n issue = matchdict['issue'] if 'issue' in matchdict else params['issue'\n ] if 'issue' in params else session['issue'\n ] if 'issue' in session else current_issue_uid\n db_issue = DBDiscussionSession.query(Issue).get(issue)\n return db_issue.lang if db_issue else 'en'\n\n\ndef python_datetime_pretty_print(ts, lang):\n \"\"\"\n Pretty print of a locale\n\n :param ts: Timestamp\n :param lang: ui_locales\n :return: String\n \"\"\"\n formatter = '%b. %d.'\n if lang == 'de':\n try:\n locale.setlocale(locale.LC_TIME, 'de_DE.UTF-8')\n formatter = '%d. %b.'\n except locale.Error:\n locale.setlocale(locale.LC_TIME, 'en_US.UTF8')\n return datetime.strptime(str(ts), '%Y-%m-%d').strftime(formatter)\n\n\ndef get_all_arguments_by_statement(statement_uid, include_disabled=False):\n \"\"\"\n Returns a list of all arguments where the statement is a conclusion or member of the premisegroup\n\n :param statement_uid: Statement.uid\n :param include_disabled: Boolean\n :return: [Arguments]\n \"\"\"\n logger('DBAS.LIB', 'main {}, include_disabled {}'.format(statement_uid,\n include_disabled))\n db_arguments = __get_arguments_of_conclusion(statement_uid,\n include_disabled)\n arg_array = [arg for arg in db_arguments] if db_arguments else []\n premises = DBDiscussionSession.query(Premise).filter_by(statement_uid=\n statement_uid)\n if not include_disabled:\n premises = premises.filter_by(is_disabled=False)\n premises = premises.all()\n for premise in premises:\n arg_array += __get_argument_of_premisegroup(premise.\n premisegroup_uid, include_disabled)\n db_undercuts = []\n for arg in arg_array:\n db_undercuts += __get_undercuts_of_argument(arg.uid, include_disabled)\n db_undercutted_undercuts = []\n for arg in db_undercuts:\n db_undercutted_undercuts += __get_undercuts_of_argument(arg.uid,\n include_disabled)\n arg_array = list(set(arg_array + db_undercuts + db_undercutted_undercuts))\n logger('DBAS.LIB', 'returning arguments {}'.format([arg.uid for arg in\n arg_array]))\n return arg_array if len(arg_array) > 0 else None\n\n\ndef __get_argument_of_premisegroup(premisegroup_uid, include_disabled):\n \"\"\"\n Returns all arguments with the given premisegroup\n\n :param premisegroup_uid: PremisgGroup.uid\n :param include_disabled: Boolean\n :return: list of Arguments\n \"\"\"\n db_arguments = DBDiscussionSession.query(Argument).filter_by(\n premisegroup_uid=premisegroup_uid)\n if not include_disabled:\n db_arguments = db_arguments.filter_by(is_disabled=False)\n return db_arguments.all() if db_arguments else []\n\n\ndef __get_undercuts_of_argument(argument_uid, include_disabled):\n \"\"\"\n Returns all undercuts fo the given argument\n\n :param argument_uid: Argument.uid\n :param include_disabled: boolean\n :return: list of Arguments\n \"\"\"\n db_undercuts = DBDiscussionSession.query(Argument).filter_by(argument_uid\n =argument_uid)\n if not include_disabled:\n db_undercuts = db_undercuts.filter_by(is_disabled=False)\n return db_undercuts.all() if db_undercuts else []\n\n\ndef __get_arguments_of_conclusion(statement_uid, include_disabled):\n \"\"\"\n Returns all arguments, where the statement is set as conclusion\n\n :param statement_uid: Statement.uid\n :param include_disabled: Boolean\n :return: list of arguments\n \"\"\"\n db_arguments = DBDiscussionSession.query(Argument).filter_by(conclusion_uid\n =statement_uid)\n if not include_disabled:\n db_arguments = db_arguments.filter_by(is_disabled=False)\n return db_arguments.all() if db_arguments else []\n\n\ndef get_all_arguments_with_text_by_statement_id(statement_uid):\n \"\"\"\n Given a statement_uid, it returns all arguments, which use this statement and adds\n the corresponding text to it, which normally appears in the bubbles. The resulting\n text depends on the provided language.\n\n :param statement_uid: uid to a statement, which should be analyzed\n :return: list of dictionaries containing some properties of these arguments\n :rtype: list\n \"\"\"\n logger('DBAS.LIB', 'main ' + str(statement_uid))\n arguments = get_all_arguments_by_statement(statement_uid)\n results = []\n if arguments:\n results = [{'uid': arg.uid, 'text': get_text_for_argument_uid(arg.\n uid)} for arg in arguments]\n return results\n\n\ndef get_all_arguments_with_text_and_url_by_statement_id(db_statement,\n urlmanager, color_statement=False, is_jump=False):\n \"\"\"\n Given a statement_uid, it returns all arguments, which use this statement and adds\n the corresponding text to it, which normally appears in the bubbles. The resulting\n text depends on the provided language.\n\n :param db_statement: Statement\n :param urlmanager:\n :param color_statement: True, if the statement (specified by the ID) should be colored\n :return: list of dictionaries containing some properties of these arguments\n :rtype: list\n \"\"\"\n logger('DBAS.LIB', 'main ' + str(db_statement.uid))\n arguments = get_all_arguments_by_statement(db_statement.uid)\n uids = [arg.uid for arg in arguments] if arguments else None\n results = list()\n sb = '<{} data-argumentation-type=\"position\">'.format(tag_type\n ) if color_statement else ''\n se = '</{}>'.format(tag_type) if color_statement else ''\n if not uids:\n return []\n uids.sort()\n for uid in uids:\n statement_text = db_statement.get_text()\n attack_type = 'jump' if is_jump else ''\n argument_text = get_text_for_argument_uid(uid, anonymous_style=True,\n attack_type=attack_type)\n pos = argument_text.lower().find(statement_text.lower())\n argument_text = argument_text[:pos] + sb + argument_text[pos:]\n pos += len(statement_text) + len(sb)\n argument_text = argument_text[:pos] + se + argument_text[pos:]\n results.append({'uid': uid, 'text': argument_text, 'url':\n urlmanager.get_url_for_jump(uid)})\n return results\n\n\ndef get_slug_by_statement_uid(uid):\n \"\"\"\n Returns slug for the given Issue.uid\n\n :param uid: Issue.uid\n :return: String\n \"\"\"\n db_statement = DBDiscussionSession.query(Statement).get(uid)\n return resolve_issue_uid_to_slug(db_statement.issue_uid)\n\n\ndef get_text_for_argument_uid(uid, nickname=None, with_html_tag=False,\n start_with_intro=False, first_arg_by_user=False, user_changed_opinion=\n False, rearrange_intro=False, colored_position=False, attack_type=None,\n minimize_on_undercut=False, is_users_opinion=True, anonymous_style=\n False, support_counter_argument=False):\n \"\"\"\n Returns current argument as string like \"conclusion, because premise1 and premise2\"\n\n :param uid: Integer\n :param with_html_tag: Boolean\n :param start_with_intro: Boolean\n :param first_arg_by_user: Boolean\n :param user_changed_opinion: Boolean\n :param rearrange_intro: Boolean\n :param colored_position: Boolean\n :param attack_type: String\n :param minimize_on_undercut: Boolean\n :param anonymous_style: Boolean\n :param support_counter_argument: Boolean\n :return: String\n \"\"\"\n logger('DBAS.LIB', 'main {}'.format(uid))\n db_argument = DBDiscussionSession.query(Argument).get(uid)\n if not db_argument:\n return None\n lang = db_argument.lang\n _t = Translator(lang)\n premisegroup_by_user = False\n author_uid = None\n db_user = DBDiscussionSession.query(User).filter_by(nickname=str(nickname)\n ).first()\n if db_user:\n author_uid = db_user.uid\n pgroup = DBDiscussionSession.query(PremiseGroup).get(db_argument.\n premisegroup_uid)\n marked_argument = DBDiscussionSession.query(MarkedArgument).filter_by(\n argument_uid=uid, author_uid=db_user.uid).first()\n premisegroup_by_user = (pgroup.author_uid == db_user.uid or \n marked_argument is not None)\n arg_array = [db_argument]\n while db_argument.argument_uid:\n db_argument = DBDiscussionSession.query(Argument).get(db_argument.\n argument_uid)\n arg_array.append(db_argument)\n if attack_type == 'jump':\n return __build_argument_for_jump(arg_array, with_html_tag)\n if len(arg_array) == 1:\n return __build_single_argument(arg_array[0], rearrange_intro,\n with_html_tag, colored_position, attack_type, _t,\n start_with_intro, is_users_opinion, anonymous_style,\n support_counter_argument, author_uid)\n else:\n return __build_nested_argument(arg_array, first_arg_by_user,\n user_changed_opinion, with_html_tag, start_with_intro,\n minimize_on_undercut, anonymous_style, premisegroup_by_user, _t)\n\n\n<mask token>\n\n\ndef __build_val_for_jump(db_argument, tag_premise, tag_conclusion, tag_end, _t\n ):\n premises = db_argument.get_premisegroup_text()\n if premises[-1] != '.':\n premises += '.'\n conclusion = db_argument.get_conclusion_text()\n because = _t.get(_.because).lower()\n conclusion = tag_conclusion + conclusion + tag_end\n premises = tag_premise + premises + tag_end\n intro = start_con + _t.get(_.isNotRight).lower(\n ) + end_tag if not db_argument.is_supportive else ''\n ret_value = '{} {} {} {}'.format(conclusion, intro, because, premises)\n if _t.get_lang() == 'de':\n intro = _t.get(_.itIsTrueThatAnonymous\n ) if db_argument.is_supportive else _t.get(_.itIsFalseThatAnonymous\n )\n intro = intro[0:1].upper() + intro[1:]\n intro = (start_pro if db_argument.is_supportive else start_con\n ) + intro + end_tag\n ret_value = '{} {}, {} {}'.format(intro, conclusion, because, premises)\n return ret_value\n\n\n<mask token>\n\n\ndef __build_val_for_undercutted_undercut(arg_array: List[Argument],\n tag_premise, tag_conclusion, tag_end, _t):\n premise1 = arg_array[0].get_premisegroup_text()\n premise2 = arg_array[1].get_premisegroup_text()\n premise3 = arg_array[2].get_premisegroup_text()\n conclusion = arg_array[2].get_conclusion_text()\n bind = start_con + _t.get(_.isNotAGoodReasonAgainstArgument) + end_tag\n because = _t.get(_.because)\n seperator = ',' if _t.get_lang() == 'de' else ''\n premise1 = tag_premise + premise1 + tag_end\n premise2 = tag_conclusion + premise2 + tag_end\n argument = '{}{} {} {}'.format(conclusion, seperator, because.lower(),\n premise3)\n argument = tag_conclusion + argument + tag_end\n ret_value = '{} {} {}. {} {}'.format(premise2, bind, argument, because,\n premise1)\n return ret_value\n\n\ndef __build_single_argument(db_argument: Argument, rearrange_intro: bool,\n with_html_tag: bool, colored_position: bool, attack_type: str, _t:\n Translator, start_with_intro: bool, is_users_opinion: bool,\n anonymous_style: bool, support_counter_argument: bool=False, author_uid\n =None):\n \"\"\"\n Build up argument text for a single argument\n\n Please, do not touch this!\n\n :param uid: Argument.uid\n :param rearrange_intro: Boolean\n :param with_html_tag: Boolean\n :param colored_position: Boolean\n :param attack_type: String\n :param _t: Translator\n :param start_with_intro: Boolean\n :param is_users_opinion: Boolean\n :param anonymous_style: Boolean\n :param support_counter_argument: Boolean\n :param author_uid: User.uid\n :return: String\n \"\"\"\n premises_text = db_argument.get_premisegroup_text()\n conclusion_text = db_argument.get_conclusion_text()\n lang = db_argument.lang\n if lang != 'de':\n premises_text = premises_text[0:1].lower() + premises_text[1:]\n premises_text, conclusion_text, sb, sb_none, se = (\n __get_tags_for_building_single_argument(with_html_tag, attack_type,\n colored_position, premises_text, conclusion_text))\n marked_element = False\n if author_uid:\n db_marked = DBDiscussionSession.query(MarkedArgument).filter(\n MarkedArgument.argument_uid == db_argument.uid, MarkedArgument.\n author_uid == author_uid).first()\n marked_element = db_marked is not None\n you_have_the_opinion_that = _t.get(_.youHaveTheOpinionThat).format(''\n ).strip()\n if lang == 'de':\n ret_value = __build_single_argument_for_de(_t, sb, se,\n you_have_the_opinion_that, start_with_intro, anonymous_style,\n rearrange_intro, db_argument, attack_type, sb_none,\n marked_element, lang, premises_text, conclusion_text,\n is_users_opinion, support_counter_argument)\n else:\n ret_value = __build_single_argument_for_en(_t, sb, se,\n you_have_the_opinion_that, marked_element, conclusion_text,\n premises_text, db_argument)\n return ret_value.replace(' ', ' ')\n\n\ndef __get_tags_for_building_single_argument(with_html_tag, attack_type,\n colored_position, premises, conclusion):\n sb_none = start_tag if with_html_tag else ''\n se = end_tag if with_html_tag else ''\n if attack_type not in ['dont_know', 'jump']:\n sb = start_tag if with_html_tag else ''\n if colored_position:\n sb = start_position if with_html_tag else ''\n if attack_type == Relations.UNDERMINE:\n premises = sb + premises + se\n else:\n conclusion = sb + conclusion + se\n else:\n sb = start_argument if with_html_tag else ''\n sb_tmp = start_attack if with_html_tag else ''\n premises = sb + premises + se\n conclusion = sb_tmp + conclusion + se\n return premises, conclusion, sb, sb_none, se\n\n\ndef __build_single_argument_for_de(_t, sb, se, you_have_the_opinion_that,\n start_with_intro, anonymous_style, rearrange_intro, db_argument,\n attack_type, sb_none, marked_element, lang, premises, conclusion,\n is_users_opinion, support_counter_argument):\n if start_with_intro and not anonymous_style:\n intro = _t.get(_.itIsTrueThat\n ) if db_argument.is_supportive else _t.get(_.itIsFalseThat)\n if rearrange_intro:\n intro = _t.get(_.itTrueIsThat\n ) if db_argument.is_supportive else _t.get(_.itFalseIsThat)\n ret_value = (sb_none if attack_type in ['dont_know'] else sb\n ) + intro + se + ' '\n elif is_users_opinion and not anonymous_style:\n ret_value = sb_none\n if support_counter_argument:\n ret_value += _t.get(_.youAgreeWithThecounterargument)\n elif marked_element:\n ret_value += you_have_the_opinion_that\n else:\n ret_value += _t.get(_.youArgue)\n ret_value += se + ' '\n else:\n tmp = _t.get(_.itIsTrueThatAnonymous if db_argument.is_supportive else\n _.itIsFalseThatAnonymous)\n ret_value = sb_none + sb + tmp + se + ' '\n ret_value += ' {}{}{} '.format(sb, _t.get(_.itIsNotRight), se\n ) if not db_argument.is_supportive else ''\n ret_value += conclusion\n ret_value += ', ' if lang == 'de' else ' '\n ret_value += sb_none + _t.get(_.because).lower() + se + ' ' + premises\n return ret_value\n\n\n<mask token>\n\n\ndef __build_nested_argument(arg_array: List[Argument], first_arg_by_user,\n user_changed_opinion, with_html_tag, start_with_intro,\n minimize_on_undercut, anonymous_style, premisegroup_by_user, _t):\n \"\"\"\n\n :param arg_array:\n :param first_arg_by_user:\n :param user_changed_opinion:\n :param with_html_tag:\n :param start_with_intro:\n :param minimize_on_undercut:\n :param anonymous_style:\n :param premisegroup_by_user:\n :param _t:\n :return:\n \"\"\"\n pgroups = []\n supportive = []\n arg_array = arg_array[::-1]\n local_lang = arg_array[0].lang\n for db_argument in arg_array:\n text = db_argument.get_premisegroup_text()\n pgroups.append(text)\n supportive.append(db_argument.is_supportive)\n conclusion = arg_array[0].get_conclusion_text()\n sb = start_position if with_html_tag else ''\n se = end_tag if with_html_tag else ''\n because = (', ' if local_lang == 'de' else ' ') + _t.get(_.because).lower(\n ) + ' '\n if len(arg_array\n ) % 2 is 0 and not first_arg_by_user and not anonymous_style:\n ret_value = _t.get(_.earlierYouArguedThat if user_changed_opinion else\n _.otherUsersSaidThat) + ' '\n tmp_users_opinion = True\n elif not anonymous_style:\n ret_value = _t.get(_.soYourOpinionIsThat\n ) + ': ' if start_with_intro else ''\n tmp_users_opinion = False\n conclusion = se + conclusion[0:1].upper() + conclusion[1:]\n else:\n ret_value = _t.get(_.someoneArgued) + ' '\n tmp_users_opinion = False\n tmp = _t.get(_.itFalseIsThat) + ' ' if not supportive[0] else ''\n ret_value += tmp + conclusion + because + pgroups[0] + '.'\n del pgroups[0]\n if minimize_on_undercut and not user_changed_opinion and len(pgroups) > 2:\n return _t.get(_.butYouCounteredWith).strip() + ' ' + sb + pgroups[\n len(pgroups) - 1] + se + '.'\n for i, pgroup in enumerate(pgroups):\n ret_value += ' '\n if tmp_users_opinion and not anonymous_style:\n tmp = (_.butYouCounteredWithArgument if premisegroup_by_user else\n _.butYouCounteredWithInterest)\n ret_value += _t.get(_.otherParticipantsConvincedYouThat if\n user_changed_opinion else tmp)\n elif not anonymous_style:\n ret_value += _t.get(_.youAgreeWithThatNow)\n else:\n ret_value += _t.get(_.otherUsersSaidThat) if i == 0 else _t.get(_\n .thenOtherUsersSaidThat)\n ret_value += sb + ' ' + pgroups[i] + '.'\n tmp_users_opinion = not tmp_users_opinion\n return ret_value.replace(' ', ' ')\n\n\ndef get_text_for_premisegroup_uid(uid):\n \"\"\"\n Returns joined text of the premise group and the premise ids\n\n :param uid: premisegroup_uid\n :return: text, uids\n \"\"\"\n warnings.warn('Use PremiseGroup.get_text() instead.', DeprecationWarning)\n db_premises = DBDiscussionSession.query(Premise).filter_by(premisegroup_uid\n =uid).join(Statement).all()\n if len(db_premises) == 0:\n return ''\n texts = [premise.get_text() for premise in db_premises]\n lang = DBDiscussionSession.query(Statement).get(db_premises[0].\n statements.uid).lang\n _t = Translator(lang)\n return ' {} '.format(_t.get(_.aand)).join(texts)\n\n\n<mask token>\n\n\ndef get_text_for_premise(uid: int, colored_position: bool=False):\n \"\"\"\n Returns text of premise with given uid\n\n :param uid: Statement.uid\n :param colored_position: Boolean\n :return: String\n \"\"\"\n db_premise = DBDiscussionSession.query(Premise).get(uid)\n if db_premise:\n return db_premise.get_text(html=colored_position)\n else:\n return None\n\n\ndef get_text_for_conclusion(argument, start_with_intro=False,\n rearrange_intro=False, is_users_opinion=True):\n \"\"\"\n Check the arguments conclusion whether it is an statement or an argument and returns the text\n\n :param argument: Argument\n :param start_with_intro: Boolean\n :param rearrange_intro: Boolean\n :return: String\n \"\"\"\n if argument.argument_uid:\n return get_text_for_argument_uid(argument.argument_uid,\n start_with_intro, rearrange_intro=rearrange_intro,\n is_users_opinion=is_users_opinion)\n else:\n return argument.get_conclusion_text()\n\n\ndef resolve_issue_uid_to_slug(uid):\n \"\"\"\n Given the issue uid query database and return the correct slug of the issue.\n\n :param uid: issue_uid\n :type uid: int\n :return: Slug of issue\n :rtype: str\n \"\"\"\n issue = DBDiscussionSession.query(Issue).get(uid)\n return issue.slug if issue else None\n\n\n<mask token>\n\n\ndef get_user_by_private_or_public_nickname(nickname):\n \"\"\"\n Gets the user by his (public) nickname, based on the option, whether his nickname is public or not\n\n :param nickname: Nickname of the user\n :return: Current user or None\n \"\"\"\n db_user = get_user_by_case_insensitive_nickname(nickname)\n db_public_user = get_user_by_case_insensitive_public_nickname(nickname)\n uid = 0\n if db_user:\n uid = db_user.uid\n elif db_public_user:\n uid = db_public_user.uid\n db_settings = DBDiscussionSession.query(Settings).filter_by(author_uid=uid\n ).first()\n if not db_settings:\n return None\n if db_settings.should_show_public_nickname and db_user:\n return db_user\n elif not db_settings.should_show_public_nickname and db_public_user:\n return db_public_user\n return None\n\n\ndef get_user_by_case_insensitive_nickname(nickname):\n \"\"\"\n Returns user with given nickname\n\n :param nickname: String\n :return: User or None\n \"\"\"\n return DBDiscussionSession.query(User).filter(func.lower(User.nickname) ==\n func.lower(nickname)).first()\n\n\ndef get_user_by_case_insensitive_public_nickname(public_nickname):\n \"\"\"\n Returns user with given public nickname\n\n :param public_nickname: String\n :return: User or None\n \"\"\"\n return DBDiscussionSession.query(User).filter(func.lower(User.\n public_nickname) == func.lower(public_nickname)).first()\n\n\ndef pretty_print_options(message):\n \"\"\"\n Some modifications for pretty printing.\n Use uppercase for first letter in text and a single dot for the end if there isn't one already.\n\n :param message: String\n :return: String\n \"\"\"\n if message[0:1] == '<':\n pos = message.index('>')\n message = message[0:pos + 1] + message[pos + 1:pos + 2].upper(\n ) + message[pos + 2:]\n else:\n message = message[0:1].upper() + message[1:]\n if message[-1] == '>':\n pos = message.rfind('<')\n if message[pos - 1:pos] not in ['.', '?', '!']:\n message = message[0:pos] + '.' + message[pos:]\n elif not message.endswith(tuple(['.', '?', '!'])) and id is not 'now':\n message += '.'\n return message\n\n\ndef create_speechbubble_dict(bubble_type: BubbleTypes, is_markable: bool=\n False, is_author: bool=False, uid: str='', bubble_url: str='', content:\n str='', omit_bubble_url: bool=False, omit_vote_info: bool=False,\n argument_uid: int=None, statement_uid: int=None, is_supportive: bool=\n False, nickname: str='anonymous', lang: str='en', is_users_opinion:\n bool=False, other_author: User=None):\n \"\"\"\n Creates an dictionary which includes every information needed for a bubble.\n\n :param bubble_type: BubbleTypes\n :param is_markable: True if the content itself could be flagged\n :param is_author: True if the current user is author of the content\n :param uid: Identifier for the bubble\n :param bubble_url: URL for the click event of the bubble\n :param content: Text of the bubble\n :param omit_bubble_url: True if the bubble should have a link\n :param omit_vote_info: True if the bubble have the little, grey information text\n :param argument_uid: Argument.uid\n :param statement_uid: Statement.uid\n :param is_supportive: Boolean\n :param nickname: String\n :param omit_bubble_url: Boolean\n :param lang: is_users_opinion\n :param is_users_opinion: Boolean\n :return: dict()\n \"\"\"\n gravatar_link = get_global_url() + '/static/images/icon.png'\n profile = None\n if uid is not 'now':\n content = pretty_print_options(content)\n if bubble_type is BubbleTypes.SYSTEM and other_author is not None:\n gravatar_link = get_profile_picture(other_author, 25)\n profile = '/user/{}'.format(other_author.uid),\n if bubble_type is BubbleTypes.USER and nickname != 'anonymous':\n db_user = DBDiscussionSession.query(User).filter_by(nickname=nickname\n ).first()\n db_marked = None\n gravatar_link = get_profile_picture(db_user, 25)\n if argument_uid is not None and db_user is not None:\n db_marked = DBDiscussionSession.query(MarkedArgument).filter(\n MarkedArgument.argument_uid == argument_uid, MarkedArgument\n .author_uid == db_user.uid).first()\n if statement_uid is not None and db_user is not None:\n db_marked = DBDiscussionSession.query(MarkedStatement).filter(\n MarkedStatement.statement_uid == statement_uid, \n MarkedStatement.author_uid == db_user.uid).first()\n is_users_opinion = db_marked is not None\n speech = {'is_user': bubble_type is BubbleTypes.USER, 'is_system': \n bubble_type is BubbleTypes.SYSTEM, 'is_status': bubble_type is\n BubbleTypes.STATUS, 'is_info': bubble_type is BubbleTypes.INFO,\n 'is_markable': is_markable, 'is_author': is_author, 'id': uid if \n len(str(uid)) > 0 else uuid4().hex, 'bubble_url': bubble_url,\n 'message': content, 'omit_bubble_url': omit_bubble_url,\n 'omit_vote_info': omit_vote_info, 'data_type': 'argument' if\n argument_uid else 'statement' if statement_uid else 'None',\n 'data_argument_uid': argument_uid, 'data_statement_uid':\n statement_uid, 'data_is_supportive': is_supportive,\n 'is_users_opinion': is_users_opinion, 'enemy': {'avatar':\n gravatar_link, 'profile': profile, 'available': profile is not None}}\n votecount_keys = __get_text_for_click_and_mark_count(nickname, \n bubble_type is BubbleTypes.USER, argument_uid, statement_uid,\n speech, lang)\n speech['votecounts_message'] = votecount_keys[speech['votecounts']]\n return speech\n\n\ndef __get_text_for_click_and_mark_count(nickname, is_user, argument_uid,\n statement_uid, speech, lang):\n \"\"\"\n Build text for a bubble, how many other participants have the same interest?\n\n :param nickname: User.nickname\n :param is_user: boolean\n :param argument_uid: Argument.uid\n :param statement_uid: Statement.uid\n :param speech: dict()\n :param lang: ui_locales\n :return: [String]\n \"\"\"\n if not nickname:\n nickname = 'anonymous'\n db_user = DBDiscussionSession.query(User).filter_by(nickname=nickname\n ).first()\n if not db_user:\n db_user = DBDiscussionSession.query(User).filter_by(nickname=\n 'anonymous').first()\n db_clicks, db_marks = __get_clicks_and_marks(argument_uid,\n statement_uid, db_user)\n _t = Translator(lang)\n speech['votecounts'] = len(db_clicks) if db_clicks else 0\n if db_marks:\n speech['votecounts'] += len(db_marks)\n votecount_keys = defaultdict(lambda : '{} {}.'.format(speech[\n 'votecounts'], _t.get(_.voteCountTextMore)))\n if is_user and db_user.gender == 'm':\n gender_key = _.voteCountTextFirstM\n elif is_user and db_user.gender == 'f':\n gender_key = _.voteCountTextFirstF\n else:\n gender_key = _.voteCountTextFirst\n votecount_keys[0] = '{}.'.format(_t.get(gender_key))\n votecount_keys[1] = _t.get(_.voteCountTextOneOther) + '.'\n return votecount_keys\n\n\ndef __get_clicks_and_marks(argument_uid, statement_uid, db_user):\n db_clicks = None\n db_marks = None\n if argument_uid:\n db_clicks = DBDiscussionSession.query(ClickedArgument).filter(\n ClickedArgument.argument_uid == argument_uid, ClickedArgument.\n is_up_vote == True, ClickedArgument.is_valid, ClickedArgument.\n author_uid != db_user.uid).all()\n db_marks = DBDiscussionSession.query(MarkedArgument).filter(\n MarkedArgument.argument_uid == argument_uid, MarkedArgument.\n author_uid != db_user.uid).all()\n elif statement_uid:\n db_clicks = DBDiscussionSession.query(ClickedStatement).filter(\n ClickedStatement.statement_uid == statement_uid, \n ClickedStatement.is_up_vote == True, ClickedStatement.is_valid,\n ClickedStatement.author_uid != db_user.uid).all()\n db_marks = DBDiscussionSession.query(MarkedStatement).filter(\n MarkedStatement.statement_uid == statement_uid, MarkedStatement\n .author_uid != db_user.uid).all()\n return db_clicks, db_marks\n\n\ndef is_argument_disabled_due_to_disabled_statements(argument):\n \"\"\"\n Returns true if any involved statement is disabled.\n\n :param argument: Argument\n :return: Boolean\n \"\"\"\n if argument.conclusion_uid is None:\n db_argument = DBDiscussionSession.query(Argument).get(argument.\n argument_uid)\n conclusion = DBDiscussionSession(Statement).get(db_argument.\n conclusion_uid)\n if conclusion.is_disabled:\n return True\n premises = __get_all_premises_of_argument(db_argument)\n for premise in premises:\n if premise.statements.is_disabled:\n return True\n else:\n print(argument.conclusion_uid)\n conclusion = DBDiscussionSession.query(Statement).get(argument.\n conclusion_uid)\n if conclusion.is_disabled:\n return True\n premises = __get_all_premises_of_argument(argument)\n for premise in premises:\n if premise.statements.is_disabled:\n return True\n return False\n\n\ndef is_author_of_statement(db_user: User, statement_uid: int) ->bool:\n \"\"\"\n Is the user with given nickname author of the statement?\n\n :param db_user: User\n :param statement_uid: Statement.uid\n :return: Boolean\n \"\"\"\n db_user = (db_user if db_user and db_user.nickname !=\n nick_of_anonymous_user else None)\n if not db_user:\n return False\n db_textversion = DBDiscussionSession.query(TextVersion).filter_by(\n statement_uid=statement_uid).order_by(TextVersion.uid.asc()).first()\n if not db_textversion:\n return False\n return db_textversion.author_uid == db_user.uid\n\n\ndef is_author_of_argument(db_user: User, argument_uid: int) ->bool:\n \"\"\"\n Is the user with given nickname author of the argument?\n\n :param db_user: User\n :param argument_uid: Argument.uid\n :return: Boolean\n \"\"\"\n db_user = (db_user if db_user and db_user.nickname !=\n nick_of_anonymous_user else None)\n if not db_user:\n return False\n db_argument = DBDiscussionSession.query(Argument).filter(Argument.uid ==\n argument_uid, Argument.author_uid == db_user.uid).first()\n return True if db_argument else False\n\n\n<mask token>\n\n\ndef get_profile_picture(user: User, size: int=80, ignore_privacy_settings:\n bool=False):\n \"\"\"\n Returns the url to a https://secure.gravatar.com picture, with the option wavatar and size of 80px\n\n :param user: User\n :param size: Integer, default 80\n :param ignore_privacy_settings:\n :return: String\n \"\"\"\n additional_id = ''\n if user and isinstance(user, User):\n additional_id = ('' if user.settings.should_show_public_nickname or\n ignore_privacy_settings else 'x')\n return __get_gravatar(user, additional_id, size)\n\n\ndef get_public_profile_picture(user: User, size: int=80):\n \"\"\"\n Returns the url to a https://secure.gravatar.com picture, with the option wavatar and size of 80px\n If the user doesn't want an public profile, an anonymous image will be returned\n\n :param user: User\n :param size: Integer, default 80\n :return: String\n \"\"\"\n additional_id = ''\n if user.settings.should_show_public_nickname:\n additional_id = 'x'\n if len(str(user.oauth_provider)) > 0:\n additional_id = '{}{}'.format(user.oauth_provider, user.\n oauth_provider_id)\n return __get_gravatar(user, additional_id, size)\n\n\ndef __get_gravatar(user, additional_id, size):\n if user:\n if str(user.email) == 'None':\n email = (user.nickname + additional_id).encode('utf-8')\n else:\n email = (user.email + additional_id).encode('utf-8')\n else:\n email = 'unknown'.encode('utf-8')\n gravatar_url = 'https://secure.gravatar.com/avatar/{}?'.format(hashlib.\n md5(email.lower()).hexdigest())\n gravatar_url += parse.urlencode({'d': 'wavatar', 's': str(size)})\n return gravatar_url\n\n\ndef get_author_data(uid, gravatar_on_right_side=True,\n linked_with_users_page=True, profile_picture_size=20):\n \"\"\"\n Returns a-tag with gravatar of current author and users page as href\n\n :param uid: Uid of the author\n :param gravatar_on_right_side: True, if the gravatar is on the right of authors name\n :param linked_with_users_page: True, if the text is a link to the authors site\n :param profile_picture_size: Integer\n :return: HTML-String\n \"\"\"\n db_user = DBDiscussionSession.query(User).get(int(uid))\n if not db_user:\n return None, 'Missing author with uid ' + str(uid), False\n nick = db_user.global_nickname\n img_src = get_profile_picture(db_user, profile_picture_size)\n link_begin = ''\n link_end = ''\n if linked_with_users_page:\n link_begin = '<a href=\"/user/{}\" title=\"{}\">'.format(db_user.uid, nick)\n link_end = '</a>'\n side = 'left' if gravatar_on_right_side else 'right'\n img = '<img class=\"img-circle\" src=\"{}\" style=\"padding-{}: 0.3em\">'.format(\n img_src, side)\n if gravatar_on_right_side:\n return db_user, '{}{}{}{}'.format(link_begin, nick, img, link_end\n ), True\n else:\n return db_user, '{}{}{}{}'.format(link_begin, img, nick, link_end\n ), True\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass BubbleTypes(Enum):\n USER = auto()\n SYSTEM = auto()\n STATUS = auto()\n INFO = auto()\n\n def __str__(self):\n return str(self.value)\n\n\nclass Relations(Enum):\n UNDERMINE = 'undermine'\n UNDERCUT = 'undercut'\n REBUT = 'rebut'\n SUPPORT = 'support'\n\n def __str__(self):\n return str(self.value)\n\n\nclass Attitudes(Enum):\n AGREE = 'agree'\n DISAGREE = 'disagree'\n DONT_KNOW = 'dontknow'\n\n def __str__(self):\n return str(self.value)\n\n\n<mask token>\n\n\ndef get_global_url():\n \"\"\"\n Returns the global url of the project, based on the ENV\n\n :return: String\n \"\"\"\n return os.environ.get('URL', '')\n\n\ndef get_changelog(no):\n \"\"\"\n Returns the 'no' last entries from the changelog\n\n :param no: int\n :return: list\n \"\"\"\n path = str(os.path.realpath(__file__ + '/../../CHANGELOG.md'))\n lines = [line.rstrip('\\n').strip() for line in open(path) if len(line.\n rstrip('\\n').strip()) > 0]\n changelog = []\n title = ''\n body = []\n for l in lines:\n if l.startswith('#'):\n if len(title) > 0:\n changelog.append({'title': title, 'body': body})\n body = []\n title = l.replace('### ', '')\n else:\n body.append(l.replace('- ', ''))\n return changelog[0:no]\n\n\ndef is_development_mode(registry):\n \"\"\"\n Returns true, if mode is set to development in current ini file.\n\n :param registry: request.registry\n :return: Boolean\n \"\"\"\n if 'mode' in registry.settings:\n return registry.settings['mode'].lower() == 'development'\n return False\n\n\ndef usage_of_modern_bubbles(registry):\n \"\"\"\n Returns true, if modern bubbles are set in the current ini file.\n\n :param registry: request.registry\n :return: Boolean\n \"\"\"\n if 'modern_bubbles' in registry.settings:\n return registry.settings['modern_bubbles'].lower() == 'true'\n return False\n\n\ndef usage_of_matomo(registry):\n \"\"\"\n Returns true, if matomo is set in the current ini file.\n\n :param registry: request.registry\n :return: Boolean\n \"\"\"\n if 'mode' in registry.settings:\n return registry.settings['usage_of_matomo'].lower() == 'true'\n return False\n\n\ndef escape_string(text):\n \"\"\"\n Escapes all html special chars.\n\n :param text: string\n :return: html.escape(text)\n \"\"\"\n return escape(text)\n\n\ndef get_discussion_language(matchdict, params, session, current_issue_uid=None\n ):\n \"\"\"\n Returns Language.ui_locales\n CALL AFTER issue_handler.get_id_of_slug(..)!\n\n :param matchdict: matchdict of the current request\n :param params: params of the current request\n :param session: session of the current request\n :param current_issue_uid: uid\n :return:\n \"\"\"\n if not current_issue_uid:\n current_issue = DBDiscussionSession.query(Issue).filter(Issue.\n is_disabled == False, Issue.is_private == False).first()\n current_issue_uid = current_issue.uid if current_issue else None\n issue = matchdict['issue'] if 'issue' in matchdict else params['issue'\n ] if 'issue' in params else session['issue'\n ] if 'issue' in session else current_issue_uid\n db_issue = DBDiscussionSession.query(Issue).get(issue)\n return db_issue.lang if db_issue else 'en'\n\n\ndef python_datetime_pretty_print(ts, lang):\n \"\"\"\n Pretty print of a locale\n\n :param ts: Timestamp\n :param lang: ui_locales\n :return: String\n \"\"\"\n formatter = '%b. %d.'\n if lang == 'de':\n try:\n locale.setlocale(locale.LC_TIME, 'de_DE.UTF-8')\n formatter = '%d. %b.'\n except locale.Error:\n locale.setlocale(locale.LC_TIME, 'en_US.UTF8')\n return datetime.strptime(str(ts), '%Y-%m-%d').strftime(formatter)\n\n\ndef get_all_arguments_by_statement(statement_uid, include_disabled=False):\n \"\"\"\n Returns a list of all arguments where the statement is a conclusion or member of the premisegroup\n\n :param statement_uid: Statement.uid\n :param include_disabled: Boolean\n :return: [Arguments]\n \"\"\"\n logger('DBAS.LIB', 'main {}, include_disabled {}'.format(statement_uid,\n include_disabled))\n db_arguments = __get_arguments_of_conclusion(statement_uid,\n include_disabled)\n arg_array = [arg for arg in db_arguments] if db_arguments else []\n premises = DBDiscussionSession.query(Premise).filter_by(statement_uid=\n statement_uid)\n if not include_disabled:\n premises = premises.filter_by(is_disabled=False)\n premises = premises.all()\n for premise in premises:\n arg_array += __get_argument_of_premisegroup(premise.\n premisegroup_uid, include_disabled)\n db_undercuts = []\n for arg in arg_array:\n db_undercuts += __get_undercuts_of_argument(arg.uid, include_disabled)\n db_undercutted_undercuts = []\n for arg in db_undercuts:\n db_undercutted_undercuts += __get_undercuts_of_argument(arg.uid,\n include_disabled)\n arg_array = list(set(arg_array + db_undercuts + db_undercutted_undercuts))\n logger('DBAS.LIB', 'returning arguments {}'.format([arg.uid for arg in\n arg_array]))\n return arg_array if len(arg_array) > 0 else None\n\n\ndef __get_argument_of_premisegroup(premisegroup_uid, include_disabled):\n \"\"\"\n Returns all arguments with the given premisegroup\n\n :param premisegroup_uid: PremisgGroup.uid\n :param include_disabled: Boolean\n :return: list of Arguments\n \"\"\"\n db_arguments = DBDiscussionSession.query(Argument).filter_by(\n premisegroup_uid=premisegroup_uid)\n if not include_disabled:\n db_arguments = db_arguments.filter_by(is_disabled=False)\n return db_arguments.all() if db_arguments else []\n\n\ndef __get_undercuts_of_argument(argument_uid, include_disabled):\n \"\"\"\n Returns all undercuts fo the given argument\n\n :param argument_uid: Argument.uid\n :param include_disabled: boolean\n :return: list of Arguments\n \"\"\"\n db_undercuts = DBDiscussionSession.query(Argument).filter_by(argument_uid\n =argument_uid)\n if not include_disabled:\n db_undercuts = db_undercuts.filter_by(is_disabled=False)\n return db_undercuts.all() if db_undercuts else []\n\n\ndef __get_arguments_of_conclusion(statement_uid, include_disabled):\n \"\"\"\n Returns all arguments, where the statement is set as conclusion\n\n :param statement_uid: Statement.uid\n :param include_disabled: Boolean\n :return: list of arguments\n \"\"\"\n db_arguments = DBDiscussionSession.query(Argument).filter_by(conclusion_uid\n =statement_uid)\n if not include_disabled:\n db_arguments = db_arguments.filter_by(is_disabled=False)\n return db_arguments.all() if db_arguments else []\n\n\ndef get_all_arguments_with_text_by_statement_id(statement_uid):\n \"\"\"\n Given a statement_uid, it returns all arguments, which use this statement and adds\n the corresponding text to it, which normally appears in the bubbles. The resulting\n text depends on the provided language.\n\n :param statement_uid: uid to a statement, which should be analyzed\n :return: list of dictionaries containing some properties of these arguments\n :rtype: list\n \"\"\"\n logger('DBAS.LIB', 'main ' + str(statement_uid))\n arguments = get_all_arguments_by_statement(statement_uid)\n results = []\n if arguments:\n results = [{'uid': arg.uid, 'text': get_text_for_argument_uid(arg.\n uid)} for arg in arguments]\n return results\n\n\ndef get_all_arguments_with_text_and_url_by_statement_id(db_statement,\n urlmanager, color_statement=False, is_jump=False):\n \"\"\"\n Given a statement_uid, it returns all arguments, which use this statement and adds\n the corresponding text to it, which normally appears in the bubbles. The resulting\n text depends on the provided language.\n\n :param db_statement: Statement\n :param urlmanager:\n :param color_statement: True, if the statement (specified by the ID) should be colored\n :return: list of dictionaries containing some properties of these arguments\n :rtype: list\n \"\"\"\n logger('DBAS.LIB', 'main ' + str(db_statement.uid))\n arguments = get_all_arguments_by_statement(db_statement.uid)\n uids = [arg.uid for arg in arguments] if arguments else None\n results = list()\n sb = '<{} data-argumentation-type=\"position\">'.format(tag_type\n ) if color_statement else ''\n se = '</{}>'.format(tag_type) if color_statement else ''\n if not uids:\n return []\n uids.sort()\n for uid in uids:\n statement_text = db_statement.get_text()\n attack_type = 'jump' if is_jump else ''\n argument_text = get_text_for_argument_uid(uid, anonymous_style=True,\n attack_type=attack_type)\n pos = argument_text.lower().find(statement_text.lower())\n argument_text = argument_text[:pos] + sb + argument_text[pos:]\n pos += len(statement_text) + len(sb)\n argument_text = argument_text[:pos] + se + argument_text[pos:]\n results.append({'uid': uid, 'text': argument_text, 'url':\n urlmanager.get_url_for_jump(uid)})\n return results\n\n\ndef get_slug_by_statement_uid(uid):\n \"\"\"\n Returns slug for the given Issue.uid\n\n :param uid: Issue.uid\n :return: String\n \"\"\"\n db_statement = DBDiscussionSession.query(Statement).get(uid)\n return resolve_issue_uid_to_slug(db_statement.issue_uid)\n\n\ndef get_text_for_argument_uid(uid, nickname=None, with_html_tag=False,\n start_with_intro=False, first_arg_by_user=False, user_changed_opinion=\n False, rearrange_intro=False, colored_position=False, attack_type=None,\n minimize_on_undercut=False, is_users_opinion=True, anonymous_style=\n False, support_counter_argument=False):\n \"\"\"\n Returns current argument as string like \"conclusion, because premise1 and premise2\"\n\n :param uid: Integer\n :param with_html_tag: Boolean\n :param start_with_intro: Boolean\n :param first_arg_by_user: Boolean\n :param user_changed_opinion: Boolean\n :param rearrange_intro: Boolean\n :param colored_position: Boolean\n :param attack_type: String\n :param minimize_on_undercut: Boolean\n :param anonymous_style: Boolean\n :param support_counter_argument: Boolean\n :return: String\n \"\"\"\n logger('DBAS.LIB', 'main {}'.format(uid))\n db_argument = DBDiscussionSession.query(Argument).get(uid)\n if not db_argument:\n return None\n lang = db_argument.lang\n _t = Translator(lang)\n premisegroup_by_user = False\n author_uid = None\n db_user = DBDiscussionSession.query(User).filter_by(nickname=str(nickname)\n ).first()\n if db_user:\n author_uid = db_user.uid\n pgroup = DBDiscussionSession.query(PremiseGroup).get(db_argument.\n premisegroup_uid)\n marked_argument = DBDiscussionSession.query(MarkedArgument).filter_by(\n argument_uid=uid, author_uid=db_user.uid).first()\n premisegroup_by_user = (pgroup.author_uid == db_user.uid or \n marked_argument is not None)\n arg_array = [db_argument]\n while db_argument.argument_uid:\n db_argument = DBDiscussionSession.query(Argument).get(db_argument.\n argument_uid)\n arg_array.append(db_argument)\n if attack_type == 'jump':\n return __build_argument_for_jump(arg_array, with_html_tag)\n if len(arg_array) == 1:\n return __build_single_argument(arg_array[0], rearrange_intro,\n with_html_tag, colored_position, attack_type, _t,\n start_with_intro, is_users_opinion, anonymous_style,\n support_counter_argument, author_uid)\n else:\n return __build_nested_argument(arg_array, first_arg_by_user,\n user_changed_opinion, with_html_tag, start_with_intro,\n minimize_on_undercut, anonymous_style, premisegroup_by_user, _t)\n\n\ndef __build_argument_for_jump(arg_array: List[Argument], with_html_tag):\n \"\"\"\n Build tet for an argument, if we jump to this argument\n\n :param arg_array: [Argument]\n :param with_html_tag: Boolean\n :return: String\n \"\"\"\n tag_premise = ('<' + tag_type + ' data-argumentation-type=\"attack\">' if\n with_html_tag else '')\n tag_conclusion = ('<' + tag_type +\n ' data-argumentation-type=\"argument\">' if with_html_tag else '')\n tag_end = '</' + tag_type + '>' if with_html_tag else ''\n lang = arg_array[0].lang\n _t = Translator(lang)\n if len(arg_array) == 1:\n ret_value = __build_val_for_jump(arg_array[0], tag_premise,\n tag_conclusion, tag_end, _t)\n elif len(arg_array) == 2:\n ret_value = __build_val_for_undercut(arg_array, tag_premise,\n tag_conclusion, tag_end, _t)\n else:\n ret_value = __build_val_for_undercutted_undercut(arg_array,\n tag_premise, tag_conclusion, tag_end, _t)\n return ret_value.replace(' ', ' ')\n\n\ndef __build_val_for_jump(db_argument, tag_premise, tag_conclusion, tag_end, _t\n ):\n premises = db_argument.get_premisegroup_text()\n if premises[-1] != '.':\n premises += '.'\n conclusion = db_argument.get_conclusion_text()\n because = _t.get(_.because).lower()\n conclusion = tag_conclusion + conclusion + tag_end\n premises = tag_premise + premises + tag_end\n intro = start_con + _t.get(_.isNotRight).lower(\n ) + end_tag if not db_argument.is_supportive else ''\n ret_value = '{} {} {} {}'.format(conclusion, intro, because, premises)\n if _t.get_lang() == 'de':\n intro = _t.get(_.itIsTrueThatAnonymous\n ) if db_argument.is_supportive else _t.get(_.itIsFalseThatAnonymous\n )\n intro = intro[0:1].upper() + intro[1:]\n intro = (start_pro if db_argument.is_supportive else start_con\n ) + intro + end_tag\n ret_value = '{} {}, {} {}'.format(intro, conclusion, because, premises)\n return ret_value\n\n\ndef __build_val_for_undercut(arg_array: List[Argument], tag_premise,\n tag_conclusion, tag_end, _t):\n db_undercut = arg_array[0]\n db_conclusion_argument = arg_array[1]\n premise = db_undercut.get_premisegroup_text()\n conclusion_premise = db_conclusion_argument.get_premisegroup_text()\n conclusion_conclusion = db_conclusion_argument.get_conclusion_text()\n premise = tag_premise + premise + tag_end\n conclusion_premise = tag_conclusion + conclusion_premise + tag_end\n conclusion_conclusion = tag_conclusion + conclusion_conclusion + tag_end\n intro = _t.get(_.statementAbout) + ' ' if _t.get_lang() == 'de' else ''\n bind = start_con + _t.get(_.isNotAGoodReasonFor) + end_tag\n because = _t.get(_.because)\n ret_value = '{}{} {} {}. {} {}.'.format(intro, conclusion_premise, bind,\n conclusion_conclusion, because, premise)\n return ret_value\n\n\ndef __build_val_for_undercutted_undercut(arg_array: List[Argument],\n tag_premise, tag_conclusion, tag_end, _t):\n premise1 = arg_array[0].get_premisegroup_text()\n premise2 = arg_array[1].get_premisegroup_text()\n premise3 = arg_array[2].get_premisegroup_text()\n conclusion = arg_array[2].get_conclusion_text()\n bind = start_con + _t.get(_.isNotAGoodReasonAgainstArgument) + end_tag\n because = _t.get(_.because)\n seperator = ',' if _t.get_lang() == 'de' else ''\n premise1 = tag_premise + premise1 + tag_end\n premise2 = tag_conclusion + premise2 + tag_end\n argument = '{}{} {} {}'.format(conclusion, seperator, because.lower(),\n premise3)\n argument = tag_conclusion + argument + tag_end\n ret_value = '{} {} {}. {} {}'.format(premise2, bind, argument, because,\n premise1)\n return ret_value\n\n\ndef __build_single_argument(db_argument: Argument, rearrange_intro: bool,\n with_html_tag: bool, colored_position: bool, attack_type: str, _t:\n Translator, start_with_intro: bool, is_users_opinion: bool,\n anonymous_style: bool, support_counter_argument: bool=False, author_uid\n =None):\n \"\"\"\n Build up argument text for a single argument\n\n Please, do not touch this!\n\n :param uid: Argument.uid\n :param rearrange_intro: Boolean\n :param with_html_tag: Boolean\n :param colored_position: Boolean\n :param attack_type: String\n :param _t: Translator\n :param start_with_intro: Boolean\n :param is_users_opinion: Boolean\n :param anonymous_style: Boolean\n :param support_counter_argument: Boolean\n :param author_uid: User.uid\n :return: String\n \"\"\"\n premises_text = db_argument.get_premisegroup_text()\n conclusion_text = db_argument.get_conclusion_text()\n lang = db_argument.lang\n if lang != 'de':\n premises_text = premises_text[0:1].lower() + premises_text[1:]\n premises_text, conclusion_text, sb, sb_none, se = (\n __get_tags_for_building_single_argument(with_html_tag, attack_type,\n colored_position, premises_text, conclusion_text))\n marked_element = False\n if author_uid:\n db_marked = DBDiscussionSession.query(MarkedArgument).filter(\n MarkedArgument.argument_uid == db_argument.uid, MarkedArgument.\n author_uid == author_uid).first()\n marked_element = db_marked is not None\n you_have_the_opinion_that = _t.get(_.youHaveTheOpinionThat).format(''\n ).strip()\n if lang == 'de':\n ret_value = __build_single_argument_for_de(_t, sb, se,\n you_have_the_opinion_that, start_with_intro, anonymous_style,\n rearrange_intro, db_argument, attack_type, sb_none,\n marked_element, lang, premises_text, conclusion_text,\n is_users_opinion, support_counter_argument)\n else:\n ret_value = __build_single_argument_for_en(_t, sb, se,\n you_have_the_opinion_that, marked_element, conclusion_text,\n premises_text, db_argument)\n return ret_value.replace(' ', ' ')\n\n\ndef __get_tags_for_building_single_argument(with_html_tag, attack_type,\n colored_position, premises, conclusion):\n sb_none = start_tag if with_html_tag else ''\n se = end_tag if with_html_tag else ''\n if attack_type not in ['dont_know', 'jump']:\n sb = start_tag if with_html_tag else ''\n if colored_position:\n sb = start_position if with_html_tag else ''\n if attack_type == Relations.UNDERMINE:\n premises = sb + premises + se\n else:\n conclusion = sb + conclusion + se\n else:\n sb = start_argument if with_html_tag else ''\n sb_tmp = start_attack if with_html_tag else ''\n premises = sb + premises + se\n conclusion = sb_tmp + conclusion + se\n return premises, conclusion, sb, sb_none, se\n\n\ndef __build_single_argument_for_de(_t, sb, se, you_have_the_opinion_that,\n start_with_intro, anonymous_style, rearrange_intro, db_argument,\n attack_type, sb_none, marked_element, lang, premises, conclusion,\n is_users_opinion, support_counter_argument):\n if start_with_intro and not anonymous_style:\n intro = _t.get(_.itIsTrueThat\n ) if db_argument.is_supportive else _t.get(_.itIsFalseThat)\n if rearrange_intro:\n intro = _t.get(_.itTrueIsThat\n ) if db_argument.is_supportive else _t.get(_.itFalseIsThat)\n ret_value = (sb_none if attack_type in ['dont_know'] else sb\n ) + intro + se + ' '\n elif is_users_opinion and not anonymous_style:\n ret_value = sb_none\n if support_counter_argument:\n ret_value += _t.get(_.youAgreeWithThecounterargument)\n elif marked_element:\n ret_value += you_have_the_opinion_that\n else:\n ret_value += _t.get(_.youArgue)\n ret_value += se + ' '\n else:\n tmp = _t.get(_.itIsTrueThatAnonymous if db_argument.is_supportive else\n _.itIsFalseThatAnonymous)\n ret_value = sb_none + sb + tmp + se + ' '\n ret_value += ' {}{}{} '.format(sb, _t.get(_.itIsNotRight), se\n ) if not db_argument.is_supportive else ''\n ret_value += conclusion\n ret_value += ', ' if lang == 'de' else ' '\n ret_value += sb_none + _t.get(_.because).lower() + se + ' ' + premises\n return ret_value\n\n\n<mask token>\n\n\ndef __build_nested_argument(arg_array: List[Argument], first_arg_by_user,\n user_changed_opinion, with_html_tag, start_with_intro,\n minimize_on_undercut, anonymous_style, premisegroup_by_user, _t):\n \"\"\"\n\n :param arg_array:\n :param first_arg_by_user:\n :param user_changed_opinion:\n :param with_html_tag:\n :param start_with_intro:\n :param minimize_on_undercut:\n :param anonymous_style:\n :param premisegroup_by_user:\n :param _t:\n :return:\n \"\"\"\n pgroups = []\n supportive = []\n arg_array = arg_array[::-1]\n local_lang = arg_array[0].lang\n for db_argument in arg_array:\n text = db_argument.get_premisegroup_text()\n pgroups.append(text)\n supportive.append(db_argument.is_supportive)\n conclusion = arg_array[0].get_conclusion_text()\n sb = start_position if with_html_tag else ''\n se = end_tag if with_html_tag else ''\n because = (', ' if local_lang == 'de' else ' ') + _t.get(_.because).lower(\n ) + ' '\n if len(arg_array\n ) % 2 is 0 and not first_arg_by_user and not anonymous_style:\n ret_value = _t.get(_.earlierYouArguedThat if user_changed_opinion else\n _.otherUsersSaidThat) + ' '\n tmp_users_opinion = True\n elif not anonymous_style:\n ret_value = _t.get(_.soYourOpinionIsThat\n ) + ': ' if start_with_intro else ''\n tmp_users_opinion = False\n conclusion = se + conclusion[0:1].upper() + conclusion[1:]\n else:\n ret_value = _t.get(_.someoneArgued) + ' '\n tmp_users_opinion = False\n tmp = _t.get(_.itFalseIsThat) + ' ' if not supportive[0] else ''\n ret_value += tmp + conclusion + because + pgroups[0] + '.'\n del pgroups[0]\n if minimize_on_undercut and not user_changed_opinion and len(pgroups) > 2:\n return _t.get(_.butYouCounteredWith).strip() + ' ' + sb + pgroups[\n len(pgroups) - 1] + se + '.'\n for i, pgroup in enumerate(pgroups):\n ret_value += ' '\n if tmp_users_opinion and not anonymous_style:\n tmp = (_.butYouCounteredWithArgument if premisegroup_by_user else\n _.butYouCounteredWithInterest)\n ret_value += _t.get(_.otherParticipantsConvincedYouThat if\n user_changed_opinion else tmp)\n elif not anonymous_style:\n ret_value += _t.get(_.youAgreeWithThatNow)\n else:\n ret_value += _t.get(_.otherUsersSaidThat) if i == 0 else _t.get(_\n .thenOtherUsersSaidThat)\n ret_value += sb + ' ' + pgroups[i] + '.'\n tmp_users_opinion = not tmp_users_opinion\n return ret_value.replace(' ', ' ')\n\n\ndef get_text_for_premisegroup_uid(uid):\n \"\"\"\n Returns joined text of the premise group and the premise ids\n\n :param uid: premisegroup_uid\n :return: text, uids\n \"\"\"\n warnings.warn('Use PremiseGroup.get_text() instead.', DeprecationWarning)\n db_premises = DBDiscussionSession.query(Premise).filter_by(premisegroup_uid\n =uid).join(Statement).all()\n if len(db_premises) == 0:\n return ''\n texts = [premise.get_text() for premise in db_premises]\n lang = DBDiscussionSession.query(Statement).get(db_premises[0].\n statements.uid).lang\n _t = Translator(lang)\n return ' {} '.format(_t.get(_.aand)).join(texts)\n\n\n<mask token>\n\n\ndef get_text_for_premise(uid: int, colored_position: bool=False):\n \"\"\"\n Returns text of premise with given uid\n\n :param uid: Statement.uid\n :param colored_position: Boolean\n :return: String\n \"\"\"\n db_premise = DBDiscussionSession.query(Premise).get(uid)\n if db_premise:\n return db_premise.get_text(html=colored_position)\n else:\n return None\n\n\ndef get_text_for_conclusion(argument, start_with_intro=False,\n rearrange_intro=False, is_users_opinion=True):\n \"\"\"\n Check the arguments conclusion whether it is an statement or an argument and returns the text\n\n :param argument: Argument\n :param start_with_intro: Boolean\n :param rearrange_intro: Boolean\n :return: String\n \"\"\"\n if argument.argument_uid:\n return get_text_for_argument_uid(argument.argument_uid,\n start_with_intro, rearrange_intro=rearrange_intro,\n is_users_opinion=is_users_opinion)\n else:\n return argument.get_conclusion_text()\n\n\ndef resolve_issue_uid_to_slug(uid):\n \"\"\"\n Given the issue uid query database and return the correct slug of the issue.\n\n :param uid: issue_uid\n :type uid: int\n :return: Slug of issue\n :rtype: str\n \"\"\"\n issue = DBDiscussionSession.query(Issue).get(uid)\n return issue.slug if issue else None\n\n\ndef get_all_attacking_arg_uids_from_history(history):\n \"\"\"\n Returns all arguments of the history, which attacked the user\n\n :param history: String\n :return: [Arguments.uid]\n :rtype: list\n \"\"\"\n try:\n splitted_history = history.split('-')\n uids = []\n for part in splitted_history:\n if 'reaction' in part:\n parts = part.split('/')\n pos = parts.index('reaction')\n uids.append(part.split('/')[pos + 3])\n return uids\n except AttributeError:\n return []\n\n\ndef get_user_by_private_or_public_nickname(nickname):\n \"\"\"\n Gets the user by his (public) nickname, based on the option, whether his nickname is public or not\n\n :param nickname: Nickname of the user\n :return: Current user or None\n \"\"\"\n db_user = get_user_by_case_insensitive_nickname(nickname)\n db_public_user = get_user_by_case_insensitive_public_nickname(nickname)\n uid = 0\n if db_user:\n uid = db_user.uid\n elif db_public_user:\n uid = db_public_user.uid\n db_settings = DBDiscussionSession.query(Settings).filter_by(author_uid=uid\n ).first()\n if not db_settings:\n return None\n if db_settings.should_show_public_nickname and db_user:\n return db_user\n elif not db_settings.should_show_public_nickname and db_public_user:\n return db_public_user\n return None\n\n\ndef get_user_by_case_insensitive_nickname(nickname):\n \"\"\"\n Returns user with given nickname\n\n :param nickname: String\n :return: User or None\n \"\"\"\n return DBDiscussionSession.query(User).filter(func.lower(User.nickname) ==\n func.lower(nickname)).first()\n\n\ndef get_user_by_case_insensitive_public_nickname(public_nickname):\n \"\"\"\n Returns user with given public nickname\n\n :param public_nickname: String\n :return: User or None\n \"\"\"\n return DBDiscussionSession.query(User).filter(func.lower(User.\n public_nickname) == func.lower(public_nickname)).first()\n\n\ndef pretty_print_options(message):\n \"\"\"\n Some modifications for pretty printing.\n Use uppercase for first letter in text and a single dot for the end if there isn't one already.\n\n :param message: String\n :return: String\n \"\"\"\n if message[0:1] == '<':\n pos = message.index('>')\n message = message[0:pos + 1] + message[pos + 1:pos + 2].upper(\n ) + message[pos + 2:]\n else:\n message = message[0:1].upper() + message[1:]\n if message[-1] == '>':\n pos = message.rfind('<')\n if message[pos - 1:pos] not in ['.', '?', '!']:\n message = message[0:pos] + '.' + message[pos:]\n elif not message.endswith(tuple(['.', '?', '!'])) and id is not 'now':\n message += '.'\n return message\n\n\ndef create_speechbubble_dict(bubble_type: BubbleTypes, is_markable: bool=\n False, is_author: bool=False, uid: str='', bubble_url: str='', content:\n str='', omit_bubble_url: bool=False, omit_vote_info: bool=False,\n argument_uid: int=None, statement_uid: int=None, is_supportive: bool=\n False, nickname: str='anonymous', lang: str='en', is_users_opinion:\n bool=False, other_author: User=None):\n \"\"\"\n Creates an dictionary which includes every information needed for a bubble.\n\n :param bubble_type: BubbleTypes\n :param is_markable: True if the content itself could be flagged\n :param is_author: True if the current user is author of the content\n :param uid: Identifier for the bubble\n :param bubble_url: URL for the click event of the bubble\n :param content: Text of the bubble\n :param omit_bubble_url: True if the bubble should have a link\n :param omit_vote_info: True if the bubble have the little, grey information text\n :param argument_uid: Argument.uid\n :param statement_uid: Statement.uid\n :param is_supportive: Boolean\n :param nickname: String\n :param omit_bubble_url: Boolean\n :param lang: is_users_opinion\n :param is_users_opinion: Boolean\n :return: dict()\n \"\"\"\n gravatar_link = get_global_url() + '/static/images/icon.png'\n profile = None\n if uid is not 'now':\n content = pretty_print_options(content)\n if bubble_type is BubbleTypes.SYSTEM and other_author is not None:\n gravatar_link = get_profile_picture(other_author, 25)\n profile = '/user/{}'.format(other_author.uid),\n if bubble_type is BubbleTypes.USER and nickname != 'anonymous':\n db_user = DBDiscussionSession.query(User).filter_by(nickname=nickname\n ).first()\n db_marked = None\n gravatar_link = get_profile_picture(db_user, 25)\n if argument_uid is not None and db_user is not None:\n db_marked = DBDiscussionSession.query(MarkedArgument).filter(\n MarkedArgument.argument_uid == argument_uid, MarkedArgument\n .author_uid == db_user.uid).first()\n if statement_uid is not None and db_user is not None:\n db_marked = DBDiscussionSession.query(MarkedStatement).filter(\n MarkedStatement.statement_uid == statement_uid, \n MarkedStatement.author_uid == db_user.uid).first()\n is_users_opinion = db_marked is not None\n speech = {'is_user': bubble_type is BubbleTypes.USER, 'is_system': \n bubble_type is BubbleTypes.SYSTEM, 'is_status': bubble_type is\n BubbleTypes.STATUS, 'is_info': bubble_type is BubbleTypes.INFO,\n 'is_markable': is_markable, 'is_author': is_author, 'id': uid if \n len(str(uid)) > 0 else uuid4().hex, 'bubble_url': bubble_url,\n 'message': content, 'omit_bubble_url': omit_bubble_url,\n 'omit_vote_info': omit_vote_info, 'data_type': 'argument' if\n argument_uid else 'statement' if statement_uid else 'None',\n 'data_argument_uid': argument_uid, 'data_statement_uid':\n statement_uid, 'data_is_supportive': is_supportive,\n 'is_users_opinion': is_users_opinion, 'enemy': {'avatar':\n gravatar_link, 'profile': profile, 'available': profile is not None}}\n votecount_keys = __get_text_for_click_and_mark_count(nickname, \n bubble_type is BubbleTypes.USER, argument_uid, statement_uid,\n speech, lang)\n speech['votecounts_message'] = votecount_keys[speech['votecounts']]\n return speech\n\n\ndef __get_text_for_click_and_mark_count(nickname, is_user, argument_uid,\n statement_uid, speech, lang):\n \"\"\"\n Build text for a bubble, how many other participants have the same interest?\n\n :param nickname: User.nickname\n :param is_user: boolean\n :param argument_uid: Argument.uid\n :param statement_uid: Statement.uid\n :param speech: dict()\n :param lang: ui_locales\n :return: [String]\n \"\"\"\n if not nickname:\n nickname = 'anonymous'\n db_user = DBDiscussionSession.query(User).filter_by(nickname=nickname\n ).first()\n if not db_user:\n db_user = DBDiscussionSession.query(User).filter_by(nickname=\n 'anonymous').first()\n db_clicks, db_marks = __get_clicks_and_marks(argument_uid,\n statement_uid, db_user)\n _t = Translator(lang)\n speech['votecounts'] = len(db_clicks) if db_clicks else 0\n if db_marks:\n speech['votecounts'] += len(db_marks)\n votecount_keys = defaultdict(lambda : '{} {}.'.format(speech[\n 'votecounts'], _t.get(_.voteCountTextMore)))\n if is_user and db_user.gender == 'm':\n gender_key = _.voteCountTextFirstM\n elif is_user and db_user.gender == 'f':\n gender_key = _.voteCountTextFirstF\n else:\n gender_key = _.voteCountTextFirst\n votecount_keys[0] = '{}.'.format(_t.get(gender_key))\n votecount_keys[1] = _t.get(_.voteCountTextOneOther) + '.'\n return votecount_keys\n\n\ndef __get_clicks_and_marks(argument_uid, statement_uid, db_user):\n db_clicks = None\n db_marks = None\n if argument_uid:\n db_clicks = DBDiscussionSession.query(ClickedArgument).filter(\n ClickedArgument.argument_uid == argument_uid, ClickedArgument.\n is_up_vote == True, ClickedArgument.is_valid, ClickedArgument.\n author_uid != db_user.uid).all()\n db_marks = DBDiscussionSession.query(MarkedArgument).filter(\n MarkedArgument.argument_uid == argument_uid, MarkedArgument.\n author_uid != db_user.uid).all()\n elif statement_uid:\n db_clicks = DBDiscussionSession.query(ClickedStatement).filter(\n ClickedStatement.statement_uid == statement_uid, \n ClickedStatement.is_up_vote == True, ClickedStatement.is_valid,\n ClickedStatement.author_uid != db_user.uid).all()\n db_marks = DBDiscussionSession.query(MarkedStatement).filter(\n MarkedStatement.statement_uid == statement_uid, MarkedStatement\n .author_uid != db_user.uid).all()\n return db_clicks, db_marks\n\n\ndef is_argument_disabled_due_to_disabled_statements(argument):\n \"\"\"\n Returns true if any involved statement is disabled.\n\n :param argument: Argument\n :return: Boolean\n \"\"\"\n if argument.conclusion_uid is None:\n db_argument = DBDiscussionSession.query(Argument).get(argument.\n argument_uid)\n conclusion = DBDiscussionSession(Statement).get(db_argument.\n conclusion_uid)\n if conclusion.is_disabled:\n return True\n premises = __get_all_premises_of_argument(db_argument)\n for premise in premises:\n if premise.statements.is_disabled:\n return True\n else:\n print(argument.conclusion_uid)\n conclusion = DBDiscussionSession.query(Statement).get(argument.\n conclusion_uid)\n if conclusion.is_disabled:\n return True\n premises = __get_all_premises_of_argument(argument)\n for premise in premises:\n if premise.statements.is_disabled:\n return True\n return False\n\n\ndef is_author_of_statement(db_user: User, statement_uid: int) ->bool:\n \"\"\"\n Is the user with given nickname author of the statement?\n\n :param db_user: User\n :param statement_uid: Statement.uid\n :return: Boolean\n \"\"\"\n db_user = (db_user if db_user and db_user.nickname !=\n nick_of_anonymous_user else None)\n if not db_user:\n return False\n db_textversion = DBDiscussionSession.query(TextVersion).filter_by(\n statement_uid=statement_uid).order_by(TextVersion.uid.asc()).first()\n if not db_textversion:\n return False\n return db_textversion.author_uid == db_user.uid\n\n\ndef is_author_of_argument(db_user: User, argument_uid: int) ->bool:\n \"\"\"\n Is the user with given nickname author of the argument?\n\n :param db_user: User\n :param argument_uid: Argument.uid\n :return: Boolean\n \"\"\"\n db_user = (db_user if db_user and db_user.nickname !=\n nick_of_anonymous_user else None)\n if not db_user:\n return False\n db_argument = DBDiscussionSession.query(Argument).filter(Argument.uid ==\n argument_uid, Argument.author_uid == db_user.uid).first()\n return True if db_argument else False\n\n\ndef __get_all_premises_of_argument(argument):\n \"\"\"\n Returns list with all premises of the argument.\n\n :param argument: Argument\n :return: list()\n \"\"\"\n ret_list = []\n db_premises = DBDiscussionSession.query(Premise).filter_by(premisegroup_uid\n =argument.premisegroup_uid).join(Statement).all()\n for premise in db_premises:\n ret_list.append(premise)\n return ret_list\n\n\ndef get_profile_picture(user: User, size: int=80, ignore_privacy_settings:\n bool=False):\n \"\"\"\n Returns the url to a https://secure.gravatar.com picture, with the option wavatar and size of 80px\n\n :param user: User\n :param size: Integer, default 80\n :param ignore_privacy_settings:\n :return: String\n \"\"\"\n additional_id = ''\n if user and isinstance(user, User):\n additional_id = ('' if user.settings.should_show_public_nickname or\n ignore_privacy_settings else 'x')\n return __get_gravatar(user, additional_id, size)\n\n\ndef get_public_profile_picture(user: User, size: int=80):\n \"\"\"\n Returns the url to a https://secure.gravatar.com picture, with the option wavatar and size of 80px\n If the user doesn't want an public profile, an anonymous image will be returned\n\n :param user: User\n :param size: Integer, default 80\n :return: String\n \"\"\"\n additional_id = ''\n if user.settings.should_show_public_nickname:\n additional_id = 'x'\n if len(str(user.oauth_provider)) > 0:\n additional_id = '{}{}'.format(user.oauth_provider, user.\n oauth_provider_id)\n return __get_gravatar(user, additional_id, size)\n\n\ndef __get_gravatar(user, additional_id, size):\n if user:\n if str(user.email) == 'None':\n email = (user.nickname + additional_id).encode('utf-8')\n else:\n email = (user.email + additional_id).encode('utf-8')\n else:\n email = 'unknown'.encode('utf-8')\n gravatar_url = 'https://secure.gravatar.com/avatar/{}?'.format(hashlib.\n md5(email.lower()).hexdigest())\n gravatar_url += parse.urlencode({'d': 'wavatar', 's': str(size)})\n return gravatar_url\n\n\ndef get_author_data(uid, gravatar_on_right_side=True,\n linked_with_users_page=True, profile_picture_size=20):\n \"\"\"\n Returns a-tag with gravatar of current author and users page as href\n\n :param uid: Uid of the author\n :param gravatar_on_right_side: True, if the gravatar is on the right of authors name\n :param linked_with_users_page: True, if the text is a link to the authors site\n :param profile_picture_size: Integer\n :return: HTML-String\n \"\"\"\n db_user = DBDiscussionSession.query(User).get(int(uid))\n if not db_user:\n return None, 'Missing author with uid ' + str(uid), False\n nick = db_user.global_nickname\n img_src = get_profile_picture(db_user, profile_picture_size)\n link_begin = ''\n link_end = ''\n if linked_with_users_page:\n link_begin = '<a href=\"/user/{}\" title=\"{}\">'.format(db_user.uid, nick)\n link_end = '</a>'\n side = 'left' if gravatar_on_right_side else 'right'\n img = '<img class=\"img-circle\" src=\"{}\" style=\"padding-{}: 0.3em\">'.format(\n img_src, side)\n if gravatar_on_right_side:\n return db_user, '{}{}{}{}'.format(link_begin, nick, img, link_end\n ), True\n else:\n return db_user, '{}{}{}{}'.format(link_begin, img, nick, link_end\n ), True\n\n\ndef bubbles_already_last_in_list(bubble_list, bubbles):\n \"\"\"\n Are the given bubbles already at the end of the bubble list\n\n :param bubble_list: list of Bubbles\n :param bubbles: list of bubbles\n :return: Boolean\n \"\"\"\n if isinstance(bubbles, list):\n length = len(bubbles)\n else:\n length = 1\n bubbles = [bubbles]\n if len(bubble_list) < length:\n return False\n for bubble in bubbles:\n if 'message' not in bubble:\n return False\n start_index = -length\n is_already_in = False\n for bubble in bubbles:\n last = bubble_list[start_index]\n if 'message' not in last or 'message' not in bubble:\n return False\n text1 = unhtmlify(last['message'].lower()).strip()\n text2 = unhtmlify(bubble['message'].lower()).strip()\n is_already_in = is_already_in or text1 == text2\n start_index += 1\n return is_already_in\n\n\ndef unhtmlify(html):\n \"\"\"\n Remove html-tags and unescape encoded html-entities.\n\n :param html: Evil-string containing html\n :return:\n \"\"\"\n return unescape(re.sub('<.*?>', '', html))\n",
"step-5": "\"\"\"\nCommon, pure functions used by the D-BAS.\n\n\n.. codeauthor:: Tobias Krauthoff <krauthoff@cs.uni-duesseldorf.de\n\"\"\"\nimport hashlib\nimport locale\nimport os\nimport re\nimport warnings\nfrom collections import defaultdict\nfrom datetime import datetime\nfrom enum import Enum, auto\nfrom html import escape, unescape\nfrom typing import List\nfrom urllib import parse\nfrom uuid import uuid4\n\nfrom sqlalchemy import func\n\nfrom dbas.database import DBDiscussionSession\nfrom dbas.database.discussion_model import Argument, Premise, Statement, TextVersion, Issue, User, Settings, \\\n ClickedArgument, ClickedStatement, MarkedArgument, MarkedStatement, PremiseGroup\nfrom dbas.logger import logger\nfrom dbas.strings.keywords import Keywords as _\nfrom dbas.strings.translator import Translator\n\nnick_of_anonymous_user = 'anonymous'\n\nfallback_lang = 'en'\ntag_type = 'span'\nstart_attack = '<{} data-argumentation-type=\"attack\">'.format(tag_type)\nstart_argument = '<{} data-argumentation-type=\"argument\">'.format(tag_type)\nstart_position = '<{} data-argumentation-type=\"position\">'.format(tag_type)\nstart_content = '<{} class=\"triangle-content-text\">'.format(tag_type)\nstart_pro = '<{} data-attitude=\"pro\">'.format(tag_type)\nstart_con = '<{} data-attitude=\"con\">'.format(tag_type)\nstart_tag = '<{}>'.format(tag_type)\nend_tag = '</{}>'.format(tag_type)\n\n\nclass BubbleTypes(Enum):\n USER = auto()\n SYSTEM = auto()\n STATUS = auto()\n INFO = auto()\n\n def __str__(self):\n return str(self.value)\n\n\nclass Relations(Enum):\n UNDERMINE = 'undermine'\n UNDERCUT = 'undercut'\n REBUT = 'rebut'\n SUPPORT = 'support'\n\n def __str__(self):\n return str(self.value)\n\n\nclass Attitudes(Enum):\n AGREE = 'agree'\n DISAGREE = 'disagree'\n DONT_KNOW = 'dontknow'\n\n def __str__(self):\n return str(self.value)\n\n\nrelation_mapper = {relation.value: relation for relation in Relations}\nattitude_mapper = {attitude.value: attitude for attitude in Attitudes}\n\n\ndef get_global_url():\n \"\"\"\n Returns the global url of the project, based on the ENV\n\n :return: String\n \"\"\"\n return os.environ.get('URL', '')\n\n\ndef get_changelog(no):\n \"\"\"\n Returns the 'no' last entries from the changelog\n\n :param no: int\n :return: list\n \"\"\"\n path = str(os.path.realpath(__file__ + '/../../CHANGELOG.md'))\n lines = [line.rstrip('\\n').strip() for line in open(path) if len(line.rstrip('\\n').strip()) > 0]\n changelog = []\n title = ''\n body = []\n for l in lines:\n if l.startswith('#'):\n if len(title) > 0:\n changelog.append({'title': title, 'body': body})\n body = []\n title = l.replace('### ', '')\n else:\n body.append(l.replace('- ', ''))\n\n return changelog[0:no]\n\n\ndef is_development_mode(registry):\n \"\"\"\n Returns true, if mode is set to development in current ini file.\n\n :param registry: request.registry\n :return: Boolean\n \"\"\"\n if 'mode' in registry.settings:\n return registry.settings['mode'].lower() == 'development'\n return False\n\n\ndef usage_of_modern_bubbles(registry):\n \"\"\"\n Returns true, if modern bubbles are set in the current ini file.\n\n :param registry: request.registry\n :return: Boolean\n \"\"\"\n if 'modern_bubbles' in registry.settings:\n return registry.settings['modern_bubbles'].lower() == 'true'\n return False\n\n\ndef usage_of_matomo(registry):\n \"\"\"\n Returns true, if matomo is set in the current ini file.\n\n :param registry: request.registry\n :return: Boolean\n \"\"\"\n if 'mode' in registry.settings:\n return registry.settings['usage_of_matomo'].lower() == 'true'\n return False\n\n\ndef escape_string(text):\n \"\"\"\n Escapes all html special chars.\n\n :param text: string\n :return: html.escape(text)\n \"\"\"\n return escape(text)\n\n\ndef get_discussion_language(matchdict, params, session, current_issue_uid=None):\n \"\"\"\n Returns Language.ui_locales\n CALL AFTER issue_handler.get_id_of_slug(..)!\n\n :param matchdict: matchdict of the current request\n :param params: params of the current request\n :param session: session of the current request\n :param current_issue_uid: uid\n :return:\n \"\"\"\n if not current_issue_uid:\n current_issue = DBDiscussionSession.query(Issue).filter(Issue.is_disabled == False,\n Issue.is_private == False).first()\n current_issue_uid = current_issue.uid if current_issue else None\n\n # first matchdict, then params, then session, afterwards fallback\n issue = matchdict['issue'] if 'issue' in matchdict \\\n else params['issue'] if 'issue' in params \\\n else session['issue'] if 'issue' in session \\\n else current_issue_uid\n\n db_issue = DBDiscussionSession.query(Issue).get(issue)\n\n return db_issue.lang if db_issue else 'en'\n\n\ndef python_datetime_pretty_print(ts, lang):\n \"\"\"\n Pretty print of a locale\n\n :param ts: Timestamp\n :param lang: ui_locales\n :return: String\n \"\"\"\n formatter = '%b. %d.'\n if lang == 'de':\n try:\n locale.setlocale(locale.LC_TIME, 'de_DE.UTF-8')\n formatter = '%d. %b.'\n except locale.Error:\n locale.setlocale(locale.LC_TIME, 'en_US.UTF8')\n\n return datetime.strptime(str(ts), '%Y-%m-%d').strftime(formatter)\n\n\ndef get_all_arguments_by_statement(statement_uid, include_disabled=False):\n \"\"\"\n Returns a list of all arguments where the statement is a conclusion or member of the premisegroup\n\n :param statement_uid: Statement.uid\n :param include_disabled: Boolean\n :return: [Arguments]\n \"\"\"\n logger('DBAS.LIB', 'main {}, include_disabled {}'.format(statement_uid, include_disabled))\n db_arguments = __get_arguments_of_conclusion(statement_uid, include_disabled)\n arg_array = [arg for arg in db_arguments] if db_arguments else []\n\n premises = DBDiscussionSession.query(Premise).filter_by(statement_uid=statement_uid)\n if not include_disabled:\n premises = premises.filter_by(is_disabled=False)\n premises = premises.all()\n\n for premise in premises:\n arg_array += __get_argument_of_premisegroup(premise.premisegroup_uid, include_disabled)\n\n db_undercuts = []\n for arg in arg_array:\n db_undercuts += __get_undercuts_of_argument(arg.uid, include_disabled)\n\n db_undercutted_undercuts = []\n for arg in db_undercuts:\n db_undercutted_undercuts += __get_undercuts_of_argument(arg.uid, include_disabled)\n\n arg_array = list(set(arg_array + db_undercuts + db_undercutted_undercuts))\n\n logger('DBAS.LIB', 'returning arguments {}'.format([arg.uid for arg in arg_array]))\n return arg_array if len(arg_array) > 0 else None\n\n\ndef __get_argument_of_premisegroup(premisegroup_uid, include_disabled):\n \"\"\"\n Returns all arguments with the given premisegroup\n\n :param premisegroup_uid: PremisgGroup.uid\n :param include_disabled: Boolean\n :return: list of Arguments\n \"\"\"\n db_arguments = DBDiscussionSession.query(Argument).filter_by(premisegroup_uid=premisegroup_uid)\n if not include_disabled:\n db_arguments = db_arguments.filter_by(is_disabled=False)\n return db_arguments.all() if db_arguments else []\n\n\ndef __get_undercuts_of_argument(argument_uid, include_disabled):\n \"\"\"\n Returns all undercuts fo the given argument\n\n :param argument_uid: Argument.uid\n :param include_disabled: boolean\n :return: list of Arguments\n \"\"\"\n db_undercuts = DBDiscussionSession.query(Argument).filter_by(argument_uid=argument_uid)\n if not include_disabled:\n db_undercuts = db_undercuts.filter_by(is_disabled=False)\n return db_undercuts.all() if db_undercuts else []\n\n\ndef __get_arguments_of_conclusion(statement_uid, include_disabled):\n \"\"\"\n Returns all arguments, where the statement is set as conclusion\n\n :param statement_uid: Statement.uid\n :param include_disabled: Boolean\n :return: list of arguments\n \"\"\"\n db_arguments = DBDiscussionSession.query(Argument).filter_by(conclusion_uid=statement_uid)\n if not include_disabled:\n db_arguments = db_arguments.filter_by(is_disabled=False)\n return db_arguments.all() if db_arguments else []\n\n\ndef get_all_arguments_with_text_by_statement_id(statement_uid):\n \"\"\"\n Given a statement_uid, it returns all arguments, which use this statement and adds\n the corresponding text to it, which normally appears in the bubbles. The resulting\n text depends on the provided language.\n\n :param statement_uid: uid to a statement, which should be analyzed\n :return: list of dictionaries containing some properties of these arguments\n :rtype: list\n \"\"\"\n logger('DBAS.LIB', 'main ' + str(statement_uid))\n arguments = get_all_arguments_by_statement(statement_uid)\n results = []\n if arguments:\n results = [{'uid': arg.uid, 'text': get_text_for_argument_uid(arg.uid)} for arg in arguments]\n return results\n\n\ndef get_all_arguments_with_text_and_url_by_statement_id(db_statement, urlmanager, color_statement=False,\n is_jump=False):\n \"\"\"\n Given a statement_uid, it returns all arguments, which use this statement and adds\n the corresponding text to it, which normally appears in the bubbles. The resulting\n text depends on the provided language.\n\n :param db_statement: Statement\n :param urlmanager:\n :param color_statement: True, if the statement (specified by the ID) should be colored\n :return: list of dictionaries containing some properties of these arguments\n :rtype: list\n \"\"\"\n logger('DBAS.LIB', 'main ' + str(db_statement.uid))\n arguments = get_all_arguments_by_statement(db_statement.uid)\n uids = [arg.uid for arg in arguments] if arguments else None\n results = list()\n sb = '<{} data-argumentation-type=\"position\">'.format(tag_type) if color_statement else ''\n se = '</{}>'.format(tag_type) if color_statement else ''\n\n if not uids:\n return []\n\n uids.sort()\n for uid in uids:\n statement_text = db_statement.get_text()\n attack_type = 'jump' if is_jump else ''\n argument_text = get_text_for_argument_uid(uid, anonymous_style=True, attack_type=attack_type)\n pos = argument_text.lower().find(statement_text.lower())\n\n argument_text = argument_text[:pos] + sb + argument_text[pos:]\n pos += len(statement_text) + len(sb)\n argument_text = argument_text[:pos] + se + argument_text[pos:]\n\n results.append({\n 'uid': uid,\n 'text': argument_text,\n 'url': urlmanager.get_url_for_jump(uid)\n })\n return results\n\n\ndef get_slug_by_statement_uid(uid):\n \"\"\"\n Returns slug for the given Issue.uid\n\n :param uid: Issue.uid\n :return: String\n \"\"\"\n db_statement = DBDiscussionSession.query(Statement).get(uid)\n return resolve_issue_uid_to_slug(db_statement.issue_uid)\n\n\ndef get_text_for_argument_uid(uid, nickname=None, with_html_tag=False, start_with_intro=False, first_arg_by_user=False,\n user_changed_opinion=False, rearrange_intro=False, colored_position=False,\n attack_type=None, minimize_on_undercut=False, is_users_opinion=True,\n anonymous_style=False, support_counter_argument=False):\n \"\"\"\n Returns current argument as string like \"conclusion, because premise1 and premise2\"\n\n :param uid: Integer\n :param with_html_tag: Boolean\n :param start_with_intro: Boolean\n :param first_arg_by_user: Boolean\n :param user_changed_opinion: Boolean\n :param rearrange_intro: Boolean\n :param colored_position: Boolean\n :param attack_type: String\n :param minimize_on_undercut: Boolean\n :param anonymous_style: Boolean\n :param support_counter_argument: Boolean\n :return: String\n \"\"\"\n logger('DBAS.LIB', 'main {}'.format(uid))\n db_argument = DBDiscussionSession.query(Argument).get(uid)\n if not db_argument:\n return None\n\n lang = db_argument.lang\n _t = Translator(lang)\n premisegroup_by_user = False\n author_uid = None\n db_user = DBDiscussionSession.query(User).filter_by(nickname=str(nickname)).first()\n\n if db_user:\n author_uid = db_user.uid\n pgroup = DBDiscussionSession.query(PremiseGroup).get(db_argument.premisegroup_uid)\n marked_argument = DBDiscussionSession.query(MarkedArgument).filter_by(\n argument_uid=uid,\n author_uid=db_user.uid).first()\n premisegroup_by_user = pgroup.author_uid == db_user.uid or marked_argument is not None\n\n # getting all argument id\n arg_array = [db_argument]\n while db_argument.argument_uid:\n db_argument = DBDiscussionSession.query(Argument).get(db_argument.argument_uid)\n arg_array.append(db_argument)\n\n if attack_type == 'jump':\n return __build_argument_for_jump(arg_array, with_html_tag)\n\n if len(arg_array) == 1:\n # build one argument only\n return __build_single_argument(arg_array[0], rearrange_intro, with_html_tag, colored_position, attack_type, _t,\n start_with_intro, is_users_opinion, anonymous_style, support_counter_argument,\n author_uid)\n\n else:\n # get all pgroups and at last, the conclusion\n return __build_nested_argument(arg_array, first_arg_by_user, user_changed_opinion, with_html_tag,\n start_with_intro, minimize_on_undercut, anonymous_style, premisegroup_by_user,\n _t)\n\n\ndef __build_argument_for_jump(arg_array: List[Argument], with_html_tag):\n \"\"\"\n Build tet for an argument, if we jump to this argument\n\n :param arg_array: [Argument]\n :param with_html_tag: Boolean\n :return: String\n \"\"\"\n tag_premise = ('<' + tag_type + ' data-argumentation-type=\"attack\">') if with_html_tag else ''\n tag_conclusion = ('<' + tag_type + ' data-argumentation-type=\"argument\">') if with_html_tag else ''\n tag_end = ('</' + tag_type + '>') if with_html_tag else ''\n lang = arg_array[0].lang\n _t = Translator(lang)\n\n if len(arg_array) == 1:\n ret_value = __build_val_for_jump(arg_array[0], tag_premise, tag_conclusion, tag_end, _t)\n\n elif len(arg_array) == 2:\n ret_value = __build_val_for_undercut(arg_array, tag_premise, tag_conclusion, tag_end, _t)\n\n else:\n ret_value = __build_val_for_undercutted_undercut(arg_array, tag_premise, tag_conclusion, tag_end, _t)\n\n return ret_value.replace(' ', ' ')\n\n\ndef __build_val_for_jump(db_argument, tag_premise, tag_conclusion, tag_end, _t):\n premises = db_argument.get_premisegroup_text()\n if premises[-1] != '.':\n premises += '.'\n conclusion = db_argument.get_conclusion_text()\n\n because = _t.get(_.because).lower()\n conclusion = tag_conclusion + conclusion + tag_end\n premises = tag_premise + premises + tag_end\n\n intro = (start_con + _t.get(_.isNotRight).lower() + end_tag) if not db_argument.is_supportive else ''\n ret_value = '{} {} {} {}'.format(conclusion, intro, because, premises)\n if _t.get_lang() == 'de':\n intro = _t.get(_.itIsTrueThatAnonymous) if db_argument.is_supportive else _t.get(_.itIsFalseThatAnonymous)\n intro = intro[0:1].upper() + intro[1:]\n intro = (start_pro if db_argument.is_supportive else start_con) + intro + end_tag\n ret_value = '{} {}, {} {}'.format(intro, conclusion, because, premises)\n\n return ret_value\n\n\ndef __build_val_for_undercut(arg_array: List[Argument], tag_premise, tag_conclusion, tag_end, _t):\n db_undercut = arg_array[0]\n db_conclusion_argument = arg_array[1]\n premise = db_undercut.get_premisegroup_text()\n conclusion_premise = db_conclusion_argument.get_premisegroup_text()\n conclusion_conclusion = db_conclusion_argument.get_conclusion_text()\n\n premise = tag_premise + premise + tag_end\n conclusion_premise = tag_conclusion + conclusion_premise + tag_end\n conclusion_conclusion = tag_conclusion + conclusion_conclusion + tag_end\n\n intro = (_t.get(_.statementAbout) + ' ') if _t.get_lang() == 'de' else ''\n bind = start_con + _t.get(_.isNotAGoodReasonFor) + end_tag\n because = _t.get(_.because)\n ret_value = '{}{} {} {}. {} {}.'.format(intro, conclusion_premise, bind, conclusion_conclusion, because, premise)\n\n return ret_value\n\n\ndef __build_val_for_undercutted_undercut(arg_array: List[Argument], tag_premise, tag_conclusion, tag_end, _t):\n premise1 = arg_array[0].get_premisegroup_text()\n premise2 = arg_array[1].get_premisegroup_text()\n premise3 = arg_array[2].get_premisegroup_text()\n conclusion = arg_array[2].get_conclusion_text()\n\n bind = start_con + _t.get(_.isNotAGoodReasonAgainstArgument) + end_tag\n because = _t.get(_.because)\n seperator = ',' if _t.get_lang() == 'de' else ''\n\n premise1 = tag_premise + premise1 + tag_end\n premise2 = tag_conclusion + premise2 + tag_end\n argument = '{}{} {} {}'.format(conclusion, seperator, because.lower(), premise3)\n argument = tag_conclusion + argument + tag_end\n\n # P2 ist kein guter Grund gegen das Argument, dass C weil P3. Weil P1\n ret_value = '{} {} {}. {} {}'.format(premise2, bind, argument, because, premise1)\n return ret_value\n\n\ndef __build_single_argument(db_argument: Argument, rearrange_intro: bool, with_html_tag: bool, colored_position: bool,\n attack_type: str, _t: Translator, start_with_intro: bool, is_users_opinion: bool,\n anonymous_style: bool, support_counter_argument: bool=False, author_uid=None):\n \"\"\"\n Build up argument text for a single argument\n\n Please, do not touch this!\n\n :param uid: Argument.uid\n :param rearrange_intro: Boolean\n :param with_html_tag: Boolean\n :param colored_position: Boolean\n :param attack_type: String\n :param _t: Translator\n :param start_with_intro: Boolean\n :param is_users_opinion: Boolean\n :param anonymous_style: Boolean\n :param support_counter_argument: Boolean\n :param author_uid: User.uid\n :return: String\n \"\"\"\n premises_text = db_argument.get_premisegroup_text()\n conclusion_text = db_argument.get_conclusion_text()\n lang = db_argument.lang\n\n if lang != 'de':\n premises_text = premises_text[0:1].lower() + premises_text[1:] # pretty print\n\n premises_text, conclusion_text, sb, sb_none, se = __get_tags_for_building_single_argument(with_html_tag,\n attack_type,\n colored_position,\n premises_text,\n conclusion_text)\n\n marked_element = False\n if author_uid:\n db_marked = DBDiscussionSession.query(MarkedArgument).filter(MarkedArgument.argument_uid == db_argument.uid,\n MarkedArgument.author_uid == author_uid).first()\n marked_element = db_marked is not None\n\n you_have_the_opinion_that = _t.get(_.youHaveTheOpinionThat).format('').strip()\n\n if lang == 'de':\n ret_value = __build_single_argument_for_de(_t, sb, se, you_have_the_opinion_that, start_with_intro,\n anonymous_style, rearrange_intro, db_argument, attack_type, sb_none,\n marked_element, lang, premises_text, conclusion_text,\n is_users_opinion,\n support_counter_argument)\n else:\n ret_value = __build_single_argument_for_en(_t, sb, se, you_have_the_opinion_that, marked_element,\n conclusion_text,\n premises_text, db_argument)\n return ret_value.replace(' ', ' ')\n\n\ndef __get_tags_for_building_single_argument(with_html_tag, attack_type, colored_position, premises, conclusion):\n sb_none = start_tag if with_html_tag else ''\n se = end_tag if with_html_tag else ''\n if attack_type not in ['dont_know', 'jump']:\n sb = start_tag if with_html_tag else ''\n if colored_position:\n sb = start_position if with_html_tag else ''\n\n if attack_type == Relations.UNDERMINE:\n premises = sb + premises + se\n else:\n conclusion = sb + conclusion + se\n else:\n sb = start_argument if with_html_tag else ''\n sb_tmp = start_attack if with_html_tag else ''\n premises = sb + premises + se\n conclusion = sb_tmp + conclusion + se\n return premises, conclusion, sb, sb_none, se\n\n\ndef __build_single_argument_for_de(_t, sb, se, you_have_the_opinion_that, start_with_intro, anonymous_style,\n rearrange_intro, db_argument, attack_type, sb_none, marked_element, lang,\n premises, conclusion, is_users_opinion, support_counter_argument):\n if start_with_intro and not anonymous_style:\n intro = _t.get(_.itIsTrueThat) if db_argument.is_supportive else _t.get(_.itIsFalseThat)\n if rearrange_intro:\n intro = _t.get(_.itTrueIsThat) if db_argument.is_supportive else _t.get(_.itFalseIsThat)\n\n ret_value = (sb_none if attack_type in ['dont_know'] else sb) + intro + se + ' '\n\n elif is_users_opinion and not anonymous_style:\n ret_value = sb_none\n if support_counter_argument:\n ret_value += _t.get(_.youAgreeWithThecounterargument)\n elif marked_element:\n ret_value += you_have_the_opinion_that\n else:\n ret_value += _t.get(_.youArgue)\n ret_value += se + ' '\n\n else:\n tmp = _t.get(_.itIsTrueThatAnonymous if db_argument.is_supportive else _.itIsFalseThatAnonymous)\n ret_value = sb_none + sb + tmp + se + ' '\n ret_value += ' {}{}{} '.format(sb, _t.get(_.itIsNotRight), se) if not db_argument.is_supportive else ''\n ret_value += conclusion\n ret_value += ', ' if lang == 'de' else ' '\n ret_value += sb_none + _t.get(_.because).lower() + se + ' ' + premises\n return ret_value\n\n\ndef __build_single_argument_for_en(_t, sb, se, you_have_the_opinion_that, marked_element, conclusion, premises, db_arg):\n tmp = sb + ' ' + _t.get(_.isNotRight).lower() + se + ', ' + _t.get(_.because).lower() + ' '\n ret_value = (you_have_the_opinion_that + ' ' if marked_element else '') + conclusion + ' '\n ret_value += _t.get(_.because).lower() if db_arg.is_supportive else tmp\n ret_value += ' ' + premises\n return ret_value\n\n\ndef __build_nested_argument(arg_array: List[Argument], first_arg_by_user, user_changed_opinion, with_html_tag,\n start_with_intro, minimize_on_undercut, anonymous_style, premisegroup_by_user, _t):\n \"\"\"\n\n :param arg_array:\n :param first_arg_by_user:\n :param user_changed_opinion:\n :param with_html_tag:\n :param start_with_intro:\n :param minimize_on_undercut:\n :param anonymous_style:\n :param premisegroup_by_user:\n :param _t:\n :return:\n \"\"\"\n # get all pgroups and at last, the conclusion\n pgroups = []\n supportive = []\n arg_array = arg_array[::-1]\n local_lang = arg_array[0].lang\n\n # grepping all arguments in the chain\n for db_argument in arg_array:\n text = db_argument.get_premisegroup_text()\n\n pgroups.append(text)\n supportive.append(db_argument.is_supportive)\n\n conclusion = arg_array[0].get_conclusion_text()\n\n # html tags for framing\n sb = start_position if with_html_tag else ''\n se = end_tag if with_html_tag else ''\n\n because = (', ' if local_lang == 'de' else ' ') + _t.get(_.because).lower() + ' '\n\n if len(arg_array) % 2 is 0 and not first_arg_by_user and not anonymous_style: # system starts\n ret_value = _t.get(_.earlierYouArguedThat if user_changed_opinion else _.otherUsersSaidThat) + ' '\n tmp_users_opinion = True # user after system\n\n elif not anonymous_style: # user starts\n ret_value = (_t.get(_.soYourOpinionIsThat) + ': ') if start_with_intro else ''\n tmp_users_opinion = False # system after user\n conclusion = se + conclusion[0:1].upper() + conclusion[1:] # pretty print\n\n else:\n ret_value = _t.get(_.someoneArgued) + ' '\n tmp_users_opinion = False\n\n tmp = _t.get(_.itFalseIsThat) + ' ' if not supportive[0] else ''\n ret_value += tmp + conclusion + because + pgroups[0] + '.'\n del pgroups[0]\n\n # just display the last premise group on undercuts, because the story is always saved in all bubbles\n if minimize_on_undercut and not user_changed_opinion and len(pgroups) > 2:\n return _t.get(_.butYouCounteredWith).strip() + ' ' + sb + pgroups[len(pgroups) - 1] + se + '.'\n\n for i, pgroup in enumerate(pgroups):\n ret_value += ' '\n if tmp_users_opinion and not anonymous_style:\n tmp = _.butYouCounteredWithArgument if premisegroup_by_user else _.butYouCounteredWithInterest\n ret_value += _t.get(_.otherParticipantsConvincedYouThat if user_changed_opinion else tmp)\n elif not anonymous_style:\n ret_value += _t.get(_.youAgreeWithThatNow)\n else:\n ret_value += _t.get(_.otherUsersSaidThat) if i == 0 else _t.get(_.thenOtherUsersSaidThat)\n\n ret_value += sb + ' ' + pgroups[i] + '.'\n tmp_users_opinion = not tmp_users_opinion\n\n return ret_value.replace(' ', ' ')\n\n\ndef get_text_for_premisegroup_uid(uid):\n \"\"\"\n Returns joined text of the premise group and the premise ids\n\n :param uid: premisegroup_uid\n :return: text, uids\n \"\"\"\n warnings.warn(\"Use PremiseGroup.get_text() instead.\", DeprecationWarning)\n\n db_premises = DBDiscussionSession.query(Premise).filter_by(premisegroup_uid=uid).join(Statement).all()\n if len(db_premises) == 0:\n return ''\n texts = [premise.get_text() for premise in db_premises]\n lang = DBDiscussionSession.query(Statement).get(db_premises[0].statements.uid).lang\n _t = Translator(lang)\n\n return ' {} '.format(_t.get(_.aand)).join(texts)\n\n\ndef get_text_for_statement_uid(uid: int, colored_position=False):\n \"\"\"\n Returns text of statement with given uid\n\n :param uid: Statement.uid\n :param colored_position: Boolean\n :return: String\n \"\"\"\n warnings.warn(\"Use Statement.get_text() or Statement.get_html() instead.\", DeprecationWarning)\n\n if not isinstance(uid, int):\n return None\n db_statement = DBDiscussionSession.query(Statement).get(uid)\n if not db_statement:\n return None\n\n db_textversion = DBDiscussionSession.query(TextVersion).order_by(TextVersion.uid.desc()).get(\n db_statement.textversion_uid)\n content = db_textversion.content\n\n while content.endswith(('.', '?', '!')):\n content = content[:-1]\n\n sb, se = '', ''\n if colored_position:\n sb = '<{} data-argumentation-type=\"position\">'.format(tag_type)\n se = '</{}>'.format(tag_type)\n\n return sb + content + se\n\n\ndef get_text_for_premise(uid: int, colored_position: bool = False):\n \"\"\"\n Returns text of premise with given uid\n\n :param uid: Statement.uid\n :param colored_position: Boolean\n :return: String\n \"\"\"\n db_premise = DBDiscussionSession.query(Premise).get(uid)\n if db_premise:\n return db_premise.get_text(html=colored_position)\n else:\n return None\n\n\ndef get_text_for_conclusion(argument, start_with_intro=False, rearrange_intro=False, is_users_opinion=True):\n \"\"\"\n Check the arguments conclusion whether it is an statement or an argument and returns the text\n\n :param argument: Argument\n :param start_with_intro: Boolean\n :param rearrange_intro: Boolean\n :return: String\n \"\"\"\n if argument.argument_uid:\n return get_text_for_argument_uid(argument.argument_uid, start_with_intro, rearrange_intro=rearrange_intro,\n is_users_opinion=is_users_opinion)\n else:\n return argument.get_conclusion_text()\n\n\ndef resolve_issue_uid_to_slug(uid):\n \"\"\"\n Given the issue uid query database and return the correct slug of the issue.\n\n :param uid: issue_uid\n :type uid: int\n :return: Slug of issue\n :rtype: str\n \"\"\"\n issue = DBDiscussionSession.query(Issue).get(uid)\n return issue.slug if issue else None\n\n\ndef get_all_attacking_arg_uids_from_history(history):\n \"\"\"\n Returns all arguments of the history, which attacked the user\n\n :param history: String\n :return: [Arguments.uid]\n :rtype: list\n \"\"\"\n try:\n splitted_history = history.split('-')\n uids = []\n for part in splitted_history:\n if 'reaction' in part:\n parts = part.split('/')\n pos = parts.index('reaction')\n uids.append(part.split('/')[pos + 3])\n return uids\n except AttributeError:\n return []\n\n\ndef get_user_by_private_or_public_nickname(nickname):\n \"\"\"\n Gets the user by his (public) nickname, based on the option, whether his nickname is public or not\n\n :param nickname: Nickname of the user\n :return: Current user or None\n \"\"\"\n db_user = get_user_by_case_insensitive_nickname(nickname)\n db_public_user = get_user_by_case_insensitive_public_nickname(nickname)\n uid = 0\n\n if db_user:\n uid = db_user.uid\n elif db_public_user:\n uid = db_public_user.uid\n\n db_settings = DBDiscussionSession.query(Settings).filter_by(author_uid=uid).first()\n\n if not db_settings:\n return None\n\n if db_settings.should_show_public_nickname and db_user:\n return db_user\n elif not db_settings.should_show_public_nickname and db_public_user:\n return db_public_user\n\n return None\n\n\ndef get_user_by_case_insensitive_nickname(nickname):\n \"\"\"\n Returns user with given nickname\n\n :param nickname: String\n :return: User or None\n \"\"\"\n return DBDiscussionSession.query(User).filter(func.lower(User.nickname) == func.lower(nickname)).first()\n\n\ndef get_user_by_case_insensitive_public_nickname(public_nickname):\n \"\"\"\n Returns user with given public nickname\n\n :param public_nickname: String\n :return: User or None\n \"\"\"\n return DBDiscussionSession.query(User).filter(\n func.lower(User.public_nickname) == func.lower(public_nickname)).first()\n\n\ndef pretty_print_options(message):\n \"\"\"\n Some modifications for pretty printing.\n Use uppercase for first letter in text and a single dot for the end if there isn't one already.\n\n :param message: String\n :return: String\n \"\"\"\n\n # check for html\n if message[0:1] == '<':\n pos = message.index('>')\n message = message[0:pos + 1] + message[pos + 1:pos + 2].upper() + message[pos + 2:]\n else:\n message = message[0:1].upper() + message[1:]\n\n # check for html\n if message[-1] == '>':\n pos = message.rfind('<')\n if message[pos - 1:pos] not in ['.', '?', '!']:\n message = message[0:pos] + '.' + message[pos:]\n elif not message.endswith(tuple(['.', '?', '!'])) and id is not 'now':\n message += '.'\n\n return message\n\n\ndef create_speechbubble_dict(bubble_type: BubbleTypes, is_markable: bool=False, is_author: bool=False, uid: str='',\n bubble_url: str= '', content: str= '', omit_bubble_url: bool=False, omit_vote_info: bool=False,\n argument_uid: int=None, statement_uid: int=None, is_supportive: bool=False,\n nickname: str='anonymous', lang: str='en', is_users_opinion: bool=False, other_author: User=None):\n \"\"\"\n Creates an dictionary which includes every information needed for a bubble.\n\n :param bubble_type: BubbleTypes\n :param is_markable: True if the content itself could be flagged\n :param is_author: True if the current user is author of the content\n :param uid: Identifier for the bubble\n :param bubble_url: URL for the click event of the bubble\n :param content: Text of the bubble\n :param omit_bubble_url: True if the bubble should have a link\n :param omit_vote_info: True if the bubble have the little, grey information text\n :param argument_uid: Argument.uid\n :param statement_uid: Statement.uid\n :param is_supportive: Boolean\n :param nickname: String\n :param omit_bubble_url: Boolean\n :param lang: is_users_opinion\n :param is_users_opinion: Boolean\n :return: dict()\n \"\"\"\n gravatar_link = get_global_url() + '/static/images/icon.png'\n profile = None\n\n if uid is not 'now':\n content = pretty_print_options(content)\n\n if bubble_type is BubbleTypes.SYSTEM and other_author is not None:\n gravatar_link = get_profile_picture(other_author, 25)\n profile = '/user/{}'.format(other_author.uid),\n\n # check for users opinion\n if bubble_type is BubbleTypes.USER and nickname != 'anonymous':\n db_user = DBDiscussionSession.query(User).filter_by(nickname=nickname).first()\n db_marked = None\n gravatar_link = get_profile_picture(db_user, 25)\n if argument_uid is not None and db_user is not None:\n db_marked = DBDiscussionSession.query(MarkedArgument).filter(\n MarkedArgument.argument_uid == argument_uid,\n MarkedArgument.author_uid == db_user.uid).first()\n\n if statement_uid is not None and db_user is not None:\n db_marked = DBDiscussionSession.query(MarkedStatement).filter(\n MarkedStatement.statement_uid == statement_uid,\n MarkedStatement.author_uid == db_user.uid).first()\n\n is_users_opinion = db_marked is not None\n\n speech = {\n 'is_user': bubble_type is BubbleTypes.USER,\n 'is_system': bubble_type is BubbleTypes.SYSTEM,\n 'is_status': bubble_type is BubbleTypes.STATUS,\n 'is_info': bubble_type is BubbleTypes.INFO,\n 'is_markable': is_markable,\n 'is_author': is_author,\n 'id': uid if len(str(uid)) > 0 else uuid4().hex,\n 'bubble_url': bubble_url,\n 'message': content,\n 'omit_bubble_url': omit_bubble_url,\n 'omit_vote_info': omit_vote_info,\n 'data_type': 'argument' if argument_uid else 'statement' if statement_uid else 'None',\n 'data_argument_uid': argument_uid,\n 'data_statement_uid': statement_uid,\n 'data_is_supportive': is_supportive,\n 'is_users_opinion': is_users_opinion,\n 'enemy': {\n 'avatar': gravatar_link,\n 'profile': profile,\n 'available': profile is not None\n }\n }\n\n votecount_keys = __get_text_for_click_and_mark_count(nickname, bubble_type is BubbleTypes.USER, argument_uid,\n statement_uid, speech, lang)\n\n speech['votecounts_message'] = votecount_keys[speech['votecounts']]\n\n return speech\n\n\ndef __get_text_for_click_and_mark_count(nickname, is_user, argument_uid, statement_uid, speech, lang):\n \"\"\"\n Build text for a bubble, how many other participants have the same interest?\n\n :param nickname: User.nickname\n :param is_user: boolean\n :param argument_uid: Argument.uid\n :param statement_uid: Statement.uid\n :param speech: dict()\n :param lang: ui_locales\n :return: [String]\n \"\"\"\n\n if not nickname:\n nickname = 'anonymous'\n\n db_user = DBDiscussionSession.query(User).filter_by(nickname=nickname).first()\n if not db_user:\n db_user = DBDiscussionSession.query(User).filter_by(nickname='anonymous').first()\n\n db_clicks, db_marks = __get_clicks_and_marks(argument_uid, statement_uid, db_user)\n\n _t = Translator(lang)\n speech['votecounts'] = len(db_clicks) if db_clicks else 0\n if db_marks:\n speech['votecounts'] += len(db_marks)\n\n votecount_keys = defaultdict(lambda: \"{} {}.\".format(speech['votecounts'], _t.get(_.voteCountTextMore)))\n\n if is_user and db_user.gender == 'm':\n gender_key = _.voteCountTextFirstM\n elif is_user and db_user.gender == 'f':\n gender_key = _.voteCountTextFirstF\n else:\n gender_key = _.voteCountTextFirst\n\n votecount_keys[0] = '{}.'.format(_t.get(gender_key))\n votecount_keys[1] = _t.get(_.voteCountTextOneOther) + '.'\n\n return votecount_keys\n\n\ndef __get_clicks_and_marks(argument_uid, statement_uid, db_user):\n db_clicks = None\n db_marks = None\n if argument_uid:\n db_clicks = DBDiscussionSession.query(ClickedArgument). \\\n filter(ClickedArgument.argument_uid == argument_uid,\n ClickedArgument.is_up_vote == True,\n ClickedArgument.is_valid,\n ClickedArgument.author_uid != db_user.uid).all()\n db_marks = DBDiscussionSession.query(MarkedArgument). \\\n filter(MarkedArgument.argument_uid == argument_uid,\n MarkedArgument.author_uid != db_user.uid).all()\n\n elif statement_uid:\n db_clicks = DBDiscussionSession.query(ClickedStatement). \\\n filter(ClickedStatement.statement_uid == statement_uid,\n ClickedStatement.is_up_vote == True,\n ClickedStatement.is_valid,\n ClickedStatement.author_uid != db_user.uid).all()\n db_marks = DBDiscussionSession.query(MarkedStatement). \\\n filter(MarkedStatement.statement_uid == statement_uid,\n MarkedStatement.author_uid != db_user.uid).all()\n\n return db_clicks, db_marks\n\n\ndef is_argument_disabled_due_to_disabled_statements(argument):\n \"\"\"\n Returns true if any involved statement is disabled.\n\n :param argument: Argument\n :return: Boolean\n \"\"\"\n if argument.conclusion_uid is None:\n # check conclusion of given arguments conclusion\n db_argument = DBDiscussionSession.query(Argument).get(argument.argument_uid)\n conclusion = DBDiscussionSession(Statement).get(db_argument.conclusion_uid)\n if conclusion.is_disabled:\n return True\n # check premisegroup of given arguments conclusion\n premises = __get_all_premises_of_argument(db_argument)\n for premise in premises:\n if premise.statements.is_disabled:\n return True\n else:\n # check conclusion of given argument\n print(argument.conclusion_uid)\n conclusion = DBDiscussionSession.query(Statement).get(argument.conclusion_uid)\n if conclusion.is_disabled:\n return True\n\n # check premisegroup of given argument\n premises = __get_all_premises_of_argument(argument)\n for premise in premises:\n if premise.statements.is_disabled:\n return True\n\n return False\n\n\ndef is_author_of_statement(db_user: User, statement_uid: int) -> bool:\n \"\"\"\n Is the user with given nickname author of the statement?\n\n :param db_user: User\n :param statement_uid: Statement.uid\n :return: Boolean\n \"\"\"\n db_user = db_user if db_user and db_user.nickname != nick_of_anonymous_user else None\n if not db_user:\n return False\n\n db_textversion = DBDiscussionSession.query(TextVersion).filter_by(statement_uid=statement_uid).order_by(\n TextVersion.uid.asc()).first() # TODO #432\n if not db_textversion:\n return False\n return db_textversion.author_uid == db_user.uid\n\n\ndef is_author_of_argument(db_user: User, argument_uid: int) -> bool:\n \"\"\"\n Is the user with given nickname author of the argument?\n\n :param db_user: User\n :param argument_uid: Argument.uid\n :return: Boolean\n \"\"\"\n db_user = db_user if db_user and db_user.nickname != nick_of_anonymous_user else None\n if not db_user:\n return False\n db_argument = DBDiscussionSession.query(Argument).filter(Argument.uid == argument_uid,\n Argument.author_uid == db_user.uid).first()\n return True if db_argument else False\n\n\ndef __get_all_premises_of_argument(argument):\n \"\"\"\n Returns list with all premises of the argument.\n\n :param argument: Argument\n :return: list()\n \"\"\"\n ret_list = []\n db_premises = DBDiscussionSession.query(Premise).filter_by(premisegroup_uid=argument.premisegroup_uid).join(\n Statement).all()\n for premise in db_premises:\n ret_list.append(premise)\n return ret_list\n\n\ndef get_profile_picture(user: User, size: int = 80, ignore_privacy_settings: bool = False):\n \"\"\"\n Returns the url to a https://secure.gravatar.com picture, with the option wavatar and size of 80px\n\n :param user: User\n :param size: Integer, default 80\n :param ignore_privacy_settings:\n :return: String\n \"\"\"\n additional_id = ''\n if user and isinstance(user, User):\n additional_id = '' if user.settings.should_show_public_nickname or ignore_privacy_settings else 'x'\n\n return __get_gravatar(user, additional_id, size)\n\n\ndef get_public_profile_picture(user: User, size: int = 80):\n \"\"\"\n Returns the url to a https://secure.gravatar.com picture, with the option wavatar and size of 80px\n If the user doesn't want an public profile, an anonymous image will be returned\n\n :param user: User\n :param size: Integer, default 80\n :return: String\n \"\"\"\n additional_id = ''\n if user.settings.should_show_public_nickname:\n additional_id = 'x'\n if len(str(user.oauth_provider)) > 0:\n additional_id = '{}{}'.format(user.oauth_provider, user.oauth_provider_id)\n\n return __get_gravatar(user, additional_id, size)\n\n\ndef __get_gravatar(user, additional_id, size):\n if user:\n if str(user.email) == 'None':\n email = (user.nickname + additional_id).encode('utf-8')\n else:\n email = (user.email + additional_id).encode('utf-8')\n else:\n email = 'unknown'.encode('utf-8')\n gravatar_url = 'https://secure.gravatar.com/avatar/{}?'.format(hashlib.md5(email.lower()).hexdigest())\n gravatar_url += parse.urlencode({'d': 'wavatar', 's': str(size)})\n\n return gravatar_url\n\n\ndef get_author_data(uid, gravatar_on_right_side=True, linked_with_users_page=True, profile_picture_size=20):\n \"\"\"\n Returns a-tag with gravatar of current author and users page as href\n\n :param uid: Uid of the author\n :param gravatar_on_right_side: True, if the gravatar is on the right of authors name\n :param linked_with_users_page: True, if the text is a link to the authors site\n :param profile_picture_size: Integer\n :return: HTML-String\n \"\"\"\n db_user = DBDiscussionSession.query(User).get(int(uid))\n if not db_user:\n return None, 'Missing author with uid ' + str(uid), False\n\n nick = db_user.global_nickname\n img_src = get_profile_picture(db_user, profile_picture_size)\n link_begin = ''\n link_end = ''\n if linked_with_users_page:\n link_begin = '<a href=\"/user/{}\" title=\"{}\">'.format(db_user.uid, nick)\n link_end = '</a>'\n\n side = 'left' if gravatar_on_right_side else 'right'\n img = '<img class=\"img-circle\" src=\"{}\" style=\"padding-{}: 0.3em\">'.format(img_src, side)\n\n if gravatar_on_right_side:\n return db_user, '{}{}{}{}'.format(link_begin, nick, img, link_end), True\n else:\n return db_user, '{}{}{}{}'.format(link_begin, img, nick, link_end), True\n\n\ndef bubbles_already_last_in_list(bubble_list, bubbles):\n \"\"\"\n Are the given bubbles already at the end of the bubble list\n\n :param bubble_list: list of Bubbles\n :param bubbles: list of bubbles\n :return: Boolean\n \"\"\"\n if isinstance(bubbles, list):\n length = len(bubbles)\n else:\n length = 1\n bubbles = [bubbles]\n\n if len(bubble_list) < length:\n return False\n\n for bubble in bubbles:\n if 'message' not in bubble:\n return False\n\n start_index = - length\n is_already_in = False\n for bubble in bubbles:\n\n last = bubble_list[start_index]\n if 'message' not in last or 'message' not in bubble:\n return False\n\n text1 = unhtmlify(last['message'].lower()).strip()\n text2 = unhtmlify(bubble['message'].lower()).strip()\n is_already_in = is_already_in or (text1 == text2)\n start_index += 1\n\n return is_already_in\n\n\ndef unhtmlify(html):\n \"\"\"\n Remove html-tags and unescape encoded html-entities.\n\n :param html: Evil-string containing html\n :return:\n \"\"\"\n return unescape(re.sub(r'<.*?>', '', html))\n",
"step-ids": [
29,
31,
47,
55,
60
]
}
|
[
29,
31,
47,
55,
60
] |
import surname_common as sc
from sklearn.utils import shuffle
import glob
import os
import re
import pprint
import pandas as pd
import unicodedata
import string
def unicode_to_ascii(s):
return ''.join(
c for c in unicodedata.normalize('NFD', s)
if unicodedata.category(c) != 'Mn' and c in sc.ALL_LETTERS
)
def load_surnames():
df_surnames = pd.DataFrame()
list_ = []
for filename in glob.glob('data/names/*.txt'):
m = re.match(r'(.*)\/(.*?)\.txt', filename)
category = m.group(2)
df = pd.read_csv(filename,names=['surname'])
df['category'] = category
list_.append(df)
df_surnames = pd.concat(list_)
df_surnames['normalized'] = df_surnames['surname'].apply(lambda x: unicode_to_ascii(x))
series_categories = df_surnames.groupby(['category'])['category'].count()
df_categories = pd.DataFrame({
'category':series_categories.index,
'freq':series_categories.tolist(),
'index':range(0,len(series_categories))
})
return df_surnames, df_categories
def save_df_surnames_as_pickle():
df_surnames, df_categories = load_surnames()
# train test split
df = shuffle(df_surnames, random_state=sc.RANDOM_STATE)
train_cnt = int(df['surname'].count()*sc.TRAIN_TEST_RATIO)
train = df[0:train_cnt]
test = df[train_cnt+1:]
# save as pickle
df_surnames.to_pickle('data/pickles/df_surnames.pickle',compression='bz2')
df_categories.to_pickle('data/pickles/df_categories.pickle',compression='bz2')
train.to_pickle('data/pickles/train.pickle',compression='bz2')
test.to_pickle('data/pickles/test.pickle',compression='bz2')
# train test stat
t1 = train.groupby(['category']).count().drop(['normalized'],axis=1)
t2 = test.groupby(['category']).count().drop(['normalized'],axis=1)
t1.columns = ['surname_train']
t2.columns = ['surname_test']
tt = pd.DataFrame(pd.merge(t1, t2, left_index=True, right_index=True))
tt['ratio'] = tt['surname_train'] / (tt['surname_train'] + tt['surname_test'])
tt.to_pickle('data/pickles/train_test_stat.pickle',compression='bz2')
return tt
|
normal
|
{
"blob_id": "db46fbfb1acd855eebb5c9f557d70038b84e812d",
"index": 8573,
"step-1": "<mask token>\n\n\ndef save_df_surnames_as_pickle():\n df_surnames, df_categories = load_surnames()\n df = shuffle(df_surnames, random_state=sc.RANDOM_STATE)\n train_cnt = int(df['surname'].count() * sc.TRAIN_TEST_RATIO)\n train = df[0:train_cnt]\n test = df[train_cnt + 1:]\n df_surnames.to_pickle('data/pickles/df_surnames.pickle', compression='bz2')\n df_categories.to_pickle('data/pickles/df_categories.pickle',\n compression='bz2')\n train.to_pickle('data/pickles/train.pickle', compression='bz2')\n test.to_pickle('data/pickles/test.pickle', compression='bz2')\n t1 = train.groupby(['category']).count().drop(['normalized'], axis=1)\n t2 = test.groupby(['category']).count().drop(['normalized'], axis=1)\n t1.columns = ['surname_train']\n t2.columns = ['surname_test']\n tt = pd.DataFrame(pd.merge(t1, t2, left_index=True, right_index=True))\n tt['ratio'] = tt['surname_train'] / (tt['surname_train'] + tt[\n 'surname_test'])\n tt.to_pickle('data/pickles/train_test_stat.pickle', compression='bz2')\n return tt\n",
"step-2": "<mask token>\n\n\ndef unicode_to_ascii(s):\n return ''.join(c for c in unicodedata.normalize('NFD', s) if \n unicodedata.category(c) != 'Mn' and c in sc.ALL_LETTERS)\n\n\n<mask token>\n\n\ndef save_df_surnames_as_pickle():\n df_surnames, df_categories = load_surnames()\n df = shuffle(df_surnames, random_state=sc.RANDOM_STATE)\n train_cnt = int(df['surname'].count() * sc.TRAIN_TEST_RATIO)\n train = df[0:train_cnt]\n test = df[train_cnt + 1:]\n df_surnames.to_pickle('data/pickles/df_surnames.pickle', compression='bz2')\n df_categories.to_pickle('data/pickles/df_categories.pickle',\n compression='bz2')\n train.to_pickle('data/pickles/train.pickle', compression='bz2')\n test.to_pickle('data/pickles/test.pickle', compression='bz2')\n t1 = train.groupby(['category']).count().drop(['normalized'], axis=1)\n t2 = test.groupby(['category']).count().drop(['normalized'], axis=1)\n t1.columns = ['surname_train']\n t2.columns = ['surname_test']\n tt = pd.DataFrame(pd.merge(t1, t2, left_index=True, right_index=True))\n tt['ratio'] = tt['surname_train'] / (tt['surname_train'] + tt[\n 'surname_test'])\n tt.to_pickle('data/pickles/train_test_stat.pickle', compression='bz2')\n return tt\n",
"step-3": "<mask token>\n\n\ndef unicode_to_ascii(s):\n return ''.join(c for c in unicodedata.normalize('NFD', s) if \n unicodedata.category(c) != 'Mn' and c in sc.ALL_LETTERS)\n\n\ndef load_surnames():\n df_surnames = pd.DataFrame()\n list_ = []\n for filename in glob.glob('data/names/*.txt'):\n m = re.match('(.*)\\\\/(.*?)\\\\.txt', filename)\n category = m.group(2)\n df = pd.read_csv(filename, names=['surname'])\n df['category'] = category\n list_.append(df)\n df_surnames = pd.concat(list_)\n df_surnames['normalized'] = df_surnames['surname'].apply(lambda x:\n unicode_to_ascii(x))\n series_categories = df_surnames.groupby(['category'])['category'].count()\n df_categories = pd.DataFrame({'category': series_categories.index,\n 'freq': series_categories.tolist(), 'index': range(0, len(\n series_categories))})\n return df_surnames, df_categories\n\n\ndef save_df_surnames_as_pickle():\n df_surnames, df_categories = load_surnames()\n df = shuffle(df_surnames, random_state=sc.RANDOM_STATE)\n train_cnt = int(df['surname'].count() * sc.TRAIN_TEST_RATIO)\n train = df[0:train_cnt]\n test = df[train_cnt + 1:]\n df_surnames.to_pickle('data/pickles/df_surnames.pickle', compression='bz2')\n df_categories.to_pickle('data/pickles/df_categories.pickle',\n compression='bz2')\n train.to_pickle('data/pickles/train.pickle', compression='bz2')\n test.to_pickle('data/pickles/test.pickle', compression='bz2')\n t1 = train.groupby(['category']).count().drop(['normalized'], axis=1)\n t2 = test.groupby(['category']).count().drop(['normalized'], axis=1)\n t1.columns = ['surname_train']\n t2.columns = ['surname_test']\n tt = pd.DataFrame(pd.merge(t1, t2, left_index=True, right_index=True))\n tt['ratio'] = tt['surname_train'] / (tt['surname_train'] + tt[\n 'surname_test'])\n tt.to_pickle('data/pickles/train_test_stat.pickle', compression='bz2')\n return tt\n",
"step-4": "import surname_common as sc\nfrom sklearn.utils import shuffle\nimport glob\nimport os\nimport re\nimport pprint\nimport pandas as pd\nimport unicodedata\nimport string\n\n\ndef unicode_to_ascii(s):\n return ''.join(c for c in unicodedata.normalize('NFD', s) if \n unicodedata.category(c) != 'Mn' and c in sc.ALL_LETTERS)\n\n\ndef load_surnames():\n df_surnames = pd.DataFrame()\n list_ = []\n for filename in glob.glob('data/names/*.txt'):\n m = re.match('(.*)\\\\/(.*?)\\\\.txt', filename)\n category = m.group(2)\n df = pd.read_csv(filename, names=['surname'])\n df['category'] = category\n list_.append(df)\n df_surnames = pd.concat(list_)\n df_surnames['normalized'] = df_surnames['surname'].apply(lambda x:\n unicode_to_ascii(x))\n series_categories = df_surnames.groupby(['category'])['category'].count()\n df_categories = pd.DataFrame({'category': series_categories.index,\n 'freq': series_categories.tolist(), 'index': range(0, len(\n series_categories))})\n return df_surnames, df_categories\n\n\ndef save_df_surnames_as_pickle():\n df_surnames, df_categories = load_surnames()\n df = shuffle(df_surnames, random_state=sc.RANDOM_STATE)\n train_cnt = int(df['surname'].count() * sc.TRAIN_TEST_RATIO)\n train = df[0:train_cnt]\n test = df[train_cnt + 1:]\n df_surnames.to_pickle('data/pickles/df_surnames.pickle', compression='bz2')\n df_categories.to_pickle('data/pickles/df_categories.pickle',\n compression='bz2')\n train.to_pickle('data/pickles/train.pickle', compression='bz2')\n test.to_pickle('data/pickles/test.pickle', compression='bz2')\n t1 = train.groupby(['category']).count().drop(['normalized'], axis=1)\n t2 = test.groupby(['category']).count().drop(['normalized'], axis=1)\n t1.columns = ['surname_train']\n t2.columns = ['surname_test']\n tt = pd.DataFrame(pd.merge(t1, t2, left_index=True, right_index=True))\n tt['ratio'] = tt['surname_train'] / (tt['surname_train'] + tt[\n 'surname_test'])\n tt.to_pickle('data/pickles/train_test_stat.pickle', compression='bz2')\n return tt\n",
"step-5": "import surname_common as sc\nfrom sklearn.utils import shuffle\nimport glob\nimport os\nimport re\nimport pprint\nimport pandas as pd\nimport unicodedata\nimport string\n\n\ndef unicode_to_ascii(s):\n return ''.join(\n c for c in unicodedata.normalize('NFD', s)\n if unicodedata.category(c) != 'Mn' and c in sc.ALL_LETTERS\n )\n\ndef load_surnames():\n df_surnames = pd.DataFrame()\n list_ = []\n\n for filename in glob.glob('data/names/*.txt'):\n m = re.match(r'(.*)\\/(.*?)\\.txt', filename)\n category = m.group(2)\n df = pd.read_csv(filename,names=['surname'])\n df['category'] = category\n list_.append(df)\n df_surnames = pd.concat(list_) \n df_surnames['normalized'] = df_surnames['surname'].apply(lambda x: unicode_to_ascii(x))\n \n series_categories = df_surnames.groupby(['category'])['category'].count()\n df_categories = pd.DataFrame({\n 'category':series_categories.index, \n 'freq':series_categories.tolist(), \n 'index':range(0,len(series_categories))\n })\n \n return df_surnames, df_categories\n\ndef save_df_surnames_as_pickle():\n df_surnames, df_categories = load_surnames()\n # train test split\n df = shuffle(df_surnames, random_state=sc.RANDOM_STATE)\n train_cnt = int(df['surname'].count()*sc.TRAIN_TEST_RATIO)\n train = df[0:train_cnt]\n test = df[train_cnt+1:]\n # save as pickle\n df_surnames.to_pickle('data/pickles/df_surnames.pickle',compression='bz2')\n df_categories.to_pickle('data/pickles/df_categories.pickle',compression='bz2')\n train.to_pickle('data/pickles/train.pickle',compression='bz2')\n test.to_pickle('data/pickles/test.pickle',compression='bz2')\n # train test stat \n t1 = train.groupby(['category']).count().drop(['normalized'],axis=1)\n t2 = test.groupby(['category']).count().drop(['normalized'],axis=1)\n t1.columns = ['surname_train']\n t2.columns = ['surname_test']\n tt = pd.DataFrame(pd.merge(t1, t2, left_index=True, right_index=True))\n tt['ratio'] = tt['surname_train'] / (tt['surname_train'] + tt['surname_test'])\n tt.to_pickle('data/pickles/train_test_stat.pickle',compression='bz2')\n return tt",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
from django.db import models
# Create your models here.
class GeneralInformation(models.Model):
name = models.CharField(max_length=100)
address = models.TextField()
city = models.CharField(max_length=20)
class Meta:
ordering = ['name']
def __str__(self):
return "{} {} {}".format(self.name, self.address, self.city)
|
normal
|
{
"blob_id": "d0f83e3b7eb5e1bc81a56e46043f394757437af8",
"index": 5504,
"step-1": "<mask token>\n\n\nclass GeneralInformation(models.Model):\n <mask token>\n <mask token>\n <mask token>\n\n\n class Meta:\n ordering = ['name']\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass GeneralInformation(models.Model):\n <mask token>\n <mask token>\n <mask token>\n\n\n class Meta:\n ordering = ['name']\n\n def __str__(self):\n return '{} {} {}'.format(self.name, self.address, self.city)\n",
"step-3": "<mask token>\n\n\nclass GeneralInformation(models.Model):\n name = models.CharField(max_length=100)\n address = models.TextField()\n city = models.CharField(max_length=20)\n\n\n class Meta:\n ordering = ['name']\n\n def __str__(self):\n return '{} {} {}'.format(self.name, self.address, self.city)\n",
"step-4": "from django.db import models\n\n\nclass GeneralInformation(models.Model):\n name = models.CharField(max_length=100)\n address = models.TextField()\n city = models.CharField(max_length=20)\n\n\n class Meta:\n ordering = ['name']\n\n def __str__(self):\n return '{} {} {}'.format(self.name, self.address, self.city)\n",
"step-5": "from django.db import models\n\n\n# Create your models here.\n\nclass GeneralInformation(models.Model):\n name = models.CharField(max_length=100)\n address = models.TextField()\n city = models.CharField(max_length=20)\n\n class Meta:\n ordering = ['name']\n\n def __str__(self):\n return \"{} {} {}\".format(self.name, self.address, self.city)\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import datetime
import matplotlib.pyplot as plt
import numpy as np
import statsmodels.api as sm
import xlrd
from pandas import *
from xlrd import xldate
#since I messed up when first scraping the data, I have the dates and viewcounts in separate files
#need to create a dictionary of 'author-title':[viewcount, date]
viewcount_dict = {}
#to get the viewcount
workbook = xlrd.open_workbook('ted_info.xlsx')
worksheet = workbook.sheet_by_name('Sheet1')
num_rows = worksheet.nrows - 1
num_cells = worksheet.ncols - 1
curr_row = 0
while curr_row < num_rows:
curr_row += 1
row = worksheet.row(curr_row)
print 'Row:', curr_row
author_name = worksheet.cell_value(curr_row, 0)
talk_title = worksheet.cell_value(curr_row, 3)
viewcount = worksheet.cell_value(curr_row, 5)
if author_name + ":" + talk_title in viewcount_dict:
print author_name + ":" + talk_title
raise "error in datafile, there is a duplicate"
viewcount_dict[author_name + ":" + talk_title] = [viewcount]
#the following prints each cell value and cell type
#curr_cell = -1
#while curr_cell < num_cells:
#curr_cell += 1
# Cell Types: 0=Empty, 1=Text, 2=Number, 3=Date, 4=Boolean, 5=Error, 6=Blank
#cell_type = worksheet.cell_type(curr_row, curr_cell)
#cell_value = worksheet.cell_value(curr_row, curr_cell)
#print ' ', cell_type, ':', cell_value
#to get the year
workbook = xlrd.open_workbook('ted_info_name_title_date.xlsx')
worksheet = workbook.sheet_by_name('Sheet1')
num_rows = worksheet.nrows - 1
num_cells = worksheet.ncols - 1
curr_row = 0
while curr_row < num_rows:
curr_row += 1
row = worksheet.row(curr_row)
author_name = worksheet.cell_value(curr_row, 0)
talk_title = worksheet.cell_value(curr_row, 1)
date = worksheet.cell_value(curr_row, 2)
date_as_datetime = xldate.xldate_as_tuple(date, workbook.datemode)
year, month, day, hour, minute, second = date_as_datetime
print year
try:
viewcount_dict[author_name + ":" + talk_title].append(year)
except:
#author/title not in dictionary (because it was one of the weirdly formatted pages)
print row
continue
print len(viewcount_dict)
year_viewcount_dict = {}
for year in range(2006,2016):
#create a dictionary for each year due to the input of the violin plot
year_viewcount_dict[year] = {}
year_viewcount_dict["All"] = {} #also have one that includes all years
for key, value in viewcount_dict.iteritems():
#print value
try:
year = value[1]
except:
continue
#this means that it did not have a year, likely because that author/talk was not in the date file
viewcount = value[0]
year_viewcount_dict[year][len(year_viewcount_dict[value[1]])] = viewcount
year_viewcount_dict["All"][len(year_viewcount_dict[value[1]])] = viewcount
list_of_counts = [Series(year_viewcount_dict[year]) for year in ["All"] + range(2006,2016)] #turn into data type required for violinplot
labels = ["All"] + [str(year) for year in range(2006, 2016)] #note that they started in June of 2006 and that this data only invludes up to april 2015
plt.rcParams['figure.subplot.bottom'] = 0.23 # keep labels visible
fig = plt.figure()
ax = fig.add_subplot(111)
sm.graphics.violinplot(list_of_counts, ax=ax, labels=labels,
plot_opts={'cutoff_val':5, 'cutoff_type':'abs',
'label_fontsize':'small'})
ax.set_xlabel("Year")
ax.set_yscale("log") #set to log scale because the range of viewcounts
ax.set_ylabel("Viewcount of talks (log scale)")
#plt.show()
plt.savefig('violinplot_viewcounts.png', bbox_inches='tight')
|
normal
|
{
"blob_id": "6ece524c82521b175cc7791e22c8249dd24dc714",
"index": 2281,
"step-1": "import datetime\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport statsmodels.api as sm\nimport xlrd\nfrom pandas import *\nfrom xlrd import xldate\n\n\n#since I messed up when first scraping the data, I have the dates and viewcounts in separate files\n\n#need to create a dictionary of 'author-title':[viewcount, date]\nviewcount_dict = {}\n\n\n#to get the viewcount\nworkbook = xlrd.open_workbook('ted_info.xlsx')\nworksheet = workbook.sheet_by_name('Sheet1')\nnum_rows = worksheet.nrows - 1\nnum_cells = worksheet.ncols - 1\ncurr_row = 0\nwhile curr_row < num_rows:\n curr_row += 1\n row = worksheet.row(curr_row)\n print 'Row:', curr_row\n\n author_name = worksheet.cell_value(curr_row, 0)\n talk_title = worksheet.cell_value(curr_row, 3)\n viewcount = worksheet.cell_value(curr_row, 5)\n\n if author_name + \":\" + talk_title in viewcount_dict:\n print author_name + \":\" + talk_title\n raise \"error in datafile, there is a duplicate\"\n\n viewcount_dict[author_name + \":\" + talk_title] = [viewcount]\n\n #the following prints each cell value and cell type\n #curr_cell = -1\n #while curr_cell < num_cells:\n #curr_cell += 1\n # Cell Types: 0=Empty, 1=Text, 2=Number, 3=Date, 4=Boolean, 5=Error, 6=Blank\n #cell_type = worksheet.cell_type(curr_row, curr_cell)\n #cell_value = worksheet.cell_value(curr_row, curr_cell)\n #print ' ', cell_type, ':', cell_value\n\n\n#to get the year\nworkbook = xlrd.open_workbook('ted_info_name_title_date.xlsx')\nworksheet = workbook.sheet_by_name('Sheet1')\nnum_rows = worksheet.nrows - 1\nnum_cells = worksheet.ncols - 1\ncurr_row = 0\nwhile curr_row < num_rows:\n curr_row += 1\n row = worksheet.row(curr_row)\n\n author_name = worksheet.cell_value(curr_row, 0)\n talk_title = worksheet.cell_value(curr_row, 1)\n date = worksheet.cell_value(curr_row, 2)\n date_as_datetime = xldate.xldate_as_tuple(date, workbook.datemode)\n year, month, day, hour, minute, second = date_as_datetime\n print year\n\n try:\n viewcount_dict[author_name + \":\" + talk_title].append(year)\n except:\n #author/title not in dictionary (because it was one of the weirdly formatted pages)\n print row\n continue\n\n\nprint len(viewcount_dict)\n\n\nyear_viewcount_dict = {}\nfor year in range(2006,2016):\n #create a dictionary for each year due to the input of the violin plot \n year_viewcount_dict[year] = {}\nyear_viewcount_dict[\"All\"] = {} #also have one that includes all years\n\nfor key, value in viewcount_dict.iteritems():\n #print value\n try:\n year = value[1]\n except:\n continue\n #this means that it did not have a year, likely because that author/talk was not in the date file\n viewcount = value[0]\n year_viewcount_dict[year][len(year_viewcount_dict[value[1]])] = viewcount\n year_viewcount_dict[\"All\"][len(year_viewcount_dict[value[1]])] = viewcount\n\nlist_of_counts = [Series(year_viewcount_dict[year]) for year in [\"All\"] + range(2006,2016)] #turn into data type required for violinplot\n\n\nlabels = [\"All\"] + [str(year) for year in range(2006, 2016)] #note that they started in June of 2006 and that this data only invludes up to april 2015\nplt.rcParams['figure.subplot.bottom'] = 0.23 # keep labels visible\nfig = plt.figure()\nax = fig.add_subplot(111)\nsm.graphics.violinplot(list_of_counts, ax=ax, labels=labels,\n plot_opts={'cutoff_val':5, 'cutoff_type':'abs',\n 'label_fontsize':'small'})\nax.set_xlabel(\"Year\")\nax.set_yscale(\"log\") #set to log scale because the range of viewcounts\nax.set_ylabel(\"Viewcount of talks (log scale)\")\n\n#plt.show()\nplt.savefig('violinplot_viewcounts.png', bbox_inches='tight')\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
from .ros_publisher import *
|
normal
|
{
"blob_id": "6e7cca4f766ca89d2e2f82a73f22742b0e8f92a8",
"index": 5870,
"step-1": "<mask token>\n",
"step-2": "from .ros_publisher import *\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
from django.shortcuts import render
from rest_framework.response import Response
from rest_framework import status
from rest_framework.decorators import api_view
from rest_framework.permissions import IsAuthenticated
from .models import Flight, Passenger, Reservation
from .serializers import FlightSerializer, PassengerSerializer, ReservationSerializer
from rest_framework.generics import ListCreateAPIView, RetrieveUpdateDestroyAPIView
# Function Based Views Below
@api_view(['GET'])
def find_flight(request):
bodyData = request.data
req_flight = Flight.objects.filter(
departureCity = bodyData['departureCity'],
arrivalCity = bodyData['arrivalCity'],
dateOfDeparture = bodyData['dateOfDeparture']
)
serialized_flight = FlightSerializer(req_flight, many=True)
return Response(serialized_flight.data)
@api_view(['POST'])
def save_reservation(request):
bodyData = request.data
req_flight = Flight.objects.get(id= bodyData['flightID'])
req_passenger = Passenger()
req_passenger.firstName = bodyData['firstName']
req_passenger.lastName = bodyData['lastName']
req_passenger.middlename = bodyData['middleName']
req_passenger.email = bodyData['email']
req_passenger.phone = bodyData['phone']
req_passenger.save()
req_reservation = Reservation()
req_reservation.flight = req_flight
req_reservation.passenger = req_passenger
req_reservation.save()
return Response(status=status.HTTP_201_CREATED)
# Non Primary based Operations Below
class ListFlight(ListCreateAPIView):
queryset = Flight.objects.all()
serializer_class = FlightSerializer
permission_classes = [IsAuthenticated]
class ListPassengers(ListCreateAPIView):
queryset = Passenger.objects.all()
serializer_class = PassengerSerializer
class ListReservation(ListCreateAPIView):
queryset = Reservation.objects.all()
serializer_class = ReservationSerializer
# Primary Key based Operation Below
class DetailedFlight(RetrieveUpdateDestroyAPIView):
queryset = Flight.objects.all()
serializer_class = FlightSerializer
permission_classes = [IsAuthenticated]
class DetailedPassenger(RetrieveUpdateDestroyAPIView):
queryset = Passenger.objects.all()
serializer_class = PassengerSerializer
class Detailedreservation(RetrieveUpdateDestroyAPIView):
queryset = Reservation.objects.all()
serializer_class = ReservationSerializer
|
normal
|
{
"blob_id": "d437d77d5a57a6f2f4a2d530be05c3845dce93bc",
"index": 1459,
"step-1": "<mask token>\n\n\nclass Detailedreservation(RetrieveUpdateDestroyAPIView):\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass ListReservation(ListCreateAPIView):\n <mask token>\n <mask token>\n\n\nclass DetailedFlight(RetrieveUpdateDestroyAPIView):\n queryset = Flight.objects.all()\n serializer_class = FlightSerializer\n permission_classes = [IsAuthenticated]\n\n\nclass DetailedPassenger(RetrieveUpdateDestroyAPIView):\n queryset = Passenger.objects.all()\n serializer_class = PassengerSerializer\n\n\nclass Detailedreservation(RetrieveUpdateDestroyAPIView):\n queryset = Reservation.objects.all()\n serializer_class = ReservationSerializer\n",
"step-3": "<mask token>\n\n\n@api_view(['POST'])\ndef save_reservation(request):\n bodyData = request.data\n req_flight = Flight.objects.get(id=bodyData['flightID'])\n req_passenger = Passenger()\n req_passenger.firstName = bodyData['firstName']\n req_passenger.lastName = bodyData['lastName']\n req_passenger.middlename = bodyData['middleName']\n req_passenger.email = bodyData['email']\n req_passenger.phone = bodyData['phone']\n req_passenger.save()\n req_reservation = Reservation()\n req_reservation.flight = req_flight\n req_reservation.passenger = req_passenger\n req_reservation.save()\n return Response(status=status.HTTP_201_CREATED)\n\n\nclass ListFlight(ListCreateAPIView):\n queryset = Flight.objects.all()\n serializer_class = FlightSerializer\n permission_classes = [IsAuthenticated]\n\n\nclass ListPassengers(ListCreateAPIView):\n queryset = Passenger.objects.all()\n serializer_class = PassengerSerializer\n\n\nclass ListReservation(ListCreateAPIView):\n queryset = Reservation.objects.all()\n serializer_class = ReservationSerializer\n\n\nclass DetailedFlight(RetrieveUpdateDestroyAPIView):\n queryset = Flight.objects.all()\n serializer_class = FlightSerializer\n permission_classes = [IsAuthenticated]\n\n\nclass DetailedPassenger(RetrieveUpdateDestroyAPIView):\n queryset = Passenger.objects.all()\n serializer_class = PassengerSerializer\n\n\nclass Detailedreservation(RetrieveUpdateDestroyAPIView):\n queryset = Reservation.objects.all()\n serializer_class = ReservationSerializer\n",
"step-4": "<mask token>\n\n\n@api_view(['GET'])\ndef find_flight(request):\n bodyData = request.data\n req_flight = Flight.objects.filter(departureCity=bodyData[\n 'departureCity'], arrivalCity=bodyData['arrivalCity'],\n dateOfDeparture=bodyData['dateOfDeparture'])\n serialized_flight = FlightSerializer(req_flight, many=True)\n return Response(serialized_flight.data)\n\n\n@api_view(['POST'])\ndef save_reservation(request):\n bodyData = request.data\n req_flight = Flight.objects.get(id=bodyData['flightID'])\n req_passenger = Passenger()\n req_passenger.firstName = bodyData['firstName']\n req_passenger.lastName = bodyData['lastName']\n req_passenger.middlename = bodyData['middleName']\n req_passenger.email = bodyData['email']\n req_passenger.phone = bodyData['phone']\n req_passenger.save()\n req_reservation = Reservation()\n req_reservation.flight = req_flight\n req_reservation.passenger = req_passenger\n req_reservation.save()\n return Response(status=status.HTTP_201_CREATED)\n\n\nclass ListFlight(ListCreateAPIView):\n queryset = Flight.objects.all()\n serializer_class = FlightSerializer\n permission_classes = [IsAuthenticated]\n\n\nclass ListPassengers(ListCreateAPIView):\n queryset = Passenger.objects.all()\n serializer_class = PassengerSerializer\n\n\nclass ListReservation(ListCreateAPIView):\n queryset = Reservation.objects.all()\n serializer_class = ReservationSerializer\n\n\nclass DetailedFlight(RetrieveUpdateDestroyAPIView):\n queryset = Flight.objects.all()\n serializer_class = FlightSerializer\n permission_classes = [IsAuthenticated]\n\n\nclass DetailedPassenger(RetrieveUpdateDestroyAPIView):\n queryset = Passenger.objects.all()\n serializer_class = PassengerSerializer\n\n\nclass Detailedreservation(RetrieveUpdateDestroyAPIView):\n queryset = Reservation.objects.all()\n serializer_class = ReservationSerializer\n",
"step-5": "from django.shortcuts import render\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom rest_framework.decorators import api_view\nfrom rest_framework.permissions import IsAuthenticated\nfrom .models import Flight, Passenger, Reservation\nfrom .serializers import FlightSerializer, PassengerSerializer, ReservationSerializer\nfrom rest_framework.generics import ListCreateAPIView, RetrieveUpdateDestroyAPIView\n\n# Function Based Views Below\n\n@api_view(['GET'])\ndef find_flight(request):\n bodyData = request.data\n req_flight = Flight.objects.filter(\n departureCity = bodyData['departureCity'],\n arrivalCity = bodyData['arrivalCity'],\n dateOfDeparture = bodyData['dateOfDeparture']\n )\n serialized_flight = FlightSerializer(req_flight, many=True)\n return Response(serialized_flight.data)\n\n\n@api_view(['POST'])\ndef save_reservation(request):\n bodyData = request.data\n req_flight = Flight.objects.get(id= bodyData['flightID'])\n\n req_passenger = Passenger()\n req_passenger.firstName = bodyData['firstName']\n req_passenger.lastName = bodyData['lastName']\n req_passenger.middlename = bodyData['middleName']\n req_passenger.email = bodyData['email']\n req_passenger.phone = bodyData['phone']\n req_passenger.save()\n\n req_reservation = Reservation()\n req_reservation.flight = req_flight\n req_reservation.passenger = req_passenger\n req_reservation.save()\n\n return Response(status=status.HTTP_201_CREATED)\n\n\n# Non Primary based Operations Below\n\nclass ListFlight(ListCreateAPIView):\n queryset = Flight.objects.all()\n serializer_class = FlightSerializer\n permission_classes = [IsAuthenticated]\n\nclass ListPassengers(ListCreateAPIView):\n queryset = Passenger.objects.all()\n serializer_class = PassengerSerializer\n\nclass ListReservation(ListCreateAPIView):\n queryset = Reservation.objects.all()\n serializer_class = ReservationSerializer\n\n\n# Primary Key based Operation Below \n\n\nclass DetailedFlight(RetrieveUpdateDestroyAPIView):\n queryset = Flight.objects.all()\n serializer_class = FlightSerializer\n permission_classes = [IsAuthenticated]\n\nclass DetailedPassenger(RetrieveUpdateDestroyAPIView):\n queryset = Passenger.objects.all()\n serializer_class = PassengerSerializer\n\nclass Detailedreservation(RetrieveUpdateDestroyAPIView):\n queryset = Reservation.objects.all()\n serializer_class = ReservationSerializer",
"step-ids": [
1,
7,
13,
14,
16
]
}
|
[
1,
7,
13,
14,
16
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
panel.pack()
root.mainloop()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
root = Tk()
photo = PhotoImage(file='flag.png')
panel = Label(root, image=photo)
panel.pack()
root.mainloop()
<|reserved_special_token_1|>
from tkinter import *
root = Tk()
photo = PhotoImage(file='flag.png')
panel = Label(root, image=photo)
panel.pack()
root.mainloop()
|
flexible
|
{
"blob_id": "2d192963bfe046bce1a0c82e0179380693f5c541",
"index": 9518,
"step-1": "<mask token>\n",
"step-2": "<mask token>\npanel.pack()\nroot.mainloop()\n",
"step-3": "<mask token>\nroot = Tk()\nphoto = PhotoImage(file='flag.png')\npanel = Label(root, image=photo)\npanel.pack()\nroot.mainloop()\n",
"step-4": "from tkinter import *\nroot = Tk()\nphoto = PhotoImage(file='flag.png')\npanel = Label(root, image=photo)\npanel.pack()\nroot.mainloop()\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import numpy as np
import string
import networkx as nx
import matplotlib.pyplot as plt
def PlotUndirectedGraph(A,color):
NodesNames = list(string.ascii_uppercase);
NNodes = A.shape[0]
G = nx.DiGraph()
for i in range(NNodes):
G.add_node(NodesNames[i])
for i in range(NNodes):
for j in range(i+1,NNodes):
if A[i,j] != 0:
G.add_edge(NodesNames[i],NodesNames[j],weight=A[i,j])
pos = nx.spring_layout(G)
edge_labels=dict([((u,v,),d['weight'])
for u,v,d in G.edges(data=True)])
if len(color)==0:
#edge_colors = ['black' if not edge in red_edges else 'red' for edge in G.edges()]
nx.draw_networkx_nodes(G, pos, node_size=400, node_color = 'skyblue')
else:
nx.draw_networkx_nodes(G, pos, node_size=400, node_color = color, cmap=plt.cm.Pastel1)
#nx.draw(G,pos, node_color = values, node_size=1500,edge_color=edge_colors,edge_cmap=plt.cm.Reds)
nx.draw_networkx_labels(G, pos, edge_labels=edge_labels)
nx.draw_networkx_edges(G, pos, arrows = False)
nx.draw_networkx_edge_labels(G, pos, edge_labels=edge_labels)
plt.show()
|
normal
|
{
"blob_id": "61388b2edb35055cccbdc98ed52caedcd0b02983",
"index": 5624,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef PlotUndirectedGraph(A, color):\n NodesNames = list(string.ascii_uppercase)\n NNodes = A.shape[0]\n G = nx.DiGraph()\n for i in range(NNodes):\n G.add_node(NodesNames[i])\n for i in range(NNodes):\n for j in range(i + 1, NNodes):\n if A[i, j] != 0:\n G.add_edge(NodesNames[i], NodesNames[j], weight=A[i, j])\n pos = nx.spring_layout(G)\n edge_labels = dict([((u, v), d['weight']) for u, v, d in G.edges(data=\n True)])\n if len(color) == 0:\n nx.draw_networkx_nodes(G, pos, node_size=400, node_color='skyblue')\n else:\n nx.draw_networkx_nodes(G, pos, node_size=400, node_color=color,\n cmap=plt.cm.Pastel1)\n nx.draw_networkx_labels(G, pos, edge_labels=edge_labels)\n nx.draw_networkx_edges(G, pos, arrows=False)\n nx.draw_networkx_edge_labels(G, pos, edge_labels=edge_labels)\n plt.show()\n",
"step-3": "import numpy as np\nimport string\nimport networkx as nx\nimport matplotlib.pyplot as plt\n\n\ndef PlotUndirectedGraph(A, color):\n NodesNames = list(string.ascii_uppercase)\n NNodes = A.shape[0]\n G = nx.DiGraph()\n for i in range(NNodes):\n G.add_node(NodesNames[i])\n for i in range(NNodes):\n for j in range(i + 1, NNodes):\n if A[i, j] != 0:\n G.add_edge(NodesNames[i], NodesNames[j], weight=A[i, j])\n pos = nx.spring_layout(G)\n edge_labels = dict([((u, v), d['weight']) for u, v, d in G.edges(data=\n True)])\n if len(color) == 0:\n nx.draw_networkx_nodes(G, pos, node_size=400, node_color='skyblue')\n else:\n nx.draw_networkx_nodes(G, pos, node_size=400, node_color=color,\n cmap=plt.cm.Pastel1)\n nx.draw_networkx_labels(G, pos, edge_labels=edge_labels)\n nx.draw_networkx_edges(G, pos, arrows=False)\n nx.draw_networkx_edge_labels(G, pos, edge_labels=edge_labels)\n plt.show()\n",
"step-4": "import numpy as np \nimport string \nimport networkx as nx\nimport matplotlib.pyplot as plt\n\n\n\ndef PlotUndirectedGraph(A,color):\n NodesNames = list(string.ascii_uppercase);\n NNodes = A.shape[0]\n G = nx.DiGraph()\n for i in range(NNodes):\n G.add_node(NodesNames[i])\n for i in range(NNodes):\n for j in range(i+1,NNodes):\n if A[i,j] != 0:\n G.add_edge(NodesNames[i],NodesNames[j],weight=A[i,j])\n pos = nx.spring_layout(G)\n edge_labels=dict([((u,v,),d['weight'])\n for u,v,d in G.edges(data=True)])\n if len(color)==0:\n #edge_colors = ['black' if not edge in red_edges else 'red' for edge in G.edges()]\n nx.draw_networkx_nodes(G, pos, node_size=400, node_color = 'skyblue')\n else:\n nx.draw_networkx_nodes(G, pos, node_size=400, node_color = color, cmap=plt.cm.Pastel1)\n #nx.draw(G,pos, node_color = values, node_size=1500,edge_color=edge_colors,edge_cmap=plt.cm.Reds)\n\n nx.draw_networkx_labels(G, pos, edge_labels=edge_labels)\n nx.draw_networkx_edges(G, pos, arrows = False)\n nx.draw_networkx_edge_labels(G, pos, edge_labels=edge_labels)\n plt.show()\n\n\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import random
import re
from datetime import datetime, timedelta
from threading import Lock
from telegram.ext import run_async
from src.models.user import UserDB
from src.models.user_stat import UserStat
from src.utils.cache import cache, USER_CACHE_EXPIRE
from src.utils.logger_helpers import get_logger
logger = get_logger(__name__)
class PidorWeekly:
lock = Lock()
re_words = re.compile(
r"\b(ге[йяи]|геев|анал|аналы|аналь\S+|анус|очко|жоп[ау]|жопой|поп[ау]|попой|попк[ау]|попкой|говн[оа]|говном|пенис\S*|член\S*|пизд\S+|гомос\S+|гомик\S*|\S+сексуал\S*|климов\S*|педерас\S+|пидор\S*|пидар\S*|педик\S+|подвор\S+|iphone\S*|айфон\S*|samsung|самсунг\S*|смузи|барбер\S*|рокет\S*|хипстер\S*|лгбт\S*|бабочк\S+|м[ао]к[ао]син\S*|ахтунг\S*|толерант\S+|политкорр?ект\S+|стрижк\S+|бород\S+|аниме\S*|саратов\S*|фемк\S+|\S+изм\S*|dtf|дтф|в[еэ]йп\S*|гироскутер\S*|мизог\S+|козел|козл\S+|муда[кч]\S*|сволоч\S+|ресторан\S*|кача[лт]\S+|мыло|читер\S*|читы?|культур\S+|сра[тл]\S+|насра[тл]\S+|гад\S*|блогг?ер\S*)\b",
re.IGNORECASE)
re_inside = re.compile(r"п[еи]д[оа]р\S*", re.IGNORECASE)
@classmethod
def get_top_pidor(cls, cid, date=None):
monday = cls.__get_current_monday() if date is None else cls.__get_date_monday(date)
db = cls.__get_db(monday, cid)
stats = UserStat.get_chat_stats(cid, date)
# подсчитаем всех по отношению пидор-слов к общему количеству слов этого участника
pidor_by_count = {}
for user_stat, user in stats:
count = user_stat.all_messages_count
# учитываем только тек, кто написал от 30 сообщений
if count < 30 or user_stat.words_count < 500:
continue
if user.uid not in db:
continue
pidor_by_count[user.uid] = db[user.uid] / count
if len(pidor_by_count) > 0:
uid, _ = cls.__sort_dict(pidor_by_count)[0]
elif len(stats) == 0:
return None
else:
_, user = random.choice(stats)
uid = user.uid
return uid
@classmethod
@run_async
def parse_message(cls, message):
msg = message.text
if msg is None:
return
uid = message.from_user.id
cid = message.chat_id
entities = message.parse_entities()
if not cls.__has_pidor(msg):
return
cls.__add(uid, cid)
if message.reply_to_message is not None:
to_uid = message.reply_to_message.from_user.id
cls.__add(to_uid, cid, replay=True)
for entity, entity_text in entities.items():
if entity.type == 'mention':
username = entity_text.lstrip('@').strip()
try:
mentioned_user_uid = UserDB.get_uid_by_username(username)
if mentioned_user_uid:
cls.__add(mentioned_user_uid, cid, replay=True)
except Exception:
pass
continue
if entity.type == 'text_mention':
cls.__add(entity.user.id, cid, replay=True)
continue
@classmethod
def __has_pidor(cls, msg):
msg_lower = msg.lower().replace('ё', 'е')
if cls.re_words.search(msg_lower):
return True
if cls.re_inside.search(msg_lower):
return True
return False
@classmethod
def __add(cls, uid, cid, date=None, replay=False):
monday = cls.__get_current_monday() if date is None else cls.__get_date_monday(date)
logger.debug(f'lock {cid}:{uid}')
with cls.lock:
db = cls.__get_db(monday, cid)
value = 1
if replay is True:
value = 0.4
if uid in db:
db[uid] += value
else:
db[uid] = value
cls.__set_db(db, monday, cid)
@staticmethod
def __sort_dict(d):
return sorted(d.items(), key=lambda x: x[1], reverse=True)
@staticmethod
def __get_cache_key(monday, cid):
return f'pidorweekly:{monday.strftime("%Y%m%d")}:{cid}'
@staticmethod
def __get_date_monday(date):
monday = date - timedelta(days=date.weekday())
return monday.replace(hour=0, minute=0, second=0, microsecond=0)
@classmethod
def __get_current_monday(cls):
return cls.__get_date_monday(datetime.today())
@classmethod
def __get_db(cls, monday, cid):
cached = cache.get(cls.__get_cache_key(monday, cid))
if cached:
return cached
return {}
@classmethod
def __set_db(cls, newdb, monday, cid):
cache.set(cls.__get_cache_key(monday, cid), newdb, time=USER_CACHE_EXPIRE)
|
normal
|
{
"blob_id": "109ca06685eece74034f77a98b1d7172a17aca21",
"index": 7469,
"step-1": "<mask token>\n\n\nclass PidorWeekly:\n <mask token>\n <mask token>\n <mask token>\n\n @classmethod\n def get_top_pidor(cls, cid, date=None):\n monday = cls.__get_current_monday(\n ) if date is None else cls.__get_date_monday(date)\n db = cls.__get_db(monday, cid)\n stats = UserStat.get_chat_stats(cid, date)\n pidor_by_count = {}\n for user_stat, user in stats:\n count = user_stat.all_messages_count\n if count < 30 or user_stat.words_count < 500:\n continue\n if user.uid not in db:\n continue\n pidor_by_count[user.uid] = db[user.uid] / count\n if len(pidor_by_count) > 0:\n uid, _ = cls.__sort_dict(pidor_by_count)[0]\n elif len(stats) == 0:\n return None\n else:\n _, user = random.choice(stats)\n uid = user.uid\n return uid\n\n @classmethod\n @run_async\n def parse_message(cls, message):\n msg = message.text\n if msg is None:\n return\n uid = message.from_user.id\n cid = message.chat_id\n entities = message.parse_entities()\n if not cls.__has_pidor(msg):\n return\n cls.__add(uid, cid)\n if message.reply_to_message is not None:\n to_uid = message.reply_to_message.from_user.id\n cls.__add(to_uid, cid, replay=True)\n for entity, entity_text in entities.items():\n if entity.type == 'mention':\n username = entity_text.lstrip('@').strip()\n try:\n mentioned_user_uid = UserDB.get_uid_by_username(username)\n if mentioned_user_uid:\n cls.__add(mentioned_user_uid, cid, replay=True)\n except Exception:\n pass\n continue\n if entity.type == 'text_mention':\n cls.__add(entity.user.id, cid, replay=True)\n continue\n <mask token>\n\n @classmethod\n def __add(cls, uid, cid, date=None, replay=False):\n monday = cls.__get_current_monday(\n ) if date is None else cls.__get_date_monday(date)\n logger.debug(f'lock {cid}:{uid}')\n with cls.lock:\n db = cls.__get_db(monday, cid)\n value = 1\n if replay is True:\n value = 0.4\n if uid in db:\n db[uid] += value\n else:\n db[uid] = value\n cls.__set_db(db, monday, cid)\n\n @staticmethod\n def __sort_dict(d):\n return sorted(d.items(), key=lambda x: x[1], reverse=True)\n\n @staticmethod\n def __get_cache_key(monday, cid):\n return f\"pidorweekly:{monday.strftime('%Y%m%d')}:{cid}\"\n\n @staticmethod\n def __get_date_monday(date):\n monday = date - timedelta(days=date.weekday())\n return monday.replace(hour=0, minute=0, second=0, microsecond=0)\n\n @classmethod\n def __get_current_monday(cls):\n return cls.__get_date_monday(datetime.today())\n <mask token>\n\n @classmethod\n def __set_db(cls, newdb, monday, cid):\n cache.set(cls.__get_cache_key(monday, cid), newdb, time=\n USER_CACHE_EXPIRE)\n",
"step-2": "<mask token>\n\n\nclass PidorWeekly:\n lock = Lock()\n re_words = re.compile(\n '\\\\b(ге[йяи]|геев|анал|аналы|аналь\\\\S+|анус|очко|жоп[ау]|жопой|поп[ау]|попой|попк[ау]|попкой|говн[оа]|говном|пенис\\\\S*|член\\\\S*|пизд\\\\S+|гомос\\\\S+|гомик\\\\S*|\\\\S+сексуал\\\\S*|климов\\\\S*|педерас\\\\S+|пидор\\\\S*|пидар\\\\S*|педик\\\\S+|подвор\\\\S+|iphone\\\\S*|айфон\\\\S*|samsung|самсунг\\\\S*|смузи|барбер\\\\S*|рокет\\\\S*|хипстер\\\\S*|лгбт\\\\S*|бабочк\\\\S+|м[ао]к[ао]син\\\\S*|ахтунг\\\\S*|толерант\\\\S+|политкорр?ект\\\\S+|стрижк\\\\S+|бород\\\\S+|аниме\\\\S*|саратов\\\\S*|фемк\\\\S+|\\\\S+изм\\\\S*|dtf|дтф|в[еэ]йп\\\\S*|гироскутер\\\\S*|мизог\\\\S+|козел|козл\\\\S+|муда[кч]\\\\S*|сволоч\\\\S+|ресторан\\\\S*|кача[лт]\\\\S+|мыло|читер\\\\S*|читы?|культур\\\\S+|сра[тл]\\\\S+|насра[тл]\\\\S+|гад\\\\S*|блогг?ер\\\\S*)\\\\b'\n , re.IGNORECASE)\n re_inside = re.compile('п[еи]д[оа]р\\\\S*', re.IGNORECASE)\n\n @classmethod\n def get_top_pidor(cls, cid, date=None):\n monday = cls.__get_current_monday(\n ) if date is None else cls.__get_date_monday(date)\n db = cls.__get_db(monday, cid)\n stats = UserStat.get_chat_stats(cid, date)\n pidor_by_count = {}\n for user_stat, user in stats:\n count = user_stat.all_messages_count\n if count < 30 or user_stat.words_count < 500:\n continue\n if user.uid not in db:\n continue\n pidor_by_count[user.uid] = db[user.uid] / count\n if len(pidor_by_count) > 0:\n uid, _ = cls.__sort_dict(pidor_by_count)[0]\n elif len(stats) == 0:\n return None\n else:\n _, user = random.choice(stats)\n uid = user.uid\n return uid\n\n @classmethod\n @run_async\n def parse_message(cls, message):\n msg = message.text\n if msg is None:\n return\n uid = message.from_user.id\n cid = message.chat_id\n entities = message.parse_entities()\n if not cls.__has_pidor(msg):\n return\n cls.__add(uid, cid)\n if message.reply_to_message is not None:\n to_uid = message.reply_to_message.from_user.id\n cls.__add(to_uid, cid, replay=True)\n for entity, entity_text in entities.items():\n if entity.type == 'mention':\n username = entity_text.lstrip('@').strip()\n try:\n mentioned_user_uid = UserDB.get_uid_by_username(username)\n if mentioned_user_uid:\n cls.__add(mentioned_user_uid, cid, replay=True)\n except Exception:\n pass\n continue\n if entity.type == 'text_mention':\n cls.__add(entity.user.id, cid, replay=True)\n continue\n\n @classmethod\n def __has_pidor(cls, msg):\n msg_lower = msg.lower().replace('ё', 'е')\n if cls.re_words.search(msg_lower):\n return True\n if cls.re_inside.search(msg_lower):\n return True\n return False\n\n @classmethod\n def __add(cls, uid, cid, date=None, replay=False):\n monday = cls.__get_current_monday(\n ) if date is None else cls.__get_date_monday(date)\n logger.debug(f'lock {cid}:{uid}')\n with cls.lock:\n db = cls.__get_db(monday, cid)\n value = 1\n if replay is True:\n value = 0.4\n if uid in db:\n db[uid] += value\n else:\n db[uid] = value\n cls.__set_db(db, monday, cid)\n\n @staticmethod\n def __sort_dict(d):\n return sorted(d.items(), key=lambda x: x[1], reverse=True)\n\n @staticmethod\n def __get_cache_key(monday, cid):\n return f\"pidorweekly:{monday.strftime('%Y%m%d')}:{cid}\"\n\n @staticmethod\n def __get_date_monday(date):\n monday = date - timedelta(days=date.weekday())\n return monday.replace(hour=0, minute=0, second=0, microsecond=0)\n\n @classmethod\n def __get_current_monday(cls):\n return cls.__get_date_monday(datetime.today())\n\n @classmethod\n def __get_db(cls, monday, cid):\n cached = cache.get(cls.__get_cache_key(monday, cid))\n if cached:\n return cached\n return {}\n\n @classmethod\n def __set_db(cls, newdb, monday, cid):\n cache.set(cls.__get_cache_key(monday, cid), newdb, time=\n USER_CACHE_EXPIRE)\n",
"step-3": "<mask token>\nlogger = get_logger(__name__)\n\n\nclass PidorWeekly:\n lock = Lock()\n re_words = re.compile(\n '\\\\b(ге[йяи]|геев|анал|аналы|аналь\\\\S+|анус|очко|жоп[ау]|жопой|поп[ау]|попой|попк[ау]|попкой|говн[оа]|говном|пенис\\\\S*|член\\\\S*|пизд\\\\S+|гомос\\\\S+|гомик\\\\S*|\\\\S+сексуал\\\\S*|климов\\\\S*|педерас\\\\S+|пидор\\\\S*|пидар\\\\S*|педик\\\\S+|подвор\\\\S+|iphone\\\\S*|айфон\\\\S*|samsung|самсунг\\\\S*|смузи|барбер\\\\S*|рокет\\\\S*|хипстер\\\\S*|лгбт\\\\S*|бабочк\\\\S+|м[ао]к[ао]син\\\\S*|ахтунг\\\\S*|толерант\\\\S+|политкорр?ект\\\\S+|стрижк\\\\S+|бород\\\\S+|аниме\\\\S*|саратов\\\\S*|фемк\\\\S+|\\\\S+изм\\\\S*|dtf|дтф|в[еэ]йп\\\\S*|гироскутер\\\\S*|мизог\\\\S+|козел|козл\\\\S+|муда[кч]\\\\S*|сволоч\\\\S+|ресторан\\\\S*|кача[лт]\\\\S+|мыло|читер\\\\S*|читы?|культур\\\\S+|сра[тл]\\\\S+|насра[тл]\\\\S+|гад\\\\S*|блогг?ер\\\\S*)\\\\b'\n , re.IGNORECASE)\n re_inside = re.compile('п[еи]д[оа]р\\\\S*', re.IGNORECASE)\n\n @classmethod\n def get_top_pidor(cls, cid, date=None):\n monday = cls.__get_current_monday(\n ) if date is None else cls.__get_date_monday(date)\n db = cls.__get_db(monday, cid)\n stats = UserStat.get_chat_stats(cid, date)\n pidor_by_count = {}\n for user_stat, user in stats:\n count = user_stat.all_messages_count\n if count < 30 or user_stat.words_count < 500:\n continue\n if user.uid not in db:\n continue\n pidor_by_count[user.uid] = db[user.uid] / count\n if len(pidor_by_count) > 0:\n uid, _ = cls.__sort_dict(pidor_by_count)[0]\n elif len(stats) == 0:\n return None\n else:\n _, user = random.choice(stats)\n uid = user.uid\n return uid\n\n @classmethod\n @run_async\n def parse_message(cls, message):\n msg = message.text\n if msg is None:\n return\n uid = message.from_user.id\n cid = message.chat_id\n entities = message.parse_entities()\n if not cls.__has_pidor(msg):\n return\n cls.__add(uid, cid)\n if message.reply_to_message is not None:\n to_uid = message.reply_to_message.from_user.id\n cls.__add(to_uid, cid, replay=True)\n for entity, entity_text in entities.items():\n if entity.type == 'mention':\n username = entity_text.lstrip('@').strip()\n try:\n mentioned_user_uid = UserDB.get_uid_by_username(username)\n if mentioned_user_uid:\n cls.__add(mentioned_user_uid, cid, replay=True)\n except Exception:\n pass\n continue\n if entity.type == 'text_mention':\n cls.__add(entity.user.id, cid, replay=True)\n continue\n\n @classmethod\n def __has_pidor(cls, msg):\n msg_lower = msg.lower().replace('ё', 'е')\n if cls.re_words.search(msg_lower):\n return True\n if cls.re_inside.search(msg_lower):\n return True\n return False\n\n @classmethod\n def __add(cls, uid, cid, date=None, replay=False):\n monday = cls.__get_current_monday(\n ) if date is None else cls.__get_date_monday(date)\n logger.debug(f'lock {cid}:{uid}')\n with cls.lock:\n db = cls.__get_db(monday, cid)\n value = 1\n if replay is True:\n value = 0.4\n if uid in db:\n db[uid] += value\n else:\n db[uid] = value\n cls.__set_db(db, monday, cid)\n\n @staticmethod\n def __sort_dict(d):\n return sorted(d.items(), key=lambda x: x[1], reverse=True)\n\n @staticmethod\n def __get_cache_key(monday, cid):\n return f\"pidorweekly:{monday.strftime('%Y%m%d')}:{cid}\"\n\n @staticmethod\n def __get_date_monday(date):\n monday = date - timedelta(days=date.weekday())\n return monday.replace(hour=0, minute=0, second=0, microsecond=0)\n\n @classmethod\n def __get_current_monday(cls):\n return cls.__get_date_monday(datetime.today())\n\n @classmethod\n def __get_db(cls, monday, cid):\n cached = cache.get(cls.__get_cache_key(monday, cid))\n if cached:\n return cached\n return {}\n\n @classmethod\n def __set_db(cls, newdb, monday, cid):\n cache.set(cls.__get_cache_key(monday, cid), newdb, time=\n USER_CACHE_EXPIRE)\n",
"step-4": "import random\nimport re\nfrom datetime import datetime, timedelta\nfrom threading import Lock\nfrom telegram.ext import run_async\nfrom src.models.user import UserDB\nfrom src.models.user_stat import UserStat\nfrom src.utils.cache import cache, USER_CACHE_EXPIRE\nfrom src.utils.logger_helpers import get_logger\nlogger = get_logger(__name__)\n\n\nclass PidorWeekly:\n lock = Lock()\n re_words = re.compile(\n '\\\\b(ге[йяи]|геев|анал|аналы|аналь\\\\S+|анус|очко|жоп[ау]|жопой|поп[ау]|попой|попк[ау]|попкой|говн[оа]|говном|пенис\\\\S*|член\\\\S*|пизд\\\\S+|гомос\\\\S+|гомик\\\\S*|\\\\S+сексуал\\\\S*|климов\\\\S*|педерас\\\\S+|пидор\\\\S*|пидар\\\\S*|педик\\\\S+|подвор\\\\S+|iphone\\\\S*|айфон\\\\S*|samsung|самсунг\\\\S*|смузи|барбер\\\\S*|рокет\\\\S*|хипстер\\\\S*|лгбт\\\\S*|бабочк\\\\S+|м[ао]к[ао]син\\\\S*|ахтунг\\\\S*|толерант\\\\S+|политкорр?ект\\\\S+|стрижк\\\\S+|бород\\\\S+|аниме\\\\S*|саратов\\\\S*|фемк\\\\S+|\\\\S+изм\\\\S*|dtf|дтф|в[еэ]йп\\\\S*|гироскутер\\\\S*|мизог\\\\S+|козел|козл\\\\S+|муда[кч]\\\\S*|сволоч\\\\S+|ресторан\\\\S*|кача[лт]\\\\S+|мыло|читер\\\\S*|читы?|культур\\\\S+|сра[тл]\\\\S+|насра[тл]\\\\S+|гад\\\\S*|блогг?ер\\\\S*)\\\\b'\n , re.IGNORECASE)\n re_inside = re.compile('п[еи]д[оа]р\\\\S*', re.IGNORECASE)\n\n @classmethod\n def get_top_pidor(cls, cid, date=None):\n monday = cls.__get_current_monday(\n ) if date is None else cls.__get_date_monday(date)\n db = cls.__get_db(monday, cid)\n stats = UserStat.get_chat_stats(cid, date)\n pidor_by_count = {}\n for user_stat, user in stats:\n count = user_stat.all_messages_count\n if count < 30 or user_stat.words_count < 500:\n continue\n if user.uid not in db:\n continue\n pidor_by_count[user.uid] = db[user.uid] / count\n if len(pidor_by_count) > 0:\n uid, _ = cls.__sort_dict(pidor_by_count)[0]\n elif len(stats) == 0:\n return None\n else:\n _, user = random.choice(stats)\n uid = user.uid\n return uid\n\n @classmethod\n @run_async\n def parse_message(cls, message):\n msg = message.text\n if msg is None:\n return\n uid = message.from_user.id\n cid = message.chat_id\n entities = message.parse_entities()\n if not cls.__has_pidor(msg):\n return\n cls.__add(uid, cid)\n if message.reply_to_message is not None:\n to_uid = message.reply_to_message.from_user.id\n cls.__add(to_uid, cid, replay=True)\n for entity, entity_text in entities.items():\n if entity.type == 'mention':\n username = entity_text.lstrip('@').strip()\n try:\n mentioned_user_uid = UserDB.get_uid_by_username(username)\n if mentioned_user_uid:\n cls.__add(mentioned_user_uid, cid, replay=True)\n except Exception:\n pass\n continue\n if entity.type == 'text_mention':\n cls.__add(entity.user.id, cid, replay=True)\n continue\n\n @classmethod\n def __has_pidor(cls, msg):\n msg_lower = msg.lower().replace('ё', 'е')\n if cls.re_words.search(msg_lower):\n return True\n if cls.re_inside.search(msg_lower):\n return True\n return False\n\n @classmethod\n def __add(cls, uid, cid, date=None, replay=False):\n monday = cls.__get_current_monday(\n ) if date is None else cls.__get_date_monday(date)\n logger.debug(f'lock {cid}:{uid}')\n with cls.lock:\n db = cls.__get_db(monday, cid)\n value = 1\n if replay is True:\n value = 0.4\n if uid in db:\n db[uid] += value\n else:\n db[uid] = value\n cls.__set_db(db, monday, cid)\n\n @staticmethod\n def __sort_dict(d):\n return sorted(d.items(), key=lambda x: x[1], reverse=True)\n\n @staticmethod\n def __get_cache_key(monday, cid):\n return f\"pidorweekly:{monday.strftime('%Y%m%d')}:{cid}\"\n\n @staticmethod\n def __get_date_monday(date):\n monday = date - timedelta(days=date.weekday())\n return monday.replace(hour=0, minute=0, second=0, microsecond=0)\n\n @classmethod\n def __get_current_monday(cls):\n return cls.__get_date_monday(datetime.today())\n\n @classmethod\n def __get_db(cls, monday, cid):\n cached = cache.get(cls.__get_cache_key(monday, cid))\n if cached:\n return cached\n return {}\n\n @classmethod\n def __set_db(cls, newdb, monday, cid):\n cache.set(cls.__get_cache_key(monday, cid), newdb, time=\n USER_CACHE_EXPIRE)\n",
"step-5": "import random\nimport re\nfrom datetime import datetime, timedelta\nfrom threading import Lock\n\nfrom telegram.ext import run_async\n\nfrom src.models.user import UserDB\nfrom src.models.user_stat import UserStat\nfrom src.utils.cache import cache, USER_CACHE_EXPIRE\nfrom src.utils.logger_helpers import get_logger\n\nlogger = get_logger(__name__)\n\n\nclass PidorWeekly:\n lock = Lock()\n re_words = re.compile(\n r\"\\b(ге[йяи]|геев|анал|аналы|аналь\\S+|анус|очко|жоп[ау]|жопой|поп[ау]|попой|попк[ау]|попкой|говн[оа]|говном|пенис\\S*|член\\S*|пизд\\S+|гомос\\S+|гомик\\S*|\\S+сексуал\\S*|климов\\S*|педерас\\S+|пидор\\S*|пидар\\S*|педик\\S+|подвор\\S+|iphone\\S*|айфон\\S*|samsung|самсунг\\S*|смузи|барбер\\S*|рокет\\S*|хипстер\\S*|лгбт\\S*|бабочк\\S+|м[ао]к[ао]син\\S*|ахтунг\\S*|толерант\\S+|политкорр?ект\\S+|стрижк\\S+|бород\\S+|аниме\\S*|саратов\\S*|фемк\\S+|\\S+изм\\S*|dtf|дтф|в[еэ]йп\\S*|гироскутер\\S*|мизог\\S+|козел|козл\\S+|муда[кч]\\S*|сволоч\\S+|ресторан\\S*|кача[лт]\\S+|мыло|читер\\S*|читы?|культур\\S+|сра[тл]\\S+|насра[тл]\\S+|гад\\S*|блогг?ер\\S*)\\b\",\n re.IGNORECASE)\n re_inside = re.compile(r\"п[еи]д[оа]р\\S*\", re.IGNORECASE)\n\n @classmethod\n def get_top_pidor(cls, cid, date=None):\n monday = cls.__get_current_monday() if date is None else cls.__get_date_monday(date)\n db = cls.__get_db(monday, cid)\n stats = UserStat.get_chat_stats(cid, date)\n\n # подсчитаем всех по отношению пидор-слов к общему количеству слов этого участника\n pidor_by_count = {}\n for user_stat, user in stats:\n count = user_stat.all_messages_count\n # учитываем только тек, кто написал от 30 сообщений\n if count < 30 or user_stat.words_count < 500:\n continue\n if user.uid not in db:\n continue\n pidor_by_count[user.uid] = db[user.uid] / count\n\n if len(pidor_by_count) > 0:\n uid, _ = cls.__sort_dict(pidor_by_count)[0]\n elif len(stats) == 0:\n return None\n else:\n _, user = random.choice(stats)\n uid = user.uid\n return uid\n\n @classmethod\n @run_async\n def parse_message(cls, message):\n msg = message.text\n if msg is None:\n return\n uid = message.from_user.id\n cid = message.chat_id\n entities = message.parse_entities()\n\n if not cls.__has_pidor(msg):\n return\n cls.__add(uid, cid)\n\n if message.reply_to_message is not None:\n to_uid = message.reply_to_message.from_user.id\n cls.__add(to_uid, cid, replay=True)\n\n for entity, entity_text in entities.items():\n if entity.type == 'mention':\n username = entity_text.lstrip('@').strip()\n try:\n mentioned_user_uid = UserDB.get_uid_by_username(username)\n if mentioned_user_uid:\n cls.__add(mentioned_user_uid, cid, replay=True)\n except Exception:\n pass\n continue\n if entity.type == 'text_mention':\n cls.__add(entity.user.id, cid, replay=True)\n continue\n\n @classmethod\n def __has_pidor(cls, msg):\n msg_lower = msg.lower().replace('ё', 'е')\n if cls.re_words.search(msg_lower):\n return True\n if cls.re_inside.search(msg_lower):\n return True\n return False\n\n @classmethod\n def __add(cls, uid, cid, date=None, replay=False):\n monday = cls.__get_current_monday() if date is None else cls.__get_date_monday(date)\n logger.debug(f'lock {cid}:{uid}')\n with cls.lock:\n db = cls.__get_db(monday, cid)\n value = 1\n if replay is True:\n value = 0.4\n\n if uid in db:\n db[uid] += value\n else:\n db[uid] = value\n\n cls.__set_db(db, monday, cid)\n\n @staticmethod\n def __sort_dict(d):\n return sorted(d.items(), key=lambda x: x[1], reverse=True)\n\n @staticmethod\n def __get_cache_key(monday, cid):\n return f'pidorweekly:{monday.strftime(\"%Y%m%d\")}:{cid}'\n\n @staticmethod\n def __get_date_monday(date):\n monday = date - timedelta(days=date.weekday())\n return monday.replace(hour=0, minute=0, second=0, microsecond=0)\n\n @classmethod\n def __get_current_monday(cls):\n return cls.__get_date_monday(datetime.today())\n\n @classmethod\n def __get_db(cls, monday, cid):\n cached = cache.get(cls.__get_cache_key(monday, cid))\n if cached:\n return cached\n return {}\n\n @classmethod\n def __set_db(cls, newdb, monday, cid):\n cache.set(cls.__get_cache_key(monday, cid), newdb, time=USER_CACHE_EXPIRE)\n",
"step-ids": [
9,
12,
13,
14,
15
]
}
|
[
9,
12,
13,
14,
15
] |
a = int(input('점수를 입력하세요'))
if a >= 70 :
print:('통과입니다.')
print:('축하합니다.')
else :
print:('불합격입니다.')
print("안녕")
|
normal
|
{
"blob_id": "f8d0cc9cb0e5f8adf9077ffb39dd6abedfedaa12",
"index": 5427,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif a >= 70:\n print: '통과입니다.'\n print: '축하합니다.'\nelse:\n print: '불합격입니다.'\nprint('안녕')\n",
"step-3": "a = int(input('점수를 입력하세요'))\nif a >= 70:\n print: '통과입니다.'\n print: '축하합니다.'\nelse:\n print: '불합격입니다.'\nprint('안녕')\n",
"step-4": "a = int(input('점수를 입력하세요'))\r\nif a >= 70 :\r\n print:('통과입니다.')\r\n print:('축하합니다.')\r\nelse :\r\n print:('불합격입니다.')\r\nprint(\"안녕\")\r\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class Remove_ItemView(generic.ListView):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class Update_ItemView(generic.ListView):
model = GPS
template_name = 'inv_templates/update_item.html'
context_object_name = 'unit'
class Check_Out_ItemView(generic.ListView):
model = GPS
template_name = 'inv_templates/check_out_item.html'
context_object_name = 'checkedin_units'
queryset = GPS.objects.filter(status=False)
class Check_In_ItemView(generic.ListView):
model = GPS
template_name = 'inv_templates/check_in_item.html'
context_object_name = 'checkedout_units'
queryset = GPS.objects.filter(status=True)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class HomeView(generic.ListView):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class Add_ItemView(generic.TemplateView):
model = GPS
template_name = 'inv_templates/add_item.html'
class Remove_ItemView(generic.ListView):
model = GPS
template_name = 'inv_templates/remove_item.html'
context_object_name = 'unit'
class Update_ItemView(generic.ListView):
model = GPS
template_name = 'inv_templates/update_item.html'
context_object_name = 'unit'
class Check_Out_ItemView(generic.ListView):
model = GPS
template_name = 'inv_templates/check_out_item.html'
context_object_name = 'checkedin_units'
queryset = GPS.objects.filter(status=False)
class Check_In_ItemView(generic.ListView):
model = GPS
template_name = 'inv_templates/check_in_item.html'
context_object_name = 'checkedout_units'
queryset = GPS.objects.filter(status=True)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class HomeView(generic.ListView):
model = GPS
template_name = 'inv_templates/home.html'
context_object_name = 'unit'
class Add_ItemView(generic.TemplateView):
model = GPS
template_name = 'inv_templates/add_item.html'
class Remove_ItemView(generic.ListView):
model = GPS
template_name = 'inv_templates/remove_item.html'
context_object_name = 'unit'
class Update_ItemView(generic.ListView):
model = GPS
template_name = 'inv_templates/update_item.html'
context_object_name = 'unit'
class Check_Out_ItemView(generic.ListView):
model = GPS
template_name = 'inv_templates/check_out_item.html'
context_object_name = 'checkedin_units'
queryset = GPS.objects.filter(status=False)
class Check_In_ItemView(generic.ListView):
model = GPS
template_name = 'inv_templates/check_in_item.html'
context_object_name = 'checkedout_units'
queryset = GPS.objects.filter(status=True)
<|reserved_special_token_1|>
from django.views import generic
from .models import GPS
class HomeView(generic.ListView):
model = GPS
template_name = 'inv_templates/home.html'
context_object_name = 'unit'
class Add_ItemView(generic.TemplateView):
model = GPS
template_name = 'inv_templates/add_item.html'
class Remove_ItemView(generic.ListView):
model = GPS
template_name = 'inv_templates/remove_item.html'
context_object_name = 'unit'
class Update_ItemView(generic.ListView):
model = GPS
template_name = 'inv_templates/update_item.html'
context_object_name = 'unit'
class Check_Out_ItemView(generic.ListView):
model = GPS
template_name = 'inv_templates/check_out_item.html'
context_object_name = 'checkedin_units'
queryset = GPS.objects.filter(status=False)
class Check_In_ItemView(generic.ListView):
model = GPS
template_name = 'inv_templates/check_in_item.html'
context_object_name = 'checkedout_units'
queryset = GPS.objects.filter(status=True)
<|reserved_special_token_1|>
from django.views import generic
from .models import GPS
# This is the view for my home page. It is a list view because it needs to display a list of all
# of the GPS units that are currently in the database.
class HomeView(generic.ListView):
model = GPS
template_name = 'inv_templates/home.html'
context_object_name = 'unit'
# This is the view for my add item page.
class Add_ItemView(generic.TemplateView):
model = GPS
template_name = 'inv_templates/add_item.html'
# This is the view for my remove item page. It is a list view because it needs to display a
# list of all of the GPS units that are currently in the database.
class Remove_ItemView(generic.ListView):
model = GPS
template_name = 'inv_templates/remove_item.html'
context_object_name = 'unit'
# This is the view for my update item page. It is a list view because it needs to display a
# list of all of the GPS units that are currently in the database.
class Update_ItemView(generic.ListView):
model = GPS
template_name = 'inv_templates/update_item.html'
context_object_name = 'unit'
# This is the view for my check out item page. It is a list view because it needs to display a
# list of all of the GPS units that are currently checked in.
class Check_Out_ItemView(generic.ListView):
model = GPS
template_name = 'inv_templates/check_out_item.html'
context_object_name = 'checkedin_units'
queryset = GPS.objects.filter(status=False)
# This is the view for my check in item page. It is a list view because it needs to display a
# list of all of the GPS units that are currently checked out.
class Check_In_ItemView(generic.ListView):
model = GPS
template_name = 'inv_templates/check_in_item.html'
context_object_name = 'checkedout_units'
queryset = GPS.objects.filter(status=True)
|
flexible
|
{
"blob_id": "67db3a66e5525d41de13df665167a0db2d81056e",
"index": 2721,
"step-1": "<mask token>\n\n\nclass Remove_ItemView(generic.ListView):\n <mask token>\n <mask token>\n <mask token>\n\n\nclass Update_ItemView(generic.ListView):\n model = GPS\n template_name = 'inv_templates/update_item.html'\n context_object_name = 'unit'\n\n\nclass Check_Out_ItemView(generic.ListView):\n model = GPS\n template_name = 'inv_templates/check_out_item.html'\n context_object_name = 'checkedin_units'\n queryset = GPS.objects.filter(status=False)\n\n\nclass Check_In_ItemView(generic.ListView):\n model = GPS\n template_name = 'inv_templates/check_in_item.html'\n context_object_name = 'checkedout_units'\n queryset = GPS.objects.filter(status=True)\n",
"step-2": "<mask token>\n\n\nclass HomeView(generic.ListView):\n <mask token>\n <mask token>\n <mask token>\n\n\nclass Add_ItemView(generic.TemplateView):\n model = GPS\n template_name = 'inv_templates/add_item.html'\n\n\nclass Remove_ItemView(generic.ListView):\n model = GPS\n template_name = 'inv_templates/remove_item.html'\n context_object_name = 'unit'\n\n\nclass Update_ItemView(generic.ListView):\n model = GPS\n template_name = 'inv_templates/update_item.html'\n context_object_name = 'unit'\n\n\nclass Check_Out_ItemView(generic.ListView):\n model = GPS\n template_name = 'inv_templates/check_out_item.html'\n context_object_name = 'checkedin_units'\n queryset = GPS.objects.filter(status=False)\n\n\nclass Check_In_ItemView(generic.ListView):\n model = GPS\n template_name = 'inv_templates/check_in_item.html'\n context_object_name = 'checkedout_units'\n queryset = GPS.objects.filter(status=True)\n",
"step-3": "<mask token>\n\n\nclass HomeView(generic.ListView):\n model = GPS\n template_name = 'inv_templates/home.html'\n context_object_name = 'unit'\n\n\nclass Add_ItemView(generic.TemplateView):\n model = GPS\n template_name = 'inv_templates/add_item.html'\n\n\nclass Remove_ItemView(generic.ListView):\n model = GPS\n template_name = 'inv_templates/remove_item.html'\n context_object_name = 'unit'\n\n\nclass Update_ItemView(generic.ListView):\n model = GPS\n template_name = 'inv_templates/update_item.html'\n context_object_name = 'unit'\n\n\nclass Check_Out_ItemView(generic.ListView):\n model = GPS\n template_name = 'inv_templates/check_out_item.html'\n context_object_name = 'checkedin_units'\n queryset = GPS.objects.filter(status=False)\n\n\nclass Check_In_ItemView(generic.ListView):\n model = GPS\n template_name = 'inv_templates/check_in_item.html'\n context_object_name = 'checkedout_units'\n queryset = GPS.objects.filter(status=True)\n",
"step-4": "from django.views import generic\nfrom .models import GPS\n\n\nclass HomeView(generic.ListView):\n model = GPS\n template_name = 'inv_templates/home.html'\n context_object_name = 'unit'\n\n\nclass Add_ItemView(generic.TemplateView):\n model = GPS\n template_name = 'inv_templates/add_item.html'\n\n\nclass Remove_ItemView(generic.ListView):\n model = GPS\n template_name = 'inv_templates/remove_item.html'\n context_object_name = 'unit'\n\n\nclass Update_ItemView(generic.ListView):\n model = GPS\n template_name = 'inv_templates/update_item.html'\n context_object_name = 'unit'\n\n\nclass Check_Out_ItemView(generic.ListView):\n model = GPS\n template_name = 'inv_templates/check_out_item.html'\n context_object_name = 'checkedin_units'\n queryset = GPS.objects.filter(status=False)\n\n\nclass Check_In_ItemView(generic.ListView):\n model = GPS\n template_name = 'inv_templates/check_in_item.html'\n context_object_name = 'checkedout_units'\n queryset = GPS.objects.filter(status=True)\n",
"step-5": "from django.views import generic\nfrom .models import GPS\n# This is the view for my home page. It is a list view because it needs to display a list of all\n# of the GPS units that are currently in the database.\nclass HomeView(generic.ListView):\n model = GPS\n template_name = 'inv_templates/home.html'\n context_object_name = 'unit'\n\n# This is the view for my add item page.\nclass Add_ItemView(generic.TemplateView):\n model = GPS\n template_name = 'inv_templates/add_item.html'\n\n# This is the view for my remove item page. It is a list view because it needs to display a\n# list of all of the GPS units that are currently in the database.\nclass Remove_ItemView(generic.ListView):\n model = GPS\n template_name = 'inv_templates/remove_item.html'\n context_object_name = 'unit'\n\n# This is the view for my update item page. It is a list view because it needs to display a\n# list of all of the GPS units that are currently in the database.\nclass Update_ItemView(generic.ListView):\n model = GPS\n template_name = 'inv_templates/update_item.html'\n context_object_name = 'unit'\n\n# This is the view for my check out item page. It is a list view because it needs to display a\n# list of all of the GPS units that are currently checked in.\nclass Check_Out_ItemView(generic.ListView):\n model = GPS\n template_name = 'inv_templates/check_out_item.html'\n context_object_name = 'checkedin_units'\n queryset = GPS.objects.filter(status=False)\n\n# This is the view for my check in item page. It is a list view because it needs to display a\n# list of all of the GPS units that are currently checked out.\nclass Check_In_ItemView(generic.ListView):\n model = GPS\n template_name = 'inv_templates/check_in_item.html'\n context_object_name = 'checkedout_units'\n queryset = GPS.objects.filter(status=True)\n",
"step-ids": [
7,
11,
12,
13,
14
]
}
|
[
7,
11,
12,
13,
14
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
dependencies = [('rover', '0002_auto_20180501_1431')]
operations = [migrations.CreateModel(name='RoverPage', fields=[('id',
models.AutoField(auto_created=True, primary_key=True, serialize=
False, verbose_name='ID')), ('design_review', models.FileField(
blank=True, upload_to='documents/rover'))], options={
'verbose_name_plural': 'Rover Page', 'verbose_name': 'Rover Page'})]
<|reserved_special_token_1|>
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [('rover', '0002_auto_20180501_1431')]
operations = [migrations.CreateModel(name='RoverPage', fields=[('id',
models.AutoField(auto_created=True, primary_key=True, serialize=
False, verbose_name='ID')), ('design_review', models.FileField(
blank=True, upload_to='documents/rover'))], options={
'verbose_name_plural': 'Rover Page', 'verbose_name': 'Rover Page'})]
<|reserved_special_token_1|>
# Generated by Django 2.0.1 on 2018-05-01 11:46
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('rover', '0002_auto_20180501_1431'),
]
operations = [
migrations.CreateModel(
name='RoverPage',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('design_review', models.FileField(blank=True, upload_to='documents/rover')),
],
options={
'verbose_name_plural': 'Rover Page',
'verbose_name': 'Rover Page',
},
),
]
|
flexible
|
{
"blob_id": "fed94e0affa1fe6c705577a63fabee839aa9f05c",
"index": 5096,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('rover', '0002_auto_20180501_1431')]\n operations = [migrations.CreateModel(name='RoverPage', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('design_review', models.FileField(\n blank=True, upload_to='documents/rover'))], options={\n 'verbose_name_plural': 'Rover Page', 'verbose_name': 'Rover Page'})]\n",
"step-4": "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('rover', '0002_auto_20180501_1431')]\n operations = [migrations.CreateModel(name='RoverPage', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('design_review', models.FileField(\n blank=True, upload_to='documents/rover'))], options={\n 'verbose_name_plural': 'Rover Page', 'verbose_name': 'Rover Page'})]\n",
"step-5": "# Generated by Django 2.0.1 on 2018-05-01 11:46\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('rover', '0002_auto_20180501_1431'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='RoverPage',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('design_review', models.FileField(blank=True, upload_to='documents/rover')),\n ],\n options={\n 'verbose_name_plural': 'Rover Page',\n 'verbose_name': 'Rover Page',\n },\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
def swapPairs(self, head: ListNode) -> ListNode:
dummy_head=ListNode(0)
dummy_head.next=head
pre=dummy_head
cur=head
while cur and cur.next:
next=cur.next
next_next=next.next
pre.next=next
next.next=cur
cur.next=next_next
pre=cur
cur=next_next
return dummy_head.next
# !!!!!!!!!!!!!!反转链表套路:
# 虚拟头结点
# 在循环外定义pre cur,在循环内求next和next_next(如果有需要),这样就可以cur and cur.next作为判断while条件
|
normal
|
{
"blob_id": "4afc2ceed860c20af071e1d9ccaca17973cb9a8e",
"index": 7553,
"step-1": "<mask token>\n",
"step-2": "class Solution:\n <mask token>\n",
"step-3": "class Solution:\n\n def swapPairs(self, head: ListNode) ->ListNode:\n dummy_head = ListNode(0)\n dummy_head.next = head\n pre = dummy_head\n cur = head\n while cur and cur.next:\n next = cur.next\n next_next = next.next\n pre.next = next\n next.next = cur\n cur.next = next_next\n pre = cur\n cur = next_next\n return dummy_head.next\n",
"step-4": "# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\nclass Solution:\n def swapPairs(self, head: ListNode) -> ListNode:\n dummy_head=ListNode(0)\n dummy_head.next=head\n pre=dummy_head\n cur=head\n while cur and cur.next:\n next=cur.next\n next_next=next.next\n pre.next=next\n next.next=cur\n cur.next=next_next\n pre=cur\n cur=next_next\n return dummy_head.next\n# !!!!!!!!!!!!!!反转链表套路: \n# 虚拟头结点\n# 在循环外定义pre cur,在循环内求next和next_next(如果有需要),这样就可以cur and cur.next作为判断while条件\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class AuthenticatorTest(absltest.TestCase):
<|reserved_special_token_0|>
def testGetGoogleSheetsServiceByCred_badFilePath_raisesFileNotFoundError(
self):
bad_file_path = './credential.json'
with self.assertRaises(FileNotFoundError):
authenticator.GetGoogleSheetsServiceByCredential(
gcp_credential_path=bad_file_path)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class AuthenticatorTest(absltest.TestCase):
"""Tests GetGoogleSheetsService method in authenticator module."""
def testGetGoogleSheetsServiceByCred_badFilePath_raisesFileNotFoundError(
self):
bad_file_path = './credential.json'
with self.assertRaises(FileNotFoundError):
authenticator.GetGoogleSheetsServiceByCredential(
gcp_credential_path=bad_file_path)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class AuthenticatorTest(absltest.TestCase):
"""Tests GetGoogleSheetsService method in authenticator module."""
def testGetGoogleSheetsServiceByCred_badFilePath_raisesFileNotFoundError(
self):
bad_file_path = './credential.json'
with self.assertRaises(FileNotFoundError):
authenticator.GetGoogleSheetsServiceByCredential(
gcp_credential_path=bad_file_path)
if __name__ == '__main__':
absltest.main()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from absl.testing import absltest
from model import authenticator
class AuthenticatorTest(absltest.TestCase):
"""Tests GetGoogleSheetsService method in authenticator module."""
def testGetGoogleSheetsServiceByCred_badFilePath_raisesFileNotFoundError(
self):
bad_file_path = './credential.json'
with self.assertRaises(FileNotFoundError):
authenticator.GetGoogleSheetsServiceByCredential(
gcp_credential_path=bad_file_path)
if __name__ == '__main__':
absltest.main()
<|reserved_special_token_1|>
# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for authenticator."""
from absl.testing import absltest
from model import authenticator
class AuthenticatorTest(absltest.TestCase):
"""Tests GetGoogleSheetsService method in authenticator module."""
def testGetGoogleSheetsServiceByCred_badFilePath_raisesFileNotFoundError(
self,
):
bad_file_path = './credential.json'
with self.assertRaises(FileNotFoundError):
authenticator.GetGoogleSheetsServiceByCredential(
gcp_credential_path=bad_file_path
)
if __name__ == '__main__':
absltest.main()
|
flexible
|
{
"blob_id": "86b24ddaae0d3477a3f82295224b7e84805eed91",
"index": 1413,
"step-1": "<mask token>\n\n\nclass AuthenticatorTest(absltest.TestCase):\n <mask token>\n\n def testGetGoogleSheetsServiceByCred_badFilePath_raisesFileNotFoundError(\n self):\n bad_file_path = './credential.json'\n with self.assertRaises(FileNotFoundError):\n authenticator.GetGoogleSheetsServiceByCredential(\n gcp_credential_path=bad_file_path)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass AuthenticatorTest(absltest.TestCase):\n \"\"\"Tests GetGoogleSheetsService method in authenticator module.\"\"\"\n\n def testGetGoogleSheetsServiceByCred_badFilePath_raisesFileNotFoundError(\n self):\n bad_file_path = './credential.json'\n with self.assertRaises(FileNotFoundError):\n authenticator.GetGoogleSheetsServiceByCredential(\n gcp_credential_path=bad_file_path)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass AuthenticatorTest(absltest.TestCase):\n \"\"\"Tests GetGoogleSheetsService method in authenticator module.\"\"\"\n\n def testGetGoogleSheetsServiceByCred_badFilePath_raisesFileNotFoundError(\n self):\n bad_file_path = './credential.json'\n with self.assertRaises(FileNotFoundError):\n authenticator.GetGoogleSheetsServiceByCredential(\n gcp_credential_path=bad_file_path)\n\n\nif __name__ == '__main__':\n absltest.main()\n",
"step-4": "<mask token>\nfrom absl.testing import absltest\nfrom model import authenticator\n\n\nclass AuthenticatorTest(absltest.TestCase):\n \"\"\"Tests GetGoogleSheetsService method in authenticator module.\"\"\"\n\n def testGetGoogleSheetsServiceByCred_badFilePath_raisesFileNotFoundError(\n self):\n bad_file_path = './credential.json'\n with self.assertRaises(FileNotFoundError):\n authenticator.GetGoogleSheetsServiceByCredential(\n gcp_credential_path=bad_file_path)\n\n\nif __name__ == '__main__':\n absltest.main()\n",
"step-5": "# Copyright 2023 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the License);\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an AS IS BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for authenticator.\"\"\"\n\nfrom absl.testing import absltest\n\nfrom model import authenticator\n\n\nclass AuthenticatorTest(absltest.TestCase):\n \"\"\"Tests GetGoogleSheetsService method in authenticator module.\"\"\"\n\n def testGetGoogleSheetsServiceByCred_badFilePath_raisesFileNotFoundError(\n self,\n ):\n bad_file_path = './credential.json'\n\n with self.assertRaises(FileNotFoundError):\n authenticator.GetGoogleSheetsServiceByCredential(\n gcp_credential_path=bad_file_path\n )\n\n\nif __name__ == '__main__':\n absltest.main()\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
"""
This module is an intermediate layer between flopy version 3.2
and the inowas-modflow-configuration format.
Author: Ralf Junghanns
EMail: ralf.junghanns@gmail.com
"""
from .BasAdapter import BasAdapter
from .ChdAdapter import ChdAdapter
from .DisAdapter import DisAdapter
from .GhbAdapter import GhbAdapter
from .LpfAdapter import LpfAdapter
from .MfAdapter import MfAdapter
from .NwtAdapter import NwtAdapter
from .OcAdapter import OcAdapter
from .PcgAdapter import PcgAdapter
from .RchAdapter import RchAdapter
from .RivAdapter import RivAdapter
from .ReadBudget import ReadBudget
from .ReadDrawdown import ReadDrawdown
from .ReadHead import ReadHead
from .UpwAdapter import UpwAdapter
from .WelAdapter import WelAdapter
from .LmtAdapter import LmtAdapter
from .MtAdapter import MtAdapter
from .AdvAdapter import AdvAdapter
from .BtnAdapter import BtnAdapter
from .DspAdapter import DspAdapter
from .GcgAdapter import GcgAdapter
from .LktAdapter import LktAdapter
from .PhcAdapter import PhcAdapter
from .RctAdapter import RctAdapter
from .SftAdapter import SftAdapter
from .SsmAdapter import SsmAdapter
from .TobAdapter import TobAdapter
from .UztAdapter import UztAdapter
class InowasFlopyCalculationAdapter:
"""The Flopy Class"""
_version = None
_uuid = None
_mf = None
_mt = None
_report = ''
mf_package_order = [
'mf', 'dis', 'bas', 'bas6',
'riv', 'wel', 'rch', 'chd', 'ghb',
'lpf', 'upw', 'pcg', 'nwt', 'oc', 'lmt', 'lmt6'
]
mt_package_order = [
"mt", "btn", "adv", "dsp", "gcg", "ssm", "lkt",
"phc", "rct", "sft", "tob", "uzt"
]
def __init__(self, version, data, uuid):
self._mf_data = data.get("mf")
self._mt_data = data.get("mt")
self._version = version
self._uuid = uuid
if self._mf_data is not None:
package_content = self.read_packages(self._mf_data)
self.create_model(self.mf_package_order, package_content)
if self._mf_data.get("write_input"):
self.write_input_model(self._mf)
if self._mf_data.get("run_model"):
self._report += self.run_model(self._mf)
if self._mt_data is not None:
package_content = self.read_packages(self._mt_data)
self.create_model(self.mt_package_order, package_content)
if self._mt_data.get("write_input"):
self.write_input_model(self._mt)
if self._mt_data.get("run_model"):
self._report += self.run_model(self._mt)
@staticmethod
def read_packages(data):
package_content = {}
for package in data["packages"]:
print('Read Flopy Package: %s' % package)
package_content[package.lower()] = data[package]
return package_content
def create_model(self, package_order, package_content):
for package in package_order:
if package in package_content:
print('Create Flopy Package: %s' % package)
self.create_package(package, package_content[package])
@staticmethod
def write_input_model(model):
print('Write %s input files' % model)
model.write_input()
@staticmethod
def run_model(model):
print('Run the %s model' % model)
print(model.namefile)
print(model.exe_name)
success, report = model.run_model(report=True, silent=True)
return ' \n'.join(str(e) for e in report + [success])
def check_model(self):
if self._mf is not None:
self._mf.check()
if self._mt is not None:
self._mt.check()
def create_package(self, name, content):
# Modlfow packages
if name == 'mf':
self._mf = MfAdapter(content).get_package()
if name == 'dis':
DisAdapter(content).get_package(self._mf)
if name == 'bas' or name == 'bas6':
BasAdapter(content).get_package(self._mf)
if name == 'lpf':
LpfAdapter(content).get_package(self._mf)
if name == 'upw':
UpwAdapter(content).get_package(self._mf)
if name == 'pcg':
PcgAdapter(content).get_package(self._mf)
if name == 'nwt':
NwtAdapter(content).get_package(self._mf)
if name == 'oc':
OcAdapter(content).get_package(self._mf)
if name == 'riv':
RivAdapter(content).get_package(self._mf)
if name == 'wel':
WelAdapter(content).get_package(self._mf)
if name == 'rch':
RchAdapter(content).get_package(self._mf)
if name == 'chd':
ChdAdapter(content).get_package(self._mf)
if name == 'ghb':
GhbAdapter(content).get_package(self._mf)
if name == 'lmt':
LmtAdapter(content).get_package(self._mf)
# MT3D packages
if name == 'mt':
self._mt = MtAdapter(content).get_package(self._mf)
if name == 'adv':
AdvAdapter(content).get_package(self._mt)
if name == 'btn':
BtnAdapter(content).get_package(self._mt)
if name == 'dsp':
DspAdapter(content).get_package(self._mt)
if name == 'gcg':
GcgAdapter(content).get_package(self._mt)
if name == 'lkt':
LktAdapter(content).get_package(self._mt)
if name == 'phc':
PhcAdapter(content).get_package(self._mt)
if name == 'rct':
RctAdapter(content).get_package(self._mt)
if name == 'sft':
SftAdapter(content).get_package(self._mt)
if name == 'ssm':
SsmAdapter(content).get_package(self._mt)
if name == 'tob':
TobAdapter(content).get_package(self._mt)
if name == 'uzt':
UztAdapter(content).get_package(self._mt)
def response(self):
key = 'mf'
if 'MF' in self._mf_data:
key = 'MF'
heads = ReadHead(self._mf_data[key]['model_ws'])
drawdowns = ReadDrawdown(self._mf_data[key]['model_ws'])
budgets = ReadBudget(self._mf_data[key]['model_ws'])
response = {}
response['heads'] = heads.read_times()
response['drawdowns'] = drawdowns.read_times()
response['budgets'] = budgets.read_times()
response['number_of_layers'] = heads.read_number_of_layers()
return response
def response_message(self):
return self._report
|
normal
|
{
"blob_id": "fb64003c1acbddcbe952a17edcbf293a54ef28ae",
"index": 2185,
"step-1": "<mask token>\n\n\nclass InowasFlopyCalculationAdapter:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, version, data, uuid):\n self._mf_data = data.get('mf')\n self._mt_data = data.get('mt')\n self._version = version\n self._uuid = uuid\n if self._mf_data is not None:\n package_content = self.read_packages(self._mf_data)\n self.create_model(self.mf_package_order, package_content)\n if self._mf_data.get('write_input'):\n self.write_input_model(self._mf)\n if self._mf_data.get('run_model'):\n self._report += self.run_model(self._mf)\n if self._mt_data is not None:\n package_content = self.read_packages(self._mt_data)\n self.create_model(self.mt_package_order, package_content)\n if self._mt_data.get('write_input'):\n self.write_input_model(self._mt)\n if self._mt_data.get('run_model'):\n self._report += self.run_model(self._mt)\n\n @staticmethod\n def read_packages(data):\n package_content = {}\n for package in data['packages']:\n print('Read Flopy Package: %s' % package)\n package_content[package.lower()] = data[package]\n return package_content\n <mask token>\n\n @staticmethod\n def write_input_model(model):\n print('Write %s input files' % model)\n model.write_input()\n <mask token>\n\n def check_model(self):\n if self._mf is not None:\n self._mf.check()\n if self._mt is not None:\n self._mt.check()\n\n def create_package(self, name, content):\n if name == 'mf':\n self._mf = MfAdapter(content).get_package()\n if name == 'dis':\n DisAdapter(content).get_package(self._mf)\n if name == 'bas' or name == 'bas6':\n BasAdapter(content).get_package(self._mf)\n if name == 'lpf':\n LpfAdapter(content).get_package(self._mf)\n if name == 'upw':\n UpwAdapter(content).get_package(self._mf)\n if name == 'pcg':\n PcgAdapter(content).get_package(self._mf)\n if name == 'nwt':\n NwtAdapter(content).get_package(self._mf)\n if name == 'oc':\n OcAdapter(content).get_package(self._mf)\n if name == 'riv':\n RivAdapter(content).get_package(self._mf)\n if name == 'wel':\n WelAdapter(content).get_package(self._mf)\n if name == 'rch':\n RchAdapter(content).get_package(self._mf)\n if name == 'chd':\n ChdAdapter(content).get_package(self._mf)\n if name == 'ghb':\n GhbAdapter(content).get_package(self._mf)\n if name == 'lmt':\n LmtAdapter(content).get_package(self._mf)\n if name == 'mt':\n self._mt = MtAdapter(content).get_package(self._mf)\n if name == 'adv':\n AdvAdapter(content).get_package(self._mt)\n if name == 'btn':\n BtnAdapter(content).get_package(self._mt)\n if name == 'dsp':\n DspAdapter(content).get_package(self._mt)\n if name == 'gcg':\n GcgAdapter(content).get_package(self._mt)\n if name == 'lkt':\n LktAdapter(content).get_package(self._mt)\n if name == 'phc':\n PhcAdapter(content).get_package(self._mt)\n if name == 'rct':\n RctAdapter(content).get_package(self._mt)\n if name == 'sft':\n SftAdapter(content).get_package(self._mt)\n if name == 'ssm':\n SsmAdapter(content).get_package(self._mt)\n if name == 'tob':\n TobAdapter(content).get_package(self._mt)\n if name == 'uzt':\n UztAdapter(content).get_package(self._mt)\n\n def response(self):\n key = 'mf'\n if 'MF' in self._mf_data:\n key = 'MF'\n heads = ReadHead(self._mf_data[key]['model_ws'])\n drawdowns = ReadDrawdown(self._mf_data[key]['model_ws'])\n budgets = ReadBudget(self._mf_data[key]['model_ws'])\n response = {}\n response['heads'] = heads.read_times()\n response['drawdowns'] = drawdowns.read_times()\n response['budgets'] = budgets.read_times()\n response['number_of_layers'] = heads.read_number_of_layers()\n return response\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass InowasFlopyCalculationAdapter:\n <mask token>\n _version = None\n _uuid = None\n _mf = None\n _mt = None\n _report = ''\n mf_package_order = ['mf', 'dis', 'bas', 'bas6', 'riv', 'wel', 'rch',\n 'chd', 'ghb', 'lpf', 'upw', 'pcg', 'nwt', 'oc', 'lmt', 'lmt6']\n mt_package_order = ['mt', 'btn', 'adv', 'dsp', 'gcg', 'ssm', 'lkt',\n 'phc', 'rct', 'sft', 'tob', 'uzt']\n\n def __init__(self, version, data, uuid):\n self._mf_data = data.get('mf')\n self._mt_data = data.get('mt')\n self._version = version\n self._uuid = uuid\n if self._mf_data is not None:\n package_content = self.read_packages(self._mf_data)\n self.create_model(self.mf_package_order, package_content)\n if self._mf_data.get('write_input'):\n self.write_input_model(self._mf)\n if self._mf_data.get('run_model'):\n self._report += self.run_model(self._mf)\n if self._mt_data is not None:\n package_content = self.read_packages(self._mt_data)\n self.create_model(self.mt_package_order, package_content)\n if self._mt_data.get('write_input'):\n self.write_input_model(self._mt)\n if self._mt_data.get('run_model'):\n self._report += self.run_model(self._mt)\n\n @staticmethod\n def read_packages(data):\n package_content = {}\n for package in data['packages']:\n print('Read Flopy Package: %s' % package)\n package_content[package.lower()] = data[package]\n return package_content\n\n def create_model(self, package_order, package_content):\n for package in package_order:\n if package in package_content:\n print('Create Flopy Package: %s' % package)\n self.create_package(package, package_content[package])\n\n @staticmethod\n def write_input_model(model):\n print('Write %s input files' % model)\n model.write_input()\n\n @staticmethod\n def run_model(model):\n print('Run the %s model' % model)\n print(model.namefile)\n print(model.exe_name)\n success, report = model.run_model(report=True, silent=True)\n return ' \\n'.join(str(e) for e in report + [success])\n\n def check_model(self):\n if self._mf is not None:\n self._mf.check()\n if self._mt is not None:\n self._mt.check()\n\n def create_package(self, name, content):\n if name == 'mf':\n self._mf = MfAdapter(content).get_package()\n if name == 'dis':\n DisAdapter(content).get_package(self._mf)\n if name == 'bas' or name == 'bas6':\n BasAdapter(content).get_package(self._mf)\n if name == 'lpf':\n LpfAdapter(content).get_package(self._mf)\n if name == 'upw':\n UpwAdapter(content).get_package(self._mf)\n if name == 'pcg':\n PcgAdapter(content).get_package(self._mf)\n if name == 'nwt':\n NwtAdapter(content).get_package(self._mf)\n if name == 'oc':\n OcAdapter(content).get_package(self._mf)\n if name == 'riv':\n RivAdapter(content).get_package(self._mf)\n if name == 'wel':\n WelAdapter(content).get_package(self._mf)\n if name == 'rch':\n RchAdapter(content).get_package(self._mf)\n if name == 'chd':\n ChdAdapter(content).get_package(self._mf)\n if name == 'ghb':\n GhbAdapter(content).get_package(self._mf)\n if name == 'lmt':\n LmtAdapter(content).get_package(self._mf)\n if name == 'mt':\n self._mt = MtAdapter(content).get_package(self._mf)\n if name == 'adv':\n AdvAdapter(content).get_package(self._mt)\n if name == 'btn':\n BtnAdapter(content).get_package(self._mt)\n if name == 'dsp':\n DspAdapter(content).get_package(self._mt)\n if name == 'gcg':\n GcgAdapter(content).get_package(self._mt)\n if name == 'lkt':\n LktAdapter(content).get_package(self._mt)\n if name == 'phc':\n PhcAdapter(content).get_package(self._mt)\n if name == 'rct':\n RctAdapter(content).get_package(self._mt)\n if name == 'sft':\n SftAdapter(content).get_package(self._mt)\n if name == 'ssm':\n SsmAdapter(content).get_package(self._mt)\n if name == 'tob':\n TobAdapter(content).get_package(self._mt)\n if name == 'uzt':\n UztAdapter(content).get_package(self._mt)\n\n def response(self):\n key = 'mf'\n if 'MF' in self._mf_data:\n key = 'MF'\n heads = ReadHead(self._mf_data[key]['model_ws'])\n drawdowns = ReadDrawdown(self._mf_data[key]['model_ws'])\n budgets = ReadBudget(self._mf_data[key]['model_ws'])\n response = {}\n response['heads'] = heads.read_times()\n response['drawdowns'] = drawdowns.read_times()\n response['budgets'] = budgets.read_times()\n response['number_of_layers'] = heads.read_number_of_layers()\n return response\n\n def response_message(self):\n return self._report\n",
"step-3": "<mask token>\n\n\nclass InowasFlopyCalculationAdapter:\n \"\"\"The Flopy Class\"\"\"\n _version = None\n _uuid = None\n _mf = None\n _mt = None\n _report = ''\n mf_package_order = ['mf', 'dis', 'bas', 'bas6', 'riv', 'wel', 'rch',\n 'chd', 'ghb', 'lpf', 'upw', 'pcg', 'nwt', 'oc', 'lmt', 'lmt6']\n mt_package_order = ['mt', 'btn', 'adv', 'dsp', 'gcg', 'ssm', 'lkt',\n 'phc', 'rct', 'sft', 'tob', 'uzt']\n\n def __init__(self, version, data, uuid):\n self._mf_data = data.get('mf')\n self._mt_data = data.get('mt')\n self._version = version\n self._uuid = uuid\n if self._mf_data is not None:\n package_content = self.read_packages(self._mf_data)\n self.create_model(self.mf_package_order, package_content)\n if self._mf_data.get('write_input'):\n self.write_input_model(self._mf)\n if self._mf_data.get('run_model'):\n self._report += self.run_model(self._mf)\n if self._mt_data is not None:\n package_content = self.read_packages(self._mt_data)\n self.create_model(self.mt_package_order, package_content)\n if self._mt_data.get('write_input'):\n self.write_input_model(self._mt)\n if self._mt_data.get('run_model'):\n self._report += self.run_model(self._mt)\n\n @staticmethod\n def read_packages(data):\n package_content = {}\n for package in data['packages']:\n print('Read Flopy Package: %s' % package)\n package_content[package.lower()] = data[package]\n return package_content\n\n def create_model(self, package_order, package_content):\n for package in package_order:\n if package in package_content:\n print('Create Flopy Package: %s' % package)\n self.create_package(package, package_content[package])\n\n @staticmethod\n def write_input_model(model):\n print('Write %s input files' % model)\n model.write_input()\n\n @staticmethod\n def run_model(model):\n print('Run the %s model' % model)\n print(model.namefile)\n print(model.exe_name)\n success, report = model.run_model(report=True, silent=True)\n return ' \\n'.join(str(e) for e in report + [success])\n\n def check_model(self):\n if self._mf is not None:\n self._mf.check()\n if self._mt is not None:\n self._mt.check()\n\n def create_package(self, name, content):\n if name == 'mf':\n self._mf = MfAdapter(content).get_package()\n if name == 'dis':\n DisAdapter(content).get_package(self._mf)\n if name == 'bas' or name == 'bas6':\n BasAdapter(content).get_package(self._mf)\n if name == 'lpf':\n LpfAdapter(content).get_package(self._mf)\n if name == 'upw':\n UpwAdapter(content).get_package(self._mf)\n if name == 'pcg':\n PcgAdapter(content).get_package(self._mf)\n if name == 'nwt':\n NwtAdapter(content).get_package(self._mf)\n if name == 'oc':\n OcAdapter(content).get_package(self._mf)\n if name == 'riv':\n RivAdapter(content).get_package(self._mf)\n if name == 'wel':\n WelAdapter(content).get_package(self._mf)\n if name == 'rch':\n RchAdapter(content).get_package(self._mf)\n if name == 'chd':\n ChdAdapter(content).get_package(self._mf)\n if name == 'ghb':\n GhbAdapter(content).get_package(self._mf)\n if name == 'lmt':\n LmtAdapter(content).get_package(self._mf)\n if name == 'mt':\n self._mt = MtAdapter(content).get_package(self._mf)\n if name == 'adv':\n AdvAdapter(content).get_package(self._mt)\n if name == 'btn':\n BtnAdapter(content).get_package(self._mt)\n if name == 'dsp':\n DspAdapter(content).get_package(self._mt)\n if name == 'gcg':\n GcgAdapter(content).get_package(self._mt)\n if name == 'lkt':\n LktAdapter(content).get_package(self._mt)\n if name == 'phc':\n PhcAdapter(content).get_package(self._mt)\n if name == 'rct':\n RctAdapter(content).get_package(self._mt)\n if name == 'sft':\n SftAdapter(content).get_package(self._mt)\n if name == 'ssm':\n SsmAdapter(content).get_package(self._mt)\n if name == 'tob':\n TobAdapter(content).get_package(self._mt)\n if name == 'uzt':\n UztAdapter(content).get_package(self._mt)\n\n def response(self):\n key = 'mf'\n if 'MF' in self._mf_data:\n key = 'MF'\n heads = ReadHead(self._mf_data[key]['model_ws'])\n drawdowns = ReadDrawdown(self._mf_data[key]['model_ws'])\n budgets = ReadBudget(self._mf_data[key]['model_ws'])\n response = {}\n response['heads'] = heads.read_times()\n response['drawdowns'] = drawdowns.read_times()\n response['budgets'] = budgets.read_times()\n response['number_of_layers'] = heads.read_number_of_layers()\n return response\n\n def response_message(self):\n return self._report\n",
"step-4": "<mask token>\nfrom .BasAdapter import BasAdapter\nfrom .ChdAdapter import ChdAdapter\nfrom .DisAdapter import DisAdapter\nfrom .GhbAdapter import GhbAdapter\nfrom .LpfAdapter import LpfAdapter\nfrom .MfAdapter import MfAdapter\nfrom .NwtAdapter import NwtAdapter\nfrom .OcAdapter import OcAdapter\nfrom .PcgAdapter import PcgAdapter\nfrom .RchAdapter import RchAdapter\nfrom .RivAdapter import RivAdapter\nfrom .ReadBudget import ReadBudget\nfrom .ReadDrawdown import ReadDrawdown\nfrom .ReadHead import ReadHead\nfrom .UpwAdapter import UpwAdapter\nfrom .WelAdapter import WelAdapter\nfrom .LmtAdapter import LmtAdapter\nfrom .MtAdapter import MtAdapter\nfrom .AdvAdapter import AdvAdapter\nfrom .BtnAdapter import BtnAdapter\nfrom .DspAdapter import DspAdapter\nfrom .GcgAdapter import GcgAdapter\nfrom .LktAdapter import LktAdapter\nfrom .PhcAdapter import PhcAdapter\nfrom .RctAdapter import RctAdapter\nfrom .SftAdapter import SftAdapter\nfrom .SsmAdapter import SsmAdapter\nfrom .TobAdapter import TobAdapter\nfrom .UztAdapter import UztAdapter\n\n\nclass InowasFlopyCalculationAdapter:\n \"\"\"The Flopy Class\"\"\"\n _version = None\n _uuid = None\n _mf = None\n _mt = None\n _report = ''\n mf_package_order = ['mf', 'dis', 'bas', 'bas6', 'riv', 'wel', 'rch',\n 'chd', 'ghb', 'lpf', 'upw', 'pcg', 'nwt', 'oc', 'lmt', 'lmt6']\n mt_package_order = ['mt', 'btn', 'adv', 'dsp', 'gcg', 'ssm', 'lkt',\n 'phc', 'rct', 'sft', 'tob', 'uzt']\n\n def __init__(self, version, data, uuid):\n self._mf_data = data.get('mf')\n self._mt_data = data.get('mt')\n self._version = version\n self._uuid = uuid\n if self._mf_data is not None:\n package_content = self.read_packages(self._mf_data)\n self.create_model(self.mf_package_order, package_content)\n if self._mf_data.get('write_input'):\n self.write_input_model(self._mf)\n if self._mf_data.get('run_model'):\n self._report += self.run_model(self._mf)\n if self._mt_data is not None:\n package_content = self.read_packages(self._mt_data)\n self.create_model(self.mt_package_order, package_content)\n if self._mt_data.get('write_input'):\n self.write_input_model(self._mt)\n if self._mt_data.get('run_model'):\n self._report += self.run_model(self._mt)\n\n @staticmethod\n def read_packages(data):\n package_content = {}\n for package in data['packages']:\n print('Read Flopy Package: %s' % package)\n package_content[package.lower()] = data[package]\n return package_content\n\n def create_model(self, package_order, package_content):\n for package in package_order:\n if package in package_content:\n print('Create Flopy Package: %s' % package)\n self.create_package(package, package_content[package])\n\n @staticmethod\n def write_input_model(model):\n print('Write %s input files' % model)\n model.write_input()\n\n @staticmethod\n def run_model(model):\n print('Run the %s model' % model)\n print(model.namefile)\n print(model.exe_name)\n success, report = model.run_model(report=True, silent=True)\n return ' \\n'.join(str(e) for e in report + [success])\n\n def check_model(self):\n if self._mf is not None:\n self._mf.check()\n if self._mt is not None:\n self._mt.check()\n\n def create_package(self, name, content):\n if name == 'mf':\n self._mf = MfAdapter(content).get_package()\n if name == 'dis':\n DisAdapter(content).get_package(self._mf)\n if name == 'bas' or name == 'bas6':\n BasAdapter(content).get_package(self._mf)\n if name == 'lpf':\n LpfAdapter(content).get_package(self._mf)\n if name == 'upw':\n UpwAdapter(content).get_package(self._mf)\n if name == 'pcg':\n PcgAdapter(content).get_package(self._mf)\n if name == 'nwt':\n NwtAdapter(content).get_package(self._mf)\n if name == 'oc':\n OcAdapter(content).get_package(self._mf)\n if name == 'riv':\n RivAdapter(content).get_package(self._mf)\n if name == 'wel':\n WelAdapter(content).get_package(self._mf)\n if name == 'rch':\n RchAdapter(content).get_package(self._mf)\n if name == 'chd':\n ChdAdapter(content).get_package(self._mf)\n if name == 'ghb':\n GhbAdapter(content).get_package(self._mf)\n if name == 'lmt':\n LmtAdapter(content).get_package(self._mf)\n if name == 'mt':\n self._mt = MtAdapter(content).get_package(self._mf)\n if name == 'adv':\n AdvAdapter(content).get_package(self._mt)\n if name == 'btn':\n BtnAdapter(content).get_package(self._mt)\n if name == 'dsp':\n DspAdapter(content).get_package(self._mt)\n if name == 'gcg':\n GcgAdapter(content).get_package(self._mt)\n if name == 'lkt':\n LktAdapter(content).get_package(self._mt)\n if name == 'phc':\n PhcAdapter(content).get_package(self._mt)\n if name == 'rct':\n RctAdapter(content).get_package(self._mt)\n if name == 'sft':\n SftAdapter(content).get_package(self._mt)\n if name == 'ssm':\n SsmAdapter(content).get_package(self._mt)\n if name == 'tob':\n TobAdapter(content).get_package(self._mt)\n if name == 'uzt':\n UztAdapter(content).get_package(self._mt)\n\n def response(self):\n key = 'mf'\n if 'MF' in self._mf_data:\n key = 'MF'\n heads = ReadHead(self._mf_data[key]['model_ws'])\n drawdowns = ReadDrawdown(self._mf_data[key]['model_ws'])\n budgets = ReadBudget(self._mf_data[key]['model_ws'])\n response = {}\n response['heads'] = heads.read_times()\n response['drawdowns'] = drawdowns.read_times()\n response['budgets'] = budgets.read_times()\n response['number_of_layers'] = heads.read_number_of_layers()\n return response\n\n def response_message(self):\n return self._report\n",
"step-5": "\"\"\"\nThis module is an intermediate layer between flopy version 3.2\nand the inowas-modflow-configuration format.\n\nAuthor: Ralf Junghanns\nEMail: ralf.junghanns@gmail.com\n\"\"\"\n\nfrom .BasAdapter import BasAdapter\nfrom .ChdAdapter import ChdAdapter\nfrom .DisAdapter import DisAdapter\nfrom .GhbAdapter import GhbAdapter\nfrom .LpfAdapter import LpfAdapter\nfrom .MfAdapter import MfAdapter\nfrom .NwtAdapter import NwtAdapter\nfrom .OcAdapter import OcAdapter\nfrom .PcgAdapter import PcgAdapter\nfrom .RchAdapter import RchAdapter\nfrom .RivAdapter import RivAdapter\nfrom .ReadBudget import ReadBudget\nfrom .ReadDrawdown import ReadDrawdown\nfrom .ReadHead import ReadHead\nfrom .UpwAdapter import UpwAdapter\nfrom .WelAdapter import WelAdapter\nfrom .LmtAdapter import LmtAdapter\nfrom .MtAdapter import MtAdapter\nfrom .AdvAdapter import AdvAdapter\nfrom .BtnAdapter import BtnAdapter\nfrom .DspAdapter import DspAdapter\nfrom .GcgAdapter import GcgAdapter\nfrom .LktAdapter import LktAdapter\nfrom .PhcAdapter import PhcAdapter\nfrom .RctAdapter import RctAdapter\nfrom .SftAdapter import SftAdapter\nfrom .SsmAdapter import SsmAdapter\nfrom .TobAdapter import TobAdapter\nfrom .UztAdapter import UztAdapter\n\n\nclass InowasFlopyCalculationAdapter:\n \"\"\"The Flopy Class\"\"\"\n\n _version = None\n _uuid = None\n _mf = None\n _mt = None\n _report = ''\n\n mf_package_order = [\n 'mf', 'dis', 'bas', 'bas6',\n 'riv', 'wel', 'rch', 'chd', 'ghb',\n 'lpf', 'upw', 'pcg', 'nwt', 'oc', 'lmt', 'lmt6'\n ]\n\n mt_package_order = [\n \"mt\", \"btn\", \"adv\", \"dsp\", \"gcg\", \"ssm\", \"lkt\",\n \"phc\", \"rct\", \"sft\", \"tob\", \"uzt\"\n ]\n\n def __init__(self, version, data, uuid):\n self._mf_data = data.get(\"mf\")\n self._mt_data = data.get(\"mt\")\n self._version = version\n self._uuid = uuid\n\n if self._mf_data is not None:\n package_content = self.read_packages(self._mf_data)\n self.create_model(self.mf_package_order, package_content)\n\n if self._mf_data.get(\"write_input\"):\n self.write_input_model(self._mf)\n\n if self._mf_data.get(\"run_model\"):\n self._report += self.run_model(self._mf)\n\n if self._mt_data is not None:\n package_content = self.read_packages(self._mt_data)\n self.create_model(self.mt_package_order, package_content)\n\n if self._mt_data.get(\"write_input\"):\n self.write_input_model(self._mt)\n\n if self._mt_data.get(\"run_model\"):\n self._report += self.run_model(self._mt)\n\n @staticmethod\n def read_packages(data):\n package_content = {}\n for package in data[\"packages\"]:\n print('Read Flopy Package: %s' % package)\n package_content[package.lower()] = data[package]\n return package_content\n\n def create_model(self, package_order, package_content):\n for package in package_order:\n if package in package_content:\n print('Create Flopy Package: %s' % package)\n self.create_package(package, package_content[package])\n\n @staticmethod\n def write_input_model(model):\n print('Write %s input files' % model)\n model.write_input()\n\n @staticmethod\n def run_model(model):\n print('Run the %s model' % model)\n print(model.namefile)\n print(model.exe_name)\n success, report = model.run_model(report=True, silent=True)\n return ' \\n'.join(str(e) for e in report + [success])\n\n def check_model(self):\n if self._mf is not None:\n self._mf.check()\n if self._mt is not None:\n self._mt.check()\n\n def create_package(self, name, content):\n # Modlfow packages\n if name == 'mf':\n self._mf = MfAdapter(content).get_package()\n if name == 'dis':\n DisAdapter(content).get_package(self._mf)\n if name == 'bas' or name == 'bas6':\n BasAdapter(content).get_package(self._mf)\n if name == 'lpf':\n LpfAdapter(content).get_package(self._mf)\n if name == 'upw':\n UpwAdapter(content).get_package(self._mf)\n if name == 'pcg':\n PcgAdapter(content).get_package(self._mf)\n if name == 'nwt':\n NwtAdapter(content).get_package(self._mf)\n if name == 'oc':\n OcAdapter(content).get_package(self._mf)\n if name == 'riv':\n RivAdapter(content).get_package(self._mf)\n if name == 'wel':\n WelAdapter(content).get_package(self._mf)\n if name == 'rch':\n RchAdapter(content).get_package(self._mf)\n if name == 'chd':\n ChdAdapter(content).get_package(self._mf)\n if name == 'ghb':\n GhbAdapter(content).get_package(self._mf)\n if name == 'lmt':\n LmtAdapter(content).get_package(self._mf)\n\n # MT3D packages\n if name == 'mt':\n self._mt = MtAdapter(content).get_package(self._mf)\n if name == 'adv':\n AdvAdapter(content).get_package(self._mt)\n if name == 'btn':\n BtnAdapter(content).get_package(self._mt)\n if name == 'dsp':\n DspAdapter(content).get_package(self._mt)\n if name == 'gcg':\n GcgAdapter(content).get_package(self._mt)\n if name == 'lkt':\n LktAdapter(content).get_package(self._mt)\n if name == 'phc':\n PhcAdapter(content).get_package(self._mt)\n if name == 'rct':\n RctAdapter(content).get_package(self._mt)\n if name == 'sft':\n SftAdapter(content).get_package(self._mt)\n if name == 'ssm':\n SsmAdapter(content).get_package(self._mt)\n if name == 'tob':\n TobAdapter(content).get_package(self._mt)\n if name == 'uzt':\n UztAdapter(content).get_package(self._mt)\n\n def response(self):\n key = 'mf'\n if 'MF' in self._mf_data:\n key = 'MF'\n\n heads = ReadHead(self._mf_data[key]['model_ws'])\n drawdowns = ReadDrawdown(self._mf_data[key]['model_ws'])\n budgets = ReadBudget(self._mf_data[key]['model_ws'])\n response = {}\n response['heads'] = heads.read_times()\n response['drawdowns'] = drawdowns.read_times()\n response['budgets'] = budgets.read_times()\n response['number_of_layers'] = heads.read_number_of_layers()\n\n return response\n\n def response_message(self):\n return self._report\n",
"step-ids": [
7,
11,
12,
13,
14
]
}
|
[
7,
11,
12,
13,
14
] |
from collections.abc import Iterator
import json
import click
def print_json(obj, err=False):
if isinstance(obj, Iterator):
obj = list(obj)
click.echo(json.dumps(obj, sort_keys=True, indent=4, ensure_ascii=False),
err=err)
def show_fields(*fields):
def show(obj, verbose=False):
if verbose:
return obj
about = {}
for entry in fields:
if isinstance(entry, str):
entry = (entry,)
name, *subpath = entry
try:
value = obj[name]
except KeyError:
continue
for sp in subpath:
if value is None:
break
elif callable(sp):
value = sp(value)
elif isinstance(value, list):
value = [v and v[sp] for v in value]
else:
value = value[sp]
about[name] = value
return about
return show
repo_info = show_fields(
("owner", "login"),
"name",
"url",
"html_url",
"clone_url",
"git_url",
"ssh_url",
"full_name",
"description",
"homepage",
"private",
"default_branch",
"created_at",
"updated_at",
"pushed_at",
"fork",
"forks_count",
"watchers_count",
"size",
"subscribers_count",
"stargazers_count",
"id",
"language",
"network_count",
"open_issues_count",
("parent", "full_name"),
("source", "full_name"),
)
gist_info = show_fields(
"id",
"url",
"git_push_url",
("files", lambda files: {
fname: {k:v for k,v in about.items() if k != 'content'}
for fname, about in files.items()
}),
"public",
"html_url",
("owner", "login"),
"description",
"created_at",
"updated_at",
"comments",
("fork_of", "id"),
("forks", "id"),
)
issue_info = show_fields(
("assignees", "login"),
"closed_at",
("closed_by", "login"),
"comments",
"created_at",
"html_url",
"id",
("labels", "name"),
"locked",
("milestone", "title"),
"number",
"state",
"title",
"updated_at",
"url",
("user", "login"),
"repository_url",
### pull_request
)
|
normal
|
{
"blob_id": "d340ac979f57cf4650131665e4fa5b9923f22a3e",
"index": 6691,
"step-1": "<mask token>\n\n\ndef print_json(obj, err=False):\n if isinstance(obj, Iterator):\n obj = list(obj)\n click.echo(json.dumps(obj, sort_keys=True, indent=4, ensure_ascii=False\n ), err=err)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef print_json(obj, err=False):\n if isinstance(obj, Iterator):\n obj = list(obj)\n click.echo(json.dumps(obj, sort_keys=True, indent=4, ensure_ascii=False\n ), err=err)\n\n\ndef show_fields(*fields):\n\n def show(obj, verbose=False):\n if verbose:\n return obj\n about = {}\n for entry in fields:\n if isinstance(entry, str):\n entry = entry,\n name, *subpath = entry\n try:\n value = obj[name]\n except KeyError:\n continue\n for sp in subpath:\n if value is None:\n break\n elif callable(sp):\n value = sp(value)\n elif isinstance(value, list):\n value = [(v and v[sp]) for v in value]\n else:\n value = value[sp]\n about[name] = value\n return about\n return show\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef print_json(obj, err=False):\n if isinstance(obj, Iterator):\n obj = list(obj)\n click.echo(json.dumps(obj, sort_keys=True, indent=4, ensure_ascii=False\n ), err=err)\n\n\ndef show_fields(*fields):\n\n def show(obj, verbose=False):\n if verbose:\n return obj\n about = {}\n for entry in fields:\n if isinstance(entry, str):\n entry = entry,\n name, *subpath = entry\n try:\n value = obj[name]\n except KeyError:\n continue\n for sp in subpath:\n if value is None:\n break\n elif callable(sp):\n value = sp(value)\n elif isinstance(value, list):\n value = [(v and v[sp]) for v in value]\n else:\n value = value[sp]\n about[name] = value\n return about\n return show\n\n\nrepo_info = show_fields(('owner', 'login'), 'name', 'url', 'html_url',\n 'clone_url', 'git_url', 'ssh_url', 'full_name', 'description',\n 'homepage', 'private', 'default_branch', 'created_at', 'updated_at',\n 'pushed_at', 'fork', 'forks_count', 'watchers_count', 'size',\n 'subscribers_count', 'stargazers_count', 'id', 'language',\n 'network_count', 'open_issues_count', ('parent', 'full_name'), (\n 'source', 'full_name'))\ngist_info = show_fields('id', 'url', 'git_push_url', ('files', lambda files:\n {fname: {k: v for k, v in about.items() if k != 'content'} for fname,\n about in files.items()}), 'public', 'html_url', ('owner', 'login'),\n 'description', 'created_at', 'updated_at', 'comments', ('fork_of', 'id'\n ), ('forks', 'id'))\nissue_info = show_fields(('assignees', 'login'), 'closed_at', ('closed_by',\n 'login'), 'comments', 'created_at', 'html_url', 'id', ('labels', 'name'\n ), 'locked', ('milestone', 'title'), 'number', 'state', 'title',\n 'updated_at', 'url', ('user', 'login'), 'repository_url')\n",
"step-4": "from collections.abc import Iterator\nimport json\nimport click\n\n\ndef print_json(obj, err=False):\n if isinstance(obj, Iterator):\n obj = list(obj)\n click.echo(json.dumps(obj, sort_keys=True, indent=4, ensure_ascii=False\n ), err=err)\n\n\ndef show_fields(*fields):\n\n def show(obj, verbose=False):\n if verbose:\n return obj\n about = {}\n for entry in fields:\n if isinstance(entry, str):\n entry = entry,\n name, *subpath = entry\n try:\n value = obj[name]\n except KeyError:\n continue\n for sp in subpath:\n if value is None:\n break\n elif callable(sp):\n value = sp(value)\n elif isinstance(value, list):\n value = [(v and v[sp]) for v in value]\n else:\n value = value[sp]\n about[name] = value\n return about\n return show\n\n\nrepo_info = show_fields(('owner', 'login'), 'name', 'url', 'html_url',\n 'clone_url', 'git_url', 'ssh_url', 'full_name', 'description',\n 'homepage', 'private', 'default_branch', 'created_at', 'updated_at',\n 'pushed_at', 'fork', 'forks_count', 'watchers_count', 'size',\n 'subscribers_count', 'stargazers_count', 'id', 'language',\n 'network_count', 'open_issues_count', ('parent', 'full_name'), (\n 'source', 'full_name'))\ngist_info = show_fields('id', 'url', 'git_push_url', ('files', lambda files:\n {fname: {k: v for k, v in about.items() if k != 'content'} for fname,\n about in files.items()}), 'public', 'html_url', ('owner', 'login'),\n 'description', 'created_at', 'updated_at', 'comments', ('fork_of', 'id'\n ), ('forks', 'id'))\nissue_info = show_fields(('assignees', 'login'), 'closed_at', ('closed_by',\n 'login'), 'comments', 'created_at', 'html_url', 'id', ('labels', 'name'\n ), 'locked', ('milestone', 'title'), 'number', 'state', 'title',\n 'updated_at', 'url', ('user', 'login'), 'repository_url')\n",
"step-5": "from collections.abc import Iterator\nimport json\nimport click\n\ndef print_json(obj, err=False):\n if isinstance(obj, Iterator):\n obj = list(obj)\n click.echo(json.dumps(obj, sort_keys=True, indent=4, ensure_ascii=False),\n err=err)\n\ndef show_fields(*fields):\n def show(obj, verbose=False):\n if verbose:\n return obj\n about = {}\n for entry in fields:\n if isinstance(entry, str):\n entry = (entry,)\n name, *subpath = entry\n try:\n value = obj[name]\n except KeyError:\n continue\n for sp in subpath:\n if value is None:\n break\n elif callable(sp):\n value = sp(value)\n elif isinstance(value, list):\n value = [v and v[sp] for v in value]\n else:\n value = value[sp]\n about[name] = value\n return about\n return show\n\nrepo_info = show_fields(\n (\"owner\", \"login\"),\n \"name\",\n \"url\",\n \"html_url\",\n \"clone_url\",\n \"git_url\",\n \"ssh_url\",\n \"full_name\",\n \"description\",\n \"homepage\",\n \"private\",\n \"default_branch\",\n \"created_at\",\n \"updated_at\",\n \"pushed_at\",\n \"fork\",\n \"forks_count\",\n \"watchers_count\",\n \"size\",\n \"subscribers_count\",\n \"stargazers_count\",\n \"id\",\n \"language\",\n \"network_count\",\n \"open_issues_count\",\n (\"parent\", \"full_name\"),\n (\"source\", \"full_name\"),\n)\n\ngist_info = show_fields(\n \"id\",\n \"url\",\n \"git_push_url\",\n (\"files\", lambda files: {\n fname: {k:v for k,v in about.items() if k != 'content'}\n for fname, about in files.items()\n }),\n \"public\",\n \"html_url\",\n (\"owner\", \"login\"),\n \"description\",\n \"created_at\",\n \"updated_at\",\n \"comments\",\n (\"fork_of\", \"id\"),\n (\"forks\", \"id\"),\n)\n\nissue_info = show_fields(\n (\"assignees\", \"login\"),\n \"closed_at\",\n (\"closed_by\", \"login\"),\n \"comments\",\n \"created_at\",\n \"html_url\",\n \"id\",\n (\"labels\", \"name\"),\n \"locked\",\n (\"milestone\", \"title\"),\n \"number\",\n \"state\",\n \"title\",\n \"updated_at\",\n \"url\",\n (\"user\", \"login\"),\n \"repository_url\",\n ### pull_request\n)\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
# Let's look at the lowercase letters.
import string
alphabet = " " + string.ascii_lowercase
|
normal
|
{
"blob_id": "da3be0d3b815e11d292a7c7e8f5ce32b35580f98",
"index": 1016,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nalphabet = ' ' + string.ascii_lowercase\n",
"step-3": "import string\nalphabet = ' ' + string.ascii_lowercase\n",
"step-4": "# Let's look at the lowercase letters.\nimport string\nalphabet = \" \" + string.ascii_lowercase\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import praw
import config
from imgurpython import ImgurClient
import datetime
from time import sleep
def respond_to_comment(comment, album_user, album_url, num_images, num_gifs):
body = "Here is an album of all unique image/gif posts made by " \
"[{user}]({album_url}). ({num_images} images" \
")".format(user=album_user.name, album_url=album_url, num_images=num_images, num_gifs=num_gifs)
comment.reply(body)
return
def create_album(user, imgur_client, reddit_client):
album = imgur_client.create_album({"title": user.name, "privacy": "hidden"})
urls = []
images = []
for submission in reddit_client.redditor(user.name).submissions.top("all"):
if not submission.is_self and submission.url not in urls:
urls.append(submission.url)
try:
image = imgur_client.upload_from_url(submission.url, config=None, anon=False)
images.append(image["id"])
# Sleep command to avoid exceeding rate limit
# 86400 seconds per day / 12500 requests per day = 1 request every 6.9 seconds
sleep(8)
except:
None
if len(images) > 0:
imgur_client.album_add_images(album["id"], images)
return album["id"]
def update_album(user, imgur_client, reddit_client):
return
def is_image(url):
return True
def is_gif(url):
return True
def run_bot():
reddit = praw.Reddit(
client_id=config.CLIENT_ID_REDDIT,
client_secret=config.SECRET_CODE_REDDIT,
user_agent=config.USER_AGENT_REDDIT,
username=config.USERNAME_REDDIT,
password=config.PASSWORD_REDDIT
)
client=ImgurClient(
client_id=config.CLIENT_ID_IMGUR,
client_secret=config.SECRET_CODE_IMGUR,
access_token=config.ACCESS_TOKEN_IMGUR,
refresh_token=config.REFRESH_TOKEN_IMGUR
)
login_time = datetime.datetime.now(datetime.timezone.utc).timestamp()
print('Bot Initiation Successful')
print("Logged in at: {time}".format(time = login_time))
print("Logged into Reddit as: {user}".format(user=reddit.user.me().name))
print("Logged into Imgur as: {imgur_user}".format(imgur_user=""))
print("{api_calls} Imgur API calls remaining for the day.".format(api_calls=client.credits["ClientRemaining"]))
print("----------")
default_url = "https://imgur.com/"
command_call = '!compile-album'
subreddit = reddit.subreddit("all")
for comment in subreddit.stream.comments():
if command_call in comment.body and comment.created_utc > login_time:
parent_id = comment.parent_id
if parent_id[0:3] == "t1_":
parent_comment = reddit.comment(id=parent_id[3:])
album_id = create_album(parent_comment.author, client, reddit)
album = client.get_album(album_id)
respond_to_comment(comment, parent_comment.author, album.link, album.images_count, 0)
elif parent_id[0:3] == "t3_":
parent_submission = reddit.submission(id=parent_id[3:])
album_id = create_album(parent_submission.author, client, reddit)
album = client.get_album(album_id)
respond_to_comment(comment, parent_submission.author, album.link, album.images_count, 0)
run_bot()
|
normal
|
{
"blob_id": "ca009022832963934230e356f9ea9eaedac7378b",
"index": 1745,
"step-1": "<mask token>\n\n\ndef update_album(user, imgur_client, reddit_client):\n return\n\n\n<mask token>\n\n\ndef is_gif(url):\n return True\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef respond_to_comment(comment, album_user, album_url, num_images, num_gifs):\n body = (\n 'Here is an album of all unique image/gif posts made by [{user}]({album_url}). ({num_images} images)'\n .format(user=album_user.name, album_url=album_url, num_images=\n num_images, num_gifs=num_gifs))\n comment.reply(body)\n return\n\n\ndef create_album(user, imgur_client, reddit_client):\n album = imgur_client.create_album({'title': user.name, 'privacy': 'hidden'}\n )\n urls = []\n images = []\n for submission in reddit_client.redditor(user.name).submissions.top('all'):\n if not submission.is_self and submission.url not in urls:\n urls.append(submission.url)\n try:\n image = imgur_client.upload_from_url(submission.url, config\n =None, anon=False)\n images.append(image['id'])\n sleep(8)\n except:\n None\n if len(images) > 0:\n imgur_client.album_add_images(album['id'], images)\n return album['id']\n\n\ndef update_album(user, imgur_client, reddit_client):\n return\n\n\ndef is_image(url):\n return True\n\n\ndef is_gif(url):\n return True\n\n\ndef run_bot():\n reddit = praw.Reddit(client_id=config.CLIENT_ID_REDDIT, client_secret=\n config.SECRET_CODE_REDDIT, user_agent=config.USER_AGENT_REDDIT,\n username=config.USERNAME_REDDIT, password=config.PASSWORD_REDDIT)\n client = ImgurClient(client_id=config.CLIENT_ID_IMGUR, client_secret=\n config.SECRET_CODE_IMGUR, access_token=config.ACCESS_TOKEN_IMGUR,\n refresh_token=config.REFRESH_TOKEN_IMGUR)\n login_time = datetime.datetime.now(datetime.timezone.utc).timestamp()\n print('Bot Initiation Successful')\n print('Logged in at: {time}'.format(time=login_time))\n print('Logged into Reddit as: {user}'.format(user=reddit.user.me().name))\n print('Logged into Imgur as: {imgur_user}'.format(imgur_user=''))\n print('{api_calls} Imgur API calls remaining for the day.'.format(\n api_calls=client.credits['ClientRemaining']))\n print('----------')\n default_url = 'https://imgur.com/'\n command_call = '!compile-album'\n subreddit = reddit.subreddit('all')\n for comment in subreddit.stream.comments():\n if command_call in comment.body and comment.created_utc > login_time:\n parent_id = comment.parent_id\n if parent_id[0:3] == 't1_':\n parent_comment = reddit.comment(id=parent_id[3:])\n album_id = create_album(parent_comment.author, client, reddit)\n album = client.get_album(album_id)\n respond_to_comment(comment, parent_comment.author, album.\n link, album.images_count, 0)\n elif parent_id[0:3] == 't3_':\n parent_submission = reddit.submission(id=parent_id[3:])\n album_id = create_album(parent_submission.author, client,\n reddit)\n album = client.get_album(album_id)\n respond_to_comment(comment, parent_submission.author, album\n .link, album.images_count, 0)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef respond_to_comment(comment, album_user, album_url, num_images, num_gifs):\n body = (\n 'Here is an album of all unique image/gif posts made by [{user}]({album_url}). ({num_images} images)'\n .format(user=album_user.name, album_url=album_url, num_images=\n num_images, num_gifs=num_gifs))\n comment.reply(body)\n return\n\n\ndef create_album(user, imgur_client, reddit_client):\n album = imgur_client.create_album({'title': user.name, 'privacy': 'hidden'}\n )\n urls = []\n images = []\n for submission in reddit_client.redditor(user.name).submissions.top('all'):\n if not submission.is_self and submission.url not in urls:\n urls.append(submission.url)\n try:\n image = imgur_client.upload_from_url(submission.url, config\n =None, anon=False)\n images.append(image['id'])\n sleep(8)\n except:\n None\n if len(images) > 0:\n imgur_client.album_add_images(album['id'], images)\n return album['id']\n\n\ndef update_album(user, imgur_client, reddit_client):\n return\n\n\ndef is_image(url):\n return True\n\n\ndef is_gif(url):\n return True\n\n\ndef run_bot():\n reddit = praw.Reddit(client_id=config.CLIENT_ID_REDDIT, client_secret=\n config.SECRET_CODE_REDDIT, user_agent=config.USER_AGENT_REDDIT,\n username=config.USERNAME_REDDIT, password=config.PASSWORD_REDDIT)\n client = ImgurClient(client_id=config.CLIENT_ID_IMGUR, client_secret=\n config.SECRET_CODE_IMGUR, access_token=config.ACCESS_TOKEN_IMGUR,\n refresh_token=config.REFRESH_TOKEN_IMGUR)\n login_time = datetime.datetime.now(datetime.timezone.utc).timestamp()\n print('Bot Initiation Successful')\n print('Logged in at: {time}'.format(time=login_time))\n print('Logged into Reddit as: {user}'.format(user=reddit.user.me().name))\n print('Logged into Imgur as: {imgur_user}'.format(imgur_user=''))\n print('{api_calls} Imgur API calls remaining for the day.'.format(\n api_calls=client.credits['ClientRemaining']))\n print('----------')\n default_url = 'https://imgur.com/'\n command_call = '!compile-album'\n subreddit = reddit.subreddit('all')\n for comment in subreddit.stream.comments():\n if command_call in comment.body and comment.created_utc > login_time:\n parent_id = comment.parent_id\n if parent_id[0:3] == 't1_':\n parent_comment = reddit.comment(id=parent_id[3:])\n album_id = create_album(parent_comment.author, client, reddit)\n album = client.get_album(album_id)\n respond_to_comment(comment, parent_comment.author, album.\n link, album.images_count, 0)\n elif parent_id[0:3] == 't3_':\n parent_submission = reddit.submission(id=parent_id[3:])\n album_id = create_album(parent_submission.author, client,\n reddit)\n album = client.get_album(album_id)\n respond_to_comment(comment, parent_submission.author, album\n .link, album.images_count, 0)\n\n\nrun_bot()\n",
"step-4": "import praw\nimport config\nfrom imgurpython import ImgurClient\nimport datetime\nfrom time import sleep\n\n\ndef respond_to_comment(comment, album_user, album_url, num_images, num_gifs):\n body = (\n 'Here is an album of all unique image/gif posts made by [{user}]({album_url}). ({num_images} images)'\n .format(user=album_user.name, album_url=album_url, num_images=\n num_images, num_gifs=num_gifs))\n comment.reply(body)\n return\n\n\ndef create_album(user, imgur_client, reddit_client):\n album = imgur_client.create_album({'title': user.name, 'privacy': 'hidden'}\n )\n urls = []\n images = []\n for submission in reddit_client.redditor(user.name).submissions.top('all'):\n if not submission.is_self and submission.url not in urls:\n urls.append(submission.url)\n try:\n image = imgur_client.upload_from_url(submission.url, config\n =None, anon=False)\n images.append(image['id'])\n sleep(8)\n except:\n None\n if len(images) > 0:\n imgur_client.album_add_images(album['id'], images)\n return album['id']\n\n\ndef update_album(user, imgur_client, reddit_client):\n return\n\n\ndef is_image(url):\n return True\n\n\ndef is_gif(url):\n return True\n\n\ndef run_bot():\n reddit = praw.Reddit(client_id=config.CLIENT_ID_REDDIT, client_secret=\n config.SECRET_CODE_REDDIT, user_agent=config.USER_AGENT_REDDIT,\n username=config.USERNAME_REDDIT, password=config.PASSWORD_REDDIT)\n client = ImgurClient(client_id=config.CLIENT_ID_IMGUR, client_secret=\n config.SECRET_CODE_IMGUR, access_token=config.ACCESS_TOKEN_IMGUR,\n refresh_token=config.REFRESH_TOKEN_IMGUR)\n login_time = datetime.datetime.now(datetime.timezone.utc).timestamp()\n print('Bot Initiation Successful')\n print('Logged in at: {time}'.format(time=login_time))\n print('Logged into Reddit as: {user}'.format(user=reddit.user.me().name))\n print('Logged into Imgur as: {imgur_user}'.format(imgur_user=''))\n print('{api_calls} Imgur API calls remaining for the day.'.format(\n api_calls=client.credits['ClientRemaining']))\n print('----------')\n default_url = 'https://imgur.com/'\n command_call = '!compile-album'\n subreddit = reddit.subreddit('all')\n for comment in subreddit.stream.comments():\n if command_call in comment.body and comment.created_utc > login_time:\n parent_id = comment.parent_id\n if parent_id[0:3] == 't1_':\n parent_comment = reddit.comment(id=parent_id[3:])\n album_id = create_album(parent_comment.author, client, reddit)\n album = client.get_album(album_id)\n respond_to_comment(comment, parent_comment.author, album.\n link, album.images_count, 0)\n elif parent_id[0:3] == 't3_':\n parent_submission = reddit.submission(id=parent_id[3:])\n album_id = create_album(parent_submission.author, client,\n reddit)\n album = client.get_album(album_id)\n respond_to_comment(comment, parent_submission.author, album\n .link, album.images_count, 0)\n\n\nrun_bot()\n",
"step-5": "import praw\nimport config\nfrom imgurpython import ImgurClient\nimport datetime\nfrom time import sleep\n\n\ndef respond_to_comment(comment, album_user, album_url, num_images, num_gifs):\n body = \"Here is an album of all unique image/gif posts made by \" \\\n \"[{user}]({album_url}). ({num_images} images\" \\\n \")\".format(user=album_user.name, album_url=album_url, num_images=num_images, num_gifs=num_gifs)\n comment.reply(body)\n return\n\n\ndef create_album(user, imgur_client, reddit_client):\n album = imgur_client.create_album({\"title\": user.name, \"privacy\": \"hidden\"})\n urls = []\n images = []\n for submission in reddit_client.redditor(user.name).submissions.top(\"all\"):\n if not submission.is_self and submission.url not in urls:\n urls.append(submission.url)\n try:\n image = imgur_client.upload_from_url(submission.url, config=None, anon=False)\n images.append(image[\"id\"])\n # Sleep command to avoid exceeding rate limit\n # 86400 seconds per day / 12500 requests per day = 1 request every 6.9 seconds\n sleep(8)\n except:\n None\n if len(images) > 0:\n imgur_client.album_add_images(album[\"id\"], images)\n return album[\"id\"]\n\n\ndef update_album(user, imgur_client, reddit_client):\n return\n\n\ndef is_image(url):\n return True\n\n\ndef is_gif(url):\n return True\n\n\ndef run_bot():\n reddit = praw.Reddit(\n client_id=config.CLIENT_ID_REDDIT,\n client_secret=config.SECRET_CODE_REDDIT,\n user_agent=config.USER_AGENT_REDDIT,\n username=config.USERNAME_REDDIT,\n password=config.PASSWORD_REDDIT\n )\n\n client=ImgurClient(\n client_id=config.CLIENT_ID_IMGUR,\n client_secret=config.SECRET_CODE_IMGUR,\n access_token=config.ACCESS_TOKEN_IMGUR,\n refresh_token=config.REFRESH_TOKEN_IMGUR\n )\n login_time = datetime.datetime.now(datetime.timezone.utc).timestamp()\n print('Bot Initiation Successful')\n print(\"Logged in at: {time}\".format(time = login_time))\n print(\"Logged into Reddit as: {user}\".format(user=reddit.user.me().name))\n print(\"Logged into Imgur as: {imgur_user}\".format(imgur_user=\"\"))\n print(\"{api_calls} Imgur API calls remaining for the day.\".format(api_calls=client.credits[\"ClientRemaining\"]))\n print(\"----------\")\n default_url = \"https://imgur.com/\"\n command_call = '!compile-album'\n subreddit = reddit.subreddit(\"all\")\n for comment in subreddit.stream.comments():\n if command_call in comment.body and comment.created_utc > login_time:\n parent_id = comment.parent_id\n if parent_id[0:3] == \"t1_\":\n parent_comment = reddit.comment(id=parent_id[3:])\n album_id = create_album(parent_comment.author, client, reddit)\n album = client.get_album(album_id)\n respond_to_comment(comment, parent_comment.author, album.link, album.images_count, 0)\n elif parent_id[0:3] == \"t3_\":\n parent_submission = reddit.submission(id=parent_id[3:])\n album_id = create_album(parent_submission.author, client, reddit)\n album = client.get_album(album_id)\n respond_to_comment(comment, parent_submission.author, album.link, album.images_count, 0)\n\n\nrun_bot()",
"step-ids": [
2,
6,
7,
8,
9
]
}
|
[
2,
6,
7,
8,
9
] |
<|reserved_special_token_0|>
def lab1(x):
list1 = []
for i in range(4):
sum = x * i
list1.append(sum)
return list1
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def lab1(x):
list1 = []
for i in range(4):
sum = x * i
list1.append(sum)
return list1
def func1(x):
list2 = []
for m in multipliers():
list2.append(m(x))
return list2
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def multipliers():
return lab1(x)
def lab1(x):
list1 = []
for i in range(4):
sum = x * i
list1.append(sum)
return list1
def func1(x):
list2 = []
for m in multipliers():
list2.append(m(x))
return list2
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def multipliers():
return lab1(x)
def lab1(x):
list1 = []
for i in range(4):
sum = x * i
list1.append(sum)
return list1
def func1(x):
list2 = []
for m in multipliers():
list2.append(m(x))
return list2
print(func1(3))
<|reserved_special_token_1|>
# -*- coding:utf-8 -*-
'''
@author:oldwai
'''
# email: frankandrew@163.com
def multipliers():
return lab1(x)
def lab1(x):
list1 = []
for i in range(4):
sum = x*i
list1.append(sum)
return list1
#print ([m(2) for m in multipliers()])
def func1(x):
list2 = []
for m in multipliers():
list2.append(m(x))
return list2
print(func1(3))
|
flexible
|
{
"blob_id": "807e19f09f4a46b6c39457b8916714e2c54c3e8d",
"index": 5802,
"step-1": "<mask token>\n\n\ndef lab1(x):\n list1 = []\n for i in range(4):\n sum = x * i\n list1.append(sum)\n return list1\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef lab1(x):\n list1 = []\n for i in range(4):\n sum = x * i\n list1.append(sum)\n return list1\n\n\ndef func1(x):\n list2 = []\n for m in multipliers():\n list2.append(m(x))\n return list2\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef multipliers():\n return lab1(x)\n\n\ndef lab1(x):\n list1 = []\n for i in range(4):\n sum = x * i\n list1.append(sum)\n return list1\n\n\ndef func1(x):\n list2 = []\n for m in multipliers():\n list2.append(m(x))\n return list2\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef multipliers():\n return lab1(x)\n\n\ndef lab1(x):\n list1 = []\n for i in range(4):\n sum = x * i\n list1.append(sum)\n return list1\n\n\ndef func1(x):\n list2 = []\n for m in multipliers():\n list2.append(m(x))\n return list2\n\n\nprint(func1(3))\n",
"step-5": "# -*- coding:utf-8 -*-\r\n'''\r\n@author:oldwai\r\n'''\r\n# email: frankandrew@163.com\r\n\r\n\r\ndef multipliers():\r\n return lab1(x)\r\n\r\n\r\ndef lab1(x):\r\n list1 = []\r\n for i in range(4):\r\n sum = x*i\r\n list1.append(sum)\r\n return list1\r\n\r\n#print ([m(2) for m in multipliers()])\r\ndef func1(x):\r\n list2 = []\r\n for m in multipliers():\r\n list2.append(m(x))\r\n return list2\r\n\r\nprint(func1(3))",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
class QRCodeConfig(PluginConfig):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class QRCodeConfig(PluginConfig):
name = 'netbox_qrcode'
verbose_name = 'qrcode'
description = 'Generate QR codes for the objects'
version = __version__
author = 'Nikolay Yuzefovich'
author_email = 'mgk.kolek@gmail.com'
required_settings = []
default_settings = {'with_text': True, 'text_fields': ['name', 'serial'
], 'font': 'TahomaBold', 'custom_text': None, 'text_location':
'right', 'qr_version': 1, 'qr_error_correction': 0, 'qr_box_size':
6, 'qr_border': 4, 'device': {'text_fields': ['name', 'serial']},
'rack': {'text_fields': ['name']}, 'cable': {'text_fields': [
'_termination_a_device', 'termination_a', '_termination_b_device',
'termination_b']}}
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class QRCodeConfig(PluginConfig):
name = 'netbox_qrcode'
verbose_name = 'qrcode'
description = 'Generate QR codes for the objects'
version = __version__
author = 'Nikolay Yuzefovich'
author_email = 'mgk.kolek@gmail.com'
required_settings = []
default_settings = {'with_text': True, 'text_fields': ['name', 'serial'
], 'font': 'TahomaBold', 'custom_text': None, 'text_location':
'right', 'qr_version': 1, 'qr_error_correction': 0, 'qr_box_size':
6, 'qr_border': 4, 'device': {'text_fields': ['name', 'serial']},
'rack': {'text_fields': ['name']}, 'cable': {'text_fields': [
'_termination_a_device', 'termination_a', '_termination_b_device',
'termination_b']}}
config = QRCodeConfig
<|reserved_special_token_1|>
from extras.plugins import PluginConfig
from .version import __version__
class QRCodeConfig(PluginConfig):
name = 'netbox_qrcode'
verbose_name = 'qrcode'
description = 'Generate QR codes for the objects'
version = __version__
author = 'Nikolay Yuzefovich'
author_email = 'mgk.kolek@gmail.com'
required_settings = []
default_settings = {'with_text': True, 'text_fields': ['name', 'serial'
], 'font': 'TahomaBold', 'custom_text': None, 'text_location':
'right', 'qr_version': 1, 'qr_error_correction': 0, 'qr_box_size':
6, 'qr_border': 4, 'device': {'text_fields': ['name', 'serial']},
'rack': {'text_fields': ['name']}, 'cable': {'text_fields': [
'_termination_a_device', 'termination_a', '_termination_b_device',
'termination_b']}}
config = QRCodeConfig
<|reserved_special_token_1|>
from extras.plugins import PluginConfig
from .version import __version__
class QRCodeConfig(PluginConfig):
name = 'netbox_qrcode'
verbose_name = 'qrcode'
description = 'Generate QR codes for the objects'
version = __version__
author = 'Nikolay Yuzefovich'
author_email = 'mgk.kolek@gmail.com'
required_settings = []
default_settings = {
'with_text': True,
'text_fields': ['name', 'serial'],
'font': 'TahomaBold',
'custom_text': None,
'text_location': 'right',
'qr_version': 1,
'qr_error_correction': 0,
'qr_box_size': 6,
'qr_border': 4,
'device': {
'text_fields': ['name', 'serial']
},
'rack': {
'text_fields': ['name']
},
'cable': {
'text_fields': [
'_termination_a_device',
'termination_a',
'_termination_b_device',
'termination_b',
]
}
}
config = QRCodeConfig # noqa E305
|
flexible
|
{
"blob_id": "6306acd1508698687842ba6b55a839743af420cc",
"index": 5840,
"step-1": "<mask token>\n\n\nclass QRCodeConfig(PluginConfig):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass QRCodeConfig(PluginConfig):\n name = 'netbox_qrcode'\n verbose_name = 'qrcode'\n description = 'Generate QR codes for the objects'\n version = __version__\n author = 'Nikolay Yuzefovich'\n author_email = 'mgk.kolek@gmail.com'\n required_settings = []\n default_settings = {'with_text': True, 'text_fields': ['name', 'serial'\n ], 'font': 'TahomaBold', 'custom_text': None, 'text_location':\n 'right', 'qr_version': 1, 'qr_error_correction': 0, 'qr_box_size': \n 6, 'qr_border': 4, 'device': {'text_fields': ['name', 'serial']},\n 'rack': {'text_fields': ['name']}, 'cable': {'text_fields': [\n '_termination_a_device', 'termination_a', '_termination_b_device',\n 'termination_b']}}\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass QRCodeConfig(PluginConfig):\n name = 'netbox_qrcode'\n verbose_name = 'qrcode'\n description = 'Generate QR codes for the objects'\n version = __version__\n author = 'Nikolay Yuzefovich'\n author_email = 'mgk.kolek@gmail.com'\n required_settings = []\n default_settings = {'with_text': True, 'text_fields': ['name', 'serial'\n ], 'font': 'TahomaBold', 'custom_text': None, 'text_location':\n 'right', 'qr_version': 1, 'qr_error_correction': 0, 'qr_box_size': \n 6, 'qr_border': 4, 'device': {'text_fields': ['name', 'serial']},\n 'rack': {'text_fields': ['name']}, 'cable': {'text_fields': [\n '_termination_a_device', 'termination_a', '_termination_b_device',\n 'termination_b']}}\n\n\nconfig = QRCodeConfig\n",
"step-4": "from extras.plugins import PluginConfig\nfrom .version import __version__\n\n\nclass QRCodeConfig(PluginConfig):\n name = 'netbox_qrcode'\n verbose_name = 'qrcode'\n description = 'Generate QR codes for the objects'\n version = __version__\n author = 'Nikolay Yuzefovich'\n author_email = 'mgk.kolek@gmail.com'\n required_settings = []\n default_settings = {'with_text': True, 'text_fields': ['name', 'serial'\n ], 'font': 'TahomaBold', 'custom_text': None, 'text_location':\n 'right', 'qr_version': 1, 'qr_error_correction': 0, 'qr_box_size': \n 6, 'qr_border': 4, 'device': {'text_fields': ['name', 'serial']},\n 'rack': {'text_fields': ['name']}, 'cable': {'text_fields': [\n '_termination_a_device', 'termination_a', '_termination_b_device',\n 'termination_b']}}\n\n\nconfig = QRCodeConfig\n",
"step-5": "from extras.plugins import PluginConfig\nfrom .version import __version__\n\n\nclass QRCodeConfig(PluginConfig):\n name = 'netbox_qrcode'\n verbose_name = 'qrcode'\n description = 'Generate QR codes for the objects'\n version = __version__\n author = 'Nikolay Yuzefovich'\n author_email = 'mgk.kolek@gmail.com'\n required_settings = []\n default_settings = {\n 'with_text': True,\n 'text_fields': ['name', 'serial'],\n 'font': 'TahomaBold',\n 'custom_text': None,\n 'text_location': 'right',\n 'qr_version': 1,\n 'qr_error_correction': 0,\n 'qr_box_size': 6,\n 'qr_border': 4,\n 'device': {\n 'text_fields': ['name', 'serial']\n },\n 'rack': {\n 'text_fields': ['name']\n },\n 'cable': {\n 'text_fields': [\n '_termination_a_device',\n 'termination_a',\n '_termination_b_device',\n 'termination_b',\n ]\n }\n }\n\nconfig = QRCodeConfig # noqa E305\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
class PrimaryCategoryAdmin(admin.ModelAdmin):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class PlaceCategoryAdmin(admin.ModelAdmin):
list_display = ('category_name', 'is_paid', 'description', 'is_active',
'image', 'primary_category')
actions = None
def primary_category(self, obj):
return obj.primary_category.primary_name
def has_delete_permission(self, request, obj=None):
return False
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class PrimaryCategoryAdmin(admin.ModelAdmin):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def has_delete_permission(self, request, obj=None):
return False
class PlaceCategoryAdmin(admin.ModelAdmin):
list_display = ('category_name', 'is_paid', 'description', 'is_active',
'image', 'primary_category')
actions = None
def primary_category(self, obj):
return obj.primary_category.primary_name
def has_delete_permission(self, request, obj=None):
return False
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class PrimaryCategoryAdmin(admin.ModelAdmin):
list_display = 'primary_name', 'is_active', 'description', 'image'
actions = None
def has_delete_permission(self, request, obj=None):
return False
class PlaceCategoryAdmin(admin.ModelAdmin):
list_display = ('category_name', 'is_paid', 'description', 'is_active',
'image', 'primary_category')
actions = None
def primary_category(self, obj):
return obj.primary_category.primary_name
def has_delete_permission(self, request, obj=None):
return False
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class PrimaryCategoryAdmin(admin.ModelAdmin):
list_display = 'primary_name', 'is_active', 'description', 'image'
actions = None
def has_delete_permission(self, request, obj=None):
return False
class PlaceCategoryAdmin(admin.ModelAdmin):
list_display = ('category_name', 'is_paid', 'description', 'is_active',
'image', 'primary_category')
actions = None
def primary_category(self, obj):
return obj.primary_category.primary_name
def has_delete_permission(self, request, obj=None):
return False
admin.site.register(PrimaryCategory, PrimaryCategoryAdmin)
admin.site.register(PlaceCategory, PlaceCategoryAdmin)
<|reserved_special_token_1|>
from django.contrib import admin
from search.models import PrimaryCategory,PlaceCategory
class PrimaryCategoryAdmin(admin.ModelAdmin):
list_display = ('primary_name','is_active','description','image',)
actions = None
def has_delete_permission(self,request,obj=None):
return False
class PlaceCategoryAdmin(admin.ModelAdmin):
list_display = ('category_name','is_paid','description','is_active','image','primary_category')
actions = None
def primary_category(self,obj):
return obj.primary_category.primary_name
def has_delete_permission(self,request,obj=None):
return False
admin.site.register(PrimaryCategory,PrimaryCategoryAdmin)
admin.site.register(PlaceCategory,PlaceCategoryAdmin)
|
flexible
|
{
"blob_id": "606abf8501d85c29051df4bf0276ed5b098ee6c5",
"index": 8679,
"step-1": "<mask token>\n\n\nclass PrimaryCategoryAdmin(admin.ModelAdmin):\n <mask token>\n <mask token>\n <mask token>\n\n\nclass PlaceCategoryAdmin(admin.ModelAdmin):\n list_display = ('category_name', 'is_paid', 'description', 'is_active',\n 'image', 'primary_category')\n actions = None\n\n def primary_category(self, obj):\n return obj.primary_category.primary_name\n\n def has_delete_permission(self, request, obj=None):\n return False\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass PrimaryCategoryAdmin(admin.ModelAdmin):\n <mask token>\n <mask token>\n\n def has_delete_permission(self, request, obj=None):\n return False\n\n\nclass PlaceCategoryAdmin(admin.ModelAdmin):\n list_display = ('category_name', 'is_paid', 'description', 'is_active',\n 'image', 'primary_category')\n actions = None\n\n def primary_category(self, obj):\n return obj.primary_category.primary_name\n\n def has_delete_permission(self, request, obj=None):\n return False\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass PrimaryCategoryAdmin(admin.ModelAdmin):\n list_display = 'primary_name', 'is_active', 'description', 'image'\n actions = None\n\n def has_delete_permission(self, request, obj=None):\n return False\n\n\nclass PlaceCategoryAdmin(admin.ModelAdmin):\n list_display = ('category_name', 'is_paid', 'description', 'is_active',\n 'image', 'primary_category')\n actions = None\n\n def primary_category(self, obj):\n return obj.primary_category.primary_name\n\n def has_delete_permission(self, request, obj=None):\n return False\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass PrimaryCategoryAdmin(admin.ModelAdmin):\n list_display = 'primary_name', 'is_active', 'description', 'image'\n actions = None\n\n def has_delete_permission(self, request, obj=None):\n return False\n\n\nclass PlaceCategoryAdmin(admin.ModelAdmin):\n list_display = ('category_name', 'is_paid', 'description', 'is_active',\n 'image', 'primary_category')\n actions = None\n\n def primary_category(self, obj):\n return obj.primary_category.primary_name\n\n def has_delete_permission(self, request, obj=None):\n return False\n\n\nadmin.site.register(PrimaryCategory, PrimaryCategoryAdmin)\nadmin.site.register(PlaceCategory, PlaceCategoryAdmin)\n",
"step-5": "from django.contrib import admin\nfrom search.models import PrimaryCategory,PlaceCategory\n\nclass PrimaryCategoryAdmin(admin.ModelAdmin):\n \n list_display = ('primary_name','is_active','description','image',)\n actions = None\n \n def has_delete_permission(self,request,obj=None):\n return False\n \n \nclass PlaceCategoryAdmin(admin.ModelAdmin):\n \n list_display = ('category_name','is_paid','description','is_active','image','primary_category')\n actions = None\n \n \n def primary_category(self,obj):\n \n return obj.primary_category.primary_name\n \n def has_delete_permission(self,request,obj=None):\n return False\n\nadmin.site.register(PrimaryCategory,PrimaryCategoryAdmin) \nadmin.site.register(PlaceCategory,PlaceCategoryAdmin)\n\n \n",
"step-ids": [
5,
6,
7,
8,
10
]
}
|
[
5,
6,
7,
8,
10
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.