code
stringlengths 13
6.09M
| order_type
stringclasses 2
values | original_example
dict | step_ids
listlengths 1
5
|
|---|---|---|---|
import dash
import dash_core_components as dcc
import dash_html_components as html

app = dash.Dash()

# Demo layout: a multi-select dropdown, a radio group and a slider.
app.layout = html.Div(
    children=[
        html.Label('Dropdown'),  # fixed user-facing typo: was 'Dropdowm'
        dcc.Dropdown(
            id='my-dropdown',
            # Options are given as a list of dicts: label shown, value stored.
            options=[
                {'label': 'İstanbul', 'value': 34},
                {'label': 'Ankara', 'value': 6},
                {'label': 'Erzurum', 'value': 25},
            ],
            multi=True,
            # multi=True expects a list of selected values, not a scalar.
            value=[34],
            searchable=True,
        ),
        html.Label('Radio'),
        dcc.RadioItems(
            id='my-radio',
            options=[
                {'label': 'İstanbul', 'value': 34},
                {'label': 'Ankara', 'value': 6},
                {'label': 'Erzurum', 'value': 25},
            ],
            value=34,
        ),
        html.Label('Slider'),
        dcc.Slider(
            id='my-slider',
            min=0,
            max=20,
            step=0.5,
            value=10,
            # Dash requires mark labels to be strings; int values are rejected.
            marks={i: str(i) for i in range(0, 21)},
        ),
    ]
)

if __name__ == '__main__':
    app.run_server()
|
normal
|
{
"blob_id": "443bf59bc3c5ed2114f0c276aa7134ff5bf7fb64",
"index": 7264,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n app.run_server()\n",
"step-3": "<mask token>\napp = dash.Dash()\napp.layout = html.Div(children=[html.Label('Dropdowm'), dcc.Dropdown(id=\n 'my-dropdown', options=[{'label': 'İstanbul', 'value': 34}, {'label':\n 'Ankara', 'value': 6}, {'label': 'Erzurum', 'value': 25}], multi=True,\n value=34, searchable=True), html.Label('Radio'), dcc.RadioItems(id=\n 'my-radio', options=[{'label': 'İstanbul', 'value': 34}, {'label':\n 'Ankara', 'value': 6}, {'label': 'Erzurum', 'value': 25}], value=34),\n html.Label('Slider'), dcc.Slider(id='my-slider', min=0, max=20, step=\n 0.5, value=10, marks={i: i for i in range(0, 21)})])\nif __name__ == '__main__':\n app.run_server()\n",
"step-4": "import dash\nimport dash_core_components as dcc\nimport dash_html_components as html\napp = dash.Dash()\napp.layout = html.Div(children=[html.Label('Dropdowm'), dcc.Dropdown(id=\n 'my-dropdown', options=[{'label': 'İstanbul', 'value': 34}, {'label':\n 'Ankara', 'value': 6}, {'label': 'Erzurum', 'value': 25}], multi=True,\n value=34, searchable=True), html.Label('Radio'), dcc.RadioItems(id=\n 'my-radio', options=[{'label': 'İstanbul', 'value': 34}, {'label':\n 'Ankara', 'value': 6}, {'label': 'Erzurum', 'value': 25}], value=34),\n html.Label('Slider'), dcc.Slider(id='my-slider', min=0, max=20, step=\n 0.5, value=10, marks={i: i for i in range(0, 21)})])\nif __name__ == '__main__':\n app.run_server()\n",
"step-5": "import dash\nimport dash_core_components as dcc\nimport dash_html_components as html\n\napp = dash.Dash()\n\napp.layout = html.Div(\n children=[\n html.Label('Dropdowm'),\n dcc.Dropdown(\n id='my-dropdown',\n options=[\n {'label': 'İstanbul', 'value': 34}, # seçeneleri dict tutan liste olarak veririz\n {'label': 'Ankara', 'value': 6},\n {'label': 'Erzurum', 'value': 25},\n ],\n multi=True,\n value=34,\n searchable=True,\n ),\n html.Label('Radio'),\n dcc.RadioItems(\n id='my-radio',\n options=[\n {'label': 'İstanbul', 'value': 34},\n {'label': 'Ankara', 'value': 6},\n {'label': 'Erzurum', 'value': 25},\n ],\n value=34,\n ),\n html.Label('Slider'),\n dcc.Slider(\n id='my-slider',\n min=0,\n max=20,\n step=0.5,\n value=10,\n marks={i: i for i in range(0, 21)}\n ),\n ]\n)\n\nif __name__ == '__main__':\n app.run_server()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import nltk
class Text(object):
    """Base wrapper pairing a raw string with its NLTK tokenizations."""

    def __init__(self, text):
        # Keep the raw text and pre-compute both tokenization views.
        self.text = text
        self.sents = nltk.sent_tokenize(text)
        self.words = nltk.word_tokenize(text)
class Passage(Text):
    """A titled story together with its comprehension questions."""

    def __init__(self, title, story, questions):
        Text.__init__(self, story)
        self.title = title
        self.questions = questions

    def display(self):
        """Pretty-print the passage, each question, its answers and the key.

        Single-argument print calls with parentheses are valid under both
        Python 2 and 3 (the original used Python-2-only `print expr`
        statements, which are a SyntaxError on Python 3).
        """
        print(self.title + '\n')
        print(self.text + '\n\n***\n')
        for q in self.questions:
            print('\n' + q.text + ' (' + q.qtype + ')')
            for a in q.answers:
                print('\t' + a.text)
            print('\n\tCorrect Answer: ' + q.correct_answer.text)
class Question(Text):
    """A question: its text, type, candidate answers and the correct one."""

    def __init__(self, qtext, qtype, answers, correct_answer):
        Text.__init__(self, qtext)
        self.correct_answer = correct_answer
        self.answers = answers
        self.qtype = qtype
class Answer(Text):
    """A single candidate answer; just tokenized answer text."""

    def __init__(self, atext):
        # Equivalent to Text.__init__(self, atext) under single inheritance.
        super(Answer, self).__init__(atext)
|
normal
|
{
"blob_id": "5830a6001d7db50002c44aede6fb10938fa01dd1",
"index": 320,
"step-1": "import nltk\n\nclass Text(object):\n \n def __init__(self, text):\n self.text = text\n self.words = nltk.word_tokenize(text)\n self.sents = nltk.sent_tokenize(text)\n\nclass Passage(Text):\n\n def __init__(self, title, story, questions):\n Text.__init__(self,story)\n self.title = title\n self.questions = questions\n \n def display(self):\n print self.title + '\\n'\n print self.text + '\\n\\n***\\n'\n for q in self.questions:\n print '\\n' + q.text + ' (' + q.qtype + ')'\n for a in q.answers:\n print '\\t' + a.text\n print '\\n\\tCorrect Answer: ' + q.correct_answer.text\n \nclass Question(Text):\n \n def __init__(self, qtext, qtype, answers, correct_answer):\n Text.__init__(self,qtext)\n self.qtype = qtype\n self.answers = answers\n self.correct_answer = correct_answer\n\nclass Answer(Text):\n \n def __init__(self, atext):\n Text.__init__(self,atext)",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def solveCoarse(world, aFine, MbFine, AbFine, boundaryConditions):
    """Solve the coarse-scale FEM problem by Galerkin projection.

    Assembles fine-scale mass/stiffness matrices, restricts them to the
    coarse space through the nodal prolongation matrix, solves on the free
    (non-Dirichlet) coarse nodes, and prolongates back to the fine mesh.

    Returns (uCoarseFull, uFineFull): the coarse solution and its
    fine-mesh interpolation.
    """
    NWorldCoarse = world.NWorldCoarse
    NWorldFine = world.NWorldCoarse * world.NCoarseElement
    NCoarseElement = world.NCoarseElement
    NpFine = np.prod(NWorldFine + 1)
    NpCoarse = np.prod(NWorldCoarse + 1)
    # Missing load terms default to zero right-hand sides.
    if MbFine is None:
        MbFine = np.zeros(NpFine)
    if AbFine is None:
        AbFine = np.zeros(NpFine)
    # boundaryConditions == 0 marks the fixed (Dirichlet) boundary segments.
    boundaryMap = boundaryConditions == 0
    fixedCoarse = util.boundarypIndexMap(NWorldCoarse, boundaryMap=boundaryMap)
    freeCoarse = np.setdiff1d(np.arange(NpCoarse), fixedCoarse)
    # Scalar coefficient -> standard local stiffness; otherwise matrix-valued.
    if aFine.ndim == 1:
        ALocFine = world.ALocFine
    else:
        ALocFine = world.ALocMatrixFine
    AFine = fem.assemblePatchMatrix(NWorldFine, ALocFine, aFine)
    MFine = fem.assemblePatchMatrix(NWorldFine, world.MLocFine)
    bFine = MFine * MbFine + AFine * AbFine
    # Galerkin restriction: basis maps coarse nodal values to the fine mesh.
    basis = fem.assembleProlongationMatrix(NWorldCoarse, NCoarseElement)
    bCoarse = basis.T * bFine
    ACoarse = basis.T * (AFine * basis)
    # Solve only on the free degrees of freedom.
    ACoarseFree = ACoarse[freeCoarse][:, freeCoarse]
    bCoarseFree = bCoarse[freeCoarse]
    uCoarseFree = linalg.linSolve(ACoarseFree, bCoarseFree)
    uCoarseFull = np.zeros(NpCoarse)
    uCoarseFull[freeCoarse] = uCoarseFree
    uCoarseFull = uCoarseFull  # no-op self-assignment (left as-is)
    uFineFull = basis * uCoarseFull
    return uCoarseFull, uFineFull
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def solveFine(world, aFine, MbFine, AbFine, boundaryConditions):
    """Solve the fine-scale FEM problem directly on the fine mesh.

    Returns (uFineFull, AFine, MFine): the fine solution together with the
    assembled stiffness and mass matrices.
    """
    NWorldCoarse = world.NWorldCoarse  # NOTE(review): unused local
    NWorldFine = world.NWorldCoarse * world.NCoarseElement
    NpFine = np.prod(NWorldFine + 1)
    # Missing load terms default to zero right-hand sides.
    if MbFine is None:
        MbFine = np.zeros(NpFine)
    if AbFine is None:
        AbFine = np.zeros(NpFine)
    # boundaryConditions == 0 marks the fixed (Dirichlet) boundary segments.
    boundaryMap = boundaryConditions == 0
    fixedFine = util.boundarypIndexMap(NWorldFine, boundaryMap=boundaryMap)
    freeFine = np.setdiff1d(np.arange(NpFine), fixedFine)
    # Scalar coefficient -> standard local stiffness; otherwise matrix-valued.
    if aFine.ndim == 1:
        ALocFine = world.ALocFine
    else:
        ALocFine = world.ALocMatrixFine
    AFine = fem.assemblePatchMatrix(NWorldFine, ALocFine, aFine)
    MFine = fem.assemblePatchMatrix(NWorldFine, world.MLocFine)
    bFine = MFine * MbFine + AFine * AbFine
    # Solve only on the free degrees of freedom.
    AFineFree = AFine[freeFine][:, freeFine]
    bFineFree = bFine[freeFine]
    uFineFree = linalg.linSolve(AFineFree, bFineFree)
    uFineFull = np.zeros(NpFine)
    uFineFull[freeFine] = uFineFree
    uFineFull = uFineFull  # no-op self-assignment (left as-is)
    return uFineFull, AFine, MFine
def solveCoarse(world, aFine, MbFine, AbFine, boundaryConditions):
    """Solve the coarse-scale FEM problem by Galerkin projection.

    Assembles fine-scale mass/stiffness matrices, restricts them to the
    coarse space through the nodal prolongation matrix, solves on the free
    (non-Dirichlet) coarse nodes, and prolongates back to the fine mesh.

    Returns (uCoarseFull, uFineFull): the coarse solution and its
    fine-mesh interpolation.
    """
    NWorldCoarse = world.NWorldCoarse
    NWorldFine = world.NWorldCoarse * world.NCoarseElement
    NCoarseElement = world.NCoarseElement
    NpFine = np.prod(NWorldFine + 1)
    NpCoarse = np.prod(NWorldCoarse + 1)
    # Missing load terms default to zero right-hand sides.
    if MbFine is None:
        MbFine = np.zeros(NpFine)
    if AbFine is None:
        AbFine = np.zeros(NpFine)
    # boundaryConditions == 0 marks the fixed (Dirichlet) boundary segments.
    boundaryMap = boundaryConditions == 0
    fixedCoarse = util.boundarypIndexMap(NWorldCoarse, boundaryMap=boundaryMap)
    freeCoarse = np.setdiff1d(np.arange(NpCoarse), fixedCoarse)
    # Scalar coefficient -> standard local stiffness; otherwise matrix-valued.
    if aFine.ndim == 1:
        ALocFine = world.ALocFine
    else:
        ALocFine = world.ALocMatrixFine
    AFine = fem.assemblePatchMatrix(NWorldFine, ALocFine, aFine)
    MFine = fem.assemblePatchMatrix(NWorldFine, world.MLocFine)
    bFine = MFine * MbFine + AFine * AbFine
    # Galerkin restriction: basis maps coarse nodal values to the fine mesh.
    basis = fem.assembleProlongationMatrix(NWorldCoarse, NCoarseElement)
    bCoarse = basis.T * bFine
    ACoarse = basis.T * (AFine * basis)
    # Solve only on the free degrees of freedom.
    ACoarseFree = ACoarse[freeCoarse][:, freeCoarse]
    bCoarseFree = bCoarse[freeCoarse]
    uCoarseFree = linalg.linSolve(ACoarseFree, bCoarseFree)
    uCoarseFull = np.zeros(NpCoarse)
    uCoarseFull[freeCoarse] = uCoarseFree
    uCoarseFull = uCoarseFull  # no-op self-assignment (left as-is)
    uFineFull = basis * uCoarseFull
    return uCoarseFull, uFineFull
<|reserved_special_token_1|>
import numpy as np
import scipy.sparse as sparse
from .world import World
from . import util
from . import fem
from . import linalg
def solveFine(world, aFine, MbFine, AbFine, boundaryConditions):
    """Solve the fine-scale FEM problem directly on the fine mesh.

    MbFine/AbFine are optional load vectors (None means a zero right-hand
    side).  Nodes on boundary segments where boundaryConditions == 0 are
    treated as fixed (Dirichlet).

    Returns (uFineFull, AFine, MFine): the fine solution together with the
    assembled stiffness and mass matrices.

    Changes vs. original: removed the unused local NWorldCoarse and the
    no-op self-assignment `uFineFull = uFineFull`.
    """
    NWorldFine = world.NWorldCoarse * world.NCoarseElement
    NpFine = np.prod(NWorldFine + 1)
    if MbFine is None:
        MbFine = np.zeros(NpFine)
    if AbFine is None:
        AbFine = np.zeros(NpFine)
    boundaryMap = boundaryConditions == 0
    fixedFine = util.boundarypIndexMap(NWorldFine, boundaryMap=boundaryMap)
    freeFine = np.setdiff1d(np.arange(NpFine), fixedFine)
    # Scalar coefficient -> standard local stiffness; otherwise matrix-valued.
    if aFine.ndim == 1:
        ALocFine = world.ALocFine
    else:
        ALocFine = world.ALocMatrixFine
    AFine = fem.assemblePatchMatrix(NWorldFine, ALocFine, aFine)
    MFine = fem.assemblePatchMatrix(NWorldFine, world.MLocFine)
    bFine = MFine * MbFine + AFine * AbFine
    # Restrict to the free (non-Dirichlet) degrees of freedom and solve.
    AFineFree = AFine[freeFine][:, freeFine]
    bFineFree = bFine[freeFine]
    uFineFree = linalg.linSolve(AFineFree, bFineFree)
    uFineFull = np.zeros(NpFine)
    uFineFull[freeFine] = uFineFree
    return uFineFull, AFine, MFine
def solveCoarse(world, aFine, MbFine, AbFine, boundaryConditions):
    """Solve the coarse-scale FEM problem by Galerkin projection.

    Assembles fine-scale mass/stiffness matrices, restricts them to the
    coarse space through the nodal prolongation matrix, solves on the free
    (non-Dirichlet) coarse nodes, and prolongates back to the fine mesh.

    Returns (uCoarseFull, uFineFull): the coarse solution and its
    fine-mesh interpolation.

    Changes vs. original: removed the no-op self-assignment
    `uCoarseFull = uCoarseFull`.
    """
    NWorldCoarse = world.NWorldCoarse
    NWorldFine = world.NWorldCoarse * world.NCoarseElement
    NCoarseElement = world.NCoarseElement
    NpFine = np.prod(NWorldFine + 1)
    NpCoarse = np.prod(NWorldCoarse + 1)
    if MbFine is None:
        MbFine = np.zeros(NpFine)
    if AbFine is None:
        AbFine = np.zeros(NpFine)
    # boundaryConditions == 0 marks the fixed (Dirichlet) boundary segments.
    boundaryMap = boundaryConditions == 0
    fixedCoarse = util.boundarypIndexMap(NWorldCoarse, boundaryMap=boundaryMap)
    freeCoarse = np.setdiff1d(np.arange(NpCoarse), fixedCoarse)
    # Scalar coefficient -> standard local stiffness; otherwise matrix-valued.
    if aFine.ndim == 1:
        ALocFine = world.ALocFine
    else:
        ALocFine = world.ALocMatrixFine
    AFine = fem.assemblePatchMatrix(NWorldFine, ALocFine, aFine)
    MFine = fem.assemblePatchMatrix(NWorldFine, world.MLocFine)
    bFine = MFine * MbFine + AFine * AbFine
    # Galerkin restriction: basis maps coarse nodal values to the fine mesh.
    basis = fem.assembleProlongationMatrix(NWorldCoarse, NCoarseElement)
    bCoarse = basis.T * bFine
    ACoarse = basis.T * (AFine * basis)
    ACoarseFree = ACoarse[freeCoarse][:, freeCoarse]
    bCoarseFree = bCoarse[freeCoarse]
    uCoarseFree = linalg.linSolve(ACoarseFree, bCoarseFree)
    uCoarseFull = np.zeros(NpCoarse)
    uCoarseFull[freeCoarse] = uCoarseFree
    uFineFull = basis * uCoarseFull
    return uCoarseFull, uFineFull
|
flexible
|
{
"blob_id": "1b3493322fa85c2fe26a7f308466c4a1c72d5b35",
"index": 4637,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef solveCoarse(world, aFine, MbFine, AbFine, boundaryConditions):\n NWorldCoarse = world.NWorldCoarse\n NWorldFine = world.NWorldCoarse * world.NCoarseElement\n NCoarseElement = world.NCoarseElement\n NpFine = np.prod(NWorldFine + 1)\n NpCoarse = np.prod(NWorldCoarse + 1)\n if MbFine is None:\n MbFine = np.zeros(NpFine)\n if AbFine is None:\n AbFine = np.zeros(NpFine)\n boundaryMap = boundaryConditions == 0\n fixedCoarse = util.boundarypIndexMap(NWorldCoarse, boundaryMap=boundaryMap)\n freeCoarse = np.setdiff1d(np.arange(NpCoarse), fixedCoarse)\n if aFine.ndim == 1:\n ALocFine = world.ALocFine\n else:\n ALocFine = world.ALocMatrixFine\n AFine = fem.assemblePatchMatrix(NWorldFine, ALocFine, aFine)\n MFine = fem.assemblePatchMatrix(NWorldFine, world.MLocFine)\n bFine = MFine * MbFine + AFine * AbFine\n basis = fem.assembleProlongationMatrix(NWorldCoarse, NCoarseElement)\n bCoarse = basis.T * bFine\n ACoarse = basis.T * (AFine * basis)\n ACoarseFree = ACoarse[freeCoarse][:, freeCoarse]\n bCoarseFree = bCoarse[freeCoarse]\n uCoarseFree = linalg.linSolve(ACoarseFree, bCoarseFree)\n uCoarseFull = np.zeros(NpCoarse)\n uCoarseFull[freeCoarse] = uCoarseFree\n uCoarseFull = uCoarseFull\n uFineFull = basis * uCoarseFull\n return uCoarseFull, uFineFull\n",
"step-3": "<mask token>\n\n\ndef solveFine(world, aFine, MbFine, AbFine, boundaryConditions):\n NWorldCoarse = world.NWorldCoarse\n NWorldFine = world.NWorldCoarse * world.NCoarseElement\n NpFine = np.prod(NWorldFine + 1)\n if MbFine is None:\n MbFine = np.zeros(NpFine)\n if AbFine is None:\n AbFine = np.zeros(NpFine)\n boundaryMap = boundaryConditions == 0\n fixedFine = util.boundarypIndexMap(NWorldFine, boundaryMap=boundaryMap)\n freeFine = np.setdiff1d(np.arange(NpFine), fixedFine)\n if aFine.ndim == 1:\n ALocFine = world.ALocFine\n else:\n ALocFine = world.ALocMatrixFine\n AFine = fem.assemblePatchMatrix(NWorldFine, ALocFine, aFine)\n MFine = fem.assemblePatchMatrix(NWorldFine, world.MLocFine)\n bFine = MFine * MbFine + AFine * AbFine\n AFineFree = AFine[freeFine][:, freeFine]\n bFineFree = bFine[freeFine]\n uFineFree = linalg.linSolve(AFineFree, bFineFree)\n uFineFull = np.zeros(NpFine)\n uFineFull[freeFine] = uFineFree\n uFineFull = uFineFull\n return uFineFull, AFine, MFine\n\n\ndef solveCoarse(world, aFine, MbFine, AbFine, boundaryConditions):\n NWorldCoarse = world.NWorldCoarse\n NWorldFine = world.NWorldCoarse * world.NCoarseElement\n NCoarseElement = world.NCoarseElement\n NpFine = np.prod(NWorldFine + 1)\n NpCoarse = np.prod(NWorldCoarse + 1)\n if MbFine is None:\n MbFine = np.zeros(NpFine)\n if AbFine is None:\n AbFine = np.zeros(NpFine)\n boundaryMap = boundaryConditions == 0\n fixedCoarse = util.boundarypIndexMap(NWorldCoarse, boundaryMap=boundaryMap)\n freeCoarse = np.setdiff1d(np.arange(NpCoarse), fixedCoarse)\n if aFine.ndim == 1:\n ALocFine = world.ALocFine\n else:\n ALocFine = world.ALocMatrixFine\n AFine = fem.assemblePatchMatrix(NWorldFine, ALocFine, aFine)\n MFine = fem.assemblePatchMatrix(NWorldFine, world.MLocFine)\n bFine = MFine * MbFine + AFine * AbFine\n basis = fem.assembleProlongationMatrix(NWorldCoarse, NCoarseElement)\n bCoarse = basis.T * bFine\n ACoarse = basis.T * (AFine * basis)\n ACoarseFree = ACoarse[freeCoarse][:, 
freeCoarse]\n bCoarseFree = bCoarse[freeCoarse]\n uCoarseFree = linalg.linSolve(ACoarseFree, bCoarseFree)\n uCoarseFull = np.zeros(NpCoarse)\n uCoarseFull[freeCoarse] = uCoarseFree\n uCoarseFull = uCoarseFull\n uFineFull = basis * uCoarseFull\n return uCoarseFull, uFineFull\n",
"step-4": "import numpy as np\nimport scipy.sparse as sparse\nfrom .world import World\nfrom . import util\nfrom . import fem\nfrom . import linalg\n\n\ndef solveFine(world, aFine, MbFine, AbFine, boundaryConditions):\n NWorldCoarse = world.NWorldCoarse\n NWorldFine = world.NWorldCoarse * world.NCoarseElement\n NpFine = np.prod(NWorldFine + 1)\n if MbFine is None:\n MbFine = np.zeros(NpFine)\n if AbFine is None:\n AbFine = np.zeros(NpFine)\n boundaryMap = boundaryConditions == 0\n fixedFine = util.boundarypIndexMap(NWorldFine, boundaryMap=boundaryMap)\n freeFine = np.setdiff1d(np.arange(NpFine), fixedFine)\n if aFine.ndim == 1:\n ALocFine = world.ALocFine\n else:\n ALocFine = world.ALocMatrixFine\n AFine = fem.assemblePatchMatrix(NWorldFine, ALocFine, aFine)\n MFine = fem.assemblePatchMatrix(NWorldFine, world.MLocFine)\n bFine = MFine * MbFine + AFine * AbFine\n AFineFree = AFine[freeFine][:, freeFine]\n bFineFree = bFine[freeFine]\n uFineFree = linalg.linSolve(AFineFree, bFineFree)\n uFineFull = np.zeros(NpFine)\n uFineFull[freeFine] = uFineFree\n uFineFull = uFineFull\n return uFineFull, AFine, MFine\n\n\ndef solveCoarse(world, aFine, MbFine, AbFine, boundaryConditions):\n NWorldCoarse = world.NWorldCoarse\n NWorldFine = world.NWorldCoarse * world.NCoarseElement\n NCoarseElement = world.NCoarseElement\n NpFine = np.prod(NWorldFine + 1)\n NpCoarse = np.prod(NWorldCoarse + 1)\n if MbFine is None:\n MbFine = np.zeros(NpFine)\n if AbFine is None:\n AbFine = np.zeros(NpFine)\n boundaryMap = boundaryConditions == 0\n fixedCoarse = util.boundarypIndexMap(NWorldCoarse, boundaryMap=boundaryMap)\n freeCoarse = np.setdiff1d(np.arange(NpCoarse), fixedCoarse)\n if aFine.ndim == 1:\n ALocFine = world.ALocFine\n else:\n ALocFine = world.ALocMatrixFine\n AFine = fem.assemblePatchMatrix(NWorldFine, ALocFine, aFine)\n MFine = fem.assemblePatchMatrix(NWorldFine, world.MLocFine)\n bFine = MFine * MbFine + AFine * AbFine\n basis = fem.assembleProlongationMatrix(NWorldCoarse, 
NCoarseElement)\n bCoarse = basis.T * bFine\n ACoarse = basis.T * (AFine * basis)\n ACoarseFree = ACoarse[freeCoarse][:, freeCoarse]\n bCoarseFree = bCoarse[freeCoarse]\n uCoarseFree = linalg.linSolve(ACoarseFree, bCoarseFree)\n uCoarseFull = np.zeros(NpCoarse)\n uCoarseFull[freeCoarse] = uCoarseFree\n uCoarseFull = uCoarseFull\n uFineFull = basis * uCoarseFull\n return uCoarseFull, uFineFull\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import time
import os
import random
def generate_sequence(difficulty):
    """Show `difficulty` distinct random numbers (1-100) briefly, clear the
    screen, and return the list the player must memorize."""
    print("Try to remember the numbers! : ")
    random_list = random.sample(range(1, 101), difficulty)
    time.sleep(2)
    print(random_list)
    time.sleep(0.7)
    # 'cls' is Windows-only; on POSIX shells this command fails silently.
    os.system('cls')
    time.sleep(3)
    return random_list
def get_list_from_user(difficulty):
    """Prompt for `difficulty` integers, echo them back, and return the list.

    NOTE(review): int(input(...)) raises ValueError on non-numeric input;
    there is no retry handling here.
    """
    print("WHAT WAS THE NUMBERS?? (Write each num at the same order and press Enter) : ")
    user_list = []
    for i in range(0, difficulty):
        user_num = int(input('num: '))
        user_list.append(user_num)
    print("Your chosen numbers are : " + str(user_list))
    time.sleep(3)
    return user_list
def is_list_equal(a, b):
    """Compare the two sequences, announce the outcome, and return it."""
    matched = a == b
    if matched:
        print("CORRECT answer! :) ")
        time.sleep(2)
        print("See you next time !")
    else:
        print("This is a WRONG answer !")
        time.sleep(2)
        print("See you next time ! :)")
    # Same closing pause in both branches.
    time.sleep(3)
    return matched
def play_memory_game(user_input):
    """Run one round: show a random sequence of `user_input` numbers, read
    the user's guess, and return True iff the guess matched.

    Change vs. original: `is_list_equal` already returns a bool, so the
    redundant `== True` comparison and literal-returning if/else are gone.
    """
    print("****** Welcome to the Memory Game! ******" + "\n")
    shown = generate_sequence(user_input)
    guessed = get_list_from_user(user_input)
    return is_list_equal(shown, guessed)
|
normal
|
{
"blob_id": "bff9fb50f1901094c9ab3d61566509835c774f21",
"index": 6776,
"step-1": "<mask token>\n\n\ndef is_list_equal(a, b):\n if a == b:\n print('CORRECT answer! :) ')\n time.sleep(2)\n print('See you next time !')\n time.sleep(3)\n return True\n else:\n print('This is a WRONG answer !')\n time.sleep(2)\n print('See you next time ! :)')\n time.sleep(3)\n return False\n\n\ndef play_memory_game(user_input):\n print('****** Welcome to the Memory Game! ******' + '\\n')\n a = generate_sequence(user_input)\n b = get_list_from_user(user_input)\n if is_list_equal(a, b) == True:\n return True\n else:\n return False\n",
"step-2": "<mask token>\n\n\ndef generate_sequence(difficulty):\n print('Try to remember the numbers! : ')\n random_list = random.sample(range(1, 101), difficulty)\n time.sleep(2)\n print(random_list)\n time.sleep(0.7)\n os.system('cls')\n time.sleep(3)\n return random_list\n\n\n<mask token>\n\n\ndef is_list_equal(a, b):\n if a == b:\n print('CORRECT answer! :) ')\n time.sleep(2)\n print('See you next time !')\n time.sleep(3)\n return True\n else:\n print('This is a WRONG answer !')\n time.sleep(2)\n print('See you next time ! :)')\n time.sleep(3)\n return False\n\n\ndef play_memory_game(user_input):\n print('****** Welcome to the Memory Game! ******' + '\\n')\n a = generate_sequence(user_input)\n b = get_list_from_user(user_input)\n if is_list_equal(a, b) == True:\n return True\n else:\n return False\n",
"step-3": "<mask token>\n\n\ndef generate_sequence(difficulty):\n print('Try to remember the numbers! : ')\n random_list = random.sample(range(1, 101), difficulty)\n time.sleep(2)\n print(random_list)\n time.sleep(0.7)\n os.system('cls')\n time.sleep(3)\n return random_list\n\n\ndef get_list_from_user(difficulty):\n print(\n 'WHAT WAS THE NUMBERS?? (Write each num at the same order and press Enter) : '\n )\n user_list = []\n for i in range(0, difficulty):\n user_num = int(input('num: '))\n user_list.append(user_num)\n print('Your chosen numbers are : ' + str(user_list))\n time.sleep(3)\n return user_list\n\n\ndef is_list_equal(a, b):\n if a == b:\n print('CORRECT answer! :) ')\n time.sleep(2)\n print('See you next time !')\n time.sleep(3)\n return True\n else:\n print('This is a WRONG answer !')\n time.sleep(2)\n print('See you next time ! :)')\n time.sleep(3)\n return False\n\n\ndef play_memory_game(user_input):\n print('****** Welcome to the Memory Game! ******' + '\\n')\n a = generate_sequence(user_input)\n b = get_list_from_user(user_input)\n if is_list_equal(a, b) == True:\n return True\n else:\n return False\n",
"step-4": "import time\nimport os\nimport random\n\n\ndef generate_sequence(difficulty):\n print('Try to remember the numbers! : ')\n random_list = random.sample(range(1, 101), difficulty)\n time.sleep(2)\n print(random_list)\n time.sleep(0.7)\n os.system('cls')\n time.sleep(3)\n return random_list\n\n\ndef get_list_from_user(difficulty):\n print(\n 'WHAT WAS THE NUMBERS?? (Write each num at the same order and press Enter) : '\n )\n user_list = []\n for i in range(0, difficulty):\n user_num = int(input('num: '))\n user_list.append(user_num)\n print('Your chosen numbers are : ' + str(user_list))\n time.sleep(3)\n return user_list\n\n\ndef is_list_equal(a, b):\n if a == b:\n print('CORRECT answer! :) ')\n time.sleep(2)\n print('See you next time !')\n time.sleep(3)\n return True\n else:\n print('This is a WRONG answer !')\n time.sleep(2)\n print('See you next time ! :)')\n time.sleep(3)\n return False\n\n\ndef play_memory_game(user_input):\n print('****** Welcome to the Memory Game! ******' + '\\n')\n a = generate_sequence(user_input)\n b = get_list_from_user(user_input)\n if is_list_equal(a, b) == True:\n return True\n else:\n return False\n",
"step-5": "import time\nimport os\nimport random\n\n\ndef generate_sequence(difficulty):\n print(\"Try to remember the numbers! : \")\n random_list = random.sample(range(1, 101), difficulty)\n time.sleep(2)\n print(random_list)\n time.sleep(0.7)\n os.system('cls')\n time.sleep(3)\n return random_list\n\n\ndef get_list_from_user(difficulty):\n print(\"WHAT WAS THE NUMBERS?? (Write each num at the same order and press Enter) : \")\n user_list = []\n for i in range(0, difficulty):\n user_num = int(input('num: '))\n user_list.append(user_num)\n print(\"Your chosen numbers are : \" + str(user_list))\n time.sleep(3)\n return user_list\n\n\ndef is_list_equal(a, b):\n if a == b:\n print(\"CORRECT answer! :) \")\n time.sleep(2)\n print(\"See you next time !\")\n time.sleep(3)\n return True\n else:\n print(\"This is a WRONG answer !\")\n time.sleep(2)\n print(\"See you next time ! :)\")\n time.sleep(3)\n return False\n\n\ndef play_memory_game(user_input):\n print(\"****** Welcome to the Memory Game! ******\" + \"\\n\")\n a = generate_sequence(user_input)\n b = get_list_from_user(user_input)\n if is_list_equal(a, b) == True:\n return True\n else:\n return False\n\n\n\n\n\n\n\n\n\n\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def equals(left, right, tol=0.001):
    """Compare two answer tokens, numerically when possible.

    Rosalind accepts decimal answers within an absolute error of 0.001
    (the default `tol`); non-numeric tokens must match exactly.
    """
    # Strip surrounding whitespace when the operands are strings.
    try:
        left = left.strip()
        right = right.strip()
    except AttributeError:
        pass
    # Numeric tokens compare within tolerance; anything else, exactly.
    try:
        return abs(float(left) - float(right)) <= tol
    except ValueError:
        return left == right
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def equals(left, right, tol=0.001):
    """
    Tests equality of left and right

    Rosalind allows for a default [absolute] error of 0.001 in decimal
    answers unless otherwise stated.
    """
    try:
        left = left.strip()
        right = right.strip()
    except AttributeError:
        # Non-string operands (e.g. numbers) have no .strip(); use as-is.
        pass
    try:
        left = float(left)
        right = float(right)
        return abs(left - right) <= tol
    except ValueError:
        # Non-numeric tokens must match exactly.
        return left == right


def all_equals(solution1, solution2, tol=0.001):
    """
    Tests equality of all tokens in two solutions.

    Rosalind allows for a default [absolute] error of 0.001 in all decimal
    answers unless otherwise stated.
    """
    tokens1 = solution1.split()
    tokens2 = solution2.split()
    # Bug fix: zip() stops at the shorter sequence, so the original
    # silently compared only the common prefix and accepted solutions
    # with extra or missing tokens.  Reject length mismatches first.
    if len(tokens1) != len(tokens2):
        return False
    for token1, token2 in zip(tokens1, tokens2):
        if not equals(token1, token2, tol=tol):
            print(token1, token2)  # show the first mismatching pair
            return False
    return True
|
flexible
|
{
"blob_id": "b137fc40a5b2dec63c7abb6953664a969f5c126f",
"index": 8022,
"step-1": "<mask token>\n",
"step-2": "def equals(left, right, tol=0.001):\n \"\"\"\n Tests equality of left and right\n\n Rosalind allows for a default [absolute] error of 0.001 in decimal\n answers unless otherwise stated.\n \"\"\"\n try:\n left = left.strip()\n right = right.strip()\n except AttributeError:\n pass\n try:\n left = float(left)\n right = float(right)\n return abs(left - right) <= tol\n except ValueError:\n return left == right\n\n\n<mask token>\n",
"step-3": "def equals(left, right, tol=0.001):\n \"\"\"\n Tests equality of left and right\n\n Rosalind allows for a default [absolute] error of 0.001 in decimal\n answers unless otherwise stated.\n \"\"\"\n try:\n left = left.strip()\n right = right.strip()\n except AttributeError:\n pass\n try:\n left = float(left)\n right = float(right)\n return abs(left - right) <= tol\n except ValueError:\n return left == right\n\n\ndef all_equals(solution1, solution2, tol=0.001):\n \"\"\"\n Tests equality of all tokens in two solutions.\n\n Rosalind allows for a default [absolute] error of 0.001 in all decimal\n answers unless otherwise stated.\n \"\"\"\n tokens1 = solution1.split()\n tokens2 = solution2.split()\n for token1, token2 in zip(tokens1, tokens2):\n if not equals(token1, token2, tol=tol):\n print(token1, token2)\n return False\n return True\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
from package import *
class mysql(MakePackage):
    """Build recipe for MySQL 5.6.10 using the cmake-based MakePackage flow."""
    # Tools required before this package can be configured.
    dependencies = ["cmake"]
    # Source tarball; the trailing /from/... segment selects the mysql.com CDN mirror.
    fetch="http://dev.mysql.com/get/Downloads/MySQL-5.6/mysql-5.6.10.tar.gz/from/http://cdn.mysql.com/"
    # Configure command template; %(prefix)s is presumably interpolated by
    # the package framework at build time — confirm against MakePackage.
    config='cmake -G "Unix Makefiles" -DCMAKE_INSTALL_PREFIX=%(prefix)s -DWITH_READLINE=1'
|
normal
|
{
"blob_id": "ec90c731a0e546d9d399cbb68c92be1acca8cbe0",
"index": 518,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass mysql(MakePackage):\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass mysql(MakePackage):\n dependencies = ['cmake']\n fetch = (\n 'http://dev.mysql.com/get/Downloads/MySQL-5.6/mysql-5.6.10.tar.gz/from/http://cdn.mysql.com/'\n )\n config = (\n 'cmake -G \"Unix Makefiles\" -DCMAKE_INSTALL_PREFIX=%(prefix)s -DWITH_READLINE=1'\n )\n",
"step-4": "from package import *\n\n\nclass mysql(MakePackage):\n dependencies = ['cmake']\n fetch = (\n 'http://dev.mysql.com/get/Downloads/MySQL-5.6/mysql-5.6.10.tar.gz/from/http://cdn.mysql.com/'\n )\n config = (\n 'cmake -G \"Unix Makefiles\" -DCMAKE_INSTALL_PREFIX=%(prefix)s -DWITH_READLINE=1'\n )\n",
"step-5": "\nfrom package import *\n\nclass mysql(MakePackage):\n dependencies = [\"cmake\"]\n fetch=\"http://dev.mysql.com/get/Downloads/MySQL-5.6/mysql-5.6.10.tar.gz/from/http://cdn.mysql.com/\"\n config='cmake -G \"Unix Makefiles\" -DCMAKE_INSTALL_PREFIX=%(prefix)s -DWITH_READLINE=1'\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
    """Auto-generated Django migration: drop and re-create
    extraits.extraits_livre_id as a ForeignKey to digressions.Livre."""
    dependencies = [('digressions', '0004_auto_20180303_1158')]
    # RemoveField + AddField rather than AlterField — presumably because the
    # field's type changed to a ForeignKey; confirm against the model history.
    operations = [migrations.RemoveField(model_name='extraits', name=
        'extraits_livre_id'), migrations.AddField(model_name='extraits',
        name='extraits_livre_id', field=models.ForeignKey(default=
        'du coté de chez Swann', on_delete=django.db.models.deletion.
        CASCADE, to='digressions.Livre'), preserve_default=False)]
<|reserved_special_token_1|>
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated Django migration: drop and re-create
    extraits.extraits_livre_id as a ForeignKey to digressions.Livre."""
    dependencies = [('digressions', '0004_auto_20180303_1158')]
    # preserve_default=False: the default only back-fills existing rows once.
    operations = [migrations.RemoveField(model_name='extraits', name=
        'extraits_livre_id'), migrations.AddField(model_name='extraits',
        name='extraits_livre_id', field=models.ForeignKey(default=
        'du coté de chez Swann', on_delete=django.db.models.deletion.
        CASCADE, to='digressions.Livre'), preserve_default=False)]
<|reserved_special_token_1|>
# Generated by Django 2.0 on 2018-03-06 16:21
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated Django migration (2018-03-06): drop and re-create
    extraits.extraits_livre_id as a ForeignKey to digressions.Livre."""

    dependencies = [
        ('digressions', '0004_auto_20180303_1158'),
    ]

    operations = [
        # RemoveField + AddField rather than AlterField — presumably because
        # the field's type changed to a ForeignKey; confirm against history.
        migrations.RemoveField(
            model_name='extraits',
            name='extraits_livre_id',
        ),
        migrations.AddField(
            model_name='extraits',
            name='extraits_livre_id',
            # The default is only used to back-fill existing rows once;
            # preserve_default=False keeps it out of the final field state.
            field=models.ForeignKey(default='du coté de chez Swann', on_delete=django.db.models.deletion.CASCADE, to='digressions.Livre'),
            preserve_default=False,
        ),
    ]
|
flexible
|
{
"blob_id": "38c21fb959d8b98b616006ea48bd720cc6f9995c",
"index": 1462,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('digressions', '0004_auto_20180303_1158')]\n operations = [migrations.RemoveField(model_name='extraits', name=\n 'extraits_livre_id'), migrations.AddField(model_name='extraits',\n name='extraits_livre_id', field=models.ForeignKey(default=\n 'du coté de chez Swann', on_delete=django.db.models.deletion.\n CASCADE, to='digressions.Livre'), preserve_default=False)]\n",
"step-4": "from django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n dependencies = [('digressions', '0004_auto_20180303_1158')]\n operations = [migrations.RemoveField(model_name='extraits', name=\n 'extraits_livre_id'), migrations.AddField(model_name='extraits',\n name='extraits_livre_id', field=models.ForeignKey(default=\n 'du coté de chez Swann', on_delete=django.db.models.deletion.\n CASCADE, to='digressions.Livre'), preserve_default=False)]\n",
"step-5": "# Generated by Django 2.0 on 2018-03-06 16:21\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('digressions', '0004_auto_20180303_1158'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='extraits',\n name='extraits_livre_id',\n ),\n migrations.AddField(\n model_name='extraits',\n name='extraits_livre_id',\n field=models.ForeignKey(default='du coté de chez Swann', on_delete=django.db.models.deletion.CASCADE, to='digressions.Livre'),\n preserve_default=False,\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def reset(request):
    """Zero the session counter and redirect back to the index page."""
    request.session['count'] = 0
    return redirect('/')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def index(request):
    """Render the word page on GET; on POST store a fresh random word in the
    session, bump the generation counter, and redirect back to '/'.

    NOTE(review): request methods other than GET/POST fall through and
    return None.
    """
    # Seed session defaults on the first visit.
    if not 'word' in request.session:
        request.session['word'] = 'Empty'
    if not 'count' in request.session:
        request.session['count'] = 0
    if request.method == 'GET':
        return render(request, 'app_one/index.html')
    if request.method == 'POST':
        request.session['word'] = get_random_string(length=14)
        request.session['count'] += 1
        return redirect('/')


def reset(request):
    """Zero the session counter and redirect back to the index page."""
    request.session['count'] = 0
    return redirect('/')
<|reserved_special_token_1|>
from django.shortcuts import render, redirect
from django.utils.crypto import get_random_string
def index(request):
    """Render the word page on GET; on POST store a fresh random word in the
    session, bump the generation counter, and redirect back to '/'.

    NOTE(review): request methods other than GET/POST fall through and
    return None.
    """
    # Seed session defaults on the first visit.
    if not 'word' in request.session:
        request.session['word'] = 'Empty'
    if not 'count' in request.session:
        request.session['count'] = 0
    if request.method == 'GET':
        return render(request, 'app_one/index.html')
    if request.method == 'POST':
        request.session['word'] = get_random_string(length=14)
        request.session['count'] += 1
        return redirect('/')


def reset(request):
    """Zero the session counter and redirect back to the index page."""
    request.session['count'] = 0
    return redirect('/')
<|reserved_special_token_1|>
from django.shortcuts import render, redirect
from django.utils.crypto import get_random_string
def index(request):
if not "word" in request.session:
request.session["word"] = 'Empty'
if not "count" in request.session:
request.session["count"] = 0
if request.method == "GET":
return render(request, "app_one/index.html")
if request.method == "POST":
request.session['word'] = get_random_string(length=14)
request.session['count'] += 1
return redirect('/')
# def generator(request):
# return redirect('/')
def reset(request):
request.session['count'] = 0
return redirect('/')
|
flexible
|
{
"blob_id": "2ec5e43860a1d248a2f5cd1abc26676342275425",
"index": 8589,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef reset(request):\n request.session['count'] = 0\n return redirect('/')\n",
"step-3": "<mask token>\n\n\ndef index(request):\n if not 'word' in request.session:\n request.session['word'] = 'Empty'\n if not 'count' in request.session:\n request.session['count'] = 0\n if request.method == 'GET':\n return render(request, 'app_one/index.html')\n if request.method == 'POST':\n request.session['word'] = get_random_string(length=14)\n request.session['count'] += 1\n return redirect('/')\n\n\ndef reset(request):\n request.session['count'] = 0\n return redirect('/')\n",
"step-4": "from django.shortcuts import render, redirect\nfrom django.utils.crypto import get_random_string\n\n\ndef index(request):\n if not 'word' in request.session:\n request.session['word'] = 'Empty'\n if not 'count' in request.session:\n request.session['count'] = 0\n if request.method == 'GET':\n return render(request, 'app_one/index.html')\n if request.method == 'POST':\n request.session['word'] = get_random_string(length=14)\n request.session['count'] += 1\n return redirect('/')\n\n\ndef reset(request):\n request.session['count'] = 0\n return redirect('/')\n",
"step-5": "from django.shortcuts import render, redirect\nfrom django.utils.crypto import get_random_string\n\n\ndef index(request):\n if not \"word\" in request.session:\n request.session[\"word\"] = 'Empty'\n if not \"count\" in request.session:\n request.session[\"count\"] = 0\n if request.method == \"GET\":\n return render(request, \"app_one/index.html\")\n if request.method == \"POST\":\n request.session['word'] = get_random_string(length=14)\n request.session['count'] += 1\n return redirect('/')\n\n# def generator(request):\n \n# return redirect('/')\n\ndef reset(request):\n request.session['count'] = 0\n return redirect('/')",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
botName = 'firstBot'
username = 'mrthemafia'
password = 'oblivion'
client_id = 'Y3LQwponbEp07w'
client_secret = 'R4oyCEj6hSTJWHfWMwb-DGUOBm8'
<|reserved_special_token_1|>
botName = "firstBot"
username = "mrthemafia"
password = "oblivion"
client_id = "Y3LQwponbEp07w"
client_secret = "R4oyCEj6hSTJWHfWMwb-DGUOBm8"
|
flexible
|
{
"blob_id": "3031f695d57492cf3b29694fecd0a41c469a3e00",
"index": 7481,
"step-1": "<mask token>\n",
"step-2": "botName = 'firstBot'\nusername = 'mrthemafia'\npassword = 'oblivion'\nclient_id = 'Y3LQwponbEp07w'\nclient_secret = 'R4oyCEj6hSTJWHfWMwb-DGUOBm8'\n",
"step-3": "botName = \"firstBot\"\nusername = \"mrthemafia\"\npassword = \"oblivion\"\nclient_id = \"Y3LQwponbEp07w\"\nclient_secret = \"R4oyCEj6hSTJWHfWMwb-DGUOBm8\"\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import re
class Zout:
def __init__(self, aline):
self.Str = aline
self.Var = ''
self.StN = ''
self.ZN = ''
self.ZName = ''
self.Motion = ''
self.Ztype = ''
self.tozout(aline)
def tozout(self, aline):
"""transform station statement to Cylinder Outputs struct"""
# SetAusg(A120,5,A.St201_Y1_2_SwivelUnit_backward);
#front|back|up|down|left|right
pattern = re.compile(r'.*(?P<Var>A.*[sS]t(?P<StN>\d+)_Y(?P<ZN>\d+)_[24]_(?P<ZName>\w+)_'
r'(?P<Motion>open|close|forward|backward|up|upward|down|downward|left|leftward|right|rightward))\s*\).*')
match = pattern.match(aline)
if match:
#print('match')
self.Var = match.group('Var')
self.StN = match.group('StN')
self.ZN = match.group('ZN')
self.ZName = match.group('ZName')
self.Motion = match.group('Motion')
# if re.compile(r'^up|down|left|right$').match(self.Motion):
# self.Motion = self.Motion+'ward'
# obj = re.compile(r'up|down|left|right')
# if obj.match(self.Motion):
# print('match')
# self.Motion = obj.subn('ward',self.Motion)[0]
self.Motion = re.sub(r'^(up|down|left|right)$',r'\1ward', self.Motion)
isgrippermatch = re.compile(r'.*(open|close).*').match(aline)
if isgrippermatch:
self.Ztype = 'gripper'
else:
self.Ztype = 'not gripper'
def display(self):
print(self.Var)
class Zouts:
def __init__(self):
self.elements = []
def search(self, StN, ZN, Motion):
for elem in self.elements:
print('elem:')
print(str(type(elem.StN)) + str(type(StN)))
print(elem.StN + '->' + StN + ':' + str(elem.StN == StN))
print(elem.Motion + '->' + ':' + str(elem.Motion == Motion))
if elem.StN == StN and elem.ZN == ZN and elem.Motion == Motion:
print('match')
return elem
print('not match')
return None
def add(self, zout):
self.elements.append(zout)
def display(self):
for elem in self.elements:
print(elem.Var)
|
normal
|
{
"blob_id": "71ebc6e9218085e887eda7843b5489837ed45c97",
"index": 880,
"step-1": "<mask token>\n\n\nclass Zouts:\n <mask token>\n\n def search(self, StN, ZN, Motion):\n for elem in self.elements:\n print('elem:')\n print(str(type(elem.StN)) + str(type(StN)))\n print(elem.StN + '->' + StN + ':' + str(elem.StN == StN))\n print(elem.Motion + '->' + ':' + str(elem.Motion == Motion))\n if elem.StN == StN and elem.ZN == ZN and elem.Motion == Motion:\n print('match')\n return elem\n print('not match')\n return None\n\n def add(self, zout):\n self.elements.append(zout)\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Zout:\n <mask token>\n <mask token>\n <mask token>\n\n\nclass Zouts:\n\n def __init__(self):\n self.elements = []\n\n def search(self, StN, ZN, Motion):\n for elem in self.elements:\n print('elem:')\n print(str(type(elem.StN)) + str(type(StN)))\n print(elem.StN + '->' + StN + ':' + str(elem.StN == StN))\n print(elem.Motion + '->' + ':' + str(elem.Motion == Motion))\n if elem.StN == StN and elem.ZN == ZN and elem.Motion == Motion:\n print('match')\n return elem\n print('not match')\n return None\n\n def add(self, zout):\n self.elements.append(zout)\n\n def display(self):\n for elem in self.elements:\n print(elem.Var)\n",
"step-3": "<mask token>\n\n\nclass Zout:\n <mask token>\n\n def tozout(self, aline):\n \"\"\"transform station statement to Cylinder Outputs struct\"\"\"\n pattern = re.compile(\n '.*(?P<Var>A.*[sS]t(?P<StN>\\\\d+)_Y(?P<ZN>\\\\d+)_[24]_(?P<ZName>\\\\w+)_(?P<Motion>open|close|forward|backward|up|upward|down|downward|left|leftward|right|rightward))\\\\s*\\\\).*'\n )\n match = pattern.match(aline)\n if match:\n self.Var = match.group('Var')\n self.StN = match.group('StN')\n self.ZN = match.group('ZN')\n self.ZName = match.group('ZName')\n self.Motion = match.group('Motion')\n self.Motion = re.sub('^(up|down|left|right)$', '\\\\1ward', self.\n Motion)\n isgrippermatch = re.compile('.*(open|close).*').match(aline)\n if isgrippermatch:\n self.Ztype = 'gripper'\n else:\n self.Ztype = 'not gripper'\n <mask token>\n\n\nclass Zouts:\n\n def __init__(self):\n self.elements = []\n\n def search(self, StN, ZN, Motion):\n for elem in self.elements:\n print('elem:')\n print(str(type(elem.StN)) + str(type(StN)))\n print(elem.StN + '->' + StN + ':' + str(elem.StN == StN))\n print(elem.Motion + '->' + ':' + str(elem.Motion == Motion))\n if elem.StN == StN and elem.ZN == ZN and elem.Motion == Motion:\n print('match')\n return elem\n print('not match')\n return None\n\n def add(self, zout):\n self.elements.append(zout)\n\n def display(self):\n for elem in self.elements:\n print(elem.Var)\n",
"step-4": "import re\n\n\nclass Zout:\n\n def __init__(self, aline):\n self.Str = aline\n self.Var = ''\n self.StN = ''\n self.ZN = ''\n self.ZName = ''\n self.Motion = ''\n self.Ztype = ''\n self.tozout(aline)\n\n def tozout(self, aline):\n \"\"\"transform station statement to Cylinder Outputs struct\"\"\"\n pattern = re.compile(\n '.*(?P<Var>A.*[sS]t(?P<StN>\\\\d+)_Y(?P<ZN>\\\\d+)_[24]_(?P<ZName>\\\\w+)_(?P<Motion>open|close|forward|backward|up|upward|down|downward|left|leftward|right|rightward))\\\\s*\\\\).*'\n )\n match = pattern.match(aline)\n if match:\n self.Var = match.group('Var')\n self.StN = match.group('StN')\n self.ZN = match.group('ZN')\n self.ZName = match.group('ZName')\n self.Motion = match.group('Motion')\n self.Motion = re.sub('^(up|down|left|right)$', '\\\\1ward', self.\n Motion)\n isgrippermatch = re.compile('.*(open|close).*').match(aline)\n if isgrippermatch:\n self.Ztype = 'gripper'\n else:\n self.Ztype = 'not gripper'\n\n def display(self):\n print(self.Var)\n\n\nclass Zouts:\n\n def __init__(self):\n self.elements = []\n\n def search(self, StN, ZN, Motion):\n for elem in self.elements:\n print('elem:')\n print(str(type(elem.StN)) + str(type(StN)))\n print(elem.StN + '->' + StN + ':' + str(elem.StN == StN))\n print(elem.Motion + '->' + ':' + str(elem.Motion == Motion))\n if elem.StN == StN and elem.ZN == ZN and elem.Motion == Motion:\n print('match')\n return elem\n print('not match')\n return None\n\n def add(self, zout):\n self.elements.append(zout)\n\n def display(self):\n for elem in self.elements:\n print(elem.Var)\n",
"step-5": "import re\r\n\r\nclass Zout:\r\n def __init__(self, aline):\r\n self.Str = aline\r\n self.Var = ''\r\n self.StN = ''\r\n self.ZN = ''\r\n self.ZName = ''\r\n self.Motion = ''\r\n self.Ztype = ''\r\n self.tozout(aline)\r\n\r\n def tozout(self, aline):\r\n \"\"\"transform station statement to Cylinder Outputs struct\"\"\"\r\n # SetAusg(A120,5,A.St201_Y1_2_SwivelUnit_backward);\r\n #front|back|up|down|left|right\r\n pattern = re.compile(r'.*(?P<Var>A.*[sS]t(?P<StN>\\d+)_Y(?P<ZN>\\d+)_[24]_(?P<ZName>\\w+)_'\r\n r'(?P<Motion>open|close|forward|backward|up|upward|down|downward|left|leftward|right|rightward))\\s*\\).*')\r\n match = pattern.match(aline)\r\n if match:\r\n #print('match')\r\n self.Var = match.group('Var')\r\n self.StN = match.group('StN')\r\n self.ZN = match.group('ZN')\r\n self.ZName = match.group('ZName')\r\n self.Motion = match.group('Motion')\r\n # if re.compile(r'^up|down|left|right$').match(self.Motion):\r\n # self.Motion = self.Motion+'ward'\r\n # obj = re.compile(r'up|down|left|right')\r\n # if obj.match(self.Motion):\r\n # print('match')\r\n # self.Motion = obj.subn('ward',self.Motion)[0]\r\n self.Motion = re.sub(r'^(up|down|left|right)$',r'\\1ward', self.Motion)\r\n isgrippermatch = re.compile(r'.*(open|close).*').match(aline)\r\n if isgrippermatch:\r\n self.Ztype = 'gripper'\r\n else:\r\n self.Ztype = 'not gripper'\r\n\r\n def display(self):\r\n print(self.Var)\r\n\r\nclass Zouts:\r\n def __init__(self):\r\n self.elements = []\r\n\r\n def search(self, StN, ZN, Motion):\r\n for elem in self.elements:\r\n print('elem:')\r\n print(str(type(elem.StN)) + str(type(StN)))\r\n print(elem.StN + '->' + StN + ':' + str(elem.StN == StN))\r\n print(elem.Motion + '->' + ':' + str(elem.Motion == Motion))\r\n if elem.StN == StN and elem.ZN == ZN and elem.Motion == Motion:\r\n print('match')\r\n return elem\r\n print('not match')\r\n return None\r\n\r\n def add(self, zout):\r\n self.elements.append(zout)\r\n\r\n def display(self):\r\n for elem in 
self.elements:\r\n print(elem.Var)",
"step-ids": [
3,
6,
7,
10,
11
]
}
|
[
3,
6,
7,
10,
11
] |
#!/usr/bin/python
import os, sys
# Assuming /tmp/foo.txt exists and has read/write permissions.
ret = os.access("/tmp/foo.txt", os.F_OK)
print "F_OK - return value %s"% ret
ret = os.access("/tmp/foo.txt", os.R_OK)
print "R_OK - return value %s"% ret
ret = os.access("/tmp/foo.txt", os.W_OK)
print "W_OK - return value %s"% ret
ret = os.access("/tmp/foo.txt", os.X_OK)
print "X_OK - return value %s"% ret
This produces following result:
F_OK - return value True R_OK - return value True W_OK - return value True X_OK - return value False
|
normal
|
{
"blob_id": "c9b76fed088b85cf68e96778016d8974fea84933",
"index": 4050,
"step-1": "#!/usr/bin/python\r\nimport os, sys\r\n\r\n# Assuming /tmp/foo.txt exists and has read/write permissions.\r\n\r\nret = os.access(\"/tmp/foo.txt\", os.F_OK)\r\nprint \"F_OK - return value %s\"% ret\r\n\r\nret = os.access(\"/tmp/foo.txt\", os.R_OK)\r\nprint \"R_OK - return value %s\"% ret\r\n\r\nret = os.access(\"/tmp/foo.txt\", os.W_OK)\r\nprint \"W_OK - return value %s\"% ret\r\n\r\nret = os.access(\"/tmp/foo.txt\", os.X_OK)\r\nprint \"X_OK - return value %s\"% ret\r\nThis produces following result:\r\n\r\nF_OK - return value True R_OK - return value True W_OK - return value True X_OK - return value False\r\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
## More Review + More Linked Lists ##
##Given a pointer to the head node of a linked list whose data elements are in non-decreasing order, you must delete any duplicate nodes and print the updated list.
##Code handling I/O is provided in the editor. Complete the removeDuplicates(Node) function.
##Note: The head pointer may be null, indicating that the list is empty. Be sure to reset your next pointer when performing deletions to avoid breaking the list.
##Input Format
##The first line contains N, the number of nodes to be inserted.
##The N subsequent lines each contain an integer describing the data for a node being inserted at the list's tail;
##the lines of data will always be in non-decreasing order.
##Output Format
##Print the data for your list of ascending nodes as a single line of space-separated integers.
##Sample Input
##6
##1
##2
##2
##3
##3
##4
##Sample Output
##1 2 3 4
##Explanation
##N = 6, and our non-decreasing list is {1,2,2,3,3,4}. The data values 2 and 3 each have a duplicate,
##so we remove the two duplicate nodes and print our updated (ascending) list
class Node:
def __init__(self,data):
self.data = data
self.next = None
class Solution:
def insert(self,head,data):
p = Node(data)
if head==None:
head=p
elif head.next==None:
head.next=p
else:
start=head
while(start.next!=None):
start=start.next
start.next=p
return head
def display(self,head):
current = head
while current:
print current.data,
current = current.next
def removeDuplicates(self,head): ########
if head==None or head.next ==None: return head
tmp = head;
while tmp.next!=None:
if tmp.data==tmp.next.data: tmp.next=tmp.next.next;
else: tmp=tmp.next;
return head
mylist= Solution()
T=int(input())
head=None
for i in range(T):
data=int(input())
head=mylist.insert(head,data)
head=mylist.removeDuplicates(head)
mylist.display(head);
|
normal
|
{
"blob_id": "75990147e4a3dae1b590729ed659e2ddcbfb295d",
"index": 1636,
"step-1": "## More Review + More Linked Lists ##\n\n##Given a pointer to the head node of a linked list whose data elements are in non-decreasing order, you must delete any duplicate nodes and print the updated list.\n##Code handling I/O is provided in the editor. Complete the removeDuplicates(Node) function. \n##Note: The head pointer may be null, indicating that the list is empty. Be sure to reset your next pointer when performing deletions to avoid breaking the list.\n##Input Format\n##The first line contains N, the number of nodes to be inserted. \n##The N subsequent lines each contain an integer describing the data for a node being inserted at the list's tail;\n##the lines of data will always be in non-decreasing order.\n##Output Format\n##Print the data for your list of ascending nodes as a single line of space-separated integers.\n##Sample Input\n##6\n##1\n##2\n##2\n##3\n##3\n##4\n##Sample Output\n##1 2 3 4 \n##Explanation\n##N = 6, and our non-decreasing list is {1,2,2,3,3,4}. The data values 2 and 3 each have a duplicate,\n##so we remove the two duplicate nodes and print our updated (ascending) list\n\n\nclass Node:\n def __init__(self,data):\n self.data = data\n self.next = None\n\nclass Solution: \n def insert(self,head,data):\n p = Node(data) \n if head==None:\n head=p\n elif head.next==None:\n head.next=p\n else:\n start=head\n while(start.next!=None):\n start=start.next\n start.next=p\n return head\n \n def display(self,head):\n current = head\n while current:\n print current.data,\n current = current.next\n\n def removeDuplicates(self,head): ########\n if head==None or head.next ==None: return head\n tmp = head;\n while tmp.next!=None:\n if tmp.data==tmp.next.data: tmp.next=tmp.next.next;\n else: tmp=tmp.next;\n return head\n\nmylist= Solution()\nT=int(input())\nhead=None\nfor i in range(T):\n data=int(input())\n head=mylist.insert(head,data) \nhead=mylist.removeDuplicates(head)\nmylist.display(head);\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
try:
myfile = open('mydata.txt', encoding='utf-8')
except FileNotFoundError as ex:
print('file is not found')
print(ex.args)
else:
print('file :', myfile.read())
myfile.close()
finally:
print('finished working')
<|reserved_special_token_1|>
import sys
try:
myfile = open('mydata.txt', encoding='utf-8')
except FileNotFoundError as ex:
print('file is not found')
print(ex.args)
else:
print('file :', myfile.read())
myfile.close()
finally:
print('finished working')
<|reserved_special_token_1|>
import sys
try:
myfile = open("mydata.txt",encoding ="utf-8")
except FileNotFoundError as ex:
print("file is not found")
print(ex.args)
else:
print("file :",myfile.read())
myfile.close()
finally :
print("finished working")
|
flexible
|
{
"blob_id": "8bf75bf3b16296c36c34e8c4c50149259d792af7",
"index": 4319,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ntry:\n myfile = open('mydata.txt', encoding='utf-8')\nexcept FileNotFoundError as ex:\n print('file is not found')\n print(ex.args)\nelse:\n print('file :', myfile.read())\n myfile.close()\nfinally:\n print('finished working')\n",
"step-3": "import sys\ntry:\n myfile = open('mydata.txt', encoding='utf-8')\nexcept FileNotFoundError as ex:\n print('file is not found')\n print(ex.args)\nelse:\n print('file :', myfile.read())\n myfile.close()\nfinally:\n print('finished working')\n",
"step-4": "import sys\n\ntry:\n myfile = open(\"mydata.txt\",encoding =\"utf-8\")\n\nexcept FileNotFoundError as ex:\n print(\"file is not found\")\n print(ex.args)\nelse:\n print(\"file :\",myfile.read())\n myfile.close()\nfinally :\n\n print(\"finished working\")\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def get_encoding_type(file):
with open(file, 'rb') as f:
rawdata = f.read()
return detect(rawdata)['encoding']
def item_change_location(item, location, call):
"""Change location and remove holdinds infos
Arguments:
item {str} -- xml response of get item ws
location {str} -- new location_code
call {str} -- new call
Returns:
[str] -- mms_id, holding_id, pid
"""
mms_id, holding_id, pid = item.find('.//mms_id').text, item.find(
'.//holding_id').text, item.find('.//pid').text
item.find('.//item_data/location').text = location
item.find('.//item_data/alternative_call_number').text = ''
item.find('.//item_data/alternative_call_number_type').text = ''
holding_data = item.find('.//holding_data')
item.remove(holding_data)
if mms_id in processed_record_dict:
if location_code in processed_record_dict[mms_id]:
if processed_record_dict[mms_id][location_code] != call:
multi_call_report.write('{}\n'.format(barcode))
item.find('.//item_data/alternative_call_number').text = call
return mms_id, holding_id, pid
def update_holding_data(holding, new_call):
"""Change call (852$$h) and reset call type (852 fiest indicator)
Arguments:
holding {str} -- response of get holding ws
new_call {str} -- new value for call subfield
Returns:
str -- changed data
"""
holding_data = ET.fromstring(holding)
location_field = holding_data.find(".//datafield[@tag='852']")
location_field.set('ind1', ' ')
call_subfield = holding_data.find(
".//datafield[@tag='852']/subfield[@code='h']")
call_subfield.text = new_call
return ET.tostring(holding_data)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_encoding_type(file):
with open(file, 'rb') as f:
rawdata = f.read()
return detect(rawdata)['encoding']
def item_change_location(item, location, call):
"""Change location and remove holdinds infos
Arguments:
item {str} -- xml response of get item ws
location {str} -- new location_code
call {str} -- new call
Returns:
[str] -- mms_id, holding_id, pid
"""
mms_id, holding_id, pid = item.find('.//mms_id').text, item.find(
'.//holding_id').text, item.find('.//pid').text
item.find('.//item_data/location').text = location
item.find('.//item_data/alternative_call_number').text = ''
item.find('.//item_data/alternative_call_number_type').text = ''
holding_data = item.find('.//holding_data')
item.remove(holding_data)
if mms_id in processed_record_dict:
if location_code in processed_record_dict[mms_id]:
if processed_record_dict[mms_id][location_code] != call:
multi_call_report.write('{}\n'.format(barcode))
item.find('.//item_data/alternative_call_number').text = call
return mms_id, holding_id, pid
def update_holding_data(holding, new_call):
"""Change call (852$$h) and reset call type (852 fiest indicator)
Arguments:
holding {str} -- response of get holding ws
new_call {str} -- new value for call subfield
Returns:
str -- changed data
"""
holding_data = ET.fromstring(holding)
location_field = holding_data.find(".//datafield[@tag='852']")
location_field.set('ind1', ' ')
call_subfield = holding_data.find(
".//datafield[@tag='852']/subfield[@code='h']")
call_subfield.text = new_call
return ET.tostring(holding_data)
logs.init_logs(LOGS_DIR, SERVICE, LOGS_LEVEL)
<|reserved_special_token_0|>
log_module.info(
'Liste des localisation chargée pour la bibliothèque {} :: Main :: Début du traitement'
.format(LIBRARY_CODE))
<|reserved_special_token_0|>
report.write('Code-barres\tStatut\tMessage\n')
<|reserved_special_token_0|>
multi_call_report.write('code-barres\n')
<|reserved_special_token_0|>
with open(IN_FILE, 'r', encoding=from_codec, newline='') as f:
reader = csv.reader(f, delimiter=';')
headers = next(reader)
for row in reader:
if len(row) < 2:
continue
barcode = row[0]
if row[1] is None or row[1] == '':
log_module.error('{} :: Echec :: pas de cote fournie'.format(
barcode))
report.write('{}\tErreur Fichier\tPas de cote fournie\n'.format
(barcode))
continue
call = row[1].upper()
if row[3] is None or row[3] == '':
log_module.error('{} :: Echec :: pas de localisation fournie'.
format(barcode))
report.write('{}\tErreur Fichier\tPas de localisation fournie\n'
.format(barcode))
continue
if row[3] not in locations_dict:
log_module.error(
'{} :: Echec :: La localisation {} est inconnue dans Alma'.
format(barcode, row[3]))
report.write(
"{}\tErreur Fichier\tLa localisation '{}' est inconnue dans Alma\n"
.format(barcode, row[3]))
continue
location_code = locations_dict[row[3]]
log_module.debug('{} :: Succes :: A affecter dans la localisation {}'
.format(barcode, location_code))
status, response = alma_api.get_item_with_barcode(barcode)
if status == 'Error':
log_module.error('{} :: Echec :: {}'.format(barcode, response))
report.write('{}\tErreur Retrouve Exemplaire\t{}\n'.format(
barcode, response))
continue
item = ET.fromstring(response)
mms_id, old_holding_id, item_id = item_change_location(item,
location_code, call)
set_status, set_response = alma_api.set_item(mms_id, old_holding_id,
item_id, ET.tostring(item))
log_module.debug(set_response)
if set_status == 'Error':
log_module.error('{} :: Echec :: {}'.format(barcode, set_response))
report.write('{}\tErreur Mise à jour Exemplaire\t{}\n'.format(
barcode, set_response))
continue
changed_item = ET.fromstring(set_response)
new_holding_id = changed_item.find('.//holding_id').text
processed_record_dict[mms_id] = {location_code: call}
if new_holding_id not in toprocess_holding_dict:
toprocess_holding_dict[new_holding_id] = {'call': call,
'barcode': barcode}
log_module.info(
"{} :: Succes :: L'exemplaire est maintenant rattaché à la Holding {}"
.format(barcode, new_holding_id))
log_module.info('FIN DU DEPLACEMENT DES EXEMPLAIRES')
log_module.info('DEBUT DE LA MODIFICATION DES HOLDINGS')
for new_holding_id in toprocess_holding_dict.keys():
call = toprocess_holding_dict[new_holding_id]['call']
barcode = toprocess_holding_dict[new_holding_id]['barcode']
get_holding_status, get_holding_response = alma_api.get_holding(mms_id,
new_holding_id)
if get_holding_status == 'Error':
log_module.error('{} :: Echec :: {}'.format(new_holding_id,
get_holding_response))
report.write('{}\tErreur Retrouve Holding\t{}\n'.format(barcode,
get_holding_response))
continue
changed_holding = update_holding_data(get_holding_response, call)
set_holding_status, set_holding_response = alma_api.set_holding(mms_id,
new_holding_id, changed_holding)
if set_holding_status == 'Error':
log_module.error('{} :: Echec :: {}'.format(new_holding_id,
set_holding_response))
report.write('{}\tErreur Ecriture Holding\t{}\n'.format(barcode,
set_holding_response))
continue
log_module.debug(set_holding_response)
log_module.info('{} :: Succes :: La holding a été mise à jour'.format(
new_holding_id))
report.close
multi_call_report.close
log_module.info('FIN DU TRAITEMENT')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
SERVICE = 'Recotation_en_masse'
LOGS_LEVEL = 'INFO'
LOGS_DIR = os.getenv('LOGS_PATH')
LIBRARY_CODE = 1601900000
REGION = 'EU'
INSTITUTION = 'ub'
API_KEY = os.getenv('PROD_UB_BIB_API')
FILE_NAME = 'Dewey 20201218 cotes OE Scoop V3'
IN_FILE = '/media/sf_Partage_LouxBox/{}.csv'.format(FILE_NAME)
OUT_FILE = '/media/sf_Partage_LouxBox/{}_Rapport.csv'.format(FILE_NAME)
CALL_ERROR_FILE = '/media/sf_Partage_LouxBox/{}_Anomalies_Cotes.csv'.format(
FILE_NAME)
def get_encoding_type(file):
with open(file, 'rb') as f:
rawdata = f.read()
return detect(rawdata)['encoding']
def item_change_location(item, location, call):
"""Change location and remove holdinds infos
Arguments:
item {str} -- xml response of get item ws
location {str} -- new location_code
call {str} -- new call
Returns:
[str] -- mms_id, holding_id, pid
"""
mms_id, holding_id, pid = item.find('.//mms_id').text, item.find(
'.//holding_id').text, item.find('.//pid').text
item.find('.//item_data/location').text = location
item.find('.//item_data/alternative_call_number').text = ''
item.find('.//item_data/alternative_call_number_type').text = ''
holding_data = item.find('.//holding_data')
item.remove(holding_data)
if mms_id in processed_record_dict:
if location_code in processed_record_dict[mms_id]:
if processed_record_dict[mms_id][location_code] != call:
multi_call_report.write('{}\n'.format(barcode))
item.find('.//item_data/alternative_call_number').text = call
return mms_id, holding_id, pid
def update_holding_data(holding, new_call):
"""Change call (852$$h) and reset call type (852 fiest indicator)
Arguments:
holding {str} -- response of get holding ws
new_call {str} -- new value for call subfield
Returns:
str -- changed data
"""
holding_data = ET.fromstring(holding)
location_field = holding_data.find(".//datafield[@tag='852']")
location_field.set('ind1', ' ')
call_subfield = holding_data.find(
".//datafield[@tag='852']/subfield[@code='h']")
call_subfield.text = new_call
return ET.tostring(holding_data)
logs.init_logs(LOGS_DIR, SERVICE, LOGS_LEVEL)
log_module = logging.getLogger(SERVICE)
conf = Alma_Apis.Alma(apikey=API_KEY, region='EU', service=SERVICE)
alma_api = Alma_Apis_Records.AlmaRecords(apikey=API_KEY, region=REGION,
service=SERVICE)
locations_dict = conf.get_locations(LIBRARY_CODE)
log_module.info(
'Liste des localisation chargée pour la bibliothèque {} :: Main :: Début du traitement'
.format(LIBRARY_CODE))
report = open(OUT_FILE, 'w', encoding='utf-8')
report.write('Code-barres\tStatut\tMessage\n')
processed_record_dict = {}
toprocess_holding_dict = {}
multi_call_report = open(CALL_ERROR_FILE, 'w', encoding='utf-8')
multi_call_report.write('code-barres\n')
from_codec = get_encoding_type(IN_FILE)
with open(IN_FILE, 'r', encoding=from_codec, newline='') as f:
reader = csv.reader(f, delimiter=';')
headers = next(reader)
for row in reader:
if len(row) < 2:
continue
barcode = row[0]
if row[1] is None or row[1] == '':
log_module.error('{} :: Echec :: pas de cote fournie'.format(
barcode))
report.write('{}\tErreur Fichier\tPas de cote fournie\n'.format
(barcode))
continue
call = row[1].upper()
if row[3] is None or row[3] == '':
log_module.error('{} :: Echec :: pas de localisation fournie'.
format(barcode))
report.write('{}\tErreur Fichier\tPas de localisation fournie\n'
.format(barcode))
continue
if row[3] not in locations_dict:
log_module.error(
'{} :: Echec :: La localisation {} est inconnue dans Alma'.
format(barcode, row[3]))
report.write(
"{}\tErreur Fichier\tLa localisation '{}' est inconnue dans Alma\n"
.format(barcode, row[3]))
continue
location_code = locations_dict[row[3]]
log_module.debug('{} :: Succes :: A affecter dans la localisation {}'
.format(barcode, location_code))
status, response = alma_api.get_item_with_barcode(barcode)
if status == 'Error':
log_module.error('{} :: Echec :: {}'.format(barcode, response))
report.write('{}\tErreur Retrouve Exemplaire\t{}\n'.format(
barcode, response))
continue
item = ET.fromstring(response)
mms_id, old_holding_id, item_id = item_change_location(item,
location_code, call)
set_status, set_response = alma_api.set_item(mms_id, old_holding_id,
item_id, ET.tostring(item))
log_module.debug(set_response)
if set_status == 'Error':
log_module.error('{} :: Echec :: {}'.format(barcode, set_response))
report.write('{}\tErreur Mise à jour Exemplaire\t{}\n'.format(
barcode, set_response))
continue
changed_item = ET.fromstring(set_response)
new_holding_id = changed_item.find('.//holding_id').text
processed_record_dict[mms_id] = {location_code: call}
if new_holding_id not in toprocess_holding_dict:
toprocess_holding_dict[new_holding_id] = {'call': call,
'barcode': barcode}
log_module.info(
"{} :: Succes :: L'exemplaire est maintenant rattaché à la Holding {}"
.format(barcode, new_holding_id))
log_module.info('FIN DU DEPLACEMENT DES EXEMPLAIRES')
log_module.info('DEBUT DE LA MODIFICATION DES HOLDINGS')
for new_holding_id in toprocess_holding_dict.keys():
call = toprocess_holding_dict[new_holding_id]['call']
barcode = toprocess_holding_dict[new_holding_id]['barcode']
get_holding_status, get_holding_response = alma_api.get_holding(mms_id,
new_holding_id)
if get_holding_status == 'Error':
log_module.error('{} :: Echec :: {}'.format(new_holding_id,
get_holding_response))
report.write('{}\tErreur Retrouve Holding\t{}\n'.format(barcode,
get_holding_response))
continue
changed_holding = update_holding_data(get_holding_response, call)
set_holding_status, set_holding_response = alma_api.set_holding(mms_id,
new_holding_id, changed_holding)
if set_holding_status == 'Error':
log_module.error('{} :: Echec :: {}'.format(new_holding_id,
set_holding_response))
report.write('{}\tErreur Ecriture Holding\t{}\n'.format(barcode,
set_holding_response))
continue
log_module.debug(set_holding_response)
log_module.info('{} :: Succes :: La holding a été mise à jour'.format(
new_holding_id))
report.close
multi_call_report.close
log_module.info('FIN DU TRAITEMENT')
<|reserved_special_token_1|>
import os
import re
import logging
import csv
import xml.etree.ElementTree as ET
from chardet import detect
from Abes_Apis_Interface.AbesXml import AbesXml
from Alma_Apis_Interface import Alma_Apis_Records
from Alma_Apis_Interface import Alma_Apis
from logs import logs
SERVICE = 'Recotation_en_masse'
LOGS_LEVEL = 'INFO'
LOGS_DIR = os.getenv('LOGS_PATH')
LIBRARY_CODE = 1601900000
REGION = 'EU'
INSTITUTION = 'ub'
API_KEY = os.getenv('PROD_UB_BIB_API')
FILE_NAME = 'Dewey 20201218 cotes OE Scoop V3'
IN_FILE = '/media/sf_Partage_LouxBox/{}.csv'.format(FILE_NAME)
OUT_FILE = '/media/sf_Partage_LouxBox/{}_Rapport.csv'.format(FILE_NAME)
CALL_ERROR_FILE = '/media/sf_Partage_LouxBox/{}_Anomalies_Cotes.csv'.format(
FILE_NAME)
def get_encoding_type(file):
    """Return the character encoding detected by chardet for *file*."""
    with open(file, 'rb') as handle:
        raw_bytes = handle.read()
    detection = detect(raw_bytes)
    return detection['encoding']
def item_change_location(item, location, call):
    """Move *item* to *location* and detach it from its current holding.

    The item-level alternative call number is cleared because the definitive
    call number is written on the holding in a later pass.  If another item of
    the same record was already moved to the same location with a *different*
    call number, the call is kept at item level instead and the barcode is
    appended to the multi-call anomaly report.

    Relies on the module-level ``processed_record_dict``,
    ``multi_call_report`` and ``barcode``.

    Arguments:
        item {Element} -- parsed XML of the "get item" API response (mutated)
        location {str} -- new location code
        call {str} -- new call number

    Returns:
        [str] -- mms_id, holding_id, pid read from the item before the move
    """
    mms_id, holding_id, pid = item.find('.//mms_id').text, item.find(
        './/holding_id').text, item.find('.//pid').text
    item.find('.//item_data/location').text = location
    # Clear the item-level call number; it will live on the holding instead.
    item.find('.//item_data/alternative_call_number').text = ''
    item.find('.//item_data/alternative_call_number_type').text = ''
    # Strip the holding data so the update does not echo it back.
    holding_data = item.find('.//holding_data')
    item.remove(holding_data)
    if mms_id in processed_record_dict:
        # Bug fix: compare against the ``location`` parameter instead of the
        # module-level ``location_code`` the original implicitly relied on
        # (identical value at the current call site, but a latent trap).
        if location in processed_record_dict[mms_id]:
            if processed_record_dict[mms_id][location] != call:
                multi_call_report.write('{}\n'.format(barcode))
                item.find('.//item_data/alternative_call_number').text = call
    return mms_id, holding_id, pid
def update_holding_data(holding, new_call):
    """Set the holding call number (852 $h) and blank the 852 first indicator.

    Arguments:
        holding {str} -- XML payload returned by the "get holding" service
        new_call {str} -- value to write into the call subfield

    Returns:
        bytes -- the serialized, modified holding record
    """
    record = ET.fromstring(holding)
    field_852 = record.find(".//datafield[@tag='852']")
    field_852.set('ind1', ' ')
    subfield_h = record.find(".//datafield[@tag='852']/subfield[@code='h']")
    subfield_h.text = new_call
    return ET.tostring(record)
# --- Setup: logging, API clients, location map, report files --------------
logs.init_logs(LOGS_DIR, SERVICE, LOGS_LEVEL)
log_module = logging.getLogger(SERVICE)
conf = Alma_Apis.Alma(apikey=API_KEY, region='EU', service=SERVICE)
alma_api = Alma_Apis_Records.AlmaRecords(apikey=API_KEY, region=REGION,
    service=SERVICE)
# Map of location labels -> location codes for the configured library.
locations_dict = conf.get_locations(LIBRARY_CODE)
log_module.info(
    'Liste des localisation chargée pour la bibliothèque {} :: Main :: Début du traitement'
    .format(LIBRARY_CODE))
# Main per-barcode report and the multi-call anomaly report.
report = open(OUT_FILE, 'w', encoding='utf-8')
report.write('Code-barres\tStatut\tMessage\n')
# mms_id -> {location_code: call} for records already processed in pass 1.
processed_record_dict = {}
# holding_id -> {'call', 'barcode'} of holdings to rewrite in pass 2.
toprocess_holding_dict = {}
multi_call_report = open(CALL_ERROR_FILE, 'w', encoding='utf-8')
multi_call_report.write('code-barres\n')
# Detect the CSV's encoding so it opens cleanly whatever produced it.
from_codec = get_encoding_type(IN_FILE)
# --- Pass 1: move every item listed in the CSV to its new location --------
with open(IN_FILE, 'r', encoding=from_codec, newline='') as f:
    reader = csv.reader(f, delimiter=';')
    headers = next(reader)
    for row in reader:
        # Skip rows too short to carry both a barcode and a call number.
        if len(row) < 2:
            continue
        barcode = row[0]
        # A call number (column 2) is mandatory.
        if row[1] is None or row[1] == '':
            log_module.error('{} :: Echec :: pas de cote fournie'.format(
                barcode))
            report.write('{}\tErreur Fichier\tPas de cote fournie\n'.format
                (barcode))
            continue
        call = row[1].upper()
        # A location label (column 4) is mandatory.
        if row[3] is None or row[3] == '':
            log_module.error('{} :: Echec :: pas de localisation fournie'.
                format(barcode))
            report.write('{}\tErreur Fichier\tPas de localisation fournie\n'
                .format(barcode))
            continue
        # Translate the human-readable location label into its Alma code.
        if row[3] not in locations_dict:
            log_module.error(
                '{} :: Echec :: La localisation {} est inconnue dans Alma'.
                format(barcode, row[3]))
            report.write(
                "{}\tErreur Fichier\tLa localisation '{}' est inconnue dans Alma\n"
                .format(barcode, row[3]))
            continue
        location_code = locations_dict[row[3]]
        log_module.debug('{} :: Succes :: A affecter dans la localisation {}'
            .format(barcode, location_code))
        # Fetch the item by barcode, rewrite its location, push it back.
        status, response = alma_api.get_item_with_barcode(barcode)
        if status == 'Error':
            log_module.error('{} :: Echec :: {}'.format(barcode, response))
            report.write('{}\tErreur Retrouve Exemplaire\t{}\n'.format(
                barcode, response))
            continue
        item = ET.fromstring(response)
        mms_id, old_holding_id, item_id = item_change_location(item,
            location_code, call)
        set_status, set_response = alma_api.set_item(mms_id, old_holding_id,
            item_id, ET.tostring(item))
        log_module.debug(set_response)
        if set_status == 'Error':
            log_module.error('{} :: Echec :: {}'.format(barcode, set_response))
            report.write('{}\tErreur Mise à jour Exemplaire\t{}\n'.format(
                barcode, set_response))
            continue
        changed_item = ET.fromstring(set_response)
        # Alma may have attached the item to a different holding; remember it
        # so pass 2 can rewrite that holding's call number.
        new_holding_id = changed_item.find('.//holding_id').text
        processed_record_dict[mms_id] = {location_code: call}
        if new_holding_id not in toprocess_holding_dict:
            toprocess_holding_dict[new_holding_id] = {'call': call,
                'barcode': barcode}
        log_module.info(
            "{} :: Succes :: L'exemplaire est maintenant rattaché à la Holding {}"
            .format(barcode, new_holding_id))
log_module.info('FIN DU DEPLACEMENT DES EXEMPLAIRES')
log_module.info('DEBUT DE LA MODIFICATION DES HOLDINGS')
# --- Pass 2: rewrite the call number (852 $h) on each touched holding -----
for new_holding_id in toprocess_holding_dict.keys():
    call = toprocess_holding_dict[new_holding_id]['call']
    barcode = toprocess_holding_dict[new_holding_id]['barcode']
    # NOTE(review): ``mms_id`` is the leftover value from the last item of
    # pass 1, not a value stored per holding; if one run spans several
    # records this may address the wrong record -- confirm and, if needed,
    # store mms_id in ``toprocess_holding_dict`` during pass 1.
    get_holding_status, get_holding_response = alma_api.get_holding(mms_id,
        new_holding_id)
    if get_holding_status == 'Error':
        log_module.error('{} :: Echec :: {}'.format(new_holding_id,
            get_holding_response))
        report.write('{}\tErreur Retrouve Holding\t{}\n'.format(barcode,
            get_holding_response))
        continue
    changed_holding = update_holding_data(get_holding_response, call)
    set_holding_status, set_holding_response = alma_api.set_holding(mms_id,
        new_holding_id, changed_holding)
    if set_holding_status == 'Error':
        log_module.error('{} :: Echec :: {}'.format(new_holding_id,
            set_holding_response))
        report.write('{}\tErreur Ecriture Holding\t{}\n'.format(barcode,
            set_holding_response))
        continue
    log_module.debug(set_holding_response)
    log_module.info('{} :: Succes :: La holding a été mise à jour'.format(
        new_holding_id))
# Bug fix: the original referenced ``report.close`` /
# ``multi_call_report.close`` without calling them, so neither report file
# was ever closed (and buffered output could be lost on interpreter exit).
report.close()
multi_call_report.close()
log_module.info('FIN DU TRAITEMENT')
<|reserved_special_token_1|>
#!/usr/bin/python3
# -*- coding: utf-8 -*-
#Modules externes
import os
import re
import logging
import csv
import xml.etree.ElementTree as ET
from chardet import detect
#Modules maison
from Abes_Apis_Interface.AbesXml import AbesXml
from Alma_Apis_Interface import Alma_Apis_Records
from Alma_Apis_Interface import Alma_Apis
from logs import logs
# --- Configuration --------------------------------------------------------
SERVICE = "Recotation_en_masse"  # service/logger name used throughout

LOGS_LEVEL = 'INFO'
LOGS_DIR = os.getenv('LOGS_PATH')  # log directory taken from the environment

LIBRARY_CODE = 1601900000  # Alma library whose locations are loaded below

REGION = 'EU'
INSTITUTION = 'ub'
API_KEY = os.getenv('PROD_UB_BIB_API')  # production key -- never hard-code it

# Input CSV and the two output report files derived from its name.
FILE_NAME = 'Dewey 20201218 cotes OE Scoop V3'
IN_FILE = '/media/sf_Partage_LouxBox/{}.csv'.format(FILE_NAME)
OUT_FILE = '/media/sf_Partage_LouxBox/{}_Rapport.csv'.format(FILE_NAME)
CALL_ERROR_FILE = '/media/sf_Partage_LouxBox/{}_Anomalies_Cotes.csv'.format(FILE_NAME)
# get file encoding type
def get_encoding_type(file):
    """Detect and return the character encoding of *file* (via chardet)."""
    with open(file, 'rb') as fh:
        sample = fh.read()
    return detect(sample)['encoding']
def item_change_location(item,location,call):
    """Move *item* to *location* and detach it from its current holding.

    The item-level alternative call number is cleared because the definitive
    call number is written on the holding in a later pass.  If another item
    of the same record was already moved to the same location with a
    *different* call number, the call is kept at item level and the barcode
    is appended to the multi-call anomaly report (uses the module-level
    ``processed_record_dict``, ``multi_call_report`` and ``barcode``).

    Arguments:
        item {Element} -- parsed XML of the "get item" API response (mutated)
        location {str} -- new location code
        call {str} -- new call number

    Returns:
        [str] -- mms_id, holding_id, pid read from the item before the move
    """
    mms_id, holding_id, pid = item.find(".//mms_id").text, item.find(".//holding_id").text, item.find(".//pid").text
    item.find(".//item_data/location").text = location
    # Clear the call number stored at item level.
    item.find(".//item_data/alternative_call_number").text = ''
    item.find(".//item_data/alternative_call_number_type").text = ''
    # Do not send the holding data back with the update.
    holding_data = item.find(".//holding_data")
    item.remove(holding_data)
    # If another item of the same record has already been processed...
    if mms_id in processed_record_dict:
        # Bug fix: compare against the ``location`` parameter instead of the
        # module-level ``location_code`` the original implicitly relied on.
        if location in processed_record_dict[mms_id]:
            # ...and the calls differ, keep the call on the item itself.
            if processed_record_dict[mms_id][location] != call:
                multi_call_report.write("{}\n".format(barcode))
                item.find(".//item_data/alternative_call_number").text = call
    return mms_id, holding_id, pid
def update_holding_data(holding,new_call):
    """Write the call number into 852 $h and blank the 852 first indicator.

    Arguments:
        holding {str} -- XML payload returned by the "get holding" service
        new_call {str} -- value to write into the call subfield

    Returns:
        bytes -- the serialized, modified holding record
    """
    tree = ET.fromstring(holding)
    marc_852 = tree.find(".//datafield[@tag='852']")
    marc_852.set('ind1', ' ')
    call_node = tree.find(".//datafield[@tag='852']/subfield[@code='h']")
    call_node.text = new_call
    return ET.tostring(tree)
# Initialise logging and the Alma API clients.
logs.init_logs(LOGS_DIR,SERVICE,LOGS_LEVEL)
log_module = logging.getLogger(SERVICE)
conf = Alma_Apis.Alma(apikey=API_KEY, region='EU', service=SERVICE)
alma_api = Alma_Apis_Records.AlmaRecords(apikey=API_KEY, region=REGION, service=SERVICE)
# Load all locations of the library into a dictionary (label -> code).
locations_dict = conf.get_locations(LIBRARY_CODE)
log_module.info("Liste des localisation chargée pour la bibliothèque {} :: Main :: Début du traitement".format(LIBRARY_CODE))
# Main per-barcode report and the multi-call anomaly report.
report = open(OUT_FILE, "w", encoding='utf-8')
report.write("Code-barres\tStatut\tMessage\n")
# mms_id -> {location_code: call} for records already processed in pass 1.
processed_record_dict = {}
# holding_id -> {'call', 'barcode'} of holdings to rewrite in pass 2.
toprocess_holding_dict = {}
multi_call_report = open(CALL_ERROR_FILE, "w", encoding='utf-8')
multi_call_report.write("code-barres\n")
###Update item sequence (pass 1: move each item to its new location)
# ######################
# Detect the CSV's encoding so it opens cleanly whatever produced it.
from_codec = get_encoding_type(IN_FILE)
with open(IN_FILE, 'r', encoding=from_codec, newline='') as f:
    reader = csv.reader(f, delimiter=';')
    headers = next(reader)
    # We read the file row by row: barcode;call;?;location label
    for row in reader:
        # Skip rows too short to carry both a barcode and a call number.
        if len(row) < 2:
            continue
        barcode = row[0]
        # Test if new call is defined
        if row[1] is None or row[1] == '':
            log_module.error("{} :: Echec :: pas de cote fournie".format(barcode))
            report.write("{}\tErreur Fichier\tPas de cote fournie\n".format(barcode))
            continue
        call = row[1].upper()
        # Test if new localisation is defined
        if row[3] is None or row[3] == '':
            log_module.error("{} :: Echec :: pas de localisation fournie".format(barcode))
            report.write("{}\tErreur Fichier\tPas de localisation fournie\n".format(barcode))
            continue
        # log_module.info("{} :: Main :: Début du traitement".format(barcode))
        # Transform the location label into its Alma location code
        if row[3] not in locations_dict:
            log_module.error("{} :: Echec :: La localisation {} est inconnue dans Alma".format(barcode,row[3]))
            report.write("{}\tErreur Fichier\tLa localisation '{}' est inconnue dans Alma\n".format(barcode,row[3]))
            continue
        location_code = locations_dict[row[3]]
        log_module.debug("{} :: Succes :: A affecter dans la localisation {}".format(barcode,location_code))
        # Get the item data by barcode
        status, response = alma_api.get_item_with_barcode(barcode)
        if status == 'Error':
            log_module.error("{} :: Echec :: {}".format(barcode,response))
            report.write("{}\tErreur Retrouve Exemplaire\t{}\n".format(barcode,response))
            continue
        # Change location and remove holdings infos
        item = ET.fromstring(response)
        mms_id, old_holding_id,item_id = item_change_location(item,location_code, call)
        # log_module.debug("{} :: {} - {} - {}".format(barcode,mms_id,old_holding_id,item_id))
        # Update the item in Alma
        set_status, set_response = alma_api.set_item(mms_id, old_holding_id,item_id,ET.tostring(item))
        log_module.debug(set_response)
        if set_status == 'Error':
            log_module.error("{} :: Echec :: {}".format(barcode,set_response))
            report.write("{}\tErreur Mise à jour Exemplaire\t{}\n".format(barcode,set_response))
            continue
        changed_item = ET.fromstring(set_response)
        # Alma may have attached the item to a different holding; remember it
        # so pass 2 can rewrite that holding's call number.
        new_holding_id = changed_item.find(".//holding_id").text
        processed_record_dict[mms_id] = {
            location_code: call
        }
        if new_holding_id not in toprocess_holding_dict:
            toprocess_holding_dict[new_holding_id] = {
                'call' : call,
                'barcode': barcode
            }
        log_module.info("{} :: Succes :: L'exemplaire est maintenant rattaché à la Holding {}".format(barcode,new_holding_id))
log_module.info("FIN DU DEPLACEMENT DES EXEMPLAIRES")
###Update new holding sequence (pass 2: rewrite 852 $h on touched holdings)
# ############################
log_module.info("DEBUT DE LA MODIFICATION DES HOLDINGS")
for new_holding_id in toprocess_holding_dict.keys():
    call = toprocess_holding_dict[new_holding_id]['call']
    barcode = toprocess_holding_dict[new_holding_id]['barcode']
    # NOTE(review): ``mms_id`` is the leftover value from the last item of
    # pass 1, not a value stored per holding; if one run spans several
    # records this may address the wrong record -- confirm and, if needed,
    # store mms_id in ``toprocess_holding_dict`` during pass 1.
    # Get the holding the item now belongs to
    get_holding_status, get_holding_response = alma_api.get_holding(mms_id, new_holding_id)
    if get_holding_status == 'Error':
        log_module.error("{} :: Echec :: {}".format(new_holding_id,get_holding_response))
        report.write("{}\tErreur Retrouve Holding\t{}\n".format(barcode,get_holding_response))
        continue
    changed_holding = update_holding_data(get_holding_response,call)
    # Update the holding in Alma
    set_holding_status, set_holding_response = alma_api.set_holding(mms_id, new_holding_id,changed_holding)
    if set_holding_status == 'Error':
        log_module.error("{} :: Echec :: {}".format(new_holding_id,set_holding_response))
        report.write("{}\tErreur Ecriture Holding\t{}\n".format(barcode,set_holding_response))
        continue
    log_module.debug(set_holding_response)
    log_module.info("{} :: Succes :: La holding a été mise à jour".format(new_holding_id))
# Bug fix: the original referenced ``report.close`` /
# ``multi_call_report.close`` without calling them, so neither report file
# was ever closed.
report.close()
multi_call_report.close()
log_module.info("FIN DU TRAITEMENT")
|
flexible
|
{
"blob_id": "1f94ef0aae1128089b34fc952766cc3927677cdf",
"index": 5698,
"step-1": "<mask token>\n\n\ndef get_encoding_type(file):\n with open(file, 'rb') as f:\n rawdata = f.read()\n return detect(rawdata)['encoding']\n\n\ndef item_change_location(item, location, call):\n \"\"\"Change location and remove holdinds infos\n \n Arguments:\n item {str} -- xml response of get item ws\n location {str} -- new location_code\n call {str} -- new call\n\n Returns:\n [str] -- mms_id, holding_id, pid\n \"\"\"\n mms_id, holding_id, pid = item.find('.//mms_id').text, item.find(\n './/holding_id').text, item.find('.//pid').text\n item.find('.//item_data/location').text = location\n item.find('.//item_data/alternative_call_number').text = ''\n item.find('.//item_data/alternative_call_number_type').text = ''\n holding_data = item.find('.//holding_data')\n item.remove(holding_data)\n if mms_id in processed_record_dict:\n if location_code in processed_record_dict[mms_id]:\n if processed_record_dict[mms_id][location_code] != call:\n multi_call_report.write('{}\\n'.format(barcode))\n item.find('.//item_data/alternative_call_number').text = call\n return mms_id, holding_id, pid\n\n\ndef update_holding_data(holding, new_call):\n \"\"\"Change call (852$$h) and reset call type (852 fiest indicator)\n \n Arguments:\n holding {str} -- response of get holding ws \n new_call {str} -- new value for call subfield\n \n Returns:\n str -- changed data\n \"\"\"\n holding_data = ET.fromstring(holding)\n location_field = holding_data.find(\".//datafield[@tag='852']\")\n location_field.set('ind1', ' ')\n call_subfield = holding_data.find(\n \".//datafield[@tag='852']/subfield[@code='h']\")\n call_subfield.text = new_call\n return ET.tostring(holding_data)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_encoding_type(file):\n with open(file, 'rb') as f:\n rawdata = f.read()\n return detect(rawdata)['encoding']\n\n\ndef item_change_location(item, location, call):\n \"\"\"Change location and remove holdinds infos\n \n Arguments:\n item {str} -- xml response of get item ws\n location {str} -- new location_code\n call {str} -- new call\n\n Returns:\n [str] -- mms_id, holding_id, pid\n \"\"\"\n mms_id, holding_id, pid = item.find('.//mms_id').text, item.find(\n './/holding_id').text, item.find('.//pid').text\n item.find('.//item_data/location').text = location\n item.find('.//item_data/alternative_call_number').text = ''\n item.find('.//item_data/alternative_call_number_type').text = ''\n holding_data = item.find('.//holding_data')\n item.remove(holding_data)\n if mms_id in processed_record_dict:\n if location_code in processed_record_dict[mms_id]:\n if processed_record_dict[mms_id][location_code] != call:\n multi_call_report.write('{}\\n'.format(barcode))\n item.find('.//item_data/alternative_call_number').text = call\n return mms_id, holding_id, pid\n\n\ndef update_holding_data(holding, new_call):\n \"\"\"Change call (852$$h) and reset call type (852 fiest indicator)\n \n Arguments:\n holding {str} -- response of get holding ws \n new_call {str} -- new value for call subfield\n \n Returns:\n str -- changed data\n \"\"\"\n holding_data = ET.fromstring(holding)\n location_field = holding_data.find(\".//datafield[@tag='852']\")\n location_field.set('ind1', ' ')\n call_subfield = holding_data.find(\n \".//datafield[@tag='852']/subfield[@code='h']\")\n call_subfield.text = new_call\n return ET.tostring(holding_data)\n\n\nlogs.init_logs(LOGS_DIR, SERVICE, LOGS_LEVEL)\n<mask token>\nlog_module.info(\n 'Liste des localisation chargée pour la bibliothèque {} :: Main :: Début du traitement'\n .format(LIBRARY_CODE))\n<mask token>\nreport.write('Code-barres\\tStatut\\tMessage\\n')\n<mask 
token>\nmulti_call_report.write('code-barres\\n')\n<mask token>\nwith open(IN_FILE, 'r', encoding=from_codec, newline='') as f:\n reader = csv.reader(f, delimiter=';')\n headers = next(reader)\n for row in reader:\n if len(row) < 2:\n continue\n barcode = row[0]\n if row[1] is None or row[1] == '':\n log_module.error('{} :: Echec :: pas de cote fournie'.format(\n barcode))\n report.write('{}\\tErreur Fichier\\tPas de cote fournie\\n'.format\n (barcode))\n continue\n call = row[1].upper()\n if row[3] is None or row[3] == '':\n log_module.error('{} :: Echec :: pas de localisation fournie'.\n format(barcode))\n report.write('{}\\tErreur Fichier\\tPas de localisation fournie\\n'\n .format(barcode))\n continue\n if row[3] not in locations_dict:\n log_module.error(\n '{} :: Echec :: La localisation {} est inconnue dans Alma'.\n format(barcode, row[3]))\n report.write(\n \"{}\\tErreur Fichier\\tLa localisation '{}' est inconnue dans Alma\\n\"\n .format(barcode, row[3]))\n continue\n location_code = locations_dict[row[3]]\n log_module.debug('{} :: Succes :: A affecter dans la localisation {}'\n .format(barcode, location_code))\n status, response = alma_api.get_item_with_barcode(barcode)\n if status == 'Error':\n log_module.error('{} :: Echec :: {}'.format(barcode, response))\n report.write('{}\\tErreur Retrouve Exemplaire\\t{}\\n'.format(\n barcode, response))\n continue\n item = ET.fromstring(response)\n mms_id, old_holding_id, item_id = item_change_location(item,\n location_code, call)\n set_status, set_response = alma_api.set_item(mms_id, old_holding_id,\n item_id, ET.tostring(item))\n log_module.debug(set_response)\n if set_status == 'Error':\n log_module.error('{} :: Echec :: {}'.format(barcode, set_response))\n report.write('{}\\tErreur Mise à jour Exemplaire\\t{}\\n'.format(\n barcode, set_response))\n continue\n changed_item = ET.fromstring(set_response)\n new_holding_id = changed_item.find('.//holding_id').text\n processed_record_dict[mms_id] = {location_code: 
call}\n if new_holding_id not in toprocess_holding_dict:\n toprocess_holding_dict[new_holding_id] = {'call': call,\n 'barcode': barcode}\n log_module.info(\n \"{} :: Succes :: L'exemplaire est maintenant rattaché à la Holding {}\"\n .format(barcode, new_holding_id))\nlog_module.info('FIN DU DEPLACEMENT DES EXEMPLAIRES')\nlog_module.info('DEBUT DE LA MODIFICATION DES HOLDINGS')\nfor new_holding_id in toprocess_holding_dict.keys():\n call = toprocess_holding_dict[new_holding_id]['call']\n barcode = toprocess_holding_dict[new_holding_id]['barcode']\n get_holding_status, get_holding_response = alma_api.get_holding(mms_id,\n new_holding_id)\n if get_holding_status == 'Error':\n log_module.error('{} :: Echec :: {}'.format(new_holding_id,\n get_holding_response))\n report.write('{}\\tErreur Retrouve Holding\\t{}\\n'.format(barcode,\n get_holding_response))\n continue\n changed_holding = update_holding_data(get_holding_response, call)\n set_holding_status, set_holding_response = alma_api.set_holding(mms_id,\n new_holding_id, changed_holding)\n if set_holding_status == 'Error':\n log_module.error('{} :: Echec :: {}'.format(new_holding_id,\n set_holding_response))\n report.write('{}\\tErreur Ecriture Holding\\t{}\\n'.format(barcode,\n set_holding_response))\n continue\n log_module.debug(set_holding_response)\n log_module.info('{} :: Succes :: La holding a été mise à jour'.format(\n new_holding_id))\nreport.close\nmulti_call_report.close\nlog_module.info('FIN DU TRAITEMENT')\n",
"step-3": "<mask token>\nSERVICE = 'Recotation_en_masse'\nLOGS_LEVEL = 'INFO'\nLOGS_DIR = os.getenv('LOGS_PATH')\nLIBRARY_CODE = 1601900000\nREGION = 'EU'\nINSTITUTION = 'ub'\nAPI_KEY = os.getenv('PROD_UB_BIB_API')\nFILE_NAME = 'Dewey 20201218 cotes OE Scoop V3'\nIN_FILE = '/media/sf_Partage_LouxBox/{}.csv'.format(FILE_NAME)\nOUT_FILE = '/media/sf_Partage_LouxBox/{}_Rapport.csv'.format(FILE_NAME)\nCALL_ERROR_FILE = '/media/sf_Partage_LouxBox/{}_Anomalies_Cotes.csv'.format(\n FILE_NAME)\n\n\ndef get_encoding_type(file):\n with open(file, 'rb') as f:\n rawdata = f.read()\n return detect(rawdata)['encoding']\n\n\ndef item_change_location(item, location, call):\n \"\"\"Change location and remove holdinds infos\n \n Arguments:\n item {str} -- xml response of get item ws\n location {str} -- new location_code\n call {str} -- new call\n\n Returns:\n [str] -- mms_id, holding_id, pid\n \"\"\"\n mms_id, holding_id, pid = item.find('.//mms_id').text, item.find(\n './/holding_id').text, item.find('.//pid').text\n item.find('.//item_data/location').text = location\n item.find('.//item_data/alternative_call_number').text = ''\n item.find('.//item_data/alternative_call_number_type').text = ''\n holding_data = item.find('.//holding_data')\n item.remove(holding_data)\n if mms_id in processed_record_dict:\n if location_code in processed_record_dict[mms_id]:\n if processed_record_dict[mms_id][location_code] != call:\n multi_call_report.write('{}\\n'.format(barcode))\n item.find('.//item_data/alternative_call_number').text = call\n return mms_id, holding_id, pid\n\n\ndef update_holding_data(holding, new_call):\n \"\"\"Change call (852$$h) and reset call type (852 fiest indicator)\n \n Arguments:\n holding {str} -- response of get holding ws \n new_call {str} -- new value for call subfield\n \n Returns:\n str -- changed data\n \"\"\"\n holding_data = ET.fromstring(holding)\n location_field = holding_data.find(\".//datafield[@tag='852']\")\n location_field.set('ind1', ' ')\n 
call_subfield = holding_data.find(\n \".//datafield[@tag='852']/subfield[@code='h']\")\n call_subfield.text = new_call\n return ET.tostring(holding_data)\n\n\nlogs.init_logs(LOGS_DIR, SERVICE, LOGS_LEVEL)\nlog_module = logging.getLogger(SERVICE)\nconf = Alma_Apis.Alma(apikey=API_KEY, region='EU', service=SERVICE)\nalma_api = Alma_Apis_Records.AlmaRecords(apikey=API_KEY, region=REGION,\n service=SERVICE)\nlocations_dict = conf.get_locations(LIBRARY_CODE)\nlog_module.info(\n 'Liste des localisation chargée pour la bibliothèque {} :: Main :: Début du traitement'\n .format(LIBRARY_CODE))\nreport = open(OUT_FILE, 'w', encoding='utf-8')\nreport.write('Code-barres\\tStatut\\tMessage\\n')\nprocessed_record_dict = {}\ntoprocess_holding_dict = {}\nmulti_call_report = open(CALL_ERROR_FILE, 'w', encoding='utf-8')\nmulti_call_report.write('code-barres\\n')\nfrom_codec = get_encoding_type(IN_FILE)\nwith open(IN_FILE, 'r', encoding=from_codec, newline='') as f:\n reader = csv.reader(f, delimiter=';')\n headers = next(reader)\n for row in reader:\n if len(row) < 2:\n continue\n barcode = row[0]\n if row[1] is None or row[1] == '':\n log_module.error('{} :: Echec :: pas de cote fournie'.format(\n barcode))\n report.write('{}\\tErreur Fichier\\tPas de cote fournie\\n'.format\n (barcode))\n continue\n call = row[1].upper()\n if row[3] is None or row[3] == '':\n log_module.error('{} :: Echec :: pas de localisation fournie'.\n format(barcode))\n report.write('{}\\tErreur Fichier\\tPas de localisation fournie\\n'\n .format(barcode))\n continue\n if row[3] not in locations_dict:\n log_module.error(\n '{} :: Echec :: La localisation {} est inconnue dans Alma'.\n format(barcode, row[3]))\n report.write(\n \"{}\\tErreur Fichier\\tLa localisation '{}' est inconnue dans Alma\\n\"\n .format(barcode, row[3]))\n continue\n location_code = locations_dict[row[3]]\n log_module.debug('{} :: Succes :: A affecter dans la localisation {}'\n .format(barcode, location_code))\n status, response = 
alma_api.get_item_with_barcode(barcode)\n if status == 'Error':\n log_module.error('{} :: Echec :: {}'.format(barcode, response))\n report.write('{}\\tErreur Retrouve Exemplaire\\t{}\\n'.format(\n barcode, response))\n continue\n item = ET.fromstring(response)\n mms_id, old_holding_id, item_id = item_change_location(item,\n location_code, call)\n set_status, set_response = alma_api.set_item(mms_id, old_holding_id,\n item_id, ET.tostring(item))\n log_module.debug(set_response)\n if set_status == 'Error':\n log_module.error('{} :: Echec :: {}'.format(barcode, set_response))\n report.write('{}\\tErreur Mise à jour Exemplaire\\t{}\\n'.format(\n barcode, set_response))\n continue\n changed_item = ET.fromstring(set_response)\n new_holding_id = changed_item.find('.//holding_id').text\n processed_record_dict[mms_id] = {location_code: call}\n if new_holding_id not in toprocess_holding_dict:\n toprocess_holding_dict[new_holding_id] = {'call': call,\n 'barcode': barcode}\n log_module.info(\n \"{} :: Succes :: L'exemplaire est maintenant rattaché à la Holding {}\"\n .format(barcode, new_holding_id))\nlog_module.info('FIN DU DEPLACEMENT DES EXEMPLAIRES')\nlog_module.info('DEBUT DE LA MODIFICATION DES HOLDINGS')\nfor new_holding_id in toprocess_holding_dict.keys():\n call = toprocess_holding_dict[new_holding_id]['call']\n barcode = toprocess_holding_dict[new_holding_id]['barcode']\n get_holding_status, get_holding_response = alma_api.get_holding(mms_id,\n new_holding_id)\n if get_holding_status == 'Error':\n log_module.error('{} :: Echec :: {}'.format(new_holding_id,\n get_holding_response))\n report.write('{}\\tErreur Retrouve Holding\\t{}\\n'.format(barcode,\n get_holding_response))\n continue\n changed_holding = update_holding_data(get_holding_response, call)\n set_holding_status, set_holding_response = alma_api.set_holding(mms_id,\n new_holding_id, changed_holding)\n if set_holding_status == 'Error':\n log_module.error('{} :: Echec :: {}'.format(new_holding_id,\n 
set_holding_response))\n report.write('{}\\tErreur Ecriture Holding\\t{}\\n'.format(barcode,\n set_holding_response))\n continue\n log_module.debug(set_holding_response)\n log_module.info('{} :: Succes :: La holding a été mise à jour'.format(\n new_holding_id))\nreport.close\nmulti_call_report.close\nlog_module.info('FIN DU TRAITEMENT')\n",
"step-4": "import os\nimport re\nimport logging\nimport csv\nimport xml.etree.ElementTree as ET\nfrom chardet import detect\nfrom Abes_Apis_Interface.AbesXml import AbesXml\nfrom Alma_Apis_Interface import Alma_Apis_Records\nfrom Alma_Apis_Interface import Alma_Apis\nfrom logs import logs\nSERVICE = 'Recotation_en_masse'\nLOGS_LEVEL = 'INFO'\nLOGS_DIR = os.getenv('LOGS_PATH')\nLIBRARY_CODE = 1601900000\nREGION = 'EU'\nINSTITUTION = 'ub'\nAPI_KEY = os.getenv('PROD_UB_BIB_API')\nFILE_NAME = 'Dewey 20201218 cotes OE Scoop V3'\nIN_FILE = '/media/sf_Partage_LouxBox/{}.csv'.format(FILE_NAME)\nOUT_FILE = '/media/sf_Partage_LouxBox/{}_Rapport.csv'.format(FILE_NAME)\nCALL_ERROR_FILE = '/media/sf_Partage_LouxBox/{}_Anomalies_Cotes.csv'.format(\n FILE_NAME)\n\n\ndef get_encoding_type(file):\n with open(file, 'rb') as f:\n rawdata = f.read()\n return detect(rawdata)['encoding']\n\n\ndef item_change_location(item, location, call):\n \"\"\"Change location and remove holdinds infos\n \n Arguments:\n item {str} -- xml response of get item ws\n location {str} -- new location_code\n call {str} -- new call\n\n Returns:\n [str] -- mms_id, holding_id, pid\n \"\"\"\n mms_id, holding_id, pid = item.find('.//mms_id').text, item.find(\n './/holding_id').text, item.find('.//pid').text\n item.find('.//item_data/location').text = location\n item.find('.//item_data/alternative_call_number').text = ''\n item.find('.//item_data/alternative_call_number_type').text = ''\n holding_data = item.find('.//holding_data')\n item.remove(holding_data)\n if mms_id in processed_record_dict:\n if location_code in processed_record_dict[mms_id]:\n if processed_record_dict[mms_id][location_code] != call:\n multi_call_report.write('{}\\n'.format(barcode))\n item.find('.//item_data/alternative_call_number').text = call\n return mms_id, holding_id, pid\n\n\ndef update_holding_data(holding, new_call):\n \"\"\"Change call (852$$h) and reset call type (852 fiest indicator)\n \n Arguments:\n holding {str} -- response 
of get holding ws \n new_call {str} -- new value for call subfield\n \n Returns:\n str -- changed data\n \"\"\"\n holding_data = ET.fromstring(holding)\n location_field = holding_data.find(\".//datafield[@tag='852']\")\n location_field.set('ind1', ' ')\n call_subfield = holding_data.find(\n \".//datafield[@tag='852']/subfield[@code='h']\")\n call_subfield.text = new_call\n return ET.tostring(holding_data)\n\n\nlogs.init_logs(LOGS_DIR, SERVICE, LOGS_LEVEL)\nlog_module = logging.getLogger(SERVICE)\nconf = Alma_Apis.Alma(apikey=API_KEY, region='EU', service=SERVICE)\nalma_api = Alma_Apis_Records.AlmaRecords(apikey=API_KEY, region=REGION,\n service=SERVICE)\nlocations_dict = conf.get_locations(LIBRARY_CODE)\nlog_module.info(\n 'Liste des localisation chargée pour la bibliothèque {} :: Main :: Début du traitement'\n .format(LIBRARY_CODE))\nreport = open(OUT_FILE, 'w', encoding='utf-8')\nreport.write('Code-barres\\tStatut\\tMessage\\n')\nprocessed_record_dict = {}\ntoprocess_holding_dict = {}\nmulti_call_report = open(CALL_ERROR_FILE, 'w', encoding='utf-8')\nmulti_call_report.write('code-barres\\n')\nfrom_codec = get_encoding_type(IN_FILE)\nwith open(IN_FILE, 'r', encoding=from_codec, newline='') as f:\n reader = csv.reader(f, delimiter=';')\n headers = next(reader)\n for row in reader:\n if len(row) < 2:\n continue\n barcode = row[0]\n if row[1] is None or row[1] == '':\n log_module.error('{} :: Echec :: pas de cote fournie'.format(\n barcode))\n report.write('{}\\tErreur Fichier\\tPas de cote fournie\\n'.format\n (barcode))\n continue\n call = row[1].upper()\n if row[3] is None or row[3] == '':\n log_module.error('{} :: Echec :: pas de localisation fournie'.\n format(barcode))\n report.write('{}\\tErreur Fichier\\tPas de localisation fournie\\n'\n .format(barcode))\n continue\n if row[3] not in locations_dict:\n log_module.error(\n '{} :: Echec :: La localisation {} est inconnue dans Alma'.\n format(barcode, row[3]))\n report.write(\n \"{}\\tErreur Fichier\\tLa 
localisation '{}' est inconnue dans Alma\\n\"\n .format(barcode, row[3]))\n continue\n location_code = locations_dict[row[3]]\n log_module.debug('{} :: Succes :: A affecter dans la localisation {}'\n .format(barcode, location_code))\n status, response = alma_api.get_item_with_barcode(barcode)\n if status == 'Error':\n log_module.error('{} :: Echec :: {}'.format(barcode, response))\n report.write('{}\\tErreur Retrouve Exemplaire\\t{}\\n'.format(\n barcode, response))\n continue\n item = ET.fromstring(response)\n mms_id, old_holding_id, item_id = item_change_location(item,\n location_code, call)\n set_status, set_response = alma_api.set_item(mms_id, old_holding_id,\n item_id, ET.tostring(item))\n log_module.debug(set_response)\n if set_status == 'Error':\n log_module.error('{} :: Echec :: {}'.format(barcode, set_response))\n report.write('{}\\tErreur Mise à jour Exemplaire\\t{}\\n'.format(\n barcode, set_response))\n continue\n changed_item = ET.fromstring(set_response)\n new_holding_id = changed_item.find('.//holding_id').text\n processed_record_dict[mms_id] = {location_code: call}\n if new_holding_id not in toprocess_holding_dict:\n toprocess_holding_dict[new_holding_id] = {'call': call,\n 'barcode': barcode}\n log_module.info(\n \"{} :: Succes :: L'exemplaire est maintenant rattaché à la Holding {}\"\n .format(barcode, new_holding_id))\nlog_module.info('FIN DU DEPLACEMENT DES EXEMPLAIRES')\nlog_module.info('DEBUT DE LA MODIFICATION DES HOLDINGS')\nfor new_holding_id in toprocess_holding_dict.keys():\n call = toprocess_holding_dict[new_holding_id]['call']\n barcode = toprocess_holding_dict[new_holding_id]['barcode']\n get_holding_status, get_holding_response = alma_api.get_holding(mms_id,\n new_holding_id)\n if get_holding_status == 'Error':\n log_module.error('{} :: Echec :: {}'.format(new_holding_id,\n get_holding_response))\n report.write('{}\\tErreur Retrouve Holding\\t{}\\n'.format(barcode,\n get_holding_response))\n continue\n changed_holding = 
update_holding_data(get_holding_response, call)\n set_holding_status, set_holding_response = alma_api.set_holding(mms_id,\n new_holding_id, changed_holding)\n if set_holding_status == 'Error':\n log_module.error('{} :: Echec :: {}'.format(new_holding_id,\n set_holding_response))\n report.write('{}\\tErreur Ecriture Holding\\t{}\\n'.format(barcode,\n set_holding_response))\n continue\n log_module.debug(set_holding_response)\n log_module.info('{} :: Succes :: La holding a été mise à jour'.format(\n new_holding_id))\nreport.close\nmulti_call_report.close\nlog_module.info('FIN DU TRAITEMENT')\n",
"step-5": "#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n#Modules externes\nimport os\nimport re\nimport logging\nimport csv\nimport xml.etree.ElementTree as ET\nfrom chardet import detect\n\n#Modules maison\nfrom Abes_Apis_Interface.AbesXml import AbesXml\nfrom Alma_Apis_Interface import Alma_Apis_Records\nfrom Alma_Apis_Interface import Alma_Apis\nfrom logs import logs\n\nSERVICE = \"Recotation_en_masse\"\n\nLOGS_LEVEL = 'INFO'\nLOGS_DIR = os.getenv('LOGS_PATH')\n\nLIBRARY_CODE = 1601900000\n\nREGION = 'EU'\nINSTITUTION = 'ub'\nAPI_KEY = os.getenv('PROD_UB_BIB_API')\n\nFILE_NAME = 'Dewey 20201218 cotes OE Scoop V3'\nIN_FILE = '/media/sf_Partage_LouxBox/{}.csv'.format(FILE_NAME)\nOUT_FILE = '/media/sf_Partage_LouxBox/{}_Rapport.csv'.format(FILE_NAME)\nCALL_ERROR_FILE = '/media/sf_Partage_LouxBox/{}_Anomalies_Cotes.csv'.format(FILE_NAME)\n\n# get file encoding type\ndef get_encoding_type(file):\n with open(file, 'rb') as f:\n rawdata = f.read()\n return detect(rawdata)['encoding']\n\ndef item_change_location(item,location,call):\n \"\"\"Change location and remove holdinds infos\n \n Arguments:\n item {str} -- xml response of get item ws\n location {str} -- new location_code\n call {str} -- new call\n\n Returns:\n [str] -- mms_id, holding_id, pid\n \"\"\"\n mms_id, holding_id, pid = item.find(\".//mms_id\").text, item.find(\".//holding_id\").text, item.find(\".//pid\").text\n item.find(\".//item_data/location\").text = location\n # On nettoie la cote présente au niveau de l'exemplaire\n item.find(\".//item_data/alternative_call_number\").text = ''\n item.find(\".//item_data/alternative_call_number_type\").text = ''\n # On ne renvoie pas les infos de la holdings\n holding_data = item.find(\".//holding_data\")\n item.remove(holding_data)\n # Si un autre exemplaire lié à la même notice a déjà été traité\n if mms_id in processed_record_dict:\n # Si la localisation était la même que celle de l'exemplaire déjà traité\n if location_code in processed_record_dict[mms_id]:\n # 
Si les cotes sont différentes alors on créé la cote sous l'exemplaire\n if processed_record_dict[mms_id][location_code] != call:\n multi_call_report.write(\"{}\\n\".format(barcode))\n item.find(\".//item_data/alternative_call_number\").text = call\n return mms_id, holding_id, pid\n\ndef update_holding_data(holding,new_call):\n \"\"\"Change call (852$$h) and reset call type (852 fiest indicator)\n \n Arguments:\n holding {str} -- response of get holding ws \n new_call {str} -- new value for call subfield\n \n Returns:\n str -- changed data\n \"\"\"\n holding_data = ET.fromstring(holding)\n location_field =holding_data.find(\".//datafield[@tag='852']\")\n location_field.set('ind1', ' ')\n call_subfield = holding_data.find(\".//datafield[@tag='852']/subfield[@code='h']\")\n call_subfield.text = new_call\n return ET.tostring(holding_data)\n\n#Init logger\nlogs.init_logs(LOGS_DIR,SERVICE,LOGS_LEVEL)\nlog_module = logging.getLogger(SERVICE)\n\n\nconf = Alma_Apis.Alma(apikey=API_KEY, region='EU', service=SERVICE)\nalma_api = Alma_Apis_Records.AlmaRecords(apikey=API_KEY, region=REGION, service=SERVICE)\n\n#We get all the locations for the library in a dictionnary\nlocations_dict = conf.get_locations(LIBRARY_CODE)\nlog_module.info(\"Liste des localisation chargée pour la bibliothèque {} :: Main :: Début du traitement\".format(LIBRARY_CODE))\n\nreport = open(OUT_FILE, \"w\", encoding='utf-8')\nreport.write(\"Code-barres\\tStatut\\tMessage\\n\")\n\nprocessed_record_dict = {}\ntoprocess_holding_dict = {}\nmulti_call_report = open(CALL_ERROR_FILE, \"w\", encoding='utf-8')\nmulti_call_report.write(\"code-barres\\n\")\n\n###Update item sequence\n# ###################### \nfrom_codec = get_encoding_type(IN_FILE)\nwith open(IN_FILE, 'r', encoding=from_codec, newline='') as f:\n reader = csv.reader(f, delimiter=';')\n headers = next(reader)\n # We read the file\n for row in reader:\n if len(row) < 2:\n continue\n barcode = row[0]\n # Test if new call is defined\n if row[1] is None 
or row[1] == '':\n log_module.error(\"{} :: Echec :: pas de cote fournie\".format(barcode))\n report.write(\"{}\\tErreur Fichier\\tPas de cote fournie\\n\".format(barcode))\n continue\n call = row[1].upper()\n # Test if new localisation is defined\n if row[3] is None or row[3] == '':\n log_module.error(\"{} :: Echec :: pas de localisation fournie\".format(barcode))\n report.write(\"{}\\tErreur Fichier\\tPas de localisation fournie\\n\".format(barcode))\n continue\n # log_module.info(\"{} :: Main :: Début du traitement\".format(barcode))\n # Transform location label in location code\n if row[3] not in locations_dict:\n log_module.error(\"{} :: Echec :: La localisation {} est inconnue dans Alma\".format(barcode,row[3]))\n report.write(\"{}\\tErreur Fichier\\tLa localisation '{}' est inconnue dans Alma\\n\".format(barcode,row[3]))\n continue\n location_code = locations_dict[row[3]]\n log_module.debug(\"{} :: Succes :: A affecter dans la localisation {}\".format(barcode,location_code))\n \n\n # Get datas item with barcode\n status, response = alma_api.get_item_with_barcode(barcode)\n if status == 'Error':\n log_module.error(\"{} :: Echec :: {}\".format(barcode,response))\n report.write(\"{}\\tErreur Retrouve Exemplaire\\t{}\\n\".format(barcode,response))\n continue\n # Change location and remove holdinds infos\n item = ET.fromstring(response)\n mms_id, old_holding_id,item_id = item_change_location(item,location_code, call)\n # log_module.debug(\"{} :: {} - {} - {}\".format(barcode,mms_id,old_holding_id,item_id))\n # Upadte item in Alma\n set_status, set_response = alma_api.set_item(mms_id, old_holding_id,item_id,ET.tostring(item))\n log_module.debug(set_response)\n if set_status == 'Error':\n log_module.error(\"{} :: Echec :: {}\".format(barcode,set_response))\n report.write(\"{}\\tErreur Mise à jour Exemplaire\\t{}\\n\".format(barcode,set_response))\n continue\n changed_item = ET.fromstring(set_response)\n new_holding_id = changed_item.find(\".//holding_id\").text\n 
processed_record_dict[mms_id] = {\n location_code: call\n }\n if new_holding_id not in toprocess_holding_dict:\n toprocess_holding_dict[new_holding_id] = {\n 'call' : call,\n 'barcode': barcode\n }\n log_module.info(\"{} :: Succes :: L'exemplaire est maintenant rattaché à la Holding {}\".format(barcode,new_holding_id))\nlog_module.info(\"FIN DU DEPLACEMENT DES EXEMPLAIRES\")\n\n###Update new holding sequence\n# ############################\nlog_module.info(\"DEBUT DE LA MODIFICATION DES HOLDINGS\")\nfor new_holding_id in toprocess_holding_dict.keys():\n call = toprocess_holding_dict[new_holding_id]['call']\n barcode = toprocess_holding_dict[new_holding_id]['barcode']\n # Get new holding\n get_holding_status, get_holding_response = alma_api.get_holding(mms_id, new_holding_id)\n if get_holding_status == 'Error':\n log_module.error(\"{} :: Echec :: {}\".format(new_holding_id,get_holding_response))\n report.write(\"{}\\tErreur Retrouve Holding\\t{}\\n\".format(barcode,get_holding_response))\n continue\n changed_holding = update_holding_data(get_holding_response,call)\n #Update new Holding in Alma\n set_holding_status, set_holding_response = alma_api.set_holding(mms_id, new_holding_id,changed_holding)\n if set_holding_status == 'Error':\n log_module.error(\"{} :: Echec :: {}\".format(new_holding_id,set_holding_response))\n report.write(\"{}\\tErreur Ecriture Holding\\t{}\\n\".format(barcode,set_holding_response))\n continue\n log_module.debug(set_holding_response)\n log_module.info(\"{} :: Succes :: La holding a été mise à jour\".format(new_holding_id))\n\nreport.close\n\nmulti_call_report.close\nlog_module.info(\"FIN DU TRAITEMENT\")\n\n ",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
class OneCase(object):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __init__(self, case_path, *args, **kwargs):
self._case_path = str(case_path)
self._case_dict = {}
self._step_result = []
self._step_msg = []
self._passed = False
def run(self):
self.load_case(self._case_path)
self.satisfy_precondition(self._case_dict)
self.exec_steps(self._case_dict)
self.save_result()
def load_case(self, case_path):
self._case_dict = Case(file_path=case_path).case_dict
def satisfy_precondition(self, case_dict):
pre = case_dict.get('precondition')
if pre:
func_list = pre.get('prefunction')
for func in func_list:
_func = eval(func.get('func_name'))
_args = {_.get('name'): trans_type(_.get('value'), _.get(
'type')) for _ in func.get('args')}
_func(**_args)
check_dependency(pre.get('dependency'))
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def save_result(self):
"""
save result for this test
1) print to console
2) record to mysql
3) upload to testlink
"""
self.print_to_console()
def print_to_console(self):
if self._passed:
print('All steps passed for case: {}'.format(self._case_dict.
get('name')))
else:
err('Failed on case: {}'.format(self._case_dict.get('name')))
step_length = range(1, len(self._step_result) + 1)
for i, result, msg in zip(step_length, self._step_result, self.
_step_msg):
if result == self.FAIL:
err('Step {} failed for reason:\n\t{}'.format(i, msg))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class OneCase(object):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __init__(self, case_path, *args, **kwargs):
self._case_path = str(case_path)
self._case_dict = {}
self._step_result = []
self._step_msg = []
self._passed = False
def run(self):
self.load_case(self._case_path)
self.satisfy_precondition(self._case_dict)
self.exec_steps(self._case_dict)
self.save_result()
def load_case(self, case_path):
self._case_dict = Case(file_path=case_path).case_dict
def satisfy_precondition(self, case_dict):
pre = case_dict.get('precondition')
if pre:
func_list = pre.get('prefunction')
for func in func_list:
_func = eval(func.get('func_name'))
_args = {_.get('name'): trans_type(_.get('value'), _.get(
'type')) for _ in func.get('args')}
_func(**_args)
check_dependency(pre.get('dependency'))
def check_dependency(self):
pass
def exec_steps(self, case_dict):
"""
"""
for step in case_dict.get('step'):
_input = step.get('input')
res = {}
for protocol, _args in _input.iteritems():
req = get_conn(protocol)(**_args)
res = req.response
_output = step.get('output')
if _output.get('strict'):
pass
try:
for _ in _output.get('expect'):
_var = _.get('var')
_expect_value = trans_type(_['val']['value'], _['val'][
'type'])
_real_value = res.get(_var)
if _.get('cmp') == '==':
assert _expect_value == _real_value, 'Not equal! \n\tExpect: {}\n\tGot: {}'.format(
_expect_value, _real_value)
except AssertionError as e:
self._step_result.append(self.FAIL)
self._step_msg.append(e.message)
else:
self._step_result.append(self.PASS)
self._step_msg.append('Passed!')
self._passed = all(self._step_result)
def save_result(self):
"""
save result for this test
1) print to console
2) record to mysql
3) upload to testlink
"""
self.print_to_console()
def print_to_console(self):
if self._passed:
print('All steps passed for case: {}'.format(self._case_dict.
get('name')))
else:
err('Failed on case: {}'.format(self._case_dict.get('name')))
step_length = range(1, len(self._step_result) + 1)
for i, result, msg in zip(step_length, self._step_result, self.
_step_msg):
if result == self.FAIL:
err('Step {} failed for reason:\n\t{}'.format(i, msg))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class OneCase(object):
<|reserved_special_token_0|>
PASS = True
FAIL = False
def __init__(self, case_path, *args, **kwargs):
self._case_path = str(case_path)
self._case_dict = {}
self._step_result = []
self._step_msg = []
self._passed = False
def run(self):
self.load_case(self._case_path)
self.satisfy_precondition(self._case_dict)
self.exec_steps(self._case_dict)
self.save_result()
def load_case(self, case_path):
self._case_dict = Case(file_path=case_path).case_dict
def satisfy_precondition(self, case_dict):
pre = case_dict.get('precondition')
if pre:
func_list = pre.get('prefunction')
for func in func_list:
_func = eval(func.get('func_name'))
_args = {_.get('name'): trans_type(_.get('value'), _.get(
'type')) for _ in func.get('args')}
_func(**_args)
check_dependency(pre.get('dependency'))
def check_dependency(self):
pass
def exec_steps(self, case_dict):
"""
"""
for step in case_dict.get('step'):
_input = step.get('input')
res = {}
for protocol, _args in _input.iteritems():
req = get_conn(protocol)(**_args)
res = req.response
_output = step.get('output')
if _output.get('strict'):
pass
try:
for _ in _output.get('expect'):
_var = _.get('var')
_expect_value = trans_type(_['val']['value'], _['val'][
'type'])
_real_value = res.get(_var)
if _.get('cmp') == '==':
assert _expect_value == _real_value, 'Not equal! \n\tExpect: {}\n\tGot: {}'.format(
_expect_value, _real_value)
except AssertionError as e:
self._step_result.append(self.FAIL)
self._step_msg.append(e.message)
else:
self._step_result.append(self.PASS)
self._step_msg.append('Passed!')
self._passed = all(self._step_result)
def save_result(self):
"""
save result for this test
1) print to console
2) record to mysql
3) upload to testlink
"""
self.print_to_console()
def print_to_console(self):
if self._passed:
print('All steps passed for case: {}'.format(self._case_dict.
get('name')))
else:
err('Failed on case: {}'.format(self._case_dict.get('name')))
step_length = range(1, len(self._step_result) + 1)
for i, result, msg in zip(step_length, self._step_result, self.
_step_msg):
if result == self.FAIL:
err('Step {} failed for reason:\n\t{}'.format(i, msg))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
from src.testcase.case import Case
from src.utils import *
from src.protocol.register import get_conn
from src.precondition import *
class OneCase(object):
    """
    Main flow of running one case's autotest.

    Lifecycle (see :meth:`run`): load the case file, satisfy its
    preconditions, execute every step, then save/report the result.
    """
    # Per-step outcome markers appended to ``_step_result``.
    PASS = True
    FAIL = False

    def __init__(self, case_path, *args, **kwargs):
        """Remember *case_path*; nothing is loaded until :meth:`run`."""
        self._case_path = str(case_path)
        self._case_dict = {}
        # Parallel lists: one boolean outcome and one message per step.
        self._step_result = []
        self._step_msg = []
        # True only once every executed step has passed.
        self._passed = False

    def run(self):
        """Execute the full flow for this single case."""
        self.load_case(self._case_path)
        self.satisfy_precondition(self._case_dict)
        self.exec_steps(self._case_dict)
        self.save_result()

    def load_case(self, case_path):
        """Parse the case file at *case_path* into ``self._case_dict``."""
        self._case_dict = Case(file_path=case_path).case_dict

    def satisfy_precondition(self, case_dict):
        """Run the case's precondition functions, then its dependency check.

        Each ``precondition.prefunction`` entry names a callable and its
        typed arguments.  NOTE(review): the callable is resolved with
        ``eval`` on a name taken from the case file — only run trusted
        case files.
        """
        pre = case_dict.get('precondition')
        if pre:
            for func in pre.get('prefunction'):
                _func = eval(func.get('func_name'))
                _args = {_.get('name'): trans_type(_.get('value'), _.get(
                    'type')) for _ in func.get('args')}
                _func(**_args)
            # NOTE(review): this calls a module-level ``check_dependency``
            # (presumably star-imported from src.precondition), not the
            # method stub below — confirm which one is intended.
            check_dependency(pre.get('dependency'))

    def check_dependency(self):
        """Check inter-case dependencies (not implemented yet)."""
        pass  # ToDo

    def exec_steps(self, case_dict):
        """Run each step: issue its input request(s) and compare outputs.

        A step passes when every ``expect`` entry with comparator ``==``
        matches the response; failures are recorded, not raised.
        """
        for step in case_dict.get('step'):
            _input = step.get('input')
            res = {}
            # One request per declared protocol; the last response wins.
            for protocol, _args in _input.items():  # .iteritems() was Py2-only
                req = get_conn(protocol)(**_args)
                res = req.response
            _output = step.get('output')
            if _output.get('strict'):
                pass  # ToDo: strict (exhaustive) comparison not implemented
            try:
                for _ in _output.get('expect'):
                    _var = _.get('var')
                    _expect_value = trans_type(_['val']['value'], _['val'][
                        'type'])
                    _real_value = res.get(_var)
                    # TODO: only the '==' comparator is supported; any other
                    # 'cmp' value is silently skipped.
                    if _.get('cmp') == '==':
                        assert _expect_value == _real_value, 'Not equal! \n\tExpect: {}\n\tGot: {}'.format(
                            _expect_value, _real_value)
            except AssertionError as e:
                self._step_result.append(self.FAIL)
                # str(e) is portable; e.message was Python 2-only.
                self._step_msg.append(str(e))
            else:
                self._step_result.append(self.PASS)
                self._step_msg.append('Passed!')
            self._passed = all(self._step_result)

    def save_result(self):
        """
        save result for this test
        1) print to console
        2) record to mysql
        3) upload to testlink
        """
        self.print_to_console()

    def print_to_console(self):
        """Report the case verdict (and per-step failures) on stdout."""
        if self._passed:
            print('All steps passed for case: {}'.format(self._case_dict.
                get('name')))
        else:
            err('Failed on case: {}'.format(self._case_dict.get('name')))
            step_length = range(1, len(self._step_result) + 1)
            for i, result, msg in zip(step_length, self._step_result, self.
                _step_msg):
                if result == self.FAIL:
                    err('Step {} failed for reason:\n\t{}'.format(i, msg))
if __name__ == '__main__':
    # Script entry point: run the bundled sample case end to end.
    OneCase('/Users/eacon/github/APIAutoTestFramework/case/sample.json').run()
<|reserved_special_token_1|>
from src.testcase.case import Case
from src.utils import *
from src.protocol.register import get_conn
from src.precondition import *
class OneCase(object):
    """
    Main flow of running one case's autotest.

    Lifecycle: load the case file, satisfy its preconditions, execute
    every step, then save/report the result (see :meth:`run`).
    """
    # Per-step outcome markers appended to ``_step_result``.
    PASS = True
    FAIL = False

    def __init__(self, case_path, *args, **kwargs):
        # Only the path is remembered here; parsing happens in run().
        self._case_path = str(case_path)
        self._case_dict = {}
        # Parallel lists: one boolean outcome and one message per step.
        self._step_result = []
        self._step_msg = []
        self._passed = False

    def run(self):
        """Execute the full flow for this single case."""
        self.load_case(self._case_path)
        self.satisfy_precondition(self._case_dict)
        self.exec_steps(self._case_dict)
        self.save_result()

    def load_case(self, case_path):
        """Parse the case file at *case_path* into ``self._case_dict``."""
        self._case_dict = Case(file_path=case_path).case_dict

    def satisfy_precondition(self, case_dict):
        """Run the case's precondition functions, then its dependency check."""
        pre = case_dict.get('precondition')
        if pre:
            # Pre-functions: each entry names a callable and its typed args.
            # NOTE(review): eval() on a name taken from the case file runs
            # arbitrary code — only execute trusted case files.
            func_list = pre.get('prefunction')
            for func in func_list:
                _func = eval(func.get('func_name'))
                _args = {_.get('name'): trans_type(_.get('value'), _.get('type')) for _ in func.get('args')}
                _func(**_args)
            # Dependency check.  NOTE(review): this calls a module-level
            # ``check_dependency`` (presumably star-imported from
            # src.precondition), not the method stub below — confirm intent.
            check_dependency(pre.get('dependency'))

    def check_dependency(self):
        # Inter-case dependency checking is not implemented yet.
        pass # ToDo

    def exec_steps(self, case_dict):
        """Run each step: issue its input request(s) and compare outputs.

        A step passes when every ``expect`` entry with comparator ``==``
        matches the response; failures are recorded, not raised.
        """
        for step in case_dict.get('step'):
            # Input: one request per declared protocol; the last response
            # wins.  NOTE(review): .iteritems() is Python 2-only.
            _input = step.get('input')
            res = {}
            for protocol, _args in _input.iteritems():
                req = get_conn(protocol)(**_args)
                res = req.response
            # Compare the response against the expected output.
            _output = step.get('output')
            if _output.get('strict'):
                pass # ToDo
            try:
                for _ in _output.get('expect'):
                    _var = _.get('var')
                    _expect_value = trans_type(_['val']['value'], _['val']['type'])
                    _real_value = res.get(_var)
                    # Only the '==' comparator is supported; any other 'cmp'
                    # value is silently skipped.
                    if _.get('cmp') == '==':
                        assert _expect_value == _real_value, "Not equal! \n\tExpect: {}\n\tGot: {}".format(
                            _expect_value, _real_value)
            except AssertionError as e:
                self._step_result.append(self.FAIL)
                # NOTE(review): e.message is Python 2-only; str(e) is portable.
                self._step_msg.append(e.message)
            else:
                self._step_result.append(self.PASS)
                self._step_msg.append('Passed!')
            self._passed = all(self._step_result)

    def save_result(self):
        """
        save result for this test
        1) print to console
        2) record to mysql
        3) upload to testlink
        """
        self.print_to_console()

    def print_to_console(self):
        """Report the case verdict (and per-step failures) on stdout."""
        if self._passed:
            print('All steps passed for case: {}'.format(self._case_dict.get('name')))
        else:
            err('Failed on case: {}'.format(self._case_dict.get('name')))
            step_length = range(1, len(self._step_result) + 1)
            for i, result, msg in zip(step_length, self._step_result, self._step_msg):
                if result == self.FAIL:
                    err('Step {} failed for reason:\n\t{}'.format(i, msg))
if __name__ == '__main__':
    # Script entry point: run the bundled sample case end to end.
    sample_case = OneCase('/Users/eacon/github/APIAutoTestFramework/case/sample.json')
    sample_case.run()
|
flexible
|
{
"blob_id": "f658959bf7fa5e02a577119930c9b9c1ef59f432",
"index": 2845,
"step-1": "<mask token>\n\n\nclass OneCase(object):\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, case_path, *args, **kwargs):\n self._case_path = str(case_path)\n self._case_dict = {}\n self._step_result = []\n self._step_msg = []\n self._passed = False\n\n def run(self):\n self.load_case(self._case_path)\n self.satisfy_precondition(self._case_dict)\n self.exec_steps(self._case_dict)\n self.save_result()\n\n def load_case(self, case_path):\n self._case_dict = Case(file_path=case_path).case_dict\n\n def satisfy_precondition(self, case_dict):\n pre = case_dict.get('precondition')\n if pre:\n func_list = pre.get('prefunction')\n for func in func_list:\n _func = eval(func.get('func_name'))\n _args = {_.get('name'): trans_type(_.get('value'), _.get(\n 'type')) for _ in func.get('args')}\n _func(**_args)\n check_dependency(pre.get('dependency'))\n <mask token>\n <mask token>\n\n def save_result(self):\n \"\"\"\n save result for this test\n 1) print to console\n 2) record to mysql\n 3) upload to testlink\n \"\"\"\n self.print_to_console()\n\n def print_to_console(self):\n if self._passed:\n print('All steps passed for case: {}'.format(self._case_dict.\n get('name')))\n else:\n err('Failed on case: {}'.format(self._case_dict.get('name')))\n step_length = range(1, len(self._step_result) + 1)\n for i, result, msg in zip(step_length, self._step_result, self.\n _step_msg):\n if result == self.FAIL:\n err('Step {} failed for reason:\\n\\t{}'.format(i, msg))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass OneCase(object):\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, case_path, *args, **kwargs):\n self._case_path = str(case_path)\n self._case_dict = {}\n self._step_result = []\n self._step_msg = []\n self._passed = False\n\n def run(self):\n self.load_case(self._case_path)\n self.satisfy_precondition(self._case_dict)\n self.exec_steps(self._case_dict)\n self.save_result()\n\n def load_case(self, case_path):\n self._case_dict = Case(file_path=case_path).case_dict\n\n def satisfy_precondition(self, case_dict):\n pre = case_dict.get('precondition')\n if pre:\n func_list = pre.get('prefunction')\n for func in func_list:\n _func = eval(func.get('func_name'))\n _args = {_.get('name'): trans_type(_.get('value'), _.get(\n 'type')) for _ in func.get('args')}\n _func(**_args)\n check_dependency(pre.get('dependency'))\n\n def check_dependency(self):\n pass\n\n def exec_steps(self, case_dict):\n \"\"\"\n \"\"\"\n for step in case_dict.get('step'):\n _input = step.get('input')\n res = {}\n for protocol, _args in _input.iteritems():\n req = get_conn(protocol)(**_args)\n res = req.response\n _output = step.get('output')\n if _output.get('strict'):\n pass\n try:\n for _ in _output.get('expect'):\n _var = _.get('var')\n _expect_value = trans_type(_['val']['value'], _['val'][\n 'type'])\n _real_value = res.get(_var)\n if _.get('cmp') == '==':\n assert _expect_value == _real_value, 'Not equal! 
\\n\\tExpect: {}\\n\\tGot: {}'.format(\n _expect_value, _real_value)\n except AssertionError as e:\n self._step_result.append(self.FAIL)\n self._step_msg.append(e.message)\n else:\n self._step_result.append(self.PASS)\n self._step_msg.append('Passed!')\n self._passed = all(self._step_result)\n\n def save_result(self):\n \"\"\"\n save result for this test\n 1) print to console\n 2) record to mysql\n 3) upload to testlink\n \"\"\"\n self.print_to_console()\n\n def print_to_console(self):\n if self._passed:\n print('All steps passed for case: {}'.format(self._case_dict.\n get('name')))\n else:\n err('Failed on case: {}'.format(self._case_dict.get('name')))\n step_length = range(1, len(self._step_result) + 1)\n for i, result, msg in zip(step_length, self._step_result, self.\n _step_msg):\n if result == self.FAIL:\n err('Step {} failed for reason:\\n\\t{}'.format(i, msg))\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass OneCase(object):\n <mask token>\n PASS = True\n FAIL = False\n\n def __init__(self, case_path, *args, **kwargs):\n self._case_path = str(case_path)\n self._case_dict = {}\n self._step_result = []\n self._step_msg = []\n self._passed = False\n\n def run(self):\n self.load_case(self._case_path)\n self.satisfy_precondition(self._case_dict)\n self.exec_steps(self._case_dict)\n self.save_result()\n\n def load_case(self, case_path):\n self._case_dict = Case(file_path=case_path).case_dict\n\n def satisfy_precondition(self, case_dict):\n pre = case_dict.get('precondition')\n if pre:\n func_list = pre.get('prefunction')\n for func in func_list:\n _func = eval(func.get('func_name'))\n _args = {_.get('name'): trans_type(_.get('value'), _.get(\n 'type')) for _ in func.get('args')}\n _func(**_args)\n check_dependency(pre.get('dependency'))\n\n def check_dependency(self):\n pass\n\n def exec_steps(self, case_dict):\n \"\"\"\n \"\"\"\n for step in case_dict.get('step'):\n _input = step.get('input')\n res = {}\n for protocol, _args in _input.iteritems():\n req = get_conn(protocol)(**_args)\n res = req.response\n _output = step.get('output')\n if _output.get('strict'):\n pass\n try:\n for _ in _output.get('expect'):\n _var = _.get('var')\n _expect_value = trans_type(_['val']['value'], _['val'][\n 'type'])\n _real_value = res.get(_var)\n if _.get('cmp') == '==':\n assert _expect_value == _real_value, 'Not equal! 
\\n\\tExpect: {}\\n\\tGot: {}'.format(\n _expect_value, _real_value)\n except AssertionError as e:\n self._step_result.append(self.FAIL)\n self._step_msg.append(e.message)\n else:\n self._step_result.append(self.PASS)\n self._step_msg.append('Passed!')\n self._passed = all(self._step_result)\n\n def save_result(self):\n \"\"\"\n save result for this test\n 1) print to console\n 2) record to mysql\n 3) upload to testlink\n \"\"\"\n self.print_to_console()\n\n def print_to_console(self):\n if self._passed:\n print('All steps passed for case: {}'.format(self._case_dict.\n get('name')))\n else:\n err('Failed on case: {}'.format(self._case_dict.get('name')))\n step_length = range(1, len(self._step_result) + 1)\n for i, result, msg in zip(step_length, self._step_result, self.\n _step_msg):\n if result == self.FAIL:\n err('Step {} failed for reason:\\n\\t{}'.format(i, msg))\n\n\n<mask token>\n",
"step-4": "from src.testcase.case import Case\nfrom src.utils import *\nfrom src.protocol.register import get_conn\nfrom src.precondition import *\n\n\nclass OneCase(object):\n \"\"\"\n Main flow of running one case's autotest\n \"\"\"\n PASS = True\n FAIL = False\n\n def __init__(self, case_path, *args, **kwargs):\n self._case_path = str(case_path)\n self._case_dict = {}\n self._step_result = []\n self._step_msg = []\n self._passed = False\n\n def run(self):\n self.load_case(self._case_path)\n self.satisfy_precondition(self._case_dict)\n self.exec_steps(self._case_dict)\n self.save_result()\n\n def load_case(self, case_path):\n self._case_dict = Case(file_path=case_path).case_dict\n\n def satisfy_precondition(self, case_dict):\n pre = case_dict.get('precondition')\n if pre:\n func_list = pre.get('prefunction')\n for func in func_list:\n _func = eval(func.get('func_name'))\n _args = {_.get('name'): trans_type(_.get('value'), _.get(\n 'type')) for _ in func.get('args')}\n _func(**_args)\n check_dependency(pre.get('dependency'))\n\n def check_dependency(self):\n pass\n\n def exec_steps(self, case_dict):\n \"\"\"\n \"\"\"\n for step in case_dict.get('step'):\n _input = step.get('input')\n res = {}\n for protocol, _args in _input.iteritems():\n req = get_conn(protocol)(**_args)\n res = req.response\n _output = step.get('output')\n if _output.get('strict'):\n pass\n try:\n for _ in _output.get('expect'):\n _var = _.get('var')\n _expect_value = trans_type(_['val']['value'], _['val'][\n 'type'])\n _real_value = res.get(_var)\n if _.get('cmp') == '==':\n assert _expect_value == _real_value, 'Not equal! 
\\n\\tExpect: {}\\n\\tGot: {}'.format(\n _expect_value, _real_value)\n except AssertionError as e:\n self._step_result.append(self.FAIL)\n self._step_msg.append(e.message)\n else:\n self._step_result.append(self.PASS)\n self._step_msg.append('Passed!')\n self._passed = all(self._step_result)\n\n def save_result(self):\n \"\"\"\n save result for this test\n 1) print to console\n 2) record to mysql\n 3) upload to testlink\n \"\"\"\n self.print_to_console()\n\n def print_to_console(self):\n if self._passed:\n print('All steps passed for case: {}'.format(self._case_dict.\n get('name')))\n else:\n err('Failed on case: {}'.format(self._case_dict.get('name')))\n step_length = range(1, len(self._step_result) + 1)\n for i, result, msg in zip(step_length, self._step_result, self.\n _step_msg):\n if result == self.FAIL:\n err('Step {} failed for reason:\\n\\t{}'.format(i, msg))\n\n\nif __name__ == '__main__':\n testcase = OneCase(\n '/Users/eacon/github/APIAutoTestFramework/case/sample.json')\n testcase.run()\n",
"step-5": "from src.testcase.case import Case\nfrom src.utils import *\nfrom src.protocol.register import get_conn\nfrom src.precondition import *\n\n\nclass OneCase(object):\n \"\"\"\n Main flow of running one case's autotest\n \"\"\"\n PASS = True\n FAIL = False\n\n def __init__(self, case_path, *args, **kwargs):\n self._case_path = str(case_path)\n self._case_dict = {}\n self._step_result = []\n self._step_msg = []\n self._passed = False\n\n def run(self):\n self.load_case(self._case_path)\n self.satisfy_precondition(self._case_dict)\n self.exec_steps(self._case_dict)\n self.save_result()\n\n def load_case(self, case_path):\n self._case_dict = Case(file_path=case_path).case_dict\n\n def satisfy_precondition(self, case_dict):\n pre = case_dict.get('precondition')\n if pre:\n # pre functions\n func_list = pre.get('prefunction')\n for func in func_list:\n _func = eval(func.get('func_name'))\n _args = {_.get('name'): trans_type(_.get('value'), _.get('type')) for _ in func.get('args')}\n _func(**_args)\n # dependency\n check_dependency(pre.get('dependency'))\n\n def check_dependency(self):\n pass # ToDo\n\n def exec_steps(self, case_dict):\n \"\"\"\n \"\"\"\n for step in case_dict.get('step'):\n # input\n _input = step.get('input')\n res = {}\n for protocol, _args in _input.iteritems():\n req = get_conn(protocol)(**_args)\n res = req.response\n # compare output\n _output = step.get('output')\n if _output.get('strict'):\n pass # ToDo\n try:\n for _ in _output.get('expect'):\n _var = _.get('var')\n _expect_value = trans_type(_['val']['value'], _['val']['type'])\n _real_value = res.get(_var)\n if _.get('cmp') == '==':\n assert _expect_value == _real_value, \"Not equal! 
\\n\\tExpect: {}\\n\\tGot: {}\".format(\n _expect_value, _real_value)\n except AssertionError as e:\n self._step_result.append(self.FAIL)\n self._step_msg.append(e.message)\n else:\n self._step_result.append(self.PASS)\n self._step_msg.append('Passed!')\n self._passed = all(self._step_result)\n\n def save_result(self):\n \"\"\"\n save result for this test\n 1) print to console\n 2) record to mysql\n 3) upload to testlink\n \"\"\"\n self.print_to_console()\n\n def print_to_console(self):\n if self._passed:\n print('All steps passed for case: {}'.format(self._case_dict.get('name')))\n else:\n err('Failed on case: {}'.format(self._case_dict.get('name')))\n step_length = range(1, len(self._step_result) + 1)\n for i, result, msg in zip(step_length, self._step_result, self._step_msg):\n if result == self.FAIL:\n err('Step {} failed for reason:\\n\\t{}'.format(i, msg))\n\n\nif __name__ == '__main__':\n testcase = OneCase('/Users/eacon/github/APIAutoTestFramework/case/sample.json')\n testcase.run()",
"step-ids": [
7,
9,
10,
13,
14
]
}
|
[
7,
9,
10,
13,
14
] |
<|reserved_special_token_0|>
class AuditMiddleware(object):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def process_response(self, request, response):
signals.audit_presave.disconnect(dispatch_uid=(settings.
DISPATCH_UID, request))
return response
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class AuditMiddleware(object):
<|reserved_special_token_0|>
def process_request(self, request, *args, **kwargs):
if not settings.CHANGE_LOGGING:
return
user = getattr(request, 'user', None)
if user and not user.is_authenticated():
user = None
update_kwargs = {}
if user and isinstance(user, get_user_model()):
update_kwargs['user'] = user
if request.META.get('REMOTE_ADDR'):
update_kwargs['remote_addr'] = request.META.get('REMOTE_ADDR')
if request.META.get('REMOTE_HOST'):
update_kwargs['remote_host'] = request.META.get('REMOTE_HOST')
request._handler_func = partial(self.pre_action_handler,
update_kwargs=update_kwargs)
signals.audit_presave.connect(request._handler_func, dispatch_uid=(
settings.DISPATCH_UID, request))
def process_response(self, request, response):
signals.audit_presave.disconnect(dispatch_uid=(settings.
DISPATCH_UID, request))
return response
def pre_action_handler(self, sender, model_instance, audit_meta,
update_kwargs=None, **kwargs):
if audit_meta and getattr(audit_meta, 'audit'
) and update_kwargs is not None:
audit_meta.update_additional_kwargs(update_kwargs)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class AuditMiddleware(object):
"""
middleware to add the user from requests to ModelChange objects.
This is independent of request logging and can be used separately.
"""
def process_request(self, request, *args, **kwargs):
if not settings.CHANGE_LOGGING:
return
user = getattr(request, 'user', None)
if user and not user.is_authenticated():
user = None
update_kwargs = {}
if user and isinstance(user, get_user_model()):
update_kwargs['user'] = user
if request.META.get('REMOTE_ADDR'):
update_kwargs['remote_addr'] = request.META.get('REMOTE_ADDR')
if request.META.get('REMOTE_HOST'):
update_kwargs['remote_host'] = request.META.get('REMOTE_HOST')
request._handler_func = partial(self.pre_action_handler,
update_kwargs=update_kwargs)
signals.audit_presave.connect(request._handler_func, dispatch_uid=(
settings.DISPATCH_UID, request))
def process_response(self, request, response):
signals.audit_presave.disconnect(dispatch_uid=(settings.
DISPATCH_UID, request))
return response
def pre_action_handler(self, sender, model_instance, audit_meta,
update_kwargs=None, **kwargs):
if audit_meta and getattr(audit_meta, 'audit'
) and update_kwargs is not None:
audit_meta.update_additional_kwargs(update_kwargs)
<|reserved_special_token_1|>
from __future__ import unicode_literals
from functools import partial
from django.contrib.auth import get_user_model
from .default_settings import settings
from . import signals
class AuditMiddleware(object):
"""
middleware to add the user from requests to ModelChange objects.
This is independent of request logging and can be used separately.
"""
def process_request(self, request, *args, **kwargs):
if not settings.CHANGE_LOGGING:
return
user = getattr(request, 'user', None)
if user and not user.is_authenticated():
user = None
update_kwargs = {}
if user and isinstance(user, get_user_model()):
update_kwargs['user'] = user
if request.META.get('REMOTE_ADDR'):
update_kwargs['remote_addr'] = request.META.get('REMOTE_ADDR')
if request.META.get('REMOTE_HOST'):
update_kwargs['remote_host'] = request.META.get('REMOTE_HOST')
request._handler_func = partial(self.pre_action_handler,
update_kwargs=update_kwargs)
signals.audit_presave.connect(request._handler_func, dispatch_uid=(
settings.DISPATCH_UID, request))
def process_response(self, request, response):
signals.audit_presave.disconnect(dispatch_uid=(settings.
DISPATCH_UID, request))
return response
def pre_action_handler(self, sender, model_instance, audit_meta,
update_kwargs=None, **kwargs):
if audit_meta and getattr(audit_meta, 'audit'
) and update_kwargs is not None:
audit_meta.update_additional_kwargs(update_kwargs)
<|reserved_special_token_1|>
from __future__ import unicode_literals
from functools import partial
from django.contrib.auth import get_user_model
from .default_settings import settings
from . import signals
class AuditMiddleware(object):
"""
middleware to add the user from requests to ModelChange objects.
This is independent of request logging and can be used separately.
"""
def process_request(self, request, *args, **kwargs):
if not settings.CHANGE_LOGGING:
return
user = getattr(request, 'user', None)
if user and not user.is_authenticated():
user = None
# build kwargs to pass to the signal handler
update_kwargs = {}
if user and isinstance(user, get_user_model()):
update_kwargs['user'] = user
if request.META.get('REMOTE_ADDR'):
update_kwargs['remote_addr'] = request.META.get('REMOTE_ADDR')
if request.META.get('REMOTE_HOST'):
update_kwargs['remote_host'] = request.META.get('REMOTE_HOST')
# keep the strong ref on the request, its a sane lifetime
request._handler_func = partial(self.pre_action_handler, update_kwargs=update_kwargs)
signals.audit_presave.connect(request._handler_func, dispatch_uid=(settings.DISPATCH_UID, request,),)
def process_response(self, request, response):
# disconnect signals for this request
# runs even if change logging is disabled in case it was disabled after the signal was created
signals.audit_presave.disconnect(dispatch_uid=(settings.DISPATCH_UID, request,))
return response
def pre_action_handler(self, sender, model_instance, audit_meta, update_kwargs=None, **kwargs):
if audit_meta and getattr(audit_meta, 'audit') and update_kwargs is not None:
audit_meta.update_additional_kwargs(update_kwargs)
|
flexible
|
{
"blob_id": "0e03a3b3401075384e580bc2bb8af1a106f1d238",
"index": 2141,
"step-1": "<mask token>\n\n\nclass AuditMiddleware(object):\n <mask token>\n <mask token>\n\n def process_response(self, request, response):\n signals.audit_presave.disconnect(dispatch_uid=(settings.\n DISPATCH_UID, request))\n return response\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass AuditMiddleware(object):\n <mask token>\n\n def process_request(self, request, *args, **kwargs):\n if not settings.CHANGE_LOGGING:\n return\n user = getattr(request, 'user', None)\n if user and not user.is_authenticated():\n user = None\n update_kwargs = {}\n if user and isinstance(user, get_user_model()):\n update_kwargs['user'] = user\n if request.META.get('REMOTE_ADDR'):\n update_kwargs['remote_addr'] = request.META.get('REMOTE_ADDR')\n if request.META.get('REMOTE_HOST'):\n update_kwargs['remote_host'] = request.META.get('REMOTE_HOST')\n request._handler_func = partial(self.pre_action_handler,\n update_kwargs=update_kwargs)\n signals.audit_presave.connect(request._handler_func, dispatch_uid=(\n settings.DISPATCH_UID, request))\n\n def process_response(self, request, response):\n signals.audit_presave.disconnect(dispatch_uid=(settings.\n DISPATCH_UID, request))\n return response\n\n def pre_action_handler(self, sender, model_instance, audit_meta,\n update_kwargs=None, **kwargs):\n if audit_meta and getattr(audit_meta, 'audit'\n ) and update_kwargs is not None:\n audit_meta.update_additional_kwargs(update_kwargs)\n",
"step-3": "<mask token>\n\n\nclass AuditMiddleware(object):\n \"\"\"\n middleware to add the user from requests to ModelChange objects.\n This is independent of request logging and can be used separately.\n \"\"\"\n\n def process_request(self, request, *args, **kwargs):\n if not settings.CHANGE_LOGGING:\n return\n user = getattr(request, 'user', None)\n if user and not user.is_authenticated():\n user = None\n update_kwargs = {}\n if user and isinstance(user, get_user_model()):\n update_kwargs['user'] = user\n if request.META.get('REMOTE_ADDR'):\n update_kwargs['remote_addr'] = request.META.get('REMOTE_ADDR')\n if request.META.get('REMOTE_HOST'):\n update_kwargs['remote_host'] = request.META.get('REMOTE_HOST')\n request._handler_func = partial(self.pre_action_handler,\n update_kwargs=update_kwargs)\n signals.audit_presave.connect(request._handler_func, dispatch_uid=(\n settings.DISPATCH_UID, request))\n\n def process_response(self, request, response):\n signals.audit_presave.disconnect(dispatch_uid=(settings.\n DISPATCH_UID, request))\n return response\n\n def pre_action_handler(self, sender, model_instance, audit_meta,\n update_kwargs=None, **kwargs):\n if audit_meta and getattr(audit_meta, 'audit'\n ) and update_kwargs is not None:\n audit_meta.update_additional_kwargs(update_kwargs)\n",
"step-4": "from __future__ import unicode_literals\nfrom functools import partial\nfrom django.contrib.auth import get_user_model\nfrom .default_settings import settings\nfrom . import signals\n\n\nclass AuditMiddleware(object):\n \"\"\"\n middleware to add the user from requests to ModelChange objects.\n This is independent of request logging and can be used separately.\n \"\"\"\n\n def process_request(self, request, *args, **kwargs):\n if not settings.CHANGE_LOGGING:\n return\n user = getattr(request, 'user', None)\n if user and not user.is_authenticated():\n user = None\n update_kwargs = {}\n if user and isinstance(user, get_user_model()):\n update_kwargs['user'] = user\n if request.META.get('REMOTE_ADDR'):\n update_kwargs['remote_addr'] = request.META.get('REMOTE_ADDR')\n if request.META.get('REMOTE_HOST'):\n update_kwargs['remote_host'] = request.META.get('REMOTE_HOST')\n request._handler_func = partial(self.pre_action_handler,\n update_kwargs=update_kwargs)\n signals.audit_presave.connect(request._handler_func, dispatch_uid=(\n settings.DISPATCH_UID, request))\n\n def process_response(self, request, response):\n signals.audit_presave.disconnect(dispatch_uid=(settings.\n DISPATCH_UID, request))\n return response\n\n def pre_action_handler(self, sender, model_instance, audit_meta,\n update_kwargs=None, **kwargs):\n if audit_meta and getattr(audit_meta, 'audit'\n ) and update_kwargs is not None:\n audit_meta.update_additional_kwargs(update_kwargs)\n",
"step-5": "from __future__ import unicode_literals\n\nfrom functools import partial\nfrom django.contrib.auth import get_user_model\n\nfrom .default_settings import settings\nfrom . import signals\n\n\nclass AuditMiddleware(object):\n \"\"\"\n middleware to add the user from requests to ModelChange objects.\n This is independent of request logging and can be used separately.\n \"\"\"\n\n def process_request(self, request, *args, **kwargs):\n if not settings.CHANGE_LOGGING:\n return\n\n user = getattr(request, 'user', None)\n\n if user and not user.is_authenticated():\n user = None\n\n # build kwargs to pass to the signal handler\n update_kwargs = {}\n if user and isinstance(user, get_user_model()):\n update_kwargs['user'] = user\n if request.META.get('REMOTE_ADDR'):\n update_kwargs['remote_addr'] = request.META.get('REMOTE_ADDR')\n if request.META.get('REMOTE_HOST'):\n update_kwargs['remote_host'] = request.META.get('REMOTE_HOST')\n\n # keep the strong ref on the request, its a sane lifetime\n request._handler_func = partial(self.pre_action_handler, update_kwargs=update_kwargs)\n\n signals.audit_presave.connect(request._handler_func, dispatch_uid=(settings.DISPATCH_UID, request,),)\n\n def process_response(self, request, response):\n # disconnect signals for this request\n # runs even if change logging is disabled in case it was disabled after the signal was created\n signals.audit_presave.disconnect(dispatch_uid=(settings.DISPATCH_UID, request,))\n\n return response\n\n def pre_action_handler(self, sender, model_instance, audit_meta, update_kwargs=None, **kwargs):\n if audit_meta and getattr(audit_meta, 'audit') and update_kwargs is not None:\n audit_meta.update_additional_kwargs(update_kwargs)\n",
"step-ids": [
2,
4,
5,
6,
7
]
}
|
[
2,
4,
5,
6,
7
] |
import datetime
import traceback
import sys
import os
def getErrorReport():
errorReport = ErrorReport()
return errorReport
class ErrorReport():
def __init__(self):
return
def startLog(self):
timestamp = str(datetime.datetime.now())
fileName = 'Log_'+timestamp+'.txt.'
self.logFile = open(fileName,'w')
def endLog(self):
self.logFile.close()
def writeError(self):
traceback.print_exc(file=self.logFile)
self.logFile.write('\n')
self.logFile.flush()
os.fsync(self.logFile)
def writeMessage(self, message=''):
self.logFile.write(message)
self.logFile.write('\n\n')
self.logFile.flush()
os.fsync(self.logFile)
|
normal
|
{
"blob_id": "6abc8b97117257e16da1f7b730b09ee0f7bd4c6e",
"index": 4715,
"step-1": "<mask token>\n\n\nclass ErrorReport:\n <mask token>\n\n def startLog(self):\n timestamp = str(datetime.datetime.now())\n fileName = 'Log_' + timestamp + '.txt.'\n self.logFile = open(fileName, 'w')\n\n def endLog(self):\n self.logFile.close()\n\n def writeError(self):\n traceback.print_exc(file=self.logFile)\n self.logFile.write('\\n')\n self.logFile.flush()\n os.fsync(self.logFile)\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass ErrorReport:\n\n def __init__(self):\n return\n\n def startLog(self):\n timestamp = str(datetime.datetime.now())\n fileName = 'Log_' + timestamp + '.txt.'\n self.logFile = open(fileName, 'w')\n\n def endLog(self):\n self.logFile.close()\n\n def writeError(self):\n traceback.print_exc(file=self.logFile)\n self.logFile.write('\\n')\n self.logFile.flush()\n os.fsync(self.logFile)\n\n def writeMessage(self, message=''):\n self.logFile.write(message)\n self.logFile.write('\\n\\n')\n self.logFile.flush()\n os.fsync(self.logFile)\n",
"step-3": "<mask token>\n\n\ndef getErrorReport():\n errorReport = ErrorReport()\n return errorReport\n\n\nclass ErrorReport:\n\n def __init__(self):\n return\n\n def startLog(self):\n timestamp = str(datetime.datetime.now())\n fileName = 'Log_' + timestamp + '.txt.'\n self.logFile = open(fileName, 'w')\n\n def endLog(self):\n self.logFile.close()\n\n def writeError(self):\n traceback.print_exc(file=self.logFile)\n self.logFile.write('\\n')\n self.logFile.flush()\n os.fsync(self.logFile)\n\n def writeMessage(self, message=''):\n self.logFile.write(message)\n self.logFile.write('\\n\\n')\n self.logFile.flush()\n os.fsync(self.logFile)\n",
"step-4": "import datetime\nimport traceback\nimport sys\nimport os\n\n\ndef getErrorReport():\n errorReport = ErrorReport()\n return errorReport\n\n\nclass ErrorReport:\n\n def __init__(self):\n return\n\n def startLog(self):\n timestamp = str(datetime.datetime.now())\n fileName = 'Log_' + timestamp + '.txt.'\n self.logFile = open(fileName, 'w')\n\n def endLog(self):\n self.logFile.close()\n\n def writeError(self):\n traceback.print_exc(file=self.logFile)\n self.logFile.write('\\n')\n self.logFile.flush()\n os.fsync(self.logFile)\n\n def writeMessage(self, message=''):\n self.logFile.write(message)\n self.logFile.write('\\n\\n')\n self.logFile.flush()\n os.fsync(self.logFile)\n",
"step-5": "import datetime\nimport traceback\nimport sys\nimport os\n\n\ndef getErrorReport():\n errorReport = ErrorReport()\n return errorReport\n\n\nclass ErrorReport(): \n\n def __init__(self):\n return\n\n def startLog(self):\n timestamp = str(datetime.datetime.now())\n fileName = 'Log_'+timestamp+'.txt.'\n self.logFile = open(fileName,'w')\n\n def endLog(self):\n self.logFile.close()\n\n def writeError(self):\n traceback.print_exc(file=self.logFile)\n self.logFile.write('\\n')\n self.logFile.flush()\n os.fsync(self.logFile)\n\n def writeMessage(self, message=''):\n self.logFile.write(message)\n self.logFile.write('\\n\\n')\n self.logFile.flush()\n os.fsync(self.logFile)\n",
"step-ids": [
4,
6,
7,
8,
9
]
}
|
[
4,
6,
7,
8,
9
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for i in range(N):
a, b, c, d = map(int, input().split())
A.append(a)
B.append(b)
C.append(c)
D.append(d)
<|reserved_special_token_0|>
for i in range(N):
for j in range(N):
AB.append(A[i] + B[j])
CD.append(C[i] + D[j])
AB.sort()
CD.sort()
<|reserved_special_token_0|>
while left < len(AB) and right >= 0:
total = AB[left] + CD[right]
if total == 0:
left_count, right_count = 1, 1
left_tmp = left
left += 1
while left < len(AB) and AB[left] + CD[right] == 0:
left_count += 1
left += 1
right -= 1
while right >= 0 and AB[left_tmp] + CD[right] == 0:
right_count += 1
right -= 1
answer += left_count * right_count
elif total > 0:
right -= 1
else:
left += 1
print(answer)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
input = sys.stdin.readline
N = int(input())
A, B, C, D = [], [], [], []
for i in range(N):
a, b, c, d = map(int, input().split())
A.append(a)
B.append(b)
C.append(c)
D.append(d)
AB = []
CD = []
for i in range(N):
for j in range(N):
AB.append(A[i] + B[j])
CD.append(C[i] + D[j])
AB.sort()
CD.sort()
answer = 0
left, right = 0, len(CD) - 1
while left < len(AB) and right >= 0:
total = AB[left] + CD[right]
if total == 0:
left_count, right_count = 1, 1
left_tmp = left
left += 1
while left < len(AB) and AB[left] + CD[right] == 0:
left_count += 1
left += 1
right -= 1
while right >= 0 and AB[left_tmp] + CD[right] == 0:
right_count += 1
right -= 1
answer += left_count * right_count
elif total > 0:
right -= 1
else:
left += 1
print(answer)
<|reserved_special_token_1|>
import sys
input = sys.stdin.readline
N = int(input())
A, B, C, D = [], [], [], []
for i in range(N):
a, b, c, d = map(int, input().split())
A.append(a)
B.append(b)
C.append(c)
D.append(d)
AB = []
CD = []
for i in range(N):
for j in range(N):
AB.append(A[i] + B[j])
CD.append(C[i] + D[j])
AB.sort()
CD.sort()
answer = 0
left, right = 0, len(CD) - 1
while left < len(AB) and right >= 0:
total = AB[left] + CD[right]
if total == 0:
left_count, right_count = 1, 1
left_tmp = left
left += 1
while left < len(AB) and AB[left] + CD[right] == 0:
left_count += 1
left += 1
right -= 1
while right >= 0 and AB[left_tmp] + CD[right] == 0:
right_count += 1
right -= 1
answer += left_count * right_count
elif total > 0:
right -= 1
else:
left += 1
print(answer)
<|reserved_special_token_1|>
import sys
input = sys.stdin.readline
N = int(input())
A, B, C, D = [], [], [], []
for i in range(N):
a, b, c, d = map(int, input().split())
A.append(a)
B.append(b)
C.append(c)
D.append(d)
AB = []
CD = []
for i in range(N):
for j in range(N):
AB.append(A[i] + B[j])
CD.append(C[i] + D[j])
AB.sort()
CD.sort()
answer = 0
left, right = 0, len(CD) - 1
while left < len(AB) and right >= 0:
total = AB[left] + CD[right]
if total == 0:
left_count, right_count = 1, 1
left_tmp = left
left += 1
while left < len(AB) and AB[left] + CD[right] == 0:
left_count += 1
left += 1
right -= 1
while right >= 0 and AB[left_tmp] + CD[right] == 0:
right_count += 1
right -= 1
answer += (left_count * right_count)
elif total > 0:
right -= 1
else:
left += 1
print(answer)
|
flexible
|
{
"blob_id": "2a9426653146603d9aa79a59ce181d97aa3c551c",
"index": 8525,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(N):\n a, b, c, d = map(int, input().split())\n A.append(a)\n B.append(b)\n C.append(c)\n D.append(d)\n<mask token>\nfor i in range(N):\n for j in range(N):\n AB.append(A[i] + B[j])\n CD.append(C[i] + D[j])\nAB.sort()\nCD.sort()\n<mask token>\nwhile left < len(AB) and right >= 0:\n total = AB[left] + CD[right]\n if total == 0:\n left_count, right_count = 1, 1\n left_tmp = left\n left += 1\n while left < len(AB) and AB[left] + CD[right] == 0:\n left_count += 1\n left += 1\n right -= 1\n while right >= 0 and AB[left_tmp] + CD[right] == 0:\n right_count += 1\n right -= 1\n answer += left_count * right_count\n elif total > 0:\n right -= 1\n else:\n left += 1\nprint(answer)\n",
"step-3": "<mask token>\ninput = sys.stdin.readline\nN = int(input())\nA, B, C, D = [], [], [], []\nfor i in range(N):\n a, b, c, d = map(int, input().split())\n A.append(a)\n B.append(b)\n C.append(c)\n D.append(d)\nAB = []\nCD = []\nfor i in range(N):\n for j in range(N):\n AB.append(A[i] + B[j])\n CD.append(C[i] + D[j])\nAB.sort()\nCD.sort()\nanswer = 0\nleft, right = 0, len(CD) - 1\nwhile left < len(AB) and right >= 0:\n total = AB[left] + CD[right]\n if total == 0:\n left_count, right_count = 1, 1\n left_tmp = left\n left += 1\n while left < len(AB) and AB[left] + CD[right] == 0:\n left_count += 1\n left += 1\n right -= 1\n while right >= 0 and AB[left_tmp] + CD[right] == 0:\n right_count += 1\n right -= 1\n answer += left_count * right_count\n elif total > 0:\n right -= 1\n else:\n left += 1\nprint(answer)\n",
"step-4": "import sys\ninput = sys.stdin.readline\nN = int(input())\nA, B, C, D = [], [], [], []\nfor i in range(N):\n a, b, c, d = map(int, input().split())\n A.append(a)\n B.append(b)\n C.append(c)\n D.append(d)\nAB = []\nCD = []\nfor i in range(N):\n for j in range(N):\n AB.append(A[i] + B[j])\n CD.append(C[i] + D[j])\nAB.sort()\nCD.sort()\nanswer = 0\nleft, right = 0, len(CD) - 1\nwhile left < len(AB) and right >= 0:\n total = AB[left] + CD[right]\n if total == 0:\n left_count, right_count = 1, 1\n left_tmp = left\n left += 1\n while left < len(AB) and AB[left] + CD[right] == 0:\n left_count += 1\n left += 1\n right -= 1\n while right >= 0 and AB[left_tmp] + CD[right] == 0:\n right_count += 1\n right -= 1\n answer += left_count * right_count\n elif total > 0:\n right -= 1\n else:\n left += 1\nprint(answer)\n",
"step-5": "import sys\ninput = sys.stdin.readline\n\nN = int(input())\nA, B, C, D = [], [], [], []\nfor i in range(N):\n a, b, c, d = map(int, input().split())\n A.append(a)\n B.append(b)\n C.append(c)\n D.append(d)\n\nAB = []\nCD = []\nfor i in range(N):\n for j in range(N):\n AB.append(A[i] + B[j])\n CD.append(C[i] + D[j])\n\nAB.sort()\nCD.sort()\n\nanswer = 0\nleft, right = 0, len(CD) - 1\nwhile left < len(AB) and right >= 0:\n total = AB[left] + CD[right]\n\n if total == 0: \n left_count, right_count = 1, 1\n left_tmp = left\n\n left += 1\n while left < len(AB) and AB[left] + CD[right] == 0:\n left_count += 1\n left += 1\n\n right -= 1\n while right >= 0 and AB[left_tmp] + CD[right] == 0:\n right_count += 1\n right -= 1\n \n answer += (left_count * right_count)\n\n elif total > 0:\n right -= 1\n else:\n left += 1\n\nprint(answer)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def compile_data(city_labels=None):
"""Compile all data into common structure
"""
pass
def compile_and_save_data(f_out, city_labels=None):
"""Compile and save all data
"""
pass
<|reserved_special_token_1|>
<|reserved_special_token_0|>
bikerawdata.add_system(city_label='taipei', bikesharesystem_data=taipei_system)
bikerawdata.add_parser(city_label='taipei', parser_func=parse_taipei_file)
bikerawdata.add_system(city_label='helsinki', bikesharesystem_data=
helsinki_system)
bikerawdata.add_parser(city_label='helsinki', parser_func=parse_helsiki_file)
bikerawdata.add_system(city_label='london', bikesharesystem_data=london_system)
bikerawdata.add_parser(city_label='london', parser_func=parse_london_file)
bikerawdata.add_system(city_label='toronto', bikesharesystem_data=
toronto_system)
bikerawdata.add_parser(city_label='toronto', parser_func=parse_toronto_file)
def compile_data(city_labels=None):
"""Compile all data into common structure
"""
pass
def compile_and_save_data(f_out, city_labels=None):
"""Compile and save all data
"""
pass
<|reserved_special_token_1|>
<|reserved_special_token_0|>
bikerawdata = BikeRawData()
bikerawdata.add_system(city_label='taipei', bikesharesystem_data=taipei_system)
bikerawdata.add_parser(city_label='taipei', parser_func=parse_taipei_file)
bikerawdata.add_system(city_label='helsinki', bikesharesystem_data=
helsinki_system)
bikerawdata.add_parser(city_label='helsinki', parser_func=parse_helsiki_file)
bikerawdata.add_system(city_label='london', bikesharesystem_data=london_system)
bikerawdata.add_parser(city_label='london', parser_func=parse_london_file)
bikerawdata.add_system(city_label='toronto', bikesharesystem_data=
toronto_system)
bikerawdata.add_parser(city_label='toronto', parser_func=parse_toronto_file)
def compile_data(city_labels=None):
"""Compile all data into common structure
"""
pass
def compile_and_save_data(f_out, city_labels=None):
"""Compile and save all data
"""
pass
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from dataset_creators.cities import parse_taipei_file, taipei_system, parse_london_file, london_system, parse_helsiki_file, helsinki_system, parse_toronto_file, toronto_system
from ._bikerawdata import BikeRawData
bikerawdata = BikeRawData()
bikerawdata.add_system(city_label='taipei', bikesharesystem_data=taipei_system)
bikerawdata.add_parser(city_label='taipei', parser_func=parse_taipei_file)
bikerawdata.add_system(city_label='helsinki', bikesharesystem_data=
helsinki_system)
bikerawdata.add_parser(city_label='helsinki', parser_func=parse_helsiki_file)
bikerawdata.add_system(city_label='london', bikesharesystem_data=london_system)
bikerawdata.add_parser(city_label='london', parser_func=parse_london_file)
bikerawdata.add_system(city_label='toronto', bikesharesystem_data=
toronto_system)
bikerawdata.add_parser(city_label='toronto', parser_func=parse_toronto_file)
def compile_data(city_labels=None):
"""Compile all data into common structure
"""
pass
def compile_and_save_data(f_out, city_labels=None):
"""Compile and save all data
"""
pass
<|reserved_special_token_1|>
'''Instantiate data parsers for all cities.
If additional city parsers are added, the `bikerrawdata` instance in this file should be updated.
Written by: Anders Ohrn 2020
'''
from dataset_creators.cities import parse_taipei_file, taipei_system, \
parse_london_file, london_system, \
parse_helsiki_file, helsinki_system, \
parse_toronto_file, toronto_system
from ._bikerawdata import BikeRawData
bikerawdata = BikeRawData()
bikerawdata.add_system(city_label='taipei', bikesharesystem_data=taipei_system)
bikerawdata.add_parser(city_label='taipei', parser_func=parse_taipei_file)
bikerawdata.add_system(city_label='helsinki', bikesharesystem_data=helsinki_system)
bikerawdata.add_parser(city_label='helsinki', parser_func=parse_helsiki_file)
bikerawdata.add_system(city_label='london', bikesharesystem_data=london_system)
bikerawdata.add_parser(city_label='london', parser_func=parse_london_file)
bikerawdata.add_system(city_label='toronto', bikesharesystem_data=toronto_system)
bikerawdata.add_parser(city_label='toronto', parser_func=parse_toronto_file)
def compile_data(city_labels=None):
'''Compile all data into common structure
'''
pass
def compile_and_save_data(f_out, city_labels=None):
'''Compile and save all data
'''
pass
|
flexible
|
{
"blob_id": "53bf97d66d0b26c6b5639acd0261604082474e7b",
"index": 8163,
"step-1": "<mask token>\n\n\ndef compile_data(city_labels=None):\n \"\"\"Compile all data into common structure\n\n \"\"\"\n pass\n\n\ndef compile_and_save_data(f_out, city_labels=None):\n \"\"\"Compile and save all data\n\n \"\"\"\n pass\n",
"step-2": "<mask token>\nbikerawdata.add_system(city_label='taipei', bikesharesystem_data=taipei_system)\nbikerawdata.add_parser(city_label='taipei', parser_func=parse_taipei_file)\nbikerawdata.add_system(city_label='helsinki', bikesharesystem_data=\n helsinki_system)\nbikerawdata.add_parser(city_label='helsinki', parser_func=parse_helsiki_file)\nbikerawdata.add_system(city_label='london', bikesharesystem_data=london_system)\nbikerawdata.add_parser(city_label='london', parser_func=parse_london_file)\nbikerawdata.add_system(city_label='toronto', bikesharesystem_data=\n toronto_system)\nbikerawdata.add_parser(city_label='toronto', parser_func=parse_toronto_file)\n\n\ndef compile_data(city_labels=None):\n \"\"\"Compile all data into common structure\n\n \"\"\"\n pass\n\n\ndef compile_and_save_data(f_out, city_labels=None):\n \"\"\"Compile and save all data\n\n \"\"\"\n pass\n",
"step-3": "<mask token>\nbikerawdata = BikeRawData()\nbikerawdata.add_system(city_label='taipei', bikesharesystem_data=taipei_system)\nbikerawdata.add_parser(city_label='taipei', parser_func=parse_taipei_file)\nbikerawdata.add_system(city_label='helsinki', bikesharesystem_data=\n helsinki_system)\nbikerawdata.add_parser(city_label='helsinki', parser_func=parse_helsiki_file)\nbikerawdata.add_system(city_label='london', bikesharesystem_data=london_system)\nbikerawdata.add_parser(city_label='london', parser_func=parse_london_file)\nbikerawdata.add_system(city_label='toronto', bikesharesystem_data=\n toronto_system)\nbikerawdata.add_parser(city_label='toronto', parser_func=parse_toronto_file)\n\n\ndef compile_data(city_labels=None):\n \"\"\"Compile all data into common structure\n\n \"\"\"\n pass\n\n\ndef compile_and_save_data(f_out, city_labels=None):\n \"\"\"Compile and save all data\n\n \"\"\"\n pass\n",
"step-4": "<mask token>\nfrom dataset_creators.cities import parse_taipei_file, taipei_system, parse_london_file, london_system, parse_helsiki_file, helsinki_system, parse_toronto_file, toronto_system\nfrom ._bikerawdata import BikeRawData\nbikerawdata = BikeRawData()\nbikerawdata.add_system(city_label='taipei', bikesharesystem_data=taipei_system)\nbikerawdata.add_parser(city_label='taipei', parser_func=parse_taipei_file)\nbikerawdata.add_system(city_label='helsinki', bikesharesystem_data=\n helsinki_system)\nbikerawdata.add_parser(city_label='helsinki', parser_func=parse_helsiki_file)\nbikerawdata.add_system(city_label='london', bikesharesystem_data=london_system)\nbikerawdata.add_parser(city_label='london', parser_func=parse_london_file)\nbikerawdata.add_system(city_label='toronto', bikesharesystem_data=\n toronto_system)\nbikerawdata.add_parser(city_label='toronto', parser_func=parse_toronto_file)\n\n\ndef compile_data(city_labels=None):\n \"\"\"Compile all data into common structure\n\n \"\"\"\n pass\n\n\ndef compile_and_save_data(f_out, city_labels=None):\n \"\"\"Compile and save all data\n\n \"\"\"\n pass\n",
"step-5": "'''Instantiate data parsers for all cities.\n\nIf additional city parsers are added, the `bikerrawdata` instance in this file should be updated.\n\nWritten by: Anders Ohrn 2020\n\n'''\nfrom dataset_creators.cities import parse_taipei_file, taipei_system, \\\n parse_london_file, london_system, \\\n parse_helsiki_file, helsinki_system, \\\n parse_toronto_file, toronto_system\n\nfrom ._bikerawdata import BikeRawData\n\nbikerawdata = BikeRawData()\nbikerawdata.add_system(city_label='taipei', bikesharesystem_data=taipei_system)\nbikerawdata.add_parser(city_label='taipei', parser_func=parse_taipei_file)\nbikerawdata.add_system(city_label='helsinki', bikesharesystem_data=helsinki_system)\nbikerawdata.add_parser(city_label='helsinki', parser_func=parse_helsiki_file)\nbikerawdata.add_system(city_label='london', bikesharesystem_data=london_system)\nbikerawdata.add_parser(city_label='london', parser_func=parse_london_file)\nbikerawdata.add_system(city_label='toronto', bikesharesystem_data=toronto_system)\nbikerawdata.add_parser(city_label='toronto', parser_func=parse_toronto_file)\n\ndef compile_data(city_labels=None):\n '''Compile all data into common structure\n\n '''\n pass\n\ndef compile_and_save_data(f_out, city_labels=None):\n '''Compile and save all data\n\n '''\n pass",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
class ClientInline(admin.StackedInline):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class ClientAdmin(admin.ModelAdmin):
inlines = [ClientInline]
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ClientInline(admin.StackedInline):
model = Adress
can_delete = False
extra = 1
class ClientAdmin(admin.ModelAdmin):
inlines = [ClientInline]
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ClientInline(admin.StackedInline):
model = Adress
can_delete = False
extra = 1
class ClientAdmin(admin.ModelAdmin):
inlines = [ClientInline]
admin.site.register(Client, ClientAdmin)
<|reserved_special_token_1|>
from django.contrib import admin
from .models import Client, Adress
class ClientInline(admin.StackedInline):
model = Adress
can_delete = False
extra = 1
class ClientAdmin(admin.ModelAdmin):
inlines = [ClientInline]
admin.site.register(Client, ClientAdmin)
<|reserved_special_token_1|>
from django.contrib import admin
from .models import Client, Adress
# Register your models here.
class ClientInline(admin.StackedInline):
model = Adress
can_delete = False
extra = 1
class ClientAdmin(admin.ModelAdmin):
inlines = [ClientInline]
admin.site.register(Client, ClientAdmin)
|
flexible
|
{
"blob_id": "ffd7aef2e72e64ac5b9f85b9d12845479187d89b",
"index": 2010,
"step-1": "<mask token>\n\n\nclass ClientInline(admin.StackedInline):\n <mask token>\n <mask token>\n <mask token>\n\n\nclass ClientAdmin(admin.ModelAdmin):\n inlines = [ClientInline]\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass ClientInline(admin.StackedInline):\n model = Adress\n can_delete = False\n extra = 1\n\n\nclass ClientAdmin(admin.ModelAdmin):\n inlines = [ClientInline]\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass ClientInline(admin.StackedInline):\n model = Adress\n can_delete = False\n extra = 1\n\n\nclass ClientAdmin(admin.ModelAdmin):\n inlines = [ClientInline]\n\n\nadmin.site.register(Client, ClientAdmin)\n",
"step-4": "from django.contrib import admin\nfrom .models import Client, Adress\n\n\nclass ClientInline(admin.StackedInline):\n model = Adress\n can_delete = False\n extra = 1\n\n\nclass ClientAdmin(admin.ModelAdmin):\n inlines = [ClientInline]\n\n\nadmin.site.register(Client, ClientAdmin)\n",
"step-5": "from django.contrib import admin\nfrom .models import Client, Adress\n\n# Register your models here.\n\nclass ClientInline(admin.StackedInline):\n model = Adress\n can_delete = False\n extra = 1\n\nclass ClientAdmin(admin.ModelAdmin):\n inlines = [ClientInline]\n\n\nadmin.site.register(Client, ClientAdmin)",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
# Generated by Django 2.1.5 on 2019-03-12 18:07
from django.db import migrations
def associate_experiments_to_organisms(apps, schema_editor):
"""Creates missing associations between experiments and organisms.
Based off of:
https://simpleisbetterthancomplex.com/tutorial/2017/09/26/how-to-create-django-data-migrations.html
We can't import the Experiment model directly as it may be a newer
version than this migration expects. We use the historical version.
"""
# I don't think this is truly necessary in this particular
# migration, but it seems to be a best practice for Django
# migrations and a lil extra safety never hurts.
Experiment = apps.get_model("data_refinery_common", "Experiment")
ExperimentOrganismAssociation = apps.get_model(
"data_refinery_common", "ExperimentOrganismAssociation"
)
for experiment in Experiment.objects.all():
organisms = experiment.organisms.all()
samples = experiment.samples.distinct("organism").exclude(
organism_id__in=organisms.values("id")
)
for sample in samples:
ExperimentOrganismAssociation.objects.get_or_create(
experiment=experiment, organism=sample.organism
)
# This is the same as experiment.update_organism_names but we
# can't use that method because of the apps.get_model
# weirdness. It seems to be this issue:
# https://stackoverflow.com/questions/44907306/django-unavailable-field-of-model-while-doing-migration
# The method is simple enough that I'd rather duplicate it
# than disregard the warning about newer versions.
experiment.organism_names = list(
set([organism.name for organism in experiment.organisms.all()])
)
experiment.save()
class Migration(migrations.Migration):
dependencies = [
("data_refinery_common", "0015_dataset_email_ccdl_ok"),
]
operations = [
migrations.RunPython(associate_experiments_to_organisms),
]
|
normal
|
{
"blob_id": "b4b2307897f64bb30cad2fbaaa1b320ae2aa7456",
"index": 8553,
"step-1": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('data_refinery_common', '0015_dataset_email_ccdl_ok')]\n operations = [migrations.RunPython(associate_experiments_to_organisms)]\n",
"step-3": "<mask token>\n\n\ndef associate_experiments_to_organisms(apps, schema_editor):\n \"\"\"Creates missing associations between experiments and organisms.\n\n Based off of:\n https://simpleisbetterthancomplex.com/tutorial/2017/09/26/how-to-create-django-data-migrations.html\n We can't import the Experiment model directly as it may be a newer\n version than this migration expects. We use the historical version.\n \"\"\"\n Experiment = apps.get_model('data_refinery_common', 'Experiment')\n ExperimentOrganismAssociation = apps.get_model('data_refinery_common',\n 'ExperimentOrganismAssociation')\n for experiment in Experiment.objects.all():\n organisms = experiment.organisms.all()\n samples = experiment.samples.distinct('organism').exclude(\n organism_id__in=organisms.values('id'))\n for sample in samples:\n ExperimentOrganismAssociation.objects.get_or_create(experiment=\n experiment, organism=sample.organism)\n experiment.organism_names = list(set([organism.name for organism in\n experiment.organisms.all()]))\n experiment.save()\n\n\nclass Migration(migrations.Migration):\n dependencies = [('data_refinery_common', '0015_dataset_email_ccdl_ok')]\n operations = [migrations.RunPython(associate_experiments_to_organisms)]\n",
"step-4": "from django.db import migrations\n\n\ndef associate_experiments_to_organisms(apps, schema_editor):\n \"\"\"Creates missing associations between experiments and organisms.\n\n Based off of:\n https://simpleisbetterthancomplex.com/tutorial/2017/09/26/how-to-create-django-data-migrations.html\n We can't import the Experiment model directly as it may be a newer\n version than this migration expects. We use the historical version.\n \"\"\"\n Experiment = apps.get_model('data_refinery_common', 'Experiment')\n ExperimentOrganismAssociation = apps.get_model('data_refinery_common',\n 'ExperimentOrganismAssociation')\n for experiment in Experiment.objects.all():\n organisms = experiment.organisms.all()\n samples = experiment.samples.distinct('organism').exclude(\n organism_id__in=organisms.values('id'))\n for sample in samples:\n ExperimentOrganismAssociation.objects.get_or_create(experiment=\n experiment, organism=sample.organism)\n experiment.organism_names = list(set([organism.name for organism in\n experiment.organisms.all()]))\n experiment.save()\n\n\nclass Migration(migrations.Migration):\n dependencies = [('data_refinery_common', '0015_dataset_email_ccdl_ok')]\n operations = [migrations.RunPython(associate_experiments_to_organisms)]\n",
"step-5": "# Generated by Django 2.1.5 on 2019-03-12 18:07\n\nfrom django.db import migrations\n\n\ndef associate_experiments_to_organisms(apps, schema_editor):\n \"\"\"Creates missing associations between experiments and organisms.\n\n Based off of:\n https://simpleisbetterthancomplex.com/tutorial/2017/09/26/how-to-create-django-data-migrations.html\n We can't import the Experiment model directly as it may be a newer\n version than this migration expects. We use the historical version.\n \"\"\"\n # I don't think this is truly necessary in this particular\n # migration, but it seems to be a best practice for Django\n # migrations and a lil extra safety never hurts.\n Experiment = apps.get_model(\"data_refinery_common\", \"Experiment\")\n ExperimentOrganismAssociation = apps.get_model(\n \"data_refinery_common\", \"ExperimentOrganismAssociation\"\n )\n\n for experiment in Experiment.objects.all():\n organisms = experiment.organisms.all()\n samples = experiment.samples.distinct(\"organism\").exclude(\n organism_id__in=organisms.values(\"id\")\n )\n\n for sample in samples:\n ExperimentOrganismAssociation.objects.get_or_create(\n experiment=experiment, organism=sample.organism\n )\n\n # This is the same as experiment.update_organism_names but we\n # can't use that method because of the apps.get_model\n # weirdness. It seems to be this issue:\n # https://stackoverflow.com/questions/44907306/django-unavailable-field-of-model-while-doing-migration\n # The method is simple enough that I'd rather duplicate it\n # than disregard the warning about newer versions.\n experiment.organism_names = list(\n set([organism.name for organism in experiment.organisms.all()])\n )\n experiment.save()\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n (\"data_refinery_common\", \"0015_dataset_email_ccdl_ok\"),\n ]\n\n operations = [\n migrations.RunPython(associate_experiments_to_organisms),\n ]\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
REDIRECT_MAP = {'90': '19904201', '91': '19903329', '92': '19899125', '93':
'19901043', '94': '19903192', '95': '19899788', '97': '19904423', '98':
'19906163', '99': '19905540', '100': '19907871', '101': '19908147',
'102': '19910103', '103': '19909980', '104': '19911813', '105':
'19911767', '106': '19913625', '107': '19913832', '108': '19915603',
'109': '19915707', '110': '19915705', '111': '19915558', '112':
'19917330', '113': '19917085', '114': '19918316', '115': '19919617',
'116': '19918555', '117': '19919779', '118': '19920594', '119':
'19920805', '120': '19921503', '121': '19923032', '122': '19922349',
'123': '19923894', '124': '19924058', '125': '19924651', '126':
'19929744', '127': '19929743', '128': '19929742', '129': '19929184',
'130': '19929183', '131': '19928163', '132': '19927463', '133':
'19927462', '134': '19927461', '135': '19926742', '136': '19926741',
'137': '19926738', '138': '19930143', '139': '19930827', '140':
'19931617', '141': '19931616', '142': '19932324', '143': '19932321',
'144': '19932320', '145': '19932845', '146': '19932843', '147':
'19932842', '148': '19932839', '149': '19933621', '150': '19933618',
'151': '19934526', '152': '19934525', '153': '19934524', '154':
'19935167', '155': '19935165', '156': '19936598', '157': '19936596',
'158': '19936594', '160': '19937949', '161': '19937662', '162':
'19937662', '163': '19937662', '164': '19937662', '165': '19937662',
'166': '19940346', '167': '19939390', '168': '19938892', '169':
'19938886', '170': '19938874', '171': '19938181', '172': '19938179',
'173': '19938177', '174': '19937662', '175': '19937662', '176':
'800073144', '177': '800073141', '178': '800070989', '179': '800070987',
'180': '800070985', '181': '800068840', '182': '800068838', '183':
'800068837', '184': '800068835', '185': '800073405', '186': '800075467',
'187': '800075466', '188': '800077797', '189': '800077792', '190':
'800077788', '191': '800080302', '192': '800080300', '193': '800080299',
'194': '800080297', '195': '800080295', '196': '800080294', '197':
'800082560', '198': '800082559', '199': '800082558', '200': '800085053',
'201': '800085057', '202': '800085055', '203': '800087660', '204':
'800087637', '205': '800087636', '206': '800090260', '207': '800090259',
'208': '800090256', '209': '800090252', '210': '800090248', '211':
'800095783', '212': '800093475', '213': '800093472', '214': '800093469',
'215': '800093465', '216': '800097835', '217': '800097830', '218':
'800097828', '219': '800102815', '220': '800100696', '221': '800107510',
'222': '800105566', '223': '800105187', '224': '800105182', '225':
'800105176', '226': '800105171', '227': '800110082', '228': '800110080',
'229': '800110077', '230': '800107893', '231': '800112573', '232':
'800112572', '233': '800112570', '234': '800115083', '235': '800115080',
'236': '800117652', '237': '800136223', '238': '800135715', '239':
'800135712', '240': '800127734', '241': '800125056', '242': '800125055',
'243': '800125054', '244': '800122499', '245': '800122497', '246':
'800120063', '247': '800120060', '248': '800118016', '249': '800118015',
'250': '800138744', '251': '800138741', '252': '800138440', '253':
'800156510', '254': '800156507', '255': '800159343', '256': '800200950',
'257': '800200946', '258': '800180350', '259': '800180348', '260':
'800162155', '261': '800162153', '262': '800159803', '263': '800205850',
'264': '800205839', '265': '800210303', '266': '800210302', '267':
'800212467', '268': '800212465', '269': '800212462', '270': '800215849',
'271': '800218413', '272': '800220590', '273': '800220585', '274':
'800220581', '275': '800220568', '276': '800223836', '277': '800223835',
'278': '800226881', '279': '800226876', '280': '800226875', '281':
'800229066', '282': '800229064', '283': '800232046', '284': '800232043',
'285': '800234330', '286': '800234329', '287': '800234328', '288':
'800239516', '289': '800236806', '290': '800242231', '291': '800242196',
'292': '800242177', '293': '800245005', '294': '800247477', '295':
'800247307', '296': '800247092', '297': '800250315', '298': '800250206',
'299': '800250198', '300': '800252661', '301': '800252745', '302':
'800252731', '303': '800255314', '304': '800255226', '305': '800261560',
'306': '800264399', '307': '800264337', '308': '800262863', '309':
'800267317', '310': '800268635', '311': '800270225', '312': '800272621',
'313': '800272861', '314': '800275290', '315': '800275287', '316':
'800275259', '317': '800277905', '318': '800277897', '319': '800277966',
'320': '800280886', '321': '800280734', '322': '800280721', '323':
'800283469', '324': '800283455', '325': '800291555', '326': '800291531',
'327': '800288739', '328': '800286042', '329': '800286032', '330':
'800294431', '331': '800294423', '332': '800294394', '333': '800297383',
'334': '800299835', '335': '800302625', '336': '800305630', '337':
'800305626', '338': '800308225', '339': '800307935', '340': '800308160',
'341': '800308242', '342': '800310811', '343': '800310657', '344':
'800310651', '345': '800312843', '346': '800313657', '347': '800313593',
'348': '800313385', '349': '800315870', '350': '800315874', '351':
'800315004', '352': '800315980', '353': '800317852', '354': '800317851',
'355': '800317843', '356': '800317841', '357': '800320232', '358':
'800322836', '359': '800322833', '360': '800325648', '361': '800325641',
'362': '800328374', '363': '800328368', '364': '800330891', '365':
'800330882', '366': '800330878', '367': '800336505', '368': '800336491',
'369': '800338571', '370': '800341852', '371': '800339471', '372':
'800344570', '373': '800344561', '374': '800344557', '375': '800347295',
'376': '800348755', '377': '800350263', '378': '800350259', '379':
'800353149', '380': '800351527', '381': '800355911', '382': '800355907',
'383': '800358602', '384': '800358597', '385': '800357146', '386':
'800360127', '387': '800364368', '388': '800364364', '389': '800364360',
'390': '800369266', '391': '800367438', '392': '800367435', '393':
'800365869', '394': '800376494', '395': '800376495', '396': '800376499',
'397': '800376508', '398': '800376564', '399': '800376527', '400':
'800376534', '401': '800376542', '402': '800376553', '403': '800376547',
'404': '800373150', '405': '800373145', '406': '800372444', '407':
'800372437', '408': '800372425', '409': '800379488', '410': '800382132',
'411': '800382127', '412': '800382125', '413': '800386300', '414':
'800384980', '415': '800384977', '416': '800387613', '417': '800387609',
'418': '800390598', '419': '800390595', '420': '800390593', '421':
'800391756', '422': '800393267', '423': '800396025', '424': '800399068',
'425': '800401344', '426': '800404124', '427': '800408946', '428':
'800407272', '429': '800407265', '430': '800411526', '431': '800411522',
'432': '800414380', '433': '800413104', '434': '800413099', '435':
'800415905', '436': '800415900', '437': '800417356', '438': '800420038',
'439': '800420034', '440': '800420028', '441': '800422801', '442':
'800421597', '443': '800421594', '444': '800427313', '445': '800427308',
'446': '800427302', '447': '800427296', '448': '800428813', '449':
'800430293', '450': '800430281', '451': '800430273', '452': '800434255',
'453': '800434253', '454': '800434251', '455': '800434249', '456':
'800434246', '457': '800431774', '458': '800443507', '459': '800442246',
'460': '800440771', '461': '800439363', '462': '800439359', '463':
'800436898', '464': '800434258', '465': '800446256', '466': '800450435',
'467': '800450429', '468': '800450424', '469': '800452914', '470':
'800452909', '471': '800452023', '472': '800452016', '473': '800455755',
'474': '800455748', '475': '800457050', '476': '800458494', '477':
'800461157', '478': '800459620', '479': '800464361', '480': '800464980',
'481': '800462270', '482': '800465908', '483': '800465407', '484':
'800465404', '485': '800467476', '486': '800467755', '487': '800468407',
'488': '800468843', '489': '800469869', '490': '800469867', '491':
'800470232', '492': '800470228', '493': '800470224', '494': '800470783',
'495': '800471280', '496': '800471274', '497': '800471270', '498':
'800471737', '499': '800472257', '500': '800472252', '501': '800472248',
'502': '800472239', '503': '800472826', '504': '800473392', '505':
'800473387', '506': '800473386', '507': '800474131', '508': '800474822',
'509': '800476516', '510': '800476512', '511': '800477305', '512':
'800477304', '513': '800477299', '514': '800477851', '515': '800478313',
'516': '800478309', '517': '800478779', '518': '800479288', '519':
'800479679', '520': '800480262', '521': '800480257', '522': '800483194',
'523': '800482720', '524': '800482271', '525': '800481660', '526':
'800481208', '527': '800480699', '528': '800483203', '529': '800483712',
'530': '800484088', '531': '800484085', '532': '800484667', '533':
'800485151', '534': '800485686', '535': '800487288', '536': '800487265',
'537': '800487264', '538': '800487254', '539': '800487654', '540':
'800488015', '541': '800488014', '542': '800488638', '543': '800488635',
'544': '800489081', '545': '800489074', '546': '800489725', '547':
'800489722', '548': '800490703', '549': '800490702', '550': '800492228',
'551': '800494213', '552': '800494039', '553': '800494442', '554':
'800494426', '555': '800495547', '556': '800495446', '557': '800496750',
'558': '800498164', '559': '800498748', '560': '800499418', '561':
'800499229', '562': '800500847', '563': '800500844', '564': '800500802',
'565': '800501840', '566': '800501597', '567': '800502796', '568':
'800502789', '569': '800503614', '570': '800504092', '571': '800503911',
'572': '800508001', '573': '800507103', '574': '800506285', '575':
'800505846', '576': '800505807', '577': '800505069', '578': '800509304',
'579': '800509218', '580': '800508912', '581': '800509464', '582':
'800510151', '583': '800511800', '584': '800511318', '585': '800512405',
'586': '800512403', '587': '800513304', '588': '800513305', '589':
'800513635', '590': '800513633', '591': '800514762', '592': '800514759',
'593': '800515655', '594': '800515656', '595': '800516480', '596':
'800516479', '597': '800516478', '598': '800517736', '599': '800517735',
'600': '800517733', '601': '800517148', '602': '800517143', '603':
'800517138', '604': '800519296', '605': '800519292', '606': '800520855',
'607': '800520857', '608': '800520736', '609': '800521674', '610':
'800522862', '611': '800523828', '612': '800523825', '613': '800524526',
'614': '800524868', '615': '800525568', '616': '800525566', '617':
'800525848', '618': '800525847', '619': '800525845', '620': '800526925',
'621': '800526923', '622': '800526922', '623': '800528032', '624':
'800527784', '625': '800527783', '626': '800529243', '627': '800528930',
'628': '800528927', '629': '800530217', '630': '800530215', '631':
'800530212', '632': '800531040', '633': '800530845', '634': '800530842',
'635': '800531892', '636': '800532956', '637': '800532952', '638':
'800533102', '639': '800534375', '640': '800534368', '641': '800534363',
'642': '800535420', '643': '800535415', '644': '800535410', '645':
'800536088', '646': '800536085', '647': '800536084', '648': '800537422',
'649': '800537419', '650': '800537413', '651': '800565995', '652':
'800565992', '653': '800563301', '654': '800563298', '655': '800562019',
'656': '800562018', '657': '800560957', '658': '800560954', '659':
'800560953', '660': '800560950', '661': '800567960', '662': '800567958',
'663': '800567957', '664': '800566950', '665': '800566948', '666':
'800566947', '667': '800568961', '668': '800568959', '669': '800568957',
'670': '800569778', '671': '800569776', '672': '800569775', '673':
'800570677', '674': '800570673', '675': '800570647', '676': '800571691',
'677': '800571690', '678': '800571688', '679': '800573679', '680':
'800573678', '681': '800573673', '682': '800572880', '683': '800572878',
'684': '800572876', '685': '800574667', '686': '800574666', '687':
'800574665', '688': '800575627', '689': '800575624', '690': '800575622',
'691': '800576864', '692': '800576861', '693': '800576858', '694':
'800577693', '695': '800578651', '696': '800578648', '697': '800578653',
'698': '800580339', '699': '800581315', '700': '800582094', '701':
'800583021', '702': '800590020', '703': '800590019', '704': '800590018',
'705': '800589231', '706': '800589226', '707': '800588877', '708':
'800587042', '709': '800587039', '710': '800586085', '711': '800584924',
'712': '800583934', '713': '800590941', '714': '800590940', '715':
'800590939', '716': '800592923', '717': '800592921', '718': '800592920',
'719': '800591918', '720': '800591917', '721': '800591915', '722':
'800593832', '723': '800593829', '724': '800593824', '725': '800593890',
'726': '800594956', '727': '800594880', '728': '800594877', '729':
'800594876', '730': '800595884', '731': '800595883', '732': '800595882',
'733': '800595879', '734': '800596854', '735': '800597955', '736':
'800597961', '737': '800597957', '738': '800597954', '739': '800597951',
'740': '800598913', '741': '800600005', '742': '800600003', '743':
'800600000', '744': '800600977', '745': '800600975', '746': '800600973',
'747': '800601974', '748': '800603879', '749': '800603052', '750':
'800603050', '751': '800604977', '752': '800605959', '753': '800607128',
'754': '800608295', '755': '800608294', '756': '800608293', '757':
'800609876', '758': '800610697', '759': '800611768', '760': '800611766',
'761': '800611764', '762': '800612811', '763': '800612809', '764':
'800612806', '765': '800615487', '766': '800613824', '767': '800613823',
'768': '800617427', '769': '800617740', '770': '800618987', '771':
'800618794', '772': '800620463', '773': '800620507', '774': '800621873',
'775': '800621866', '776': '800621485', '777': '800623063', '778':
'800622785', '779': '800624082', '780': '800624606', '781': '800624605',
'782': '800624602', '783': '800626006', '784': '800626004', '785':
'800625998', '786': '800625995', '787': '800625959', '788': '800625684',
'789': '800627159', '790': '800627541', '791': '800628537', '792':
'800628472', '793': '800628440', '794': '800628412', '795': '800628391',
'796': '800629230', '797': '800629175', '798': '800630245', '799':
'800630236', '800': '800631787', '801': '800631425', '802': '800631385',
'803': '800631379', '804': '800631339', '805': '800631299', '806':
'800631198', '807': '800630886', '808': '800633920', '809': '800633720',
'810': '800633520', '811': '800634419', '812': '800635301', '813':
'800635068', '814': '800635957', '815': '800638994', '816': '800638105',
'817': '800637068', '818': '800636754', '819': '800636749', '820':
'800636075', '821': '800639448', '822': '800639234', '823': '800639026',
'824': '800640408', '825': '800640396', '826': '800640985', '827':
'800640977', '828': '800645321', '829': '800644531', '830': '800644235',
'831': '800643606', '832': '800642400', '833': '800641879', '834':
'800645756', '835': '800647017', '836': '800648350', '837': '800648289',
'838': '800648124', '839': '800647488', '840': '800649911', '841':
'800649906', '842': '800649535', '843': '800649521', '844': '800649507',
'845': '800649438', '846': '800649411', '847': '800650580', '848':
'800652017', '849': '800652004', '850': '800651999', '851': '800651955',
'852': '800651790', '853': '800651264', '854': '800651159', '855':
'800652276', '856': '800652260', '857': '800654483', '858': '800654117',
'859': '800654927', '860': '800656751', '861': '800656720', '862':
'800656504', '863': '800656476', '864': '800655926', '865': '800658883',
'866': '800659871', '867': '800659855', '868': '800657502', '869':
'800662419', '870': '800663417', '871': '800661565', '872': '800664542',
'873': '800665790', '874': '800667640', '875': '800668511', '876':
'800668354', '877': '800668932', '878': '800668884', '879': '800668870',
'880': '800668846', '881': '800670519', '882': '800670755', '883':
'800670804', '884': '800670005', '885': '800669956', '886': '800671522',
'887': '800670997', '888': '800676274', '889': '800674751', '890':
'800674396', '891': '800674387', '892': '800674369', '893': '800674171',
'894': '800674165', '895': '800673904', '896': '800673894', '897':
'800673042', '898': '800672682', '899': '800673037', '900': '800674363',
'901': '800671334', '902': '800676404', '903': '800677203', '904':
'800678281', '905': '800677753', '906': '800678579', '907': '800678543',
'908': '800682417', '909': '800680556', '910': '800680572', '911':
'800681753', '912': '800683728', '913': '800683445', '914': '800684755',
'915': '800685559', '916': '800685994', '917': '800686991', '918':
'800688325', '919': '800688988', '920': '800688986', '921': '800688811',
'922': '800688784', '923': '800690794', '924': '800690777', '925':
'800690766', '926': '800691744', '927': '800691714', '928': '800691608',
'929': '800691675', '930': '800692072', '931': '800692888', '932':
'800692853', '933': '800694793', '934': '800695410', '935': '800696421',
'936': '800696417', '937': '800696404', '938': '800696380', '939':
'800695901', '940': '800696527', '941': '800696521', '942': '800696516',
'943': '800697754', '944': '800698640', '945': '800700044', '946':
'800700030', '947': '800700001', '948': '800699969', '949': '800700477',
'950': '800700332', '951': '800701388', '952': '800701378', '953':
'800702260', '954': '800702167', '955': '800702170', '956': '800703184',
'957': '800703189', '958': '800704417', '959': '800704334', '960':
'800704331', '961': '800705315', '962': '800705310', '963': '800706319',
'964': '800706317', '965': '800707543', '966': '800707540', '967':
'800707378', '968': '800707376', '969': '800707372', '970': '800709165',
'971': '800709918', '972': '800709909', '973': '800709913', '974':
'800709590', '975': '800709592', '976': '800711385', '977': '800711436',
'978': '800711448', '979': '800712704', '980': '800712684', '981':
'800712697', '982': '800713805', '983': '800713786', '984': '800715143',
'985': '800715140', '986': '800717742', '987': '800717725', '988':
'800717083', '989': '800719807', '990': '800719797', '991': '800721331',
'992': '800721317', '993': '800722269', '994': '800722253', '995':
'800722190', '996': '800723313', '997': '800723082'}
REDIRECT_MAP_CATEGORIES = {'27': '438046136', '28': '438046133', '29':
'438046135', '30': '438046134', '31': '438046128', '32': '438046127',
'33': '438046130', '34': '438046131', '35': '438046132', '36': '438046129'}
<|reserved_special_token_1|>
REDIRECT_MAP = {
'90':'19904201',
'91':'19903329',
'92':'19899125',
'93':'19901043',
'94':'19903192',
'95':'19899788',
'97':'19904423',
'98':'19906163',
'99':'19905540',
'100':'19907871',
'101':'19908147',
'102':'19910103',
'103':'19909980',
'104':'19911813',
'105':'19911767',
'106':'19913625',
'107':'19913832',
'108':'19915603',
'109':'19915707',
'110':'19915705',
'111':'19915558',
'112':'19917330',
'113':'19917085',
'114':'19918316',
'115':'19919617',
'116':'19918555',
'117':'19919779',
'118':'19920594',
'119':'19920805',
'120':'19921503',
'121':'19923032',
'122':'19922349',
'123':'19923894',
'124':'19924058',
'125':'19924651',
'126':'19929744',
'127':'19929743',
'128':'19929742',
'129':'19929184',
'130':'19929183',
'131':'19928163',
'132':'19927463',
'133':'19927462',
'134':'19927461',
'135':'19926742',
'136':'19926741',
'137':'19926738',
'138':'19930143',
'139':'19930827',
'140':'19931617',
'141':'19931616',
'142':'19932324',
'143':'19932321',
'144':'19932320',
'145':'19932845',
'146':'19932843',
'147':'19932842',
'148':'19932839',
'149':'19933621',
'150':'19933618',
'151':'19934526',
'152':'19934525',
'153':'19934524',
'154':'19935167',
'155':'19935165',
'156':'19936598',
'157':'19936596',
'158':'19936594',
'160':'19937949',
'161':'19937662',
'162':'19937662',
'163':'19937662',
'164':'19937662',
'165':'19937662',
'166':'19940346',
'167':'19939390',
'168':'19938892',
'169':'19938886',
'170':'19938874',
'171':'19938181',
'172':'19938179',
'173':'19938177',
'174':'19937662',
'175':'19937662',
'176':'800073144',
'177':'800073141',
'178':'800070989',
'179':'800070987',
'180':'800070985',
'181':'800068840',
'182':'800068838',
'183':'800068837',
'184':'800068835',
'185':'800073405',
'186':'800075467',
'187':'800075466',
'188':'800077797',
'189':'800077792',
'190':'800077788',
'191':'800080302',
'192':'800080300',
'193':'800080299',
'194':'800080297',
'195':'800080295',
'196':'800080294',
'197':'800082560',
'198':'800082559',
'199':'800082558',
'200':'800085053',
'201':'800085057',
'202':'800085055',
'203':'800087660',
'204':'800087637',
'205':'800087636',
'206':'800090260',
'207':'800090259',
'208':'800090256',
'209':'800090252',
'210':'800090248',
'211':'800095783',
'212':'800093475',
'213':'800093472',
'214':'800093469',
'215':'800093465',
'216':'800097835',
'217':'800097830',
'218':'800097828',
'219':'800102815',
'220':'800100696',
'221':'800107510',
'222':'800105566',
'223':'800105187',
'224':'800105182',
'225':'800105176',
'226':'800105171',
'227':'800110082',
'228':'800110080',
'229':'800110077',
'230':'800107893',
'231':'800112573',
'232':'800112572',
'233':'800112570',
'234':'800115083',
'235':'800115080',
'236':'800117652',
'237':'800136223',
'238':'800135715',
'239':'800135712',
'240':'800127734',
'241':'800125056',
'242':'800125055',
'243':'800125054',
'244':'800122499',
'245':'800122497',
'246':'800120063',
'247':'800120060',
'248':'800118016',
'249':'800118015',
'250':'800138744',
'251':'800138741',
'252':'800138440',
'253':'800156510',
'254':'800156507',
'255':'800159343',
'256':'800200950',
'257':'800200946',
'258':'800180350',
'259':'800180348',
'260':'800162155',
'261':'800162153',
'262':'800159803',
'263':'800205850',
'264':'800205839',
'265':'800210303',
'266':'800210302',
'267':'800212467',
'268':'800212465',
'269':'800212462',
'270':'800215849',
'271':'800218413',
'272':'800220590',
'273':'800220585',
'274':'800220581',
'275':'800220568',
'276':'800223836',
'277':'800223835',
'278':'800226881',
'279':'800226876',
'280':'800226875',
'281':'800229066',
'282':'800229064',
'283':'800232046',
'284':'800232043',
'285':'800234330',
'286':'800234329',
'287':'800234328',
'288':'800239516',
'289':'800236806',
'290':'800242231',
'291':'800242196',
'292':'800242177',
'293':'800245005',
'294':'800247477',
'295':'800247307',
'296':'800247092',
'297':'800250315',
'298':'800250206',
'299':'800250198',
'300':'800252661',
'301':'800252745',
'302':'800252731',
'303':'800255314',
'304':'800255226',
'305':'800261560',
'306':'800264399',
'307':'800264337',
'308':'800262863',
'309':'800267317',
'310':'800268635',
'311':'800270225',
'312':'800272621',
'313':'800272861',
'314':'800275290',
'315':'800275287',
'316':'800275259',
'317':'800277905',
'318':'800277897',
'319':'800277966',
'320':'800280886',
'321':'800280734',
'322':'800280721',
'323':'800283469',
'324':'800283455',
'325':'800291555',
'326':'800291531',
'327':'800288739',
'328':'800286042',
'329':'800286032',
'330':'800294431',
'331':'800294423',
'332':'800294394',
'333':'800297383',
'334':'800299835',
'335':'800302625',
'336':'800305630',
'337':'800305626',
'338':'800308225',
'339':'800307935',
'340':'800308160',
'341':'800308242',
'342':'800310811',
'343':'800310657',
'344':'800310651',
'345':'800312843',
'346':'800313657',
'347':'800313593',
'348':'800313385',
'349':'800315870',
'350':'800315874',
'351':'800315004',
'352':'800315980',
'353':'800317852',
'354':'800317851',
'355':'800317843',
'356':'800317841',
'357':'800320232',
'358':'800322836',
'359':'800322833',
'360':'800325648',
'361':'800325641',
'362':'800328374',
'363':'800328368',
'364':'800330891',
'365':'800330882',
'366':'800330878',
'367':'800336505',
'368':'800336491',
'369':'800338571',
'370':'800341852',
'371':'800339471',
'372':'800344570',
'373':'800344561',
'374':'800344557',
'375':'800347295',
'376':'800348755',
'377':'800350263',
'378':'800350259',
'379':'800353149',
'380':'800351527',
'381':'800355911',
'382':'800355907',
'383':'800358602',
'384':'800358597',
'385':'800357146',
'386':'800360127',
'387':'800364368',
'388':'800364364',
'389':'800364360',
'390':'800369266',
'391':'800367438',
'392':'800367435',
'393':'800365869',
'394':'800376494',
'395':'800376495',
'396':'800376499',
'397':'800376508',
'398':'800376564',
'399':'800376527',
'400':'800376534',
'401':'800376542',
'402':'800376553',
'403':'800376547',
'404':'800373150',
'405':'800373145',
'406':'800372444',
'407':'800372437',
'408':'800372425',
'409':'800379488',
'410':'800382132',
'411':'800382127',
'412':'800382125',
'413':'800386300',
'414':'800384980',
'415':'800384977',
'416':'800387613',
'417':'800387609',
'418':'800390598',
'419':'800390595',
'420':'800390593',
'421':'800391756',
'422':'800393267',
'423':'800396025',
'424':'800399068',
'425':'800401344',
'426':'800404124',
'427':'800408946',
'428':'800407272',
'429':'800407265',
'430':'800411526',
'431':'800411522',
'432':'800414380',
'433':'800413104',
'434':'800413099',
'435':'800415905',
'436':'800415900',
'437':'800417356',
'438':'800420038',
'439':'800420034',
'440':'800420028',
'441':'800422801',
'442':'800421597',
'443':'800421594',
'444':'800427313',
'445':'800427308',
'446':'800427302',
'447':'800427296',
'448':'800428813',
'449':'800430293',
'450':'800430281',
'451':'800430273',
'452':'800434255',
'453':'800434253',
'454':'800434251',
'455':'800434249',
'456':'800434246',
'457':'800431774',
'458':'800443507',
'459':'800442246',
'460':'800440771',
'461':'800439363',
'462':'800439359',
'463':'800436898',
'464':'800434258',
'465':'800446256',
'466':'800450435',
'467':'800450429',
'468':'800450424',
'469':'800452914',
'470':'800452909',
'471':'800452023',
'472':'800452016',
'473':'800455755',
'474':'800455748',
'475':'800457050',
'476':'800458494',
'477':'800461157',
'478':'800459620',
'479':'800464361',
'480':'800464980',
'481':'800462270',
'482':'800465908',
'483':'800465407',
'484':'800465404',
'485':'800467476',
'486':'800467755',
'487':'800468407',
'488':'800468843',
'489':'800469869',
'490':'800469867',
'491':'800470232',
'492':'800470228',
'493':'800470224',
'494':'800470783',
'495':'800471280',
'496':'800471274',
'497':'800471270',
'498':'800471737',
'499':'800472257',
'500':'800472252',
'501':'800472248',
'502':'800472239',
'503':'800472826',
'504':'800473392',
'505':'800473387',
'506':'800473386',
'507':'800474131',
'508':'800474822',
'509':'800476516',
'510':'800476512',
'511':'800477305',
'512':'800477304',
'513':'800477299',
'514':'800477851',
'515':'800478313',
'516':'800478309',
'517':'800478779',
'518':'800479288',
'519':'800479679',
'520':'800480262',
'521':'800480257',
'522':'800483194',
'523':'800482720',
'524':'800482271',
'525':'800481660',
'526':'800481208',
'527':'800480699',
'528':'800483203',
'529':'800483712',
'530':'800484088',
'531':'800484085',
'532':'800484667',
'533':'800485151',
'534':'800485686',
'535':'800487288',
'536':'800487265',
'537':'800487264',
'538':'800487254',
'539':'800487654',
'540':'800488015',
'541':'800488014',
'542':'800488638',
'543':'800488635',
'544':'800489081',
'545':'800489074',
'546':'800489725',
'547':'800489722',
'548':'800490703',
'549':'800490702',
'550':'800492228',
'551':'800494213',
'552':'800494039',
'553':'800494442',
'554':'800494426',
'555':'800495547',
'556':'800495446',
'557':'800496750',
'558':'800498164',
'559':'800498748',
'560':'800499418',
'561':'800499229',
'562':'800500847',
'563':'800500844',
'564':'800500802',
'565':'800501840',
'566':'800501597',
'567':'800502796',
'568':'800502789',
'569':'800503614',
'570':'800504092',
'571':'800503911',
'572':'800508001',
'573':'800507103',
'574':'800506285',
'575':'800505846',
'576':'800505807',
'577':'800505069',
'578':'800509304',
'579':'800509218',
'580':'800508912',
'581':'800509464',
'582':'800510151',
'583':'800511800',
'584':'800511318',
'585':'800512405',
'586':'800512403',
'587':'800513304',
'588':'800513305',
'589':'800513635',
'590':'800513633',
'591':'800514762',
'592':'800514759',
'593':'800515655',
'594':'800515656',
'595':'800516480',
'596':'800516479',
'597':'800516478',
'598':'800517736',
'599':'800517735',
'600':'800517733',
'601':'800517148',
'602':'800517143',
'603':'800517138',
'604':'800519296',
'605':'800519292',
'606':'800520855',
'607':'800520857',
'608':'800520736',
'609':'800521674',
'610':'800522862',
'611':'800523828',
'612':'800523825',
'613':'800524526',
'614':'800524868',
'615':'800525568',
'616':'800525566',
'617':'800525848',
'618':'800525847',
'619':'800525845',
'620':'800526925',
'621':'800526923',
'622':'800526922',
'623':'800528032',
'624':'800527784',
'625':'800527783',
'626':'800529243',
'627':'800528930',
'628':'800528927',
'629':'800530217',
'630':'800530215',
'631':'800530212',
'632':'800531040',
'633':'800530845',
'634':'800530842',
'635':'800531892',
'636':'800532956',
'637':'800532952',
'638':'800533102',
'639':'800534375',
'640':'800534368',
'641':'800534363',
'642':'800535420',
'643':'800535415',
'644':'800535410',
'645':'800536088',
'646':'800536085',
'647':'800536084',
'648':'800537422',
'649':'800537419',
'650':'800537413',
'651':'800565995',
'652':'800565992',
'653':'800563301',
'654':'800563298',
'655':'800562019',
'656':'800562018',
'657':'800560957',
'658':'800560954',
'659':'800560953',
'660':'800560950',
'661':'800567960',
'662':'800567958',
'663':'800567957',
'664':'800566950',
'665':'800566948',
'666':'800566947',
'667':'800568961',
'668':'800568959',
'669':'800568957',
'670':'800569778',
'671':'800569776',
'672':'800569775',
'673':'800570677',
'674':'800570673',
'675':'800570647',
'676':'800571691',
'677':'800571690',
'678':'800571688',
'679':'800573679',
'680':'800573678',
'681':'800573673',
'682':'800572880',
'683':'800572878',
'684':'800572876',
'685':'800574667',
'686':'800574666',
'687':'800574665',
'688':'800575627',
'689':'800575624',
'690':'800575622',
'691':'800576864',
'692':'800576861',
'693':'800576858',
'694':'800577693',
'695':'800578651',
'696':'800578648',
'697':'800578653',
'698':'800580339',
'699':'800581315',
'700':'800582094',
'701':'800583021',
'702':'800590020',
'703':'800590019',
'704':'800590018',
'705':'800589231',
'706':'800589226',
'707':'800588877',
'708':'800587042',
'709':'800587039',
'710':'800586085',
'711':'800584924',
'712':'800583934',
'713':'800590941',
'714':'800590940',
'715':'800590939',
'716':'800592923',
'717':'800592921',
'718':'800592920',
'719':'800591918',
'720':'800591917',
'721':'800591915',
'722':'800593832',
'723':'800593829',
'724':'800593824',
'725':'800593890',
'726':'800594956',
'727':'800594880',
'728':'800594877',
'729':'800594876',
'730':'800595884',
'731':'800595883',
'732':'800595882',
'733':'800595879',
'734':'800596854',
'735':'800597955',
'736':'800597961',
'737':'800597957',
'738':'800597954',
'739':'800597951',
'740':'800598913',
'741':'800600005',
'742':'800600003',
'743':'800600000',
'744':'800600977',
'745':'800600975',
'746':'800600973',
'747':'800601974',
'748':'800603879',
'749':'800603052',
'750':'800603050',
'751':'800604977',
'752':'800605959',
'753':'800607128',
'754':'800608295',
'755':'800608294',
'756':'800608293',
'757':'800609876',
'758':'800610697',
'759':'800611768',
'760':'800611766',
'761':'800611764',
'762':'800612811',
'763':'800612809',
'764':'800612806',
'765':'800615487',
'766':'800613824',
'767':'800613823',
'768':'800617427',
'769':'800617740',
'770':'800618987',
'771':'800618794',
'772':'800620463',
'773':'800620507',
'774':'800621873',
'775':'800621866',
'776':'800621485',
'777':'800623063',
'778':'800622785',
'779':'800624082',
'780':'800624606',
'781':'800624605',
'782':'800624602',
'783':'800626006',
'784':'800626004',
'785':'800625998',
'786':'800625995',
'787':'800625959',
'788':'800625684',
'789':'800627159',
'790':'800627541',
'791':'800628537',
'792':'800628472',
'793':'800628440',
'794':'800628412',
'795':'800628391',
'796':'800629230',
'797':'800629175',
'798':'800630245',
'799':'800630236',
'800':'800631787',
'801':'800631425',
'802':'800631385',
'803':'800631379',
'804':'800631339',
'805':'800631299',
'806':'800631198',
'807':'800630886',
'808':'800633920',
'809':'800633720',
'810':'800633520',
'811':'800634419',
'812':'800635301',
'813':'800635068',
'814':'800635957',
'815':'800638994',
'816':'800638105',
'817':'800637068',
'818':'800636754',
'819':'800636749',
'820':'800636075',
'821':'800639448',
'822':'800639234',
'823':'800639026',
'824':'800640408',
'825':'800640396',
'826':'800640985',
'827':'800640977',
'828':'800645321',
'829':'800644531',
'830':'800644235',
'831':'800643606',
'832':'800642400',
'833':'800641879',
'834':'800645756',
'835':'800647017',
'836':'800648350',
'837':'800648289',
'838':'800648124',
'839':'800647488',
'840':'800649911',
'841':'800649906',
'842':'800649535',
'843':'800649521',
'844':'800649507',
'845':'800649438',
'846':'800649411',
'847':'800650580',
'848':'800652017',
'849':'800652004',
'850':'800651999',
'851':'800651955',
'852':'800651790',
'853':'800651264',
'854':'800651159',
'855':'800652276',
'856':'800652260',
'857':'800654483',
'858':'800654117',
'859':'800654927',
'860':'800656751',
'861':'800656720',
'862':'800656504',
'863':'800656476',
'864':'800655926',
'865':'800658883',
'866':'800659871',
'867':'800659855',
'868':'800657502',
'869':'800662419',
'870':'800663417',
'871':'800661565',
'872':'800664542',
'873':'800665790',
'874':'800667640',
'875':'800668511',
'876':'800668354',
'877':'800668932',
'878':'800668884',
'879':'800668870',
'880':'800668846',
'881':'800670519',
'882':'800670755',
'883':'800670804',
'884':'800670005',
'885':'800669956',
'886':'800671522',
'887':'800670997',
'888':'800676274',
'889':'800674751',
'890':'800674396',
'891':'800674387',
'892':'800674369',
'893':'800674171',
'894':'800674165',
'895':'800673904',
'896':'800673894',
'897':'800673042',
'898':'800672682',
'899':'800673037',
'900':'800674363',
'901':'800671334',
'902':'800676404',
'903':'800677203',
'904':'800678281',
'905':'800677753',
'906':'800678579',
'907':'800678543',
'908':'800682417',
'909':'800680556',
'910':'800680572',
'911':'800681753',
'912':'800683728',
'913':'800683445',
'914':'800684755',
'915':'800685559',
'916':'800685994',
'917':'800686991',
'918':'800688325',
'919':'800688988',
'920':'800688986',
'921':'800688811',
'922':'800688784',
'923':'800690794',
'924':'800690777',
'925':'800690766',
'926':'800691744',
'927':'800691714',
'928':'800691608',
'929':'800691675',
'930':'800692072',
'931':'800692888',
'932':'800692853',
'933':'800694793',
'934':'800695410',
'935':'800696421',
'936':'800696417',
'937':'800696404',
'938':'800696380',
'939':'800695901',
'940':'800696527',
'941':'800696521',
'942':'800696516',
'943':'800697754',
'944':'800698640',
'945':'800700044',
'946':'800700030',
'947':'800700001',
'948':'800699969',
'949':'800700477',
'950':'800700332',
'951':'800701388',
'952':'800701378',
'953':'800702260',
'954':'800702167',
'955':'800702170',
'956':'800703184',
'957':'800703189',
'958':'800704417',
'959':'800704334',
'960':'800704331',
'961':'800705315',
'962':'800705310',
'963':'800706319',
'964':'800706317',
'965':'800707543',
'966':'800707540',
'967':'800707378',
'968':'800707376',
'969':'800707372',
'970':'800709165',
'971':'800709918',
'972':'800709909',
'973':'800709913',
'974':'800709590',
'975':'800709592',
'976':'800711385',
'977':'800711436',
'978':'800711448',
'979':'800712704',
'980':'800712684',
'981':'800712697',
'982':'800713805',
'983':'800713786',
'984':'800715143',
'985':'800715140',
'986':'800717742',
'987':'800717725',
'988':'800717083',
'989':'800719807',
'990':'800719797',
'991':'800721331',
'992':'800721317',
'993':'800722269',
'994':'800722253',
'995':'800722190',
'996':'800723313',
'997':'800723082',
}
# Maps legacy category IDs to replacement category IDs (both kept as
# decimal strings, matching the key/value format of REDIRECT_MAP above).
# NOTE(review): presumably consumed by a URL-redirect handler for old
# category links — confirm against the code that reads this mapping.
REDIRECT_MAP_CATEGORIES = {
    '27':'438046136',
    '28':'438046133',
    '29':'438046135',
    '30':'438046134',
    '31':'438046128',
    '32':'438046127',
    '33':'438046130',
    '34':'438046131',
    '35':'438046132',
    '36':'438046129',
}
|
flexible
|
{
"blob_id": "fb92912e1a752f3766f9439f75ca28379e23823f",
"index": 3600,
"step-1": "<mask token>\n",
"step-2": "REDIRECT_MAP = {'90': '19904201', '91': '19903329', '92': '19899125', '93':\n '19901043', '94': '19903192', '95': '19899788', '97': '19904423', '98':\n '19906163', '99': '19905540', '100': '19907871', '101': '19908147',\n '102': '19910103', '103': '19909980', '104': '19911813', '105':\n '19911767', '106': '19913625', '107': '19913832', '108': '19915603',\n '109': '19915707', '110': '19915705', '111': '19915558', '112':\n '19917330', '113': '19917085', '114': '19918316', '115': '19919617',\n '116': '19918555', '117': '19919779', '118': '19920594', '119':\n '19920805', '120': '19921503', '121': '19923032', '122': '19922349',\n '123': '19923894', '124': '19924058', '125': '19924651', '126':\n '19929744', '127': '19929743', '128': '19929742', '129': '19929184',\n '130': '19929183', '131': '19928163', '132': '19927463', '133':\n '19927462', '134': '19927461', '135': '19926742', '136': '19926741',\n '137': '19926738', '138': '19930143', '139': '19930827', '140':\n '19931617', '141': '19931616', '142': '19932324', '143': '19932321',\n '144': '19932320', '145': '19932845', '146': '19932843', '147':\n '19932842', '148': '19932839', '149': '19933621', '150': '19933618',\n '151': '19934526', '152': '19934525', '153': '19934524', '154':\n '19935167', '155': '19935165', '156': '19936598', '157': '19936596',\n '158': '19936594', '160': '19937949', '161': '19937662', '162':\n '19937662', '163': '19937662', '164': '19937662', '165': '19937662',\n '166': '19940346', '167': '19939390', '168': '19938892', '169':\n '19938886', '170': '19938874', '171': '19938181', '172': '19938179',\n '173': '19938177', '174': '19937662', '175': '19937662', '176':\n '800073144', '177': '800073141', '178': '800070989', '179': '800070987',\n '180': '800070985', '181': '800068840', '182': '800068838', '183':\n '800068837', '184': '800068835', '185': '800073405', '186': '800075467',\n '187': '800075466', '188': '800077797', '189': '800077792', '190':\n '800077788', '191': '800080302', '192': 
'800080300', '193': '800080299',\n '194': '800080297', '195': '800080295', '196': '800080294', '197':\n '800082560', '198': '800082559', '199': '800082558', '200': '800085053',\n '201': '800085057', '202': '800085055', '203': '800087660', '204':\n '800087637', '205': '800087636', '206': '800090260', '207': '800090259',\n '208': '800090256', '209': '800090252', '210': '800090248', '211':\n '800095783', '212': '800093475', '213': '800093472', '214': '800093469',\n '215': '800093465', '216': '800097835', '217': '800097830', '218':\n '800097828', '219': '800102815', '220': '800100696', '221': '800107510',\n '222': '800105566', '223': '800105187', '224': '800105182', '225':\n '800105176', '226': '800105171', '227': '800110082', '228': '800110080',\n '229': '800110077', '230': '800107893', '231': '800112573', '232':\n '800112572', '233': '800112570', '234': '800115083', '235': '800115080',\n '236': '800117652', '237': '800136223', '238': '800135715', '239':\n '800135712', '240': '800127734', '241': '800125056', '242': '800125055',\n '243': '800125054', '244': '800122499', '245': '800122497', '246':\n '800120063', '247': '800120060', '248': '800118016', '249': '800118015',\n '250': '800138744', '251': '800138741', '252': '800138440', '253':\n '800156510', '254': '800156507', '255': '800159343', '256': '800200950',\n '257': '800200946', '258': '800180350', '259': '800180348', '260':\n '800162155', '261': '800162153', '262': '800159803', '263': '800205850',\n '264': '800205839', '265': '800210303', '266': '800210302', '267':\n '800212467', '268': '800212465', '269': '800212462', '270': '800215849',\n '271': '800218413', '272': '800220590', '273': '800220585', '274':\n '800220581', '275': '800220568', '276': '800223836', '277': '800223835',\n '278': '800226881', '279': '800226876', '280': '800226875', '281':\n '800229066', '282': '800229064', '283': '800232046', '284': '800232043',\n '285': '800234330', '286': '800234329', '287': '800234328', '288':\n '800239516', '289': 
'800236806', '290': '800242231', '291': '800242196',\n '292': '800242177', '293': '800245005', '294': '800247477', '295':\n '800247307', '296': '800247092', '297': '800250315', '298': '800250206',\n '299': '800250198', '300': '800252661', '301': '800252745', '302':\n '800252731', '303': '800255314', '304': '800255226', '305': '800261560',\n '306': '800264399', '307': '800264337', '308': '800262863', '309':\n '800267317', '310': '800268635', '311': '800270225', '312': '800272621',\n '313': '800272861', '314': '800275290', '315': '800275287', '316':\n '800275259', '317': '800277905', '318': '800277897', '319': '800277966',\n '320': '800280886', '321': '800280734', '322': '800280721', '323':\n '800283469', '324': '800283455', '325': '800291555', '326': '800291531',\n '327': '800288739', '328': '800286042', '329': '800286032', '330':\n '800294431', '331': '800294423', '332': '800294394', '333': '800297383',\n '334': '800299835', '335': '800302625', '336': '800305630', '337':\n '800305626', '338': '800308225', '339': '800307935', '340': '800308160',\n '341': '800308242', '342': '800310811', '343': '800310657', '344':\n '800310651', '345': '800312843', '346': '800313657', '347': '800313593',\n '348': '800313385', '349': '800315870', '350': '800315874', '351':\n '800315004', '352': '800315980', '353': '800317852', '354': '800317851',\n '355': '800317843', '356': '800317841', '357': '800320232', '358':\n '800322836', '359': '800322833', '360': '800325648', '361': '800325641',\n '362': '800328374', '363': '800328368', '364': '800330891', '365':\n '800330882', '366': '800330878', '367': '800336505', '368': '800336491',\n '369': '800338571', '370': '800341852', '371': '800339471', '372':\n '800344570', '373': '800344561', '374': '800344557', '375': '800347295',\n '376': '800348755', '377': '800350263', '378': '800350259', '379':\n '800353149', '380': '800351527', '381': '800355911', '382': '800355907',\n '383': '800358602', '384': '800358597', '385': '800357146', '386':\n 
'800360127', '387': '800364368', '388': '800364364', '389': '800364360',\n '390': '800369266', '391': '800367438', '392': '800367435', '393':\n '800365869', '394': '800376494', '395': '800376495', '396': '800376499',\n '397': '800376508', '398': '800376564', '399': '800376527', '400':\n '800376534', '401': '800376542', '402': '800376553', '403': '800376547',\n '404': '800373150', '405': '800373145', '406': '800372444', '407':\n '800372437', '408': '800372425', '409': '800379488', '410': '800382132',\n '411': '800382127', '412': '800382125', '413': '800386300', '414':\n '800384980', '415': '800384977', '416': '800387613', '417': '800387609',\n '418': '800390598', '419': '800390595', '420': '800390593', '421':\n '800391756', '422': '800393267', '423': '800396025', '424': '800399068',\n '425': '800401344', '426': '800404124', '427': '800408946', '428':\n '800407272', '429': '800407265', '430': '800411526', '431': '800411522',\n '432': '800414380', '433': '800413104', '434': '800413099', '435':\n '800415905', '436': '800415900', '437': '800417356', '438': '800420038',\n '439': '800420034', '440': '800420028', '441': '800422801', '442':\n '800421597', '443': '800421594', '444': '800427313', '445': '800427308',\n '446': '800427302', '447': '800427296', '448': '800428813', '449':\n '800430293', '450': '800430281', '451': '800430273', '452': '800434255',\n '453': '800434253', '454': '800434251', '455': '800434249', '456':\n '800434246', '457': '800431774', '458': '800443507', '459': '800442246',\n '460': '800440771', '461': '800439363', '462': '800439359', '463':\n '800436898', '464': '800434258', '465': '800446256', '466': '800450435',\n '467': '800450429', '468': '800450424', '469': '800452914', '470':\n '800452909', '471': '800452023', '472': '800452016', '473': '800455755',\n '474': '800455748', '475': '800457050', '476': '800458494', '477':\n '800461157', '478': '800459620', '479': '800464361', '480': '800464980',\n '481': '800462270', '482': '800465908', '483': 
'800465407', '484':\n '800465404', '485': '800467476', '486': '800467755', '487': '800468407',\n '488': '800468843', '489': '800469869', '490': '800469867', '491':\n '800470232', '492': '800470228', '493': '800470224', '494': '800470783',\n '495': '800471280', '496': '800471274', '497': '800471270', '498':\n '800471737', '499': '800472257', '500': '800472252', '501': '800472248',\n '502': '800472239', '503': '800472826', '504': '800473392', '505':\n '800473387', '506': '800473386', '507': '800474131', '508': '800474822',\n '509': '800476516', '510': '800476512', '511': '800477305', '512':\n '800477304', '513': '800477299', '514': '800477851', '515': '800478313',\n '516': '800478309', '517': '800478779', '518': '800479288', '519':\n '800479679', '520': '800480262', '521': '800480257', '522': '800483194',\n '523': '800482720', '524': '800482271', '525': '800481660', '526':\n '800481208', '527': '800480699', '528': '800483203', '529': '800483712',\n '530': '800484088', '531': '800484085', '532': '800484667', '533':\n '800485151', '534': '800485686', '535': '800487288', '536': '800487265',\n '537': '800487264', '538': '800487254', '539': '800487654', '540':\n '800488015', '541': '800488014', '542': '800488638', '543': '800488635',\n '544': '800489081', '545': '800489074', '546': '800489725', '547':\n '800489722', '548': '800490703', '549': '800490702', '550': '800492228',\n '551': '800494213', '552': '800494039', '553': '800494442', '554':\n '800494426', '555': '800495547', '556': '800495446', '557': '800496750',\n '558': '800498164', '559': '800498748', '560': '800499418', '561':\n '800499229', '562': '800500847', '563': '800500844', '564': '800500802',\n '565': '800501840', '566': '800501597', '567': '800502796', '568':\n '800502789', '569': '800503614', '570': '800504092', '571': '800503911',\n '572': '800508001', '573': '800507103', '574': '800506285', '575':\n '800505846', '576': '800505807', '577': '800505069', '578': '800509304',\n '579': '800509218', '580': 
'800508912', '581': '800509464', '582':\n '800510151', '583': '800511800', '584': '800511318', '585': '800512405',\n '586': '800512403', '587': '800513304', '588': '800513305', '589':\n '800513635', '590': '800513633', '591': '800514762', '592': '800514759',\n '593': '800515655', '594': '800515656', '595': '800516480', '596':\n '800516479', '597': '800516478', '598': '800517736', '599': '800517735',\n '600': '800517733', '601': '800517148', '602': '800517143', '603':\n '800517138', '604': '800519296', '605': '800519292', '606': '800520855',\n '607': '800520857', '608': '800520736', '609': '800521674', '610':\n '800522862', '611': '800523828', '612': '800523825', '613': '800524526',\n '614': '800524868', '615': '800525568', '616': '800525566', '617':\n '800525848', '618': '800525847', '619': '800525845', '620': '800526925',\n '621': '800526923', '622': '800526922', '623': '800528032', '624':\n '800527784', '625': '800527783', '626': '800529243', '627': '800528930',\n '628': '800528927', '629': '800530217', '630': '800530215', '631':\n '800530212', '632': '800531040', '633': '800530845', '634': '800530842',\n '635': '800531892', '636': '800532956', '637': '800532952', '638':\n '800533102', '639': '800534375', '640': '800534368', '641': '800534363',\n '642': '800535420', '643': '800535415', '644': '800535410', '645':\n '800536088', '646': '800536085', '647': '800536084', '648': '800537422',\n '649': '800537419', '650': '800537413', '651': '800565995', '652':\n '800565992', '653': '800563301', '654': '800563298', '655': '800562019',\n '656': '800562018', '657': '800560957', '658': '800560954', '659':\n '800560953', '660': '800560950', '661': '800567960', '662': '800567958',\n '663': '800567957', '664': '800566950', '665': '800566948', '666':\n '800566947', '667': '800568961', '668': '800568959', '669': '800568957',\n '670': '800569778', '671': '800569776', '672': '800569775', '673':\n '800570677', '674': '800570673', '675': '800570647', '676': '800571691',\n '677': 
'800571690', '678': '800571688', '679': '800573679', '680':\n '800573678', '681': '800573673', '682': '800572880', '683': '800572878',\n '684': '800572876', '685': '800574667', '686': '800574666', '687':\n '800574665', '688': '800575627', '689': '800575624', '690': '800575622',\n '691': '800576864', '692': '800576861', '693': '800576858', '694':\n '800577693', '695': '800578651', '696': '800578648', '697': '800578653',\n '698': '800580339', '699': '800581315', '700': '800582094', '701':\n '800583021', '702': '800590020', '703': '800590019', '704': '800590018',\n '705': '800589231', '706': '800589226', '707': '800588877', '708':\n '800587042', '709': '800587039', '710': '800586085', '711': '800584924',\n '712': '800583934', '713': '800590941', '714': '800590940', '715':\n '800590939', '716': '800592923', '717': '800592921', '718': '800592920',\n '719': '800591918', '720': '800591917', '721': '800591915', '722':\n '800593832', '723': '800593829', '724': '800593824', '725': '800593890',\n '726': '800594956', '727': '800594880', '728': '800594877', '729':\n '800594876', '730': '800595884', '731': '800595883', '732': '800595882',\n '733': '800595879', '734': '800596854', '735': '800597955', '736':\n '800597961', '737': '800597957', '738': '800597954', '739': '800597951',\n '740': '800598913', '741': '800600005', '742': '800600003', '743':\n '800600000', '744': '800600977', '745': '800600975', '746': '800600973',\n '747': '800601974', '748': '800603879', '749': '800603052', '750':\n '800603050', '751': '800604977', '752': '800605959', '753': '800607128',\n '754': '800608295', '755': '800608294', '756': '800608293', '757':\n '800609876', '758': '800610697', '759': '800611768', '760': '800611766',\n '761': '800611764', '762': '800612811', '763': '800612809', '764':\n '800612806', '765': '800615487', '766': '800613824', '767': '800613823',\n '768': '800617427', '769': '800617740', '770': '800618987', '771':\n '800618794', '772': '800620463', '773': '800620507', '774': 
'800621873',\n '775': '800621866', '776': '800621485', '777': '800623063', '778':\n '800622785', '779': '800624082', '780': '800624606', '781': '800624605',\n '782': '800624602', '783': '800626006', '784': '800626004', '785':\n '800625998', '786': '800625995', '787': '800625959', '788': '800625684',\n '789': '800627159', '790': '800627541', '791': '800628537', '792':\n '800628472', '793': '800628440', '794': '800628412', '795': '800628391',\n '796': '800629230', '797': '800629175', '798': '800630245', '799':\n '800630236', '800': '800631787', '801': '800631425', '802': '800631385',\n '803': '800631379', '804': '800631339', '805': '800631299', '806':\n '800631198', '807': '800630886', '808': '800633920', '809': '800633720',\n '810': '800633520', '811': '800634419', '812': '800635301', '813':\n '800635068', '814': '800635957', '815': '800638994', '816': '800638105',\n '817': '800637068', '818': '800636754', '819': '800636749', '820':\n '800636075', '821': '800639448', '822': '800639234', '823': '800639026',\n '824': '800640408', '825': '800640396', '826': '800640985', '827':\n '800640977', '828': '800645321', '829': '800644531', '830': '800644235',\n '831': '800643606', '832': '800642400', '833': '800641879', '834':\n '800645756', '835': '800647017', '836': '800648350', '837': '800648289',\n '838': '800648124', '839': '800647488', '840': '800649911', '841':\n '800649906', '842': '800649535', '843': '800649521', '844': '800649507',\n '845': '800649438', '846': '800649411', '847': '800650580', '848':\n '800652017', '849': '800652004', '850': '800651999', '851': '800651955',\n '852': '800651790', '853': '800651264', '854': '800651159', '855':\n '800652276', '856': '800652260', '857': '800654483', '858': '800654117',\n '859': '800654927', '860': '800656751', '861': '800656720', '862':\n '800656504', '863': '800656476', '864': '800655926', '865': '800658883',\n '866': '800659871', '867': '800659855', '868': '800657502', '869':\n '800662419', '870': '800663417', '871': 
'800661565', '872': '800664542',\n '873': '800665790', '874': '800667640', '875': '800668511', '876':\n '800668354', '877': '800668932', '878': '800668884', '879': '800668870',\n '880': '800668846', '881': '800670519', '882': '800670755', '883':\n '800670804', '884': '800670005', '885': '800669956', '886': '800671522',\n '887': '800670997', '888': '800676274', '889': '800674751', '890':\n '800674396', '891': '800674387', '892': '800674369', '893': '800674171',\n '894': '800674165', '895': '800673904', '896': '800673894', '897':\n '800673042', '898': '800672682', '899': '800673037', '900': '800674363',\n '901': '800671334', '902': '800676404', '903': '800677203', '904':\n '800678281', '905': '800677753', '906': '800678579', '907': '800678543',\n '908': '800682417', '909': '800680556', '910': '800680572', '911':\n '800681753', '912': '800683728', '913': '800683445', '914': '800684755',\n '915': '800685559', '916': '800685994', '917': '800686991', '918':\n '800688325', '919': '800688988', '920': '800688986', '921': '800688811',\n '922': '800688784', '923': '800690794', '924': '800690777', '925':\n '800690766', '926': '800691744', '927': '800691714', '928': '800691608',\n '929': '800691675', '930': '800692072', '931': '800692888', '932':\n '800692853', '933': '800694793', '934': '800695410', '935': '800696421',\n '936': '800696417', '937': '800696404', '938': '800696380', '939':\n '800695901', '940': '800696527', '941': '800696521', '942': '800696516',\n '943': '800697754', '944': '800698640', '945': '800700044', '946':\n '800700030', '947': '800700001', '948': '800699969', '949': '800700477',\n '950': '800700332', '951': '800701388', '952': '800701378', '953':\n '800702260', '954': '800702167', '955': '800702170', '956': '800703184',\n '957': '800703189', '958': '800704417', '959': '800704334', '960':\n '800704331', '961': '800705315', '962': '800705310', '963': '800706319',\n '964': '800706317', '965': '800707543', '966': '800707540', '967':\n '800707378', '968': 
'800707376', '969': '800707372', '970': '800709165',\n '971': '800709918', '972': '800709909', '973': '800709913', '974':\n '800709590', '975': '800709592', '976': '800711385', '977': '800711436',\n '978': '800711448', '979': '800712704', '980': '800712684', '981':\n '800712697', '982': '800713805', '983': '800713786', '984': '800715143',\n '985': '800715140', '986': '800717742', '987': '800717725', '988':\n '800717083', '989': '800719807', '990': '800719797', '991': '800721331',\n '992': '800721317', '993': '800722269', '994': '800722253', '995':\n '800722190', '996': '800723313', '997': '800723082'}\nREDIRECT_MAP_CATEGORIES = {'27': '438046136', '28': '438046133', '29':\n '438046135', '30': '438046134', '31': '438046128', '32': '438046127',\n '33': '438046130', '34': '438046131', '35': '438046132', '36': '438046129'}\n",
"step-3": "REDIRECT_MAP = {\n '90':'19904201',\n '91':'19903329',\n '92':'19899125',\n '93':'19901043',\n '94':'19903192',\n '95':'19899788',\n '97':'19904423',\n '98':'19906163',\n '99':'19905540',\n '100':'19907871',\n '101':'19908147',\n '102':'19910103',\n '103':'19909980',\n '104':'19911813',\n '105':'19911767',\n '106':'19913625',\n '107':'19913832',\n '108':'19915603',\n '109':'19915707',\n '110':'19915705',\n '111':'19915558',\n '112':'19917330',\n '113':'19917085',\n '114':'19918316',\n '115':'19919617',\n '116':'19918555',\n '117':'19919779',\n '118':'19920594',\n '119':'19920805',\n '120':'19921503',\n '121':'19923032',\n '122':'19922349',\n '123':'19923894',\n '124':'19924058',\n '125':'19924651',\n '126':'19929744',\n '127':'19929743',\n '128':'19929742',\n '129':'19929184',\n '130':'19929183',\n '131':'19928163',\n '132':'19927463',\n '133':'19927462',\n '134':'19927461',\n '135':'19926742',\n '136':'19926741',\n '137':'19926738',\n '138':'19930143',\n '139':'19930827',\n '140':'19931617',\n '141':'19931616',\n '142':'19932324',\n '143':'19932321',\n '144':'19932320',\n '145':'19932845',\n '146':'19932843',\n '147':'19932842',\n '148':'19932839',\n '149':'19933621',\n '150':'19933618',\n '151':'19934526',\n '152':'19934525',\n '153':'19934524',\n '154':'19935167',\n '155':'19935165',\n '156':'19936598',\n '157':'19936596',\n '158':'19936594',\n '160':'19937949',\n '161':'19937662',\n '162':'19937662',\n '163':'19937662',\n '164':'19937662',\n '165':'19937662',\n '166':'19940346',\n '167':'19939390',\n '168':'19938892',\n '169':'19938886',\n '170':'19938874',\n '171':'19938181',\n '172':'19938179',\n '173':'19938177',\n '174':'19937662',\n '175':'19937662',\n '176':'800073144',\n '177':'800073141',\n '178':'800070989',\n '179':'800070987',\n '180':'800070985',\n '181':'800068840',\n '182':'800068838',\n '183':'800068837',\n '184':'800068835',\n '185':'800073405',\n '186':'800075467',\n '187':'800075466',\n '188':'800077797',\n '189':'800077792',\n 
'190':'800077788',\n '191':'800080302',\n '192':'800080300',\n '193':'800080299',\n '194':'800080297',\n '195':'800080295',\n '196':'800080294',\n '197':'800082560',\n '198':'800082559',\n '199':'800082558',\n '200':'800085053',\n '201':'800085057',\n '202':'800085055',\n '203':'800087660',\n '204':'800087637',\n '205':'800087636',\n '206':'800090260',\n '207':'800090259',\n '208':'800090256',\n '209':'800090252',\n '210':'800090248',\n '211':'800095783',\n '212':'800093475',\n '213':'800093472',\n '214':'800093469',\n '215':'800093465',\n '216':'800097835',\n '217':'800097830',\n '218':'800097828',\n '219':'800102815',\n '220':'800100696',\n '221':'800107510',\n '222':'800105566',\n '223':'800105187',\n '224':'800105182',\n '225':'800105176',\n '226':'800105171',\n '227':'800110082',\n '228':'800110080',\n '229':'800110077',\n '230':'800107893',\n '231':'800112573',\n '232':'800112572',\n '233':'800112570',\n '234':'800115083',\n '235':'800115080',\n '236':'800117652',\n '237':'800136223',\n '238':'800135715',\n '239':'800135712',\n '240':'800127734',\n '241':'800125056',\n '242':'800125055',\n '243':'800125054',\n '244':'800122499',\n '245':'800122497',\n '246':'800120063',\n '247':'800120060',\n '248':'800118016',\n '249':'800118015',\n '250':'800138744',\n '251':'800138741',\n '252':'800138440',\n '253':'800156510',\n '254':'800156507',\n '255':'800159343',\n '256':'800200950',\n '257':'800200946',\n '258':'800180350',\n '259':'800180348',\n '260':'800162155',\n '261':'800162153',\n '262':'800159803',\n '263':'800205850',\n '264':'800205839',\n '265':'800210303',\n '266':'800210302',\n '267':'800212467',\n '268':'800212465',\n '269':'800212462',\n '270':'800215849',\n '271':'800218413',\n '272':'800220590',\n '273':'800220585',\n '274':'800220581',\n '275':'800220568',\n '276':'800223836',\n '277':'800223835',\n '278':'800226881',\n '279':'800226876',\n '280':'800226875',\n '281':'800229066',\n '282':'800229064',\n '283':'800232046',\n '284':'800232043',\n 
'285':'800234330',\n '286':'800234329',\n '287':'800234328',\n '288':'800239516',\n '289':'800236806',\n '290':'800242231',\n '291':'800242196',\n '292':'800242177',\n '293':'800245005',\n '294':'800247477',\n '295':'800247307',\n '296':'800247092',\n '297':'800250315',\n '298':'800250206',\n '299':'800250198',\n '300':'800252661',\n '301':'800252745',\n '302':'800252731',\n '303':'800255314',\n '304':'800255226',\n '305':'800261560',\n '306':'800264399',\n '307':'800264337',\n '308':'800262863',\n '309':'800267317',\n '310':'800268635',\n '311':'800270225',\n '312':'800272621',\n '313':'800272861',\n '314':'800275290',\n '315':'800275287',\n '316':'800275259',\n '317':'800277905',\n '318':'800277897',\n '319':'800277966',\n '320':'800280886',\n '321':'800280734',\n '322':'800280721',\n '323':'800283469',\n '324':'800283455',\n '325':'800291555',\n '326':'800291531',\n '327':'800288739',\n '328':'800286042',\n '329':'800286032',\n '330':'800294431',\n '331':'800294423',\n '332':'800294394',\n '333':'800297383',\n '334':'800299835',\n '335':'800302625',\n '336':'800305630',\n '337':'800305626',\n '338':'800308225',\n '339':'800307935',\n '340':'800308160',\n '341':'800308242',\n '342':'800310811',\n '343':'800310657',\n '344':'800310651',\n '345':'800312843',\n '346':'800313657',\n '347':'800313593',\n '348':'800313385',\n '349':'800315870',\n '350':'800315874',\n '351':'800315004',\n '352':'800315980',\n '353':'800317852',\n '354':'800317851',\n '355':'800317843',\n '356':'800317841',\n '357':'800320232',\n '358':'800322836',\n '359':'800322833',\n '360':'800325648',\n '361':'800325641',\n '362':'800328374',\n '363':'800328368',\n '364':'800330891',\n '365':'800330882',\n '366':'800330878',\n '367':'800336505',\n '368':'800336491',\n '369':'800338571',\n '370':'800341852',\n '371':'800339471',\n '372':'800344570',\n '373':'800344561',\n '374':'800344557',\n '375':'800347295',\n '376':'800348755',\n '377':'800350263',\n '378':'800350259',\n '379':'800353149',\n 
'380':'800351527',\n '381':'800355911',\n '382':'800355907',\n '383':'800358602',\n '384':'800358597',\n '385':'800357146',\n '386':'800360127',\n '387':'800364368',\n '388':'800364364',\n '389':'800364360',\n '390':'800369266',\n '391':'800367438',\n '392':'800367435',\n '393':'800365869',\n '394':'800376494',\n '395':'800376495',\n '396':'800376499',\n '397':'800376508',\n '398':'800376564',\n '399':'800376527',\n '400':'800376534',\n '401':'800376542',\n '402':'800376553',\n '403':'800376547',\n '404':'800373150',\n '405':'800373145',\n '406':'800372444',\n '407':'800372437',\n '408':'800372425',\n '409':'800379488',\n '410':'800382132',\n '411':'800382127',\n '412':'800382125',\n '413':'800386300',\n '414':'800384980',\n '415':'800384977',\n '416':'800387613',\n '417':'800387609',\n '418':'800390598',\n '419':'800390595',\n '420':'800390593',\n '421':'800391756',\n '422':'800393267',\n '423':'800396025',\n '424':'800399068',\n '425':'800401344',\n '426':'800404124',\n '427':'800408946',\n '428':'800407272',\n '429':'800407265',\n '430':'800411526',\n '431':'800411522',\n '432':'800414380',\n '433':'800413104',\n '434':'800413099',\n '435':'800415905',\n '436':'800415900',\n '437':'800417356',\n '438':'800420038',\n '439':'800420034',\n '440':'800420028',\n '441':'800422801',\n '442':'800421597',\n '443':'800421594',\n '444':'800427313',\n '445':'800427308',\n '446':'800427302',\n '447':'800427296',\n '448':'800428813',\n '449':'800430293',\n '450':'800430281',\n '451':'800430273',\n '452':'800434255',\n '453':'800434253',\n '454':'800434251',\n '455':'800434249',\n '456':'800434246',\n '457':'800431774',\n '458':'800443507',\n '459':'800442246',\n '460':'800440771',\n '461':'800439363',\n '462':'800439359',\n '463':'800436898',\n '464':'800434258',\n '465':'800446256',\n '466':'800450435',\n '467':'800450429',\n '468':'800450424',\n '469':'800452914',\n '470':'800452909',\n '471':'800452023',\n '472':'800452016',\n '473':'800455755',\n '474':'800455748',\n 
'475':'800457050',\n '476':'800458494',\n '477':'800461157',\n '478':'800459620',\n '479':'800464361',\n '480':'800464980',\n '481':'800462270',\n '482':'800465908',\n '483':'800465407',\n '484':'800465404',\n '485':'800467476',\n '486':'800467755',\n '487':'800468407',\n '488':'800468843',\n '489':'800469869',\n '490':'800469867',\n '491':'800470232',\n '492':'800470228',\n '493':'800470224',\n '494':'800470783',\n '495':'800471280',\n '496':'800471274',\n '497':'800471270',\n '498':'800471737',\n '499':'800472257',\n '500':'800472252',\n '501':'800472248',\n '502':'800472239',\n '503':'800472826',\n '504':'800473392',\n '505':'800473387',\n '506':'800473386',\n '507':'800474131',\n '508':'800474822',\n '509':'800476516',\n '510':'800476512',\n '511':'800477305',\n '512':'800477304',\n '513':'800477299',\n '514':'800477851',\n '515':'800478313',\n '516':'800478309',\n '517':'800478779',\n '518':'800479288',\n '519':'800479679',\n '520':'800480262',\n '521':'800480257',\n '522':'800483194',\n '523':'800482720',\n '524':'800482271',\n '525':'800481660',\n '526':'800481208',\n '527':'800480699',\n '528':'800483203',\n '529':'800483712',\n '530':'800484088',\n '531':'800484085',\n '532':'800484667',\n '533':'800485151',\n '534':'800485686',\n '535':'800487288',\n '536':'800487265',\n '537':'800487264',\n '538':'800487254',\n '539':'800487654',\n '540':'800488015',\n '541':'800488014',\n '542':'800488638',\n '543':'800488635',\n '544':'800489081',\n '545':'800489074',\n '546':'800489725',\n '547':'800489722',\n '548':'800490703',\n '549':'800490702',\n '550':'800492228',\n '551':'800494213',\n '552':'800494039',\n '553':'800494442',\n '554':'800494426',\n '555':'800495547',\n '556':'800495446',\n '557':'800496750',\n '558':'800498164',\n '559':'800498748',\n '560':'800499418',\n '561':'800499229',\n '562':'800500847',\n '563':'800500844',\n '564':'800500802',\n '565':'800501840',\n '566':'800501597',\n '567':'800502796',\n '568':'800502789',\n '569':'800503614',\n 
'570':'800504092',\n '571':'800503911',\n '572':'800508001',\n '573':'800507103',\n '574':'800506285',\n '575':'800505846',\n '576':'800505807',\n '577':'800505069',\n '578':'800509304',\n '579':'800509218',\n '580':'800508912',\n '581':'800509464',\n '582':'800510151',\n '583':'800511800',\n '584':'800511318',\n '585':'800512405',\n '586':'800512403',\n '587':'800513304',\n '588':'800513305',\n '589':'800513635',\n '590':'800513633',\n '591':'800514762',\n '592':'800514759',\n '593':'800515655',\n '594':'800515656',\n '595':'800516480',\n '596':'800516479',\n '597':'800516478',\n '598':'800517736',\n '599':'800517735',\n '600':'800517733',\n '601':'800517148',\n '602':'800517143',\n '603':'800517138',\n '604':'800519296',\n '605':'800519292',\n '606':'800520855',\n '607':'800520857',\n '608':'800520736',\n '609':'800521674',\n '610':'800522862',\n '611':'800523828',\n '612':'800523825',\n '613':'800524526',\n '614':'800524868',\n '615':'800525568',\n '616':'800525566',\n '617':'800525848',\n '618':'800525847',\n '619':'800525845',\n '620':'800526925',\n '621':'800526923',\n '622':'800526922',\n '623':'800528032',\n '624':'800527784',\n '625':'800527783',\n '626':'800529243',\n '627':'800528930',\n '628':'800528927',\n '629':'800530217',\n '630':'800530215',\n '631':'800530212',\n '632':'800531040',\n '633':'800530845',\n '634':'800530842',\n '635':'800531892',\n '636':'800532956',\n '637':'800532952',\n '638':'800533102',\n '639':'800534375',\n '640':'800534368',\n '641':'800534363',\n '642':'800535420',\n '643':'800535415',\n '644':'800535410',\n '645':'800536088',\n '646':'800536085',\n '647':'800536084',\n '648':'800537422',\n '649':'800537419',\n '650':'800537413',\n '651':'800565995',\n '652':'800565992',\n '653':'800563301',\n '654':'800563298',\n '655':'800562019',\n '656':'800562018',\n '657':'800560957',\n '658':'800560954',\n '659':'800560953',\n '660':'800560950',\n '661':'800567960',\n '662':'800567958',\n '663':'800567957',\n '664':'800566950',\n 
'665':'800566948',\n '666':'800566947',\n '667':'800568961',\n '668':'800568959',\n '669':'800568957',\n '670':'800569778',\n '671':'800569776',\n '672':'800569775',\n '673':'800570677',\n '674':'800570673',\n '675':'800570647',\n '676':'800571691',\n '677':'800571690',\n '678':'800571688',\n '679':'800573679',\n '680':'800573678',\n '681':'800573673',\n '682':'800572880',\n '683':'800572878',\n '684':'800572876',\n '685':'800574667',\n '686':'800574666',\n '687':'800574665',\n '688':'800575627',\n '689':'800575624',\n '690':'800575622',\n '691':'800576864',\n '692':'800576861',\n '693':'800576858',\n '694':'800577693',\n '695':'800578651',\n '696':'800578648',\n '697':'800578653',\n '698':'800580339',\n '699':'800581315',\n '700':'800582094',\n '701':'800583021',\n '702':'800590020',\n '703':'800590019',\n '704':'800590018',\n '705':'800589231',\n '706':'800589226',\n '707':'800588877',\n '708':'800587042',\n '709':'800587039',\n '710':'800586085',\n '711':'800584924',\n '712':'800583934',\n '713':'800590941',\n '714':'800590940',\n '715':'800590939',\n '716':'800592923',\n '717':'800592921',\n '718':'800592920',\n '719':'800591918',\n '720':'800591917',\n '721':'800591915',\n '722':'800593832',\n '723':'800593829',\n '724':'800593824',\n '725':'800593890',\n '726':'800594956',\n '727':'800594880',\n '728':'800594877',\n '729':'800594876',\n '730':'800595884',\n '731':'800595883',\n '732':'800595882',\n '733':'800595879',\n '734':'800596854',\n '735':'800597955',\n '736':'800597961',\n '737':'800597957',\n '738':'800597954',\n '739':'800597951',\n '740':'800598913',\n '741':'800600005',\n '742':'800600003',\n '743':'800600000',\n '744':'800600977',\n '745':'800600975',\n '746':'800600973',\n '747':'800601974',\n '748':'800603879',\n '749':'800603052',\n '750':'800603050',\n '751':'800604977',\n '752':'800605959',\n '753':'800607128',\n '754':'800608295',\n '755':'800608294',\n '756':'800608293',\n '757':'800609876',\n '758':'800610697',\n '759':'800611768',\n 
'760':'800611766',\n '761':'800611764',\n '762':'800612811',\n '763':'800612809',\n '764':'800612806',\n '765':'800615487',\n '766':'800613824',\n '767':'800613823',\n '768':'800617427',\n '769':'800617740',\n '770':'800618987',\n '771':'800618794',\n '772':'800620463',\n '773':'800620507',\n '774':'800621873',\n '775':'800621866',\n '776':'800621485',\n '777':'800623063',\n '778':'800622785',\n '779':'800624082',\n '780':'800624606',\n '781':'800624605',\n '782':'800624602',\n '783':'800626006',\n '784':'800626004',\n '785':'800625998',\n '786':'800625995',\n '787':'800625959',\n '788':'800625684',\n '789':'800627159',\n '790':'800627541',\n '791':'800628537',\n '792':'800628472',\n '793':'800628440',\n '794':'800628412',\n '795':'800628391',\n '796':'800629230',\n '797':'800629175',\n '798':'800630245',\n '799':'800630236',\n '800':'800631787',\n '801':'800631425',\n '802':'800631385',\n '803':'800631379',\n '804':'800631339',\n '805':'800631299',\n '806':'800631198',\n '807':'800630886',\n '808':'800633920',\n '809':'800633720',\n '810':'800633520',\n '811':'800634419',\n '812':'800635301',\n '813':'800635068',\n '814':'800635957',\n '815':'800638994',\n '816':'800638105',\n '817':'800637068',\n '818':'800636754',\n '819':'800636749',\n '820':'800636075',\n '821':'800639448',\n '822':'800639234',\n '823':'800639026',\n '824':'800640408',\n '825':'800640396',\n '826':'800640985',\n '827':'800640977',\n '828':'800645321',\n '829':'800644531',\n '830':'800644235',\n '831':'800643606',\n '832':'800642400',\n '833':'800641879',\n '834':'800645756',\n '835':'800647017',\n '836':'800648350',\n '837':'800648289',\n '838':'800648124',\n '839':'800647488',\n '840':'800649911',\n '841':'800649906',\n '842':'800649535',\n '843':'800649521',\n '844':'800649507',\n '845':'800649438',\n '846':'800649411',\n '847':'800650580',\n '848':'800652017',\n '849':'800652004',\n '850':'800651999',\n '851':'800651955',\n '852':'800651790',\n '853':'800651264',\n '854':'800651159',\n 
'855':'800652276',\n '856':'800652260',\n '857':'800654483',\n '858':'800654117',\n '859':'800654927',\n '860':'800656751',\n '861':'800656720',\n '862':'800656504',\n '863':'800656476',\n '864':'800655926',\n '865':'800658883',\n '866':'800659871',\n '867':'800659855',\n '868':'800657502',\n '869':'800662419',\n '870':'800663417',\n '871':'800661565',\n '872':'800664542',\n '873':'800665790',\n '874':'800667640',\n '875':'800668511',\n '876':'800668354',\n '877':'800668932',\n '878':'800668884',\n '879':'800668870',\n '880':'800668846',\n '881':'800670519',\n '882':'800670755',\n '883':'800670804',\n '884':'800670005',\n '885':'800669956',\n '886':'800671522',\n '887':'800670997',\n '888':'800676274',\n '889':'800674751',\n '890':'800674396',\n '891':'800674387',\n '892':'800674369',\n '893':'800674171',\n '894':'800674165',\n '895':'800673904',\n '896':'800673894',\n '897':'800673042',\n '898':'800672682',\n '899':'800673037',\n '900':'800674363',\n '901':'800671334',\n '902':'800676404',\n '903':'800677203',\n '904':'800678281',\n '905':'800677753',\n '906':'800678579',\n '907':'800678543',\n '908':'800682417',\n '909':'800680556',\n '910':'800680572',\n '911':'800681753',\n '912':'800683728',\n '913':'800683445',\n '914':'800684755',\n '915':'800685559',\n '916':'800685994',\n '917':'800686991',\n '918':'800688325',\n '919':'800688988',\n '920':'800688986',\n '921':'800688811',\n '922':'800688784',\n '923':'800690794',\n '924':'800690777',\n '925':'800690766',\n '926':'800691744',\n '927':'800691714',\n '928':'800691608',\n '929':'800691675',\n '930':'800692072',\n '931':'800692888',\n '932':'800692853',\n '933':'800694793',\n '934':'800695410',\n '935':'800696421',\n '936':'800696417',\n '937':'800696404',\n '938':'800696380',\n '939':'800695901',\n '940':'800696527',\n '941':'800696521',\n '942':'800696516',\n '943':'800697754',\n '944':'800698640',\n '945':'800700044',\n '946':'800700030',\n '947':'800700001',\n '948':'800699969',\n '949':'800700477',\n 
'950':'800700332',\n '951':'800701388',\n '952':'800701378',\n '953':'800702260',\n '954':'800702167',\n '955':'800702170',\n '956':'800703184',\n '957':'800703189',\n '958':'800704417',\n '959':'800704334',\n '960':'800704331',\n '961':'800705315',\n '962':'800705310',\n '963':'800706319',\n '964':'800706317',\n '965':'800707543',\n '966':'800707540',\n '967':'800707378',\n '968':'800707376',\n '969':'800707372',\n '970':'800709165',\n '971':'800709918',\n '972':'800709909',\n '973':'800709913',\n '974':'800709590',\n '975':'800709592',\n '976':'800711385',\n '977':'800711436',\n '978':'800711448',\n '979':'800712704',\n '980':'800712684',\n '981':'800712697',\n '982':'800713805',\n '983':'800713786',\n '984':'800715143',\n '985':'800715140',\n '986':'800717742',\n '987':'800717725',\n '988':'800717083',\n '989':'800719807',\n '990':'800719797',\n '991':'800721331',\n '992':'800721317',\n '993':'800722269',\n '994':'800722253',\n '995':'800722190',\n '996':'800723313',\n '997':'800723082',\n}\n\nREDIRECT_MAP_CATEGORIES = {\n '27':'438046136',\n '28':'438046133',\n '29':'438046135',\n '30':'438046134',\n '31':'438046128',\n '32':'438046127',\n '33':'438046130',\n '34':'438046131',\n '35':'438046132',\n '36':'438046129',\n}\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
class AudioPopupNotifier(Notifier):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def notify(self):
display(Audio(self.audio, autoplay=True))
time.sleep(3)
display(HTML(self.template.format(self.message)))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class AudioPopupNotifier(Notifier):
<|reserved_special_token_0|>
def __init__(self, message='Cell Completed', audio_file='pad_confirm.wav'):
super(AudioPopupNotifier, self).__init__()
self.message = message
self.audio_file = audio_file
try:
self.audio = pkg_resources.resource_string('inotifications',
'sounds/{}'.format(audio_file))
except IOError:
self.audio = audio_file
self.template = '<script type="text/javascript">alert("{}");</script>'
def notify(self):
display(Audio(self.audio, autoplay=True))
time.sleep(3)
display(HTML(self.template.format(self.message)))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class AudioPopupNotifier(Notifier):
"""Play Sound and show Popup upon cell completion"""
def __init__(self, message='Cell Completed', audio_file='pad_confirm.wav'):
super(AudioPopupNotifier, self).__init__()
self.message = message
self.audio_file = audio_file
try:
self.audio = pkg_resources.resource_string('inotifications',
'sounds/{}'.format(audio_file))
except IOError:
self.audio = audio_file
self.template = '<script type="text/javascript">alert("{}");</script>'
def notify(self):
display(Audio(self.audio, autoplay=True))
time.sleep(3)
display(HTML(self.template.format(self.message)))
<|reserved_special_token_1|>
from inotifier import Notifier
from IPython.display import display, Audio, HTML
import pkg_resources
import time
class AudioPopupNotifier(Notifier):
"""Play Sound and show Popup upon cell completion"""
def __init__(self, message='Cell Completed', audio_file='pad_confirm.wav'):
super(AudioPopupNotifier, self).__init__()
self.message = message
self.audio_file = audio_file
try:
self.audio = pkg_resources.resource_string('inotifications',
'sounds/{}'.format(audio_file))
except IOError:
self.audio = audio_file
self.template = '<script type="text/javascript">alert("{}");</script>'
def notify(self):
display(Audio(self.audio, autoplay=True))
time.sleep(3)
display(HTML(self.template.format(self.message)))
<|reserved_special_token_1|>
from inotifier import Notifier
from IPython.display import display, Audio, HTML
import pkg_resources
import time
class AudioPopupNotifier(Notifier):
"""Play Sound and show Popup upon cell completion"""
def __init__(self, message="Cell Completed", audio_file="pad_confirm.wav"):
super(AudioPopupNotifier, self).__init__()
self.message = message
self.audio_file = audio_file
try:
self.audio = pkg_resources.resource_string('inotifications', 'sounds/{}'.format(audio_file))
except IOError:
self.audio = audio_file
self.template = '<script type="text/javascript">alert("{}");</script>'
def notify(self):
display(Audio(self.audio, autoplay=True))
time.sleep(3)
display(HTML(self.template.format(self.message)))
|
flexible
|
{
"blob_id": "94a3a74260fac58b4cad7422608f91ae3a1a0272",
"index": 6247,
"step-1": "<mask token>\n\n\nclass AudioPopupNotifier(Notifier):\n <mask token>\n <mask token>\n\n def notify(self):\n display(Audio(self.audio, autoplay=True))\n time.sleep(3)\n display(HTML(self.template.format(self.message)))\n",
"step-2": "<mask token>\n\n\nclass AudioPopupNotifier(Notifier):\n <mask token>\n\n def __init__(self, message='Cell Completed', audio_file='pad_confirm.wav'):\n super(AudioPopupNotifier, self).__init__()\n self.message = message\n self.audio_file = audio_file\n try:\n self.audio = pkg_resources.resource_string('inotifications',\n 'sounds/{}'.format(audio_file))\n except IOError:\n self.audio = audio_file\n self.template = '<script type=\"text/javascript\">alert(\"{}\");</script>'\n\n def notify(self):\n display(Audio(self.audio, autoplay=True))\n time.sleep(3)\n display(HTML(self.template.format(self.message)))\n",
"step-3": "<mask token>\n\n\nclass AudioPopupNotifier(Notifier):\n \"\"\"Play Sound and show Popup upon cell completion\"\"\"\n\n def __init__(self, message='Cell Completed', audio_file='pad_confirm.wav'):\n super(AudioPopupNotifier, self).__init__()\n self.message = message\n self.audio_file = audio_file\n try:\n self.audio = pkg_resources.resource_string('inotifications',\n 'sounds/{}'.format(audio_file))\n except IOError:\n self.audio = audio_file\n self.template = '<script type=\"text/javascript\">alert(\"{}\");</script>'\n\n def notify(self):\n display(Audio(self.audio, autoplay=True))\n time.sleep(3)\n display(HTML(self.template.format(self.message)))\n",
"step-4": "from inotifier import Notifier\nfrom IPython.display import display, Audio, HTML\nimport pkg_resources\nimport time\n\n\nclass AudioPopupNotifier(Notifier):\n \"\"\"Play Sound and show Popup upon cell completion\"\"\"\n\n def __init__(self, message='Cell Completed', audio_file='pad_confirm.wav'):\n super(AudioPopupNotifier, self).__init__()\n self.message = message\n self.audio_file = audio_file\n try:\n self.audio = pkg_resources.resource_string('inotifications',\n 'sounds/{}'.format(audio_file))\n except IOError:\n self.audio = audio_file\n self.template = '<script type=\"text/javascript\">alert(\"{}\");</script>'\n\n def notify(self):\n display(Audio(self.audio, autoplay=True))\n time.sleep(3)\n display(HTML(self.template.format(self.message)))\n",
"step-5": "from inotifier import Notifier\nfrom IPython.display import display, Audio, HTML\n\nimport pkg_resources\nimport time\n\n\nclass AudioPopupNotifier(Notifier):\n \"\"\"Play Sound and show Popup upon cell completion\"\"\"\n\n def __init__(self, message=\"Cell Completed\", audio_file=\"pad_confirm.wav\"):\n super(AudioPopupNotifier, self).__init__()\n self.message = message\n self.audio_file = audio_file\n try:\n self.audio = pkg_resources.resource_string('inotifications', 'sounds/{}'.format(audio_file))\n except IOError:\n self.audio = audio_file\n\n self.template = '<script type=\"text/javascript\">alert(\"{}\");</script>'\n\n def notify(self):\n display(Audio(self.audio, autoplay=True))\n time.sleep(3)\n display(HTML(self.template.format(self.message)))\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
# -*- coding: utf-8 -*-
# author : rovo98
# date: 2018.3.19
# this is a demo for test calling functions.
n1 = 255
n2 = 1000
print(hex(n1))
print(hex(n2))
print(abs(-119999))
|
normal
|
{
"blob_id": "31064145ae2702f93a475d0957395c62a6b320ee",
"index": 1741,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(hex(n1))\nprint(hex(n2))\nprint(abs(-119999))\n",
"step-3": "n1 = 255\nn2 = 1000\nprint(hex(n1))\nprint(hex(n2))\nprint(abs(-119999))\n",
"step-4": "# -*- coding: utf-8 -*-\r\n# author : rovo98\r\n# date: 2018.3.19\r\n\r\n\r\n# this is a demo for test calling functions.\r\nn1 = 255\r\nn2 = 1000\r\n\r\nprint(hex(n1))\r\nprint(hex(n2))\r\n\r\nprint(abs(-119999))",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def run(address):
ths = []
with grpc.insecure_channel(address) as channel:
for i in range(1):
th = threading.Thread(target=send, args=(channel,))
ths.append(th)
st = int(time.time())
for th in ths:
th.start()
for th in ths:
th.join()
et = int(time.time())
<|reserved_special_token_0|>
def send(channel):
stub = inference_service_pb2_grpc.InferenceServiceStub(channel)
request = inference_service_pb2.InferenceMessage()
request_data = dict()
request_data['serviceId'] = 'xxxxxxxxx'
request_data['applyId'] = ''
request_data['caseid'] = uuid.uuid1().hex
feature_data = dict()
feature_data['fid1'] = 5.1
feature_data['fid2'] = 6.2
feature_data['fid3'] = 7.6
request_data['featureData'] = feature_data
request_data['sendToRemoteFeatureData'] = feature_data
print(json.dumps(request_data, indent=4))
request.body = json.dumps(request_data).encode(encoding='utf-8')
print(stub.inference(request))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def run(address):
ths = []
with grpc.insecure_channel(address) as channel:
for i in range(1):
th = threading.Thread(target=send, args=(channel,))
ths.append(th)
st = int(time.time())
for th in ths:
th.start()
for th in ths:
th.join()
et = int(time.time())
def process_response(call_future):
print(call_future.result())
def send(channel):
stub = inference_service_pb2_grpc.InferenceServiceStub(channel)
request = inference_service_pb2.InferenceMessage()
request_data = dict()
request_data['serviceId'] = 'xxxxxxxxx'
request_data['applyId'] = ''
request_data['caseid'] = uuid.uuid1().hex
feature_data = dict()
feature_data['fid1'] = 5.1
feature_data['fid2'] = 6.2
feature_data['fid3'] = 7.6
request_data['featureData'] = feature_data
request_data['sendToRemoteFeatureData'] = feature_data
print(json.dumps(request_data, indent=4))
request.body = json.dumps(request_data).encode(encoding='utf-8')
print(stub.inference(request))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def run(address):
ths = []
with grpc.insecure_channel(address) as channel:
for i in range(1):
th = threading.Thread(target=send, args=(channel,))
ths.append(th)
st = int(time.time())
for th in ths:
th.start()
for th in ths:
th.join()
et = int(time.time())
def process_response(call_future):
print(call_future.result())
def send(channel):
stub = inference_service_pb2_grpc.InferenceServiceStub(channel)
request = inference_service_pb2.InferenceMessage()
request_data = dict()
request_data['serviceId'] = 'xxxxxxxxx'
request_data['applyId'] = ''
request_data['caseid'] = uuid.uuid1().hex
feature_data = dict()
feature_data['fid1'] = 5.1
feature_data['fid2'] = 6.2
feature_data['fid3'] = 7.6
request_data['featureData'] = feature_data
request_data['sendToRemoteFeatureData'] = feature_data
print(json.dumps(request_data, indent=4))
request.body = json.dumps(request_data).encode(encoding='utf-8')
print(stub.inference(request))
if __name__ == '__main__':
run(sys.argv[1])
<|reserved_special_token_1|>
import grpc
import time
import json
import sys
import uuid
from arch.api.proto import inference_service_pb2
from arch.api.proto import inference_service_pb2_grpc
import threading
def run(address):
ths = []
with grpc.insecure_channel(address) as channel:
for i in range(1):
th = threading.Thread(target=send, args=(channel,))
ths.append(th)
st = int(time.time())
for th in ths:
th.start()
for th in ths:
th.join()
et = int(time.time())
def process_response(call_future):
print(call_future.result())
def send(channel):
stub = inference_service_pb2_grpc.InferenceServiceStub(channel)
request = inference_service_pb2.InferenceMessage()
request_data = dict()
request_data['serviceId'] = 'xxxxxxxxx'
request_data['applyId'] = ''
request_data['caseid'] = uuid.uuid1().hex
feature_data = dict()
feature_data['fid1'] = 5.1
feature_data['fid2'] = 6.2
feature_data['fid3'] = 7.6
request_data['featureData'] = feature_data
request_data['sendToRemoteFeatureData'] = feature_data
print(json.dumps(request_data, indent=4))
request.body = json.dumps(request_data).encode(encoding='utf-8')
print(stub.inference(request))
if __name__ == '__main__':
run(sys.argv[1])
<|reserved_special_token_1|>
import grpc
import time
import json
import sys
import uuid
from arch.api.proto import inference_service_pb2
from arch.api.proto import inference_service_pb2_grpc
import threading
def run(address):
ths = []
with grpc.insecure_channel(address) as channel:
for i in range(1):
th = threading.Thread(target=send, args=(channel,))
ths.append(th)
st = int(time.time())
for th in ths:
th.start()
for th in ths:
th.join()
et = int(time.time())
def process_response(call_future):
print(call_future.result())
def send(channel):
stub = inference_service_pb2_grpc.InferenceServiceStub(channel)
request = inference_service_pb2.InferenceMessage()
request_data = dict()
request_data['serviceId'] = 'xxxxxxxxx'
request_data['applyId'] = ''
# request_data['modelId'] = 'arbiter-10000#guest-10000#host-10000#model' # You can specify the model id this way
# request_data['modelVersion'] = 'acd3e1807a1211e9969aacde48001122' # You can specify the model version this way
request_data['caseid'] = uuid.uuid1().hex
feature_data = dict()
feature_data['fid1'] = 5.1
feature_data['fid2'] = 6.2
feature_data['fid3'] = 7.6
request_data['featureData'] = feature_data
request_data['sendToRemoteFeatureData'] = feature_data
print(json.dumps(request_data, indent=4))
request.body = json.dumps(request_data).encode(encoding='utf-8')
print(stub.inference(request))
if __name__ == '__main__':
run(sys.argv[1])
|
flexible
|
{
"blob_id": "5430e1861a6244c25c00699323efa0921a5af940",
"index": 3709,
"step-1": "<mask token>\n\n\ndef run(address):\n ths = []\n with grpc.insecure_channel(address) as channel:\n for i in range(1):\n th = threading.Thread(target=send, args=(channel,))\n ths.append(th)\n st = int(time.time())\n for th in ths:\n th.start()\n for th in ths:\n th.join()\n et = int(time.time())\n\n\n<mask token>\n\n\ndef send(channel):\n stub = inference_service_pb2_grpc.InferenceServiceStub(channel)\n request = inference_service_pb2.InferenceMessage()\n request_data = dict()\n request_data['serviceId'] = 'xxxxxxxxx'\n request_data['applyId'] = ''\n request_data['caseid'] = uuid.uuid1().hex\n feature_data = dict()\n feature_data['fid1'] = 5.1\n feature_data['fid2'] = 6.2\n feature_data['fid3'] = 7.6\n request_data['featureData'] = feature_data\n request_data['sendToRemoteFeatureData'] = feature_data\n print(json.dumps(request_data, indent=4))\n request.body = json.dumps(request_data).encode(encoding='utf-8')\n print(stub.inference(request))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef run(address):\n ths = []\n with grpc.insecure_channel(address) as channel:\n for i in range(1):\n th = threading.Thread(target=send, args=(channel,))\n ths.append(th)\n st = int(time.time())\n for th in ths:\n th.start()\n for th in ths:\n th.join()\n et = int(time.time())\n\n\ndef process_response(call_future):\n print(call_future.result())\n\n\ndef send(channel):\n stub = inference_service_pb2_grpc.InferenceServiceStub(channel)\n request = inference_service_pb2.InferenceMessage()\n request_data = dict()\n request_data['serviceId'] = 'xxxxxxxxx'\n request_data['applyId'] = ''\n request_data['caseid'] = uuid.uuid1().hex\n feature_data = dict()\n feature_data['fid1'] = 5.1\n feature_data['fid2'] = 6.2\n feature_data['fid3'] = 7.6\n request_data['featureData'] = feature_data\n request_data['sendToRemoteFeatureData'] = feature_data\n print(json.dumps(request_data, indent=4))\n request.body = json.dumps(request_data).encode(encoding='utf-8')\n print(stub.inference(request))\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef run(address):\n ths = []\n with grpc.insecure_channel(address) as channel:\n for i in range(1):\n th = threading.Thread(target=send, args=(channel,))\n ths.append(th)\n st = int(time.time())\n for th in ths:\n th.start()\n for th in ths:\n th.join()\n et = int(time.time())\n\n\ndef process_response(call_future):\n print(call_future.result())\n\n\ndef send(channel):\n stub = inference_service_pb2_grpc.InferenceServiceStub(channel)\n request = inference_service_pb2.InferenceMessage()\n request_data = dict()\n request_data['serviceId'] = 'xxxxxxxxx'\n request_data['applyId'] = ''\n request_data['caseid'] = uuid.uuid1().hex\n feature_data = dict()\n feature_data['fid1'] = 5.1\n feature_data['fid2'] = 6.2\n feature_data['fid3'] = 7.6\n request_data['featureData'] = feature_data\n request_data['sendToRemoteFeatureData'] = feature_data\n print(json.dumps(request_data, indent=4))\n request.body = json.dumps(request_data).encode(encoding='utf-8')\n print(stub.inference(request))\n\n\nif __name__ == '__main__':\n run(sys.argv[1])\n",
"step-4": "import grpc\nimport time\nimport json\nimport sys\nimport uuid\nfrom arch.api.proto import inference_service_pb2\nfrom arch.api.proto import inference_service_pb2_grpc\nimport threading\n\n\ndef run(address):\n ths = []\n with grpc.insecure_channel(address) as channel:\n for i in range(1):\n th = threading.Thread(target=send, args=(channel,))\n ths.append(th)\n st = int(time.time())\n for th in ths:\n th.start()\n for th in ths:\n th.join()\n et = int(time.time())\n\n\ndef process_response(call_future):\n print(call_future.result())\n\n\ndef send(channel):\n stub = inference_service_pb2_grpc.InferenceServiceStub(channel)\n request = inference_service_pb2.InferenceMessage()\n request_data = dict()\n request_data['serviceId'] = 'xxxxxxxxx'\n request_data['applyId'] = ''\n request_data['caseid'] = uuid.uuid1().hex\n feature_data = dict()\n feature_data['fid1'] = 5.1\n feature_data['fid2'] = 6.2\n feature_data['fid3'] = 7.6\n request_data['featureData'] = feature_data\n request_data['sendToRemoteFeatureData'] = feature_data\n print(json.dumps(request_data, indent=4))\n request.body = json.dumps(request_data).encode(encoding='utf-8')\n print(stub.inference(request))\n\n\nif __name__ == '__main__':\n run(sys.argv[1])\n",
"step-5": "import grpc\nimport time\nimport json\nimport sys\nimport uuid\n\nfrom arch.api.proto import inference_service_pb2\nfrom arch.api.proto import inference_service_pb2_grpc\nimport threading\n\n\ndef run(address):\n ths = []\n with grpc.insecure_channel(address) as channel:\n for i in range(1):\n th = threading.Thread(target=send, args=(channel,))\n ths.append(th)\n st = int(time.time())\n for th in ths:\n th.start()\n for th in ths:\n th.join()\n et = int(time.time())\n\n\ndef process_response(call_future):\n print(call_future.result())\n\n\ndef send(channel):\n stub = inference_service_pb2_grpc.InferenceServiceStub(channel)\n request = inference_service_pb2.InferenceMessage()\n request_data = dict()\n request_data['serviceId'] = 'xxxxxxxxx'\n request_data['applyId'] = ''\n # request_data['modelId'] = 'arbiter-10000#guest-10000#host-10000#model' # You can specify the model id this way\n # request_data['modelVersion'] = 'acd3e1807a1211e9969aacde48001122' # You can specify the model version this way\n request_data['caseid'] = uuid.uuid1().hex\n\n feature_data = dict()\n feature_data['fid1'] = 5.1\n feature_data['fid2'] = 6.2\n feature_data['fid3'] = 7.6\n request_data['featureData'] = feature_data\n request_data['sendToRemoteFeatureData'] = feature_data\n\n print(json.dumps(request_data, indent=4))\n\n request.body = json.dumps(request_data).encode(encoding='utf-8')\n print(stub.inference(request))\n\n\nif __name__ == '__main__':\n run(sys.argv[1])\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
class FeatureExtractor:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def encode(self):
""" encodes the text in the Document object, and then adds it to the encoding attribute """
text_lines = [line.text for line in self._document.lines]
encodings = self._bc.encode(text_lines)
for line, encoding in zip(self._document.lines, encodings):
line.encoding = encoding
return self._document
def end(self):
""" Closes the BertClient connection to BertServer """
self._bc.close()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class FeatureExtractor:
<|reserved_special_token_0|>
def __init__(self, document):
self._document = document
self._bc = BertClient()
def encode(self):
""" encodes the text in the Document object, and then adds it to the encoding attribute """
text_lines = [line.text for line in self._document.lines]
encodings = self._bc.encode(text_lines)
for line, encoding in zip(self._document.lines, encodings):
line.encoding = encoding
return self._document
def end(self):
""" Closes the BertClient connection to BertServer """
self._bc.close()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class FeatureExtractor:
"""Uses Bert-as-a-Server to set up a BertClient and embed text in a Document.
Attributes:
document (Document): This object encompasses the extracted text from one of the
PDF documents. There is an encoding field on each Line which is where the
embedding from BERT will be included, and where the text that gets encoded will
be provided.
_bc (BertClient): Connection to the BertServer which can be used for encoding.
"""
def __init__(self, document):
self._document = document
self._bc = BertClient()
def encode(self):
""" encodes the text in the Document object, and then adds it to the encoding attribute """
text_lines = [line.text for line in self._document.lines]
encodings = self._bc.encode(text_lines)
for line, encoding in zip(self._document.lines, encodings):
line.encoding = encoding
return self._document
def end(self):
""" Closes the BertClient connection to BertServer """
self._bc.close()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from bert_serving.client import BertClient
class FeatureExtractor:
"""Uses Bert-as-a-Server to set up a BertClient and embed text in a Document.
Attributes:
document (Document): This object encompasses the extracted text from one of the
PDF documents. There is an encoding field on each Line which is where the
embedding from BERT will be included, and where the text that gets encoded will
be provided.
_bc (BertClient): Connection to the BertServer which can be used for encoding.
"""
def __init__(self, document):
self._document = document
self._bc = BertClient()
def encode(self):
""" encodes the text in the Document object, and then adds it to the encoding attribute """
text_lines = [line.text for line in self._document.lines]
encodings = self._bc.encode(text_lines)
for line, encoding in zip(self._document.lines, encodings):
line.encoding = encoding
return self._document
def end(self):
""" Closes the BertClient connection to BertServer """
self._bc.close()
<|reserved_special_token_1|>
"""
This module is used to extract features from the lines extracted from documents
using BERT encodings. This package leverages the bert-as-a-server package to create the
embeddings.
Example:
feature_extractor = FeatureExtractor(document) # document is of class Document
encoded_doc = feature_extractor.encode()
feature_extractor.end()
Todo:
* lines --> sentences for a better representation of the embeddings
* try different BERT models
* train the BERT model for a specific task before encoding
"""
from bert_serving.client import BertClient
class FeatureExtractor:
"""Uses Bert-as-a-Server to set up a BertClient and embed text in a Document.
Attributes:
document (Document): This object encompasses the extracted text from one of the
PDF documents. There is an encoding field on each Line which is where the
embedding from BERT will be included, and where the text that gets encoded will
be provided.
_bc (BertClient): Connection to the BertServer which can be used for encoding.
"""
def __init__(self, document):
self._document = document
self._bc = BertClient()
def encode(self):
""" encodes the text in the Document object, and then adds it to the encoding attribute """
text_lines = [line.text for line in self._document.lines]
encodings = self._bc.encode(text_lines)
for (line, encoding) in zip(self._document.lines, encodings):
line.encoding = encoding
return self._document
def end(self):
""" Closes the BertClient connection to BertServer """
self._bc.close()
|
flexible
|
{
"blob_id": "882d265f14c04b2f2f626504d18e2cd07dcc8637",
"index": 3042,
"step-1": "<mask token>\n\n\nclass FeatureExtractor:\n <mask token>\n <mask token>\n\n def encode(self):\n \"\"\" encodes the text in the Document object, and then adds it to the encoding attribute \"\"\"\n text_lines = [line.text for line in self._document.lines]\n encodings = self._bc.encode(text_lines)\n for line, encoding in zip(self._document.lines, encodings):\n line.encoding = encoding\n return self._document\n\n def end(self):\n \"\"\" Closes the BertClient connection to BertServer \"\"\"\n self._bc.close()\n",
"step-2": "<mask token>\n\n\nclass FeatureExtractor:\n <mask token>\n\n def __init__(self, document):\n self._document = document\n self._bc = BertClient()\n\n def encode(self):\n \"\"\" encodes the text in the Document object, and then adds it to the encoding attribute \"\"\"\n text_lines = [line.text for line in self._document.lines]\n encodings = self._bc.encode(text_lines)\n for line, encoding in zip(self._document.lines, encodings):\n line.encoding = encoding\n return self._document\n\n def end(self):\n \"\"\" Closes the BertClient connection to BertServer \"\"\"\n self._bc.close()\n",
"step-3": "<mask token>\n\n\nclass FeatureExtractor:\n \"\"\"Uses Bert-as-a-Server to set up a BertClient and embed text in a Document.\n\n Attributes:\n document (Document): This object encompasses the extracted text from one of the\n PDF documents. There is an encoding field on each Line which is where the\n embedding from BERT will be included, and where the text that gets encoded will\n be provided.\n _bc (BertClient): Connection to the BertServer which can be used for encoding.\n\n \"\"\"\n\n def __init__(self, document):\n self._document = document\n self._bc = BertClient()\n\n def encode(self):\n \"\"\" encodes the text in the Document object, and then adds it to the encoding attribute \"\"\"\n text_lines = [line.text for line in self._document.lines]\n encodings = self._bc.encode(text_lines)\n for line, encoding in zip(self._document.lines, encodings):\n line.encoding = encoding\n return self._document\n\n def end(self):\n \"\"\" Closes the BertClient connection to BertServer \"\"\"\n self._bc.close()\n",
"step-4": "<mask token>\nfrom bert_serving.client import BertClient\n\n\nclass FeatureExtractor:\n \"\"\"Uses Bert-as-a-Server to set up a BertClient and embed text in a Document.\n\n Attributes:\n document (Document): This object encompasses the extracted text from one of the\n PDF documents. There is an encoding field on each Line which is where the\n embedding from BERT will be included, and where the text that gets encoded will\n be provided.\n _bc (BertClient): Connection to the BertServer which can be used for encoding.\n\n \"\"\"\n\n def __init__(self, document):\n self._document = document\n self._bc = BertClient()\n\n def encode(self):\n \"\"\" encodes the text in the Document object, and then adds it to the encoding attribute \"\"\"\n text_lines = [line.text for line in self._document.lines]\n encodings = self._bc.encode(text_lines)\n for line, encoding in zip(self._document.lines, encodings):\n line.encoding = encoding\n return self._document\n\n def end(self):\n \"\"\" Closes the BertClient connection to BertServer \"\"\"\n self._bc.close()\n",
"step-5": "\"\"\"\n\nThis module is used to extract features from the lines extracted from documents\nusing BERT encodings. This package leverages the bert-as-a-server package to create the\nembeddings.\n\nExample:\n feature_extractor = FeatureExtractor(document) # document is of class Document\n encoded_doc = feature_extractor.encode()\n feature_extractor.end()\n\nTodo:\n * lines --> sentences for a better representation of the embeddings\n * try different BERT models\n * train the BERT model for a specific task before encoding\n\n\n\"\"\"\n\nfrom bert_serving.client import BertClient\n\n\nclass FeatureExtractor:\n \"\"\"Uses Bert-as-a-Server to set up a BertClient and embed text in a Document.\n\n Attributes:\n document (Document): This object encompasses the extracted text from one of the\n PDF documents. There is an encoding field on each Line which is where the\n embedding from BERT will be included, and where the text that gets encoded will\n be provided.\n _bc (BertClient): Connection to the BertServer which can be used for encoding.\n\n \"\"\"\n\n def __init__(self, document):\n self._document = document\n self._bc = BertClient()\n\n def encode(self):\n \"\"\" encodes the text in the Document object, and then adds it to the encoding attribute \"\"\"\n text_lines = [line.text for line in self._document.lines]\n encodings = self._bc.encode(text_lines)\n for (line, encoding) in zip(self._document.lines, encodings):\n line.encoding = encoding\n return self._document\n\n def end(self):\n \"\"\" Closes the BertClient connection to BertServer \"\"\"\n self._bc.close()\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
'''
Faraday Penetration Test IDE
Copyright (C) 2013 Infobyte LLC (http://www.infobytesec.com/)
See the file 'doc/LICENSE' for the license information
'''
"""
This module contains some useful functions to embedd an IPython shell.
This allows to interactively test things.
TODO: create a QT Widget capable of running the IPython shell whitout
blocking the entire app. Kind of the http://ipython.scipy.org/moin/Cookbook/EmbeddingInGTK
"""
import traceback
import model.api
IPYTHON_BANNER = "\n".join(["-"*45,
"Starting embedded IPython Shell...",
"Press CTRL + D to exit.",
"-"*45])
IPYTHON_EXIT_MSG = "\n".join(["-"*45,
"Exiting IPython Shell...",
"Returning normal execution.",
"-"*45])
__ipython_active = False
def embedd_ipython011(local_ns={}, global_ns={}):
from IPython.config.loader import Config
from IPython.frontend.terminal.embed import InteractiveShellEmbed
cfg = Config()
ipshell = InteractiveShellEmbed(config=cfg,
banner1 = IPYTHON_BANNER,
exit_msg = IPYTHON_EXIT_MSG)
ipshell(local_ns=local_ns, global_ns=global_ns)
def embedd_ipython010(local_ns={}, global_ns={}):
from IPython.Shell import IPShellEmbed
ipshell = IPShellEmbed( [""],
banner = IPYTHON_BANNER,
exit_msg = IPYTHON_EXIT_MSG
)
ipshell(local_ns=local_ns, global_ns=global_ns)
def embedd(local_ns={}, global_ns={}):
global __ipython_active
if __ipython_active:
return
__ipython_active = True
try:
import IPython
version = IPython.__version__.split(".")[1]
if int(version) > 10:
embedd_ipython011(local_ns, global_ns)
else:
embedd_ipython010(local_ns, global_ns)
except Exception, e:
msg = "An error ocurred while trying to embedd the IPython Shell\n%s"
model.api.log(msg % str(e), "ERROR")
model.api.devlog(msg % traceback.format_exc())
finally:
__ipython_active = False
def embeddQT(local_ns={}, global_ns={}):
global __ipython_active
if __ipython_active:
return
__ipython_active = True
try:
from IPython.Shell import IPShellQt
ipshell = IPShellQt( [""],
user_ns=local_ns,
user_global_ns=global_ns
)
ipshell.run()
except Exception:
model.api.devlog("An error ocurred while trying to embedd the IPython Shell\n%s" % traceback.format_exc())
finally:
__ipython_active = False
|
normal
|
{
"blob_id": "3eb071fa826c838d847e3f97abe3b706760a1336",
"index": 1309,
"step-1": "'''\nFaraday Penetration Test IDE\nCopyright (C) 2013 Infobyte LLC (http://www.infobytesec.com/)\nSee the file 'doc/LICENSE' for the license information\n\n'''\n\"\"\"\nThis module contains some useful functions to embedd an IPython shell.\nThis allows to interactively test things.\nTODO: create a QT Widget capable of running the IPython shell whitout\nblocking the entire app. Kind of the http://ipython.scipy.org/moin/Cookbook/EmbeddingInGTK\n\"\"\"\n\nimport traceback\nimport model.api\n\nIPYTHON_BANNER = \"\\n\".join([\"-\"*45,\n \"Starting embedded IPython Shell...\",\n \"Press CTRL + D to exit.\",\n \"-\"*45])\n\nIPYTHON_EXIT_MSG = \"\\n\".join([\"-\"*45,\n \"Exiting IPython Shell...\",\n \"Returning normal execution.\",\n \"-\"*45])\n\n__ipython_active = False\n\n \n \n\ndef embedd_ipython011(local_ns={}, global_ns={}):\n from IPython.config.loader import Config\n from IPython.frontend.terminal.embed import InteractiveShellEmbed\n cfg = Config() \n ipshell = InteractiveShellEmbed(config=cfg,\n banner1 = IPYTHON_BANNER,\n exit_msg = IPYTHON_EXIT_MSG)\n \n ipshell(local_ns=local_ns, global_ns=global_ns)\n\n\ndef embedd_ipython010(local_ns={}, global_ns={}):\n from IPython.Shell import IPShellEmbed\n ipshell = IPShellEmbed( [\"\"],\n banner = IPYTHON_BANNER,\n exit_msg = IPYTHON_EXIT_MSG\n )\n ipshell(local_ns=local_ns, global_ns=global_ns)\n \n\ndef embedd(local_ns={}, global_ns={}):\n global __ipython_active\n if __ipython_active:\n return\n\n __ipython_active = True\n try:\n import IPython\n version = IPython.__version__.split(\".\")[1]\n if int(version) > 10:\n embedd_ipython011(local_ns, global_ns)\n else:\n embedd_ipython010(local_ns, global_ns)\n \n except Exception, e:\n msg = \"An error ocurred while trying to embedd the IPython Shell\\n%s\"\n model.api.log(msg % str(e), \"ERROR\")\n model.api.devlog(msg % traceback.format_exc())\n finally:\n __ipython_active = False\n\n\ndef embeddQT(local_ns={}, global_ns={}):\n \n\n global 
__ipython_active\n if __ipython_active:\n return\n __ipython_active = True\n try:\n from IPython.Shell import IPShellQt\n ipshell = IPShellQt( [\"\"],\n user_ns=local_ns,\n user_global_ns=global_ns\n )\n ipshell.run()\n except Exception:\n model.api.devlog(\"An error ocurred while trying to embedd the IPython Shell\\n%s\" % traceback.format_exc())\n finally:\n __ipython_active = False\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def gen_metadata(fn):
metadata = {}
lines = open(fn, 'r').readlines()
for line in lines:
line = line.rstrip()
if len(line) == 0:
continue
elif line.startswith('#'):
continue
elif line.startswith('%'):
continue
else:
firstWord = line.split()[0]
if line.startswith('RingThresh'):
if 'RingThresh' not in metadata.keys():
metadata.update({'RingThresh': {}})
strippedline = line.split(firstWord)[1].strip()
secondword = strippedline.split()[0]
metadata['RingThresh'].update({secondword: strippedline.
split(secondword)[1].split('#')[0].strip()})
else:
metadata.update({firstWord: line.split(firstWord)[1].split(
'#')[0].strip()})
return metadata
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def gen_metadata(fn):
metadata = {}
lines = open(fn, 'r').readlines()
for line in lines:
line = line.rstrip()
if len(line) == 0:
continue
elif line.startswith('#'):
continue
elif line.startswith('%'):
continue
else:
firstWord = line.split()[0]
if line.startswith('RingThresh'):
if 'RingThresh' not in metadata.keys():
metadata.update({'RingThresh': {}})
strippedline = line.split(firstWord)[1].strip()
secondword = strippedline.split()[0]
metadata['RingThresh'].update({secondword: strippedline.
split(secondword)[1].split('#')[0].strip()})
else:
metadata.update({firstWord: line.split(firstWord)[1].split(
'#')[0].strip()})
return metadata
def SetupPayloads(inp):
flow_input = {'input': {'inject_source_endpoint_id': inp['sourceEP'],
'funcx_endpoint_non_compute': inp['sourceNCEP'],
'proc_endpoint_non_compute': inp['procNCEP'], 'inject_source_path':
inp['sourcePath'], 'inject_destination_endpoint_id': inp[
'remoteDataEP'], 'extract_source_endpoint_id': inp['remoteDataEP'],
'funcx_endpoint_compute': inp['funcx_endpoint_compute'],
'inject_destination_path': inp['executePath'],
'extract_source_path': inp['executeResultPath'],
'extract_destination_endpoint_id': inp['destEP'],
'extract_destination_path': inp['resultPath'], 'paramFileName': inp
['pfName'], 'startLayerNr': inp['startLayerNr'], 'endLayerNr': inp[
'endLayerNr'], 'nFrames': inp['nFrames'], 'numProcs': inp[
'numProcs'], 'numBlocks': inp['numBlocks'], 'timePath': inp[
'timePath'], 'StartFileNrFirstLayer': inp['startNrFirstLayer'],
'NrFilesPerSweep': inp['nrFilesPerSweep'], 'FileStem': inp[
'fileStem'], 'SeedFolder': inp['seedFolder'], 'RawFolder': inp[
'rawFolder'], 'darkFN': inp['darkFN'], 'StartNr': inp['startNr'],
'EndNr': inp['endNr'], 'extract_recursive': False,
'inject_recursive': True}}
flow_input['input'].update({'multipletasks': [{'startLayerNr': inp[
'startLayerNr'], 'endLayerNr': inp['endLayerNr'], 'numProcs': inp[
'numProcs'], 'nFrames': inp['nFrames'], 'numBlocks': inp[
'numBlocks'], 'blockNr': idx, 'timePath': inp['timePath'],
'FileStem': inp['fileStem'], 'SeedFolder': inp['seedFolder'],
'RawFolder': inp['rawFolder'], 'paramFileName': inp['pfName']} for
idx in range(inp['numBlocks'])]})
flow_input['input'].update({'pilot': {'dataset':
f"{inp['sourcePath']}/{inp['fileStem']}_Layer_{str(inp['startLayerNr']).zfill(4)}_Analysis_Time_{inp['timePath']}/{inp['fileStem']}_Layer_{str(inp['startLayerNr']).zfill(4)}_Analysis_Time_{inp['timePath']}/"
, 'index': inp['portal_id'], 'project': 'hedm',
'source_globus_endpoint': inp['sourceEP']}})
flow_input['input']['pilot'].update({'metadata': gen_metadata(inp[
'pfName'])})
flow_input['input']['pilot']['metadata'].update({'exp_id':
f"{inp['experimentName']}_{inp['fileStem']}_{inp['timePath']}"})
flow_input['input']['pilot']['metadata'].update({'time_path': inp[
'timePath']})
flow_input['input']['pilot']['metadata'].update({'startNr': inp[
'startNr'], 'endNr': inp['endNr']})
return flow_input
<|reserved_special_token_1|>
def gen_metadata(fn):
metadata = {}
lines = open(fn,'r').readlines()
for line in lines:
line = line.rstrip()
if len(line) == 0:
continue
elif line.startswith('#'):
continue
elif line.startswith('%'):
continue
else:
# Special case RingThresh
firstWord = line.split()[0]
if line.startswith('RingThresh'):
if 'RingThresh' not in metadata.keys():
metadata.update({'RingThresh':{}})
strippedline = line.split(firstWord)[1].strip()
secondword = strippedline.split()[0]
metadata['RingThresh'].update({secondword:strippedline.split(secondword)[1].split('#')[0].strip()})
else:
metadata.update({firstWord : line.split(firstWord)[1].split('#')[0].strip()})
return metadata
def SetupPayloads(inp):
flow_input = {
"input": {
"inject_source_endpoint_id": inp['sourceEP'],
"funcx_endpoint_non_compute": inp['sourceNCEP'],
"proc_endpoint_non_compute": inp['procNCEP'],
"inject_source_path": inp['sourcePath'],
"inject_destination_endpoint_id": inp['remoteDataEP'],
"extract_source_endpoint_id": inp['remoteDataEP'],
"funcx_endpoint_compute": inp['funcx_endpoint_compute'],
"inject_destination_path": inp['executePath'],
"extract_source_path": inp['executeResultPath'],
"extract_destination_endpoint_id": inp['destEP'],
"extract_destination_path": inp['resultPath'],
"paramFileName": inp['pfName'],
"startLayerNr": inp['startLayerNr'],
"endLayerNr": inp['endLayerNr'],
"nFrames": inp['nFrames'],
"numProcs": inp['numProcs'],
"numBlocks": inp['numBlocks'],
"timePath": inp['timePath'],
"StartFileNrFirstLayer": inp['startNrFirstLayer'],
"NrFilesPerSweep": inp['nrFilesPerSweep'],
"FileStem": inp['fileStem'],
"SeedFolder": inp['seedFolder'],
"RawFolder": inp['rawFolder'],
"darkFN": inp['darkFN'],
"StartNr": inp['startNr'],
"EndNr": inp['endNr'],
'extract_recursive': False,
'inject_recursive': True,}
}
flow_input['input'].update({
'multipletasks':[{
'startLayerNr':inp['startLayerNr'],
'endLayerNr':inp['endLayerNr'],
'numProcs':inp['numProcs'],
'nFrames':inp['nFrames'],
'numBlocks':inp['numBlocks'],
'blockNr':idx,
'timePath':inp['timePath'],
'FileStem':inp['fileStem'],
'SeedFolder':inp['seedFolder'],
'RawFolder':inp['rawFolder'],
'paramFileName':inp['pfName'],
}
for idx in range(inp['numBlocks'])
]
})
flow_input['input'].update({
'pilot':{
'dataset':f'{inp["sourcePath"]}/{inp["fileStem"]}_Layer_{str(inp["startLayerNr"]).zfill(4)}_Analysis_Time_{inp["timePath"]}/{inp["fileStem"]}_Layer_{str(inp["startLayerNr"]).zfill(4)}_Analysis_Time_{inp["timePath"]}/',
'index':inp['portal_id'],
'project':'hedm',
'source_globus_endpoint':inp['sourceEP'],
}
})
flow_input['input']['pilot'].update({
'metadata':gen_metadata(inp['pfName']),
})
flow_input['input']['pilot']['metadata'].update({
'exp_id':f'{inp["experimentName"]}_{inp["fileStem"]}_{inp["timePath"]}',
})
flow_input['input']['pilot']['metadata'].update({
'time_path':inp["timePath"],
})
flow_input['input']['pilot']['metadata'].update({
'startNr':inp["startNr"],
'endNr':inp["endNr"],
})
return flow_input
|
flexible
|
{
"blob_id": "5066c2a5219cf1b233b4985efc7a4eb494b784ca",
"index": 7363,
"step-1": "<mask token>\n",
"step-2": "def gen_metadata(fn):\n metadata = {}\n lines = open(fn, 'r').readlines()\n for line in lines:\n line = line.rstrip()\n if len(line) == 0:\n continue\n elif line.startswith('#'):\n continue\n elif line.startswith('%'):\n continue\n else:\n firstWord = line.split()[0]\n if line.startswith('RingThresh'):\n if 'RingThresh' not in metadata.keys():\n metadata.update({'RingThresh': {}})\n strippedline = line.split(firstWord)[1].strip()\n secondword = strippedline.split()[0]\n metadata['RingThresh'].update({secondword: strippedline.\n split(secondword)[1].split('#')[0].strip()})\n else:\n metadata.update({firstWord: line.split(firstWord)[1].split(\n '#')[0].strip()})\n return metadata\n\n\n<mask token>\n",
"step-3": "def gen_metadata(fn):\n metadata = {}\n lines = open(fn, 'r').readlines()\n for line in lines:\n line = line.rstrip()\n if len(line) == 0:\n continue\n elif line.startswith('#'):\n continue\n elif line.startswith('%'):\n continue\n else:\n firstWord = line.split()[0]\n if line.startswith('RingThresh'):\n if 'RingThresh' not in metadata.keys():\n metadata.update({'RingThresh': {}})\n strippedline = line.split(firstWord)[1].strip()\n secondword = strippedline.split()[0]\n metadata['RingThresh'].update({secondword: strippedline.\n split(secondword)[1].split('#')[0].strip()})\n else:\n metadata.update({firstWord: line.split(firstWord)[1].split(\n '#')[0].strip()})\n return metadata\n\n\ndef SetupPayloads(inp):\n flow_input = {'input': {'inject_source_endpoint_id': inp['sourceEP'],\n 'funcx_endpoint_non_compute': inp['sourceNCEP'],\n 'proc_endpoint_non_compute': inp['procNCEP'], 'inject_source_path':\n inp['sourcePath'], 'inject_destination_endpoint_id': inp[\n 'remoteDataEP'], 'extract_source_endpoint_id': inp['remoteDataEP'],\n 'funcx_endpoint_compute': inp['funcx_endpoint_compute'],\n 'inject_destination_path': inp['executePath'],\n 'extract_source_path': inp['executeResultPath'],\n 'extract_destination_endpoint_id': inp['destEP'],\n 'extract_destination_path': inp['resultPath'], 'paramFileName': inp\n ['pfName'], 'startLayerNr': inp['startLayerNr'], 'endLayerNr': inp[\n 'endLayerNr'], 'nFrames': inp['nFrames'], 'numProcs': inp[\n 'numProcs'], 'numBlocks': inp['numBlocks'], 'timePath': inp[\n 'timePath'], 'StartFileNrFirstLayer': inp['startNrFirstLayer'],\n 'NrFilesPerSweep': inp['nrFilesPerSweep'], 'FileStem': inp[\n 'fileStem'], 'SeedFolder': inp['seedFolder'], 'RawFolder': inp[\n 'rawFolder'], 'darkFN': inp['darkFN'], 'StartNr': inp['startNr'],\n 'EndNr': inp['endNr'], 'extract_recursive': False,\n 'inject_recursive': True}}\n flow_input['input'].update({'multipletasks': [{'startLayerNr': inp[\n 'startLayerNr'], 'endLayerNr': inp['endLayerNr'], 
'numProcs': inp[\n 'numProcs'], 'nFrames': inp['nFrames'], 'numBlocks': inp[\n 'numBlocks'], 'blockNr': idx, 'timePath': inp['timePath'],\n 'FileStem': inp['fileStem'], 'SeedFolder': inp['seedFolder'],\n 'RawFolder': inp['rawFolder'], 'paramFileName': inp['pfName']} for\n idx in range(inp['numBlocks'])]})\n flow_input['input'].update({'pilot': {'dataset':\n f\"{inp['sourcePath']}/{inp['fileStem']}_Layer_{str(inp['startLayerNr']).zfill(4)}_Analysis_Time_{inp['timePath']}/{inp['fileStem']}_Layer_{str(inp['startLayerNr']).zfill(4)}_Analysis_Time_{inp['timePath']}/\"\n , 'index': inp['portal_id'], 'project': 'hedm',\n 'source_globus_endpoint': inp['sourceEP']}})\n flow_input['input']['pilot'].update({'metadata': gen_metadata(inp[\n 'pfName'])})\n flow_input['input']['pilot']['metadata'].update({'exp_id':\n f\"{inp['experimentName']}_{inp['fileStem']}_{inp['timePath']}\"})\n flow_input['input']['pilot']['metadata'].update({'time_path': inp[\n 'timePath']})\n flow_input['input']['pilot']['metadata'].update({'startNr': inp[\n 'startNr'], 'endNr': inp['endNr']})\n return flow_input\n",
"step-4": "def gen_metadata(fn):\n\tmetadata = {}\n\tlines = open(fn,'r').readlines()\n\tfor line in lines:\n\t\tline = line.rstrip()\n\t\tif len(line) == 0:\n\t\t\tcontinue\n\t\telif line.startswith('#'):\n\t\t\tcontinue\n\t\telif line.startswith('%'):\n\t\t\tcontinue\n\t\telse:\n\t\t\t# Special case RingThresh\n\t\t\tfirstWord = line.split()[0]\n\t\t\tif line.startswith('RingThresh'):\n\t\t\t\tif 'RingThresh' not in metadata.keys():\n\t\t\t\t\tmetadata.update({'RingThresh':{}})\n\t\t\t\tstrippedline = line.split(firstWord)[1].strip()\n\t\t\t\tsecondword = strippedline.split()[0]\n\t\t\t\tmetadata['RingThresh'].update({secondword:strippedline.split(secondword)[1].split('#')[0].strip()})\n\t\t\telse:\n\t\t\t\tmetadata.update({firstWord : line.split(firstWord)[1].split('#')[0].strip()})\n\treturn metadata\n\ndef SetupPayloads(inp):\n\tflow_input = {\n\t\t\"input\": {\n\t\t\t\"inject_source_endpoint_id\":\t\tinp['sourceEP'],\n\t\t\t\"funcx_endpoint_non_compute\":\t\tinp['sourceNCEP'],\n\t\t\t\"proc_endpoint_non_compute\":\t\tinp['procNCEP'],\n\t\t\t\"inject_source_path\":\t\t\t\tinp['sourcePath'],\n\t\t\t\"inject_destination_endpoint_id\":\tinp['remoteDataEP'],\n\t\t\t\"extract_source_endpoint_id\":\t\tinp['remoteDataEP'],\n\t\t\t\"funcx_endpoint_compute\":\t\t\tinp['funcx_endpoint_compute'],\n\t\t\t\"inject_destination_path\":\t\t\tinp['executePath'],\n\t\t\t\"extract_source_path\":\t\t\t\tinp['executeResultPath'],\n\t\t\t\"extract_destination_endpoint_id\":\tinp['destEP'],\n\t\t\t\"extract_destination_path\":\t\t\tinp['resultPath'],\n\t\t\t\"paramFileName\":\t\t\t\t\tinp['pfName'],\n\t\t\t\"startLayerNr\":\t\t\t\t\t\tinp['startLayerNr'],\n\t\t\t\"endLayerNr\":\t\t\t\t\t\tinp['endLayerNr'],\n\t\t\t\"nFrames\":\t\t\t\t\t\t\tinp['nFrames'],\n\t\t\t\"numProcs\":\t\t\t\t\t\t\tinp['numProcs'],\n\t\t\t\"numBlocks\":\t\t\t\t\t\tinp['numBlocks'],\n\t\t\t\"timePath\":\t\t\t\t\t\t\tinp['timePath'],\n\t\t\t\"StartFileNrFirstLayer\":\t\t\tinp['startNrFirstLayer'],\n\t\t\t\"NrFil
esPerSweep\":\t\t\t\t\tinp['nrFilesPerSweep'],\n\t\t\t\"FileStem\":\t\t\t\t\t\t\tinp['fileStem'],\n\t\t\t\"SeedFolder\":\t\t\t\t\t\tinp['seedFolder'],\n\t\t\t\"RawFolder\":\t\t\t\t\t\tinp['rawFolder'],\n\t\t\t\"darkFN\":\t\t\t\t\t\t\tinp['darkFN'],\n\t\t\t\"StartNr\":\t\t\t\t\t\t\tinp['startNr'],\n\t\t\t\"EndNr\":\t\t\t\t\t\t\tinp['endNr'],\n\t\t\t'extract_recursive':\t\t\t\tFalse,\n\t\t\t'inject_recursive':\t\t\t\t\tTrue,}\n\t\t}\n\tflow_input['input'].update({\n\t\t\t'multipletasks':[{\n\t\t\t\t'startLayerNr':inp['startLayerNr'],\n\t\t\t\t'endLayerNr':inp['endLayerNr'],\n\t\t\t\t'numProcs':inp['numProcs'],\n\t\t\t\t'nFrames':inp['nFrames'],\n\t\t\t\t'numBlocks':inp['numBlocks'],\n\t\t\t\t'blockNr':idx,\n\t\t\t\t'timePath':inp['timePath'],\n\t\t\t\t'FileStem':inp['fileStem'],\n\t\t\t\t'SeedFolder':inp['seedFolder'],\n\t\t\t\t'RawFolder':inp['rawFolder'],\n\t\t\t\t'paramFileName':inp['pfName'],\n\t\t\t\t}\n\t\t\tfor idx in range(inp['numBlocks'])\n\t\t]\n\t})\n\tflow_input['input'].update({\n\t\t'pilot':{\n\t\t\t'dataset':f'{inp[\"sourcePath\"]}/{inp[\"fileStem\"]}_Layer_{str(inp[\"startLayerNr\"]).zfill(4)}_Analysis_Time_{inp[\"timePath\"]}/{inp[\"fileStem\"]}_Layer_{str(inp[\"startLayerNr\"]).zfill(4)}_Analysis_Time_{inp[\"timePath\"]}/',\n\t\t\t'index':inp['portal_id'],\n\t\t\t'project':'hedm',\n\t\t\t'source_globus_endpoint':inp['sourceEP'],\n\t\t}\n\t})\n\t\n\tflow_input['input']['pilot'].update({\n\t\t'metadata':gen_metadata(inp['pfName']),\n\t})\n\tflow_input['input']['pilot']['metadata'].update({\n\t\t'exp_id':f'{inp[\"experimentName\"]}_{inp[\"fileStem\"]}_{inp[\"timePath\"]}',\n\t})\n\tflow_input['input']['pilot']['metadata'].update({\n\t\t'time_path':inp[\"timePath\"],\n\t})\n\tflow_input['input']['pilot']['metadata'].update({\n\t\t'startNr':inp[\"startNr\"],\n\t\t'endNr':inp[\"endNr\"],\n\t})\n\treturn flow_input\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from django.conf.urls import patterns, include, url
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
urlpatterns = patterns('accounts.views',
url(r'^$', 'home', name='home'),
url(r'^login/$', 'login', name='login'),
url(r'^logout/$', 'logout', name='logout'),
url(r'^register/$', 'register', name='register'),
url(r'^dashboard/', 'dashboard', name='dashboard'),
url(r'^rewards/', 'rewards', name='rewards'),
url(r'get_all_data/', 'get_all_data', name='get_all_data'),
)
|
normal
|
{
"blob_id": "798ddd4a6e4febb4664bf1c973877628d1a45c71",
"index": 368,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nurlpatterns = patterns('accounts.views', url('^$', 'home', name='home'),\n url('^login/$', 'login', name='login'), url('^logout/$', 'logout', name\n ='logout'), url('^register/$', 'register', name='register'), url(\n '^dashboard/', 'dashboard', name='dashboard'), url('^rewards/',\n 'rewards', name='rewards'), url('get_all_data/', 'get_all_data', name=\n 'get_all_data'))\n",
"step-3": "from django.conf.urls import patterns, include, url\nurlpatterns = patterns('accounts.views', url('^$', 'home', name='home'),\n url('^login/$', 'login', name='login'), url('^logout/$', 'logout', name\n ='logout'), url('^register/$', 'register', name='register'), url(\n '^dashboard/', 'dashboard', name='dashboard'), url('^rewards/',\n 'rewards', name='rewards'), url('get_all_data/', 'get_all_data', name=\n 'get_all_data'))\n",
"step-4": "from django.conf.urls import patterns, include, url\n\n# Uncomment the next two lines to enable the admin:\n# from django.contrib import admin\n# admin.autodiscover()\n\nurlpatterns = patterns('accounts.views',\n url(r'^$', 'home', name='home'),\n url(r'^login/$', 'login', name='login'),\n url(r'^logout/$', 'logout', name='logout'),\n url(r'^register/$', 'register', name='register'),\n url(r'^dashboard/', 'dashboard', name='dashboard'),\n url(r'^rewards/', 'rewards', name='rewards'),\n url(r'get_all_data/', 'get_all_data', name='get_all_data'),\n)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def sift_up(heap: List, pos: int=None):
if pos is None:
pos = len(heap) - 1
current, parent = pos, (pos - 1) // 2
while current > 0:
if heap[current] > heap[parent]:
heap[current], heap[parent] = heap[parent], heap[current]
else:
break
current, parent = parent, (parent - 1) // 2
<|reserved_special_token_0|>
def insert(heap: List, number: int):
heap.append(number)
sift_up(heap, len(heap) - 1)
def heapify(array: List):
for idx in range(len(array), -1, -1):
sift_down(array, idx)
def pop(heap: List):
root = heap[0]
if heap:
heap[0] = heap[-1]
heap.pop()
sift_down(heap)
return root
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def sift_up(heap: List, pos: int=None):
if pos is None:
pos = len(heap) - 1
current, parent = pos, (pos - 1) // 2
while current > 0:
if heap[current] > heap[parent]:
heap[current], heap[parent] = heap[parent], heap[current]
else:
break
current, parent = parent, (parent - 1) // 2
def sift_down(heap: List, pos: int=0):
while pos < len(heap):
left = pos * 2 + 1
right = pos * 2 + 2
if right < len(heap):
max_child = left if heap[left] > heap[right] else right
elif left < len(heap):
max_child = left
else:
return
if heap[pos] < heap[max_child]:
heap[pos], heap[max_child] = heap[max_child], heap[pos]
pos = max_child
def insert(heap: List, number: int):
heap.append(number)
sift_up(heap, len(heap) - 1)
def heapify(array: List):
for idx in range(len(array), -1, -1):
sift_down(array, idx)
def pop(heap: List):
root = heap[0]
if heap:
heap[0] = heap[-1]
heap.pop()
sift_down(heap)
return root
def make_answer(ops):
heap = list()
for op in ops:
op = op.split()
if len(op) > 1:
insert(heap, int(op[1]))
else:
yield pop(heap)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def sift_up(heap: List, pos: int=None):
if pos is None:
pos = len(heap) - 1
current, parent = pos, (pos - 1) // 2
while current > 0:
if heap[current] > heap[parent]:
heap[current], heap[parent] = heap[parent], heap[current]
else:
break
current, parent = parent, (parent - 1) // 2
def sift_down(heap: List, pos: int=0):
while pos < len(heap):
left = pos * 2 + 1
right = pos * 2 + 2
if right < len(heap):
max_child = left if heap[left] > heap[right] else right
elif left < len(heap):
max_child = left
else:
return
if heap[pos] < heap[max_child]:
heap[pos], heap[max_child] = heap[max_child], heap[pos]
pos = max_child
def insert(heap: List, number: int):
heap.append(number)
sift_up(heap, len(heap) - 1)
def heapify(array: List):
for idx in range(len(array), -1, -1):
sift_down(array, idx)
def pop(heap: List):
root = heap[0]
if heap:
heap[0] = heap[-1]
heap.pop()
sift_down(heap)
return root
def make_answer(ops):
heap = list()
for op in ops:
op = op.split()
if len(op) > 1:
insert(heap, int(op[1]))
else:
yield pop(heap)
if __name__ == '__main__':
pass
<|reserved_special_token_1|>
from typing import List
def sift_up(heap: List, pos: int=None):
if pos is None:
pos = len(heap) - 1
current, parent = pos, (pos - 1) // 2
while current > 0:
if heap[current] > heap[parent]:
heap[current], heap[parent] = heap[parent], heap[current]
else:
break
current, parent = parent, (parent - 1) // 2
def sift_down(heap: List, pos: int=0):
while pos < len(heap):
left = pos * 2 + 1
right = pos * 2 + 2
if right < len(heap):
max_child = left if heap[left] > heap[right] else right
elif left < len(heap):
max_child = left
else:
return
if heap[pos] < heap[max_child]:
heap[pos], heap[max_child] = heap[max_child], heap[pos]
pos = max_child
def insert(heap: List, number: int):
heap.append(number)
sift_up(heap, len(heap) - 1)
def heapify(array: List):
for idx in range(len(array), -1, -1):
sift_down(array, idx)
def pop(heap: List):
root = heap[0]
if heap:
heap[0] = heap[-1]
heap.pop()
sift_down(heap)
return root
def make_answer(ops):
heap = list()
for op in ops:
op = op.split()
if len(op) > 1:
insert(heap, int(op[1]))
else:
yield pop(heap)
if __name__ == '__main__':
pass
<|reserved_special_token_1|>
from typing import List
def sift_up(heap: List, pos: int = None):
if pos is None:
pos = len(heap) - 1
current, parent = pos, (pos - 1) // 2
while current > 0:
if heap[current] > heap[parent]:
heap[current], heap[parent] = heap[parent], heap[current]
else:
break
current, parent = parent, (parent - 1) // 2
def sift_down(heap: List, pos: int = 0):
while pos < len(heap):
left = pos * 2 + 1
right = pos * 2 + 2
if right < len(heap):
max_child = left if heap[left] > heap[right] else right
elif left < len(heap):
max_child = left
else:
return
if heap[pos] < heap[max_child]:
heap[pos], heap[max_child] = heap[max_child], heap[pos]
pos = max_child
def insert(heap: List, number: int):
heap.append(number)
sift_up(heap, len(heap) - 1)
def heapify(array: List):
for idx in range(len(array), -1, -1):
sift_down(array, idx)
def pop(heap: List):
root = heap[0]
if heap:
heap[0] = heap[-1]
heap.pop()
sift_down(heap)
return root
def make_answer(ops):
heap = list()
for op in ops:
op = op.split()
if len(op) > 1:
insert(heap, int(op[1]))
else:
yield(pop(heap))
if __name__ == "__main__":
pass
|
flexible
|
{
"blob_id": "9cc6700ab14bed9d69d90c1540f6d42186033a19",
"index": 5052,
"step-1": "<mask token>\n\n\ndef sift_up(heap: List, pos: int=None):\n if pos is None:\n pos = len(heap) - 1\n current, parent = pos, (pos - 1) // 2\n while current > 0:\n if heap[current] > heap[parent]:\n heap[current], heap[parent] = heap[parent], heap[current]\n else:\n break\n current, parent = parent, (parent - 1) // 2\n\n\n<mask token>\n\n\ndef insert(heap: List, number: int):\n heap.append(number)\n sift_up(heap, len(heap) - 1)\n\n\ndef heapify(array: List):\n for idx in range(len(array), -1, -1):\n sift_down(array, idx)\n\n\ndef pop(heap: List):\n root = heap[0]\n if heap:\n heap[0] = heap[-1]\n heap.pop()\n sift_down(heap)\n return root\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef sift_up(heap: List, pos: int=None):\n if pos is None:\n pos = len(heap) - 1\n current, parent = pos, (pos - 1) // 2\n while current > 0:\n if heap[current] > heap[parent]:\n heap[current], heap[parent] = heap[parent], heap[current]\n else:\n break\n current, parent = parent, (parent - 1) // 2\n\n\ndef sift_down(heap: List, pos: int=0):\n while pos < len(heap):\n left = pos * 2 + 1\n right = pos * 2 + 2\n if right < len(heap):\n max_child = left if heap[left] > heap[right] else right\n elif left < len(heap):\n max_child = left\n else:\n return\n if heap[pos] < heap[max_child]:\n heap[pos], heap[max_child] = heap[max_child], heap[pos]\n pos = max_child\n\n\ndef insert(heap: List, number: int):\n heap.append(number)\n sift_up(heap, len(heap) - 1)\n\n\ndef heapify(array: List):\n for idx in range(len(array), -1, -1):\n sift_down(array, idx)\n\n\ndef pop(heap: List):\n root = heap[0]\n if heap:\n heap[0] = heap[-1]\n heap.pop()\n sift_down(heap)\n return root\n\n\ndef make_answer(ops):\n heap = list()\n for op in ops:\n op = op.split()\n if len(op) > 1:\n insert(heap, int(op[1]))\n else:\n yield pop(heap)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef sift_up(heap: List, pos: int=None):\n if pos is None:\n pos = len(heap) - 1\n current, parent = pos, (pos - 1) // 2\n while current > 0:\n if heap[current] > heap[parent]:\n heap[current], heap[parent] = heap[parent], heap[current]\n else:\n break\n current, parent = parent, (parent - 1) // 2\n\n\ndef sift_down(heap: List, pos: int=0):\n while pos < len(heap):\n left = pos * 2 + 1\n right = pos * 2 + 2\n if right < len(heap):\n max_child = left if heap[left] > heap[right] else right\n elif left < len(heap):\n max_child = left\n else:\n return\n if heap[pos] < heap[max_child]:\n heap[pos], heap[max_child] = heap[max_child], heap[pos]\n pos = max_child\n\n\ndef insert(heap: List, number: int):\n heap.append(number)\n sift_up(heap, len(heap) - 1)\n\n\ndef heapify(array: List):\n for idx in range(len(array), -1, -1):\n sift_down(array, idx)\n\n\ndef pop(heap: List):\n root = heap[0]\n if heap:\n heap[0] = heap[-1]\n heap.pop()\n sift_down(heap)\n return root\n\n\ndef make_answer(ops):\n heap = list()\n for op in ops:\n op = op.split()\n if len(op) > 1:\n insert(heap, int(op[1]))\n else:\n yield pop(heap)\n\n\nif __name__ == '__main__':\n pass\n",
"step-4": "from typing import List\n\n\ndef sift_up(heap: List, pos: int=None):\n if pos is None:\n pos = len(heap) - 1\n current, parent = pos, (pos - 1) // 2\n while current > 0:\n if heap[current] > heap[parent]:\n heap[current], heap[parent] = heap[parent], heap[current]\n else:\n break\n current, parent = parent, (parent - 1) // 2\n\n\ndef sift_down(heap: List, pos: int=0):\n while pos < len(heap):\n left = pos * 2 + 1\n right = pos * 2 + 2\n if right < len(heap):\n max_child = left if heap[left] > heap[right] else right\n elif left < len(heap):\n max_child = left\n else:\n return\n if heap[pos] < heap[max_child]:\n heap[pos], heap[max_child] = heap[max_child], heap[pos]\n pos = max_child\n\n\ndef insert(heap: List, number: int):\n heap.append(number)\n sift_up(heap, len(heap) - 1)\n\n\ndef heapify(array: List):\n for idx in range(len(array), -1, -1):\n sift_down(array, idx)\n\n\ndef pop(heap: List):\n root = heap[0]\n if heap:\n heap[0] = heap[-1]\n heap.pop()\n sift_down(heap)\n return root\n\n\ndef make_answer(ops):\n heap = list()\n for op in ops:\n op = op.split()\n if len(op) > 1:\n insert(heap, int(op[1]))\n else:\n yield pop(heap)\n\n\nif __name__ == '__main__':\n pass\n",
"step-5": "from typing import List\n\n\ndef sift_up(heap: List, pos: int = None):\n if pos is None:\n pos = len(heap) - 1\n current, parent = pos, (pos - 1) // 2\n\n while current > 0:\n if heap[current] > heap[parent]:\n heap[current], heap[parent] = heap[parent], heap[current]\n else:\n break\n current, parent = parent, (parent - 1) // 2\n\n\ndef sift_down(heap: List, pos: int = 0):\n while pos < len(heap):\n left = pos * 2 + 1\n right = pos * 2 + 2\n if right < len(heap):\n max_child = left if heap[left] > heap[right] else right\n elif left < len(heap):\n max_child = left\n else:\n return\n\n if heap[pos] < heap[max_child]:\n heap[pos], heap[max_child] = heap[max_child], heap[pos]\n\n pos = max_child\n\n\ndef insert(heap: List, number: int):\n heap.append(number)\n sift_up(heap, len(heap) - 1)\n\n\ndef heapify(array: List):\n for idx in range(len(array), -1, -1):\n sift_down(array, idx)\n\n\ndef pop(heap: List):\n root = heap[0]\n if heap:\n heap[0] = heap[-1]\n heap.pop()\n sift_down(heap)\n return root\n\n\ndef make_answer(ops):\n heap = list()\n for op in ops:\n op = op.split()\n if len(op) > 1:\n insert(heap, int(op[1]))\n else:\n yield(pop(heap))\n\n\nif __name__ == \"__main__\":\n pass\n",
"step-ids": [
4,
6,
7,
8,
9
]
}
|
[
4,
6,
7,
8,
9
] |
<|reserved_special_token_0|>
class AdmLicenseBc(AdmLicense):
def bc_judge(self):
global com_id, com_name
alb = AdmLicenseBc()
count_bc = 0
count = 0
while count_bc == 0:
result = alb.adm_license_judge()
com_id = result[0]
com_name = result[1]
key = dk().search_key(com_name)
if com_id == None:
pass
else:
count += 1
com_url = f'https://www.qcc.com/firm_{com_id}.html'
hds = gh().header()
time.sleep(random.randint(3, 5))
res = requests.get(com_url, headers=hds).text
if '<script>window.location.href' in res:
print('访问频繁,需验证!{bc_judge}')
input('暂停')
elif '<script>location.href="/user_login"</script>' in res:
print('Cookie失效,需更换!{bc_judge}')
input('程序暂停运行!')
elif '您的账号访问超频,请稍后访问或联系客服人员' in res:
print('账号访问超频,请更换账号!{bc_judge}')
input('程序暂停运行!')
else:
tree = etree.HTML(res)
try:
count_bc = tree.xpath(
'//div[@class="tcaption"]/h3[contains(text(),"[工商局]")]/following-sibling::span[1]/text()'
)[0]
count_bc = int(count_bc)
except:
count_bc = 0
localtime = tm().get_localtime()
print(localtime)
if count_bc == 0:
print(f'计数器:{count}\n公司ID:{com_id}\n行政许可信息[工商局]条数:无')
else:
print(
f'计数器:{count}\n公司ID:{com_id}\n行政许可信息[工商局]条数:{count_bc}'
)
status_column = 'status_credit_adm_license_bc'
count_column = 'count_credit_adm_license_bc'
alb.upd_status(com_id, status_column, count_column,
count_bc)
return com_id, com_name, count_bc
def get_page_count(self):
alb = AdmLicenseBc()
result = alb.bc_judge()
com_id = result[0]
com_name = result[1]
count_record = result[2]
if count_record % 10 == 0:
count_page = count_record // 10
else:
count_page = count_record // 10 + 1
value = [com_id, com_name, count_page, count_record]
return value
def get_page_info(self):
alb = AdmLicenseBc()
value = alb.get_page_count()
com_id = value[0]
com_name = value[1]
count_page = value[2]
count_record = value[3]
key = dk().search_key(com_name)
count = 0
for page in range(1, count_page + 1):
index_url = 'https://www.qcc.com'
page_url = (
f'{index_url}/company_getinfos?unique={com_id}&companyname={key}&p={page}&tab=run&box=licens'
)
hds = gh().header()
hds.update({'Referer': f'{index_url}/firm_{com_id}.html'})
time.sleep(random.randint(1, 2))
res = requests.get(page_url, headers=hds).text
if '<script>window.location.href' in res:
print('访问频繁,需验证!{get_page_info[2]}')
input('暂停')
elif '<script>location.href="/user_login"</script>' in res:
print('Cookie失效,需更换!{get_page_info[2]}')
input('程序暂停运行!')
elif '您的账号访问超频,请稍后访问或联系客服人员' in res:
print('账号访问超频,请更换账号!{get_page_info[2]}')
input('程序暂停运行!')
else:
tree = etree.HTML(res)
content_li = tree.xpath(
'//table[@class="ntable ntable-odd"]/tr[position()>2]')
for nbr, content in enumerate(content_li, 1):
count += 1
try:
license_num = content.xpath('td[1]/text()')[0]
license_doc_num = content.xpath('td[2]/text()')[0]
license_doc_name = content.xpath('td[3]/text()')[0]
valid_period_from = content.xpath('td[4]/text()')[0]
valid_period_to = content.xpath('td[5]/text()')[0]
license_office = content.xpath('td[6]/text()')[0]
license_content = content.xpath('td[7]/text()')[0]
except:
license_num = None
license_doc_num = None
license_doc_name = None
valid_period_from = None
valid_period_to = None
license_office = None
license_content = None
print('\n{0}--总第{1}条----{2}/{3}页--{0}\n'.format('-' * 9,
count, page, count_page))
localtime = tm().get_localtime()
create_time = localtime
print(f'当前时间:{create_time}')
print(
f"""公司ID:{com_id}
序号:{license_num}
许可文件编号:{license_doc_num}
许可文件名称:{license_doc_name}
有效期自:{valid_period_from}
有效期至:{valid_period_to}
许可机关:{license_office}
许可内容:{license_content}"""
)
if license_num == None:
ins = """
INSERT INTO
`com_credit_adm_license_bc`
(`com_id`,`license_num`,`license_doc_num`,`license_doc_name`,`valid_period_from`,
`valid_period_to`,`license_office`,`license_content`,`create_time`)
VALUES
(NULL,NULL,NULL,NULL,NULL,
NULL,NULL,NULL,NULL);
"""
else:
ins = f"""
INSERT INTO
`com_credit_adm_license_bc`
(`com_id`,`license_num`,`license_doc_num`,`license_doc_name`,`valid_period_from`,
`valid_period_to`,`license_office`,`license_content`,`create_time`)
VALUES
("{com_id}","{license_num}","{license_doc_num}","{license_doc_name}","{valid_period_from}",
"{valid_period_to}","{license_office}","{license_content}","{create_time}");
"""
db().inssts(ins)
upd = f"""
UPDATE
`com_info`
SET
`status_credit_adm_license_bc` = 1
WHERE
`com_id` = "{com_id}" ;
"""
db().updsts(upd)
localtime = tm().get_localtime()
print('\n{1}\n{0}数据采集完成!{0}\n{1}'.format('+' * 7, '+' * 25))
print(f'当前时间:{localtime}\n')
time.sleep(3)
class AdmLicenseCc(AdmLicense):
def cc_judge(self):
global com_id, com_name
alb = AdmLicenseCc()
count_cc = 0
count = 0
while count_cc == 0:
result = alb.adm_license_judge()
com_id = result[0]
com_name = result[1]
key = dk().search_key(com_name)
if com_id == None:
pass
else:
count += 1
com_url = f'https://www.qcc.com/firm_{com_id}.html'
hds = gh().header()
time.sleep(random.randint(3, 5))
res = requests.get(com_url, headers=hds).text
if '<script>window.location.href' in res:
print('访问频繁,需验证!{cc_judge}')
input('暂停')
elif '<script>location.href="/user_login"</script>' in res:
print('Cookie失效,需更换!{cc_judge}')
input('程序暂停运行!')
elif '您的账号访问超频,请稍后访问或联系客服人员' in res:
print('账号访问超频,请更换账号!{cc_judge}')
input('程序暂停运行!')
else:
tree = etree.HTML(res)
try:
count_cc = tree.xpath(
'//div[@class="tcaption"]/h3[contains(text(),"[信用中国]")]/following-sibling::span[1]/text()'
)[0]
count_cc = int(count_cc)
except:
count_cc = 0
localtime = tm().get_localtime()
print(localtime)
if count_cc == 0:
print(f'计数器:{count}\n公司ID:{com_id}\n行政许可信息[工商局]条数:无')
else:
print(
f'计数器:{count}\n公司ID:{com_id}\n行政许可信息[工商局]条数:{count_cc}'
)
status_column = 'status_credit_adm_license_cc'
count_column = 'count_credit_adm_license_cc'
cd.upd_status(com_id, status_column, count_column, count_cc
)
return com_id, com_name, count_cc
def get_page_info(self):
global project_name, license_status, license_content, expire_time, approval_category, area
alb = AdmLicenseCc()
value = alb.cc_judge()
com_id = value[0]
com_name = value[1]
count_cc = value[2]
key = dk().search_key(com_name)
count = 0
index_url = 'https://www.qcc.com'
page_url = (
f'{index_url}/company_getinfos?unique={com_id}&companyname={key}&p={page}&tab=run'
)
hds = gh().header()
hds.update({'Referer': f'{index_url}/firm_{com_id}.html'})
time.sleep(random.randint(3, 5))
res = requests.get(page_url, headers=hds).text
if '<script>window.location.href' in res:
print('访问频繁,需验证!{cc_judge}')
input('暂停')
elif '<script>location.href="/user_login"</script>' in res:
print('Cookie失效,需更换!{cc_judge}')
input('程序暂停运行!')
elif '您的账号访问超频,请稍后访问或联系客服人员' in res:
print('账号访问超频,请更换账号!{cc_judge}')
input('程序暂停运行!')
else:
tree = etree.HTML(res)
content_li = tree.xpath(
'//div[@class="tcaption"]/span[contains(text(),"[信用中国]")]/parent::div/following-sibling::table[@class="ntable ntable-odd"]/tr[position()>2]'
)
for nbr, content in enumerate(content_li, 1):
count += 1
try:
license_num = content.xpath('td[1]/text()')[0]
dec_book_num = content.xpath('td[2]/text()')[0]
license_office = content.xpath('td[3]/text()')[0]
dec_date = content.xpath('td[4]/text()')[0]
time.sleep(random.randint(1, 2))
dt_id = content.xpath(
'td[5]/a[@class="xzxukeView"]/@onclick')[0].split(
'xzxukeView("')[1].split('")')[0]
dt_url = 'https://www.qcc.com/company_xzxukeView'
para = {'id': f'{dt_id}'}
res_info = requests.post(dt_url, headers=hds, data=para
).text
status = json.loads(res_info)['status']
if status == 200:
data = json.loads(res_info)['data']
project_name = data['name']
license_status = data['status']
license_content = data['content']
expire_time = data['expire_time']
approval_category = data['type']
area = data['province']
else:
print(f'响应失败!\n状态码:{status}')
input('程序暂停运行!')
except:
license_num = None
dec_book_num = None
license_office = None
dec_date = None
dt_id = None
project_name = None
license_status = None
license_content = None
expire_time = None
approval_category = None
print('\n{0}--总第{1}条----{2}/{3}页--{0}\n'.format('-' * 9,
count, page, count_page))
localtime = tm().get_localtime()
create_time = localtime
print(f'当前时间:{create_time}')
print(
f"""公司ID:{com_id}
序号:{license_num}
决定文书号:{dec_book_num}
许可机关:{license_office}
详情ID:{dt_id}
决定日期:{dec_date}
项目名称:{project_name}
许可状态:{license_status}
许可内容:{license_content}
截止时间:{expire_time}
审批类别:{approval_category}
地域:{area}
创建/入库时间:{create_time}"""
)
input('Pause')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class AdmLicense:
<|reserved_special_token_0|>
def upd_status(self, com_id, status_column, count_column, count):
if count == -1:
status = -1
elif count == 0:
status = 0
else:
status = 9
upd = f"""
UPDATE
`com_info`
SET
`{status_column}` = "{status}",`{count_column}` = "{count}"
WHERE
`com_id` = "{com_id}" ;
"""
db().updsts(upd)
<|reserved_special_token_0|>
class AdmLicenseBc(AdmLicense):
def bc_judge(self):
global com_id, com_name
alb = AdmLicenseBc()
count_bc = 0
count = 0
while count_bc == 0:
result = alb.adm_license_judge()
com_id = result[0]
com_name = result[1]
key = dk().search_key(com_name)
if com_id == None:
pass
else:
count += 1
com_url = f'https://www.qcc.com/firm_{com_id}.html'
hds = gh().header()
time.sleep(random.randint(3, 5))
res = requests.get(com_url, headers=hds).text
if '<script>window.location.href' in res:
print('访问频繁,需验证!{bc_judge}')
input('暂停')
elif '<script>location.href="/user_login"</script>' in res:
print('Cookie失效,需更换!{bc_judge}')
input('程序暂停运行!')
elif '您的账号访问超频,请稍后访问或联系客服人员' in res:
print('账号访问超频,请更换账号!{bc_judge}')
input('程序暂停运行!')
else:
tree = etree.HTML(res)
try:
count_bc = tree.xpath(
'//div[@class="tcaption"]/h3[contains(text(),"[工商局]")]/following-sibling::span[1]/text()'
)[0]
count_bc = int(count_bc)
except:
count_bc = 0
localtime = tm().get_localtime()
print(localtime)
if count_bc == 0:
print(f'计数器:{count}\n公司ID:{com_id}\n行政许可信息[工商局]条数:无')
else:
print(
f'计数器:{count}\n公司ID:{com_id}\n行政许可信息[工商局]条数:{count_bc}'
)
status_column = 'status_credit_adm_license_bc'
count_column = 'count_credit_adm_license_bc'
alb.upd_status(com_id, status_column, count_column,
count_bc)
return com_id, com_name, count_bc
def get_page_count(self):
alb = AdmLicenseBc()
result = alb.bc_judge()
com_id = result[0]
com_name = result[1]
count_record = result[2]
if count_record % 10 == 0:
count_page = count_record // 10
else:
count_page = count_record // 10 + 1
value = [com_id, com_name, count_page, count_record]
return value
def get_page_info(self):
alb = AdmLicenseBc()
value = alb.get_page_count()
com_id = value[0]
com_name = value[1]
count_page = value[2]
count_record = value[3]
key = dk().search_key(com_name)
count = 0
for page in range(1, count_page + 1):
index_url = 'https://www.qcc.com'
page_url = (
f'{index_url}/company_getinfos?unique={com_id}&companyname={key}&p={page}&tab=run&box=licens'
)
hds = gh().header()
hds.update({'Referer': f'{index_url}/firm_{com_id}.html'})
time.sleep(random.randint(1, 2))
res = requests.get(page_url, headers=hds).text
if '<script>window.location.href' in res:
print('访问频繁,需验证!{get_page_info[2]}')
input('暂停')
elif '<script>location.href="/user_login"</script>' in res:
print('Cookie失效,需更换!{get_page_info[2]}')
input('程序暂停运行!')
elif '您的账号访问超频,请稍后访问或联系客服人员' in res:
print('账号访问超频,请更换账号!{get_page_info[2]}')
input('程序暂停运行!')
else:
tree = etree.HTML(res)
content_li = tree.xpath(
'//table[@class="ntable ntable-odd"]/tr[position()>2]')
for nbr, content in enumerate(content_li, 1):
count += 1
try:
license_num = content.xpath('td[1]/text()')[0]
license_doc_num = content.xpath('td[2]/text()')[0]
license_doc_name = content.xpath('td[3]/text()')[0]
valid_period_from = content.xpath('td[4]/text()')[0]
valid_period_to = content.xpath('td[5]/text()')[0]
license_office = content.xpath('td[6]/text()')[0]
license_content = content.xpath('td[7]/text()')[0]
except:
license_num = None
license_doc_num = None
license_doc_name = None
valid_period_from = None
valid_period_to = None
license_office = None
license_content = None
print('\n{0}--总第{1}条----{2}/{3}页--{0}\n'.format('-' * 9,
count, page, count_page))
localtime = tm().get_localtime()
create_time = localtime
print(f'当前时间:{create_time}')
print(
f"""公司ID:{com_id}
序号:{license_num}
许可文件编号:{license_doc_num}
许可文件名称:{license_doc_name}
有效期自:{valid_period_from}
有效期至:{valid_period_to}
许可机关:{license_office}
许可内容:{license_content}"""
)
if license_num == None:
ins = """
INSERT INTO
`com_credit_adm_license_bc`
(`com_id`,`license_num`,`license_doc_num`,`license_doc_name`,`valid_period_from`,
`valid_period_to`,`license_office`,`license_content`,`create_time`)
VALUES
(NULL,NULL,NULL,NULL,NULL,
NULL,NULL,NULL,NULL);
"""
else:
ins = f"""
INSERT INTO
`com_credit_adm_license_bc`
(`com_id`,`license_num`,`license_doc_num`,`license_doc_name`,`valid_period_from`,
`valid_period_to`,`license_office`,`license_content`,`create_time`)
VALUES
("{com_id}","{license_num}","{license_doc_num}","{license_doc_name}","{valid_period_from}",
"{valid_period_to}","{license_office}","{license_content}","{create_time}");
"""
db().inssts(ins)
upd = f"""
UPDATE
`com_info`
SET
`status_credit_adm_license_bc` = 1
WHERE
`com_id` = "{com_id}" ;
"""
db().updsts(upd)
localtime = tm().get_localtime()
print('\n{1}\n{0}数据采集完成!{0}\n{1}'.format('+' * 7, '+' * 25))
print(f'当前时间:{localtime}\n')
time.sleep(3)
class AdmLicenseCc(AdmLicense):
def cc_judge(self):
global com_id, com_name
alb = AdmLicenseCc()
count_cc = 0
count = 0
while count_cc == 0:
result = alb.adm_license_judge()
com_id = result[0]
com_name = result[1]
key = dk().search_key(com_name)
if com_id == None:
pass
else:
count += 1
com_url = f'https://www.qcc.com/firm_{com_id}.html'
hds = gh().header()
time.sleep(random.randint(3, 5))
res = requests.get(com_url, headers=hds).text
if '<script>window.location.href' in res:
print('访问频繁,需验证!{cc_judge}')
input('暂停')
elif '<script>location.href="/user_login"</script>' in res:
print('Cookie失效,需更换!{cc_judge}')
input('程序暂停运行!')
elif '您的账号访问超频,请稍后访问或联系客服人员' in res:
print('账号访问超频,请更换账号!{cc_judge}')
input('程序暂停运行!')
else:
tree = etree.HTML(res)
try:
count_cc = tree.xpath(
'//div[@class="tcaption"]/h3[contains(text(),"[信用中国]")]/following-sibling::span[1]/text()'
)[0]
count_cc = int(count_cc)
except:
count_cc = 0
localtime = tm().get_localtime()
print(localtime)
if count_cc == 0:
print(f'计数器:{count}\n公司ID:{com_id}\n行政许可信息[工商局]条数:无')
else:
print(
f'计数器:{count}\n公司ID:{com_id}\n行政许可信息[工商局]条数:{count_cc}'
)
status_column = 'status_credit_adm_license_cc'
count_column = 'count_credit_adm_license_cc'
cd.upd_status(com_id, status_column, count_column, count_cc
)
return com_id, com_name, count_cc
def get_page_info(self):
global project_name, license_status, license_content, expire_time, approval_category, area
alb = AdmLicenseCc()
value = alb.cc_judge()
com_id = value[0]
com_name = value[1]
count_cc = value[2]
key = dk().search_key(com_name)
count = 0
index_url = 'https://www.qcc.com'
page_url = (
f'{index_url}/company_getinfos?unique={com_id}&companyname={key}&p={page}&tab=run'
)
hds = gh().header()
hds.update({'Referer': f'{index_url}/firm_{com_id}.html'})
time.sleep(random.randint(3, 5))
res = requests.get(page_url, headers=hds).text
if '<script>window.location.href' in res:
print('访问频繁,需验证!{cc_judge}')
input('暂停')
elif '<script>location.href="/user_login"</script>' in res:
print('Cookie失效,需更换!{cc_judge}')
input('程序暂停运行!')
elif '您的账号访问超频,请稍后访问或联系客服人员' in res:
print('账号访问超频,请更换账号!{cc_judge}')
input('程序暂停运行!')
else:
tree = etree.HTML(res)
content_li = tree.xpath(
'//div[@class="tcaption"]/span[contains(text(),"[信用中国]")]/parent::div/following-sibling::table[@class="ntable ntable-odd"]/tr[position()>2]'
)
for nbr, content in enumerate(content_li, 1):
count += 1
try:
license_num = content.xpath('td[1]/text()')[0]
dec_book_num = content.xpath('td[2]/text()')[0]
license_office = content.xpath('td[3]/text()')[0]
dec_date = content.xpath('td[4]/text()')[0]
time.sleep(random.randint(1, 2))
dt_id = content.xpath(
'td[5]/a[@class="xzxukeView"]/@onclick')[0].split(
'xzxukeView("')[1].split('")')[0]
dt_url = 'https://www.qcc.com/company_xzxukeView'
para = {'id': f'{dt_id}'}
res_info = requests.post(dt_url, headers=hds, data=para
).text
status = json.loads(res_info)['status']
if status == 200:
data = json.loads(res_info)['data']
project_name = data['name']
license_status = data['status']
license_content = data['content']
expire_time = data['expire_time']
approval_category = data['type']
area = data['province']
else:
print(f'响应失败!\n状态码:{status}')
input('程序暂停运行!')
except:
license_num = None
dec_book_num = None
license_office = None
dec_date = None
dt_id = None
project_name = None
license_status = None
license_content = None
expire_time = None
approval_category = None
print('\n{0}--总第{1}条----{2}/{3}页--{0}\n'.format('-' * 9,
count, page, count_page))
localtime = tm().get_localtime()
create_time = localtime
print(f'当前时间:{create_time}')
print(
f"""公司ID:{com_id}
序号:{license_num}
决定文书号:{dec_book_num}
许可机关:{license_office}
详情ID:{dt_id}
决定日期:{dec_date}
项目名称:{project_name}
许可状态:{license_status}
许可内容:{license_content}
截止时间:{expire_time}
审批类别:{approval_category}
地域:{area}
创建/入库时间:{create_time}"""
)
input('Pause')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class AdmLicense:
def get_com_id(self):
sel = """
SELECT `com_id`,`com_name`
FROM `com_info`
WHERE `origin`
IS NOT NULL AND LENGTH(`com_id`) > 5 AND `status_credit_adm_license` IS NULL
ORDER BY RAND() LIMIT 1;
"""
result = db().selsts(sel)
if result == ():
result = [None, None]
else:
result = result[0]
return result
def upd_status(self, com_id, status_column, count_column, count):
if count == -1:
status = -1
elif count == 0:
status = 0
else:
status = 9
upd = f"""
UPDATE
`com_info`
SET
`{status_column}` = "{status}",`{count_column}` = "{count}"
WHERE
`com_id` = "{com_id}" ;
"""
db().updsts(upd)
<|reserved_special_token_0|>
class AdmLicenseBc(AdmLicense):
def bc_judge(self):
global com_id, com_name
alb = AdmLicenseBc()
count_bc = 0
count = 0
while count_bc == 0:
result = alb.adm_license_judge()
com_id = result[0]
com_name = result[1]
key = dk().search_key(com_name)
if com_id == None:
pass
else:
count += 1
com_url = f'https://www.qcc.com/firm_{com_id}.html'
hds = gh().header()
time.sleep(random.randint(3, 5))
res = requests.get(com_url, headers=hds).text
if '<script>window.location.href' in res:
print('访问频繁,需验证!{bc_judge}')
input('暂停')
elif '<script>location.href="/user_login"</script>' in res:
print('Cookie失效,需更换!{bc_judge}')
input('程序暂停运行!')
elif '您的账号访问超频,请稍后访问或联系客服人员' in res:
print('账号访问超频,请更换账号!{bc_judge}')
input('程序暂停运行!')
else:
tree = etree.HTML(res)
try:
count_bc = tree.xpath(
'//div[@class="tcaption"]/h3[contains(text(),"[工商局]")]/following-sibling::span[1]/text()'
)[0]
count_bc = int(count_bc)
except:
count_bc = 0
localtime = tm().get_localtime()
print(localtime)
if count_bc == 0:
print(f'计数器:{count}\n公司ID:{com_id}\n行政许可信息[工商局]条数:无')
else:
print(
f'计数器:{count}\n公司ID:{com_id}\n行政许可信息[工商局]条数:{count_bc}'
)
status_column = 'status_credit_adm_license_bc'
count_column = 'count_credit_adm_license_bc'
alb.upd_status(com_id, status_column, count_column,
count_bc)
return com_id, com_name, count_bc
def get_page_count(self):
alb = AdmLicenseBc()
result = alb.bc_judge()
com_id = result[0]
com_name = result[1]
count_record = result[2]
if count_record % 10 == 0:
count_page = count_record // 10
else:
count_page = count_record // 10 + 1
value = [com_id, com_name, count_page, count_record]
return value
def get_page_info(self):
alb = AdmLicenseBc()
value = alb.get_page_count()
com_id = value[0]
com_name = value[1]
count_page = value[2]
count_record = value[3]
key = dk().search_key(com_name)
count = 0
for page in range(1, count_page + 1):
index_url = 'https://www.qcc.com'
page_url = (
f'{index_url}/company_getinfos?unique={com_id}&companyname={key}&p={page}&tab=run&box=licens'
)
hds = gh().header()
hds.update({'Referer': f'{index_url}/firm_{com_id}.html'})
time.sleep(random.randint(1, 2))
res = requests.get(page_url, headers=hds).text
if '<script>window.location.href' in res:
print('访问频繁,需验证!{get_page_info[2]}')
input('暂停')
elif '<script>location.href="/user_login"</script>' in res:
print('Cookie失效,需更换!{get_page_info[2]}')
input('程序暂停运行!')
elif '您的账号访问超频,请稍后访问或联系客服人员' in res:
print('账号访问超频,请更换账号!{get_page_info[2]}')
input('程序暂停运行!')
else:
tree = etree.HTML(res)
content_li = tree.xpath(
'//table[@class="ntable ntable-odd"]/tr[position()>2]')
for nbr, content in enumerate(content_li, 1):
count += 1
try:
license_num = content.xpath('td[1]/text()')[0]
license_doc_num = content.xpath('td[2]/text()')[0]
license_doc_name = content.xpath('td[3]/text()')[0]
valid_period_from = content.xpath('td[4]/text()')[0]
valid_period_to = content.xpath('td[5]/text()')[0]
license_office = content.xpath('td[6]/text()')[0]
license_content = content.xpath('td[7]/text()')[0]
except:
license_num = None
license_doc_num = None
license_doc_name = None
valid_period_from = None
valid_period_to = None
license_office = None
license_content = None
print('\n{0}--总第{1}条----{2}/{3}页--{0}\n'.format('-' * 9,
count, page, count_page))
localtime = tm().get_localtime()
create_time = localtime
print(f'当前时间:{create_time}')
print(
f"""公司ID:{com_id}
序号:{license_num}
许可文件编号:{license_doc_num}
许可文件名称:{license_doc_name}
有效期自:{valid_period_from}
有效期至:{valid_period_to}
许可机关:{license_office}
许可内容:{license_content}"""
)
if license_num == None:
ins = """
INSERT INTO
`com_credit_adm_license_bc`
(`com_id`,`license_num`,`license_doc_num`,`license_doc_name`,`valid_period_from`,
`valid_period_to`,`license_office`,`license_content`,`create_time`)
VALUES
(NULL,NULL,NULL,NULL,NULL,
NULL,NULL,NULL,NULL);
"""
else:
ins = f"""
INSERT INTO
`com_credit_adm_license_bc`
(`com_id`,`license_num`,`license_doc_num`,`license_doc_name`,`valid_period_from`,
`valid_period_to`,`license_office`,`license_content`,`create_time`)
VALUES
("{com_id}","{license_num}","{license_doc_num}","{license_doc_name}","{valid_period_from}",
"{valid_period_to}","{license_office}","{license_content}","{create_time}");
"""
db().inssts(ins)
upd = f"""
UPDATE
`com_info`
SET
`status_credit_adm_license_bc` = 1
WHERE
`com_id` = "{com_id}" ;
"""
db().updsts(upd)
localtime = tm().get_localtime()
print('\n{1}\n{0}数据采集完成!{0}\n{1}'.format('+' * 7, '+' * 25))
print(f'当前时间:{localtime}\n')
time.sleep(3)
class AdmLicenseCc(AdmLicense):
    """Administrative-license records from the "[信用中国]" (Credit China) tab on qcc.com."""

    def cc_judge(self):
        """Keep sampling companies until one with [信用中国] license rows is found.

        Side effects: updates module globals ``com_id``/``com_name`` and writes
        the status/count columns back to ``com_info`` for every company checked.
        Returns (com_id, com_name, count_cc).
        """
        global com_id, com_name
        alb = AdmLicenseCc()
        count_cc = 0
        count = 0
        while count_cc == 0:
            result = alb.adm_license_judge()
            com_id = result[0]
            com_name = result[1]
            key = dk().search_key(com_name)
            if com_id is None:  # bugfix: identity comparison for None
                pass
            else:
                count += 1
                com_url = f'https://www.qcc.com/firm_{com_id}.html'
                hds = gh().header()
                time.sleep(random.randint(3, 5))
                res = requests.get(com_url, headers=hds).text
                if '<script>window.location.href' in res:
                    print('访问频繁,需验证!{cc_judge}')
                    input('暂停')
                elif '<script>location.href="/user_login"</script>' in res:
                    print('Cookie失效,需更换!{cc_judge}')
                    input('程序暂停运行!')
                elif '您的账号访问超频,请稍后访问或联系客服人员' in res:
                    print('账号访问超频,请更换账号!{cc_judge}')
                    input('程序暂停运行!')
                else:
                    tree = etree.HTML(res)
                    try:
                        count_cc = tree.xpath(
                            '//div[@class="tcaption"]/h3[contains(text(),"[信用中国]")]/following-sibling::span[1]/text()'
                        )[0]
                        count_cc = int(count_cc)
                    except (IndexError, ValueError):
                        # Section badge missing or not numeric -> no records.
                        count_cc = 0
                    localtime = tm().get_localtime()
                    print(localtime)
                    # NOTE: the label below says [工商局] in the original output
                    # text although this branch counts the [信用中国] section.
                    if count_cc == 0:
                        print(f'计数器:{count}\n公司ID:{com_id}\n行政许可信息[工商局]条数:无')
                    else:
                        print(
                            f'计数器:{count}\n公司ID:{com_id}\n行政许可信息[工商局]条数:{count_cc}'
                        )
                    status_column = 'status_credit_adm_license_cc'
                    count_column = 'count_credit_adm_license_cc'
                    # bugfix: original called `cd.upd_status(...)` but `cd` is
                    # undefined, raising NameError at runtime.
                    alb.upd_status(com_id, status_column, count_column, count_cc)
        return com_id, com_name, count_cc

    def get_page_info(self):
        """Fetch the [信用中国] license table, resolve per-row detail via POST, and print it."""
        global project_name, license_status, license_content, expire_time, approval_category, area
        alb = AdmLicenseCc()
        value = alb.cc_judge()
        com_id = value[0]
        com_name = value[1]
        count_cc = value[2]
        key = dk().search_key(com_name)
        count = 0
        # bugfix: `page`/`count_page` were never defined (NameError). Pagination
        # is not implemented for this tab yet, so only the first page is read.
        page = 1
        count_page = 1
        index_url = 'https://www.qcc.com'
        page_url = (
            f'{index_url}/company_getinfos?unique={com_id}&companyname={key}&p={page}&tab=run'
        )
        hds = gh().header()
        hds.update({'Referer': f'{index_url}/firm_{com_id}.html'})
        time.sleep(random.randint(3, 5))
        res = requests.get(page_url, headers=hds).text
        if '<script>window.location.href' in res:
            print('访问频繁,需验证!{cc_judge}')
            input('暂停')
        elif '<script>location.href="/user_login"</script>' in res:
            print('Cookie失效,需更换!{cc_judge}')
            input('程序暂停运行!')
        elif '您的账号访问超频,请稍后访问或联系客服人员' in res:
            print('账号访问超频,请更换账号!{cc_judge}')
            input('程序暂停运行!')
        else:
            tree = etree.HTML(res)
            content_li = tree.xpath(
                '//div[@class="tcaption"]/span[contains(text(),"[信用中国]")]/parent::div/following-sibling::table[@class="ntable ntable-odd"]/tr[position()>2]'
            )
            for nbr, content in enumerate(content_li, 1):
                count += 1
                try:
                    license_num = content.xpath('td[1]/text()')[0]
                    dec_book_num = content.xpath('td[2]/text()')[0]
                    license_office = content.xpath('td[3]/text()')[0]
                    dec_date = content.xpath('td[4]/text()')[0]
                    time.sleep(random.randint(1, 2))
                    # Detail id is embedded in the onclick handler:
                    # xzxukeView("<id>")
                    dt_id = content.xpath(
                        'td[5]/a[@class="xzxukeView"]/@onclick')[0].split(
                        'xzxukeView("')[1].split('")')[0]
                    dt_url = 'https://www.qcc.com/company_xzxukeView'
                    para = {'id': f'{dt_id}'}
                    res_info = requests.post(dt_url, headers=hds, data=para).text
                    payload = json.loads(res_info)  # parse once, reuse below
                    status = payload['status']
                    if status == 200:
                        data = payload['data']
                        project_name = data['name']
                        license_status = data['status']
                        license_content = data['content']
                        expire_time = data['expire_time']
                        approval_category = data['type']
                        area = data['province']
                    else:
                        print(f'响应失败!\n状态码:{status}')
                        input('程序暂停运行!')
                except (IndexError, ValueError, KeyError):
                    # Row malformed or detail call unparsable -> blank record.
                    license_num = None
                    dec_book_num = None
                    license_office = None
                    dec_date = None
                    dt_id = None
                    project_name = None
                    license_status = None
                    license_content = None
                    expire_time = None
                    approval_category = None
                    area = None  # bugfix: `area` was not reset on failure
                print('\n{0}--总第{1}条----{2}/{3}页--{0}\n'.format('-' * 9,
                    count, page, count_page))
                localtime = tm().get_localtime()
                create_time = localtime
                print(f'当前时间:{create_time}')
                print(f'公司ID:{com_id}\n序号:{license_num}\n决定文书号:{dec_book_num}\n'
                      f'许可机关:{license_office}\n详情ID:{dt_id}\n决定日期:{dec_date}\n'
                      f'项目名称:{project_name}\n许可状态:{license_status}\n'
                      f'许可内容:{license_content}\n截止时间:{expire_time}\n'
                      f'审批类别:{approval_category}\n地域:{area}\n创建/入库时间:{create_time}')
                input('Pause')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class AdmLicense:
    """Base scraper: pick unprocessed companies and record their license counts."""

    def get_com_id(self):
        """Return a random unprocessed (com_id, com_name) pair, or [None, None]."""
        sel = """
            SELECT `com_id`,`com_name`
            FROM `com_info`
            WHERE `origin`
            IS NOT NULL AND LENGTH(`com_id`) > 5 AND `status_credit_adm_license` IS NULL
            ORDER BY RAND() LIMIT 1;
            """
        result = db().selsts(sel)
        if result == ():
            # No candidate rows left -> sentinel pair.
            result = [None, None]
        else:
            result = result[0]
        return result

    def upd_status(self, com_id, status_column, count_column, count):
        """Persist scrape status and count for one company.

        Status encoding: -1 = lookup failed, 0 = no records, 9 = records found.
        """
        if count == -1:
            status = -1
        elif count == 0:
            status = 0
        else:
            status = 9
        # NOTE(security): values are interpolated straight into SQL; prefer
        # parameterized queries if the driver supports them.
        upd = f"""
            UPDATE 
            `com_info` 
            SET
            `{status_column}` = "{status}",`{count_column}` = "{count}"
            WHERE 
            `com_id` = "{com_id}" ;
            """
        db().updsts(upd)

    def adm_license_judge(self):
        """Loop until a company with at least one administrative-license record is found.

        Side effects: updates module globals ``com_id``/``com_name`` and the
        status/count columns in ``com_info`` for every company inspected.
        Returns (com_id, com_name, count_adm_license).
        """
        global com_id, com_name
        al = AdmLicense()
        count_adm_license = 0
        count = 0
        while count_adm_license == 0 or count_adm_license == -1:
            result = al.get_com_id()
            com_id = result[0]
            com_name = result[1]
            if com_id is None:  # bugfix: identity comparison for None
                pass
            else:
                count += 1
                com_url = f'https://www.qcc.com/firm_{com_id}.html'
                hds = gh().header()
                time.sleep(random.randint(3, 5))
                res = requests.get(com_url, headers=hds).text
                if '<script>window.location.href' in res:
                    print('访问频繁,需验证!{adm_license_judge}')
                    input('暂停')
                elif '<script>location.href="/user_login"</script>' in res:
                    print('Cookie失效,需更换!{adm_license_judge}')
                    input('程序暂停运行!')
                elif '您的账号访问超频,请稍后访问或联系客服人员' in res:
                    print('账号访问超频,请更换账号!{adm_license_judge}')
                    input('程序暂停运行!')
                else:
                    tree = etree.HTML(res)
                    try:
                        count_adm_license = tree.xpath(
                            '//div[@class="company-nav-items"]/span[contains(text(),"行政许可")]/span/text()|//div[@class="company-nav-items"]/a[@data-pos="licenslist"]/span/text()'
                        )[0]
                        count_adm_license = int(count_adm_license)
                    except (IndexError, ValueError):
                        # Nav badge missing or not numeric -> mark as unknown.
                        count_adm_license = -1
                    localtime = tm().get_localtime()
                    print(localtime)
                    if count_adm_license == 0 or count_adm_license == -1:
                        print(f'计数器:{count}\n公司ID:{com_id}\n行政许可信息条数:无')
                    else:
                        print(
                            f'计数器:{count}\n公司ID:{com_id}\n行政许可信息条数:{count_adm_license}'
                        )
                    status_column = 'status_credit_adm_license'
                    count_column = 'count_credit_adm_license'
                    al.upd_status(com_id, status_column, count_column,
                        count_adm_license)
        return com_id, com_name, count_adm_license
class AdmLicenseBc(AdmLicense):
    """Administrative-license records from the "[工商局]" (industry & commerce bureau) table."""

    def bc_judge(self):
        """Sample companies until one with [工商局] license rows is found.

        Returns (com_id, com_name, count_bc) and records the count in com_info.
        """
        global com_id, com_name
        alb = AdmLicenseBc()
        count_bc = 0
        count = 0
        while count_bc == 0:
            result = alb.adm_license_judge()
            com_id = result[0]
            com_name = result[1]
            key = dk().search_key(com_name)
            if com_id is None:  # bugfix: identity comparison for None
                pass
            else:
                count += 1
                com_url = f'https://www.qcc.com/firm_{com_id}.html'
                hds = gh().header()
                time.sleep(random.randint(3, 5))
                res = requests.get(com_url, headers=hds).text
                if '<script>window.location.href' in res:
                    print('访问频繁,需验证!{bc_judge}')
                    input('暂停')
                elif '<script>location.href="/user_login"</script>' in res:
                    print('Cookie失效,需更换!{bc_judge}')
                    input('程序暂停运行!')
                elif '您的账号访问超频,请稍后访问或联系客服人员' in res:
                    print('账号访问超频,请更换账号!{bc_judge}')
                    input('程序暂停运行!')
                else:
                    tree = etree.HTML(res)
                    try:
                        count_bc = tree.xpath(
                            '//div[@class="tcaption"]/h3[contains(text(),"[工商局]")]/following-sibling::span[1]/text()'
                        )[0]
                        count_bc = int(count_bc)
                    except (IndexError, ValueError):
                        # Section badge missing or not numeric -> no records.
                        count_bc = 0
                    localtime = tm().get_localtime()
                    print(localtime)
                    if count_bc == 0:
                        print(f'计数器:{count}\n公司ID:{com_id}\n行政许可信息[工商局]条数:无')
                    else:
                        print(
                            f'计数器:{count}\n公司ID:{com_id}\n行政许可信息[工商局]条数:{count_bc}'
                        )
                    status_column = 'status_credit_adm_license_bc'
                    count_column = 'count_credit_adm_license_bc'
                    alb.upd_status(com_id, status_column, count_column, count_bc)
        return com_id, com_name, count_bc

    def get_page_count(self):
        """Translate the record count from bc_judge into a page count (10 rows/page)."""
        alb = AdmLicenseBc()
        result = alb.bc_judge()
        com_id = result[0]
        com_name = result[1]
        count_record = result[2]
        if count_record % 10 == 0:
            count_page = count_record // 10
        else:
            count_page = count_record // 10 + 1
        value = [com_id, com_name, count_page, count_record]
        return value

    def get_page_info(self):
        """Scrape every page of the [工商局] table and insert each row into MySQL."""
        alb = AdmLicenseBc()
        value = alb.get_page_count()
        com_id = value[0]
        com_name = value[1]
        count_page = value[2]
        count_record = value[3]
        key = dk().search_key(com_name)
        count = 0
        for page in range(1, count_page + 1):
            index_url = 'https://www.qcc.com'
            page_url = (
                f'{index_url}/company_getinfos?unique={com_id}&companyname={key}&p={page}&tab=run&box=licens'
            )
            hds = gh().header()
            hds.update({'Referer': f'{index_url}/firm_{com_id}.html'})
            time.sleep(random.randint(1, 2))
            res = requests.get(page_url, headers=hds).text
            if '<script>window.location.href' in res:
                print('访问频繁,需验证!{get_page_info[2]}')
                input('暂停')
            elif '<script>location.href="/user_login"</script>' in res:
                print('Cookie失效,需更换!{get_page_info[2]}')
                input('程序暂停运行!')
            elif '您的账号访问超频,请稍后访问或联系客服人员' in res:
                print('账号访问超频,请更换账号!{get_page_info[2]}')
                input('程序暂停运行!')
            else:
                tree = etree.HTML(res)
                content_li = tree.xpath(
                    '//table[@class="ntable ntable-odd"]/tr[position()>2]')
                for nbr, content in enumerate(content_li, 1):
                    count += 1
                    try:
                        license_num = content.xpath('td[1]/text()')[0]
                        license_doc_num = content.xpath('td[2]/text()')[0]
                        license_doc_name = content.xpath('td[3]/text()')[0]
                        valid_period_from = content.xpath('td[4]/text()')[0]
                        valid_period_to = content.xpath('td[5]/text()')[0]
                        license_office = content.xpath('td[6]/text()')[0]
                        license_content = content.xpath('td[7]/text()')[0]
                    except IndexError:
                        # Row malformed -> store a NULL placeholder record.
                        license_num = None
                        license_doc_num = None
                        license_doc_name = None
                        valid_period_from = None
                        valid_period_to = None
                        license_office = None
                        license_content = None
                    print('\n{0}--总第{1}条----{2}/{3}页--{0}\n'.format('-' * 9,
                        count, page, count_page))
                    localtime = tm().get_localtime()
                    create_time = localtime
                    print(f'当前时间:{create_time}')
                    print(f'公司ID:{com_id}\n序号:{license_num}\n许可文件编号:{license_doc_num}\n'
                          f'许可文件名称:{license_doc_name}\n有效期自:{valid_period_from}\n'
                          f'有效期至:{valid_period_to}\n许可机关:{license_office}\n许可内容:{license_content}')
                    # NOTE(security): scraped values are interpolated into SQL;
                    # prefer parameterized queries if the driver supports them.
                    if license_num is None:  # bugfix: identity comparison
                        ins = """
                            INSERT INTO
                            `com_credit_adm_license_bc`
                            (`com_id`,`license_num`,`license_doc_num`,`license_doc_name`,`valid_period_from`,
                            `valid_period_to`,`license_office`,`license_content`,`create_time`)
                            VALUES
                            (NULL,NULL,NULL,NULL,NULL,
                            NULL,NULL,NULL,NULL);
                            """
                    else:
                        ins = f"""
                            INSERT INTO
                            `com_credit_adm_license_bc`
                            (`com_id`,`license_num`,`license_doc_num`,`license_doc_name`,`valid_period_from`,
                            `valid_period_to`,`license_office`,`license_content`,`create_time`)
                            VALUES
                            ("{com_id}","{license_num}","{license_doc_num}","{license_doc_name}","{valid_period_from}",
                            "{valid_period_to}","{license_office}","{license_content}","{create_time}");
                            """
                    db().inssts(ins)
                    upd = f"""
                        UPDATE 
                        `com_info` 
                        SET
                        `status_credit_adm_license_bc` = 1
                        WHERE 
                        `com_id` = "{com_id}" ;
                        """
                    db().updsts(upd)
        localtime = tm().get_localtime()
        print('\n{1}\n{0}数据采集完成!{0}\n{1}'.format('+' * 7, '+' * 25))
        print(f'当前时间:{localtime}\n')
        time.sleep(3)
class AdmLicenseCc(AdmLicense):
    """Administrative-license records from the "[信用中国]" (Credit China) tab on qcc.com."""

    def cc_judge(self):
        """Keep sampling companies until one with [信用中国] license rows is found.

        Side effects: updates module globals ``com_id``/``com_name`` and writes
        the status/count columns back to ``com_info`` for every company checked.
        Returns (com_id, com_name, count_cc).
        """
        global com_id, com_name
        alb = AdmLicenseCc()
        count_cc = 0
        count = 0
        while count_cc == 0:
            result = alb.adm_license_judge()
            com_id = result[0]
            com_name = result[1]
            key = dk().search_key(com_name)
            if com_id is None:  # bugfix: identity comparison for None
                pass
            else:
                count += 1
                com_url = f'https://www.qcc.com/firm_{com_id}.html'
                hds = gh().header()
                time.sleep(random.randint(3, 5))
                res = requests.get(com_url, headers=hds).text
                if '<script>window.location.href' in res:
                    print('访问频繁,需验证!{cc_judge}')
                    input('暂停')
                elif '<script>location.href="/user_login"</script>' in res:
                    print('Cookie失效,需更换!{cc_judge}')
                    input('程序暂停运行!')
                elif '您的账号访问超频,请稍后访问或联系客服人员' in res:
                    print('账号访问超频,请更换账号!{cc_judge}')
                    input('程序暂停运行!')
                else:
                    tree = etree.HTML(res)
                    try:
                        count_cc = tree.xpath(
                            '//div[@class="tcaption"]/h3[contains(text(),"[信用中国]")]/following-sibling::span[1]/text()'
                        )[0]
                        count_cc = int(count_cc)
                    except (IndexError, ValueError):
                        # Section badge missing or not numeric -> no records.
                        count_cc = 0
                    localtime = tm().get_localtime()
                    print(localtime)
                    # NOTE: the label below says [工商局] in the original output
                    # text although this branch counts the [信用中国] section.
                    if count_cc == 0:
                        print(f'计数器:{count}\n公司ID:{com_id}\n行政许可信息[工商局]条数:无')
                    else:
                        print(
                            f'计数器:{count}\n公司ID:{com_id}\n行政许可信息[工商局]条数:{count_cc}'
                        )
                    status_column = 'status_credit_adm_license_cc'
                    count_column = 'count_credit_adm_license_cc'
                    # bugfix: original called `cd.upd_status(...)` but `cd` is
                    # undefined, raising NameError at runtime.
                    alb.upd_status(com_id, status_column, count_column, count_cc)
        return com_id, com_name, count_cc

    def get_page_info(self):
        """Fetch the [信用中国] license table, resolve per-row detail via POST, and print it."""
        global project_name, license_status, license_content, expire_time, approval_category, area
        alb = AdmLicenseCc()
        value = alb.cc_judge()
        com_id = value[0]
        com_name = value[1]
        count_cc = value[2]
        key = dk().search_key(com_name)
        count = 0
        # bugfix: `page`/`count_page` were never defined (NameError). Pagination
        # is not implemented for this tab yet, so only the first page is read.
        page = 1
        count_page = 1
        index_url = 'https://www.qcc.com'
        page_url = (
            f'{index_url}/company_getinfos?unique={com_id}&companyname={key}&p={page}&tab=run'
        )
        hds = gh().header()
        hds.update({'Referer': f'{index_url}/firm_{com_id}.html'})
        time.sleep(random.randint(3, 5))
        res = requests.get(page_url, headers=hds).text
        if '<script>window.location.href' in res:
            print('访问频繁,需验证!{cc_judge}')
            input('暂停')
        elif '<script>location.href="/user_login"</script>' in res:
            print('Cookie失效,需更换!{cc_judge}')
            input('程序暂停运行!')
        elif '您的账号访问超频,请稍后访问或联系客服人员' in res:
            print('账号访问超频,请更换账号!{cc_judge}')
            input('程序暂停运行!')
        else:
            tree = etree.HTML(res)
            content_li = tree.xpath(
                '//div[@class="tcaption"]/span[contains(text(),"[信用中国]")]/parent::div/following-sibling::table[@class="ntable ntable-odd"]/tr[position()>2]'
            )
            for nbr, content in enumerate(content_li, 1):
                count += 1
                try:
                    license_num = content.xpath('td[1]/text()')[0]
                    dec_book_num = content.xpath('td[2]/text()')[0]
                    license_office = content.xpath('td[3]/text()')[0]
                    dec_date = content.xpath('td[4]/text()')[0]
                    time.sleep(random.randint(1, 2))
                    # Detail id is embedded in the onclick handler:
                    # xzxukeView("<id>")
                    dt_id = content.xpath(
                        'td[5]/a[@class="xzxukeView"]/@onclick')[0].split(
                        'xzxukeView("')[1].split('")')[0]
                    dt_url = 'https://www.qcc.com/company_xzxukeView'
                    para = {'id': f'{dt_id}'}
                    res_info = requests.post(dt_url, headers=hds, data=para).text
                    payload = json.loads(res_info)  # parse once, reuse below
                    status = payload['status']
                    if status == 200:
                        data = payload['data']
                        project_name = data['name']
                        license_status = data['status']
                        license_content = data['content']
                        expire_time = data['expire_time']
                        approval_category = data['type']
                        area = data['province']
                    else:
                        print(f'响应失败!\n状态码:{status}')
                        input('程序暂停运行!')
                except (IndexError, ValueError, KeyError):
                    # Row malformed or detail call unparsable -> blank record.
                    license_num = None
                    dec_book_num = None
                    license_office = None
                    dec_date = None
                    dt_id = None
                    project_name = None
                    license_status = None
                    license_content = None
                    expire_time = None
                    approval_category = None
                    area = None  # bugfix: `area` was not reset on failure
                print('\n{0}--总第{1}条----{2}/{3}页--{0}\n'.format('-' * 9,
                    count, page, count_page))
                localtime = tm().get_localtime()
                create_time = localtime
                print(f'当前时间:{create_time}')
                print(f'公司ID:{com_id}\n序号:{license_num}\n决定文书号:{dec_book_num}\n'
                      f'许可机关:{license_office}\n详情ID:{dt_id}\n决定日期:{dec_date}\n'
                      f'项目名称:{project_name}\n许可状态:{license_status}\n'
                      f'许可内容:{license_content}\n截止时间:{expire_time}\n'
                      f'审批类别:{approval_category}\n地域:{area}\n创建/入库时间:{create_time}')
                input('Pause')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
#! /usr/bin/env python3
# -*- coding:utf-8 -*-
"""
企查查-行政许可[工商局]
"""
import json
import time
import random
import requests
from lxml import etree
from support.use_mysql import QccMysql as db
from support.others import DealKey as dk
from support.others import TimeInfo as tm
from support.headers import GeneralHeaders as gh
class AdmLicense:
    """Base scraper: pick unprocessed companies and record their license counts."""

    def get_com_id(self):
        """Return a random unprocessed (com_id, com_name) pair, or [None, None]."""
        sel = """
            SELECT `com_id`,`com_name`
            FROM `com_info`
            WHERE `origin`
            IS NOT NULL AND LENGTH(`com_id`) > 5 AND `status_credit_adm_license` IS NULL
            ORDER BY RAND() LIMIT 1;
            """
        result = db().selsts(sel)
        if result == ():
            # No candidate rows left -> sentinel pair.
            result = [None, None]
        else:
            result = result[0]
        return result

    def upd_status(self, com_id, status_column, count_column, count):
        """Persist scrape status and count for one company.

        Status encoding: -1 = lookup failed, 0 = no records, 9 = records found.
        """
        if count == -1:
            status = -1
        elif count == 0:
            status = 0
        else:
            status = 9
        # NOTE(security): values are interpolated straight into SQL; prefer
        # parameterized queries if the driver supports them.
        upd = f"""
            UPDATE 
            `com_info` 
            SET
            `{status_column}` = "{status}",`{count_column}` = "{count}"
            WHERE 
            `com_id` = "{com_id}" ;
            """
        db().updsts(upd)

    def adm_license_judge(self):
        """Loop until a company with at least one administrative-license record is found.

        Side effects: updates module globals ``com_id``/``com_name`` and the
        status/count columns in ``com_info`` for every company inspected.
        Returns (com_id, com_name, count_adm_license).
        """
        global com_id, com_name
        al = AdmLicense()
        count_adm_license = 0
        count = 0
        while count_adm_license == 0 or count_adm_license == -1:
            result = al.get_com_id()
            com_id = result[0]
            com_name = result[1]
            if com_id is None:  # bugfix: identity comparison for None
                pass
            else:
                count += 1
                com_url = f'https://www.qcc.com/firm_{com_id}.html'
                hds = gh().header()
                time.sleep(random.randint(3, 5))
                res = requests.get(com_url, headers=hds).text
                if '<script>window.location.href' in res:
                    print('访问频繁,需验证!{adm_license_judge}')
                    input('暂停')
                elif '<script>location.href="/user_login"</script>' in res:
                    print('Cookie失效,需更换!{adm_license_judge}')
                    input('程序暂停运行!')
                elif '您的账号访问超频,请稍后访问或联系客服人员' in res:
                    print('账号访问超频,请更换账号!{adm_license_judge}')
                    input('程序暂停运行!')
                else:
                    tree = etree.HTML(res)
                    try:
                        count_adm_license = tree.xpath(
                            '//div[@class="company-nav-items"]/span[contains(text(),"行政许可")]/span/text()|//div[@class="company-nav-items"]/a[@data-pos="licenslist"]/span/text()'
                        )[0]
                        count_adm_license = int(count_adm_license)
                    except (IndexError, ValueError):
                        # Nav badge missing or not numeric -> mark as unknown.
                        count_adm_license = -1
                    localtime = tm().get_localtime()  # current timestamp
                    print(localtime)
                    if count_adm_license == 0 or count_adm_license == -1:
                        print(f'计数器:{count}\n公司ID:{com_id}\n行政许可信息条数:无')
                    else:
                        print(f'计数器:{count}\n公司ID:{com_id}\n行政许可信息条数:{count_adm_license}')
                    status_column = 'status_credit_adm_license'
                    count_column = 'count_credit_adm_license'
                    al.upd_status(com_id, status_column, count_column, count_adm_license)
        return com_id, com_name, count_adm_license
class AdmLicenseBc(AdmLicense):
    """Administrative-license records from the "[工商局]" (industry & commerce bureau) table."""

    def bc_judge(self):
        """Sample companies until one with [工商局] license rows is found.

        Returns (com_id, com_name, count_bc) and records the count in com_info.
        """
        global com_id, com_name
        alb = AdmLicenseBc()
        count_bc = 0
        count = 0
        while count_bc == 0:
            result = alb.adm_license_judge()
            com_id = result[0]
            com_name = result[1]
            key = dk().search_key(com_name)
            if com_id is None:  # bugfix: identity comparison for None
                pass
            else:
                count += 1
                com_url = f'https://www.qcc.com/firm_{com_id}.html'
                hds = gh().header()
                time.sleep(random.randint(3, 5))
                res = requests.get(com_url, headers=hds).text
                if '<script>window.location.href' in res:
                    print('访问频繁,需验证!{bc_judge}')
                    input('暂停')
                elif '<script>location.href="/user_login"</script>' in res:
                    print('Cookie失效,需更换!{bc_judge}')
                    input('程序暂停运行!')
                elif '您的账号访问超频,请稍后访问或联系客服人员' in res:
                    print('账号访问超频,请更换账号!{bc_judge}')
                    input('程序暂停运行!')
                else:
                    tree = etree.HTML(res)
                    try:
                        count_bc = tree.xpath(
                            '//div[@class="tcaption"]/h3[contains(text(),"[工商局]")]/following-sibling::span[1]/text()'
                        )[0]
                        count_bc = int(count_bc)
                    except (IndexError, ValueError):
                        # Section badge missing or not numeric -> no records.
                        count_bc = 0
                    localtime = tm().get_localtime()  # current timestamp
                    print(localtime)
                    if count_bc == 0:
                        print(f'计数器:{count}\n公司ID:{com_id}\n行政许可信息[工商局]条数:无')
                    else:
                        print(f'计数器:{count}\n公司ID:{com_id}\n行政许可信息[工商局]条数:{count_bc}')
                    status_column = 'status_credit_adm_license_bc'
                    count_column = 'count_credit_adm_license_bc'
                    alb.upd_status(com_id, status_column, count_column, count_bc)
        return com_id, com_name, count_bc

    def get_page_count(self):
        """Translate the record count from bc_judge into a page count (10 rows/page)."""
        alb = AdmLicenseBc()
        result = alb.bc_judge()
        com_id = result[0]
        com_name = result[1]
        count_record = result[2]
        if count_record % 10 == 0:
            count_page = count_record // 10
        else:
            count_page = count_record // 10 + 1
        value = [com_id, com_name, count_page, count_record]
        return value

    def get_page_info(self):
        """Scrape every page of the [工商局] table and insert each row into MySQL."""
        alb = AdmLicenseBc()
        value = alb.get_page_count()
        com_id = value[0]
        com_name = value[1]
        count_page = value[2]
        count_record = value[3]
        key = dk().search_key(com_name)
        count = 0
        for page in range(1, count_page + 1):
            index_url = 'https://www.qcc.com'
            page_url = f'{index_url}/company_getinfos?unique={com_id}&companyname={key}&p={page}&tab=run&box=licens'
            hds = gh().header()
            hds.update({'Referer': f'{index_url}/firm_{com_id}.html'})
            time.sleep(random.randint(1, 2))
            res = requests.get(page_url, headers=hds).text
            if '<script>window.location.href' in res:
                print('访问频繁,需验证!{get_page_info[2]}')
                input('暂停')
            elif '<script>location.href="/user_login"</script>' in res:
                print('Cookie失效,需更换!{get_page_info[2]}')
                input('程序暂停运行!')
            elif '您的账号访问超频,请稍后访问或联系客服人员' in res:
                print('账号访问超频,请更换账号!{get_page_info[2]}')
                input('程序暂停运行!')
            else:
                tree = etree.HTML(res)
                content_li = tree.xpath('//table[@class="ntable ntable-odd"]/tr[position()>2]')
                for nbr, content in enumerate(content_li, 1):
                    count += 1
                    try:
                        license_num = content.xpath('td[1]/text()')[0]
                        license_doc_num = content.xpath('td[2]/text()')[0]
                        license_doc_name = content.xpath('td[3]/text()')[0]
                        valid_period_from = content.xpath('td[4]/text()')[0]
                        valid_period_to = content.xpath('td[5]/text()')[0]
                        license_office = content.xpath('td[6]/text()')[0]
                        license_content = content.xpath('td[7]/text()')[0]
                    except IndexError:
                        # Row malformed -> store a NULL placeholder record.
                        license_num = None
                        license_doc_num = None
                        license_doc_name = None
                        valid_period_from = None
                        valid_period_to = None
                        license_office = None
                        license_content = None
                    print('\n{0}--总第{1}条----{2}/{3}页--{0}\n'.format('-' * 9, count, page, count_page))
                    localtime = tm().get_localtime()  # current timestamp
                    create_time = localtime
                    print(f'当前时间:{create_time}')
                    print(f'公司ID:{com_id}\n序号:{license_num}\n许可文件编号:{license_doc_num}\n'
                          f'许可文件名称:{license_doc_name}\n有效期自:{valid_period_from}\n'
                          f'有效期至:{valid_period_to}\n许可机关:{license_office}\n许可内容:{license_content}')
                    # NOTE(security): scraped values are interpolated into SQL;
                    # prefer parameterized queries if the driver supports them.
                    if license_num is None:  # bugfix: identity comparison
                        ins = """
                            INSERT INTO
                            `com_credit_adm_license_bc`
                            (`com_id`,`license_num`,`license_doc_num`,`license_doc_name`,`valid_period_from`,
                            `valid_period_to`,`license_office`,`license_content`,`create_time`)
                            VALUES
                            (NULL,NULL,NULL,NULL,NULL,
                            NULL,NULL,NULL,NULL);
                            """
                    else:
                        ins = f"""
                            INSERT INTO
                            `com_credit_adm_license_bc`
                            (`com_id`,`license_num`,`license_doc_num`,`license_doc_name`,`valid_period_from`,
                            `valid_period_to`,`license_office`,`license_content`,`create_time`)
                            VALUES
                            ("{com_id}","{license_num}","{license_doc_num}","{license_doc_name}","{valid_period_from}",
                            "{valid_period_to}","{license_office}","{license_content}","{create_time}");
                            """
                    db().inssts(ins)
                    upd = f"""
                        UPDATE 
                        `com_info` 
                        SET
                        `status_credit_adm_license_bc` = 1
                        WHERE 
                        `com_id` = "{com_id}" ;
                        """
                    db().updsts(upd)
        localtime = tm().get_localtime()  # current timestamp
        print('\n{1}\n{0}数据采集完成!{0}\n{1}'.format('+' * 7, '+' * 25))
        print(f'当前时间:{localtime}\n')
        time.sleep(3)
class AdmLicenseCc(AdmLicense):
    """Administrative-license records from the "[信用中国]" (Credit China) tab on qcc.com."""

    def cc_judge(self):
        """Keep sampling companies until one with [信用中国] license rows is found.

        Side effects: updates module globals ``com_id``/``com_name`` and writes
        the status/count columns back to ``com_info`` for every company checked.
        Returns (com_id, com_name, count_cc).
        """
        global com_id, com_name
        alb = AdmLicenseCc()
        count_cc = 0
        count = 0
        while count_cc == 0:
            result = alb.adm_license_judge()
            com_id = result[0]
            com_name = result[1]
            key = dk().search_key(com_name)
            if com_id is None:  # bugfix: identity comparison for None
                pass
            else:
                count += 1
                com_url = f'https://www.qcc.com/firm_{com_id}.html'
                hds = gh().header()
                time.sleep(random.randint(3, 5))
                res = requests.get(com_url, headers=hds).text
                if '<script>window.location.href' in res:
                    print('访问频繁,需验证!{cc_judge}')
                    input('暂停')
                elif '<script>location.href="/user_login"</script>' in res:
                    print('Cookie失效,需更换!{cc_judge}')
                    input('程序暂停运行!')
                elif '您的账号访问超频,请稍后访问或联系客服人员' in res:
                    print('账号访问超频,请更换账号!{cc_judge}')
                    input('程序暂停运行!')
                else:
                    tree = etree.HTML(res)
                    try:
                        count_cc = tree.xpath(
                            '//div[@class="tcaption"]/h3[contains(text(),"[信用中国]")]/following-sibling::span[1]/text()'
                        )[0]
                        count_cc = int(count_cc)
                    except (IndexError, ValueError):
                        # Section badge missing or not numeric -> no records.
                        count_cc = 0
                    localtime = tm().get_localtime()  # current timestamp
                    print(localtime)
                    # NOTE: the label below says [工商局] in the original output
                    # text although this branch counts the [信用中国] section.
                    if count_cc == 0:
                        print(f'计数器:{count}\n公司ID:{com_id}\n行政许可信息[工商局]条数:无')
                    else:
                        print(f'计数器:{count}\n公司ID:{com_id}\n行政许可信息[工商局]条数:{count_cc}')
                    status_column = 'status_credit_adm_license_cc'
                    count_column = 'count_credit_adm_license_cc'
                    # bugfix: original called `cd.upd_status(...)` but `cd` is
                    # undefined, raising NameError at runtime.
                    alb.upd_status(com_id, status_column, count_column, count_cc)
        return com_id, com_name, count_cc

    def get_page_info(self):
        """Fetch the [信用中国] license table, resolve per-row detail via POST, and print it."""
        global project_name, license_status, license_content, expire_time, approval_category, area
        alb = AdmLicenseCc()
        value = alb.cc_judge()
        com_id = value[0]
        com_name = value[1]
        count_cc = value[2]
        key = dk().search_key(com_name)
        count = 0
        # bugfix: `page`/`count_page` were never defined (NameError). Pagination
        # is not implemented for this tab yet, so only the first page is read.
        page = 1
        count_page = 1
        index_url = 'https://www.qcc.com'
        page_url = f'{index_url}/company_getinfos?unique={com_id}&companyname={key}&p={page}&tab=run'
        hds = gh().header()
        hds.update({'Referer': f'{index_url}/firm_{com_id}.html'})
        time.sleep(random.randint(3, 5))
        res = requests.get(page_url, headers=hds).text
        if '<script>window.location.href' in res:
            print('访问频繁,需验证!{cc_judge}')
            input('暂停')
        elif '<script>location.href="/user_login"</script>' in res:
            print('Cookie失效,需更换!{cc_judge}')
            input('程序暂停运行!')
        elif '您的账号访问超频,请稍后访问或联系客服人员' in res:
            print('账号访问超频,请更换账号!{cc_judge}')
            input('程序暂停运行!')
        else:
            tree = etree.HTML(res)
            content_li = tree.xpath(
                '//div[@class="tcaption"]/span[contains(text(),"[信用中国]")]/parent::div/following-sibling::table[@class="ntable ntable-odd"]/tr[position()>2]'
            )
            for nbr, content in enumerate(content_li, 1):
                count += 1
                try:
                    license_num = content.xpath('td[1]/text()')[0]
                    dec_book_num = content.xpath('td[2]/text()')[0]
                    license_office = content.xpath('td[3]/text()')[0]
                    dec_date = content.xpath('td[4]/text()')[0]
                    time.sleep(random.randint(1, 2))
                    # Detail id is embedded in the onclick handler:
                    # xzxukeView("<id>")
                    dt_id = content.xpath('td[5]/a[@class="xzxukeView"]/@onclick')[0].split('xzxukeView("')[1].split('")')[0]
                    dt_url = 'https://www.qcc.com/company_xzxukeView'
                    para = {'id': f'{dt_id}'}
                    res_info = requests.post(dt_url, headers=hds, data=para).text
                    payload = json.loads(res_info)  # parse once, reuse below
                    status = payload['status']
                    if status == 200:
                        data = payload['data']
                        project_name = data['name']
                        license_status = data['status']
                        license_content = data['content']
                        expire_time = data['expire_time']
                        approval_category = data['type']
                        area = data['province']
                    else:
                        print(f'响应失败!\n状态码:{status}')
                        input('程序暂停运行!')
                except (IndexError, ValueError, KeyError):
                    # Row malformed or detail call unparsable -> blank record.
                    license_num = None
                    dec_book_num = None
                    license_office = None
                    dec_date = None
                    dt_id = None
                    project_name = None
                    license_status = None
                    license_content = None
                    expire_time = None
                    approval_category = None
                    area = None  # bugfix: `area` was not reset on failure
                print('\n{0}--总第{1}条----{2}/{3}页--{0}\n'.format('-' * 9, count, page, count_page))
                localtime = tm().get_localtime()  # current timestamp
                create_time = localtime
                print(f'当前时间:{create_time}')
                print(f'公司ID:{com_id}\n序号:{license_num}\n决定文书号:{dec_book_num}\n许可机关:{license_office}\n详情ID:{dt_id}\n'
                      f'决定日期:{dec_date}\n项目名称:{project_name}\n许可状态:{license_status}\n许可内容:{license_content}\n截止时间:{expire_time}\n'
                      f'审批类别:{approval_category}\n地域:{area}\n创建/入库时间:{create_time}')
                input('Pause')
if __name__ == '__main__':
    # Script entry point: run the "[信用中国]" (Credit China) license scraper.
    cc = AdmLicenseCc()
    cc.get_page_info()
|
flexible
|
{
"blob_id": "63822d60ef9dcc1e123a3d20874e9f492b439c6d",
"index": 3313,
"step-1": "<mask token>\n\n\nclass AdmLicenseBc(AdmLicense):\n\n def bc_judge(self):\n global com_id, com_name\n alb = AdmLicenseBc()\n count_bc = 0\n count = 0\n while count_bc == 0:\n result = alb.adm_license_judge()\n com_id = result[0]\n com_name = result[1]\n key = dk().search_key(com_name)\n if com_id == None:\n pass\n else:\n count += 1\n com_url = f'https://www.qcc.com/firm_{com_id}.html'\n hds = gh().header()\n time.sleep(random.randint(3, 5))\n res = requests.get(com_url, headers=hds).text\n if '<script>window.location.href' in res:\n print('访问频繁,需验证!{bc_judge}')\n input('暂停')\n elif '<script>location.href=\"/user_login\"</script>' in res:\n print('Cookie失效,需更换!{bc_judge}')\n input('程序暂停运行!')\n elif '您的账号访问超频,请稍后访问或联系客服人员' in res:\n print('账号访问超频,请更换账号!{bc_judge}')\n input('程序暂停运行!')\n else:\n tree = etree.HTML(res)\n try:\n count_bc = tree.xpath(\n '//div[@class=\"tcaption\"]/h3[contains(text(),\"[工商局]\")]/following-sibling::span[1]/text()'\n )[0]\n count_bc = int(count_bc)\n except:\n count_bc = 0\n localtime = tm().get_localtime()\n print(localtime)\n if count_bc == 0:\n print(f'计数器:{count}\\n公司ID:{com_id}\\n行政许可信息[工商局]条数:无')\n else:\n print(\n f'计数器:{count}\\n公司ID:{com_id}\\n行政许可信息[工商局]条数:{count_bc}'\n )\n status_column = 'status_credit_adm_license_bc'\n count_column = 'count_credit_adm_license_bc'\n alb.upd_status(com_id, status_column, count_column,\n count_bc)\n return com_id, com_name, count_bc\n\n def get_page_count(self):\n alb = AdmLicenseBc()\n result = alb.bc_judge()\n com_id = result[0]\n com_name = result[1]\n count_record = result[2]\n if count_record % 10 == 0:\n count_page = count_record // 10\n else:\n count_page = count_record // 10 + 1\n value = [com_id, com_name, count_page, count_record]\n return value\n\n def get_page_info(self):\n alb = AdmLicenseBc()\n value = alb.get_page_count()\n com_id = value[0]\n com_name = value[1]\n count_page = value[2]\n count_record = value[3]\n key = dk().search_key(com_name)\n count = 0\n for page in 
range(1, count_page + 1):\n index_url = 'https://www.qcc.com'\n page_url = (\n f'{index_url}/company_getinfos?unique={com_id}&companyname={key}&p={page}&tab=run&box=licens'\n )\n hds = gh().header()\n hds.update({'Referer': f'{index_url}/firm_{com_id}.html'})\n time.sleep(random.randint(1, 2))\n res = requests.get(page_url, headers=hds).text\n if '<script>window.location.href' in res:\n print('访问频繁,需验证!{get_page_info[2]}')\n input('暂停')\n elif '<script>location.href=\"/user_login\"</script>' in res:\n print('Cookie失效,需更换!{get_page_info[2]}')\n input('程序暂停运行!')\n elif '您的账号访问超频,请稍后访问或联系客服人员' in res:\n print('账号访问超频,请更换账号!{get_page_info[2]}')\n input('程序暂停运行!')\n else:\n tree = etree.HTML(res)\n content_li = tree.xpath(\n '//table[@class=\"ntable ntable-odd\"]/tr[position()>2]')\n for nbr, content in enumerate(content_li, 1):\n count += 1\n try:\n license_num = content.xpath('td[1]/text()')[0]\n license_doc_num = content.xpath('td[2]/text()')[0]\n license_doc_name = content.xpath('td[3]/text()')[0]\n valid_period_from = content.xpath('td[4]/text()')[0]\n valid_period_to = content.xpath('td[5]/text()')[0]\n license_office = content.xpath('td[6]/text()')[0]\n license_content = content.xpath('td[7]/text()')[0]\n except:\n license_num = None\n license_doc_num = None\n license_doc_name = None\n valid_period_from = None\n valid_period_to = None\n license_office = None\n license_content = None\n print('\\n{0}--总第{1}条----{2}/{3}页--{0}\\n'.format('-' * 9,\n count, page, count_page))\n localtime = tm().get_localtime()\n create_time = localtime\n print(f'当前时间:{create_time}')\n print(\n f\"\"\"公司ID:{com_id}\n序号:{license_num}\n许可文件编号:{license_doc_num}\n许可文件名称:{license_doc_name}\n有效期自:{valid_period_from}\n有效期至:{valid_period_to}\n许可机关:{license_office}\n许可内容:{license_content}\"\"\"\n )\n if license_num == None:\n ins = \"\"\"\n INSERT INTO\n `com_credit_adm_license_bc`\n (`com_id`,`license_num`,`license_doc_num`,`license_doc_name`,`valid_period_from`,\n 
`valid_period_to`,`license_office`,`license_content`,`create_time`)\n VALUES\n (NULL,NULL,NULL,NULL,NULL,\n NULL,NULL,NULL,NULL);\n \"\"\"\n else:\n ins = f\"\"\"\n INSERT INTO\n `com_credit_adm_license_bc`\n (`com_id`,`license_num`,`license_doc_num`,`license_doc_name`,`valid_period_from`,\n `valid_period_to`,`license_office`,`license_content`,`create_time`)\n VALUES\n (\"{com_id}\",\"{license_num}\",\"{license_doc_num}\",\"{license_doc_name}\",\"{valid_period_from}\",\n \"{valid_period_to}\",\"{license_office}\",\"{license_content}\",\"{create_time}\");\n \"\"\"\n db().inssts(ins)\n upd = f\"\"\"\n UPDATE \n `com_info` \n SET\n `status_credit_adm_license_bc` = 1\n WHERE \n `com_id` = \"{com_id}\" ;\n \"\"\"\n db().updsts(upd)\n localtime = tm().get_localtime()\n print('\\n{1}\\n{0}数据采集完成!{0}\\n{1}'.format('+' * 7, '+' * 25))\n print(f'当前时间:{localtime}\\n')\n time.sleep(3)\n\n\nclass AdmLicenseCc(AdmLicense):\n\n def cc_judge(self):\n global com_id, com_name\n alb = AdmLicenseCc()\n count_cc = 0\n count = 0\n while count_cc == 0:\n result = alb.adm_license_judge()\n com_id = result[0]\n com_name = result[1]\n key = dk().search_key(com_name)\n if com_id == None:\n pass\n else:\n count += 1\n com_url = f'https://www.qcc.com/firm_{com_id}.html'\n hds = gh().header()\n time.sleep(random.randint(3, 5))\n res = requests.get(com_url, headers=hds).text\n if '<script>window.location.href' in res:\n print('访问频繁,需验证!{cc_judge}')\n input('暂停')\n elif '<script>location.href=\"/user_login\"</script>' in res:\n print('Cookie失效,需更换!{cc_judge}')\n input('程序暂停运行!')\n elif '您的账号访问超频,请稍后访问或联系客服人员' in res:\n print('账号访问超频,请更换账号!{cc_judge}')\n input('程序暂停运行!')\n else:\n tree = etree.HTML(res)\n try:\n count_cc = tree.xpath(\n '//div[@class=\"tcaption\"]/h3[contains(text(),\"[信用中国]\")]/following-sibling::span[1]/text()'\n )[0]\n count_cc = int(count_cc)\n except:\n count_cc = 0\n localtime = tm().get_localtime()\n print(localtime)\n if count_cc == 0:\n 
print(f'计数器:{count}\\n公司ID:{com_id}\\n行政许可信息[工商局]条数:无')\n else:\n print(\n f'计数器:{count}\\n公司ID:{com_id}\\n行政许可信息[工商局]条数:{count_cc}'\n )\n status_column = 'status_credit_adm_license_cc'\n count_column = 'count_credit_adm_license_cc'\n cd.upd_status(com_id, status_column, count_column, count_cc\n )\n return com_id, com_name, count_cc\n\n def get_page_info(self):\n global project_name, license_status, license_content, expire_time, approval_category, area\n alb = AdmLicenseCc()\n value = alb.cc_judge()\n com_id = value[0]\n com_name = value[1]\n count_cc = value[2]\n key = dk().search_key(com_name)\n count = 0\n index_url = 'https://www.qcc.com'\n page_url = (\n f'{index_url}/company_getinfos?unique={com_id}&companyname={key}&p={page}&tab=run'\n )\n hds = gh().header()\n hds.update({'Referer': f'{index_url}/firm_{com_id}.html'})\n time.sleep(random.randint(3, 5))\n res = requests.get(page_url, headers=hds).text\n if '<script>window.location.href' in res:\n print('访问频繁,需验证!{cc_judge}')\n input('暂停')\n elif '<script>location.href=\"/user_login\"</script>' in res:\n print('Cookie失效,需更换!{cc_judge}')\n input('程序暂停运行!')\n elif '您的账号访问超频,请稍后访问或联系客服人员' in res:\n print('账号访问超频,请更换账号!{cc_judge}')\n input('程序暂停运行!')\n else:\n tree = etree.HTML(res)\n content_li = tree.xpath(\n '//div[@class=\"tcaption\"]/span[contains(text(),\"[信用中国]\")]/parent::div/following-sibling::table[@class=\"ntable ntable-odd\"]/tr[position()>2]'\n )\n for nbr, content in enumerate(content_li, 1):\n count += 1\n try:\n license_num = content.xpath('td[1]/text()')[0]\n dec_book_num = content.xpath('td[2]/text()')[0]\n license_office = content.xpath('td[3]/text()')[0]\n dec_date = content.xpath('td[4]/text()')[0]\n time.sleep(random.randint(1, 2))\n dt_id = content.xpath(\n 'td[5]/a[@class=\"xzxukeView\"]/@onclick')[0].split(\n 'xzxukeView(\"')[1].split('\")')[0]\n dt_url = 'https://www.qcc.com/company_xzxukeView'\n para = {'id': f'{dt_id}'}\n res_info = requests.post(dt_url, headers=hds, data=para\n 
).text\n status = json.loads(res_info)['status']\n if status == 200:\n data = json.loads(res_info)['data']\n project_name = data['name']\n license_status = data['status']\n license_content = data['content']\n expire_time = data['expire_time']\n approval_category = data['type']\n area = data['province']\n else:\n print(f'响应失败!\\n状态码:{status}')\n input('程序暂停运行!')\n except:\n license_num = None\n dec_book_num = None\n license_office = None\n dec_date = None\n dt_id = None\n project_name = None\n license_status = None\n license_content = None\n expire_time = None\n approval_category = None\n print('\\n{0}--总第{1}条----{2}/{3}页--{0}\\n'.format('-' * 9,\n count, page, count_page))\n localtime = tm().get_localtime()\n create_time = localtime\n print(f'当前时间:{create_time}')\n print(\n f\"\"\"公司ID:{com_id}\n序号:{license_num}\n决定文书号:{dec_book_num}\n许可机关:{license_office}\n详情ID:{dt_id}\n决定日期:{dec_date}\n项目名称:{project_name}\n许可状态:{license_status}\n许可内容:{license_content}\n截止时间:{expire_time}\n审批类别:{approval_category}\n地域:{area}\n创建/入库时间:{create_time}\"\"\"\n )\n input('Pause')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass AdmLicense:\n <mask token>\n\n def upd_status(self, com_id, status_column, count_column, count):\n if count == -1:\n status = -1\n elif count == 0:\n status = 0\n else:\n status = 9\n upd = f\"\"\"\n UPDATE \n `com_info` \n SET\n `{status_column}` = \"{status}\",`{count_column}` = \"{count}\"\n WHERE \n `com_id` = \"{com_id}\" ;\n \"\"\"\n db().updsts(upd)\n <mask token>\n\n\nclass AdmLicenseBc(AdmLicense):\n\n def bc_judge(self):\n global com_id, com_name\n alb = AdmLicenseBc()\n count_bc = 0\n count = 0\n while count_bc == 0:\n result = alb.adm_license_judge()\n com_id = result[0]\n com_name = result[1]\n key = dk().search_key(com_name)\n if com_id == None:\n pass\n else:\n count += 1\n com_url = f'https://www.qcc.com/firm_{com_id}.html'\n hds = gh().header()\n time.sleep(random.randint(3, 5))\n res = requests.get(com_url, headers=hds).text\n if '<script>window.location.href' in res:\n print('访问频繁,需验证!{bc_judge}')\n input('暂停')\n elif '<script>location.href=\"/user_login\"</script>' in res:\n print('Cookie失效,需更换!{bc_judge}')\n input('程序暂停运行!')\n elif '您的账号访问超频,请稍后访问或联系客服人员' in res:\n print('账号访问超频,请更换账号!{bc_judge}')\n input('程序暂停运行!')\n else:\n tree = etree.HTML(res)\n try:\n count_bc = tree.xpath(\n '//div[@class=\"tcaption\"]/h3[contains(text(),\"[工商局]\")]/following-sibling::span[1]/text()'\n )[0]\n count_bc = int(count_bc)\n except:\n count_bc = 0\n localtime = tm().get_localtime()\n print(localtime)\n if count_bc == 0:\n print(f'计数器:{count}\\n公司ID:{com_id}\\n行政许可信息[工商局]条数:无')\n else:\n print(\n f'计数器:{count}\\n公司ID:{com_id}\\n行政许可信息[工商局]条数:{count_bc}'\n )\n status_column = 'status_credit_adm_license_bc'\n count_column = 'count_credit_adm_license_bc'\n alb.upd_status(com_id, status_column, count_column,\n count_bc)\n return com_id, com_name, count_bc\n\n def get_page_count(self):\n alb = AdmLicenseBc()\n result = alb.bc_judge()\n com_id = result[0]\n com_name = result[1]\n count_record = result[2]\n if count_record % 10 == 
0:\n count_page = count_record // 10\n else:\n count_page = count_record // 10 + 1\n value = [com_id, com_name, count_page, count_record]\n return value\n\n def get_page_info(self):\n alb = AdmLicenseBc()\n value = alb.get_page_count()\n com_id = value[0]\n com_name = value[1]\n count_page = value[2]\n count_record = value[3]\n key = dk().search_key(com_name)\n count = 0\n for page in range(1, count_page + 1):\n index_url = 'https://www.qcc.com'\n page_url = (\n f'{index_url}/company_getinfos?unique={com_id}&companyname={key}&p={page}&tab=run&box=licens'\n )\n hds = gh().header()\n hds.update({'Referer': f'{index_url}/firm_{com_id}.html'})\n time.sleep(random.randint(1, 2))\n res = requests.get(page_url, headers=hds).text\n if '<script>window.location.href' in res:\n print('访问频繁,需验证!{get_page_info[2]}')\n input('暂停')\n elif '<script>location.href=\"/user_login\"</script>' in res:\n print('Cookie失效,需更换!{get_page_info[2]}')\n input('程序暂停运行!')\n elif '您的账号访问超频,请稍后访问或联系客服人员' in res:\n print('账号访问超频,请更换账号!{get_page_info[2]}')\n input('程序暂停运行!')\n else:\n tree = etree.HTML(res)\n content_li = tree.xpath(\n '//table[@class=\"ntable ntable-odd\"]/tr[position()>2]')\n for nbr, content in enumerate(content_li, 1):\n count += 1\n try:\n license_num = content.xpath('td[1]/text()')[0]\n license_doc_num = content.xpath('td[2]/text()')[0]\n license_doc_name = content.xpath('td[3]/text()')[0]\n valid_period_from = content.xpath('td[4]/text()')[0]\n valid_period_to = content.xpath('td[5]/text()')[0]\n license_office = content.xpath('td[6]/text()')[0]\n license_content = content.xpath('td[7]/text()')[0]\n except:\n license_num = None\n license_doc_num = None\n license_doc_name = None\n valid_period_from = None\n valid_period_to = None\n license_office = None\n license_content = None\n print('\\n{0}--总第{1}条----{2}/{3}页--{0}\\n'.format('-' * 9,\n count, page, count_page))\n localtime = tm().get_localtime()\n create_time = localtime\n print(f'当前时间:{create_time}')\n print(\n 
f\"\"\"公司ID:{com_id}\n序号:{license_num}\n许可文件编号:{license_doc_num}\n许可文件名称:{license_doc_name}\n有效期自:{valid_period_from}\n有效期至:{valid_period_to}\n许可机关:{license_office}\n许可内容:{license_content}\"\"\"\n )\n if license_num == None:\n ins = \"\"\"\n INSERT INTO\n `com_credit_adm_license_bc`\n (`com_id`,`license_num`,`license_doc_num`,`license_doc_name`,`valid_period_from`,\n `valid_period_to`,`license_office`,`license_content`,`create_time`)\n VALUES\n (NULL,NULL,NULL,NULL,NULL,\n NULL,NULL,NULL,NULL);\n \"\"\"\n else:\n ins = f\"\"\"\n INSERT INTO\n `com_credit_adm_license_bc`\n (`com_id`,`license_num`,`license_doc_num`,`license_doc_name`,`valid_period_from`,\n `valid_period_to`,`license_office`,`license_content`,`create_time`)\n VALUES\n (\"{com_id}\",\"{license_num}\",\"{license_doc_num}\",\"{license_doc_name}\",\"{valid_period_from}\",\n \"{valid_period_to}\",\"{license_office}\",\"{license_content}\",\"{create_time}\");\n \"\"\"\n db().inssts(ins)\n upd = f\"\"\"\n UPDATE \n `com_info` \n SET\n `status_credit_adm_license_bc` = 1\n WHERE \n `com_id` = \"{com_id}\" ;\n \"\"\"\n db().updsts(upd)\n localtime = tm().get_localtime()\n print('\\n{1}\\n{0}数据采集完成!{0}\\n{1}'.format('+' * 7, '+' * 25))\n print(f'当前时间:{localtime}\\n')\n time.sleep(3)\n\n\nclass AdmLicenseCc(AdmLicense):\n\n def cc_judge(self):\n global com_id, com_name\n alb = AdmLicenseCc()\n count_cc = 0\n count = 0\n while count_cc == 0:\n result = alb.adm_license_judge()\n com_id = result[0]\n com_name = result[1]\n key = dk().search_key(com_name)\n if com_id == None:\n pass\n else:\n count += 1\n com_url = f'https://www.qcc.com/firm_{com_id}.html'\n hds = gh().header()\n time.sleep(random.randint(3, 5))\n res = requests.get(com_url, headers=hds).text\n if '<script>window.location.href' in res:\n print('访问频繁,需验证!{cc_judge}')\n input('暂停')\n elif '<script>location.href=\"/user_login\"</script>' in res:\n print('Cookie失效,需更换!{cc_judge}')\n input('程序暂停运行!')\n elif '您的账号访问超频,请稍后访问或联系客服人员' in res:\n 
print('账号访问超频,请更换账号!{cc_judge}')\n input('程序暂停运行!')\n else:\n tree = etree.HTML(res)\n try:\n count_cc = tree.xpath(\n '//div[@class=\"tcaption\"]/h3[contains(text(),\"[信用中国]\")]/following-sibling::span[1]/text()'\n )[0]\n count_cc = int(count_cc)\n except:\n count_cc = 0\n localtime = tm().get_localtime()\n print(localtime)\n if count_cc == 0:\n print(f'计数器:{count}\\n公司ID:{com_id}\\n行政许可信息[工商局]条数:无')\n else:\n print(\n f'计数器:{count}\\n公司ID:{com_id}\\n行政许可信息[工商局]条数:{count_cc}'\n )\n status_column = 'status_credit_adm_license_cc'\n count_column = 'count_credit_adm_license_cc'\n cd.upd_status(com_id, status_column, count_column, count_cc\n )\n return com_id, com_name, count_cc\n\n def get_page_info(self):\n global project_name, license_status, license_content, expire_time, approval_category, area\n alb = AdmLicenseCc()\n value = alb.cc_judge()\n com_id = value[0]\n com_name = value[1]\n count_cc = value[2]\n key = dk().search_key(com_name)\n count = 0\n index_url = 'https://www.qcc.com'\n page_url = (\n f'{index_url}/company_getinfos?unique={com_id}&companyname={key}&p={page}&tab=run'\n )\n hds = gh().header()\n hds.update({'Referer': f'{index_url}/firm_{com_id}.html'})\n time.sleep(random.randint(3, 5))\n res = requests.get(page_url, headers=hds).text\n if '<script>window.location.href' in res:\n print('访问频繁,需验证!{cc_judge}')\n input('暂停')\n elif '<script>location.href=\"/user_login\"</script>' in res:\n print('Cookie失效,需更换!{cc_judge}')\n input('程序暂停运行!')\n elif '您的账号访问超频,请稍后访问或联系客服人员' in res:\n print('账号访问超频,请更换账号!{cc_judge}')\n input('程序暂停运行!')\n else:\n tree = etree.HTML(res)\n content_li = tree.xpath(\n '//div[@class=\"tcaption\"]/span[contains(text(),\"[信用中国]\")]/parent::div/following-sibling::table[@class=\"ntable ntable-odd\"]/tr[position()>2]'\n )\n for nbr, content in enumerate(content_li, 1):\n count += 1\n try:\n license_num = content.xpath('td[1]/text()')[0]\n dec_book_num = content.xpath('td[2]/text()')[0]\n license_office = 
content.xpath('td[3]/text()')[0]\n dec_date = content.xpath('td[4]/text()')[0]\n time.sleep(random.randint(1, 2))\n dt_id = content.xpath(\n 'td[5]/a[@class=\"xzxukeView\"]/@onclick')[0].split(\n 'xzxukeView(\"')[1].split('\")')[0]\n dt_url = 'https://www.qcc.com/company_xzxukeView'\n para = {'id': f'{dt_id}'}\n res_info = requests.post(dt_url, headers=hds, data=para\n ).text\n status = json.loads(res_info)['status']\n if status == 200:\n data = json.loads(res_info)['data']\n project_name = data['name']\n license_status = data['status']\n license_content = data['content']\n expire_time = data['expire_time']\n approval_category = data['type']\n area = data['province']\n else:\n print(f'响应失败!\\n状态码:{status}')\n input('程序暂停运行!')\n except:\n license_num = None\n dec_book_num = None\n license_office = None\n dec_date = None\n dt_id = None\n project_name = None\n license_status = None\n license_content = None\n expire_time = None\n approval_category = None\n print('\\n{0}--总第{1}条----{2}/{3}页--{0}\\n'.format('-' * 9,\n count, page, count_page))\n localtime = tm().get_localtime()\n create_time = localtime\n print(f'当前时间:{create_time}')\n print(\n f\"\"\"公司ID:{com_id}\n序号:{license_num}\n决定文书号:{dec_book_num}\n许可机关:{license_office}\n详情ID:{dt_id}\n决定日期:{dec_date}\n项目名称:{project_name}\n许可状态:{license_status}\n许可内容:{license_content}\n截止时间:{expire_time}\n审批类别:{approval_category}\n地域:{area}\n创建/入库时间:{create_time}\"\"\"\n )\n input('Pause')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass AdmLicense:\n\n def get_com_id(self):\n sel = \"\"\"\n SELECT `com_id`,`com_name`\n FROM `com_info`\n WHERE `origin`\n IS NOT NULL AND LENGTH(`com_id`) > 5 AND `status_credit_adm_license` IS NULL\n ORDER BY RAND() LIMIT 1;\n \"\"\"\n result = db().selsts(sel)\n if result == ():\n result = [None, None]\n else:\n result = result[0]\n return result\n\n def upd_status(self, com_id, status_column, count_column, count):\n if count == -1:\n status = -1\n elif count == 0:\n status = 0\n else:\n status = 9\n upd = f\"\"\"\n UPDATE \n `com_info` \n SET\n `{status_column}` = \"{status}\",`{count_column}` = \"{count}\"\n WHERE \n `com_id` = \"{com_id}\" ;\n \"\"\"\n db().updsts(upd)\n <mask token>\n\n\nclass AdmLicenseBc(AdmLicense):\n\n def bc_judge(self):\n global com_id, com_name\n alb = AdmLicenseBc()\n count_bc = 0\n count = 0\n while count_bc == 0:\n result = alb.adm_license_judge()\n com_id = result[0]\n com_name = result[1]\n key = dk().search_key(com_name)\n if com_id == None:\n pass\n else:\n count += 1\n com_url = f'https://www.qcc.com/firm_{com_id}.html'\n hds = gh().header()\n time.sleep(random.randint(3, 5))\n res = requests.get(com_url, headers=hds).text\n if '<script>window.location.href' in res:\n print('访问频繁,需验证!{bc_judge}')\n input('暂停')\n elif '<script>location.href=\"/user_login\"</script>' in res:\n print('Cookie失效,需更换!{bc_judge}')\n input('程序暂停运行!')\n elif '您的账号访问超频,请稍后访问或联系客服人员' in res:\n print('账号访问超频,请更换账号!{bc_judge}')\n input('程序暂停运行!')\n else:\n tree = etree.HTML(res)\n try:\n count_bc = tree.xpath(\n '//div[@class=\"tcaption\"]/h3[contains(text(),\"[工商局]\")]/following-sibling::span[1]/text()'\n )[0]\n count_bc = int(count_bc)\n except:\n count_bc = 0\n localtime = tm().get_localtime()\n print(localtime)\n if count_bc == 0:\n print(f'计数器:{count}\\n公司ID:{com_id}\\n行政许可信息[工商局]条数:无')\n else:\n print(\n f'计数器:{count}\\n公司ID:{com_id}\\n行政许可信息[工商局]条数:{count_bc}'\n )\n status_column = 'status_credit_adm_license_bc'\n 
count_column = 'count_credit_adm_license_bc'\n alb.upd_status(com_id, status_column, count_column,\n count_bc)\n return com_id, com_name, count_bc\n\n def get_page_count(self):\n alb = AdmLicenseBc()\n result = alb.bc_judge()\n com_id = result[0]\n com_name = result[1]\n count_record = result[2]\n if count_record % 10 == 0:\n count_page = count_record // 10\n else:\n count_page = count_record // 10 + 1\n value = [com_id, com_name, count_page, count_record]\n return value\n\n def get_page_info(self):\n alb = AdmLicenseBc()\n value = alb.get_page_count()\n com_id = value[0]\n com_name = value[1]\n count_page = value[2]\n count_record = value[3]\n key = dk().search_key(com_name)\n count = 0\n for page in range(1, count_page + 1):\n index_url = 'https://www.qcc.com'\n page_url = (\n f'{index_url}/company_getinfos?unique={com_id}&companyname={key}&p={page}&tab=run&box=licens'\n )\n hds = gh().header()\n hds.update({'Referer': f'{index_url}/firm_{com_id}.html'})\n time.sleep(random.randint(1, 2))\n res = requests.get(page_url, headers=hds).text\n if '<script>window.location.href' in res:\n print('访问频繁,需验证!{get_page_info[2]}')\n input('暂停')\n elif '<script>location.href=\"/user_login\"</script>' in res:\n print('Cookie失效,需更换!{get_page_info[2]}')\n input('程序暂停运行!')\n elif '您的账号访问超频,请稍后访问或联系客服人员' in res:\n print('账号访问超频,请更换账号!{get_page_info[2]}')\n input('程序暂停运行!')\n else:\n tree = etree.HTML(res)\n content_li = tree.xpath(\n '//table[@class=\"ntable ntable-odd\"]/tr[position()>2]')\n for nbr, content in enumerate(content_li, 1):\n count += 1\n try:\n license_num = content.xpath('td[1]/text()')[0]\n license_doc_num = content.xpath('td[2]/text()')[0]\n license_doc_name = content.xpath('td[3]/text()')[0]\n valid_period_from = content.xpath('td[4]/text()')[0]\n valid_period_to = content.xpath('td[5]/text()')[0]\n license_office = content.xpath('td[6]/text()')[0]\n license_content = content.xpath('td[7]/text()')[0]\n except:\n license_num = None\n license_doc_num = None\n 
license_doc_name = None\n valid_period_from = None\n valid_period_to = None\n license_office = None\n license_content = None\n print('\\n{0}--总第{1}条----{2}/{3}页--{0}\\n'.format('-' * 9,\n count, page, count_page))\n localtime = tm().get_localtime()\n create_time = localtime\n print(f'当前时间:{create_time}')\n print(\n f\"\"\"公司ID:{com_id}\n序号:{license_num}\n许可文件编号:{license_doc_num}\n许可文件名称:{license_doc_name}\n有效期自:{valid_period_from}\n有效期至:{valid_period_to}\n许可机关:{license_office}\n许可内容:{license_content}\"\"\"\n )\n if license_num == None:\n ins = \"\"\"\n INSERT INTO\n `com_credit_adm_license_bc`\n (`com_id`,`license_num`,`license_doc_num`,`license_doc_name`,`valid_period_from`,\n `valid_period_to`,`license_office`,`license_content`,`create_time`)\n VALUES\n (NULL,NULL,NULL,NULL,NULL,\n NULL,NULL,NULL,NULL);\n \"\"\"\n else:\n ins = f\"\"\"\n INSERT INTO\n `com_credit_adm_license_bc`\n (`com_id`,`license_num`,`license_doc_num`,`license_doc_name`,`valid_period_from`,\n `valid_period_to`,`license_office`,`license_content`,`create_time`)\n VALUES\n (\"{com_id}\",\"{license_num}\",\"{license_doc_num}\",\"{license_doc_name}\",\"{valid_period_from}\",\n \"{valid_period_to}\",\"{license_office}\",\"{license_content}\",\"{create_time}\");\n \"\"\"\n db().inssts(ins)\n upd = f\"\"\"\n UPDATE \n `com_info` \n SET\n `status_credit_adm_license_bc` = 1\n WHERE \n `com_id` = \"{com_id}\" ;\n \"\"\"\n db().updsts(upd)\n localtime = tm().get_localtime()\n print('\\n{1}\\n{0}数据采集完成!{0}\\n{1}'.format('+' * 7, '+' * 25))\n print(f'当前时间:{localtime}\\n')\n time.sleep(3)\n\n\nclass AdmLicenseCc(AdmLicense):\n\n def cc_judge(self):\n global com_id, com_name\n alb = AdmLicenseCc()\n count_cc = 0\n count = 0\n while count_cc == 0:\n result = alb.adm_license_judge()\n com_id = result[0]\n com_name = result[1]\n key = dk().search_key(com_name)\n if com_id == None:\n pass\n else:\n count += 1\n com_url = f'https://www.qcc.com/firm_{com_id}.html'\n hds = gh().header()\n 
time.sleep(random.randint(3, 5))\n res = requests.get(com_url, headers=hds).text\n if '<script>window.location.href' in res:\n print('访问频繁,需验证!{cc_judge}')\n input('暂停')\n elif '<script>location.href=\"/user_login\"</script>' in res:\n print('Cookie失效,需更换!{cc_judge}')\n input('程序暂停运行!')\n elif '您的账号访问超频,请稍后访问或联系客服人员' in res:\n print('账号访问超频,请更换账号!{cc_judge}')\n input('程序暂停运行!')\n else:\n tree = etree.HTML(res)\n try:\n count_cc = tree.xpath(\n '//div[@class=\"tcaption\"]/h3[contains(text(),\"[信用中国]\")]/following-sibling::span[1]/text()'\n )[0]\n count_cc = int(count_cc)\n except:\n count_cc = 0\n localtime = tm().get_localtime()\n print(localtime)\n if count_cc == 0:\n print(f'计数器:{count}\\n公司ID:{com_id}\\n行政许可信息[工商局]条数:无')\n else:\n print(\n f'计数器:{count}\\n公司ID:{com_id}\\n行政许可信息[工商局]条数:{count_cc}'\n )\n status_column = 'status_credit_adm_license_cc'\n count_column = 'count_credit_adm_license_cc'\n cd.upd_status(com_id, status_column, count_column, count_cc\n )\n return com_id, com_name, count_cc\n\n def get_page_info(self):\n global project_name, license_status, license_content, expire_time, approval_category, area\n alb = AdmLicenseCc()\n value = alb.cc_judge()\n com_id = value[0]\n com_name = value[1]\n count_cc = value[2]\n key = dk().search_key(com_name)\n count = 0\n index_url = 'https://www.qcc.com'\n page_url = (\n f'{index_url}/company_getinfos?unique={com_id}&companyname={key}&p={page}&tab=run'\n )\n hds = gh().header()\n hds.update({'Referer': f'{index_url}/firm_{com_id}.html'})\n time.sleep(random.randint(3, 5))\n res = requests.get(page_url, headers=hds).text\n if '<script>window.location.href' in res:\n print('访问频繁,需验证!{cc_judge}')\n input('暂停')\n elif '<script>location.href=\"/user_login\"</script>' in res:\n print('Cookie失效,需更换!{cc_judge}')\n input('程序暂停运行!')\n elif '您的账号访问超频,请稍后访问或联系客服人员' in res:\n print('账号访问超频,请更换账号!{cc_judge}')\n input('程序暂停运行!')\n else:\n tree = etree.HTML(res)\n content_li = tree.xpath(\n 
'//div[@class=\"tcaption\"]/span[contains(text(),\"[信用中国]\")]/parent::div/following-sibling::table[@class=\"ntable ntable-odd\"]/tr[position()>2]'\n )\n for nbr, content in enumerate(content_li, 1):\n count += 1\n try:\n license_num = content.xpath('td[1]/text()')[0]\n dec_book_num = content.xpath('td[2]/text()')[0]\n license_office = content.xpath('td[3]/text()')[0]\n dec_date = content.xpath('td[4]/text()')[0]\n time.sleep(random.randint(1, 2))\n dt_id = content.xpath(\n 'td[5]/a[@class=\"xzxukeView\"]/@onclick')[0].split(\n 'xzxukeView(\"')[1].split('\")')[0]\n dt_url = 'https://www.qcc.com/company_xzxukeView'\n para = {'id': f'{dt_id}'}\n res_info = requests.post(dt_url, headers=hds, data=para\n ).text\n status = json.loads(res_info)['status']\n if status == 200:\n data = json.loads(res_info)['data']\n project_name = data['name']\n license_status = data['status']\n license_content = data['content']\n expire_time = data['expire_time']\n approval_category = data['type']\n area = data['province']\n else:\n print(f'响应失败!\\n状态码:{status}')\n input('程序暂停运行!')\n except:\n license_num = None\n dec_book_num = None\n license_office = None\n dec_date = None\n dt_id = None\n project_name = None\n license_status = None\n license_content = None\n expire_time = None\n approval_category = None\n print('\\n{0}--总第{1}条----{2}/{3}页--{0}\\n'.format('-' * 9,\n count, page, count_page))\n localtime = tm().get_localtime()\n create_time = localtime\n print(f'当前时间:{create_time}')\n print(\n f\"\"\"公司ID:{com_id}\n序号:{license_num}\n决定文书号:{dec_book_num}\n许可机关:{license_office}\n详情ID:{dt_id}\n决定日期:{dec_date}\n项目名称:{project_name}\n许可状态:{license_status}\n许可内容:{license_content}\n截止时间:{expire_time}\n审批类别:{approval_category}\n地域:{area}\n创建/入库时间:{create_time}\"\"\"\n )\n input('Pause')\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass AdmLicense:\n\n def get_com_id(self):\n sel = \"\"\"\n SELECT `com_id`,`com_name`\n FROM `com_info`\n WHERE `origin`\n IS NOT NULL AND LENGTH(`com_id`) > 5 AND `status_credit_adm_license` IS NULL\n ORDER BY RAND() LIMIT 1;\n \"\"\"\n result = db().selsts(sel)\n if result == ():\n result = [None, None]\n else:\n result = result[0]\n return result\n\n def upd_status(self, com_id, status_column, count_column, count):\n if count == -1:\n status = -1\n elif count == 0:\n status = 0\n else:\n status = 9\n upd = f\"\"\"\n UPDATE \n `com_info` \n SET\n `{status_column}` = \"{status}\",`{count_column}` = \"{count}\"\n WHERE \n `com_id` = \"{com_id}\" ;\n \"\"\"\n db().updsts(upd)\n\n def adm_license_judge(self):\n global com_id, com_name\n al = AdmLicense()\n count_adm_license = 0\n count = 0\n while count_adm_license == 0 or count_adm_license == -1:\n result = al.get_com_id()\n com_id = result[0]\n com_name = result[1]\n if com_id == None:\n pass\n else:\n count += 1\n com_url = f'https://www.qcc.com/firm_{com_id}.html'\n hds = gh().header()\n time.sleep(random.randint(3, 5))\n res = requests.get(com_url, headers=hds).text\n if '<script>window.location.href' in res:\n print('访问频繁,需验证!{adm_license_judge}')\n input('暂停')\n elif '<script>location.href=\"/user_login\"</script>' in res:\n print('Cookie失效,需更换!{adm_license_judge}')\n input('程序暂停运行!')\n elif '您的账号访问超频,请稍后访问或联系客服人员' in res:\n print('账号访问超频,请更换账号!{adm_license_judge}')\n input('程序暂停运行!')\n else:\n tree = etree.HTML(res)\n try:\n count_adm_license = tree.xpath(\n '//div[@class=\"company-nav-items\"]/span[contains(text(),\"行政许可\")]/span/text()|//div[@class=\"company-nav-items\"]/a[@data-pos=\"licenslist\"]/span/text()'\n )[0]\n count_adm_license = int(count_adm_license)\n except:\n count_adm_license = -1\n localtime = tm().get_localtime()\n print(localtime)\n if count_adm_license == 0 or count_adm_license == -1:\n print(f'计数器:{count}\\n公司ID:{com_id}\\n行政许可信息条数:无')\n else:\n print(\n 
f'计数器:{count}\\n公司ID:{com_id}\\n行政许可信息条数:{count_adm_license}'\n )\n status_column = 'status_credit_adm_license'\n count_column = 'count_credit_adm_license'\n al.upd_status(com_id, status_column, count_column,\n count_adm_license)\n return com_id, com_name, count_adm_license\n\n\nclass AdmLicenseBc(AdmLicense):\n\n def bc_judge(self):\n global com_id, com_name\n alb = AdmLicenseBc()\n count_bc = 0\n count = 0\n while count_bc == 0:\n result = alb.adm_license_judge()\n com_id = result[0]\n com_name = result[1]\n key = dk().search_key(com_name)\n if com_id == None:\n pass\n else:\n count += 1\n com_url = f'https://www.qcc.com/firm_{com_id}.html'\n hds = gh().header()\n time.sleep(random.randint(3, 5))\n res = requests.get(com_url, headers=hds).text\n if '<script>window.location.href' in res:\n print('访问频繁,需验证!{bc_judge}')\n input('暂停')\n elif '<script>location.href=\"/user_login\"</script>' in res:\n print('Cookie失效,需更换!{bc_judge}')\n input('程序暂停运行!')\n elif '您的账号访问超频,请稍后访问或联系客服人员' in res:\n print('账号访问超频,请更换账号!{bc_judge}')\n input('程序暂停运行!')\n else:\n tree = etree.HTML(res)\n try:\n count_bc = tree.xpath(\n '//div[@class=\"tcaption\"]/h3[contains(text(),\"[工商局]\")]/following-sibling::span[1]/text()'\n )[0]\n count_bc = int(count_bc)\n except:\n count_bc = 0\n localtime = tm().get_localtime()\n print(localtime)\n if count_bc == 0:\n print(f'计数器:{count}\\n公司ID:{com_id}\\n行政许可信息[工商局]条数:无')\n else:\n print(\n f'计数器:{count}\\n公司ID:{com_id}\\n行政许可信息[工商局]条数:{count_bc}'\n )\n status_column = 'status_credit_adm_license_bc'\n count_column = 'count_credit_adm_license_bc'\n alb.upd_status(com_id, status_column, count_column,\n count_bc)\n return com_id, com_name, count_bc\n\n def get_page_count(self):\n alb = AdmLicenseBc()\n result = alb.bc_judge()\n com_id = result[0]\n com_name = result[1]\n count_record = result[2]\n if count_record % 10 == 0:\n count_page = count_record // 10\n else:\n count_page = count_record // 10 + 1\n value = [com_id, com_name, count_page, 
count_record]\n return value\n\n def get_page_info(self):\n alb = AdmLicenseBc()\n value = alb.get_page_count()\n com_id = value[0]\n com_name = value[1]\n count_page = value[2]\n count_record = value[3]\n key = dk().search_key(com_name)\n count = 0\n for page in range(1, count_page + 1):\n index_url = 'https://www.qcc.com'\n page_url = (\n f'{index_url}/company_getinfos?unique={com_id}&companyname={key}&p={page}&tab=run&box=licens'\n )\n hds = gh().header()\n hds.update({'Referer': f'{index_url}/firm_{com_id}.html'})\n time.sleep(random.randint(1, 2))\n res = requests.get(page_url, headers=hds).text\n if '<script>window.location.href' in res:\n print('访问频繁,需验证!{get_page_info[2]}')\n input('暂停')\n elif '<script>location.href=\"/user_login\"</script>' in res:\n print('Cookie失效,需更换!{get_page_info[2]}')\n input('程序暂停运行!')\n elif '您的账号访问超频,请稍后访问或联系客服人员' in res:\n print('账号访问超频,请更换账号!{get_page_info[2]}')\n input('程序暂停运行!')\n else:\n tree = etree.HTML(res)\n content_li = tree.xpath(\n '//table[@class=\"ntable ntable-odd\"]/tr[position()>2]')\n for nbr, content in enumerate(content_li, 1):\n count += 1\n try:\n license_num = content.xpath('td[1]/text()')[0]\n license_doc_num = content.xpath('td[2]/text()')[0]\n license_doc_name = content.xpath('td[3]/text()')[0]\n valid_period_from = content.xpath('td[4]/text()')[0]\n valid_period_to = content.xpath('td[5]/text()')[0]\n license_office = content.xpath('td[6]/text()')[0]\n license_content = content.xpath('td[7]/text()')[0]\n except:\n license_num = None\n license_doc_num = None\n license_doc_name = None\n valid_period_from = None\n valid_period_to = None\n license_office = None\n license_content = None\n print('\\n{0}--总第{1}条----{2}/{3}页--{0}\\n'.format('-' * 9,\n count, page, count_page))\n localtime = tm().get_localtime()\n create_time = localtime\n print(f'当前时间:{create_time}')\n print(\n 
f\"\"\"公司ID:{com_id}\n序号:{license_num}\n许可文件编号:{license_doc_num}\n许可文件名称:{license_doc_name}\n有效期自:{valid_period_from}\n有效期至:{valid_period_to}\n许可机关:{license_office}\n许可内容:{license_content}\"\"\"\n )\n if license_num == None:\n ins = \"\"\"\n INSERT INTO\n `com_credit_adm_license_bc`\n (`com_id`,`license_num`,`license_doc_num`,`license_doc_name`,`valid_period_from`,\n `valid_period_to`,`license_office`,`license_content`,`create_time`)\n VALUES\n (NULL,NULL,NULL,NULL,NULL,\n NULL,NULL,NULL,NULL);\n \"\"\"\n else:\n ins = f\"\"\"\n INSERT INTO\n `com_credit_adm_license_bc`\n (`com_id`,`license_num`,`license_doc_num`,`license_doc_name`,`valid_period_from`,\n `valid_period_to`,`license_office`,`license_content`,`create_time`)\n VALUES\n (\"{com_id}\",\"{license_num}\",\"{license_doc_num}\",\"{license_doc_name}\",\"{valid_period_from}\",\n \"{valid_period_to}\",\"{license_office}\",\"{license_content}\",\"{create_time}\");\n \"\"\"\n db().inssts(ins)\n upd = f\"\"\"\n UPDATE \n `com_info` \n SET\n `status_credit_adm_license_bc` = 1\n WHERE \n `com_id` = \"{com_id}\" ;\n \"\"\"\n db().updsts(upd)\n localtime = tm().get_localtime()\n print('\\n{1}\\n{0}数据采集完成!{0}\\n{1}'.format('+' * 7, '+' * 25))\n print(f'当前时间:{localtime}\\n')\n time.sleep(3)\n\n\nclass AdmLicenseCc(AdmLicense):\n\n def cc_judge(self):\n global com_id, com_name\n alb = AdmLicenseCc()\n count_cc = 0\n count = 0\n while count_cc == 0:\n result = alb.adm_license_judge()\n com_id = result[0]\n com_name = result[1]\n key = dk().search_key(com_name)\n if com_id == None:\n pass\n else:\n count += 1\n com_url = f'https://www.qcc.com/firm_{com_id}.html'\n hds = gh().header()\n time.sleep(random.randint(3, 5))\n res = requests.get(com_url, headers=hds).text\n if '<script>window.location.href' in res:\n print('访问频繁,需验证!{cc_judge}')\n input('暂停')\n elif '<script>location.href=\"/user_login\"</script>' in res:\n print('Cookie失效,需更换!{cc_judge}')\n input('程序暂停运行!')\n elif '您的账号访问超频,请稍后访问或联系客服人员' in res:\n 
print('账号访问超频,请更换账号!{cc_judge}')\n input('程序暂停运行!')\n else:\n tree = etree.HTML(res)\n try:\n count_cc = tree.xpath(\n '//div[@class=\"tcaption\"]/h3[contains(text(),\"[信用中国]\")]/following-sibling::span[1]/text()'\n )[0]\n count_cc = int(count_cc)\n except:\n count_cc = 0\n localtime = tm().get_localtime()\n print(localtime)\n if count_cc == 0:\n print(f'计数器:{count}\\n公司ID:{com_id}\\n行政许可信息[工商局]条数:无')\n else:\n print(\n f'计数器:{count}\\n公司ID:{com_id}\\n行政许可信息[工商局]条数:{count_cc}'\n )\n status_column = 'status_credit_adm_license_cc'\n count_column = 'count_credit_adm_license_cc'\n cd.upd_status(com_id, status_column, count_column, count_cc\n )\n return com_id, com_name, count_cc\n\n def get_page_info(self):\n global project_name, license_status, license_content, expire_time, approval_category, area\n alb = AdmLicenseCc()\n value = alb.cc_judge()\n com_id = value[0]\n com_name = value[1]\n count_cc = value[2]\n key = dk().search_key(com_name)\n count = 0\n index_url = 'https://www.qcc.com'\n page_url = (\n f'{index_url}/company_getinfos?unique={com_id}&companyname={key}&p={page}&tab=run'\n )\n hds = gh().header()\n hds.update({'Referer': f'{index_url}/firm_{com_id}.html'})\n time.sleep(random.randint(3, 5))\n res = requests.get(page_url, headers=hds).text\n if '<script>window.location.href' in res:\n print('访问频繁,需验证!{cc_judge}')\n input('暂停')\n elif '<script>location.href=\"/user_login\"</script>' in res:\n print('Cookie失效,需更换!{cc_judge}')\n input('程序暂停运行!')\n elif '您的账号访问超频,请稍后访问或联系客服人员' in res:\n print('账号访问超频,请更换账号!{cc_judge}')\n input('程序暂停运行!')\n else:\n tree = etree.HTML(res)\n content_li = tree.xpath(\n '//div[@class=\"tcaption\"]/span[contains(text(),\"[信用中国]\")]/parent::div/following-sibling::table[@class=\"ntable ntable-odd\"]/tr[position()>2]'\n )\n for nbr, content in enumerate(content_li, 1):\n count += 1\n try:\n license_num = content.xpath('td[1]/text()')[0]\n dec_book_num = content.xpath('td[2]/text()')[0]\n license_office = 
content.xpath('td[3]/text()')[0]\n dec_date = content.xpath('td[4]/text()')[0]\n time.sleep(random.randint(1, 2))\n dt_id = content.xpath(\n 'td[5]/a[@class=\"xzxukeView\"]/@onclick')[0].split(\n 'xzxukeView(\"')[1].split('\")')[0]\n dt_url = 'https://www.qcc.com/company_xzxukeView'\n para = {'id': f'{dt_id}'}\n res_info = requests.post(dt_url, headers=hds, data=para\n ).text\n status = json.loads(res_info)['status']\n if status == 200:\n data = json.loads(res_info)['data']\n project_name = data['name']\n license_status = data['status']\n license_content = data['content']\n expire_time = data['expire_time']\n approval_category = data['type']\n area = data['province']\n else:\n print(f'响应失败!\\n状态码:{status}')\n input('程序暂停运行!')\n except:\n license_num = None\n dec_book_num = None\n license_office = None\n dec_date = None\n dt_id = None\n project_name = None\n license_status = None\n license_content = None\n expire_time = None\n approval_category = None\n print('\\n{0}--总第{1}条----{2}/{3}页--{0}\\n'.format('-' * 9,\n count, page, count_page))\n localtime = tm().get_localtime()\n create_time = localtime\n print(f'当前时间:{create_time}')\n print(\n f\"\"\"公司ID:{com_id}\n序号:{license_num}\n决定文书号:{dec_book_num}\n许可机关:{license_office}\n详情ID:{dt_id}\n决定日期:{dec_date}\n项目名称:{project_name}\n许可状态:{license_status}\n许可内容:{license_content}\n截止时间:{expire_time}\n审批类别:{approval_category}\n地域:{area}\n创建/入库时间:{create_time}\"\"\"\n )\n input('Pause')\n\n\n<mask token>\n",
"step-5": "#! /usr/bin/env python3\n# -*- coding:utf-8 -*-\n\"\"\"\n企查查-行政许可[工商局]\n\"\"\"\nimport json\nimport time\nimport random\nimport requests\n\nfrom lxml import etree\n\nfrom support.use_mysql import QccMysql as db\nfrom support.others import DealKey as dk\nfrom support.others import TimeInfo as tm\nfrom support.headers import GeneralHeaders as gh\n\nclass AdmLicense():\n def get_com_id(self): # 随机获取一条符合条件的公司信息\n sel = \"\"\"\n SELECT `com_id`,`com_name`\n FROM `com_info`\n WHERE `origin`\n IS NOT NULL AND LENGTH(`com_id`) > 5 AND `status_credit_adm_license` IS NULL\n ORDER BY RAND() LIMIT 1;\n \"\"\"\n\n # 测试sql#\n # sel = \"\"\"\n # SELECT `com_id`, `com_name`\n # FROM `com_info`\n # WHERE com_id = '299eee201318f0283f086b4847d69fc7';\n # \"\"\"\n # 测试sql#\n\n result = db().selsts(sel)\n if result == ():\n result = [None, None]\n else:\n result = result[0]\n return result\n\n def upd_status(self, com_id,status_column,count_column, count): # 更新com_info表相关字段状态码\n if count == -1:\n status = -1\n elif count == 0:\n status = 0\n else:\n status = 9\n upd = f\"\"\"\n UPDATE \n `com_info` \n SET\n `{status_column}` = \"{status}\",`{count_column}` = \"{count}\"\n WHERE \n `com_id` = \"{com_id}\" ;\n \"\"\"\n db().updsts(upd)\n\n def adm_license_judge(self): # 判断行政许可信息,如果有记录则执行解析,返回该公司相关信息\n global com_id, com_name\n al = AdmLicense()\n count_adm_license = 0\n count = 0\n while count_adm_license == 0 or count_adm_license == -1:\n result = al.get_com_id()\n com_id = result[0]\n com_name = result[1]\n if com_id == None:\n pass\n else:\n count += 1\n com_url = f'https://www.qcc.com/firm_{com_id}.html'\n hds = gh().header()\n time.sleep(random.randint(3, 5))\n res = requests.get(com_url, headers=hds).text\n if '<script>window.location.href' in res:\n print('访问频繁,需验证!{adm_license_judge}')\n input('暂停')\n elif '<script>location.href=\"/user_login\"</script>' in res:\n print('Cookie失效,需更换!{adm_license_judge}')\n input('程序暂停运行!')\n elif '您的账号访问超频,请稍后访问或联系客服人员' in res:\n 
print('账号访问超频,请更换账号!{adm_license_judge}')\n input('程序暂停运行!')\n else:\n tree = etree.HTML(res)\n try:\n count_adm_license = tree.xpath('//div[@class=\"company-nav-items\"]/span[contains(text(),\"行政许可\")]/span/text()|//div[@class=\"company-nav-items\"]/a[@data-pos=\"licenslist\"]/span/text()')[0]\n count_adm_license = int(count_adm_license)\n except:\n count_adm_license = -1\n localtime = tm().get_localtime() # 当前时间\n print(localtime)\n if count_adm_license == 0 or count_adm_license == -1:\n print(f'计数器:{count}\\n公司ID:{com_id}\\n行政许可信息条数:无')\n else:\n print(f'计数器:{count}\\n公司ID:{com_id}\\n行政许可信息条数:{count_adm_license}')\n status_column = 'status_credit_adm_license' #表字段名\n count_column = 'count_credit_adm_license' #表字段名\n al.upd_status(com_id,status_column,count_column,count_adm_license)\n return com_id, com_name, count_adm_license\n\nclass AdmLicenseBc(AdmLicense):\n def bc_judge(self):\n global com_id,com_name\n alb = AdmLicenseBc()\n count_bc = 0\n count = 0\n while count_bc == 0:\n result = alb.adm_license_judge()\n com_id = result[0]\n com_name = result[1]\n key = dk().search_key(com_name)\n if com_id == None:\n pass\n else:\n count += 1\n com_url = f'https://www.qcc.com/firm_{com_id}.html'\n hds = gh().header()\n time.sleep(random.randint(3, 5))\n res = requests.get(com_url, headers=hds).text\n if '<script>window.location.href' in res:\n print('访问频繁,需验证!{bc_judge}')\n input('暂停')\n elif '<script>location.href=\"/user_login\"</script>' in res:\n print('Cookie失效,需更换!{bc_judge}')\n input('程序暂停运行!')\n elif '您的账号访问超频,请稍后访问或联系客服人员' in res:\n print('账号访问超频,请更换账号!{bc_judge}')\n input('程序暂停运行!')\n else:\n tree = etree.HTML(res)\n try:\n count_bc = tree.xpath('//div[@class=\"tcaption\"]/h3[contains(text(),\"[工商局]\")]/following-sibling::span[1]/text()')[0]\n count_bc = int(count_bc)\n except:\n count_bc = 0\n localtime = tm().get_localtime() # 当前时间\n print(localtime)\n if count_bc == 0:\n print(f'计数器:{count}\\n公司ID:{com_id}\\n行政许可信息[工商局]条数:无')\n else:\n 
print(f'计数器:{count}\\n公司ID:{com_id}\\n行政许可信息[工商局]条数:{count_bc}')\n status_column = 'status_credit_adm_license_bc' # 表字段名\n count_column = 'count_credit_adm_license_bc' # 表字段名\n alb.upd_status(com_id, status_column, count_column, count_bc)\n return com_id, com_name, count_bc\n\n def get_page_count(self): # 获取页码长度\n alb = AdmLicenseBc()\n result = alb.bc_judge()\n com_id = result[0]\n com_name = result[1]\n count_record = result[2]\n if count_record % 10 == 0:\n count_page = count_record // 10\n else:\n count_page = count_record // 10 + 1\n value = [com_id, com_name, count_page, count_record]\n return value\n\n def get_page_info(self): # 解析页面内容\n alb = AdmLicenseBc()\n value = alb.get_page_count()\n com_id = value[0]\n com_name = value[1]\n count_page = value[2]\n count_record = value[3]\n key = dk().search_key(com_name)\n count = 0\n for page in range(1, count_page + 1):\n index_url = 'https://www.qcc.com'\n page_url = f'{index_url}/company_getinfos?unique={com_id}&companyname={key}&p={page}&tab=run&box=licens'\n hds = gh().header()\n hds.update({'Referer': f'{index_url}/firm_{com_id}.html'})\n time.sleep(random.randint(1, 2))\n res = requests.get(page_url, headers=hds).text\n if '<script>window.location.href' in res:\n print('访问频繁,需验证!{get_page_info[2]}')\n input('暂停')\n elif '<script>location.href=\"/user_login\"</script>' in res:\n print('Cookie失效,需更换!{get_page_info[2]}')\n input('程序暂停运行!')\n elif '您的账号访问超频,请稍后访问或联系客服人员' in res:\n print('账号访问超频,请更换账号!{get_page_info[2]}')\n input('程序暂停运行!')\n else:\n tree = etree.HTML(res)\n content_li = tree.xpath('//table[@class=\"ntable ntable-odd\"]/tr[position()>2]')\n for nbr, content in enumerate(content_li, 1):\n count += 1\n try:\n license_num = content.xpath('td[1]/text()')[0]\n license_doc_num = content.xpath('td[2]/text()')[0]\n license_doc_name = content.xpath('td[3]/text()')[0]\n valid_period_from = content.xpath('td[4]/text()')[0]\n valid_period_to = content.xpath('td[5]/text()')[0]\n license_office = 
content.xpath('td[6]/text()')[0]\n license_content = content.xpath('td[7]/text()')[0]\n except:\n license_num = None\n license_doc_num = None\n license_doc_name = None\n valid_period_from = None\n valid_period_to = None\n license_office = None\n license_content = None\n\n print('\\n{0}--总第{1}条----{2}/{3}页--{0}\\n'.format('-' * 9, count, page, count_page))\n localtime = tm().get_localtime() # 当前时间\n create_time = localtime\n print(f'当前时间:{create_time}')\n print(f'公司ID:{com_id}\\n序号:{license_num}\\n许可文件编号:{license_doc_num}\\n许可文件名称:{license_doc_name}\\n有效期自:{valid_period_from}\\n'\n f'有效期至:{valid_period_to}\\n许可机关:{license_office}\\n许可内容:{license_content}')\n if license_num == None:\n ins = \"\"\"\n INSERT INTO\n `com_credit_adm_license_bc`\n (`com_id`,`license_num`,`license_doc_num`,`license_doc_name`,`valid_period_from`,\n `valid_period_to`,`license_office`,`license_content`,`create_time`)\n VALUES\n (NULL,NULL,NULL,NULL,NULL,\n NULL,NULL,NULL,NULL);\n \"\"\"\n else:\n ins = f\"\"\"\n INSERT INTO\n `com_credit_adm_license_bc`\n (`com_id`,`license_num`,`license_doc_num`,`license_doc_name`,`valid_period_from`,\n `valid_period_to`,`license_office`,`license_content`,`create_time`)\n VALUES\n (\"{com_id}\",\"{license_num}\",\"{license_doc_num}\",\"{license_doc_name}\",\"{valid_period_from}\",\n \"{valid_period_to}\",\"{license_office}\",\"{license_content}\",\"{create_time}\");\n \"\"\"\n db().inssts(ins)\n\n upd = f\"\"\"\n UPDATE \n `com_info` \n SET\n `status_credit_adm_license_bc` = 1\n WHERE \n `com_id` = \"{com_id}\" ;\n \"\"\"\n db().updsts(upd)\n\n localtime = tm().get_localtime() # 当前时间\n print('\\n{1}\\n{0}数据采集完成!{0}\\n{1}'.format('+' * 7, '+' * 25))\n print(f'当前时间:{localtime}\\n')\n time.sleep(3)\n\nclass AdmLicenseCc(AdmLicense): #行政许可[信用中国]\n def cc_judge(self):\n global com_id,com_name\n alb = AdmLicenseCc()\n count_cc = 0\n count = 0\n while count_cc == 0:\n result = alb.adm_license_judge()\n com_id = result[0]\n com_name = result[1]\n key = 
dk().search_key(com_name)\n if com_id == None:\n pass\n else:\n count += 1\n com_url = f'https://www.qcc.com/firm_{com_id}.html'\n hds = gh().header()\n time.sleep(random.randint(3, 5))\n res = requests.get(com_url, headers=hds).text\n if '<script>window.location.href' in res:\n print('访问频繁,需验证!{cc_judge}')\n input('暂停')\n elif '<script>location.href=\"/user_login\"</script>' in res:\n print('Cookie失效,需更换!{cc_judge}')\n input('程序暂停运行!')\n elif '您的账号访问超频,请稍后访问或联系客服人员' in res:\n print('账号访问超频,请更换账号!{cc_judge}')\n input('程序暂停运行!')\n else:\n tree = etree.HTML(res)\n try:\n count_cc = tree.xpath('//div[@class=\"tcaption\"]/h3[contains(text(),\"[信用中国]\")]/following-sibling::span[1]/text()')[0]\n count_cc = int(count_cc)\n except:\n count_cc = 0\n localtime = tm().get_localtime() # 当前时间\n print(localtime)\n if count_cc == 0:\n print(f'计数器:{count}\\n公司ID:{com_id}\\n行政许可信息[工商局]条数:无')\n else:\n print(f'计数器:{count}\\n公司ID:{com_id}\\n行政许可信息[工商局]条数:{count_cc}')\n status_column = 'status_credit_adm_license_cc' # 表字段名\n count_column = 'count_credit_adm_license_cc' # 表字段名\n cd.upd_status(com_id, status_column, count_column, count_cc)\n return com_id, com_name, count_cc\n\n def get_page_info(self): # 解析页面内容\n global project_name,license_status,license_content,expire_time,approval_category,area\n alb = AdmLicenseCc()\n value = alb.cc_judge()\n com_id = value[0]\n com_name = value[1]\n count_cc = value[2]\n key = dk().search_key(com_name)\n count = 0\n index_url = 'https://www.qcc.com'\n page_url = f'{index_url}/company_getinfos?unique={com_id}&companyname={key}&p={page}&tab=run'\n hds = gh().header()\n hds.update({'Referer': f'{index_url}/firm_{com_id}.html'})\n time.sleep(random.randint(3, 5))\n res = requests.get(page_url, headers=hds).text\n if '<script>window.location.href' in res:\n print('访问频繁,需验证!{cc_judge}')\n input('暂停')\n elif '<script>location.href=\"/user_login\"</script>' in res:\n print('Cookie失效,需更换!{cc_judge}')\n input('程序暂停运行!')\n elif '您的账号访问超频,请稍后访问或联系客服人员' in 
res:\n print('账号访问超频,请更换账号!{cc_judge}')\n input('程序暂停运行!')\n else:\n tree = etree.HTML(res)\n content_li = tree.xpath('//div[@class=\"tcaption\"]/span[contains(text(),\"[信用中国]\")]/parent::div/following-sibling::table[@class=\"ntable ntable-odd\"]/tr[position()>2]')\n for nbr, content in enumerate(content_li, 1):\n count += 1\n try:\n license_num = content.xpath('td[1]/text()')[0]\n dec_book_num = content.xpath('td[2]/text()')[0]\n license_office = content.xpath('td[3]/text()')[0]\n dec_date = content.xpath('td[4]/text()')[0]\n time.sleep(random.randint(1, 2))\n dt_id = content.xpath('td[5]/a[@class=\"xzxukeView\"]/@onclick')[0].split('xzxukeView(\"')[1].split('\")')[0]\n dt_url = 'https://www.qcc.com/company_xzxukeView'\n para = {'id':f'{dt_id}'}\n res_info = requests.post(dt_url, headers=hds,data=para).text\n status = json.loads(res_info)['status']\n if status == 200:\n data = json.loads(res_info)['data']\n project_name = data['name']\n license_status = data['status']\n license_content = data['content']\n expire_time = data['expire_time']\n approval_category = data['type']\n area = data['province']\n else:\n print(f'响应失败!\\n状态码:{status}')\n input('程序暂停运行!')\n except:\n license_num = None\n dec_book_num = None\n license_office = None\n dec_date = None\n dt_id = None\n project_name = None\n license_status = None\n license_content = None\n expire_time = None\n approval_category = None\n print('\\n{0}--总第{1}条----{2}/{3}页--{0}\\n'.format('-' * 9, count, page, count_page))\n localtime = tm().get_localtime() # 当前时间\n create_time = localtime\n print(f'当前时间:{create_time}')\n print(f'公司ID:{com_id}\\n序号:{license_num}\\n决定文书号:{dec_book_num}\\n许可机关:{license_office}\\n详情ID:{dt_id}\\n'\n f'决定日期:{dec_date}\\n项目名称:{project_name}\\n许可状态:{license_status}\\n许可内容:{license_content}\\n截止时间:{expire_time}\\n'\n f'审批类别:{approval_category}\\n地域:{area}\\n创建/入库时间:{create_time}')\n input('Pause')\n\n\n\n\n\nif __name__ == '__main__':\n cc = AdmLicenseCc()\n cc.get_page_info()",
"step-ids": [
7,
9,
10,
11,
14
]
}
|
[
7,
9,
10,
11,
14
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
matplotlib.style.use('ggplot')
<|reserved_special_token_0|>
sys.path.append('masterThesisPack/')
<|reserved_special_token_0|>
raw.wind_along.plot(ax=ax)
ax.axhline(y=3 * std, c='k', ls='dashed')
ax.axhline(y=-3 * std, c='k', ls='dashed')
ax.set_ylabel('Vento a 10m de altura [m.s$^{-1}$]')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
matplotlib.style.use('ggplot')
<|reserved_special_token_0|>
sys.path.append('masterThesisPack/')
<|reserved_special_token_0|>
BASE_DIR = oceano.make_dir()
DATA_DIR = BASE_DIR.replace('github/', 'ventopcse/data/')
SAVE_DIR = BASE_DIR + 'dissertacao/presentation/figures/'
raw = xr.open_dataset(DATA_DIR + 'Est_lajeSantos/lajesantos.nc')
raw = raw.to_dataframe()
data = raw.copy()
treat = data.copy()
treat[treat > 3 * treat.std()] = np.nan
std = treat.wind_along.std()
fig, ax = plt.subplots()
raw.wind_along.plot(ax=ax)
ax.axhline(y=3 * std, c='k', ls='dashed')
ax.axhline(y=-3 * std, c='k', ls='dashed')
ax.set_ylabel('Vento a 10m de altura [m.s$^{-1}$]')
<|reserved_special_token_1|>
import glob
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import xarray as xr
import pandas as pd
import os
import pickle
from scipy.interpolate import griddata
from mpl_toolkits.basemap import Basemap
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib import dates
import datetime
import matplotlib
matplotlib.style.use('ggplot')
import sys
sys.path.append('masterThesisPack/')
import masterThesisPack as oceano
BASE_DIR = oceano.make_dir()
DATA_DIR = BASE_DIR.replace('github/', 'ventopcse/data/')
SAVE_DIR = BASE_DIR + 'dissertacao/presentation/figures/'
raw = xr.open_dataset(DATA_DIR + 'Est_lajeSantos/lajesantos.nc')
raw = raw.to_dataframe()
data = raw.copy()
treat = data.copy()
treat[treat > 3 * treat.std()] = np.nan
std = treat.wind_along.std()
fig, ax = plt.subplots()
raw.wind_along.plot(ax=ax)
ax.axhline(y=3 * std, c='k', ls='dashed')
ax.axhline(y=-3 * std, c='k', ls='dashed')
ax.set_ylabel('Vento a 10m de altura [m.s$^{-1}$]')
<|reserved_special_token_1|>
# add some description here
import glob
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import xarray as xr
import pandas as pd
import os
import pickle
from scipy.interpolate import griddata
from mpl_toolkits.basemap import Basemap
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib import dates
import datetime
import matplotlib
matplotlib.style.use('ggplot')
import sys
sys.path.append('masterThesisPack/')
import masterThesisPack as oceano
BASE_DIR = oceano.make_dir()
DATA_DIR = BASE_DIR.replace('github/', 'ventopcse/data/')
SAVE_DIR = BASE_DIR + 'dissertacao/presentation/figures/'
# importing laje de santos data
raw = xr.open_dataset(DATA_DIR+'Est_lajeSantos/lajesantos.nc')
raw = raw.to_dataframe()
# cut only a period
# raw = raw['2015-04':]
data = raw.copy()
treat = data.copy()
treat[treat > 3*treat.std()] = np.nan
std = treat.wind_along.std()
fig,ax = plt.subplots()
raw.wind_along.plot(ax=ax)
ax.axhline(y=3*std,c='k',ls='dashed')
ax.axhline(y=-3*std,c='k',ls='dashed')
ax.set_ylabel(r'Vento a 10m de altura [m.s$^{-1}$]')
# plt.savefig(SAVE_DIR.replace('github','gitlab') + 'qualityControl.png',dpi=250)
|
flexible
|
{
"blob_id": "4c1fea4dcf143ec976d3956039616963760d5af6",
"index": 5030,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nmatplotlib.style.use('ggplot')\n<mask token>\nsys.path.append('masterThesisPack/')\n<mask token>\nraw.wind_along.plot(ax=ax)\nax.axhline(y=3 * std, c='k', ls='dashed')\nax.axhline(y=-3 * std, c='k', ls='dashed')\nax.set_ylabel('Vento a 10m de altura [m.s$^{-1}$]')\n",
"step-3": "<mask token>\nmatplotlib.style.use('ggplot')\n<mask token>\nsys.path.append('masterThesisPack/')\n<mask token>\nBASE_DIR = oceano.make_dir()\nDATA_DIR = BASE_DIR.replace('github/', 'ventopcse/data/')\nSAVE_DIR = BASE_DIR + 'dissertacao/presentation/figures/'\nraw = xr.open_dataset(DATA_DIR + 'Est_lajeSantos/lajesantos.nc')\nraw = raw.to_dataframe()\ndata = raw.copy()\ntreat = data.copy()\ntreat[treat > 3 * treat.std()] = np.nan\nstd = treat.wind_along.std()\nfig, ax = plt.subplots()\nraw.wind_along.plot(ax=ax)\nax.axhline(y=3 * std, c='k', ls='dashed')\nax.axhline(y=-3 * std, c='k', ls='dashed')\nax.set_ylabel('Vento a 10m de altura [m.s$^{-1}$]')\n",
"step-4": "import glob\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport xarray as xr\nimport pandas as pd\nimport os\nimport pickle\nfrom scipy.interpolate import griddata\nfrom mpl_toolkits.basemap import Basemap\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\nfrom matplotlib import dates\nimport datetime\nimport matplotlib\nmatplotlib.style.use('ggplot')\nimport sys\nsys.path.append('masterThesisPack/')\nimport masterThesisPack as oceano\nBASE_DIR = oceano.make_dir()\nDATA_DIR = BASE_DIR.replace('github/', 'ventopcse/data/')\nSAVE_DIR = BASE_DIR + 'dissertacao/presentation/figures/'\nraw = xr.open_dataset(DATA_DIR + 'Est_lajeSantos/lajesantos.nc')\nraw = raw.to_dataframe()\ndata = raw.copy()\ntreat = data.copy()\ntreat[treat > 3 * treat.std()] = np.nan\nstd = treat.wind_along.std()\nfig, ax = plt.subplots()\nraw.wind_along.plot(ax=ax)\nax.axhline(y=3 * std, c='k', ls='dashed')\nax.axhline(y=-3 * std, c='k', ls='dashed')\nax.set_ylabel('Vento a 10m de altura [m.s$^{-1}$]')\n",
"step-5": "# add some description here\n\nimport glob\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport xarray as xr\nimport pandas as pd\nimport os\nimport pickle\nfrom scipy.interpolate import griddata\nfrom mpl_toolkits.basemap import Basemap\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\nfrom matplotlib import dates\nimport datetime\n\nimport matplotlib\nmatplotlib.style.use('ggplot')\n\nimport sys\nsys.path.append('masterThesisPack/')\n\nimport masterThesisPack as oceano\n\nBASE_DIR = oceano.make_dir()\nDATA_DIR = BASE_DIR.replace('github/', 'ventopcse/data/')\nSAVE_DIR = BASE_DIR + 'dissertacao/presentation/figures/'\n\n# importing laje de santos data\nraw = xr.open_dataset(DATA_DIR+'Est_lajeSantos/lajesantos.nc')\nraw = raw.to_dataframe()\n\n# cut only a period\n# raw = raw['2015-04':]\ndata = raw.copy()\ntreat = data.copy()\ntreat[treat > 3*treat.std()] = np.nan\n\nstd = treat.wind_along.std()\n\nfig,ax = plt.subplots()\n\nraw.wind_along.plot(ax=ax)\nax.axhline(y=3*std,c='k',ls='dashed')\nax.axhline(y=-3*std,c='k',ls='dashed')\nax.set_ylabel(r'Vento a 10m de altura [m.s$^{-1}$]')\n\n\n# plt.savefig(SAVE_DIR.replace('github','gitlab') + 'qualityControl.png',dpi=250)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
load_dotenv(dotenv_path=env_path)
<|reserved_special_token_0|>
@botCommand.event
async def on_ready():
print(f'{client.user} is connected to the following guild:\n')
@botCommand.command(pass_context=True, aliases=['y'])
async def youtube(ctx, *, search):
query_string = urllib.parse.urlencode({'search_query': search})
htm_content = urllib.request.urlopen('http://www.youtube.com/results?' +
query_string)
print('/watch\\?v=(.{11})')
search_results = re.findall('/watch\\?v=(.{11})', htm_content.read().
decode('utf-8'))
await ctx.send('http://www.youtube.com/watch?v=' + search_results[0])
<|reserved_special_token_0|>
@botCommand.command(pass_context=True, aliases=['p', 'play'])
async def plays(ctx, *, url):
server = ctx.message.guild
global voice
channel = ctx.message.author.voice.channel
if not str(url).startswith('http'):
query_string = urllib.parse.urlencode({'search_query': url})
htm_content = urllib.request.urlopen(
'http://www.youtube.com/results?' + query_string)
print('/watch\\?v=(.{11})')
search_results = re.findall('/watch\\?v=(.{11})', htm_content.read(
).decode('utf-8'))
url = 'http://www.youtube.com/watch?v=' + search_results[0]
if voice:
print('ok')
else:
voice = await channel.connect()
await ctx.send(f'Joined {channel}')
def check_queue():
print('Test')
Queue_infile = os.path.isdir('./Queue')
if Queue_infile is True:
DIR = os.path.abspath(os.path.realpath('Queue'))
length = len(os.listdir(DIR))
still_q = length - 1
try:
first_file = os.listdir(DIR)[0]
except:
print('No more queue\n')
queues.clear()
return
main_location = os.path.dirname(os.path.realpath(__file__))
song_path = os.path.abspath(os.path.realpath('Queue') + '\\' +
first_file)
if length != 0:
print('Song done , playing next queue\n')
print(f'song still in queue: {still_q}')
song_there = os.path.isfile('song.mp3')
if song_there:
os.remove('song.mp3')
shutil.move(song_path, main_location)
for file in os.listdir('./'):
if file.endswith('.mp3'):
os.rename(file, 'song.mp3')
voice.play(discord.FFmpegPCMAudio('song.mp3'), after=lambda
e: check_queue())
voice.source = discord.PCMVolumeTransformer(voice.source)
voice.source.volume = 0.07
else:
queues.clear()
return
else:
queues.clear()
print('No song founds')
def add_queue():
print('Test')
Queue_infile = os.path.isdir('./Queue')
if Queue_infile is False:
os.mkdir('Queue')
DIR = os.path.abspath(os.path.realpath('Queue'))
q_num = len(os.listdir(DIR))
q_num += 1
add_queue = True
while add_queue:
if q_num in queues:
q_num += 1
else:
add_queue = False
queues[q_num] = q_num
queue_path = os.path.abspath(os.path.realpath('Queue') +
f'\\song{q_num}.%(ext)s')
ydl_opts = {'format': 'bestaudio/best', 'quiet': True, 'outtmpl':
queue_path, 'postprocessors': [{'key': 'FFmpegExtractAudio',
'preferredcodec': 'mp3', 'preferredquality': '192'}]}
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
print('Downloading audio now\n')
ydl.download([url])
print('Song added to queue\n')
song_there = os.path.isfile('song.mp3')
try:
if song_there:
os.remove('song.mp3')
queues.clear()
print('remove old song file')
except PermissionError:
add_queue()
await ctx.send('Adding song to the queue')
return
Queue_infile = os.path.isdir('./Queue')
try:
Queue_folder = './Queue'
if Queue_infile is True:
print('Removed old Queue folder')
shutil.rmtree(Queue_folder)
except:
print('No old queue folder')
await ctx.send('Getting everything ready now')
ydl_opts = {'format': 'bestaudio/best', 'quiet': True, 'postprocessors':
[{'key': 'FFmpegExtractAudio', 'preferredcodec': 'mp3',
'preferredquality': '192'}]}
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
print('Downloading audio now\n')
ydl.download([url])
for file in os.listdir('./'):
if file.endswith('.mp3'):
name = file
print(f'renamed file : {file}\n')
os.rename(file, 'song.mp3')
voice.play(discord.FFmpegPCMAudio('song.mp3'), after=lambda e:
check_queue())
voice.source = discord.PCMVolumeTransformer(voice.source)
voice.source.volume = 0.07
nname = name.rsplit('-', 1)
await ctx.send(f'Playing :notes: `{nname[0]}` :notes:')
print('Playing\n')
<|reserved_special_token_0|>
@botCommand.command(pass_context=True)
async def ping(ctx):
await ctx.send('test')
@botCommand.command(pass_context=True)
async def join(ctx):
global vc
channel = ctx.message.author.voice.channel
vc = channel.connect()
await channel.connect()
@botCommand.event
async def on_message(message):
if message.author == client.user:
return
msg1 = '<@333863300892721152> davis kok pepe ya'
if message.content == 'command list':
await message.channel.send(
'- davis mah\n- davis\n- .plays + youtubeURL')
if message.content == 'davis mah':
for x in range(3):
await message.channel.send('davis mah paling jago')
if message.content == 'davis':
response = msg1
for x in range(3):
await message.channel.send(response)
if message.content == 'bel sama jessica':
response = 'jessica lah , https://imgur.com/TrtyIVa'
await message.channel.send(response)
if message.content == 'ig jessica':
response = 'https://www.instagram.com/h.yojeong/'
await message.channel.send(response)
await botCommand.process_commands(message)
botCommand.run(token)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
env_path = Path('.') / '.env'
load_dotenv(dotenv_path=env_path)
client = discord.Client()
botCommand = commands.Bot(command_prefix='.')
token = os.getenv('DISCORD_TOKEN')
players = {}
@botCommand.event
async def on_ready():
print(f'{client.user} is connected to the following guild:\n')
@botCommand.command(pass_context=True, aliases=['y'])
async def youtube(ctx, *, search):
query_string = urllib.parse.urlencode({'search_query': search})
htm_content = urllib.request.urlopen('http://www.youtube.com/results?' +
query_string)
print('/watch\\?v=(.{11})')
search_results = re.findall('/watch\\?v=(.{11})', htm_content.read().
decode('utf-8'))
await ctx.send('http://www.youtube.com/watch?v=' + search_results[0])
voice = None
q_num = 0
@botCommand.command(pass_context=True, aliases=['p', 'play'])
async def plays(ctx, *, url):
server = ctx.message.guild
global voice
channel = ctx.message.author.voice.channel
if not str(url).startswith('http'):
query_string = urllib.parse.urlencode({'search_query': url})
htm_content = urllib.request.urlopen(
'http://www.youtube.com/results?' + query_string)
print('/watch\\?v=(.{11})')
search_results = re.findall('/watch\\?v=(.{11})', htm_content.read(
).decode('utf-8'))
url = 'http://www.youtube.com/watch?v=' + search_results[0]
if voice:
print('ok')
else:
voice = await channel.connect()
await ctx.send(f'Joined {channel}')
def check_queue():
print('Test')
Queue_infile = os.path.isdir('./Queue')
if Queue_infile is True:
DIR = os.path.abspath(os.path.realpath('Queue'))
length = len(os.listdir(DIR))
still_q = length - 1
try:
first_file = os.listdir(DIR)[0]
except:
print('No more queue\n')
queues.clear()
return
main_location = os.path.dirname(os.path.realpath(__file__))
song_path = os.path.abspath(os.path.realpath('Queue') + '\\' +
first_file)
if length != 0:
print('Song done , playing next queue\n')
print(f'song still in queue: {still_q}')
song_there = os.path.isfile('song.mp3')
if song_there:
os.remove('song.mp3')
shutil.move(song_path, main_location)
for file in os.listdir('./'):
if file.endswith('.mp3'):
os.rename(file, 'song.mp3')
voice.play(discord.FFmpegPCMAudio('song.mp3'), after=lambda
e: check_queue())
voice.source = discord.PCMVolumeTransformer(voice.source)
voice.source.volume = 0.07
else:
queues.clear()
return
else:
queues.clear()
print('No song founds')
def add_queue():
print('Test')
Queue_infile = os.path.isdir('./Queue')
if Queue_infile is False:
os.mkdir('Queue')
DIR = os.path.abspath(os.path.realpath('Queue'))
q_num = len(os.listdir(DIR))
q_num += 1
add_queue = True
while add_queue:
if q_num in queues:
q_num += 1
else:
add_queue = False
queues[q_num] = q_num
queue_path = os.path.abspath(os.path.realpath('Queue') +
f'\\song{q_num}.%(ext)s')
ydl_opts = {'format': 'bestaudio/best', 'quiet': True, 'outtmpl':
queue_path, 'postprocessors': [{'key': 'FFmpegExtractAudio',
'preferredcodec': 'mp3', 'preferredquality': '192'}]}
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
print('Downloading audio now\n')
ydl.download([url])
print('Song added to queue\n')
song_there = os.path.isfile('song.mp3')
try:
if song_there:
os.remove('song.mp3')
queues.clear()
print('remove old song file')
except PermissionError:
add_queue()
await ctx.send('Adding song to the queue')
return
Queue_infile = os.path.isdir('./Queue')
try:
Queue_folder = './Queue'
if Queue_infile is True:
print('Removed old Queue folder')
shutil.rmtree(Queue_folder)
except:
print('No old queue folder')
await ctx.send('Getting everything ready now')
ydl_opts = {'format': 'bestaudio/best', 'quiet': True, 'postprocessors':
[{'key': 'FFmpegExtractAudio', 'preferredcodec': 'mp3',
'preferredquality': '192'}]}
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
print('Downloading audio now\n')
ydl.download([url])
for file in os.listdir('./'):
if file.endswith('.mp3'):
name = file
print(f'renamed file : {file}\n')
os.rename(file, 'song.mp3')
voice.play(discord.FFmpegPCMAudio('song.mp3'), after=lambda e:
check_queue())
voice.source = discord.PCMVolumeTransformer(voice.source)
voice.source.volume = 0.07
nname = name.rsplit('-', 1)
await ctx.send(f'Playing :notes: `{nname[0]}` :notes:')
print('Playing\n')
queues = {}
@botCommand.command(pass_context=True)
async def ping(ctx):
await ctx.send('test')
@botCommand.command(pass_context=True)
async def join(ctx):
global vc
channel = ctx.message.author.voice.channel
vc = channel.connect()
await channel.connect()
@botCommand.event
async def on_message(message):
if message.author == client.user:
return
msg1 = '<@333863300892721152> davis kok pepe ya'
if message.content == 'command list':
await message.channel.send(
'- davis mah\n- davis\n- .plays + youtubeURL')
if message.content == 'davis mah':
for x in range(3):
await message.channel.send('davis mah paling jago')
if message.content == 'davis':
response = msg1
for x in range(3):
await message.channel.send(response)
if message.content == 'bel sama jessica':
response = 'jessica lah , https://imgur.com/TrtyIVa'
await message.channel.send(response)
if message.content == 'ig jessica':
response = 'https://www.instagram.com/h.yojeong/'
await message.channel.send(response)
await botCommand.process_commands(message)
botCommand.run(token)
<|reserved_special_token_1|>
import os
import shutil
import discord
import youtube_dl
from discord.ext import commands
import urllib.parse
import urllib.request
import re
import dotenv
from pathlib import Path
from dotenv import load_dotenv
env_path = Path('.') / '.env'
load_dotenv(dotenv_path=env_path)
client = discord.Client()
botCommand = commands.Bot(command_prefix='.')
token = os.getenv('DISCORD_TOKEN')
players = {}
@botCommand.event
async def on_ready():
print(f'{client.user} is connected to the following guild:\n')
@botCommand.command(pass_context=True, aliases=['y'])
async def youtube(ctx, *, search):
query_string = urllib.parse.urlencode({'search_query': search})
htm_content = urllib.request.urlopen('http://www.youtube.com/results?' +
query_string)
print('/watch\\?v=(.{11})')
search_results = re.findall('/watch\\?v=(.{11})', htm_content.read().
decode('utf-8'))
await ctx.send('http://www.youtube.com/watch?v=' + search_results[0])
voice = None
q_num = 0
@botCommand.command(pass_context=True, aliases=['p', 'play'])
async def plays(ctx, *, url):
server = ctx.message.guild
global voice
channel = ctx.message.author.voice.channel
if not str(url).startswith('http'):
query_string = urllib.parse.urlencode({'search_query': url})
htm_content = urllib.request.urlopen(
'http://www.youtube.com/results?' + query_string)
print('/watch\\?v=(.{11})')
search_results = re.findall('/watch\\?v=(.{11})', htm_content.read(
).decode('utf-8'))
url = 'http://www.youtube.com/watch?v=' + search_results[0]
if voice:
print('ok')
else:
voice = await channel.connect()
await ctx.send(f'Joined {channel}')
def check_queue():
print('Test')
Queue_infile = os.path.isdir('./Queue')
if Queue_infile is True:
DIR = os.path.abspath(os.path.realpath('Queue'))
length = len(os.listdir(DIR))
still_q = length - 1
try:
first_file = os.listdir(DIR)[0]
except:
print('No more queue\n')
queues.clear()
return
main_location = os.path.dirname(os.path.realpath(__file__))
song_path = os.path.abspath(os.path.realpath('Queue') + '\\' +
first_file)
if length != 0:
print('Song done , playing next queue\n')
print(f'song still in queue: {still_q}')
song_there = os.path.isfile('song.mp3')
if song_there:
os.remove('song.mp3')
shutil.move(song_path, main_location)
for file in os.listdir('./'):
if file.endswith('.mp3'):
os.rename(file, 'song.mp3')
voice.play(discord.FFmpegPCMAudio('song.mp3'), after=lambda
e: check_queue())
voice.source = discord.PCMVolumeTransformer(voice.source)
voice.source.volume = 0.07
else:
queues.clear()
return
else:
queues.clear()
print('No song founds')
def add_queue():
print('Test')
Queue_infile = os.path.isdir('./Queue')
if Queue_infile is False:
os.mkdir('Queue')
DIR = os.path.abspath(os.path.realpath('Queue'))
q_num = len(os.listdir(DIR))
q_num += 1
add_queue = True
while add_queue:
if q_num in queues:
q_num += 1
else:
add_queue = False
queues[q_num] = q_num
queue_path = os.path.abspath(os.path.realpath('Queue') +
f'\\song{q_num}.%(ext)s')
ydl_opts = {'format': 'bestaudio/best', 'quiet': True, 'outtmpl':
queue_path, 'postprocessors': [{'key': 'FFmpegExtractAudio',
'preferredcodec': 'mp3', 'preferredquality': '192'}]}
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
print('Downloading audio now\n')
ydl.download([url])
print('Song added to queue\n')
song_there = os.path.isfile('song.mp3')
try:
if song_there:
os.remove('song.mp3')
queues.clear()
print('remove old song file')
except PermissionError:
add_queue()
await ctx.send('Adding song to the queue')
return
Queue_infile = os.path.isdir('./Queue')
try:
Queue_folder = './Queue'
if Queue_infile is True:
print('Removed old Queue folder')
shutil.rmtree(Queue_folder)
except:
print('No old queue folder')
await ctx.send('Getting everything ready now')
ydl_opts = {'format': 'bestaudio/best', 'quiet': True, 'postprocessors':
[{'key': 'FFmpegExtractAudio', 'preferredcodec': 'mp3',
'preferredquality': '192'}]}
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
print('Downloading audio now\n')
ydl.download([url])
for file in os.listdir('./'):
if file.endswith('.mp3'):
name = file
print(f'renamed file : {file}\n')
os.rename(file, 'song.mp3')
voice.play(discord.FFmpegPCMAudio('song.mp3'), after=lambda e:
check_queue())
voice.source = discord.PCMVolumeTransformer(voice.source)
voice.source.volume = 0.07
nname = name.rsplit('-', 1)
await ctx.send(f'Playing :notes: `{nname[0]}` :notes:')
print('Playing\n')
queues = {}
@botCommand.command(pass_context=True)
async def ping(ctx):
await ctx.send('test')
@botCommand.command(pass_context=True)
async def join(ctx):
global vc
channel = ctx.message.author.voice.channel
vc = channel.connect()
await channel.connect()
@botCommand.event
async def on_message(message):
    """Reply to a fixed set of chat triggers, then hand the message to the
    command processor so prefixed commands (e.g. ``.plays``) still run."""
    # Ignore the bot's own messages to avoid reply loops.
    if message.author == client.user:
        return
    # Canned mention reply used by the 'davis' trigger below.
    msg1 = '<@333863300892721152> davis kok pepe ya'
    if message.content == 'command list':
        await message.channel.send(
            '- davis mah\n- davis\n- .plays + youtubeURL')
    if message.content == 'davis mah':
        # Spam the reply three times on purpose.
        for x in range(3):
            await message.channel.send('davis mah paling jago')
    if message.content == 'davis':
        response = msg1
        for x in range(3):
            await message.channel.send(response)
    if message.content == 'bel sama jessica':
        response = 'jessica lah , https://imgur.com/TrtyIVa'
        await message.channel.send(response)
    if message.content == 'ig jessica':
        response = 'https://www.instagram.com/h.yojeong/'
        await message.channel.send(response)
    # Required when overriding on_message; otherwise commands never fire.
    await botCommand.process_commands(message)
# Blocking call: start the gateway connection with the token from .env.
botCommand.run(token)
<|reserved_special_token_1|>
# bot.py
import os
import shutil
import discord
import youtube_dl
from discord.ext import commands
import urllib.parse
import urllib.request
import re
import dotenv
from pathlib import Path # Python 3.6+ only
from dotenv import load_dotenv
# Load DISCORD_TOKEN (and any other settings) from a local .env file.
env_path = Path('.') / '.env'
load_dotenv(dotenv_path=env_path)
# Two gateway clients exist: commands are registered on `botCommand`
# ("." prefix) while `client` is only used for self-message checks.
client = discord.Client()
botCommand = commands.Bot(command_prefix='.')
token = os.getenv("DISCORD_TOKEN")
# NOTE(review): `players` appears unused in this chunk -- confirm before removing.
players = {}
@botCommand.event
async def on_ready():
    """Log a startup banner once the bot's gateway connection is ready."""
    banner = f'{client.user} is connected to the following guild:\n'
    print(banner)
@botCommand.command(pass_context=True, aliases=['y'])
async def youtube(ctx, *, search):
    """Search YouTube for *search* and send the first result's URL.

    Parameters
    ----------
    search : str
        Free-text query typed after the command.

    Fixes: the HTTP response is now closed deterministically (the original
    leaked the socket), and an empty result set no longer raises IndexError.
    """
    query_string = urllib.parse.urlencode({'search_query': search})
    # Context manager closes the response instead of leaking it.
    with urllib.request.urlopen(
            'http://www.youtube.com/results?' + query_string) as resp:
        page = resp.read().decode('utf-8')
    # Each watch link embeds an 11-character video id.
    video_ids = re.findall(r'/watch\?v=(.{11})', page)
    if not video_ids:
        # Robustness: the original indexed [0] and crashed on no matches.
        await ctx.send('No results found for that search.')
        return
    await ctx.send('http://www.youtube.com/watch?v=' + video_ids[0])
# Shared voice connection; set by `plays` once the bot joins a channel.
voice = None
# Module-level queue counter default (add_queue recomputes its own local copy).
q_num = 0
@botCommand.command(pass_context=True, aliases=['p', 'play'])
async def plays(ctx, *, url):
    """Download a track with youtube-dl and play it in the caller's voice channel.

    Parameters
    ----------
    ctx : discord.py command context.
    url : str
        Either an http(s) link or free text to search on YouTube.

    Side effects: writes/overwrites ``song.mp3`` in the working directory,
    manages a ``./Queue`` folder of pending downloads, and mutates the
    module-level ``voice`` and ``queues`` globals.
    """
    server = ctx.message.guild
    global voice
    channel = ctx.message.author.voice.channel
    # Treat anything that is not a link as a search query and take the
    # first video id found on the results page.
    if not str(url).startswith('http'):
        query_string = urllib.parse.urlencode({
            'search_query': url
        })
        # NOTE(review): response object is never closed (socket leak).
        htm_content = urllib.request.urlopen(
            'http://www.youtube.com/results?' + query_string
        )
        print(r'/watch\?v=(.{11})')
        search_results = re.findall(r'/watch\?v=(.{11})', htm_content.read().decode('utf-8'))
        # NOTE(review): IndexError here if the search yields no matches.
        url = 'http://www.youtube.com/watch?v=' + search_results[0]
    # Reuse an existing voice connection, otherwise join the caller's channel.
    if voice:
        print("ok")
    else:
        voice = await channel.connect()
        await ctx.send(f"Joined {channel}")
    # if voice is None:
    #     voice = await channel.connect()
    # song_there = os.path.isfile("song.mp3")
    def check_queue():
        # Playback-finished callback: promote the oldest file in ./Queue to
        # song.mp3 and keep playing until the queue folder is exhausted.
        print('Test')
        Queue_infile = os.path.isdir("./Queue")
        if Queue_infile is True:
            DIR = os.path.abspath(os.path.realpath("Queue"))
            length = len(os.listdir(DIR))
            still_q = length - 1
            try:
                first_file = os.listdir(DIR)[0]
            except:
                # NOTE(review): bare except; an empty folder raises IndexError.
                print("No more queue\n")
                queues.clear()
                return
            main_location = os.path.dirname(os.path.realpath(__file__))
            # NOTE(review): "\\" separator assumes Windows paths -- confirm.
            song_path = os.path.abspath(os.path.realpath("Queue") + "\\" + first_file)
            if length != 0:
                print("Song done , playing next queue\n")
                print(f"song still in queue: {still_q}")
                song_there = os.path.isfile("song.mp3")
                if song_there:
                    os.remove("song.mp3")
                # Move the next queued download into the working directory
                # and rename whatever .mp3 lands there to song.mp3.
                shutil.move(song_path, main_location)
                for file in os.listdir("./"):
                    if file.endswith(".mp3"):
                        os.rename(file, 'song.mp3')
                voice.play(discord.FFmpegPCMAudio('song.mp3'), after=lambda e: check_queue())
                voice.source = discord.PCMVolumeTransformer(voice.source)
                voice.source.volume = 0.07
            else:
                queues.clear()
                return
        else:
            queues.clear()
            print("No song founds")
    def add_queue():
        # Download the requested track into ./Queue under the next free
        # queue number tracked in the module-level `queues` dict.
        print("Test")
        Queue_infile = os.path.isdir("./Queue")
        if Queue_infile is False:
            os.mkdir("Queue")
        DIR = os.path.abspath(os.path.realpath("Queue"))
        q_num = len(os.listdir(DIR))
        q_num += 1
        # NOTE(review): this local boolean shadows the enclosing function name.
        add_queue = True
        while add_queue:
            if q_num in queues:
                q_num += 1
            else:
                add_queue = False
                queues[q_num] = q_num
        # NOTE(review): f"\song..." is an invalid escape kept literally by
        # CPython, so this only works as a Windows separator; prefer
        # os.path.join -- TODO confirm and fix.
        queue_path = os.path.abspath(os.path.realpath("Queue") + f"\song{q_num}.%(ext)s")
        ydl_opts = {
            'format': 'bestaudio/best',
            'quiet': True,
            'outtmpl': queue_path,
            'postprocessors': [{
                'key': 'FFmpegExtractAudio',
                'preferredcodec': 'mp3',
                'preferredquality': '192'
            }],
        }
        with youtube_dl.YoutubeDL(ydl_opts) as ydl:
            print("Downloading audio now\n")
            ydl.download([url])
        print("Song added to queue\n")
    # If song.mp3 is locked (PermissionError on remove), a track is still
    # playing, so this request is queued instead of played immediately.
    song_there = os.path.isfile("song.mp3")
    try:
        if song_there:
            os.remove("song.mp3")
            queues.clear()
            print("remove old song file")
    except PermissionError:
        add_queue()
        await ctx.send("Adding song to the queue")
        return
    # Nothing is playing: wipe any stale queue folder and start fresh.
    Queue_infile = os.path.isdir("./Queue")
    try:
        Queue_folder = "./Queue"
        if Queue_infile is True:
            print("Removed old Queue folder")
            shutil.rmtree(Queue_folder)
    except:
        # NOTE(review): bare except silently swallows rmtree failures.
        print("No old queue folder")
    await ctx.send("Getting everything ready now")
    # voice = get(client.voice_clients, guild=ctx.guild)
    ydl_opts = {
        'format': 'bestaudio/best',
        'quiet': True,
        'postprocessors': [{
            'key': 'FFmpegExtractAudio',
            'preferredcodec': 'mp3',
            'preferredquality': '192'
        }],
    }
    with youtube_dl.YoutubeDL(ydl_opts) as ydl:
        print("Downloading audio now\n")
        ydl.download([url])
    # youtube-dl writes an .mp3 named after the video; normalize to song.mp3
    # while remembering the original name for the "Playing" announcement.
    for file in os.listdir("./"):
        if file.endswith(".mp3"):
            name = file
            print(f"renamed file : {file}\n")
            os.rename(file, "song.mp3")
    voice.play(discord.FFmpegPCMAudio('song.mp3'), after=lambda e: check_queue())
    voice.source = discord.PCMVolumeTransformer(voice.source)
    voice.source.volume = 0.07
    # Strip the trailing "-<id>" part of the filename for display.
    nname = name.rsplit("-", 1)
    await ctx.send(f"Playing :notes: `{nname[0]}` :notes:")
    print("Playing\n")
# Maps queue number -> queue number for tracks waiting in ./Queue.
queues = {}
@botCommand.command(pass_context=True)
async def ping(ctx):
    """Liveness check: always reply with the fixed message 'test'."""
    reply = 'test'
    await ctx.send(reply)
@botCommand.command(pass_context=True)
async def join(ctx):
    """Connect the bot to the invoking user's current voice channel.

    Bug fixed: the original assigned the *coroutine* ``channel.connect()``
    to the global ``vc`` without awaiting it (a never-awaited coroutine),
    then awaited a second ``connect()`` call and discarded its VoiceClient.
    A single awaited call now both connects and caches the VoiceClient.
    """
    global vc
    channel = ctx.message.author.voice.channel
    # Await the connection so vc holds the VoiceClient, not a bare coroutine.
    vc = await channel.connect()
@botCommand.event
async def on_message(message):
    """Reply to a fixed set of chat triggers, then hand the message to the
    command processor so prefixed commands (e.g. ``.plays``) still run."""
    # Ignore the bot's own messages to avoid reply loops.
    if message.author == client.user:
        return
    mention_reply = '<@333863300892721152> davis kok pepe ya'
    # Exact message content -> (reply text, number of times to send it).
    replies = {
        'command list': ('- davis mah\n- davis\n- .plays + youtubeURL', 1),
        'davis mah': ('davis mah paling jago', 3),
        'davis': (mention_reply, 3),
        'bel sama jessica': ('jessica lah , https://imgur.com/TrtyIVa', 1),
        'ig jessica': ('https://www.instagram.com/h.yojeong/', 1),
    }
    entry = replies.get(message.content)
    if entry is not None:
        text, times = entry
        for _ in range(times):
            await message.channel.send(text)
    # Required when overriding on_message; otherwise commands never fire.
    await botCommand.process_commands(message)
# Blocking call: start the gateway connection with the token from .env.
botCommand.run(token)
|
flexible
|
{
"blob_id": "94ca18088664393fdfdc68bfb8bcad8b78e9e36a",
"index": 7887,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nload_dotenv(dotenv_path=env_path)\n<mask token>\n\n\n@botCommand.event\nasync def on_ready():\n print(f'{client.user} is connected to the following guild:\\n')\n\n\n@botCommand.command(pass_context=True, aliases=['y'])\nasync def youtube(ctx, *, search):\n query_string = urllib.parse.urlencode({'search_query': search})\n htm_content = urllib.request.urlopen('http://www.youtube.com/results?' +\n query_string)\n print('/watch\\\\?v=(.{11})')\n search_results = re.findall('/watch\\\\?v=(.{11})', htm_content.read().\n decode('utf-8'))\n await ctx.send('http://www.youtube.com/watch?v=' + search_results[0])\n\n\n<mask token>\n\n\n@botCommand.command(pass_context=True, aliases=['p', 'play'])\nasync def plays(ctx, *, url):\n server = ctx.message.guild\n global voice\n channel = ctx.message.author.voice.channel\n if not str(url).startswith('http'):\n query_string = urllib.parse.urlencode({'search_query': url})\n htm_content = urllib.request.urlopen(\n 'http://www.youtube.com/results?' 
+ query_string)\n print('/watch\\\\?v=(.{11})')\n search_results = re.findall('/watch\\\\?v=(.{11})', htm_content.read(\n ).decode('utf-8'))\n url = 'http://www.youtube.com/watch?v=' + search_results[0]\n if voice:\n print('ok')\n else:\n voice = await channel.connect()\n await ctx.send(f'Joined {channel}')\n\n def check_queue():\n print('Test')\n Queue_infile = os.path.isdir('./Queue')\n if Queue_infile is True:\n DIR = os.path.abspath(os.path.realpath('Queue'))\n length = len(os.listdir(DIR))\n still_q = length - 1\n try:\n first_file = os.listdir(DIR)[0]\n except:\n print('No more queue\\n')\n queues.clear()\n return\n main_location = os.path.dirname(os.path.realpath(__file__))\n song_path = os.path.abspath(os.path.realpath('Queue') + '\\\\' +\n first_file)\n if length != 0:\n print('Song done , playing next queue\\n')\n print(f'song still in queue: {still_q}')\n song_there = os.path.isfile('song.mp3')\n if song_there:\n os.remove('song.mp3')\n shutil.move(song_path, main_location)\n for file in os.listdir('./'):\n if file.endswith('.mp3'):\n os.rename(file, 'song.mp3')\n voice.play(discord.FFmpegPCMAudio('song.mp3'), after=lambda\n e: check_queue())\n voice.source = discord.PCMVolumeTransformer(voice.source)\n voice.source.volume = 0.07\n else:\n queues.clear()\n return\n else:\n queues.clear()\n print('No song founds')\n\n def add_queue():\n print('Test')\n Queue_infile = os.path.isdir('./Queue')\n if Queue_infile is False:\n os.mkdir('Queue')\n DIR = os.path.abspath(os.path.realpath('Queue'))\n q_num = len(os.listdir(DIR))\n q_num += 1\n add_queue = True\n while add_queue:\n if q_num in queues:\n q_num += 1\n else:\n add_queue = False\n queues[q_num] = q_num\n queue_path = os.path.abspath(os.path.realpath('Queue') +\n f'\\\\song{q_num}.%(ext)s')\n ydl_opts = {'format': 'bestaudio/best', 'quiet': True, 'outtmpl':\n queue_path, 'postprocessors': [{'key': 'FFmpegExtractAudio',\n 'preferredcodec': 'mp3', 'preferredquality': '192'}]}\n with 
youtube_dl.YoutubeDL(ydl_opts) as ydl:\n print('Downloading audio now\\n')\n ydl.download([url])\n print('Song added to queue\\n')\n song_there = os.path.isfile('song.mp3')\n try:\n if song_there:\n os.remove('song.mp3')\n queues.clear()\n print('remove old song file')\n except PermissionError:\n add_queue()\n await ctx.send('Adding song to the queue')\n return\n Queue_infile = os.path.isdir('./Queue')\n try:\n Queue_folder = './Queue'\n if Queue_infile is True:\n print('Removed old Queue folder')\n shutil.rmtree(Queue_folder)\n except:\n print('No old queue folder')\n await ctx.send('Getting everything ready now')\n ydl_opts = {'format': 'bestaudio/best', 'quiet': True, 'postprocessors':\n [{'key': 'FFmpegExtractAudio', 'preferredcodec': 'mp3',\n 'preferredquality': '192'}]}\n with youtube_dl.YoutubeDL(ydl_opts) as ydl:\n print('Downloading audio now\\n')\n ydl.download([url])\n for file in os.listdir('./'):\n if file.endswith('.mp3'):\n name = file\n print(f'renamed file : {file}\\n')\n os.rename(file, 'song.mp3')\n voice.play(discord.FFmpegPCMAudio('song.mp3'), after=lambda e:\n check_queue())\n voice.source = discord.PCMVolumeTransformer(voice.source)\n voice.source.volume = 0.07\n nname = name.rsplit('-', 1)\n await ctx.send(f'Playing :notes: `{nname[0]}` :notes:')\n print('Playing\\n')\n\n\n<mask token>\n\n\n@botCommand.command(pass_context=True)\nasync def ping(ctx):\n await ctx.send('test')\n\n\n@botCommand.command(pass_context=True)\nasync def join(ctx):\n global vc\n channel = ctx.message.author.voice.channel\n vc = channel.connect()\n await channel.connect()\n\n\n@botCommand.event\nasync def on_message(message):\n if message.author == client.user:\n return\n msg1 = '<@333863300892721152> davis kok pepe ya'\n if message.content == 'command list':\n await message.channel.send(\n '- davis mah\\n- davis\\n- .plays + youtubeURL')\n if message.content == 'davis mah':\n for x in range(3):\n await message.channel.send('davis mah paling jago')\n if 
message.content == 'davis':\n response = msg1\n for x in range(3):\n await message.channel.send(response)\n if message.content == 'bel sama jessica':\n response = 'jessica lah , https://imgur.com/TrtyIVa'\n await message.channel.send(response)\n if message.content == 'ig jessica':\n response = 'https://www.instagram.com/h.yojeong/'\n await message.channel.send(response)\n await botCommand.process_commands(message)\n\n\nbotCommand.run(token)\n",
"step-3": "<mask token>\nenv_path = Path('.') / '.env'\nload_dotenv(dotenv_path=env_path)\nclient = discord.Client()\nbotCommand = commands.Bot(command_prefix='.')\ntoken = os.getenv('DISCORD_TOKEN')\nplayers = {}\n\n\n@botCommand.event\nasync def on_ready():\n print(f'{client.user} is connected to the following guild:\\n')\n\n\n@botCommand.command(pass_context=True, aliases=['y'])\nasync def youtube(ctx, *, search):\n query_string = urllib.parse.urlencode({'search_query': search})\n htm_content = urllib.request.urlopen('http://www.youtube.com/results?' +\n query_string)\n print('/watch\\\\?v=(.{11})')\n search_results = re.findall('/watch\\\\?v=(.{11})', htm_content.read().\n decode('utf-8'))\n await ctx.send('http://www.youtube.com/watch?v=' + search_results[0])\n\n\nvoice = None\nq_num = 0\n\n\n@botCommand.command(pass_context=True, aliases=['p', 'play'])\nasync def plays(ctx, *, url):\n server = ctx.message.guild\n global voice\n channel = ctx.message.author.voice.channel\n if not str(url).startswith('http'):\n query_string = urllib.parse.urlencode({'search_query': url})\n htm_content = urllib.request.urlopen(\n 'http://www.youtube.com/results?' 
+ query_string)\n print('/watch\\\\?v=(.{11})')\n search_results = re.findall('/watch\\\\?v=(.{11})', htm_content.read(\n ).decode('utf-8'))\n url = 'http://www.youtube.com/watch?v=' + search_results[0]\n if voice:\n print('ok')\n else:\n voice = await channel.connect()\n await ctx.send(f'Joined {channel}')\n\n def check_queue():\n print('Test')\n Queue_infile = os.path.isdir('./Queue')\n if Queue_infile is True:\n DIR = os.path.abspath(os.path.realpath('Queue'))\n length = len(os.listdir(DIR))\n still_q = length - 1\n try:\n first_file = os.listdir(DIR)[0]\n except:\n print('No more queue\\n')\n queues.clear()\n return\n main_location = os.path.dirname(os.path.realpath(__file__))\n song_path = os.path.abspath(os.path.realpath('Queue') + '\\\\' +\n first_file)\n if length != 0:\n print('Song done , playing next queue\\n')\n print(f'song still in queue: {still_q}')\n song_there = os.path.isfile('song.mp3')\n if song_there:\n os.remove('song.mp3')\n shutil.move(song_path, main_location)\n for file in os.listdir('./'):\n if file.endswith('.mp3'):\n os.rename(file, 'song.mp3')\n voice.play(discord.FFmpegPCMAudio('song.mp3'), after=lambda\n e: check_queue())\n voice.source = discord.PCMVolumeTransformer(voice.source)\n voice.source.volume = 0.07\n else:\n queues.clear()\n return\n else:\n queues.clear()\n print('No song founds')\n\n def add_queue():\n print('Test')\n Queue_infile = os.path.isdir('./Queue')\n if Queue_infile is False:\n os.mkdir('Queue')\n DIR = os.path.abspath(os.path.realpath('Queue'))\n q_num = len(os.listdir(DIR))\n q_num += 1\n add_queue = True\n while add_queue:\n if q_num in queues:\n q_num += 1\n else:\n add_queue = False\n queues[q_num] = q_num\n queue_path = os.path.abspath(os.path.realpath('Queue') +\n f'\\\\song{q_num}.%(ext)s')\n ydl_opts = {'format': 'bestaudio/best', 'quiet': True, 'outtmpl':\n queue_path, 'postprocessors': [{'key': 'FFmpegExtractAudio',\n 'preferredcodec': 'mp3', 'preferredquality': '192'}]}\n with 
youtube_dl.YoutubeDL(ydl_opts) as ydl:\n print('Downloading audio now\\n')\n ydl.download([url])\n print('Song added to queue\\n')\n song_there = os.path.isfile('song.mp3')\n try:\n if song_there:\n os.remove('song.mp3')\n queues.clear()\n print('remove old song file')\n except PermissionError:\n add_queue()\n await ctx.send('Adding song to the queue')\n return\n Queue_infile = os.path.isdir('./Queue')\n try:\n Queue_folder = './Queue'\n if Queue_infile is True:\n print('Removed old Queue folder')\n shutil.rmtree(Queue_folder)\n except:\n print('No old queue folder')\n await ctx.send('Getting everything ready now')\n ydl_opts = {'format': 'bestaudio/best', 'quiet': True, 'postprocessors':\n [{'key': 'FFmpegExtractAudio', 'preferredcodec': 'mp3',\n 'preferredquality': '192'}]}\n with youtube_dl.YoutubeDL(ydl_opts) as ydl:\n print('Downloading audio now\\n')\n ydl.download([url])\n for file in os.listdir('./'):\n if file.endswith('.mp3'):\n name = file\n print(f'renamed file : {file}\\n')\n os.rename(file, 'song.mp3')\n voice.play(discord.FFmpegPCMAudio('song.mp3'), after=lambda e:\n check_queue())\n voice.source = discord.PCMVolumeTransformer(voice.source)\n voice.source.volume = 0.07\n nname = name.rsplit('-', 1)\n await ctx.send(f'Playing :notes: `{nname[0]}` :notes:')\n print('Playing\\n')\n\n\nqueues = {}\n\n\n@botCommand.command(pass_context=True)\nasync def ping(ctx):\n await ctx.send('test')\n\n\n@botCommand.command(pass_context=True)\nasync def join(ctx):\n global vc\n channel = ctx.message.author.voice.channel\n vc = channel.connect()\n await channel.connect()\n\n\n@botCommand.event\nasync def on_message(message):\n if message.author == client.user:\n return\n msg1 = '<@333863300892721152> davis kok pepe ya'\n if message.content == 'command list':\n await message.channel.send(\n '- davis mah\\n- davis\\n- .plays + youtubeURL')\n if message.content == 'davis mah':\n for x in range(3):\n await message.channel.send('davis mah paling jago')\n if message.content 
== 'davis':\n response = msg1\n for x in range(3):\n await message.channel.send(response)\n if message.content == 'bel sama jessica':\n response = 'jessica lah , https://imgur.com/TrtyIVa'\n await message.channel.send(response)\n if message.content == 'ig jessica':\n response = 'https://www.instagram.com/h.yojeong/'\n await message.channel.send(response)\n await botCommand.process_commands(message)\n\n\nbotCommand.run(token)\n",
"step-4": "import os\nimport shutil\nimport discord\nimport youtube_dl\nfrom discord.ext import commands\nimport urllib.parse\nimport urllib.request\nimport re\nimport dotenv\nfrom pathlib import Path\nfrom dotenv import load_dotenv\nenv_path = Path('.') / '.env'\nload_dotenv(dotenv_path=env_path)\nclient = discord.Client()\nbotCommand = commands.Bot(command_prefix='.')\ntoken = os.getenv('DISCORD_TOKEN')\nplayers = {}\n\n\n@botCommand.event\nasync def on_ready():\n print(f'{client.user} is connected to the following guild:\\n')\n\n\n@botCommand.command(pass_context=True, aliases=['y'])\nasync def youtube(ctx, *, search):\n query_string = urllib.parse.urlencode({'search_query': search})\n htm_content = urllib.request.urlopen('http://www.youtube.com/results?' +\n query_string)\n print('/watch\\\\?v=(.{11})')\n search_results = re.findall('/watch\\\\?v=(.{11})', htm_content.read().\n decode('utf-8'))\n await ctx.send('http://www.youtube.com/watch?v=' + search_results[0])\n\n\nvoice = None\nq_num = 0\n\n\n@botCommand.command(pass_context=True, aliases=['p', 'play'])\nasync def plays(ctx, *, url):\n server = ctx.message.guild\n global voice\n channel = ctx.message.author.voice.channel\n if not str(url).startswith('http'):\n query_string = urllib.parse.urlencode({'search_query': url})\n htm_content = urllib.request.urlopen(\n 'http://www.youtube.com/results?' 
+ query_string)\n print('/watch\\\\?v=(.{11})')\n search_results = re.findall('/watch\\\\?v=(.{11})', htm_content.read(\n ).decode('utf-8'))\n url = 'http://www.youtube.com/watch?v=' + search_results[0]\n if voice:\n print('ok')\n else:\n voice = await channel.connect()\n await ctx.send(f'Joined {channel}')\n\n def check_queue():\n print('Test')\n Queue_infile = os.path.isdir('./Queue')\n if Queue_infile is True:\n DIR = os.path.abspath(os.path.realpath('Queue'))\n length = len(os.listdir(DIR))\n still_q = length - 1\n try:\n first_file = os.listdir(DIR)[0]\n except:\n print('No more queue\\n')\n queues.clear()\n return\n main_location = os.path.dirname(os.path.realpath(__file__))\n song_path = os.path.abspath(os.path.realpath('Queue') + '\\\\' +\n first_file)\n if length != 0:\n print('Song done , playing next queue\\n')\n print(f'song still in queue: {still_q}')\n song_there = os.path.isfile('song.mp3')\n if song_there:\n os.remove('song.mp3')\n shutil.move(song_path, main_location)\n for file in os.listdir('./'):\n if file.endswith('.mp3'):\n os.rename(file, 'song.mp3')\n voice.play(discord.FFmpegPCMAudio('song.mp3'), after=lambda\n e: check_queue())\n voice.source = discord.PCMVolumeTransformer(voice.source)\n voice.source.volume = 0.07\n else:\n queues.clear()\n return\n else:\n queues.clear()\n print('No song founds')\n\n def add_queue():\n print('Test')\n Queue_infile = os.path.isdir('./Queue')\n if Queue_infile is False:\n os.mkdir('Queue')\n DIR = os.path.abspath(os.path.realpath('Queue'))\n q_num = len(os.listdir(DIR))\n q_num += 1\n add_queue = True\n while add_queue:\n if q_num in queues:\n q_num += 1\n else:\n add_queue = False\n queues[q_num] = q_num\n queue_path = os.path.abspath(os.path.realpath('Queue') +\n f'\\\\song{q_num}.%(ext)s')\n ydl_opts = {'format': 'bestaudio/best', 'quiet': True, 'outtmpl':\n queue_path, 'postprocessors': [{'key': 'FFmpegExtractAudio',\n 'preferredcodec': 'mp3', 'preferredquality': '192'}]}\n with 
youtube_dl.YoutubeDL(ydl_opts) as ydl:\n print('Downloading audio now\\n')\n ydl.download([url])\n print('Song added to queue\\n')\n song_there = os.path.isfile('song.mp3')\n try:\n if song_there:\n os.remove('song.mp3')\n queues.clear()\n print('remove old song file')\n except PermissionError:\n add_queue()\n await ctx.send('Adding song to the queue')\n return\n Queue_infile = os.path.isdir('./Queue')\n try:\n Queue_folder = './Queue'\n if Queue_infile is True:\n print('Removed old Queue folder')\n shutil.rmtree(Queue_folder)\n except:\n print('No old queue folder')\n await ctx.send('Getting everything ready now')\n ydl_opts = {'format': 'bestaudio/best', 'quiet': True, 'postprocessors':\n [{'key': 'FFmpegExtractAudio', 'preferredcodec': 'mp3',\n 'preferredquality': '192'}]}\n with youtube_dl.YoutubeDL(ydl_opts) as ydl:\n print('Downloading audio now\\n')\n ydl.download([url])\n for file in os.listdir('./'):\n if file.endswith('.mp3'):\n name = file\n print(f'renamed file : {file}\\n')\n os.rename(file, 'song.mp3')\n voice.play(discord.FFmpegPCMAudio('song.mp3'), after=lambda e:\n check_queue())\n voice.source = discord.PCMVolumeTransformer(voice.source)\n voice.source.volume = 0.07\n nname = name.rsplit('-', 1)\n await ctx.send(f'Playing :notes: `{nname[0]}` :notes:')\n print('Playing\\n')\n\n\nqueues = {}\n\n\n@botCommand.command(pass_context=True)\nasync def ping(ctx):\n await ctx.send('test')\n\n\n@botCommand.command(pass_context=True)\nasync def join(ctx):\n global vc\n channel = ctx.message.author.voice.channel\n vc = channel.connect()\n await channel.connect()\n\n\n@botCommand.event\nasync def on_message(message):\n if message.author == client.user:\n return\n msg1 = '<@333863300892721152> davis kok pepe ya'\n if message.content == 'command list':\n await message.channel.send(\n '- davis mah\\n- davis\\n- .plays + youtubeURL')\n if message.content == 'davis mah':\n for x in range(3):\n await message.channel.send('davis mah paling jago')\n if message.content 
== 'davis':\n response = msg1\n for x in range(3):\n await message.channel.send(response)\n if message.content == 'bel sama jessica':\n response = 'jessica lah , https://imgur.com/TrtyIVa'\n await message.channel.send(response)\n if message.content == 'ig jessica':\n response = 'https://www.instagram.com/h.yojeong/'\n await message.channel.send(response)\n await botCommand.process_commands(message)\n\n\nbotCommand.run(token)\n",
"step-5": "# bot.py\nimport os\nimport shutil\nimport discord\nimport youtube_dl\nfrom discord.ext import commands\nimport urllib.parse\nimport urllib.request\nimport re\nimport dotenv\nfrom pathlib import Path # Python 3.6+ only\nfrom dotenv import load_dotenv\n\nenv_path = Path('.') / '.env'\nload_dotenv(dotenv_path=env_path)\n\nclient = discord.Client()\nbotCommand = commands.Bot(command_prefix='.')\ntoken = os.getenv(\"DISCORD_TOKEN\")\nplayers = {}\n\n\n@botCommand.event\nasync def on_ready():\n print(\n f'{client.user} is connected to the following guild:\\n'\n )\n\n\n@botCommand.command(pass_context=True, aliases=['y'])\nasync def youtube(ctx, *, search):\n query_string = urllib.parse.urlencode({\n 'search_query': search\n })\n\n htm_content = urllib.request.urlopen(\n 'http://www.youtube.com/results?' + query_string\n )\n print(r'/watch\\?v=(.{11})')\n\n search_results = re.findall(r'/watch\\?v=(.{11})', htm_content.read().decode('utf-8'))\n await ctx.send('http://www.youtube.com/watch?v=' + search_results[0])\n\n\nvoice = None\n\nq_num = 0\n\n\n@botCommand.command(pass_context=True, aliases=['p', 'play'])\nasync def plays(ctx, *, url):\n server = ctx.message.guild\n global voice\n channel = ctx.message.author.voice.channel\n if not str(url).startswith('http'):\n query_string = urllib.parse.urlencode({\n 'search_query': url\n })\n\n htm_content = urllib.request.urlopen(\n 'http://www.youtube.com/results?' 
+ query_string\n )\n print(r'/watch\\?v=(.{11})')\n\n search_results = re.findall(r'/watch\\?v=(.{11})', htm_content.read().decode('utf-8'))\n url = 'http://www.youtube.com/watch?v=' + search_results[0]\n\n if voice:\n print(\"ok\")\n else:\n\n voice = await channel.connect()\n await ctx.send(f\"Joined {channel}\")\n\n # if voice is None:\n # voice = await channel.connect()\n # song_there = os.path.isfile(\"song.mp3\")\n\n def check_queue():\n print('Test')\n Queue_infile = os.path.isdir(\"./Queue\")\n if Queue_infile is True:\n DIR = os.path.abspath(os.path.realpath(\"Queue\"))\n length = len(os.listdir(DIR))\n still_q = length - 1\n try:\n first_file = os.listdir(DIR)[0]\n except:\n print(\"No more queue\\n\")\n queues.clear()\n return\n main_location = os.path.dirname(os.path.realpath(__file__))\n song_path = os.path.abspath(os.path.realpath(\"Queue\") + \"\\\\\" + first_file)\n if length != 0:\n print(\"Song done , playing next queue\\n\")\n print(f\"song still in queue: {still_q}\")\n song_there = os.path.isfile(\"song.mp3\")\n if song_there:\n os.remove(\"song.mp3\")\n shutil.move(song_path, main_location)\n for file in os.listdir(\"./\"):\n if file.endswith(\".mp3\"):\n os.rename(file, 'song.mp3')\n\n voice.play(discord.FFmpegPCMAudio('song.mp3'), after=lambda e: check_queue())\n voice.source = discord.PCMVolumeTransformer(voice.source)\n voice.source.volume = 0.07\n else:\n queues.clear()\n return\n else:\n queues.clear()\n print(\"No song founds\")\n\n def add_queue():\n print(\"Test\")\n Queue_infile = os.path.isdir(\"./Queue\")\n if Queue_infile is False:\n os.mkdir(\"Queue\")\n DIR = os.path.abspath(os.path.realpath(\"Queue\"))\n q_num = len(os.listdir(DIR))\n q_num += 1\n add_queue = True\n while add_queue:\n if q_num in queues:\n q_num += 1\n else:\n add_queue = False\n queues[q_num] = q_num\n\n queue_path = os.path.abspath(os.path.realpath(\"Queue\") + f\"\\song{q_num}.%(ext)s\")\n ydl_opts = {\n 'format': 'bestaudio/best',\n 'quiet': True,\n 
'outtmpl': queue_path,\n 'postprocessors': [{\n 'key': 'FFmpegExtractAudio',\n 'preferredcodec': 'mp3',\n 'preferredquality': '192'\n }],\n }\n\n with youtube_dl.YoutubeDL(ydl_opts) as ydl:\n print(\"Downloading audio now\\n\")\n ydl.download([url])\n\n print(\"Song added to queue\\n\")\n\n song_there = os.path.isfile(\"song.mp3\")\n try:\n if song_there:\n os.remove(\"song.mp3\")\n queues.clear()\n print(\"remove old song file\")\n except PermissionError:\n add_queue()\n await ctx.send(\"Adding song to the queue\")\n return\n\n Queue_infile = os.path.isdir(\"./Queue\")\n try:\n Queue_folder = \"./Queue\"\n if Queue_infile is True:\n print(\"Removed old Queue folder\")\n shutil.rmtree(Queue_folder)\n except:\n print(\"No old queue folder\")\n\n await ctx.send(\"Getting everything ready now\")\n\n # voice = get(client.voice_clients, guild=ctx.guild)\n\n ydl_opts = {\n 'format': 'bestaudio/best',\n 'quiet': True,\n 'postprocessors': [{\n 'key': 'FFmpegExtractAudio',\n 'preferredcodec': 'mp3',\n 'preferredquality': '192'\n }],\n }\n\n with youtube_dl.YoutubeDL(ydl_opts) as ydl:\n print(\"Downloading audio now\\n\")\n ydl.download([url])\n\n for file in os.listdir(\"./\"):\n if file.endswith(\".mp3\"):\n name = file\n print(f\"renamed file : {file}\\n\")\n os.rename(file, \"song.mp3\")\n\n voice.play(discord.FFmpegPCMAudio('song.mp3'), after=lambda e: check_queue())\n voice.source = discord.PCMVolumeTransformer(voice.source)\n voice.source.volume = 0.07\n\n nname = name.rsplit(\"-\", 1)\n await ctx.send(f\"Playing :notes: `{nname[0]}` :notes:\")\n print(\"Playing\\n\")\n\n\nqueues = {}\n\n\n@botCommand.command(pass_context=True)\nasync def ping(ctx):\n await ctx.send('test')\n\n\n@botCommand.command(pass_context=True)\nasync def join(ctx):\n global vc\n channel = ctx.message.author.voice.channel\n vc = channel.connect()\n await channel.connect()\n\n\n@botCommand.event\nasync def on_message(message):\n if message.author == client.user:\n return\n\n msg1 = 
'<@333863300892721152> davis kok pepe ya'\n\n if message.content == 'command list':\n await message.channel.send('- davis mah\\n- davis\\n- .plays + youtubeURL')\n\n if message.content == 'davis mah':\n for x in range(3):\n await message.channel.send('davis mah paling jago')\n if message.content == 'davis':\n response = msg1\n for x in range(3):\n await message.channel.send(response)\n if message.content == 'bel sama jessica':\n response = 'jessica lah , https://imgur.com/TrtyIVa'\n await message.channel.send(response)\n if message.content == 'ig jessica':\n response = 'https://www.instagram.com/h.yojeong/'\n await message.channel.send(response)\n await botCommand.process_commands(message)\n\n\nbotCommand.run(token)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
# Convert the first 20 saved piano-roll samples (per model) to .mid files.
for i in range(20):
    print('The ' + str(i) + '-th graph.')
    # Load the numpy piano roll for every model (and the ground truth).
    Ground_sample = np.load(Ground_FOLDER + 'Ground-True-' + str(i) + '.npy')
    CGRNN_sample = np.load(CGRNN_FOLDER + 'CGRNN-' + str(i) + '.npy')
    SRNN_sample = np.load(SRNN_FOLDER + 'SRNN-' + str(i) + '.npy')
    VRNN_sample = np.load(VRNN_FOLDER + 'VRNN-' + str(i) + '.npy')
    ssRnnRbm_sample = np.load(ssRnnRbm_FOLDER + 'ssRnnRbm-' + str(i) + '.npy')
    # Write each roll next to its .npy; (1, 128) and 0.25 are passed through
    # to midiwrite (presumably pitch range and seconds per step -- TODO confirm).
    midiwrite(Ground_FOLDER + 'Ground-True-' + str(i) + '.mid',
        Ground_sample, (1, 128), 0.25)
    midiwrite(CGRNN_FOLDER + 'CGRNN-' + str(i) + '.mid', CGRNN_sample, (1,
        128), 0.25)
    midiwrite(SRNN_FOLDER + 'SRNN-' + str(i) + '.mid', SRNN_sample, (1, 128
        ), 0.25)
    midiwrite(VRNN_FOLDER + 'VRNN-' + str(i) + '.mid', VRNN_sample, (1, 128
        ), 0.25)
    midiwrite(ssRnnRbm_FOLDER + 'ssRnnRbm-' + str(i) + '.mid',
        ssRnnRbm_sample, (1, 128), 0.25)
    pass
<|reserved_special_token_1|>
<|reserved_special_token_0|>
# Per-model folders holding the sampled piano rolls (*.npy) to convert.
CGRNN_FOLDER = 'Samples/CGRNN/'
SRNN_FOLDER = 'Samples/SRNN/'
VRNN_FOLDER = 'Samples/VRNN/'
ssRnnRbm_FOLDER = 'Samples/ssRnnRbm/'
Ground_FOLDER = 'Samples/'
# Convert the first 20 saved piano-roll samples (per model) to .mid files.
for i in range(20):
    print('The ' + str(i) + '-th graph.')
    # Load the numpy piano roll for every model (and the ground truth).
    Ground_sample = np.load(Ground_FOLDER + 'Ground-True-' + str(i) + '.npy')
    CGRNN_sample = np.load(CGRNN_FOLDER + 'CGRNN-' + str(i) + '.npy')
    SRNN_sample = np.load(SRNN_FOLDER + 'SRNN-' + str(i) + '.npy')
    VRNN_sample = np.load(VRNN_FOLDER + 'VRNN-' + str(i) + '.npy')
    ssRnnRbm_sample = np.load(ssRnnRbm_FOLDER + 'ssRnnRbm-' + str(i) + '.npy')
    # Write each roll next to its .npy; (1, 128) and 0.25 are passed through
    # to midiwrite (presumably pitch range and seconds per step -- TODO confirm).
    midiwrite(Ground_FOLDER + 'Ground-True-' + str(i) + '.mid',
        Ground_sample, (1, 128), 0.25)
    midiwrite(CGRNN_FOLDER + 'CGRNN-' + str(i) + '.mid', CGRNN_sample, (1,
        128), 0.25)
    midiwrite(SRNN_FOLDER + 'SRNN-' + str(i) + '.mid', SRNN_sample, (1, 128
        ), 0.25)
    midiwrite(VRNN_FOLDER + 'VRNN-' + str(i) + '.mid', VRNN_sample, (1, 128
        ), 0.25)
    midiwrite(ssRnnRbm_FOLDER + 'ssRnnRbm-' + str(i) + '.mid',
        ssRnnRbm_sample, (1, 128), 0.25)
    pass
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import numpy as np
from midi.utils import midiread, midiwrite
CGRNN_FOLDER = 'Samples/CGRNN/'
SRNN_FOLDER = 'Samples/SRNN/'
VRNN_FOLDER = 'Samples/VRNN/'
ssRnnRbm_FOLDER = 'Samples/ssRnnRbm/'
Ground_FOLDER = 'Samples/'
for i in range(20):
print('The ' + str(i) + '-th graph.')
Ground_sample = np.load(Ground_FOLDER + 'Ground-True-' + str(i) + '.npy')
CGRNN_sample = np.load(CGRNN_FOLDER + 'CGRNN-' + str(i) + '.npy')
SRNN_sample = np.load(SRNN_FOLDER + 'SRNN-' + str(i) + '.npy')
VRNN_sample = np.load(VRNN_FOLDER + 'VRNN-' + str(i) + '.npy')
ssRnnRbm_sample = np.load(ssRnnRbm_FOLDER + 'ssRnnRbm-' + str(i) + '.npy')
midiwrite(Ground_FOLDER + 'Ground-True-' + str(i) + '.mid',
Ground_sample, (1, 128), 0.25)
midiwrite(CGRNN_FOLDER + 'CGRNN-' + str(i) + '.mid', CGRNN_sample, (1,
128), 0.25)
midiwrite(SRNN_FOLDER + 'SRNN-' + str(i) + '.mid', SRNN_sample, (1, 128
), 0.25)
midiwrite(VRNN_FOLDER + 'VRNN-' + str(i) + '.mid', VRNN_sample, (1, 128
), 0.25)
midiwrite(ssRnnRbm_FOLDER + 'ssRnnRbm-' + str(i) + '.mid',
ssRnnRbm_sample, (1, 128), 0.25)
pass
<|reserved_special_token_1|>
"""#########################################################################
Author: Yingru Liu
Institute: Stony Brook University
Descriptions: transer the numpy files of the midi songs into midi files.
(Cause the code privided by RNN-RBM tutorial to save midi
runs in python 2.7 but my code is in python 3.6)
----2017.12.29
#########################################################################"""
import numpy as np
from midi.utils import midiread, midiwrite
#
CGRNN_FOLDER = "Samples/CGRNN/"
SRNN_FOLDER = "Samples/SRNN/"
VRNN_FOLDER = "Samples/VRNN/"
ssRnnRbm_FOLDER = "Samples/ssRnnRbm/"
Ground_FOLDER = "Samples/"
for i in range(20):
print('The ' + str(i) + '-th graph.')
Ground_sample = np.load(Ground_FOLDER + 'Ground-True-' + str(i) + '.npy')
CGRNN_sample = np.load(CGRNN_FOLDER + 'CGRNN-' + str(i) + '.npy')
SRNN_sample = np.load(SRNN_FOLDER + 'SRNN-' + str(i) + '.npy')
VRNN_sample = np.load(VRNN_FOLDER + 'VRNN-' + str(i) + '.npy')
ssRnnRbm_sample = np.load(ssRnnRbm_FOLDER + 'ssRnnRbm-' + str(i) + '.npy')
midiwrite(Ground_FOLDER + 'Ground-True-' + str(i) + '.mid', Ground_sample, (1, 128), 0.25)
midiwrite(CGRNN_FOLDER + 'CGRNN-' + str(i) + '.mid', CGRNN_sample, (1, 128), 0.25)
midiwrite(SRNN_FOLDER + 'SRNN-' + str(i) + '.mid', SRNN_sample, (1, 128), 0.25)
midiwrite(VRNN_FOLDER + 'VRNN-' + str(i) + '.mid', VRNN_sample, (1, 128), 0.25)
midiwrite(ssRnnRbm_FOLDER + 'ssRnnRbm-' + str(i) + '.mid', ssRnnRbm_sample, (1, 128), 0.25)
pass
|
flexible
|
{
"blob_id": "af152e0b739305866902ee141f94641b17ff03ea",
"index": 6496,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(20):\n print('The ' + str(i) + '-th graph.')\n Ground_sample = np.load(Ground_FOLDER + 'Ground-True-' + str(i) + '.npy')\n CGRNN_sample = np.load(CGRNN_FOLDER + 'CGRNN-' + str(i) + '.npy')\n SRNN_sample = np.load(SRNN_FOLDER + 'SRNN-' + str(i) + '.npy')\n VRNN_sample = np.load(VRNN_FOLDER + 'VRNN-' + str(i) + '.npy')\n ssRnnRbm_sample = np.load(ssRnnRbm_FOLDER + 'ssRnnRbm-' + str(i) + '.npy')\n midiwrite(Ground_FOLDER + 'Ground-True-' + str(i) + '.mid',\n Ground_sample, (1, 128), 0.25)\n midiwrite(CGRNN_FOLDER + 'CGRNN-' + str(i) + '.mid', CGRNN_sample, (1, \n 128), 0.25)\n midiwrite(SRNN_FOLDER + 'SRNN-' + str(i) + '.mid', SRNN_sample, (1, 128\n ), 0.25)\n midiwrite(VRNN_FOLDER + 'VRNN-' + str(i) + '.mid', VRNN_sample, (1, 128\n ), 0.25)\n midiwrite(ssRnnRbm_FOLDER + 'ssRnnRbm-' + str(i) + '.mid',\n ssRnnRbm_sample, (1, 128), 0.25)\n pass\n",
"step-3": "<mask token>\nCGRNN_FOLDER = 'Samples/CGRNN/'\nSRNN_FOLDER = 'Samples/SRNN/'\nVRNN_FOLDER = 'Samples/VRNN/'\nssRnnRbm_FOLDER = 'Samples/ssRnnRbm/'\nGround_FOLDER = 'Samples/'\nfor i in range(20):\n print('The ' + str(i) + '-th graph.')\n Ground_sample = np.load(Ground_FOLDER + 'Ground-True-' + str(i) + '.npy')\n CGRNN_sample = np.load(CGRNN_FOLDER + 'CGRNN-' + str(i) + '.npy')\n SRNN_sample = np.load(SRNN_FOLDER + 'SRNN-' + str(i) + '.npy')\n VRNN_sample = np.load(VRNN_FOLDER + 'VRNN-' + str(i) + '.npy')\n ssRnnRbm_sample = np.load(ssRnnRbm_FOLDER + 'ssRnnRbm-' + str(i) + '.npy')\n midiwrite(Ground_FOLDER + 'Ground-True-' + str(i) + '.mid',\n Ground_sample, (1, 128), 0.25)\n midiwrite(CGRNN_FOLDER + 'CGRNN-' + str(i) + '.mid', CGRNN_sample, (1, \n 128), 0.25)\n midiwrite(SRNN_FOLDER + 'SRNN-' + str(i) + '.mid', SRNN_sample, (1, 128\n ), 0.25)\n midiwrite(VRNN_FOLDER + 'VRNN-' + str(i) + '.mid', VRNN_sample, (1, 128\n ), 0.25)\n midiwrite(ssRnnRbm_FOLDER + 'ssRnnRbm-' + str(i) + '.mid',\n ssRnnRbm_sample, (1, 128), 0.25)\n pass\n",
"step-4": "<mask token>\nimport numpy as np\nfrom midi.utils import midiread, midiwrite\nCGRNN_FOLDER = 'Samples/CGRNN/'\nSRNN_FOLDER = 'Samples/SRNN/'\nVRNN_FOLDER = 'Samples/VRNN/'\nssRnnRbm_FOLDER = 'Samples/ssRnnRbm/'\nGround_FOLDER = 'Samples/'\nfor i in range(20):\n print('The ' + str(i) + '-th graph.')\n Ground_sample = np.load(Ground_FOLDER + 'Ground-True-' + str(i) + '.npy')\n CGRNN_sample = np.load(CGRNN_FOLDER + 'CGRNN-' + str(i) + '.npy')\n SRNN_sample = np.load(SRNN_FOLDER + 'SRNN-' + str(i) + '.npy')\n VRNN_sample = np.load(VRNN_FOLDER + 'VRNN-' + str(i) + '.npy')\n ssRnnRbm_sample = np.load(ssRnnRbm_FOLDER + 'ssRnnRbm-' + str(i) + '.npy')\n midiwrite(Ground_FOLDER + 'Ground-True-' + str(i) + '.mid',\n Ground_sample, (1, 128), 0.25)\n midiwrite(CGRNN_FOLDER + 'CGRNN-' + str(i) + '.mid', CGRNN_sample, (1, \n 128), 0.25)\n midiwrite(SRNN_FOLDER + 'SRNN-' + str(i) + '.mid', SRNN_sample, (1, 128\n ), 0.25)\n midiwrite(VRNN_FOLDER + 'VRNN-' + str(i) + '.mid', VRNN_sample, (1, 128\n ), 0.25)\n midiwrite(ssRnnRbm_FOLDER + 'ssRnnRbm-' + str(i) + '.mid',\n ssRnnRbm_sample, (1, 128), 0.25)\n pass\n",
"step-5": "\"\"\"#########################################################################\r\nAuthor: Yingru Liu\r\nInstitute: Stony Brook University\r\nDescriptions: transer the numpy files of the midi songs into midi files.\r\n (Cause the code privided by RNN-RBM tutorial to save midi\r\n runs in python 2.7 but my code is in python 3.6)\r\n ----2017.12.29\r\n#########################################################################\"\"\"\r\nimport numpy as np\r\nfrom midi.utils import midiread, midiwrite\r\n#\r\nCGRNN_FOLDER = \"Samples/CGRNN/\"\r\nSRNN_FOLDER = \"Samples/SRNN/\"\r\nVRNN_FOLDER = \"Samples/VRNN/\"\r\nssRnnRbm_FOLDER = \"Samples/ssRnnRbm/\"\r\nGround_FOLDER = \"Samples/\"\r\n\r\nfor i in range(20):\r\n print('The ' + str(i) + '-th graph.')\r\n Ground_sample = np.load(Ground_FOLDER + 'Ground-True-' + str(i) + '.npy')\r\n CGRNN_sample = np.load(CGRNN_FOLDER + 'CGRNN-' + str(i) + '.npy')\r\n SRNN_sample = np.load(SRNN_FOLDER + 'SRNN-' + str(i) + '.npy')\r\n VRNN_sample = np.load(VRNN_FOLDER + 'VRNN-' + str(i) + '.npy')\r\n ssRnnRbm_sample = np.load(ssRnnRbm_FOLDER + 'ssRnnRbm-' + str(i) + '.npy')\r\n midiwrite(Ground_FOLDER + 'Ground-True-' + str(i) + '.mid', Ground_sample, (1, 128), 0.25)\r\n midiwrite(CGRNN_FOLDER + 'CGRNN-' + str(i) + '.mid', CGRNN_sample, (1, 128), 0.25)\r\n midiwrite(SRNN_FOLDER + 'SRNN-' + str(i) + '.mid', SRNN_sample, (1, 128), 0.25)\r\n midiwrite(VRNN_FOLDER + 'VRNN-' + str(i) + '.mid', VRNN_sample, (1, 128), 0.25)\r\n midiwrite(ssRnnRbm_FOLDER + 'ssRnnRbm-' + str(i) + '.mid', ssRnnRbm_sample, (1, 128), 0.25)\r\n pass",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
"""
Remove tool_consumer_info_product_family_code from GradingInfo.
Revision ID: 106d94be7705
Revises: 973c9358b616
Create Date: 2023-07-06 11:23:10.850486
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = "106d94be7705"
down_revision = "973c9358b616"
def upgrade():
op.drop_column("lis_result_sourcedid", "tool_consumer_info_product_family_code")
def downgrade():
op.add_column(
"lis_result_sourcedid",
sa.Column(
"tool_consumer_info_product_family_code",
sa.TEXT(),
autoincrement=False,
nullable=True,
),
)
|
normal
|
{
"blob_id": "46d85a3babab4b18f4e0e0384f254f6105cf691d",
"index": 1490,
"step-1": "<mask token>\n\n\ndef upgrade():\n op.drop_column('lis_result_sourcedid',\n 'tool_consumer_info_product_family_code')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef upgrade():\n op.drop_column('lis_result_sourcedid',\n 'tool_consumer_info_product_family_code')\n\n\ndef downgrade():\n op.add_column('lis_result_sourcedid', sa.Column(\n 'tool_consumer_info_product_family_code', sa.TEXT(), autoincrement=\n False, nullable=True))\n",
"step-3": "<mask token>\nrevision = '106d94be7705'\ndown_revision = '973c9358b616'\n\n\ndef upgrade():\n op.drop_column('lis_result_sourcedid',\n 'tool_consumer_info_product_family_code')\n\n\ndef downgrade():\n op.add_column('lis_result_sourcedid', sa.Column(\n 'tool_consumer_info_product_family_code', sa.TEXT(), autoincrement=\n False, nullable=True))\n",
"step-4": "<mask token>\nimport sqlalchemy as sa\nfrom alembic import op\nrevision = '106d94be7705'\ndown_revision = '973c9358b616'\n\n\ndef upgrade():\n op.drop_column('lis_result_sourcedid',\n 'tool_consumer_info_product_family_code')\n\n\ndef downgrade():\n op.add_column('lis_result_sourcedid', sa.Column(\n 'tool_consumer_info_product_family_code', sa.TEXT(), autoincrement=\n False, nullable=True))\n",
"step-5": "\"\"\"\nRemove tool_consumer_info_product_family_code from GradingInfo.\n\nRevision ID: 106d94be7705\nRevises: 973c9358b616\nCreate Date: 2023-07-06 11:23:10.850486\n\n\"\"\"\nimport sqlalchemy as sa\nfrom alembic import op\n\n# revision identifiers, used by Alembic.\nrevision = \"106d94be7705\"\ndown_revision = \"973c9358b616\"\n\n\ndef upgrade():\n op.drop_column(\"lis_result_sourcedid\", \"tool_consumer_info_product_family_code\")\n\n\ndef downgrade():\n op.add_column(\n \"lis_result_sourcedid\",\n sa.Column(\n \"tool_consumer_info_product_family_code\",\n sa.TEXT(),\n autoincrement=False,\n nullable=True,\n ),\n )\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ModelSpellVariantPair(Base):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ModelSpellVariantPair(Base):
__tablename__ = 'spell_variant_pair'
uuid = Column(UUID(as_uuid=True), server_default=sqlalchemy.text(
'uuid_generate_v4()'), unique=True, nullable=False, primary_key=True)
class_id = Column(UUID(as_uuid=True), ForeignKey('class.uuid', ondelete
='CASCADE'), nullable=False)
spells = relationship('ModelSpell', backref='spell_variant_pair',
cascade='all, delete-orphan')
<|reserved_special_token_1|>
import sqlalchemy
from .base import Base
from sqlalchemy import Column, Integer, String, ForeignKey
from sqlalchemy.dialects.postgresql import UUID
from sqlalchemy.orm import relationship
class ModelSpellVariantPair(Base):
    """ORM model for a pair of spell variants owned by a class row.

    Rows are removed when the owning class row is deleted (ON DELETE
    CASCADE on ``class_id``), and deleting a pair deletes its spells
    (``all, delete-orphan`` cascade on ``spells``).
    """
    __tablename__ = 'spell_variant_pair'
    # Primary key generated server-side; uuid_generate_v4() requires the
    # PostgreSQL uuid-ossp extension to be installed.
    uuid = Column(UUID(as_uuid=True), server_default=sqlalchemy.text(
        'uuid_generate_v4()'), unique=True, nullable=False, primary_key=True)
    # Owning class; the database cascades deletes down to this table.
    class_id = Column(UUID(as_uuid=True), ForeignKey('class.uuid', ondelete
        ='CASCADE'), nullable=False)
    # Spells attached to this pair; orphaned spells are deleted with it.
    spells = relationship('ModelSpell', backref='spell_variant_pair',
        cascade='all, delete-orphan')
<|reserved_special_token_1|>
import sqlalchemy
from .base import Base
from sqlalchemy import Column, Integer, String, ForeignKey
from sqlalchemy.dialects.postgresql import UUID
from sqlalchemy.orm import relationship


class ModelSpellVariantPair(Base):
    """A pair of spell variants belonging to one character class."""

    __tablename__ = "spell_variant_pair"

    # Primary key, generated server-side by the uuid-ossp extension.
    uuid = Column(
        UUID(as_uuid=True),
        primary_key=True,
        nullable=False,
        unique=True,
        server_default=sqlalchemy.text("uuid_generate_v4()"),
    )
    # Owning class; the foreign key cascades deletes from the class table.
    class_id = Column(
        UUID(as_uuid=True),
        ForeignKey("class.uuid", ondelete="CASCADE"),
        nullable=False,
    )
    # Spells attached to this pair; orphans are removed automatically.
    spells = relationship(
        "ModelSpell",
        backref="spell_variant_pair",
        cascade="all, delete-orphan",
    )
|
flexible
|
{
"blob_id": "4958d6d88b762e6fbe860123b7274c16b6452605",
"index": 7674,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass ModelSpellVariantPair(Base):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass ModelSpellVariantPair(Base):\n __tablename__ = 'spell_variant_pair'\n uuid = Column(UUID(as_uuid=True), server_default=sqlalchemy.text(\n 'uuid_generate_v4()'), unique=True, nullable=False, primary_key=True)\n class_id = Column(UUID(as_uuid=True), ForeignKey('class.uuid', ondelete\n ='CASCADE'), nullable=False)\n spells = relationship('ModelSpell', backref='spell_variant_pair',\n cascade='all, delete-orphan')\n",
"step-4": "import sqlalchemy\nfrom .base import Base\nfrom sqlalchemy import Column, Integer, String, ForeignKey\nfrom sqlalchemy.dialects.postgresql import UUID\nfrom sqlalchemy.orm import relationship\n\n\nclass ModelSpellVariantPair(Base):\n __tablename__ = 'spell_variant_pair'\n uuid = Column(UUID(as_uuid=True), server_default=sqlalchemy.text(\n 'uuid_generate_v4()'), unique=True, nullable=False, primary_key=True)\n class_id = Column(UUID(as_uuid=True), ForeignKey('class.uuid', ondelete\n ='CASCADE'), nullable=False)\n spells = relationship('ModelSpell', backref='spell_variant_pair',\n cascade='all, delete-orphan')\n",
"step-5": "import sqlalchemy\nfrom .base import Base\nfrom sqlalchemy import Column, Integer, String, ForeignKey\nfrom sqlalchemy.dialects.postgresql import UUID\nfrom sqlalchemy.orm import relationship\n\n\nclass ModelSpellVariantPair(Base):\n __tablename__ = \"spell_variant_pair\"\n\n uuid = Column(\n UUID(as_uuid=True),\n server_default=sqlalchemy.text(\"uuid_generate_v4()\"),\n unique=True,\n nullable=False,\n primary_key=True,\n )\n class_id = Column(\n UUID(as_uuid=True), ForeignKey(\"class.uuid\", ondelete=\"CASCADE\"), nullable=False\n )\n spells = relationship(\n \"ModelSpell\", backref=\"spell_variant_pair\", cascade=\"all, delete-orphan\"\n )\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def load_clusters(file_name):
if file_name not in load_clusters._cache:
cluster_data = ClusterData()
cluster_data.load_file(file_name)
else:
cluster_data = load_clusters._cache[file_name]
return cluster_data
<|reserved_special_token_0|>
def get_ngal_fit(param_fname, cluster_num, color, plot_fit=True, spider=
False, manual_calc=False):
param = dtk.Param(param_fname)
cluster_loc = param.get_string('cluster_loc')
if cluster_num is None:
cluster_num = param.get_int('cluster_load_num')
zmrh5_loc = param.get_string('zmrh5_loc')
zmr_sdss = ZMR(zmrh5_loc)
zmr_fit = ZMR('output/' + param_fname + '/zmr_lkhd_cores.param')
m_bins = zmr_fit.m_bins
r_bins = zmr_fit.r_bins
zmr_core_ngal, zmr_core_ngal_err = zmr_fit.get_ngal()
zmr_core_ngal = zmr_core_ngal[0]
zmr_core_ngal_err = zmr_core_ngal_err[0]
zmr_sdss_ngal, zmr_sdss_ngal_err = zmr_sdss.get_ngal()
zmr_sdss_ngal = zmr_sdss_ngal[0]
zmr_sdss_ngal_err = zmr_sdss_ngal_err[0]
if manual_calc:
model_fit_fname = ('figs/' + param_fname +
'/calc_likelihood_bounds.py/grid_fit_param.txt')
model_fit = load_fit_limits(model_fit_fname)
m_infall = 10 ** model_fit['mi']
if 'rd' in model_fit:
r_disrupt = model_fit['rd'] / 1000.0
else:
r_disrupt = np.inf
print(cluster_loc)
cluster_data = load_clusters(cluster_loc)
if cluster_num == -1:
cluster_num = cluster_data.num
cluster_ngal = np.zeros(cluster_num)
cluster_m_i = np.zeros(cluster_num)
for i in range(0, cluster_num):
mass_index = cluster_data.get_cluster_mass_bin(i, m_bins)
cluster_m_i[i] = mass_index
cluster_ngal[i] = cluster_data.get_ngal(i, m_infall, r_disrupt)[1]
ngal_mean = np.zeros(len(m_bins) - 1)
ngal_err = np.zeros(len(m_bins) - 1)
ngal_std = np.zeros(len(m_bins) - 1)
for i in range(0, len(m_bins) - 1):
slct = cluster_m_i == i
ngal_mean[i] = np.mean(cluster_ngal[slct])
ngal_std[i] = np.std(cluster_ngal[slct])
ngal_err[i] = ngal_std[i] / np.sqrt(np.sum(slct))
plt.plot(dtk.bins_avg(m_bins), ngal_mean, '-x', color=color, label=
'Ngal recalc')
if plot_fit:
plt.plot(dtk.bins_avg(m_bins), zmr_core_ngal, '-', color=color)
plt.fill_between(dtk.bins_avg(m_bins), zmr_core_ngal -
zmr_core_ngal_err, zmr_core_ngal + zmr_core_ngal_err, color=
color, alpha=0.3)
offset_amount = 1.025
if spider:
markerfacecolor = 'None'
markeredgecolor = color
xaxis_offset = offset_amount
lw = 1
else:
markerfacecolor = color
markeredgecolor = 'None'
xaxis_offset = 1.0 / offset_amount
lw = 2
if 'mstar-1' in param_fname and 'spider' in param_fname:
print('SPIDERSS!: ', zmr_sdss_ngal)
zmr_sdss_ngal[zmr_sdss_ngal < 0.1] = np.nan
plt.errorbar(dtk.bins_avg(m_bins) * xaxis_offset, zmr_sdss_ngal, yerr=
zmr_sdss_ngal_err, fmt='o', capsize=0, lw=lw, color=color,
markeredgecolor=markeredgecolor, markerfacecolor=markerfacecolor)
plt.yscale('log')
plt.xscale('log')
def format_plot():
p4 = plt.plot([], [], 'tab:purple', lw=5, label='{:1.2f}~L$_*$'.format(0.4)
)
p3 = plt.plot([], [], 'tab:red', lw=5, label='{:1.2f}~L$_*$'.format(0.63))
p2 = plt.plot([], [], 'tab:green', lw=5, label='{:1.2f}~L$_*$'.format(1.0))
p12 = plt.plot([], [], 'tab:orange', lw=5, label='{:1.2f}~L$_*$'.format
(1.58))
p1 = plt.plot([], [], 'tab:blue', lw=5, label='{:1.2f}~L$_*$'.format(2.5))
plt.errorbar([], [], yerr=[], fmt='o', lw=2, color='k', label=
'redMaPPer', capsize=0)
plt.plot([], [], color='k', label='Core Model')
plt.legend(ncol=2, loc='best', framealpha=0.0)
plt.xlabel('M$_{200c}$ [h$^{-1}$ M$_\\odot$]')
plt.ylabel('Projected N$_{\\rm{gal}}$')
plt.ylim([0.1, 3000.0])
plt.xlim([100000000000000.0, 5000000000000000.0])
plt.tight_layout()
def plot_ngal_fits():
get_ngal_fit('params/cfn/simet/mstar1/mean/a3_rd.param', None, 'c')
get_ngal_fit('params/cfn/simet/mstar0.5/mean/a3_rd.param', None, 'g')
get_ngal_fit('params/cfn/simet/mstar0/mean/a3_rd.param', None, 'b')
get_ngal_fit('params/cfn/simet/mstar-1/mean/a3_rd.param', None, 'r')
get_ngal_fit('params/cfn/spider/mstar1/mean/spider_rd.param', None, 'c',
plot_fit=False, spider=True)
get_ngal_fit('params/cfn/spider/mstar0.5/mean/spider_rd.param', None,
'g', plot_fit=False, spider=True)
get_ngal_fit('params/cfn/spider/mstar0/mean/spider_rd.param', None, 'b',
plot_fit=False, spider=True)
get_ngal_fit('params/cfn/spider/mstar-1/mean/spider_rd.param', None,
'r', plot_fit=False, spider=True)
format_plot()
def plot_ngal_fits2(pattern, mstars):
color_cycle = ['tab:blue', 'tab:orange', 'tab:green', 'tab:red',
'tab:purple', 'tab:brown', 'tab:pink', 'tab:gray', 'tab:olive',
'tab:cyan']
for mstar, color in zip(mstars, color_cycle):
get_ngal_fit(pattern.replace('${mstarval}', mstar), None, color)
format_plot()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if os.environ.get('DISPLAY') is None:
matplotlib.use('Agg')
<|reserved_special_token_0|>
rc('text', usetex=True)
rc('font', **{'family': 'serif', 'serif': ['Computer Modern Roman']})
rc('font', size=18)
def load_clusters(file_name):
if file_name not in load_clusters._cache:
cluster_data = ClusterData()
cluster_data.load_file(file_name)
else:
cluster_data = load_clusters._cache[file_name]
return cluster_data
<|reserved_special_token_0|>
def get_ngal_fit(param_fname, cluster_num, color, plot_fit=True, spider=
False, manual_calc=False):
param = dtk.Param(param_fname)
cluster_loc = param.get_string('cluster_loc')
if cluster_num is None:
cluster_num = param.get_int('cluster_load_num')
zmrh5_loc = param.get_string('zmrh5_loc')
zmr_sdss = ZMR(zmrh5_loc)
zmr_fit = ZMR('output/' + param_fname + '/zmr_lkhd_cores.param')
m_bins = zmr_fit.m_bins
r_bins = zmr_fit.r_bins
zmr_core_ngal, zmr_core_ngal_err = zmr_fit.get_ngal()
zmr_core_ngal = zmr_core_ngal[0]
zmr_core_ngal_err = zmr_core_ngal_err[0]
zmr_sdss_ngal, zmr_sdss_ngal_err = zmr_sdss.get_ngal()
zmr_sdss_ngal = zmr_sdss_ngal[0]
zmr_sdss_ngal_err = zmr_sdss_ngal_err[0]
if manual_calc:
model_fit_fname = ('figs/' + param_fname +
'/calc_likelihood_bounds.py/grid_fit_param.txt')
model_fit = load_fit_limits(model_fit_fname)
m_infall = 10 ** model_fit['mi']
if 'rd' in model_fit:
r_disrupt = model_fit['rd'] / 1000.0
else:
r_disrupt = np.inf
print(cluster_loc)
cluster_data = load_clusters(cluster_loc)
if cluster_num == -1:
cluster_num = cluster_data.num
cluster_ngal = np.zeros(cluster_num)
cluster_m_i = np.zeros(cluster_num)
for i in range(0, cluster_num):
mass_index = cluster_data.get_cluster_mass_bin(i, m_bins)
cluster_m_i[i] = mass_index
cluster_ngal[i] = cluster_data.get_ngal(i, m_infall, r_disrupt)[1]
ngal_mean = np.zeros(len(m_bins) - 1)
ngal_err = np.zeros(len(m_bins) - 1)
ngal_std = np.zeros(len(m_bins) - 1)
for i in range(0, len(m_bins) - 1):
slct = cluster_m_i == i
ngal_mean[i] = np.mean(cluster_ngal[slct])
ngal_std[i] = np.std(cluster_ngal[slct])
ngal_err[i] = ngal_std[i] / np.sqrt(np.sum(slct))
plt.plot(dtk.bins_avg(m_bins), ngal_mean, '-x', color=color, label=
'Ngal recalc')
if plot_fit:
plt.plot(dtk.bins_avg(m_bins), zmr_core_ngal, '-', color=color)
plt.fill_between(dtk.bins_avg(m_bins), zmr_core_ngal -
zmr_core_ngal_err, zmr_core_ngal + zmr_core_ngal_err, color=
color, alpha=0.3)
offset_amount = 1.025
if spider:
markerfacecolor = 'None'
markeredgecolor = color
xaxis_offset = offset_amount
lw = 1
else:
markerfacecolor = color
markeredgecolor = 'None'
xaxis_offset = 1.0 / offset_amount
lw = 2
if 'mstar-1' in param_fname and 'spider' in param_fname:
print('SPIDERSS!: ', zmr_sdss_ngal)
zmr_sdss_ngal[zmr_sdss_ngal < 0.1] = np.nan
plt.errorbar(dtk.bins_avg(m_bins) * xaxis_offset, zmr_sdss_ngal, yerr=
zmr_sdss_ngal_err, fmt='o', capsize=0, lw=lw, color=color,
markeredgecolor=markeredgecolor, markerfacecolor=markerfacecolor)
plt.yscale('log')
plt.xscale('log')
def format_plot():
p4 = plt.plot([], [], 'tab:purple', lw=5, label='{:1.2f}~L$_*$'.format(0.4)
)
p3 = plt.plot([], [], 'tab:red', lw=5, label='{:1.2f}~L$_*$'.format(0.63))
p2 = plt.plot([], [], 'tab:green', lw=5, label='{:1.2f}~L$_*$'.format(1.0))
p12 = plt.plot([], [], 'tab:orange', lw=5, label='{:1.2f}~L$_*$'.format
(1.58))
p1 = plt.plot([], [], 'tab:blue', lw=5, label='{:1.2f}~L$_*$'.format(2.5))
plt.errorbar([], [], yerr=[], fmt='o', lw=2, color='k', label=
'redMaPPer', capsize=0)
plt.plot([], [], color='k', label='Core Model')
plt.legend(ncol=2, loc='best', framealpha=0.0)
plt.xlabel('M$_{200c}$ [h$^{-1}$ M$_\\odot$]')
plt.ylabel('Projected N$_{\\rm{gal}}$')
plt.ylim([0.1, 3000.0])
plt.xlim([100000000000000.0, 5000000000000000.0])
plt.tight_layout()
def plot_ngal_fits():
get_ngal_fit('params/cfn/simet/mstar1/mean/a3_rd.param', None, 'c')
get_ngal_fit('params/cfn/simet/mstar0.5/mean/a3_rd.param', None, 'g')
get_ngal_fit('params/cfn/simet/mstar0/mean/a3_rd.param', None, 'b')
get_ngal_fit('params/cfn/simet/mstar-1/mean/a3_rd.param', None, 'r')
get_ngal_fit('params/cfn/spider/mstar1/mean/spider_rd.param', None, 'c',
plot_fit=False, spider=True)
get_ngal_fit('params/cfn/spider/mstar0.5/mean/spider_rd.param', None,
'g', plot_fit=False, spider=True)
get_ngal_fit('params/cfn/spider/mstar0/mean/spider_rd.param', None, 'b',
plot_fit=False, spider=True)
get_ngal_fit('params/cfn/spider/mstar-1/mean/spider_rd.param', None,
'r', plot_fit=False, spider=True)
format_plot()
def plot_ngal_fits2(pattern, mstars):
color_cycle = ['tab:blue', 'tab:orange', 'tab:green', 'tab:red',
'tab:purple', 'tab:brown', 'tab:pink', 'tab:gray', 'tab:olive',
'tab:cyan']
for mstar, color in zip(mstars, color_cycle):
get_ngal_fit(pattern.replace('${mstarval}', mstar), None, color)
format_plot()
if __name__ == '__main__':
if len(sys.argv) > 2:
plot_name = sys.argv[1]
else:
plot_name = 'OR_McClintock2019'
mstars = ['-1', '-0.5', '0', '0.5', '1']
if plot_name == 'OR_Simet2017':
pattern = (
'params/rmba/auto/make_all_OR.high_richness.low_rez.min20.sh/crit/mstar${mstarval}/OR_rd_zoom.param'
)
plot_ngal_fits2(pattern, mstars)
elif plot_name == 'OR_McClintock2019':
pattern = (
'params/rmba/auto/make_all_OR.McClintock.high_richness.low_rez.min20.sh/crit/mstar${mstarval}/OR_rd_zoom.param'
)
plot_ngal_fits2(pattern, mstars)
dtk.save_figs('figs/' + __file__ + '/' + plot_name + '/', extension='.pdf')
plt.show()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if os.environ.get('DISPLAY') is None:
matplotlib.use('Agg')
<|reserved_special_token_0|>
rc('text', usetex=True)
rc('font', **{'family': 'serif', 'serif': ['Computer Modern Roman']})
rc('font', size=18)
def load_clusters(file_name):
if file_name not in load_clusters._cache:
cluster_data = ClusterData()
cluster_data.load_file(file_name)
else:
cluster_data = load_clusters._cache[file_name]
return cluster_data
load_clusters._cache = {}
def get_ngal_fit(param_fname, cluster_num, color, plot_fit=True, spider=
False, manual_calc=False):
param = dtk.Param(param_fname)
cluster_loc = param.get_string('cluster_loc')
if cluster_num is None:
cluster_num = param.get_int('cluster_load_num')
zmrh5_loc = param.get_string('zmrh5_loc')
zmr_sdss = ZMR(zmrh5_loc)
zmr_fit = ZMR('output/' + param_fname + '/zmr_lkhd_cores.param')
m_bins = zmr_fit.m_bins
r_bins = zmr_fit.r_bins
zmr_core_ngal, zmr_core_ngal_err = zmr_fit.get_ngal()
zmr_core_ngal = zmr_core_ngal[0]
zmr_core_ngal_err = zmr_core_ngal_err[0]
zmr_sdss_ngal, zmr_sdss_ngal_err = zmr_sdss.get_ngal()
zmr_sdss_ngal = zmr_sdss_ngal[0]
zmr_sdss_ngal_err = zmr_sdss_ngal_err[0]
if manual_calc:
model_fit_fname = ('figs/' + param_fname +
'/calc_likelihood_bounds.py/grid_fit_param.txt')
model_fit = load_fit_limits(model_fit_fname)
m_infall = 10 ** model_fit['mi']
if 'rd' in model_fit:
r_disrupt = model_fit['rd'] / 1000.0
else:
r_disrupt = np.inf
print(cluster_loc)
cluster_data = load_clusters(cluster_loc)
if cluster_num == -1:
cluster_num = cluster_data.num
cluster_ngal = np.zeros(cluster_num)
cluster_m_i = np.zeros(cluster_num)
for i in range(0, cluster_num):
mass_index = cluster_data.get_cluster_mass_bin(i, m_bins)
cluster_m_i[i] = mass_index
cluster_ngal[i] = cluster_data.get_ngal(i, m_infall, r_disrupt)[1]
ngal_mean = np.zeros(len(m_bins) - 1)
ngal_err = np.zeros(len(m_bins) - 1)
ngal_std = np.zeros(len(m_bins) - 1)
for i in range(0, len(m_bins) - 1):
slct = cluster_m_i == i
ngal_mean[i] = np.mean(cluster_ngal[slct])
ngal_std[i] = np.std(cluster_ngal[slct])
ngal_err[i] = ngal_std[i] / np.sqrt(np.sum(slct))
plt.plot(dtk.bins_avg(m_bins), ngal_mean, '-x', color=color, label=
'Ngal recalc')
if plot_fit:
plt.plot(dtk.bins_avg(m_bins), zmr_core_ngal, '-', color=color)
plt.fill_between(dtk.bins_avg(m_bins), zmr_core_ngal -
zmr_core_ngal_err, zmr_core_ngal + zmr_core_ngal_err, color=
color, alpha=0.3)
offset_amount = 1.025
if spider:
markerfacecolor = 'None'
markeredgecolor = color
xaxis_offset = offset_amount
lw = 1
else:
markerfacecolor = color
markeredgecolor = 'None'
xaxis_offset = 1.0 / offset_amount
lw = 2
if 'mstar-1' in param_fname and 'spider' in param_fname:
print('SPIDERSS!: ', zmr_sdss_ngal)
zmr_sdss_ngal[zmr_sdss_ngal < 0.1] = np.nan
plt.errorbar(dtk.bins_avg(m_bins) * xaxis_offset, zmr_sdss_ngal, yerr=
zmr_sdss_ngal_err, fmt='o', capsize=0, lw=lw, color=color,
markeredgecolor=markeredgecolor, markerfacecolor=markerfacecolor)
plt.yscale('log')
plt.xscale('log')
def format_plot():
p4 = plt.plot([], [], 'tab:purple', lw=5, label='{:1.2f}~L$_*$'.format(0.4)
)
p3 = plt.plot([], [], 'tab:red', lw=5, label='{:1.2f}~L$_*$'.format(0.63))
p2 = plt.plot([], [], 'tab:green', lw=5, label='{:1.2f}~L$_*$'.format(1.0))
p12 = plt.plot([], [], 'tab:orange', lw=5, label='{:1.2f}~L$_*$'.format
(1.58))
p1 = plt.plot([], [], 'tab:blue', lw=5, label='{:1.2f}~L$_*$'.format(2.5))
plt.errorbar([], [], yerr=[], fmt='o', lw=2, color='k', label=
'redMaPPer', capsize=0)
plt.plot([], [], color='k', label='Core Model')
plt.legend(ncol=2, loc='best', framealpha=0.0)
plt.xlabel('M$_{200c}$ [h$^{-1}$ M$_\\odot$]')
plt.ylabel('Projected N$_{\\rm{gal}}$')
plt.ylim([0.1, 3000.0])
plt.xlim([100000000000000.0, 5000000000000000.0])
plt.tight_layout()
def plot_ngal_fits():
get_ngal_fit('params/cfn/simet/mstar1/mean/a3_rd.param', None, 'c')
get_ngal_fit('params/cfn/simet/mstar0.5/mean/a3_rd.param', None, 'g')
get_ngal_fit('params/cfn/simet/mstar0/mean/a3_rd.param', None, 'b')
get_ngal_fit('params/cfn/simet/mstar-1/mean/a3_rd.param', None, 'r')
get_ngal_fit('params/cfn/spider/mstar1/mean/spider_rd.param', None, 'c',
plot_fit=False, spider=True)
get_ngal_fit('params/cfn/spider/mstar0.5/mean/spider_rd.param', None,
'g', plot_fit=False, spider=True)
get_ngal_fit('params/cfn/spider/mstar0/mean/spider_rd.param', None, 'b',
plot_fit=False, spider=True)
get_ngal_fit('params/cfn/spider/mstar-1/mean/spider_rd.param', None,
'r', plot_fit=False, spider=True)
format_plot()
def plot_ngal_fits2(pattern, mstars):
color_cycle = ['tab:blue', 'tab:orange', 'tab:green', 'tab:red',
'tab:purple', 'tab:brown', 'tab:pink', 'tab:gray', 'tab:olive',
'tab:cyan']
for mstar, color in zip(mstars, color_cycle):
get_ngal_fit(pattern.replace('${mstarval}', mstar), None, color)
format_plot()
if __name__ == '__main__':
if len(sys.argv) > 2:
plot_name = sys.argv[1]
else:
plot_name = 'OR_McClintock2019'
mstars = ['-1', '-0.5', '0', '0.5', '1']
if plot_name == 'OR_Simet2017':
pattern = (
'params/rmba/auto/make_all_OR.high_richness.low_rez.min20.sh/crit/mstar${mstarval}/OR_rd_zoom.param'
)
plot_ngal_fits2(pattern, mstars)
elif plot_name == 'OR_McClintock2019':
pattern = (
'params/rmba/auto/make_all_OR.McClintock.high_richness.low_rez.min20.sh/crit/mstar${mstarval}/OR_rd_zoom.param'
)
plot_ngal_fits2(pattern, mstars)
dtk.save_figs('figs/' + __file__ + '/' + plot_name + '/', extension='.pdf')
plt.show()
<|reserved_special_token_1|>
from __future__ import print_function, division
import numpy as np
import matplotlib
import os
if os.environ.get('DISPLAY') is None:
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.colors as clr
import dtk
import sys
import time
import numpy.random
from matplotlib.colors import LogNorm
from scipy.optimize import minimize
from calc_ngal import *
from generate_parameter_dist import *
from zmr import ZMR
from matplotlib import rc
rc('text', usetex=True)
rc('font', **{'family': 'serif', 'serif': ['Computer Modern Roman']})
rc('font', size=18)
def load_clusters(file_name):
    """Load a ClusterData file, memoizing the result by file name.

    Repeated calls with the same ``file_name`` return the cached object
    instead of re-reading the (large) cluster catalog from disk.
    """
    if file_name not in load_clusters._cache:
        cluster_data = ClusterData()
        cluster_data.load_file(file_name)
        # Bug fix: the freshly loaded data was never stored, so the cache
        # stayed empty and every call re-read the file from disk.
        load_clusters._cache[file_name] = cluster_data
    else:
        cluster_data = load_clusters._cache[file_name]
    return cluster_data


load_clusters._cache = {}
def get_ngal_fit(param_fname, cluster_num, color, plot_fit=True, spider=
    False, manual_calc=False):
    """Plot Ngal(M200c) for one core-model fit against the survey data.

    Parameters
    ----------
    param_fname : str
        Path to the dtk parameter file of the fit.
    cluster_num : int or None
        Number of clusters for the manual recalculation; ``None`` reads it
        from the parameter file, ``-1`` uses all clusters.
    color : str
        Matplotlib color shared by all curves/points of this fit.
    plot_fit : bool
        Draw the fitted core-model curve with its error band.
    spider : bool
        Use open markers and a rightward x-offset (SPIDERS styling).
    manual_calc : bool
        Recompute Ngal directly from the cluster catalog using the
        best-fit (M_infall, R_disrupt) parameters.
    """
    param = dtk.Param(param_fname)
    cluster_loc = param.get_string('cluster_loc')
    if cluster_num is None:
        cluster_num = param.get_int('cluster_load_num')
    zmrh5_loc = param.get_string('zmrh5_loc')
    zmr_sdss = ZMR(zmrh5_loc)
    zmr_fit = ZMR('output/' + param_fname + '/zmr_lkhd_cores.param')
    m_bins = zmr_fit.m_bins
    # Ngal from the fitted core model (first redshift bin only).
    zmr_core_ngal, zmr_core_ngal_err = zmr_fit.get_ngal()
    zmr_core_ngal = zmr_core_ngal[0]
    zmr_core_ngal_err = zmr_core_ngal_err[0]
    # Ngal measured from the survey (SDSS/redMaPPer) ZMR.
    zmr_sdss_ngal, zmr_sdss_ngal_err = zmr_sdss.get_ngal()
    zmr_sdss_ngal = zmr_sdss_ngal[0]
    zmr_sdss_ngal_err = zmr_sdss_ngal_err[0]
    if manual_calc:
        # Bug fix: this whole recalculation previously ran unconditionally,
        # but m_infall/r_disrupt only exist when manual_calc is True, which
        # raised a NameError on the default code path.
        model_fit_fname = ('figs/' + param_fname +
            '/calc_likelihood_bounds.py/grid_fit_param.txt')
        model_fit = load_fit_limits(model_fit_fname)
        m_infall = 10 ** model_fit['mi']
        if 'rd' in model_fit:
            # Fit stores R_disrupt in kpc/h; convert to Mpc/h.
            r_disrupt = model_fit['rd'] / 1000.0
        else:
            r_disrupt = np.inf
        print(cluster_loc)
        cluster_data = load_clusters(cluster_loc)
        if cluster_num == -1:
            cluster_num = cluster_data.num
        cluster_ngal = np.zeros(cluster_num)
        cluster_m_i = np.zeros(cluster_num)
        for i in range(0, cluster_num):
            mass_index = cluster_data.get_cluster_mass_bin(i, m_bins)
            cluster_m_i[i] = mass_index
            cluster_ngal[i] = cluster_data.get_ngal(i, m_infall, r_disrupt)[1]
        # Mean and standard error of Ngal per mass bin.
        ngal_mean = np.zeros(len(m_bins) - 1)
        ngal_err = np.zeros(len(m_bins) - 1)
        ngal_std = np.zeros(len(m_bins) - 1)
        for i in range(0, len(m_bins) - 1):
            slct = cluster_m_i == i
            ngal_mean[i] = np.mean(cluster_ngal[slct])
            ngal_std[i] = np.std(cluster_ngal[slct])
            ngal_err[i] = ngal_std[i] / np.sqrt(np.sum(slct))
        plt.plot(dtk.bins_avg(m_bins), ngal_mean, '-x', color=color,
            label='Ngal recalc')
    if plot_fit:
        plt.plot(dtk.bins_avg(m_bins), zmr_core_ngal, '-', color=color)
        plt.fill_between(dtk.bins_avg(m_bins), zmr_core_ngal -
            zmr_core_ngal_err, zmr_core_ngal + zmr_core_ngal_err, color=
            color, alpha=0.3)
    offset_amount = 1.025
    if spider:
        # Open markers, slight rightward offset so points do not overlap
        # the filled markers of the fiducial sample.
        markerfacecolor = 'None'
        markeredgecolor = color
        xaxis_offset = offset_amount
        lw = 1
    else:
        markerfacecolor = color
        markeredgecolor = 'None'
        xaxis_offset = 1.0 / offset_amount
        lw = 2
    if 'mstar-1' in param_fname and 'spider' in param_fname:
        print('SPIDERSS!: ', zmr_sdss_ngal)
        # Suppress near-empty bins so they do not render at the axis floor.
        zmr_sdss_ngal[zmr_sdss_ngal < 0.1] = np.nan
    plt.errorbar(dtk.bins_avg(m_bins) * xaxis_offset, zmr_sdss_ngal, yerr=
        zmr_sdss_ngal_err, fmt='o', capsize=0, lw=lw, color=color,
        markeredgecolor=markeredgecolor, markerfacecolor=markerfacecolor)
    plt.yscale('log')
    plt.xscale('log')
def format_plot():
    """Apply the shared legend, axis labels, log-log limits, and layout
    used by every Ngal-vs-mass figure in this script.

    The empty ``plot()``/``errorbar()`` calls create legend proxy
    artists: one thick colored line per luminosity threshold plus black
    markers/line for the observed data and the core model. The previous
    version bound these handles to unused locals (p1..p4, p12); only the
    ``label=`` side effect matters, so the bindings are dropped.
    """
    # Proxy artists: one thick line per luminosity cut.
    plt.plot([], [], 'tab:purple', lw=5, label='{:1.2f}~L$_*$'.format(0.4))
    plt.plot([], [], 'tab:red', lw=5, label='{:1.2f}~L$_*$'.format(0.63))
    plt.plot([], [], 'tab:green', lw=5, label='{:1.2f}~L$_*$'.format(1.0))
    plt.plot([], [], 'tab:orange', lw=5, label='{:1.2f}~L$_*$'.format(1.58))
    plt.plot([], [], 'tab:blue', lw=5, label='{:1.2f}~L$_*$'.format(2.5))
    # Proxy artists for the observed points and the fitted core model.
    plt.errorbar([], [], yerr=[], fmt='o', lw=2, color='k', label=
        'redMaPPer', capsize=0)
    plt.plot([], [], color='k', label='Core Model')
    plt.legend(ncol=2, loc='best', framealpha=0.0)
    plt.xlabel('M$_{200c}$ [h$^{-1}$ M$_\\odot$]')
    plt.ylabel('Projected N$_{\\rm{gal}}$')
    # Fixed limits so figures for different thresholds are comparable.
    plt.ylim([0.1, 3000.0])
    plt.xlim([1e14, 5e15])
    plt.tight_layout()
def plot_ngal_fits():
    """Draw the four-threshold Ngal(M) comparison figure.

    First the redMaPPer-calibrated runs (fit curve plus observed
    points), then the SPIDERS observed points only (plot_fit=False),
    reusing the same color per luminosity cut.
    """
    runs = [('mstar1', 'c'), ('mstar0.5', 'g'), ('mstar0', 'b'),
            ('mstar-1', 'r')]
    for cut, col in runs:
        get_ngal_fit('params/cfn/simet/{}/mean/a3_rd.param'.format(cut),
                     None, col)
    for cut, col in runs:
        get_ngal_fit('params/cfn/spider/{}/mean/spider_rd.param'.format(cut),
                     None, col, plot_fit=False, spider=True)
    format_plot()
def plot_ngal_fits2(pattern, mstars):
    """Plot Ngal fits for one parameter-file pattern across m* thresholds.

    Parameters
    ----------
    pattern : str
        Parameter-file path containing the literal placeholder
        '${mstarval}'.
    mstars : sequence of str
        Threshold labels substituted into *pattern*, one run each.

    Colors follow the matplotlib 'tab' palette and wrap around when more
    than ten thresholds are given (the previous zip() silently dropped
    the extras).
    """
    color_cycle = ['tab:blue', 'tab:orange', 'tab:green', 'tab:red',
                   'tab:purple', 'tab:brown', 'tab:pink', 'tab:gray',
                   'tab:olive', 'tab:cyan']
    for i, mstar in enumerate(mstars):
        color = color_cycle[i % len(color_cycle)]
        get_ngal_fit(pattern.replace('${mstarval}', mstar), None, color)
    format_plot()
if __name__ == '__main__':
    # sys.argv[1] exists whenever len(sys.argv) >= 2; the previous test
    # (> 2) demanded an unused second argument before reading the first.
    if len(sys.argv) >= 2:
        plot_name = sys.argv[1]
    else:
        plot_name = 'OR_McClintock2019'
    mstars = ['-1', '-0.5', '0', '0.5', '1']
    if plot_name == 'OR_Simet2017':
        pattern = (
            'params/rmba/auto/make_all_OR.high_richness.low_rez.min20.sh/crit/mstar${mstarval}/OR_rd_zoom.param'
            )
        plot_ngal_fits2(pattern, mstars)
    elif plot_name == 'OR_McClintock2019':
        pattern = (
            'params/rmba/auto/make_all_OR.McClintock.high_richness.low_rez.min20.sh/crit/mstar${mstarval}/OR_rd_zoom.param'
            )
        plot_ngal_fits2(pattern, mstars)
    else:
        # Fail loudly on a typo'd plot name instead of silently saving
        # an empty figure directory.
        raise ValueError('unknown plot name: {}'.format(plot_name))
    dtk.save_figs('figs/' + __file__ + '/' + plot_name + '/', extension='.pdf')
    plt.show()
<|reserved_special_token_1|>
#!/usr/bin/env python2.7
from __future__ import print_function, division
import numpy as np
import matplotlib
import os
#checks if there is a display to use.
if os.environ.get('DISPLAY') is None:
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.colors as clr
import dtk
import sys
import time
import numpy.random
from matplotlib.colors import LogNorm
from scipy.optimize import minimize
from calc_ngal import *
from generate_parameter_dist import *
from zmr import ZMR
from matplotlib import rc
# Render all text with LaTeX using the Computer Modern serif face at
# 18 pt so labels like M$_{200c}$ typeset properly.
rc('text', usetex=True)
rc('font', **{'family':'serif', 'serif':['Computer Modern Roman'], })
rc('font', size=18)
def load_clusters(file_name):
    """Load a ClusterData catalog from *file_name*, memoized by path.

    The previous version checked ``load_clusters._cache`` but never
    stored into it, so every call re-read the file from disk; freshly
    loaded data is now cached.
    """
    if file_name not in load_clusters._cache:
        cluster_data = ClusterData()
        cluster_data.load_file(file_name)
        load_clusters._cache[file_name] = cluster_data
    else:
        cluster_data = load_clusters._cache[file_name]
    return cluster_data

# Per-process cache of loaded cluster catalogs, keyed by file path.
load_clusters._cache = {}
def get_ngal_fit(param_fname, cluster_num, color, plot_fit=True, spider=False, manual_calc=False):
    """Plot projected Ngal vs. cluster mass for one parameter file.

    Draws the observed (redMaPPer or SPIDERS) Ngal points and,
    optionally, the fitted core-model curve with its error band.

    Parameters
    ----------
    param_fname : str
        Path to the dtk parameter file for this run.
    cluster_num : int or None
        Number of clusters for the manual recalculation; None reads
        'cluster_load_num' from the parameter file, -1 means all.
    color : str
        Matplotlib color shared by all series from this run.
    plot_fit : bool
        If True, draw the fitted core-model Ngal curve and error band.
    spider : bool
        If True, style the observed points as SPIDERS (open markers,
        opposite x-offset, thinner error bars).
    manual_calc : bool
        If True, recompute Ngal directly from the cluster catalog with
        the best-fit (M_infall, R_disrupt) and plot it as a cross-check.
    """
    param = dtk.Param(param_fname)
    cluster_loc = param.get_string('cluster_loc')
    if cluster_num is None:
        cluster_num = param.get_int('cluster_load_num')
    zmrh5_loc = param.get_string('zmrh5_loc')
    zmr_sdss = ZMR(zmrh5_loc)
    zmr_fit = ZMR("output/"+param_fname+"/zmr_lkhd_cores.param")
    m_bins = zmr_fit.m_bins
    r_bins = zmr_fit.r_bins
    zmr_core_ngal, zmr_core_ngal_err = zmr_fit.get_ngal() # only one z-bin, so we don't select it out
    zmr_core_ngal = zmr_core_ngal[0]
    zmr_core_ngal_err = zmr_core_ngal_err[0]
    zmr_sdss_ngal, zmr_sdss_ngal_err = zmr_sdss.get_ngal()
    zmr_sdss_ngal = zmr_sdss_ngal[0]
    zmr_sdss_ngal_err = zmr_sdss_ngal_err[0]

    if manual_calc:
        model_fit_fname = "figs/"+param_fname+"/calc_likelihood_bounds.py/grid_fit_param.txt"
        model_fit = load_fit_limits(model_fit_fname)
        m_infall = 10**model_fit['mi']
        if 'rd' in model_fit:
            r_disrupt = model_fit['rd']/1000.0 #convert to mpc/h from kpc/h
        else:
            r_disrupt = np.inf
        print(cluster_loc)
        cluster_data = load_clusters(cluster_loc)
        if cluster_num == -1:
            cluster_num = cluster_data.num
        cluster_ngal = np.zeros(cluster_num)
        cluster_m_i = np.zeros(cluster_num)
        for i in range(0, cluster_num):
            mass_index = cluster_data.get_cluster_mass_bin(i, m_bins)
            cluster_m_i[i] = mass_index
            cluster_ngal[i] = cluster_data.get_ngal(i, m_infall, r_disrupt)[1]
        # Per-mass-bin mean and standard error of the recalculated Ngal.
        ngal_mean = np.zeros(len(m_bins)-1)
        ngal_err = np.zeros(len(m_bins)-1)
        ngal_std = np.zeros(len(m_bins)-1)
        for i in range(0, len(m_bins)-1):
            slct = cluster_m_i == i
            ngal_mean[i] = np.mean(cluster_ngal[slct])
            ngal_std[i] = np.std(cluster_ngal[slct])
            ngal_err[i] = ngal_std[i]/np.sqrt(np.sum(slct))
        # BUGFIX: this plot used to sit at function level, so every call
        # with the default manual_calc=False raised NameError on
        # ngal_mean; it belongs inside the manual_calc branch.
        plt.plot(dtk.bins_avg(m_bins), ngal_mean, '-x', color=color, label='Ngal recalc')
    if plot_fit:
        plt.plot(dtk.bins_avg(m_bins), zmr_core_ngal, '-', color=color)
        plt.fill_between(dtk.bins_avg(m_bins), zmr_core_ngal-zmr_core_ngal_err, zmr_core_ngal+zmr_core_ngal_err, color=color, alpha=0.3)
    # Nudge observed points off the bin centers so redMaPPer and SPIDERS
    # markers for the same run do not overlap on the shared axes.
    offset_amount = 1.025
    if spider:
        markerfacecolor='None'
        markeredgecolor=color
        xaxis_offset=offset_amount
        lw = 1
    else:
        markerfacecolor=color
        markeredgecolor='None'
        xaxis_offset=1./offset_amount
        lw = 2

    # remove problematic 2.5 L* low mass cluster in the spider sample
    if "mstar-1" in param_fname and "spider" in param_fname:
        print("SPIDERSS!: ", zmr_sdss_ngal)
        zmr_sdss_ngal[zmr_sdss_ngal < 0.1 ] = np.nan
    plt.errorbar(dtk.bins_avg(m_bins)*xaxis_offset, zmr_sdss_ngal,
                 yerr=zmr_sdss_ngal_err, fmt='o', capsize=0, lw=lw, color=color,
                 markeredgecolor=markeredgecolor, markerfacecolor=markerfacecolor)
    plt.yscale('log')
    plt.xscale('log')
def format_plot():
    """Apply the shared legend, axis labels, log-log limits, and layout.

    The empty plot()/errorbar() calls are legend proxy artists: one
    thick colored line per luminosity threshold plus black markers/line
    for the observed data and the core model. The p1..p4/p12 handles are
    unused; only the label= side effect matters.
    """
    p4 = plt.plot([],[], 'tab:purple', lw=5, label=r'{:1.2f}~L$_*$'.format(0.4))
    p3 = plt.plot([],[], 'tab:red', lw=5, label=r'{:1.2f}~L$_*$'.format(0.63))
    p2 = plt.plot([],[], 'tab:green', lw=5, label=r'{:1.2f}~L$_*$'.format(1.0))
    p12 = plt.plot([],[], 'tab:orange',lw=5, label=r'{:1.2f}~L$_*$'.format(1.58))
    p1 = plt.plot([],[], 'tab:blue',lw=5, label=r'{:1.2f}~L$_*$'.format(2.5))
    plt.errorbar([], [], yerr=[], fmt='o', lw=2, color='k', label="redMaPPer", capsize=0)
    plt.plot([], [], color='k', label="Core Model")
    # plt.errorbar([], [], yerr=[], fmt='o', lw=1, color='k', markerfacecolor='none', label='SPIDERS clusters', capsize=0)
    plt.legend(ncol=2, loc='best', framealpha=0.0)
    plt.xlabel(r'M$_{200c}$ [h$^{-1}$ M$_\odot$]')
    plt.ylabel(r'Projected N$_{\rm{gal}}$')
    # Fixed limits so figures for different thresholds are comparable.
    plt.ylim([1e-1, 3e3])
    plt.xlim([1e14, 5e15])
    plt.tight_layout()
def plot_ngal_fits():
    """Draw the standard four-threshold Ngal(M) comparison figure.

    First the redMaPPer-calibrated runs (fit curve plus observed points)
    for each luminosity cut, then the SPIDERS observed points only
    (plot_fit=False), reusing the same color per cut.
    """
    get_ngal_fit("params/cfn/simet/mstar1/mean/a3_rd.param", None, 'c')
    get_ngal_fit("params/cfn/simet/mstar0.5/mean/a3_rd.param", None, 'g')
    get_ngal_fit("params/cfn/simet/mstar0/mean/a3_rd.param", None, 'b')
    get_ngal_fit("params/cfn/simet/mstar-1/mean/a3_rd.param", None, 'r')
    #just spider points
    get_ngal_fit("params/cfn/spider/mstar1/mean/spider_rd.param", None, 'c', plot_fit=False, spider=True)
    get_ngal_fit("params/cfn/spider/mstar0.5/mean/spider_rd.param", None, 'g', plot_fit=False, spider=True)
    get_ngal_fit("params/cfn/spider/mstar0/mean/spider_rd.param", None, 'b', plot_fit=False, spider=True)
    get_ngal_fit("params/cfn/spider/mstar-1/mean/spider_rd.param", None, 'r', plot_fit=False, spider=True)
    # get_ngal_fit("params/cfn/spider/mstar0/mean/spider_rd.param", None, 'm', plot_fit=False, spider=True)
    # get_ngal_fit("params/cfn/spider/mstar0/mean/bcg_rd.param", None, 'c', plot_fit=False, spider=True)
    format_plot()
def plot_ngal_fits2(pattern, mstars):
    """Plot Ngal fits for each m* threshold substituted into *pattern*,
    one 'tab' palette color per threshold, then format the figure."""
    palette = ['tab:blue', 'tab:orange', 'tab:green', 'tab:red',
               'tab:purple', 'tab:brown', 'tab:pink', 'tab:gray',
               'tab:olive', 'tab:cyan']
    for color, mstar in zip(palette, mstars):
        param_fname = pattern.replace("${mstarval}", mstar)
        get_ngal_fit(param_fname, None, color)
    format_plot()
if __name__ == "__main__":
if len(sys.argv) > 2:
plot_name = sys.argv[1]
else:
plot_name = "OR_McClintock2019"
mstars = ['-1', '-0.5', '0', '0.5', '1']
if plot_name == "OR_Simet2017":
pattern = 'params/rmba/auto/make_all_OR.high_richness.low_rez.min20.sh/crit/mstar${mstarval}/OR_rd_zoom.param'
plot_ngal_fits2(pattern, mstars)
elif plot_name == "OR_McClintock2019":
pattern = 'params/rmba/auto/make_all_OR.McClintock.high_richness.low_rez.min20.sh/crit/mstar${mstarval}/OR_rd_zoom.param'
plot_ngal_fits2(pattern, mstars)
# plot_ngal_fits()
dtk.save_figs("figs/"+__file__+"/"+plot_name+"/", extension='.pdf')
plt.show()
|
flexible
|
{
"blob_id": "3acbb37809462ee69ff8792b4ad86b31dba5d630",
"index": 3821,
"step-1": "<mask token>\n\n\ndef load_clusters(file_name):\n if file_name not in load_clusters._cache:\n cluster_data = ClusterData()\n cluster_data.load_file(file_name)\n else:\n cluster_data = load_clusters._cache[file_name]\n return cluster_data\n\n\n<mask token>\n\n\ndef get_ngal_fit(param_fname, cluster_num, color, plot_fit=True, spider=\n False, manual_calc=False):\n param = dtk.Param(param_fname)\n cluster_loc = param.get_string('cluster_loc')\n if cluster_num is None:\n cluster_num = param.get_int('cluster_load_num')\n zmrh5_loc = param.get_string('zmrh5_loc')\n zmr_sdss = ZMR(zmrh5_loc)\n zmr_fit = ZMR('output/' + param_fname + '/zmr_lkhd_cores.param')\n m_bins = zmr_fit.m_bins\n r_bins = zmr_fit.r_bins\n zmr_core_ngal, zmr_core_ngal_err = zmr_fit.get_ngal()\n zmr_core_ngal = zmr_core_ngal[0]\n zmr_core_ngal_err = zmr_core_ngal_err[0]\n zmr_sdss_ngal, zmr_sdss_ngal_err = zmr_sdss.get_ngal()\n zmr_sdss_ngal = zmr_sdss_ngal[0]\n zmr_sdss_ngal_err = zmr_sdss_ngal_err[0]\n if manual_calc:\n model_fit_fname = ('figs/' + param_fname +\n '/calc_likelihood_bounds.py/grid_fit_param.txt')\n model_fit = load_fit_limits(model_fit_fname)\n m_infall = 10 ** model_fit['mi']\n if 'rd' in model_fit:\n r_disrupt = model_fit['rd'] / 1000.0\n else:\n r_disrupt = np.inf\n print(cluster_loc)\n cluster_data = load_clusters(cluster_loc)\n if cluster_num == -1:\n cluster_num = cluster_data.num\n cluster_ngal = np.zeros(cluster_num)\n cluster_m_i = np.zeros(cluster_num)\n for i in range(0, cluster_num):\n mass_index = cluster_data.get_cluster_mass_bin(i, m_bins)\n cluster_m_i[i] = mass_index\n cluster_ngal[i] = cluster_data.get_ngal(i, m_infall, r_disrupt)[1]\n ngal_mean = np.zeros(len(m_bins) - 1)\n ngal_err = np.zeros(len(m_bins) - 1)\n ngal_std = np.zeros(len(m_bins) - 1)\n for i in range(0, len(m_bins) - 1):\n slct = cluster_m_i == i\n ngal_mean[i] = np.mean(cluster_ngal[slct])\n ngal_std[i] = np.std(cluster_ngal[slct])\n ngal_err[i] = ngal_std[i] / np.sqrt(np.sum(slct))\n 
plt.plot(dtk.bins_avg(m_bins), ngal_mean, '-x', color=color, label=\n 'Ngal recalc')\n if plot_fit:\n plt.plot(dtk.bins_avg(m_bins), zmr_core_ngal, '-', color=color)\n plt.fill_between(dtk.bins_avg(m_bins), zmr_core_ngal -\n zmr_core_ngal_err, zmr_core_ngal + zmr_core_ngal_err, color=\n color, alpha=0.3)\n offset_amount = 1.025\n if spider:\n markerfacecolor = 'None'\n markeredgecolor = color\n xaxis_offset = offset_amount\n lw = 1\n else:\n markerfacecolor = color\n markeredgecolor = 'None'\n xaxis_offset = 1.0 / offset_amount\n lw = 2\n if 'mstar-1' in param_fname and 'spider' in param_fname:\n print('SPIDERSS!: ', zmr_sdss_ngal)\n zmr_sdss_ngal[zmr_sdss_ngal < 0.1] = np.nan\n plt.errorbar(dtk.bins_avg(m_bins) * xaxis_offset, zmr_sdss_ngal, yerr=\n zmr_sdss_ngal_err, fmt='o', capsize=0, lw=lw, color=color,\n markeredgecolor=markeredgecolor, markerfacecolor=markerfacecolor)\n plt.yscale('log')\n plt.xscale('log')\n\n\ndef format_plot():\n p4 = plt.plot([], [], 'tab:purple', lw=5, label='{:1.2f}~L$_*$'.format(0.4)\n )\n p3 = plt.plot([], [], 'tab:red', lw=5, label='{:1.2f}~L$_*$'.format(0.63))\n p2 = plt.plot([], [], 'tab:green', lw=5, label='{:1.2f}~L$_*$'.format(1.0))\n p12 = plt.plot([], [], 'tab:orange', lw=5, label='{:1.2f}~L$_*$'.format\n (1.58))\n p1 = plt.plot([], [], 'tab:blue', lw=5, label='{:1.2f}~L$_*$'.format(2.5))\n plt.errorbar([], [], yerr=[], fmt='o', lw=2, color='k', label=\n 'redMaPPer', capsize=0)\n plt.plot([], [], color='k', label='Core Model')\n plt.legend(ncol=2, loc='best', framealpha=0.0)\n plt.xlabel('M$_{200c}$ [h$^{-1}$ M$_\\\\odot$]')\n plt.ylabel('Projected N$_{\\\\rm{gal}}$')\n plt.ylim([0.1, 3000.0])\n plt.xlim([100000000000000.0, 5000000000000000.0])\n plt.tight_layout()\n\n\ndef plot_ngal_fits():\n get_ngal_fit('params/cfn/simet/mstar1/mean/a3_rd.param', None, 'c')\n get_ngal_fit('params/cfn/simet/mstar0.5/mean/a3_rd.param', None, 'g')\n get_ngal_fit('params/cfn/simet/mstar0/mean/a3_rd.param', None, 'b')\n 
get_ngal_fit('params/cfn/simet/mstar-1/mean/a3_rd.param', None, 'r')\n get_ngal_fit('params/cfn/spider/mstar1/mean/spider_rd.param', None, 'c',\n plot_fit=False, spider=True)\n get_ngal_fit('params/cfn/spider/mstar0.5/mean/spider_rd.param', None,\n 'g', plot_fit=False, spider=True)\n get_ngal_fit('params/cfn/spider/mstar0/mean/spider_rd.param', None, 'b',\n plot_fit=False, spider=True)\n get_ngal_fit('params/cfn/spider/mstar-1/mean/spider_rd.param', None,\n 'r', plot_fit=False, spider=True)\n format_plot()\n\n\ndef plot_ngal_fits2(pattern, mstars):\n color_cycle = ['tab:blue', 'tab:orange', 'tab:green', 'tab:red',\n 'tab:purple', 'tab:brown', 'tab:pink', 'tab:gray', 'tab:olive',\n 'tab:cyan']\n for mstar, color in zip(mstars, color_cycle):\n get_ngal_fit(pattern.replace('${mstarval}', mstar), None, color)\n format_plot()\n\n\n<mask token>\n",
"step-2": "<mask token>\nif os.environ.get('DISPLAY') is None:\n matplotlib.use('Agg')\n<mask token>\nrc('text', usetex=True)\nrc('font', **{'family': 'serif', 'serif': ['Computer Modern Roman']})\nrc('font', size=18)\n\n\ndef load_clusters(file_name):\n if file_name not in load_clusters._cache:\n cluster_data = ClusterData()\n cluster_data.load_file(file_name)\n else:\n cluster_data = load_clusters._cache[file_name]\n return cluster_data\n\n\n<mask token>\n\n\ndef get_ngal_fit(param_fname, cluster_num, color, plot_fit=True, spider=\n False, manual_calc=False):\n param = dtk.Param(param_fname)\n cluster_loc = param.get_string('cluster_loc')\n if cluster_num is None:\n cluster_num = param.get_int('cluster_load_num')\n zmrh5_loc = param.get_string('zmrh5_loc')\n zmr_sdss = ZMR(zmrh5_loc)\n zmr_fit = ZMR('output/' + param_fname + '/zmr_lkhd_cores.param')\n m_bins = zmr_fit.m_bins\n r_bins = zmr_fit.r_bins\n zmr_core_ngal, zmr_core_ngal_err = zmr_fit.get_ngal()\n zmr_core_ngal = zmr_core_ngal[0]\n zmr_core_ngal_err = zmr_core_ngal_err[0]\n zmr_sdss_ngal, zmr_sdss_ngal_err = zmr_sdss.get_ngal()\n zmr_sdss_ngal = zmr_sdss_ngal[0]\n zmr_sdss_ngal_err = zmr_sdss_ngal_err[0]\n if manual_calc:\n model_fit_fname = ('figs/' + param_fname +\n '/calc_likelihood_bounds.py/grid_fit_param.txt')\n model_fit = load_fit_limits(model_fit_fname)\n m_infall = 10 ** model_fit['mi']\n if 'rd' in model_fit:\n r_disrupt = model_fit['rd'] / 1000.0\n else:\n r_disrupt = np.inf\n print(cluster_loc)\n cluster_data = load_clusters(cluster_loc)\n if cluster_num == -1:\n cluster_num = cluster_data.num\n cluster_ngal = np.zeros(cluster_num)\n cluster_m_i = np.zeros(cluster_num)\n for i in range(0, cluster_num):\n mass_index = cluster_data.get_cluster_mass_bin(i, m_bins)\n cluster_m_i[i] = mass_index\n cluster_ngal[i] = cluster_data.get_ngal(i, m_infall, r_disrupt)[1]\n ngal_mean = np.zeros(len(m_bins) - 1)\n ngal_err = np.zeros(len(m_bins) - 1)\n ngal_std = np.zeros(len(m_bins) - 1)\n for i in 
range(0, len(m_bins) - 1):\n slct = cluster_m_i == i\n ngal_mean[i] = np.mean(cluster_ngal[slct])\n ngal_std[i] = np.std(cluster_ngal[slct])\n ngal_err[i] = ngal_std[i] / np.sqrt(np.sum(slct))\n plt.plot(dtk.bins_avg(m_bins), ngal_mean, '-x', color=color, label=\n 'Ngal recalc')\n if plot_fit:\n plt.plot(dtk.bins_avg(m_bins), zmr_core_ngal, '-', color=color)\n plt.fill_between(dtk.bins_avg(m_bins), zmr_core_ngal -\n zmr_core_ngal_err, zmr_core_ngal + zmr_core_ngal_err, color=\n color, alpha=0.3)\n offset_amount = 1.025\n if spider:\n markerfacecolor = 'None'\n markeredgecolor = color\n xaxis_offset = offset_amount\n lw = 1\n else:\n markerfacecolor = color\n markeredgecolor = 'None'\n xaxis_offset = 1.0 / offset_amount\n lw = 2\n if 'mstar-1' in param_fname and 'spider' in param_fname:\n print('SPIDERSS!: ', zmr_sdss_ngal)\n zmr_sdss_ngal[zmr_sdss_ngal < 0.1] = np.nan\n plt.errorbar(dtk.bins_avg(m_bins) * xaxis_offset, zmr_sdss_ngal, yerr=\n zmr_sdss_ngal_err, fmt='o', capsize=0, lw=lw, color=color,\n markeredgecolor=markeredgecolor, markerfacecolor=markerfacecolor)\n plt.yscale('log')\n plt.xscale('log')\n\n\ndef format_plot():\n p4 = plt.plot([], [], 'tab:purple', lw=5, label='{:1.2f}~L$_*$'.format(0.4)\n )\n p3 = plt.plot([], [], 'tab:red', lw=5, label='{:1.2f}~L$_*$'.format(0.63))\n p2 = plt.plot([], [], 'tab:green', lw=5, label='{:1.2f}~L$_*$'.format(1.0))\n p12 = plt.plot([], [], 'tab:orange', lw=5, label='{:1.2f}~L$_*$'.format\n (1.58))\n p1 = plt.plot([], [], 'tab:blue', lw=5, label='{:1.2f}~L$_*$'.format(2.5))\n plt.errorbar([], [], yerr=[], fmt='o', lw=2, color='k', label=\n 'redMaPPer', capsize=0)\n plt.plot([], [], color='k', label='Core Model')\n plt.legend(ncol=2, loc='best', framealpha=0.0)\n plt.xlabel('M$_{200c}$ [h$^{-1}$ M$_\\\\odot$]')\n plt.ylabel('Projected N$_{\\\\rm{gal}}$')\n plt.ylim([0.1, 3000.0])\n plt.xlim([100000000000000.0, 5000000000000000.0])\n plt.tight_layout()\n\n\ndef plot_ngal_fits():\n 
get_ngal_fit('params/cfn/simet/mstar1/mean/a3_rd.param', None, 'c')\n get_ngal_fit('params/cfn/simet/mstar0.5/mean/a3_rd.param', None, 'g')\n get_ngal_fit('params/cfn/simet/mstar0/mean/a3_rd.param', None, 'b')\n get_ngal_fit('params/cfn/simet/mstar-1/mean/a3_rd.param', None, 'r')\n get_ngal_fit('params/cfn/spider/mstar1/mean/spider_rd.param', None, 'c',\n plot_fit=False, spider=True)\n get_ngal_fit('params/cfn/spider/mstar0.5/mean/spider_rd.param', None,\n 'g', plot_fit=False, spider=True)\n get_ngal_fit('params/cfn/spider/mstar0/mean/spider_rd.param', None, 'b',\n plot_fit=False, spider=True)\n get_ngal_fit('params/cfn/spider/mstar-1/mean/spider_rd.param', None,\n 'r', plot_fit=False, spider=True)\n format_plot()\n\n\ndef plot_ngal_fits2(pattern, mstars):\n color_cycle = ['tab:blue', 'tab:orange', 'tab:green', 'tab:red',\n 'tab:purple', 'tab:brown', 'tab:pink', 'tab:gray', 'tab:olive',\n 'tab:cyan']\n for mstar, color in zip(mstars, color_cycle):\n get_ngal_fit(pattern.replace('${mstarval}', mstar), None, color)\n format_plot()\n\n\nif __name__ == '__main__':\n if len(sys.argv) > 2:\n plot_name = sys.argv[1]\n else:\n plot_name = 'OR_McClintock2019'\n mstars = ['-1', '-0.5', '0', '0.5', '1']\n if plot_name == 'OR_Simet2017':\n pattern = (\n 'params/rmba/auto/make_all_OR.high_richness.low_rez.min20.sh/crit/mstar${mstarval}/OR_rd_zoom.param'\n )\n plot_ngal_fits2(pattern, mstars)\n elif plot_name == 'OR_McClintock2019':\n pattern = (\n 'params/rmba/auto/make_all_OR.McClintock.high_richness.low_rez.min20.sh/crit/mstar${mstarval}/OR_rd_zoom.param'\n )\n plot_ngal_fits2(pattern, mstars)\n dtk.save_figs('figs/' + __file__ + '/' + plot_name + '/', extension='.pdf')\n plt.show()\n",
"step-3": "<mask token>\nif os.environ.get('DISPLAY') is None:\n matplotlib.use('Agg')\n<mask token>\nrc('text', usetex=True)\nrc('font', **{'family': 'serif', 'serif': ['Computer Modern Roman']})\nrc('font', size=18)\n\n\ndef load_clusters(file_name):\n if file_name not in load_clusters._cache:\n cluster_data = ClusterData()\n cluster_data.load_file(file_name)\n else:\n cluster_data = load_clusters._cache[file_name]\n return cluster_data\n\n\nload_clusters._cache = {}\n\n\ndef get_ngal_fit(param_fname, cluster_num, color, plot_fit=True, spider=\n False, manual_calc=False):\n param = dtk.Param(param_fname)\n cluster_loc = param.get_string('cluster_loc')\n if cluster_num is None:\n cluster_num = param.get_int('cluster_load_num')\n zmrh5_loc = param.get_string('zmrh5_loc')\n zmr_sdss = ZMR(zmrh5_loc)\n zmr_fit = ZMR('output/' + param_fname + '/zmr_lkhd_cores.param')\n m_bins = zmr_fit.m_bins\n r_bins = zmr_fit.r_bins\n zmr_core_ngal, zmr_core_ngal_err = zmr_fit.get_ngal()\n zmr_core_ngal = zmr_core_ngal[0]\n zmr_core_ngal_err = zmr_core_ngal_err[0]\n zmr_sdss_ngal, zmr_sdss_ngal_err = zmr_sdss.get_ngal()\n zmr_sdss_ngal = zmr_sdss_ngal[0]\n zmr_sdss_ngal_err = zmr_sdss_ngal_err[0]\n if manual_calc:\n model_fit_fname = ('figs/' + param_fname +\n '/calc_likelihood_bounds.py/grid_fit_param.txt')\n model_fit = load_fit_limits(model_fit_fname)\n m_infall = 10 ** model_fit['mi']\n if 'rd' in model_fit:\n r_disrupt = model_fit['rd'] / 1000.0\n else:\n r_disrupt = np.inf\n print(cluster_loc)\n cluster_data = load_clusters(cluster_loc)\n if cluster_num == -1:\n cluster_num = cluster_data.num\n cluster_ngal = np.zeros(cluster_num)\n cluster_m_i = np.zeros(cluster_num)\n for i in range(0, cluster_num):\n mass_index = cluster_data.get_cluster_mass_bin(i, m_bins)\n cluster_m_i[i] = mass_index\n cluster_ngal[i] = cluster_data.get_ngal(i, m_infall, r_disrupt)[1]\n ngal_mean = np.zeros(len(m_bins) - 1)\n ngal_err = np.zeros(len(m_bins) - 1)\n ngal_std = np.zeros(len(m_bins) - 1)\n 
for i in range(0, len(m_bins) - 1):\n slct = cluster_m_i == i\n ngal_mean[i] = np.mean(cluster_ngal[slct])\n ngal_std[i] = np.std(cluster_ngal[slct])\n ngal_err[i] = ngal_std[i] / np.sqrt(np.sum(slct))\n plt.plot(dtk.bins_avg(m_bins), ngal_mean, '-x', color=color, label=\n 'Ngal recalc')\n if plot_fit:\n plt.plot(dtk.bins_avg(m_bins), zmr_core_ngal, '-', color=color)\n plt.fill_between(dtk.bins_avg(m_bins), zmr_core_ngal -\n zmr_core_ngal_err, zmr_core_ngal + zmr_core_ngal_err, color=\n color, alpha=0.3)\n offset_amount = 1.025\n if spider:\n markerfacecolor = 'None'\n markeredgecolor = color\n xaxis_offset = offset_amount\n lw = 1\n else:\n markerfacecolor = color\n markeredgecolor = 'None'\n xaxis_offset = 1.0 / offset_amount\n lw = 2\n if 'mstar-1' in param_fname and 'spider' in param_fname:\n print('SPIDERSS!: ', zmr_sdss_ngal)\n zmr_sdss_ngal[zmr_sdss_ngal < 0.1] = np.nan\n plt.errorbar(dtk.bins_avg(m_bins) * xaxis_offset, zmr_sdss_ngal, yerr=\n zmr_sdss_ngal_err, fmt='o', capsize=0, lw=lw, color=color,\n markeredgecolor=markeredgecolor, markerfacecolor=markerfacecolor)\n plt.yscale('log')\n plt.xscale('log')\n\n\ndef format_plot():\n p4 = plt.plot([], [], 'tab:purple', lw=5, label='{:1.2f}~L$_*$'.format(0.4)\n )\n p3 = plt.plot([], [], 'tab:red', lw=5, label='{:1.2f}~L$_*$'.format(0.63))\n p2 = plt.plot([], [], 'tab:green', lw=5, label='{:1.2f}~L$_*$'.format(1.0))\n p12 = plt.plot([], [], 'tab:orange', lw=5, label='{:1.2f}~L$_*$'.format\n (1.58))\n p1 = plt.plot([], [], 'tab:blue', lw=5, label='{:1.2f}~L$_*$'.format(2.5))\n plt.errorbar([], [], yerr=[], fmt='o', lw=2, color='k', label=\n 'redMaPPer', capsize=0)\n plt.plot([], [], color='k', label='Core Model')\n plt.legend(ncol=2, loc='best', framealpha=0.0)\n plt.xlabel('M$_{200c}$ [h$^{-1}$ M$_\\\\odot$]')\n plt.ylabel('Projected N$_{\\\\rm{gal}}$')\n plt.ylim([0.1, 3000.0])\n plt.xlim([100000000000000.0, 5000000000000000.0])\n plt.tight_layout()\n\n\ndef plot_ngal_fits():\n 
get_ngal_fit('params/cfn/simet/mstar1/mean/a3_rd.param', None, 'c')\n get_ngal_fit('params/cfn/simet/mstar0.5/mean/a3_rd.param', None, 'g')\n get_ngal_fit('params/cfn/simet/mstar0/mean/a3_rd.param', None, 'b')\n get_ngal_fit('params/cfn/simet/mstar-1/mean/a3_rd.param', None, 'r')\n get_ngal_fit('params/cfn/spider/mstar1/mean/spider_rd.param', None, 'c',\n plot_fit=False, spider=True)\n get_ngal_fit('params/cfn/spider/mstar0.5/mean/spider_rd.param', None,\n 'g', plot_fit=False, spider=True)\n get_ngal_fit('params/cfn/spider/mstar0/mean/spider_rd.param', None, 'b',\n plot_fit=False, spider=True)\n get_ngal_fit('params/cfn/spider/mstar-1/mean/spider_rd.param', None,\n 'r', plot_fit=False, spider=True)\n format_plot()\n\n\ndef plot_ngal_fits2(pattern, mstars):\n color_cycle = ['tab:blue', 'tab:orange', 'tab:green', 'tab:red',\n 'tab:purple', 'tab:brown', 'tab:pink', 'tab:gray', 'tab:olive',\n 'tab:cyan']\n for mstar, color in zip(mstars, color_cycle):\n get_ngal_fit(pattern.replace('${mstarval}', mstar), None, color)\n format_plot()\n\n\nif __name__ == '__main__':\n if len(sys.argv) > 2:\n plot_name = sys.argv[1]\n else:\n plot_name = 'OR_McClintock2019'\n mstars = ['-1', '-0.5', '0', '0.5', '1']\n if plot_name == 'OR_Simet2017':\n pattern = (\n 'params/rmba/auto/make_all_OR.high_richness.low_rez.min20.sh/crit/mstar${mstarval}/OR_rd_zoom.param'\n )\n plot_ngal_fits2(pattern, mstars)\n elif plot_name == 'OR_McClintock2019':\n pattern = (\n 'params/rmba/auto/make_all_OR.McClintock.high_richness.low_rez.min20.sh/crit/mstar${mstarval}/OR_rd_zoom.param'\n )\n plot_ngal_fits2(pattern, mstars)\n dtk.save_figs('figs/' + __file__ + '/' + plot_name + '/', extension='.pdf')\n plt.show()\n",
"step-4": "from __future__ import print_function, division\nimport numpy as np\nimport matplotlib\nimport os\nif os.environ.get('DISPLAY') is None:\n matplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport matplotlib.colors as clr\nimport dtk\nimport sys\nimport time\nimport numpy.random\nfrom matplotlib.colors import LogNorm\nfrom scipy.optimize import minimize\nfrom calc_ngal import *\nfrom generate_parameter_dist import *\nfrom zmr import ZMR\nfrom matplotlib import rc\nrc('text', usetex=True)\nrc('font', **{'family': 'serif', 'serif': ['Computer Modern Roman']})\nrc('font', size=18)\n\n\ndef load_clusters(file_name):\n if file_name not in load_clusters._cache:\n cluster_data = ClusterData()\n cluster_data.load_file(file_name)\n else:\n cluster_data = load_clusters._cache[file_name]\n return cluster_data\n\n\nload_clusters._cache = {}\n\n\ndef get_ngal_fit(param_fname, cluster_num, color, plot_fit=True, spider=\n False, manual_calc=False):\n param = dtk.Param(param_fname)\n cluster_loc = param.get_string('cluster_loc')\n if cluster_num is None:\n cluster_num = param.get_int('cluster_load_num')\n zmrh5_loc = param.get_string('zmrh5_loc')\n zmr_sdss = ZMR(zmrh5_loc)\n zmr_fit = ZMR('output/' + param_fname + '/zmr_lkhd_cores.param')\n m_bins = zmr_fit.m_bins\n r_bins = zmr_fit.r_bins\n zmr_core_ngal, zmr_core_ngal_err = zmr_fit.get_ngal()\n zmr_core_ngal = zmr_core_ngal[0]\n zmr_core_ngal_err = zmr_core_ngal_err[0]\n zmr_sdss_ngal, zmr_sdss_ngal_err = zmr_sdss.get_ngal()\n zmr_sdss_ngal = zmr_sdss_ngal[0]\n zmr_sdss_ngal_err = zmr_sdss_ngal_err[0]\n if manual_calc:\n model_fit_fname = ('figs/' + param_fname +\n '/calc_likelihood_bounds.py/grid_fit_param.txt')\n model_fit = load_fit_limits(model_fit_fname)\n m_infall = 10 ** model_fit['mi']\n if 'rd' in model_fit:\n r_disrupt = model_fit['rd'] / 1000.0\n else:\n r_disrupt = np.inf\n print(cluster_loc)\n cluster_data = load_clusters(cluster_loc)\n if cluster_num == -1:\n cluster_num = cluster_data.num\n 
cluster_ngal = np.zeros(cluster_num)\n cluster_m_i = np.zeros(cluster_num)\n for i in range(0, cluster_num):\n mass_index = cluster_data.get_cluster_mass_bin(i, m_bins)\n cluster_m_i[i] = mass_index\n cluster_ngal[i] = cluster_data.get_ngal(i, m_infall, r_disrupt)[1]\n ngal_mean = np.zeros(len(m_bins) - 1)\n ngal_err = np.zeros(len(m_bins) - 1)\n ngal_std = np.zeros(len(m_bins) - 1)\n for i in range(0, len(m_bins) - 1):\n slct = cluster_m_i == i\n ngal_mean[i] = np.mean(cluster_ngal[slct])\n ngal_std[i] = np.std(cluster_ngal[slct])\n ngal_err[i] = ngal_std[i] / np.sqrt(np.sum(slct))\n plt.plot(dtk.bins_avg(m_bins), ngal_mean, '-x', color=color, label=\n 'Ngal recalc')\n if plot_fit:\n plt.plot(dtk.bins_avg(m_bins), zmr_core_ngal, '-', color=color)\n plt.fill_between(dtk.bins_avg(m_bins), zmr_core_ngal -\n zmr_core_ngal_err, zmr_core_ngal + zmr_core_ngal_err, color=\n color, alpha=0.3)\n offset_amount = 1.025\n if spider:\n markerfacecolor = 'None'\n markeredgecolor = color\n xaxis_offset = offset_amount\n lw = 1\n else:\n markerfacecolor = color\n markeredgecolor = 'None'\n xaxis_offset = 1.0 / offset_amount\n lw = 2\n if 'mstar-1' in param_fname and 'spider' in param_fname:\n print('SPIDERSS!: ', zmr_sdss_ngal)\n zmr_sdss_ngal[zmr_sdss_ngal < 0.1] = np.nan\n plt.errorbar(dtk.bins_avg(m_bins) * xaxis_offset, zmr_sdss_ngal, yerr=\n zmr_sdss_ngal_err, fmt='o', capsize=0, lw=lw, color=color,\n markeredgecolor=markeredgecolor, markerfacecolor=markerfacecolor)\n plt.yscale('log')\n plt.xscale('log')\n\n\ndef format_plot():\n p4 = plt.plot([], [], 'tab:purple', lw=5, label='{:1.2f}~L$_*$'.format(0.4)\n )\n p3 = plt.plot([], [], 'tab:red', lw=5, label='{:1.2f}~L$_*$'.format(0.63))\n p2 = plt.plot([], [], 'tab:green', lw=5, label='{:1.2f}~L$_*$'.format(1.0))\n p12 = plt.plot([], [], 'tab:orange', lw=5, label='{:1.2f}~L$_*$'.format\n (1.58))\n p1 = plt.plot([], [], 'tab:blue', lw=5, label='{:1.2f}~L$_*$'.format(2.5))\n plt.errorbar([], [], yerr=[], fmt='o', lw=2, color='k', 
label=\n 'redMaPPer', capsize=0)\n plt.plot([], [], color='k', label='Core Model')\n plt.legend(ncol=2, loc='best', framealpha=0.0)\n plt.xlabel('M$_{200c}$ [h$^{-1}$ M$_\\\\odot$]')\n plt.ylabel('Projected N$_{\\\\rm{gal}}$')\n plt.ylim([0.1, 3000.0])\n plt.xlim([100000000000000.0, 5000000000000000.0])\n plt.tight_layout()\n\n\ndef plot_ngal_fits():\n get_ngal_fit('params/cfn/simet/mstar1/mean/a3_rd.param', None, 'c')\n get_ngal_fit('params/cfn/simet/mstar0.5/mean/a3_rd.param', None, 'g')\n get_ngal_fit('params/cfn/simet/mstar0/mean/a3_rd.param', None, 'b')\n get_ngal_fit('params/cfn/simet/mstar-1/mean/a3_rd.param', None, 'r')\n get_ngal_fit('params/cfn/spider/mstar1/mean/spider_rd.param', None, 'c',\n plot_fit=False, spider=True)\n get_ngal_fit('params/cfn/spider/mstar0.5/mean/spider_rd.param', None,\n 'g', plot_fit=False, spider=True)\n get_ngal_fit('params/cfn/spider/mstar0/mean/spider_rd.param', None, 'b',\n plot_fit=False, spider=True)\n get_ngal_fit('params/cfn/spider/mstar-1/mean/spider_rd.param', None,\n 'r', plot_fit=False, spider=True)\n format_plot()\n\n\ndef plot_ngal_fits2(pattern, mstars):\n color_cycle = ['tab:blue', 'tab:orange', 'tab:green', 'tab:red',\n 'tab:purple', 'tab:brown', 'tab:pink', 'tab:gray', 'tab:olive',\n 'tab:cyan']\n for mstar, color in zip(mstars, color_cycle):\n get_ngal_fit(pattern.replace('${mstarval}', mstar), None, color)\n format_plot()\n\n\nif __name__ == '__main__':\n if len(sys.argv) > 2:\n plot_name = sys.argv[1]\n else:\n plot_name = 'OR_McClintock2019'\n mstars = ['-1', '-0.5', '0', '0.5', '1']\n if plot_name == 'OR_Simet2017':\n pattern = (\n 'params/rmba/auto/make_all_OR.high_richness.low_rez.min20.sh/crit/mstar${mstarval}/OR_rd_zoom.param'\n )\n plot_ngal_fits2(pattern, mstars)\n elif plot_name == 'OR_McClintock2019':\n pattern = (\n 'params/rmba/auto/make_all_OR.McClintock.high_richness.low_rez.min20.sh/crit/mstar${mstarval}/OR_rd_zoom.param'\n )\n plot_ngal_fits2(pattern, mstars)\n dtk.save_figs('figs/' + __file__ 
+ '/' + plot_name + '/', extension='.pdf')\n plt.show()\n",
"step-5": "#!/usr/bin/env python2.7\n\nfrom __future__ import print_function, division \nimport numpy as np\nimport matplotlib\nimport os\n#checks if there is a display to use.\nif os.environ.get('DISPLAY') is None:\n matplotlib.use('Agg')\n\nimport matplotlib.pyplot as plt\nimport matplotlib.colors as clr\nimport dtk\nimport sys\nimport time\nimport numpy.random\nfrom matplotlib.colors import LogNorm\nfrom scipy.optimize import minimize\n\nfrom calc_ngal import *\nfrom generate_parameter_dist import *\nfrom zmr import ZMR\nfrom matplotlib import rc\nrc('text', usetex=True)\nrc('font', **{'family':'serif', 'serif':['Computer Modern Roman'], })\nrc('font', size=18)\n\ndef load_clusters(file_name):\n if file_name not in load_clusters._cache:\n cluster_data = ClusterData()\n cluster_data.load_file(file_name)\n else:\n cluster_data = load_clusters._cache[file_name]\n return cluster_data\n\nload_clusters._cache = {}\n\ndef get_ngal_fit(param_fname, cluster_num, color, plot_fit=True, spider=False, manual_calc=False):\n param = dtk.Param(param_fname)\n cluster_loc = param.get_string('cluster_loc')\n if cluster_num is None:\n cluster_num = param.get_int('cluster_load_num')\n zmrh5_loc = param.get_string('zmrh5_loc')\n zmr_sdss = ZMR(zmrh5_loc)\n zmr_fit = ZMR(\"output/\"+param_fname+\"/zmr_lkhd_cores.param\")\n m_bins = zmr_fit.m_bins\n r_bins = zmr_fit.r_bins\n zmr_core_ngal, zmr_core_ngal_err = zmr_fit.get_ngal() # only one z-bin, so we don't select it out\n zmr_core_ngal = zmr_core_ngal[0]\n zmr_core_ngal_err = zmr_core_ngal_err[0]\n zmr_sdss_ngal, zmr_sdss_ngal_err = zmr_sdss.get_ngal()\n zmr_sdss_ngal = zmr_sdss_ngal[0]\n zmr_sdss_ngal_err = zmr_sdss_ngal_err[0]\n\n if manual_calc:\n model_fit_fname = \"figs/\"+param_fname+\"/calc_likelihood_bounds.py/grid_fit_param.txt\"\n model_fit = load_fit_limits(model_fit_fname)\n m_infall = 10**model_fit['mi']\n if 'rd' in model_fit:\n # print(model_fit['rd'])\n r_disrupt = model_fit['rd']/1000.0 #convert to mpc/h from kpc/h\n 
else:\n r_disrupt = np.inf\n # print(\"\\ncalculating ngal for \", param_fname)\n # print(\"\\tmodel_fit_fname:\", model_fit_fname)\n # print(\"\\tmodel params: {:.2e} {:.3f}\".format(m_infall, r_disrupt))\n print(cluster_loc)\n cluster_data = load_clusters(cluster_loc)\n if cluster_num == -1:\n cluster_num = cluster_data.num\n cluster_ngal = np.zeros(cluster_num)\n cluster_m_i = np.zeros(cluster_num)\n for i in range(0, cluster_num):\n mass_index = cluster_data.get_cluster_mass_bin(i, m_bins)\n cluster_m_i[i] = mass_index\n cluster_ngal[i] = cluster_data.get_ngal(i, m_infall, r_disrupt)[1]\n ngal_mean = np.zeros(len(m_bins)-1)\n ngal_err = np.zeros(len(m_bins)-1)\n ngal_std = np.zeros(len(m_bins)-1)\n for i in range(0, len(m_bins)-1):\n slct = cluster_m_i == i\n ngal_mean[i] = np.mean(cluster_ngal[slct])\n ngal_std[i] = np.std(cluster_ngal[slct])\n ngal_err[i] = ngal_std[i]/np.sqrt(np.sum(slct))\n # print(\"{:.2e}->{:.2e}: {}\".format(m_bins[i], m_bins[i+1], np.sum(slct)))\n plt.plot(dtk.bins_avg(m_bins), ngal_mean, '-x', color=color, label='Ngal recalc')\n if plot_fit:\n plt.plot(dtk.bins_avg(m_bins), zmr_core_ngal, '-', color=color)\n plt.fill_between(dtk.bins_avg(m_bins), zmr_core_ngal-zmr_core_ngal_err, zmr_core_ngal+zmr_core_ngal_err, color=color, alpha=0.3)\n offset_amount = 1.025\n if spider:\n markerfacecolor='None'\n markeredgecolor=color\n xaxis_offset=offset_amount\n lw = 1\n else:\n markerfacecolor=color\n markeredgecolor='None'\n xaxis_offset=1./offset_amount\n lw = 2\n \n # remove problematic 2.5 L* low mass cluster in the spider sample\n if \"mstar-1\" in param_fname and \"spider\" in param_fname:\n print(\"SPIDERSS!: \", zmr_sdss_ngal)\n zmr_sdss_ngal[zmr_sdss_ngal < 0.1 ] = np.nan\n plt.errorbar(dtk.bins_avg(m_bins)*xaxis_offset, zmr_sdss_ngal,\n yerr=zmr_sdss_ngal_err, fmt='o', capsize=0, lw=lw, color=color,\n markeredgecolor=markeredgecolor, markerfacecolor=markerfacecolor)\n # plt.fill_between(dtk.bins_avg(m_bins), ngal_mean-ngal_err, 
ngal_mean+ngal_err, color=color, alpha=0.3)\n plt.yscale('log')\n plt.xscale('log')\n # plt.legend(loc='best')\ndef format_plot():\n\n p4 = plt.plot([],[], 'tab:purple', lw=5, label=r'{:1.2f}~L$_*$'.format(0.4))\n p3 = plt.plot([],[], 'tab:red', lw=5, label=r'{:1.2f}~L$_*$'.format(0.63))\n p2 = plt.plot([],[], 'tab:green', lw=5, label=r'{:1.2f}~L$_*$'.format(1.0))\n p12 = plt.plot([],[], 'tab:orange',lw=5, label=r'{:1.2f}~L$_*$'.format(1.58))\n p1 = plt.plot([],[], 'tab:blue',lw=5, label=r'{:1.2f}~L$_*$'.format(2.5))\n plt.errorbar([], [], yerr=[], fmt='o', lw=2, color='k', label=\"redMaPPer\", capsize=0)\n plt.plot([], [], color='k', label=\"Core Model\")\n # plt.errorbar([], [], yerr=[], fmt='o', lw=1, color='k', markerfacecolor='none', label='SPIDERS clusters', capsize=0)\n plt.legend(ncol=2, loc='best', framealpha=0.0)\n\n plt.xlabel(r'M$_{200c}$ [h$^{-1}$ M$_\\odot$]')\n plt.ylabel(r'Projected N$_{\\rm{gal}}$')\n plt.ylim([1e-1, 3e3])\n plt.xlim([1e14, 5e15])\n plt.tight_layout()\n\ndef plot_ngal_fits():\n get_ngal_fit(\"params/cfn/simet/mstar1/mean/a3_rd.param\", None, 'c')\n get_ngal_fit(\"params/cfn/simet/mstar0.5/mean/a3_rd.param\", None, 'g')\n get_ngal_fit(\"params/cfn/simet/mstar0/mean/a3_rd.param\", None, 'b')\n get_ngal_fit(\"params/cfn/simet/mstar-1/mean/a3_rd.param\", None, 'r')\n\n #just spider points\n get_ngal_fit(\"params/cfn/spider/mstar1/mean/spider_rd.param\", None, 'c', plot_fit=False, spider=True)\n get_ngal_fit(\"params/cfn/spider/mstar0.5/mean/spider_rd.param\", None, 'g', plot_fit=False, spider=True)\n get_ngal_fit(\"params/cfn/spider/mstar0/mean/spider_rd.param\", None, 'b', plot_fit=False, spider=True)\n get_ngal_fit(\"params/cfn/spider/mstar-1/mean/spider_rd.param\", None, 'r', plot_fit=False, spider=True)\n\n # get_ngal_fit(\"params/cfn/spider/mstar0/mean/spider_rd.param\", None, 'm', plot_fit=False, spider=True)\n # get_ngal_fit(\"params/cfn/spider/mstar0/mean/bcg_rd.param\", None, 'c', plot_fit=False, spider=True)\n 
format_plot()\ndef plot_ngal_fits2(pattern, mstars):\n color_cycle = ['tab:blue', 'tab:orange', 'tab:green', 'tab:red', 'tab:purple', 'tab:brown', 'tab:pink', 'tab:gray', 'tab:olive', 'tab:cyan']\n for mstar, color in zip(mstars, color_cycle):\n get_ngal_fit(pattern.replace(\"${mstarval}\", mstar), None, color)\n format_plot()\n\nif __name__ == \"__main__\":\n if len(sys.argv) > 2:\n plot_name = sys.argv[1]\n else:\n plot_name = \"OR_McClintock2019\"\n mstars = ['-1', '-0.5', '0', '0.5', '1']\n if plot_name == \"OR_Simet2017\":\n pattern = 'params/rmba/auto/make_all_OR.high_richness.low_rez.min20.sh/crit/mstar${mstarval}/OR_rd_zoom.param'\n plot_ngal_fits2(pattern, mstars)\n elif plot_name == \"OR_McClintock2019\":\n pattern = 'params/rmba/auto/make_all_OR.McClintock.high_richness.low_rez.min20.sh/crit/mstar${mstarval}/OR_rd_zoom.param'\n plot_ngal_fits2(pattern, mstars)\n # plot_ngal_fits()\n dtk.save_figs(\"figs/\"+__file__+\"/\"+plot_name+\"/\", extension='.pdf')\n plt.show()\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
<|reserved_special_token_0|>
def get_sma(stock_code, ndays):
stock_data = st.get_csv_data(stock_code, 'price')
sma_data = SMA(stock_data, ndays)
sma_data = sma_data.dropna()
return sma_data['SMA']
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def SMA(data, ndays):
SMA = pd.Series(data['close'].rolling(ndays).mean(), name='SMA')
data = data.join(SMA)
return data
<|reserved_special_token_0|>
def get_sma(stock_code, ndays):
stock_data = st.get_csv_data(stock_code, 'price')
sma_data = SMA(stock_data, ndays)
sma_data = sma_data.dropna()
return sma_data['SMA']
def get_ewma(stock_code, ndays):
stock_data = st.get_csv_data(stock_code, 'price')
ewma_data = EWMA(stock_data, ndays)
ewma_data = ewma_data.dropna()
return ewma_data['EWMA']
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def SMA(data, ndays):
SMA = pd.Series(data['close'].rolling(ndays).mean(), name='SMA')
data = data.join(SMA)
return data
def EWMA(data, ndays):
EMA = pd.Series(pd.DataFrame.ewm(data['close'], span=ndays, min_periods
=ndays - 1).mean(), name='EWMA')
data = data.join(EMA)
return data
def get_sma(stock_code, ndays):
stock_data = st.get_csv_data(stock_code, 'price')
sma_data = SMA(stock_data, ndays)
sma_data = sma_data.dropna()
return sma_data['SMA']
def get_ewma(stock_code, ndays):
stock_data = st.get_csv_data(stock_code, 'price')
ewma_data = EWMA(stock_data, ndays)
ewma_data = ewma_data.dropna()
return ewma_data['EWMA']
<|reserved_special_token_1|>
import pandas as pd
import matplotlib.pyplot as plt
import data.stock as st
def SMA(data, ndays):
SMA = pd.Series(data['close'].rolling(ndays).mean(), name='SMA')
data = data.join(SMA)
return data
def EWMA(data, ndays):
EMA = pd.Series(pd.DataFrame.ewm(data['close'], span=ndays, min_periods
=ndays - 1).mean(), name='EWMA')
data = data.join(EMA)
return data
def get_sma(stock_code, ndays):
stock_data = st.get_csv_data(stock_code, 'price')
sma_data = SMA(stock_data, ndays)
sma_data = sma_data.dropna()
return sma_data['SMA']
def get_ewma(stock_code, ndays):
stock_data = st.get_csv_data(stock_code, 'price')
ewma_data = EWMA(stock_data, ndays)
ewma_data = ewma_data.dropna()
return ewma_data['EWMA']
<|reserved_special_token_1|>
# Moving Averages Code
# Load the necessary packages and modules
import pandas as pd
import matplotlib.pyplot as plt
import data.stock as st
# Simple Moving Average
def SMA(data, ndays):
SMA = pd.Series(data['close'].rolling(ndays).mean(), name='SMA')
# SMA = pd.Series(pd.rolling_mean(data['close'], ndays), name='SMA')
data = data.join(SMA)
return data
# Exponentially-weighted Moving Average
def EWMA(data, ndays):
EMA = pd.Series(pd.DataFrame.ewm(data['close'],
span=ndays,
min_periods=ndays - 1).mean(),
name='EWMA')
data = data.join(EMA)
return data
# Retrieve the Nifty data from Yahoo finance:
# XSHE000002_data = st.get_csv_data('000002.XSHE', 'price')
# close = XSHE000002_data['close']
#
# # Compute the 50-day SMA for NIFTY
# n = 50
# SMA_NIFTY = SMA(XSHE000002_data, n)
# SMA_NIFTY = SMA_NIFTY.dropna()
# SMA = SMA_NIFTY['SMA']
def get_sma(stock_code, ndays):
stock_data = st.get_csv_data(stock_code, 'price')
sma_data = SMA(stock_data, ndays)
sma_data = sma_data.dropna()
return sma_data['SMA']
def get_ewma(stock_code, ndays):
stock_data = st.get_csv_data(stock_code, 'price')
ewma_data = EWMA(stock_data, ndays)
ewma_data = ewma_data.dropna()
return ewma_data['EWMA']
# Compute the 200-day EWMA for NIFTY
# ew = 200
# EWMA_NIFTY = EWMA(XSHE000002_data, ew)
# EWMA_NIFTY = EWMA_NIFTY.dropna()
# EWMA = EWMA_NIFTY['EWMA_200']
# Plotting the NIFTY Price Series chart and Moving Averages below
# plt.figure(figsize=(9, 5))
# plt.plot(XSHE000002_data['close'], lw=1, label='NSE Prices')
# plt.plot(SMA, 'g', lw=1, label='50-day SMA (green)')
# plt.plot(EWMA, 'r', lw=1, label='200-day EWMA (red)')
# plt.legend(loc=2, prop={'size': 11})
# plt.grid(True)
# plt.setp(plt.gca().get_xticklabels(), rotation=30)
# plt.show()
|
flexible
|
{
"blob_id": "4c9f2b6fd119daa58b7f1dd7153c90df747e62cb",
"index": 1249,
"step-1": "<mask token>\n\n\ndef get_sma(stock_code, ndays):\n stock_data = st.get_csv_data(stock_code, 'price')\n sma_data = SMA(stock_data, ndays)\n sma_data = sma_data.dropna()\n return sma_data['SMA']\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef SMA(data, ndays):\n SMA = pd.Series(data['close'].rolling(ndays).mean(), name='SMA')\n data = data.join(SMA)\n return data\n\n\n<mask token>\n\n\ndef get_sma(stock_code, ndays):\n stock_data = st.get_csv_data(stock_code, 'price')\n sma_data = SMA(stock_data, ndays)\n sma_data = sma_data.dropna()\n return sma_data['SMA']\n\n\ndef get_ewma(stock_code, ndays):\n stock_data = st.get_csv_data(stock_code, 'price')\n ewma_data = EWMA(stock_data, ndays)\n ewma_data = ewma_data.dropna()\n return ewma_data['EWMA']\n",
"step-3": "<mask token>\n\n\ndef SMA(data, ndays):\n SMA = pd.Series(data['close'].rolling(ndays).mean(), name='SMA')\n data = data.join(SMA)\n return data\n\n\ndef EWMA(data, ndays):\n EMA = pd.Series(pd.DataFrame.ewm(data['close'], span=ndays, min_periods\n =ndays - 1).mean(), name='EWMA')\n data = data.join(EMA)\n return data\n\n\ndef get_sma(stock_code, ndays):\n stock_data = st.get_csv_data(stock_code, 'price')\n sma_data = SMA(stock_data, ndays)\n sma_data = sma_data.dropna()\n return sma_data['SMA']\n\n\ndef get_ewma(stock_code, ndays):\n stock_data = st.get_csv_data(stock_code, 'price')\n ewma_data = EWMA(stock_data, ndays)\n ewma_data = ewma_data.dropna()\n return ewma_data['EWMA']\n",
"step-4": "import pandas as pd\nimport matplotlib.pyplot as plt\nimport data.stock as st\n\n\ndef SMA(data, ndays):\n SMA = pd.Series(data['close'].rolling(ndays).mean(), name='SMA')\n data = data.join(SMA)\n return data\n\n\ndef EWMA(data, ndays):\n EMA = pd.Series(pd.DataFrame.ewm(data['close'], span=ndays, min_periods\n =ndays - 1).mean(), name='EWMA')\n data = data.join(EMA)\n return data\n\n\ndef get_sma(stock_code, ndays):\n stock_data = st.get_csv_data(stock_code, 'price')\n sma_data = SMA(stock_data, ndays)\n sma_data = sma_data.dropna()\n return sma_data['SMA']\n\n\ndef get_ewma(stock_code, ndays):\n stock_data = st.get_csv_data(stock_code, 'price')\n ewma_data = EWMA(stock_data, ndays)\n ewma_data = ewma_data.dropna()\n return ewma_data['EWMA']\n",
"step-5": "# Moving Averages Code\n\n# Load the necessary packages and modules\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport data.stock as st\n\n\n# Simple Moving Average \ndef SMA(data, ndays):\n SMA = pd.Series(data['close'].rolling(ndays).mean(), name='SMA')\n # SMA = pd.Series(pd.rolling_mean(data['close'], ndays), name='SMA')\n data = data.join(SMA)\n return data\n\n\n# Exponentially-weighted Moving Average\ndef EWMA(data, ndays):\n EMA = pd.Series(pd.DataFrame.ewm(data['close'],\n span=ndays,\n min_periods=ndays - 1).mean(),\n name='EWMA')\n data = data.join(EMA)\n return data\n\n\n# Retrieve the Nifty data from Yahoo finance:\n# XSHE000002_data = st.get_csv_data('000002.XSHE', 'price')\n# close = XSHE000002_data['close']\n#\n# # Compute the 50-day SMA for NIFTY\n# n = 50\n# SMA_NIFTY = SMA(XSHE000002_data, n)\n# SMA_NIFTY = SMA_NIFTY.dropna()\n# SMA = SMA_NIFTY['SMA']\n\n\ndef get_sma(stock_code, ndays):\n stock_data = st.get_csv_data(stock_code, 'price')\n sma_data = SMA(stock_data, ndays)\n sma_data = sma_data.dropna()\n return sma_data['SMA']\n\n\ndef get_ewma(stock_code, ndays):\n stock_data = st.get_csv_data(stock_code, 'price')\n ewma_data = EWMA(stock_data, ndays)\n ewma_data = ewma_data.dropna()\n return ewma_data['EWMA']\n\n# Compute the 200-day EWMA for NIFTY\n# ew = 200\n# EWMA_NIFTY = EWMA(XSHE000002_data, ew)\n# EWMA_NIFTY = EWMA_NIFTY.dropna()\n# EWMA = EWMA_NIFTY['EWMA_200']\n\n# Plotting the NIFTY Price Series chart and Moving Averages below\n# plt.figure(figsize=(9, 5))\n# plt.plot(XSHE000002_data['close'], lw=1, label='NSE Prices')\n# plt.plot(SMA, 'g', lw=1, label='50-day SMA (green)')\n# plt.plot(EWMA, 'r', lw=1, label='200-day EWMA (red)')\n# plt.legend(loc=2, prop={'size': 11})\n# plt.grid(True)\n# plt.setp(plt.gca().get_xticklabels(), rotation=30)\n# plt.show()\n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
def patch_rom(rom_path, payload_path, c_code_path, entry_code_path, out_path):
rom = list(Path(rom_path).read_bytes())
payload = list(Path(payload_path).read_bytes())
c_code = list(Path(c_code_path).read_bytes())
entry_code = list(Path(entry_code_path).read_bytes())
jump = [60, 31, 176, 120, 3, 224, 248, 9]
jump_ram = [60, 31, 128, 64, 3, 224, 248, 9]
entry_inject = 4096
jump_address = 263024
render_inject = 2149056272 - VIRTUAL_TO_ROM
payload_address = 7864320
c_code_address = 7864832
force_easy_ball = 239312
for i in range(0, len(jump)):
rom[jump_address + i] = jump_ram[i]
rom[render_inject + i] = jump_ram[i]
for i in range(8, 12):
rom[render_inject + i] = 0
for i in range(0, len(payload)):
rom[payload_address + i] = payload[i]
for i in range(0, len(c_code)):
rom[c_code_address + i] = c_code[i]
for i in range(0, len(entry_code)):
rom[entry_inject + i] = entry_code[i]
rom[force_easy_ball] = 0
rom[force_easy_ball + 1] = 0
rom[force_easy_ball + 2] = 0
rom[force_easy_ball + 3] = 0
buf = bytearray(rom)
cksm.update_checksum(buf)
f = open(out_path, 'w+b')
f.write(buf)
f.close()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def patch_rom(rom_path, payload_path, c_code_path, entry_code_path, out_path):
rom = list(Path(rom_path).read_bytes())
payload = list(Path(payload_path).read_bytes())
c_code = list(Path(c_code_path).read_bytes())
entry_code = list(Path(entry_code_path).read_bytes())
jump = [60, 31, 176, 120, 3, 224, 248, 9]
jump_ram = [60, 31, 128, 64, 3, 224, 248, 9]
entry_inject = 4096
jump_address = 263024
render_inject = 2149056272 - VIRTUAL_TO_ROM
payload_address = 7864320
c_code_address = 7864832
force_easy_ball = 239312
for i in range(0, len(jump)):
rom[jump_address + i] = jump_ram[i]
rom[render_inject + i] = jump_ram[i]
for i in range(8, 12):
rom[render_inject + i] = 0
for i in range(0, len(payload)):
rom[payload_address + i] = payload[i]
for i in range(0, len(c_code)):
rom[c_code_address + i] = c_code[i]
for i in range(0, len(entry_code)):
rom[entry_inject + i] = entry_code[i]
rom[force_easy_ball] = 0
rom[force_easy_ball + 1] = 0
rom[force_easy_ball + 2] = 0
rom[force_easy_ball + 3] = 0
buf = bytearray(rom)
cksm.update_checksum(buf)
f = open(out_path, 'w+b')
f.write(buf)
f.close()
if __name__ == '__main__':
if len(sys.argv) < 5:
print(
'Usage: glovepatch.py <glover_rom> <binary to inject> <c code binary> <entry code> <output>'
)
sys.exit(0)
patch_rom(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4], sys.argv[5])
<|reserved_special_token_1|>
<|reserved_special_token_0|>
VIRTUAL_TO_ROM = 2148528128
def patch_rom(rom_path, payload_path, c_code_path, entry_code_path, out_path):
rom = list(Path(rom_path).read_bytes())
payload = list(Path(payload_path).read_bytes())
c_code = list(Path(c_code_path).read_bytes())
entry_code = list(Path(entry_code_path).read_bytes())
jump = [60, 31, 176, 120, 3, 224, 248, 9]
jump_ram = [60, 31, 128, 64, 3, 224, 248, 9]
entry_inject = 4096
jump_address = 263024
render_inject = 2149056272 - VIRTUAL_TO_ROM
payload_address = 7864320
c_code_address = 7864832
force_easy_ball = 239312
for i in range(0, len(jump)):
rom[jump_address + i] = jump_ram[i]
rom[render_inject + i] = jump_ram[i]
for i in range(8, 12):
rom[render_inject + i] = 0
for i in range(0, len(payload)):
rom[payload_address + i] = payload[i]
for i in range(0, len(c_code)):
rom[c_code_address + i] = c_code[i]
for i in range(0, len(entry_code)):
rom[entry_inject + i] = entry_code[i]
rom[force_easy_ball] = 0
rom[force_easy_ball + 1] = 0
rom[force_easy_ball + 2] = 0
rom[force_easy_ball + 3] = 0
buf = bytearray(rom)
cksm.update_checksum(buf)
f = open(out_path, 'w+b')
f.write(buf)
f.close()
if __name__ == '__main__':
if len(sys.argv) < 5:
print(
'Usage: glovepatch.py <glover_rom> <binary to inject> <c code binary> <entry code> <output>'
)
sys.exit(0)
patch_rom(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4], sys.argv[5])
<|reserved_special_token_1|>
import sys
import cksm
from pathlib import Path
VIRTUAL_TO_ROM = 2148528128
def patch_rom(rom_path, payload_path, c_code_path, entry_code_path, out_path):
rom = list(Path(rom_path).read_bytes())
payload = list(Path(payload_path).read_bytes())
c_code = list(Path(c_code_path).read_bytes())
entry_code = list(Path(entry_code_path).read_bytes())
jump = [60, 31, 176, 120, 3, 224, 248, 9]
jump_ram = [60, 31, 128, 64, 3, 224, 248, 9]
entry_inject = 4096
jump_address = 263024
render_inject = 2149056272 - VIRTUAL_TO_ROM
payload_address = 7864320
c_code_address = 7864832
force_easy_ball = 239312
for i in range(0, len(jump)):
rom[jump_address + i] = jump_ram[i]
rom[render_inject + i] = jump_ram[i]
for i in range(8, 12):
rom[render_inject + i] = 0
for i in range(0, len(payload)):
rom[payload_address + i] = payload[i]
for i in range(0, len(c_code)):
rom[c_code_address + i] = c_code[i]
for i in range(0, len(entry_code)):
rom[entry_inject + i] = entry_code[i]
rom[force_easy_ball] = 0
rom[force_easy_ball + 1] = 0
rom[force_easy_ball + 2] = 0
rom[force_easy_ball + 3] = 0
buf = bytearray(rom)
cksm.update_checksum(buf)
f = open(out_path, 'w+b')
f.write(buf)
f.close()
if __name__ == '__main__':
if len(sys.argv) < 5:
print(
'Usage: glovepatch.py <glover_rom> <binary to inject> <c code binary> <entry code> <output>'
)
sys.exit(0)
patch_rom(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4], sys.argv[5])
<|reserved_special_token_1|>
#!/usr/bin/env python3
import sys
import cksm
from pathlib import Path
VIRTUAL_TO_ROM = 0x800ff000
def patch_rom(rom_path, payload_path, c_code_path, entry_code_path, out_path):
rom = list(Path(rom_path).read_bytes())
payload = list(Path(payload_path).read_bytes())
c_code = list(Path(c_code_path).read_bytes())
entry_code = list(Path(entry_code_path).read_bytes())
jump = [0x3C, 0x1F, 0xB0, 0x78, 0x03, 0xE0, 0xF8, 0x09] # code that jumps to payload
jump_ram = [0x3C, 0x1F, 0x80, 0x40, 0x03, 0xE0, 0xF8, 0x09]
entry_inject = 0x1000
jump_address = 0x40370
render_inject = 0x8017FF10-VIRTUAL_TO_ROM
payload_address = 0x780000
c_code_address = 0x780200
force_easy_ball = 0x3A6D0 # nop this address to always make ball behave like easy mode
for i in range(0, len(jump)):
rom[jump_address+i] = jump_ram[i]
rom[render_inject+i] = jump_ram[i]
# need to nop the call right after render_inject
for i in range(8, 12):
rom[render_inject+i] = 0x00
for i in range(0, len(payload)):
rom[payload_address+i] = payload[i]
for i in range(0, len(c_code)):
rom[c_code_address+i] = c_code[i]
for i in range(0, len(entry_code)):
rom[entry_inject+i] = entry_code[i]
# same as this gs code:
# 801396D0 0000
# 801396D1 0000
# 801396D2 0000
# 801396D3 0000
rom[force_easy_ball] = 0x00
rom[force_easy_ball+1] = 0x00
rom[force_easy_ball+2] = 0x00
rom[force_easy_ball+3] = 0x00
buf = bytearray(rom)
cksm.update_checksum(buf)
f = open(out_path, 'w+b')
f.write(buf)
f.close()
if __name__ == '__main__':
if len(sys.argv) < 5:
print("Usage: glovepatch.py <glover_rom> <binary to inject> <c code binary> <entry code> <output>")
sys.exit(0)
patch_rom(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4], sys.argv[5])
|
flexible
|
{
"blob_id": "f566c42674728f1874d89b15102627c3b404c9a0",
"index": 3534,
"step-1": "<mask token>\n\n\ndef patch_rom(rom_path, payload_path, c_code_path, entry_code_path, out_path):\n rom = list(Path(rom_path).read_bytes())\n payload = list(Path(payload_path).read_bytes())\n c_code = list(Path(c_code_path).read_bytes())\n entry_code = list(Path(entry_code_path).read_bytes())\n jump = [60, 31, 176, 120, 3, 224, 248, 9]\n jump_ram = [60, 31, 128, 64, 3, 224, 248, 9]\n entry_inject = 4096\n jump_address = 263024\n render_inject = 2149056272 - VIRTUAL_TO_ROM\n payload_address = 7864320\n c_code_address = 7864832\n force_easy_ball = 239312\n for i in range(0, len(jump)):\n rom[jump_address + i] = jump_ram[i]\n rom[render_inject + i] = jump_ram[i]\n for i in range(8, 12):\n rom[render_inject + i] = 0\n for i in range(0, len(payload)):\n rom[payload_address + i] = payload[i]\n for i in range(0, len(c_code)):\n rom[c_code_address + i] = c_code[i]\n for i in range(0, len(entry_code)):\n rom[entry_inject + i] = entry_code[i]\n rom[force_easy_ball] = 0\n rom[force_easy_ball + 1] = 0\n rom[force_easy_ball + 2] = 0\n rom[force_easy_ball + 3] = 0\n buf = bytearray(rom)\n cksm.update_checksum(buf)\n f = open(out_path, 'w+b')\n f.write(buf)\n f.close()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef patch_rom(rom_path, payload_path, c_code_path, entry_code_path, out_path):\n rom = list(Path(rom_path).read_bytes())\n payload = list(Path(payload_path).read_bytes())\n c_code = list(Path(c_code_path).read_bytes())\n entry_code = list(Path(entry_code_path).read_bytes())\n jump = [60, 31, 176, 120, 3, 224, 248, 9]\n jump_ram = [60, 31, 128, 64, 3, 224, 248, 9]\n entry_inject = 4096\n jump_address = 263024\n render_inject = 2149056272 - VIRTUAL_TO_ROM\n payload_address = 7864320\n c_code_address = 7864832\n force_easy_ball = 239312\n for i in range(0, len(jump)):\n rom[jump_address + i] = jump_ram[i]\n rom[render_inject + i] = jump_ram[i]\n for i in range(8, 12):\n rom[render_inject + i] = 0\n for i in range(0, len(payload)):\n rom[payload_address + i] = payload[i]\n for i in range(0, len(c_code)):\n rom[c_code_address + i] = c_code[i]\n for i in range(0, len(entry_code)):\n rom[entry_inject + i] = entry_code[i]\n rom[force_easy_ball] = 0\n rom[force_easy_ball + 1] = 0\n rom[force_easy_ball + 2] = 0\n rom[force_easy_ball + 3] = 0\n buf = bytearray(rom)\n cksm.update_checksum(buf)\n f = open(out_path, 'w+b')\n f.write(buf)\n f.close()\n\n\nif __name__ == '__main__':\n if len(sys.argv) < 5:\n print(\n 'Usage: glovepatch.py <glover_rom> <binary to inject> <c code binary> <entry code> <output>'\n )\n sys.exit(0)\n patch_rom(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4], sys.argv[5])\n",
"step-3": "<mask token>\nVIRTUAL_TO_ROM = 2148528128\n\n\ndef patch_rom(rom_path, payload_path, c_code_path, entry_code_path, out_path):\n rom = list(Path(rom_path).read_bytes())\n payload = list(Path(payload_path).read_bytes())\n c_code = list(Path(c_code_path).read_bytes())\n entry_code = list(Path(entry_code_path).read_bytes())\n jump = [60, 31, 176, 120, 3, 224, 248, 9]\n jump_ram = [60, 31, 128, 64, 3, 224, 248, 9]\n entry_inject = 4096\n jump_address = 263024\n render_inject = 2149056272 - VIRTUAL_TO_ROM\n payload_address = 7864320\n c_code_address = 7864832\n force_easy_ball = 239312\n for i in range(0, len(jump)):\n rom[jump_address + i] = jump_ram[i]\n rom[render_inject + i] = jump_ram[i]\n for i in range(8, 12):\n rom[render_inject + i] = 0\n for i in range(0, len(payload)):\n rom[payload_address + i] = payload[i]\n for i in range(0, len(c_code)):\n rom[c_code_address + i] = c_code[i]\n for i in range(0, len(entry_code)):\n rom[entry_inject + i] = entry_code[i]\n rom[force_easy_ball] = 0\n rom[force_easy_ball + 1] = 0\n rom[force_easy_ball + 2] = 0\n rom[force_easy_ball + 3] = 0\n buf = bytearray(rom)\n cksm.update_checksum(buf)\n f = open(out_path, 'w+b')\n f.write(buf)\n f.close()\n\n\nif __name__ == '__main__':\n if len(sys.argv) < 5:\n print(\n 'Usage: glovepatch.py <glover_rom> <binary to inject> <c code binary> <entry code> <output>'\n )\n sys.exit(0)\n patch_rom(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4], sys.argv[5])\n",
"step-4": "import sys\nimport cksm\nfrom pathlib import Path\nVIRTUAL_TO_ROM = 2148528128\n\n\ndef patch_rom(rom_path, payload_path, c_code_path, entry_code_path, out_path):\n rom = list(Path(rom_path).read_bytes())\n payload = list(Path(payload_path).read_bytes())\n c_code = list(Path(c_code_path).read_bytes())\n entry_code = list(Path(entry_code_path).read_bytes())\n jump = [60, 31, 176, 120, 3, 224, 248, 9]\n jump_ram = [60, 31, 128, 64, 3, 224, 248, 9]\n entry_inject = 4096\n jump_address = 263024\n render_inject = 2149056272 - VIRTUAL_TO_ROM\n payload_address = 7864320\n c_code_address = 7864832\n force_easy_ball = 239312\n for i in range(0, len(jump)):\n rom[jump_address + i] = jump_ram[i]\n rom[render_inject + i] = jump_ram[i]\n for i in range(8, 12):\n rom[render_inject + i] = 0\n for i in range(0, len(payload)):\n rom[payload_address + i] = payload[i]\n for i in range(0, len(c_code)):\n rom[c_code_address + i] = c_code[i]\n for i in range(0, len(entry_code)):\n rom[entry_inject + i] = entry_code[i]\n rom[force_easy_ball] = 0\n rom[force_easy_ball + 1] = 0\n rom[force_easy_ball + 2] = 0\n rom[force_easy_ball + 3] = 0\n buf = bytearray(rom)\n cksm.update_checksum(buf)\n f = open(out_path, 'w+b')\n f.write(buf)\n f.close()\n\n\nif __name__ == '__main__':\n if len(sys.argv) < 5:\n print(\n 'Usage: glovepatch.py <glover_rom> <binary to inject> <c code binary> <entry code> <output>'\n )\n sys.exit(0)\n patch_rom(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4], sys.argv[5])\n",
"step-5": "#!/usr/bin/env python3\n\nimport sys\n\nimport cksm\nfrom pathlib import Path\n\nVIRTUAL_TO_ROM = 0x800ff000\n\ndef patch_rom(rom_path, payload_path, c_code_path, entry_code_path, out_path):\n rom = list(Path(rom_path).read_bytes())\n payload = list(Path(payload_path).read_bytes())\n c_code = list(Path(c_code_path).read_bytes())\n entry_code = list(Path(entry_code_path).read_bytes())\n\n jump = [0x3C, 0x1F, 0xB0, 0x78, 0x03, 0xE0, 0xF8, 0x09] # code that jumps to payload\n jump_ram = [0x3C, 0x1F, 0x80, 0x40, 0x03, 0xE0, 0xF8, 0x09]\n entry_inject = 0x1000\n jump_address = 0x40370\n render_inject = 0x8017FF10-VIRTUAL_TO_ROM\n payload_address = 0x780000\n c_code_address = 0x780200\n force_easy_ball = 0x3A6D0 # nop this address to always make ball behave like easy mode\n\n for i in range(0, len(jump)):\n rom[jump_address+i] = jump_ram[i]\n rom[render_inject+i] = jump_ram[i]\n\n # need to nop the call right after render_inject\n for i in range(8, 12):\n rom[render_inject+i] = 0x00\n\n for i in range(0, len(payload)):\n rom[payload_address+i] = payload[i]\n\n for i in range(0, len(c_code)):\n rom[c_code_address+i] = c_code[i]\n\n for i in range(0, len(entry_code)):\n rom[entry_inject+i] = entry_code[i]\n\n\n # same as this gs code:\n # 801396D0 0000\n # 801396D1 0000\n # 801396D2 0000\n # 801396D3 0000\n rom[force_easy_ball] = 0x00\n rom[force_easy_ball+1] = 0x00\n rom[force_easy_ball+2] = 0x00\n rom[force_easy_ball+3] = 0x00\n\n buf = bytearray(rom)\n cksm.update_checksum(buf)\n\n f = open(out_path, 'w+b')\n f.write(buf)\n f.close()\n\nif __name__ == '__main__':\n if len(sys.argv) < 5:\n print(\"Usage: glovepatch.py <glover_rom> <binary to inject> <c code binary> <entry code> <output>\")\n sys.exit(0)\n patch_rom(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4], sys.argv[5])\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
@application.route('/')
def index():
return make_response(render_template('index.html'))
@application.route('/getGraph', methods=['POST', 'GET'])
def getgraph():
if request.method == 'POST':
if 'data' in request.form:
if path.exists('static/jsons/' + request.form['data'] + '.json'):
with open('static/jsons/' + request.form['data'] + '.json', 'r'
) as file:
jsonStr = file.read()
jsonStr = json.loads(jsonStr)
return jsonify(jsonStr)
else:
return '<h1>404 NOT FOUND'
else:
return '<h1>400 BAD REQUEST'
elif 'graph' in request.args:
if request.args['graph'] == 'pagaOra':
return make_response(render_template('graphs/pagaOra.html'))
elif request.args['graph'] == 'iscrittiAtn':
if 'sex' in request.args:
return make_response(render_template(
'graphs/iscrittiAtn.html', sex=int(request.args['sex'])))
else:
return make_response(render_template(
'graphs/iscrittiAtn.html', sex=0))
elif request.args['graph'] == 'disoccupati':
return make_response(render_template(
'graphs/disoccupatiGraph.html'))
elif request.args['graph'] == 'iscrittiProv':
return make_response(render_template('graphs/iscrittiProv.html'))
elif request.args['graph'] == 'mf' and 'atn' in request.args:
dir = 'graphs/mf/mf' + request.args['atn'] + '.html'
print(dir)
if path.exists('templates/' + dir):
if 'year' in request.args:
return make_response(render_template(dir, year=int(
request.args['year'])))
else:
return make_response(render_template(dir, year=0))
elif request.args['graph'] == 'emig' and 'prov' in request.args:
dir = 'graphs/emig/iscrittiEmig' + request.args['prov'] + '.html'
if path.exists('templates/' + dir):
return make_response(render_template(dir))
return '<h1>400 BAD REQUEST'
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@application.route('/')
def index():
return make_response(render_template('index.html'))
@application.route('/getGraph', methods=['POST', 'GET'])
def getgraph():
if request.method == 'POST':
if 'data' in request.form:
if path.exists('static/jsons/' + request.form['data'] + '.json'):
with open('static/jsons/' + request.form['data'] + '.json', 'r'
) as file:
jsonStr = file.read()
jsonStr = json.loads(jsonStr)
return jsonify(jsonStr)
else:
return '<h1>404 NOT FOUND'
else:
return '<h1>400 BAD REQUEST'
elif 'graph' in request.args:
if request.args['graph'] == 'pagaOra':
return make_response(render_template('graphs/pagaOra.html'))
elif request.args['graph'] == 'iscrittiAtn':
if 'sex' in request.args:
return make_response(render_template(
'graphs/iscrittiAtn.html', sex=int(request.args['sex'])))
else:
return make_response(render_template(
'graphs/iscrittiAtn.html', sex=0))
elif request.args['graph'] == 'disoccupati':
return make_response(render_template(
'graphs/disoccupatiGraph.html'))
elif request.args['graph'] == 'iscrittiProv':
return make_response(render_template('graphs/iscrittiProv.html'))
elif request.args['graph'] == 'mf' and 'atn' in request.args:
dir = 'graphs/mf/mf' + request.args['atn'] + '.html'
print(dir)
if path.exists('templates/' + dir):
if 'year' in request.args:
return make_response(render_template(dir, year=int(
request.args['year'])))
else:
return make_response(render_template(dir, year=0))
elif request.args['graph'] == 'emig' and 'prov' in request.args:
dir = 'graphs/emig/iscrittiEmig' + request.args['prov'] + '.html'
if path.exists('templates/' + dir):
return make_response(render_template(dir))
return '<h1>400 BAD REQUEST'
@application.route('/doUpdate')
def updateData():
with open('static/notUpdating/iscrittiAteneo.csv', newline='') as f:
reader = csv.reader(f)
data = list(reader)[1:]
iscrittiAteneo = {'Venezia CF': [], 'Verona': [], 'Venezia IUAV': [
], 'Padova': []}
for row in data:
row = row[0].split(';')
if row[1] == 'Padova' or 'Venezia C' in row[1] or row[1
] == 'Venezia Iuav' or row[1] == 'Verona':
tmp = row[1]
if 'Venezia C' in row[1]:
tmp = 'Venezia CF'
if tmp == 'Venezia Iuav':
tmp = 'Venezia IUAV'
iscrittiAteneo[tmp].append(row[0] + ';' + row[3] + ';' + row[4]
)
iscrittiAteneoJson = json.dumps(iscrittiAteneo)
open('static/jsons/iscrittiAteneo.json', 'wb').write(iscrittiAteneoJson
.encode())
with open('static/notUpdating/iscrittiEmig.json', newline='') as f:
reader = json.load(f)
iscrittiEmig = {'vicenza': [], 'verona': [], 'venezia': [],
'padova': [], 'treviso': [], 'belluno': [], 'rovigo': []}
for row in reader['records']:
if row[4].lower() == 'padova' or row[4].lower(
) == 'vicenza' or row[4].lower() == 'venezia' or row[4].lower(
) == 'verona' or row[4].lower() == 'treviso' or row[4].lower(
) == 'belluno' or row[4].lower() == 'rovigo':
iscrittiEmig[row[4].lower()].append(row[1] + ';' + row[4] +
';' + row[2] + ';' + str(row[6]))
lista = {'vicenza': [], 'verona': [], 'venezia': [], 'padova': [],
'treviso': [], 'belluno': [], 'rovigo': []}
count = 0
for key in iscrittiEmig.keys():
while len(iscrittiEmig[key]) > 2:
tmp = iscrittiEmig[key].pop(0).split(';')
if count == 0:
count = int(tmp[3])
tmp2 = iscrittiEmig[key][0].split(';')[2]
if tmp[2] == tmp2:
count += int(tmp[3])
else:
lista[tmp[1].lower()].append(tmp[0] + ';' + tmp[2] +
';' + str(count))
count = 0
iscrittiEmigJson = json.dumps(lista)
open('static/jsons/iscrittiEmig.json', 'wb').write(iscrittiEmigJson.
encode())
with open('static/notUpdating/retribuzioneMedia.csv', newline='') as f:
reader = csv.reader(f)
data = list(reader)[1:]
retribuzione = {'Vicenza': [], 'Verona': [], 'Venezia': [],
'Padova': [], 'Treviso': [], 'Belluno': [], 'Rovigo': []}
for row in data:
if (row[1] == 'Padova' or row[1] == 'Vicenza' or row[1] ==
'Venezia' or row[1] == 'Verona' or row[1] == 'Treviso' or
row[1] == 'Belluno' or row[1] == 'Rovigo') and row[5
] != 'totale' and 'media)' in row[3]:
tmp = row[5]
if 'nessun' in tmp:
tmp = 'nessuno'
retribuzione[row[1]].append(tmp + ';' + str(row[8]))
retribuzioneMediaJson = json.dumps(retribuzione)
open('static/jsons/retribuzioneMedia.json', 'wb').write(
retribuzioneMediaJson.encode())
with open('static/notUpdating/taxDisocc.csv', newline='') as f:
reader = csv.reader(f)
data = list(reader)[1:]
lavoro = {'Vicenza': [], 'Verona': [], 'Venezia': [], 'Padova': [],
'Treviso': [], 'Belluno': [], 'Rovigo': []}
for row in data:
if row[7] == '15-24 anni' and row[5] != 'totale':
if row[5] == 'femmine':
lavoro[row[1]].append(str(row[10]))
else:
lavoro[row[1]].append(str(row[8]) + ';' + str(row[10]))
for key in lavoro.keys():
tmp = lavoro[key][0] + ';' + lavoro[key][2]
tmp2 = lavoro[key][1] + ';' + lavoro[key][3]
lavoro[key].clear()
lavoro[key].append(tmp)
lavoro[key].append(tmp2)
disoccupazioneJson = json.dumps(lavoro)
open('static/jsons/disoccupazione.json', 'wb').write(disoccupazioneJson
.encode())
iscritti = requests.get(
'http://dati.ustat.miur.it/dataset/3dd9ca7f-9cc9-4a1a-915c-e569b181dbd5/resource/eae4ee94-0797-41d2-b007-bc6dad3ef3e2/download/iscrittixresidenza.csv'
, allow_redirects=True)
open('static/iscrittiProvincia.csv', 'wb').write(iscritti.content)
with open('static/iscrittiProvincia.csv', newline='') as f:
reader = csv.reader(f)
data = list(reader)[1:]
iscrittiProvincia = {'vicenza': [], 'verona': [], 'venezia': [],
'padova': [], 'treviso': [], 'belluno': [], 'rovigo': []}
for row in data:
row = row[0].split(';')
if row[2].lower() == 'padova' or row[2].lower(
) == 'vicenza' or row[2].lower() == 'venezia' or row[2].lower(
) == 'verona' or row[2].lower() == 'treviso' or row[2].lower(
) == 'belluno' or row[2].lower() == 'rovigo':
iscrittiProvincia[row[2].lower()].append(str(row[0]) + ';' +
str(int(row[3]) + int(row[4])))
iscrittiProvinciaJson = json.dumps(iscrittiProvincia)
open('static/jsons/iscrittiProvincia.json', 'wb').write(
iscrittiProvinciaJson.encode())
return '200'
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@application.route('/')
def index():
return make_response(render_template('index.html'))
@application.route('/getGraph', methods=['POST', 'GET'])
def getgraph():
if request.method == 'POST':
if 'data' in request.form:
if path.exists('static/jsons/' + request.form['data'] + '.json'):
with open('static/jsons/' + request.form['data'] + '.json', 'r'
) as file:
jsonStr = file.read()
jsonStr = json.loads(jsonStr)
return jsonify(jsonStr)
else:
return '<h1>404 NOT FOUND'
else:
return '<h1>400 BAD REQUEST'
elif 'graph' in request.args:
if request.args['graph'] == 'pagaOra':
return make_response(render_template('graphs/pagaOra.html'))
elif request.args['graph'] == 'iscrittiAtn':
if 'sex' in request.args:
return make_response(render_template(
'graphs/iscrittiAtn.html', sex=int(request.args['sex'])))
else:
return make_response(render_template(
'graphs/iscrittiAtn.html', sex=0))
elif request.args['graph'] == 'disoccupati':
return make_response(render_template(
'graphs/disoccupatiGraph.html'))
elif request.args['graph'] == 'iscrittiProv':
return make_response(render_template('graphs/iscrittiProv.html'))
elif request.args['graph'] == 'mf' and 'atn' in request.args:
dir = 'graphs/mf/mf' + request.args['atn'] + '.html'
print(dir)
if path.exists('templates/' + dir):
if 'year' in request.args:
return make_response(render_template(dir, year=int(
request.args['year'])))
else:
return make_response(render_template(dir, year=0))
elif request.args['graph'] == 'emig' and 'prov' in request.args:
dir = 'graphs/emig/iscrittiEmig' + request.args['prov'] + '.html'
if path.exists('templates/' + dir):
return make_response(render_template(dir))
return '<h1>400 BAD REQUEST'
@application.route('/doUpdate')
def updateData():
with open('static/notUpdating/iscrittiAteneo.csv', newline='') as f:
reader = csv.reader(f)
data = list(reader)[1:]
iscrittiAteneo = {'Venezia CF': [], 'Verona': [], 'Venezia IUAV': [
], 'Padova': []}
for row in data:
row = row[0].split(';')
if row[1] == 'Padova' or 'Venezia C' in row[1] or row[1
] == 'Venezia Iuav' or row[1] == 'Verona':
tmp = row[1]
if 'Venezia C' in row[1]:
tmp = 'Venezia CF'
if tmp == 'Venezia Iuav':
tmp = 'Venezia IUAV'
iscrittiAteneo[tmp].append(row[0] + ';' + row[3] + ';' + row[4]
)
iscrittiAteneoJson = json.dumps(iscrittiAteneo)
open('static/jsons/iscrittiAteneo.json', 'wb').write(iscrittiAteneoJson
.encode())
with open('static/notUpdating/iscrittiEmig.json', newline='') as f:
reader = json.load(f)
iscrittiEmig = {'vicenza': [], 'verona': [], 'venezia': [],
'padova': [], 'treviso': [], 'belluno': [], 'rovigo': []}
for row in reader['records']:
if row[4].lower() == 'padova' or row[4].lower(
) == 'vicenza' or row[4].lower() == 'venezia' or row[4].lower(
) == 'verona' or row[4].lower() == 'treviso' or row[4].lower(
) == 'belluno' or row[4].lower() == 'rovigo':
iscrittiEmig[row[4].lower()].append(row[1] + ';' + row[4] +
';' + row[2] + ';' + str(row[6]))
lista = {'vicenza': [], 'verona': [], 'venezia': [], 'padova': [],
'treviso': [], 'belluno': [], 'rovigo': []}
count = 0
for key in iscrittiEmig.keys():
while len(iscrittiEmig[key]) > 2:
tmp = iscrittiEmig[key].pop(0).split(';')
if count == 0:
count = int(tmp[3])
tmp2 = iscrittiEmig[key][0].split(';')[2]
if tmp[2] == tmp2:
count += int(tmp[3])
else:
lista[tmp[1].lower()].append(tmp[0] + ';' + tmp[2] +
';' + str(count))
count = 0
iscrittiEmigJson = json.dumps(lista)
open('static/jsons/iscrittiEmig.json', 'wb').write(iscrittiEmigJson.
encode())
with open('static/notUpdating/retribuzioneMedia.csv', newline='') as f:
reader = csv.reader(f)
data = list(reader)[1:]
retribuzione = {'Vicenza': [], 'Verona': [], 'Venezia': [],
'Padova': [], 'Treviso': [], 'Belluno': [], 'Rovigo': []}
for row in data:
if (row[1] == 'Padova' or row[1] == 'Vicenza' or row[1] ==
'Venezia' or row[1] == 'Verona' or row[1] == 'Treviso' or
row[1] == 'Belluno' or row[1] == 'Rovigo') and row[5
] != 'totale' and 'media)' in row[3]:
tmp = row[5]
if 'nessun' in tmp:
tmp = 'nessuno'
retribuzione[row[1]].append(tmp + ';' + str(row[8]))
retribuzioneMediaJson = json.dumps(retribuzione)
open('static/jsons/retribuzioneMedia.json', 'wb').write(
retribuzioneMediaJson.encode())
with open('static/notUpdating/taxDisocc.csv', newline='') as f:
reader = csv.reader(f)
data = list(reader)[1:]
lavoro = {'Vicenza': [], 'Verona': [], 'Venezia': [], 'Padova': [],
'Treviso': [], 'Belluno': [], 'Rovigo': []}
for row in data:
if row[7] == '15-24 anni' and row[5] != 'totale':
if row[5] == 'femmine':
lavoro[row[1]].append(str(row[10]))
else:
lavoro[row[1]].append(str(row[8]) + ';' + str(row[10]))
for key in lavoro.keys():
tmp = lavoro[key][0] + ';' + lavoro[key][2]
tmp2 = lavoro[key][1] + ';' + lavoro[key][3]
lavoro[key].clear()
lavoro[key].append(tmp)
lavoro[key].append(tmp2)
disoccupazioneJson = json.dumps(lavoro)
open('static/jsons/disoccupazione.json', 'wb').write(disoccupazioneJson
.encode())
iscritti = requests.get(
'http://dati.ustat.miur.it/dataset/3dd9ca7f-9cc9-4a1a-915c-e569b181dbd5/resource/eae4ee94-0797-41d2-b007-bc6dad3ef3e2/download/iscrittixresidenza.csv'
, allow_redirects=True)
open('static/iscrittiProvincia.csv', 'wb').write(iscritti.content)
with open('static/iscrittiProvincia.csv', newline='') as f:
reader = csv.reader(f)
data = list(reader)[1:]
iscrittiProvincia = {'vicenza': [], 'verona': [], 'venezia': [],
'padova': [], 'treviso': [], 'belluno': [], 'rovigo': []}
for row in data:
row = row[0].split(';')
if row[2].lower() == 'padova' or row[2].lower(
) == 'vicenza' or row[2].lower() == 'venezia' or row[2].lower(
) == 'verona' or row[2].lower() == 'treviso' or row[2].lower(
) == 'belluno' or row[2].lower() == 'rovigo':
iscrittiProvincia[row[2].lower()].append(str(row[0]) + ';' +
str(int(row[3]) + int(row[4])))
iscrittiProvinciaJson = json.dumps(iscrittiProvincia)
open('static/jsons/iscrittiProvincia.json', 'wb').write(
iscrittiProvinciaJson.encode())
return '200'
updateData()
if __name__ == '__main__':
application.run(debug=True, port=80)
<|reserved_special_token_1|>
from flask import Flask, render_template, jsonify, request, make_response
import requests
import json
from os import path
import csv
application = Flask(__name__)
@application.route('/')
def index():
return make_response(render_template('index.html'))
@application.route('/getGraph', methods=['POST', 'GET'])
def getgraph():
if request.method == 'POST':
if 'data' in request.form:
if path.exists('static/jsons/' + request.form['data'] + '.json'):
with open('static/jsons/' + request.form['data'] + '.json', 'r'
) as file:
jsonStr = file.read()
jsonStr = json.loads(jsonStr)
return jsonify(jsonStr)
else:
return '<h1>404 NOT FOUND'
else:
return '<h1>400 BAD REQUEST'
elif 'graph' in request.args:
if request.args['graph'] == 'pagaOra':
return make_response(render_template('graphs/pagaOra.html'))
elif request.args['graph'] == 'iscrittiAtn':
if 'sex' in request.args:
return make_response(render_template(
'graphs/iscrittiAtn.html', sex=int(request.args['sex'])))
else:
return make_response(render_template(
'graphs/iscrittiAtn.html', sex=0))
elif request.args['graph'] == 'disoccupati':
return make_response(render_template(
'graphs/disoccupatiGraph.html'))
elif request.args['graph'] == 'iscrittiProv':
return make_response(render_template('graphs/iscrittiProv.html'))
elif request.args['graph'] == 'mf' and 'atn' in request.args:
dir = 'graphs/mf/mf' + request.args['atn'] + '.html'
print(dir)
if path.exists('templates/' + dir):
if 'year' in request.args:
return make_response(render_template(dir, year=int(
request.args['year'])))
else:
return make_response(render_template(dir, year=0))
elif request.args['graph'] == 'emig' and 'prov' in request.args:
dir = 'graphs/emig/iscrittiEmig' + request.args['prov'] + '.html'
if path.exists('templates/' + dir):
return make_response(render_template(dir))
return '<h1>400 BAD REQUEST'
@application.route('/doUpdate')
def updateData():
with open('static/notUpdating/iscrittiAteneo.csv', newline='') as f:
reader = csv.reader(f)
data = list(reader)[1:]
iscrittiAteneo = {'Venezia CF': [], 'Verona': [], 'Venezia IUAV': [
], 'Padova': []}
for row in data:
row = row[0].split(';')
if row[1] == 'Padova' or 'Venezia C' in row[1] or row[1
] == 'Venezia Iuav' or row[1] == 'Verona':
tmp = row[1]
if 'Venezia C' in row[1]:
tmp = 'Venezia CF'
if tmp == 'Venezia Iuav':
tmp = 'Venezia IUAV'
iscrittiAteneo[tmp].append(row[0] + ';' + row[3] + ';' + row[4]
)
iscrittiAteneoJson = json.dumps(iscrittiAteneo)
open('static/jsons/iscrittiAteneo.json', 'wb').write(iscrittiAteneoJson
.encode())
with open('static/notUpdating/iscrittiEmig.json', newline='') as f:
reader = json.load(f)
iscrittiEmig = {'vicenza': [], 'verona': [], 'venezia': [],
'padova': [], 'treviso': [], 'belluno': [], 'rovigo': []}
for row in reader['records']:
if row[4].lower() == 'padova' or row[4].lower(
) == 'vicenza' or row[4].lower() == 'venezia' or row[4].lower(
) == 'verona' or row[4].lower() == 'treviso' or row[4].lower(
) == 'belluno' or row[4].lower() == 'rovigo':
iscrittiEmig[row[4].lower()].append(row[1] + ';' + row[4] +
';' + row[2] + ';' + str(row[6]))
lista = {'vicenza': [], 'verona': [], 'venezia': [], 'padova': [],
'treviso': [], 'belluno': [], 'rovigo': []}
count = 0
for key in iscrittiEmig.keys():
while len(iscrittiEmig[key]) > 2:
tmp = iscrittiEmig[key].pop(0).split(';')
if count == 0:
count = int(tmp[3])
tmp2 = iscrittiEmig[key][0].split(';')[2]
if tmp[2] == tmp2:
count += int(tmp[3])
else:
lista[tmp[1].lower()].append(tmp[0] + ';' + tmp[2] +
';' + str(count))
count = 0
iscrittiEmigJson = json.dumps(lista)
open('static/jsons/iscrittiEmig.json', 'wb').write(iscrittiEmigJson.
encode())
with open('static/notUpdating/retribuzioneMedia.csv', newline='') as f:
reader = csv.reader(f)
data = list(reader)[1:]
retribuzione = {'Vicenza': [], 'Verona': [], 'Venezia': [],
'Padova': [], 'Treviso': [], 'Belluno': [], 'Rovigo': []}
for row in data:
if (row[1] == 'Padova' or row[1] == 'Vicenza' or row[1] ==
'Venezia' or row[1] == 'Verona' or row[1] == 'Treviso' or
row[1] == 'Belluno' or row[1] == 'Rovigo') and row[5
] != 'totale' and 'media)' in row[3]:
tmp = row[5]
if 'nessun' in tmp:
tmp = 'nessuno'
retribuzione[row[1]].append(tmp + ';' + str(row[8]))
retribuzioneMediaJson = json.dumps(retribuzione)
open('static/jsons/retribuzioneMedia.json', 'wb').write(
retribuzioneMediaJson.encode())
with open('static/notUpdating/taxDisocc.csv', newline='') as f:
reader = csv.reader(f)
data = list(reader)[1:]
lavoro = {'Vicenza': [], 'Verona': [], 'Venezia': [], 'Padova': [],
'Treviso': [], 'Belluno': [], 'Rovigo': []}
for row in data:
if row[7] == '15-24 anni' and row[5] != 'totale':
if row[5] == 'femmine':
lavoro[row[1]].append(str(row[10]))
else:
lavoro[row[1]].append(str(row[8]) + ';' + str(row[10]))
for key in lavoro.keys():
tmp = lavoro[key][0] + ';' + lavoro[key][2]
tmp2 = lavoro[key][1] + ';' + lavoro[key][3]
lavoro[key].clear()
lavoro[key].append(tmp)
lavoro[key].append(tmp2)
disoccupazioneJson = json.dumps(lavoro)
open('static/jsons/disoccupazione.json', 'wb').write(disoccupazioneJson
.encode())
iscritti = requests.get(
'http://dati.ustat.miur.it/dataset/3dd9ca7f-9cc9-4a1a-915c-e569b181dbd5/resource/eae4ee94-0797-41d2-b007-bc6dad3ef3e2/download/iscrittixresidenza.csv'
, allow_redirects=True)
open('static/iscrittiProvincia.csv', 'wb').write(iscritti.content)
with open('static/iscrittiProvincia.csv', newline='') as f:
reader = csv.reader(f)
data = list(reader)[1:]
iscrittiProvincia = {'vicenza': [], 'verona': [], 'venezia': [],
'padova': [], 'treviso': [], 'belluno': [], 'rovigo': []}
for row in data:
row = row[0].split(';')
if row[2].lower() == 'padova' or row[2].lower(
) == 'vicenza' or row[2].lower() == 'venezia' or row[2].lower(
) == 'verona' or row[2].lower() == 'treviso' or row[2].lower(
) == 'belluno' or row[2].lower() == 'rovigo':
iscrittiProvincia[row[2].lower()].append(str(row[0]) + ';' +
str(int(row[3]) + int(row[4])))
iscrittiProvinciaJson = json.dumps(iscrittiProvincia)
open('static/jsons/iscrittiProvincia.json', 'wb').write(
iscrittiProvinciaJson.encode())
return '200'
updateData()
if __name__ == '__main__':
application.run(debug=True, port=80)
<|reserved_special_token_1|>
from flask import Flask, render_template, jsonify, request, make_response #BSD License
import requests #Apache 2.0
#StdLibs
import json
from os import path
import csv
####################################################
# Programmed by Alex Prosdocimo and Matteo Mirandola#
####################################################
application = Flask(__name__)
@application.route("/")  # Index
def index():
    """Serve the application's landing page."""
    page = render_template("index.html")
    return make_response(page)
@application.route("/getGraph", methods=["POST", "GET"])
def getgraph():
    """Serve graph data (POST) or a rendered graph template (GET).

    POST: expects a form field ``data`` naming a JSON file under
    ``static/jsons/``; returns its contents as JSON, a 404 text if the
    file is missing, or a 400 text if the field is absent.

    GET: expects a ``graph`` query parameter selecting a template.
    ``iscrittiAtn`` accepts an optional integer ``sex`` filter,
    ``mf`` requires ``atn`` and accepts an optional integer ``year``,
    ``emig`` requires ``prov``.  Any unknown or incomplete request
    falls through to a 400 text response.
    """
    if request.method == "POST":
        if 'data' in request.form:
            json_path = "static/jsons/" + request.form['data'] + ".json"
            if path.exists(json_path):
                with open(json_path, "r") as file:
                    payload = json.loads(file.read())
                return jsonify(payload)
            return "<h1>404 NOT FOUND"
        return "<h1>400 BAD REQUEST"

    if 'graph' in request.args:
        graph = request.args['graph']
        # Horizontal bar graph: hourly wage by education level, per province.
        if graph == "pagaOra":
            return make_response(render_template("graphs/pagaOra.html"))
        # Line graph: university enrolment in Veneto per year.
        if graph == "iscrittiAtn":
            try:
                # Non-numeric ``sex`` used to bubble up as a 500; treat it
                # as a malformed request instead.
                sex = int(request.args.get('sex', 0))
            except ValueError:
                return "<h1>400 BAD REQUEST"
            return make_response(
                render_template("graphs/iscrittiAtn.html", sex=sex))
        if graph == "disoccupati":
            return make_response(
                render_template("graphs/disoccupatiGraph.html"))
        if graph == "iscrittiProv":
            return make_response(render_template("graphs/iscrittiProv.html"))
        # Donut graph: male/female split for one university.
        if graph == "mf" and 'atn' in request.args:
            template = "graphs/mf/mf" + request.args['atn'] + ".html"
            if path.exists("templates/" + template):
                try:
                    year = int(request.args.get('year', 0))
                except ValueError:
                    return "<h1>400 BAD REQUEST"
                return make_response(render_template(template, year=year))
        # Polar area graph: students enrolled in other regions.
        if graph == "emig" and "prov" in request.args:
            template = ("graphs/emig/iscrittiEmig"
                        + request.args['prov'] + ".html")
            if path.exists("templates/" + template):
                return make_response(render_template(template))
    return "<h1>400 BAD REQUEST"
# Updating the datasets:
# Because of an error in how the MIUR generates the per-university enrolment
# file, that file cannot be downloaded dynamically and must be replaced by
# hand. Likewise, the ISTAT data cannot be fetched dynamically through the
# API, whose performance is limited (and which does not support the filters
# needed to produce these files).
# The per-province dataset is refreshed automatically every week; the others
# must be replaced manually. Static datasets go in /static/notUpdating/.
# The per-university enrolment dataset is available at
# http://dati.ustat.miur.it/dataset/3dd9ca7f-9cc9-4a1a-915c-e569b181dbd5/resource/32d26e28-a0b5-45f3-9152-6072164f3e63/download/iscrittixateneo.csv
# and must be renamed iscrittiAteneo.csv.
# The dataset on students who left the region was built by hand from other
# data and cannot be refreshed.
# The unemployment-rate and average-hourly-wage datasets come from the portal
# http://dati.istat.it/ . The site's search is slow and limited; the two
# datasets are "Tasso di Disoccupazione - Dati Provinciali" and
# "Retribuzione oraria media per titolo di studio". In both cases, filter the
# results to the Veneto provinces only, and rename the files
# retribuzioneMedia.csv and taxDisocc.csv.
# Fortunately, they are only updated once a year.
@application.route("/doUpdate")
def updateData():
    """Rebuild every derived JSON dataset under static/jsons/.

    Most raw sources live in static/notUpdating/ and must be replaced by
    hand (see the note above this route); only the per-province enrolment
    CSV is downloaded from the MIUR open-data portal.  Always returns the
    string "200".
    """
    # Lower-case province names, in the key order the output JSONs use.
    provinces = ('vicenza', 'verona', 'venezia', 'padova',
                 'treviso', 'belluno', 'rovigo')

    # --- Enrolment per university --------------------------------------
    # Change the file name here if needed; it must be well-formed CSV.
    with open('static/notUpdating/iscrittiAteneo.csv', newline='') as f:
        data = list(csv.reader(f))[1:]
    iscrittiAteneo = {
        'Venezia CF': [],
        'Verona': [],
        'Venezia IUAV': [],
        'Padova': []}
    for row in data:
        row = row[0].split(';')
        if (row[1] == 'Padova' or 'Venezia C' in row[1]
                or row[1] == 'Venezia Iuav' or row[1] == 'Verona'):
            tmp = row[1]
            if 'Venezia C' in row[1]:
                tmp = 'Venezia CF'
            if tmp == 'Venezia Iuav':
                tmp = 'Venezia IUAV'
            iscrittiAteneo[tmp].append(row[0] + ';' + row[3] + ';' + row[4])
    # Format: {"university": ["schoolYear;maleCount;femaleCount", ...], ...}
    with open('static/jsons/iscrittiAteneo.json', 'wb') as out:
        out.write(json.dumps(iscrittiAteneo).encode())

    # --- Students enrolled outside the region --------------------------
    with open('static/notUpdating/iscrittiEmig.json', newline='') as f:
        reader = json.load(f)
    iscrittiEmig = {p: [] for p in provinces}
    for row in reader['records']:
        if row[4].lower() in provinces:
            iscrittiEmig[row[4].lower()].append(
                row[1] + ';' + row[4] + ';' + row[2] + ';' + str(row[6]))
    lista = {p: [] for p in provinces}
    # Sum consecutive rows that share the same destination region.
    # NOTE(review): ``count`` deliberately carries over between provinces,
    # matching the original behaviour — confirm this is intended.
    count = 0
    for key in iscrittiEmig.keys():
        while len(iscrittiEmig[key]) > 2:
            tmp = iscrittiEmig[key].pop(0).split(';')
            if count == 0:
                count = int(tmp[3])
            nxt_region = iscrittiEmig[key][0].split(';')[2]
            if tmp[2] == nxt_region:
                count += int(tmp[3])
            else:
                lista[tmp[1].lower()].append(
                    tmp[0] + ';' + tmp[2] + ';' + str(count))
                count = 0
    # Format: {"province": ["year;ProvinceName;Region;studentCount", ...], ...}
    with open('static/jsons/iscrittiEmig.json', 'wb') as out:
        out.write(json.dumps(lista).encode())

    # --- Average hourly wage by education level ------------------------
    with open('static/notUpdating/retribuzioneMedia.csv', newline='') as f:
        data = list(csv.reader(f))[1:]
    retribuzione = {
        'Vicenza': [],
        'Verona': [],
        'Venezia': [],
        'Padova': [],
        'Treviso': [],
        'Belluno': [],
        'Rovigo': []}
    for row in data:
        if (row[1] in retribuzione and row[5] != 'totale'
                and 'media)' in row[3]):
            # Entry is "educationLevel;averageHourlyWage".
            tmp = row[5]
            if 'nessun' in tmp:
                tmp = 'nessuno'
            retribuzione[row[1]].append(tmp + ';' + str(row[8]))
    # Format: {"city": ["laurea;avg", "diploma;avg", "nessuno;avg"], ...}
    with open('static/jsons/retribuzioneMedia.json', 'wb') as out:
        out.write(json.dumps(retribuzione).encode())

    # --- Youth unemployment rate ----------------------------------------
    with open('static/notUpdating/taxDisocc.csv', newline='') as f:
        data = list(csv.reader(f))[1:]
    lavoro = {
        'Vicenza': [],
        'Verona': [],
        'Venezia': [],
        'Padova': [],
        'Treviso': [],
        'Belluno': [],
        'Rovigo': []}
    for row in data:
        if row[7] == '15-24 anni' and row[5] != 'totale':
            if row[5] == 'femmine':
                lavoro[row[1]].append(str(row[10]))
            else:
                lavoro[row[1]].append(str(row[8]) + ';' + str(row[10]))
    # Collapse the four collected entries per province into two
    # "year;malePct;femalePct" strings.  NOTE(review): assumes the CSV
    # yields exactly two male and two female rows per province — confirm
    # whenever the dataset is replaced.
    for key in lavoro.keys():
        first = lavoro[key][0] + ';' + lavoro[key][2]
        second = lavoro[key][1] + ';' + lavoro[key][3]
        lavoro[key].clear()
        lavoro[key].append(first)
        lavoro[key].append(second)
    with open('static/jsons/disoccupazione.json', 'wb') as out:
        out.write(json.dumps(lavoro).encode())

    # --- Total enrolment per province (downloaded) ----------------------
    iscritti = requests.get(
        'http://dati.ustat.miur.it/dataset/3dd9ca7f-9cc9-4a1a-915c-e569b181dbd5/resource/eae4ee94-0797-41d2-b007-bc6dad3ef3e2/download/iscrittixresidenza.csv',
        allow_redirects=True)
    with open('static/iscrittiProvincia.csv', 'wb') as out:
        out.write(iscritti.content)
    with open('static/iscrittiProvincia.csv', newline='') as f:
        data = list(csv.reader(f))[1:]
    iscrittiProvincia = {p: [] for p in provinces}
    for row in data:
        row = row[0].split(';')
        if row[2].lower() in provinces:
            iscrittiProvincia[row[2].lower()].append(
                str(row[0]) + ';' + str(int(row[3]) + int(row[4])))
    # Format: {"city": ["year;total", ...], ...}
    with open('static/jsons/iscrittiProvincia.json', 'wb') as out:
        out.write(json.dumps(iscrittiProvincia).encode())
    return "200"
#########
# Startup#
#########
# On every forced restart of the application the data is refreshed
# (this takes a few seconds at most).
updateData()
if __name__ == '__main__':
application.run(debug=True, port=80)
|
flexible
|
{
"blob_id": "14b9927435536a4b29b0930791ab4525acd80bc9",
"index": 5783,
"step-1": "<mask token>\n\n\n@application.route('/')\ndef index():\n return make_response(render_template('index.html'))\n\n\n@application.route('/getGraph', methods=['POST', 'GET'])\ndef getgraph():\n if request.method == 'POST':\n if 'data' in request.form:\n if path.exists('static/jsons/' + request.form['data'] + '.json'):\n with open('static/jsons/' + request.form['data'] + '.json', 'r'\n ) as file:\n jsonStr = file.read()\n jsonStr = json.loads(jsonStr)\n return jsonify(jsonStr)\n else:\n return '<h1>404 NOT FOUND'\n else:\n return '<h1>400 BAD REQUEST'\n elif 'graph' in request.args:\n if request.args['graph'] == 'pagaOra':\n return make_response(render_template('graphs/pagaOra.html'))\n elif request.args['graph'] == 'iscrittiAtn':\n if 'sex' in request.args:\n return make_response(render_template(\n 'graphs/iscrittiAtn.html', sex=int(request.args['sex'])))\n else:\n return make_response(render_template(\n 'graphs/iscrittiAtn.html', sex=0))\n elif request.args['graph'] == 'disoccupati':\n return make_response(render_template(\n 'graphs/disoccupatiGraph.html'))\n elif request.args['graph'] == 'iscrittiProv':\n return make_response(render_template('graphs/iscrittiProv.html'))\n elif request.args['graph'] == 'mf' and 'atn' in request.args:\n dir = 'graphs/mf/mf' + request.args['atn'] + '.html'\n print(dir)\n if path.exists('templates/' + dir):\n if 'year' in request.args:\n return make_response(render_template(dir, year=int(\n request.args['year'])))\n else:\n return make_response(render_template(dir, year=0))\n elif request.args['graph'] == 'emig' and 'prov' in request.args:\n dir = 'graphs/emig/iscrittiEmig' + request.args['prov'] + '.html'\n if path.exists('templates/' + dir):\n return make_response(render_template(dir))\n return '<h1>400 BAD REQUEST'\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\n@application.route('/')\ndef index():\n return make_response(render_template('index.html'))\n\n\n@application.route('/getGraph', methods=['POST', 'GET'])\ndef getgraph():\n if request.method == 'POST':\n if 'data' in request.form:\n if path.exists('static/jsons/' + request.form['data'] + '.json'):\n with open('static/jsons/' + request.form['data'] + '.json', 'r'\n ) as file:\n jsonStr = file.read()\n jsonStr = json.loads(jsonStr)\n return jsonify(jsonStr)\n else:\n return '<h1>404 NOT FOUND'\n else:\n return '<h1>400 BAD REQUEST'\n elif 'graph' in request.args:\n if request.args['graph'] == 'pagaOra':\n return make_response(render_template('graphs/pagaOra.html'))\n elif request.args['graph'] == 'iscrittiAtn':\n if 'sex' in request.args:\n return make_response(render_template(\n 'graphs/iscrittiAtn.html', sex=int(request.args['sex'])))\n else:\n return make_response(render_template(\n 'graphs/iscrittiAtn.html', sex=0))\n elif request.args['graph'] == 'disoccupati':\n return make_response(render_template(\n 'graphs/disoccupatiGraph.html'))\n elif request.args['graph'] == 'iscrittiProv':\n return make_response(render_template('graphs/iscrittiProv.html'))\n elif request.args['graph'] == 'mf' and 'atn' in request.args:\n dir = 'graphs/mf/mf' + request.args['atn'] + '.html'\n print(dir)\n if path.exists('templates/' + dir):\n if 'year' in request.args:\n return make_response(render_template(dir, year=int(\n request.args['year'])))\n else:\n return make_response(render_template(dir, year=0))\n elif request.args['graph'] == 'emig' and 'prov' in request.args:\n dir = 'graphs/emig/iscrittiEmig' + request.args['prov'] + '.html'\n if path.exists('templates/' + dir):\n return make_response(render_template(dir))\n return '<h1>400 BAD REQUEST'\n\n\n@application.route('/doUpdate')\ndef updateData():\n with open('static/notUpdating/iscrittiAteneo.csv', newline='') as f:\n reader = csv.reader(f)\n data = list(reader)[1:]\n iscrittiAteneo = {'Venezia CF': 
[], 'Verona': [], 'Venezia IUAV': [\n ], 'Padova': []}\n for row in data:\n row = row[0].split(';')\n if row[1] == 'Padova' or 'Venezia C' in row[1] or row[1\n ] == 'Venezia Iuav' or row[1] == 'Verona':\n tmp = row[1]\n if 'Venezia C' in row[1]:\n tmp = 'Venezia CF'\n if tmp == 'Venezia Iuav':\n tmp = 'Venezia IUAV'\n iscrittiAteneo[tmp].append(row[0] + ';' + row[3] + ';' + row[4]\n )\n iscrittiAteneoJson = json.dumps(iscrittiAteneo)\n open('static/jsons/iscrittiAteneo.json', 'wb').write(iscrittiAteneoJson\n .encode())\n with open('static/notUpdating/iscrittiEmig.json', newline='') as f:\n reader = json.load(f)\n iscrittiEmig = {'vicenza': [], 'verona': [], 'venezia': [],\n 'padova': [], 'treviso': [], 'belluno': [], 'rovigo': []}\n for row in reader['records']:\n if row[4].lower() == 'padova' or row[4].lower(\n ) == 'vicenza' or row[4].lower() == 'venezia' or row[4].lower(\n ) == 'verona' or row[4].lower() == 'treviso' or row[4].lower(\n ) == 'belluno' or row[4].lower() == 'rovigo':\n iscrittiEmig[row[4].lower()].append(row[1] + ';' + row[4] +\n ';' + row[2] + ';' + str(row[6]))\n lista = {'vicenza': [], 'verona': [], 'venezia': [], 'padova': [],\n 'treviso': [], 'belluno': [], 'rovigo': []}\n count = 0\n for key in iscrittiEmig.keys():\n while len(iscrittiEmig[key]) > 2:\n tmp = iscrittiEmig[key].pop(0).split(';')\n if count == 0:\n count = int(tmp[3])\n tmp2 = iscrittiEmig[key][0].split(';')[2]\n if tmp[2] == tmp2:\n count += int(tmp[3])\n else:\n lista[tmp[1].lower()].append(tmp[0] + ';' + tmp[2] +\n ';' + str(count))\n count = 0\n iscrittiEmigJson = json.dumps(lista)\n open('static/jsons/iscrittiEmig.json', 'wb').write(iscrittiEmigJson.\n encode())\n with open('static/notUpdating/retribuzioneMedia.csv', newline='') as f:\n reader = csv.reader(f)\n data = list(reader)[1:]\n retribuzione = {'Vicenza': [], 'Verona': [], 'Venezia': [],\n 'Padova': [], 'Treviso': [], 'Belluno': [], 'Rovigo': []}\n for row in data:\n if (row[1] == 'Padova' or row[1] == 'Vicenza' or 
row[1] ==\n 'Venezia' or row[1] == 'Verona' or row[1] == 'Treviso' or \n row[1] == 'Belluno' or row[1] == 'Rovigo') and row[5\n ] != 'totale' and 'media)' in row[3]:\n tmp = row[5]\n if 'nessun' in tmp:\n tmp = 'nessuno'\n retribuzione[row[1]].append(tmp + ';' + str(row[8]))\n retribuzioneMediaJson = json.dumps(retribuzione)\n open('static/jsons/retribuzioneMedia.json', 'wb').write(\n retribuzioneMediaJson.encode())\n with open('static/notUpdating/taxDisocc.csv', newline='') as f:\n reader = csv.reader(f)\n data = list(reader)[1:]\n lavoro = {'Vicenza': [], 'Verona': [], 'Venezia': [], 'Padova': [],\n 'Treviso': [], 'Belluno': [], 'Rovigo': []}\n for row in data:\n if row[7] == '15-24 anni' and row[5] != 'totale':\n if row[5] == 'femmine':\n lavoro[row[1]].append(str(row[10]))\n else:\n lavoro[row[1]].append(str(row[8]) + ';' + str(row[10]))\n for key in lavoro.keys():\n tmp = lavoro[key][0] + ';' + lavoro[key][2]\n tmp2 = lavoro[key][1] + ';' + lavoro[key][3]\n lavoro[key].clear()\n lavoro[key].append(tmp)\n lavoro[key].append(tmp2)\n disoccupazioneJson = json.dumps(lavoro)\n open('static/jsons/disoccupazione.json', 'wb').write(disoccupazioneJson\n .encode())\n iscritti = requests.get(\n 'http://dati.ustat.miur.it/dataset/3dd9ca7f-9cc9-4a1a-915c-e569b181dbd5/resource/eae4ee94-0797-41d2-b007-bc6dad3ef3e2/download/iscrittixresidenza.csv'\n , allow_redirects=True)\n open('static/iscrittiProvincia.csv', 'wb').write(iscritti.content)\n with open('static/iscrittiProvincia.csv', newline='') as f:\n reader = csv.reader(f)\n data = list(reader)[1:]\n iscrittiProvincia = {'vicenza': [], 'verona': [], 'venezia': [],\n 'padova': [], 'treviso': [], 'belluno': [], 'rovigo': []}\n for row in data:\n row = row[0].split(';')\n if row[2].lower() == 'padova' or row[2].lower(\n ) == 'vicenza' or row[2].lower() == 'venezia' or row[2].lower(\n ) == 'verona' or row[2].lower() == 'treviso' or row[2].lower(\n ) == 'belluno' or row[2].lower() == 'rovigo':\n 
iscrittiProvincia[row[2].lower()].append(str(row[0]) + ';' +\n str(int(row[3]) + int(row[4])))\n iscrittiProvinciaJson = json.dumps(iscrittiProvincia)\n open('static/jsons/iscrittiProvincia.json', 'wb').write(\n iscrittiProvinciaJson.encode())\n return '200'\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\n@application.route('/')\ndef index():\n return make_response(render_template('index.html'))\n\n\n@application.route('/getGraph', methods=['POST', 'GET'])\ndef getgraph():\n if request.method == 'POST':\n if 'data' in request.form:\n if path.exists('static/jsons/' + request.form['data'] + '.json'):\n with open('static/jsons/' + request.form['data'] + '.json', 'r'\n ) as file:\n jsonStr = file.read()\n jsonStr = json.loads(jsonStr)\n return jsonify(jsonStr)\n else:\n return '<h1>404 NOT FOUND'\n else:\n return '<h1>400 BAD REQUEST'\n elif 'graph' in request.args:\n if request.args['graph'] == 'pagaOra':\n return make_response(render_template('graphs/pagaOra.html'))\n elif request.args['graph'] == 'iscrittiAtn':\n if 'sex' in request.args:\n return make_response(render_template(\n 'graphs/iscrittiAtn.html', sex=int(request.args['sex'])))\n else:\n return make_response(render_template(\n 'graphs/iscrittiAtn.html', sex=0))\n elif request.args['graph'] == 'disoccupati':\n return make_response(render_template(\n 'graphs/disoccupatiGraph.html'))\n elif request.args['graph'] == 'iscrittiProv':\n return make_response(render_template('graphs/iscrittiProv.html'))\n elif request.args['graph'] == 'mf' and 'atn' in request.args:\n dir = 'graphs/mf/mf' + request.args['atn'] + '.html'\n print(dir)\n if path.exists('templates/' + dir):\n if 'year' in request.args:\n return make_response(render_template(dir, year=int(\n request.args['year'])))\n else:\n return make_response(render_template(dir, year=0))\n elif request.args['graph'] == 'emig' and 'prov' in request.args:\n dir = 'graphs/emig/iscrittiEmig' + request.args['prov'] + '.html'\n if path.exists('templates/' + dir):\n return make_response(render_template(dir))\n return '<h1>400 BAD REQUEST'\n\n\n@application.route('/doUpdate')\ndef updateData():\n with open('static/notUpdating/iscrittiAteneo.csv', newline='') as f:\n reader = csv.reader(f)\n data = list(reader)[1:]\n iscrittiAteneo = {'Venezia CF': 
[], 'Verona': [], 'Venezia IUAV': [\n ], 'Padova': []}\n for row in data:\n row = row[0].split(';')\n if row[1] == 'Padova' or 'Venezia C' in row[1] or row[1\n ] == 'Venezia Iuav' or row[1] == 'Verona':\n tmp = row[1]\n if 'Venezia C' in row[1]:\n tmp = 'Venezia CF'\n if tmp == 'Venezia Iuav':\n tmp = 'Venezia IUAV'\n iscrittiAteneo[tmp].append(row[0] + ';' + row[3] + ';' + row[4]\n )\n iscrittiAteneoJson = json.dumps(iscrittiAteneo)\n open('static/jsons/iscrittiAteneo.json', 'wb').write(iscrittiAteneoJson\n .encode())\n with open('static/notUpdating/iscrittiEmig.json', newline='') as f:\n reader = json.load(f)\n iscrittiEmig = {'vicenza': [], 'verona': [], 'venezia': [],\n 'padova': [], 'treviso': [], 'belluno': [], 'rovigo': []}\n for row in reader['records']:\n if row[4].lower() == 'padova' or row[4].lower(\n ) == 'vicenza' or row[4].lower() == 'venezia' or row[4].lower(\n ) == 'verona' or row[4].lower() == 'treviso' or row[4].lower(\n ) == 'belluno' or row[4].lower() == 'rovigo':\n iscrittiEmig[row[4].lower()].append(row[1] + ';' + row[4] +\n ';' + row[2] + ';' + str(row[6]))\n lista = {'vicenza': [], 'verona': [], 'venezia': [], 'padova': [],\n 'treviso': [], 'belluno': [], 'rovigo': []}\n count = 0\n for key in iscrittiEmig.keys():\n while len(iscrittiEmig[key]) > 2:\n tmp = iscrittiEmig[key].pop(0).split(';')\n if count == 0:\n count = int(tmp[3])\n tmp2 = iscrittiEmig[key][0].split(';')[2]\n if tmp[2] == tmp2:\n count += int(tmp[3])\n else:\n lista[tmp[1].lower()].append(tmp[0] + ';' + tmp[2] +\n ';' + str(count))\n count = 0\n iscrittiEmigJson = json.dumps(lista)\n open('static/jsons/iscrittiEmig.json', 'wb').write(iscrittiEmigJson.\n encode())\n with open('static/notUpdating/retribuzioneMedia.csv', newline='') as f:\n reader = csv.reader(f)\n data = list(reader)[1:]\n retribuzione = {'Vicenza': [], 'Verona': [], 'Venezia': [],\n 'Padova': [], 'Treviso': [], 'Belluno': [], 'Rovigo': []}\n for row in data:\n if (row[1] == 'Padova' or row[1] == 'Vicenza' or 
row[1] ==\n 'Venezia' or row[1] == 'Verona' or row[1] == 'Treviso' or \n row[1] == 'Belluno' or row[1] == 'Rovigo') and row[5\n ] != 'totale' and 'media)' in row[3]:\n tmp = row[5]\n if 'nessun' in tmp:\n tmp = 'nessuno'\n retribuzione[row[1]].append(tmp + ';' + str(row[8]))\n retribuzioneMediaJson = json.dumps(retribuzione)\n open('static/jsons/retribuzioneMedia.json', 'wb').write(\n retribuzioneMediaJson.encode())\n with open('static/notUpdating/taxDisocc.csv', newline='') as f:\n reader = csv.reader(f)\n data = list(reader)[1:]\n lavoro = {'Vicenza': [], 'Verona': [], 'Venezia': [], 'Padova': [],\n 'Treviso': [], 'Belluno': [], 'Rovigo': []}\n for row in data:\n if row[7] == '15-24 anni' and row[5] != 'totale':\n if row[5] == 'femmine':\n lavoro[row[1]].append(str(row[10]))\n else:\n lavoro[row[1]].append(str(row[8]) + ';' + str(row[10]))\n for key in lavoro.keys():\n tmp = lavoro[key][0] + ';' + lavoro[key][2]\n tmp2 = lavoro[key][1] + ';' + lavoro[key][3]\n lavoro[key].clear()\n lavoro[key].append(tmp)\n lavoro[key].append(tmp2)\n disoccupazioneJson = json.dumps(lavoro)\n open('static/jsons/disoccupazione.json', 'wb').write(disoccupazioneJson\n .encode())\n iscritti = requests.get(\n 'http://dati.ustat.miur.it/dataset/3dd9ca7f-9cc9-4a1a-915c-e569b181dbd5/resource/eae4ee94-0797-41d2-b007-bc6dad3ef3e2/download/iscrittixresidenza.csv'\n , allow_redirects=True)\n open('static/iscrittiProvincia.csv', 'wb').write(iscritti.content)\n with open('static/iscrittiProvincia.csv', newline='') as f:\n reader = csv.reader(f)\n data = list(reader)[1:]\n iscrittiProvincia = {'vicenza': [], 'verona': [], 'venezia': [],\n 'padova': [], 'treviso': [], 'belluno': [], 'rovigo': []}\n for row in data:\n row = row[0].split(';')\n if row[2].lower() == 'padova' or row[2].lower(\n ) == 'vicenza' or row[2].lower() == 'venezia' or row[2].lower(\n ) == 'verona' or row[2].lower() == 'treviso' or row[2].lower(\n ) == 'belluno' or row[2].lower() == 'rovigo':\n 
iscrittiProvincia[row[2].lower()].append(str(row[0]) + ';' +\n str(int(row[3]) + int(row[4])))\n iscrittiProvinciaJson = json.dumps(iscrittiProvincia)\n open('static/jsons/iscrittiProvincia.json', 'wb').write(\n iscrittiProvinciaJson.encode())\n return '200'\n\n\nupdateData()\nif __name__ == '__main__':\n application.run(debug=True, port=80)\n",
"step-4": "from flask import Flask, render_template, jsonify, request, make_response\nimport requests\nimport json\nfrom os import path\nimport csv\napplication = Flask(__name__)\n\n\n@application.route('/')\ndef index():\n return make_response(render_template('index.html'))\n\n\n@application.route('/getGraph', methods=['POST', 'GET'])\ndef getgraph():\n if request.method == 'POST':\n if 'data' in request.form:\n if path.exists('static/jsons/' + request.form['data'] + '.json'):\n with open('static/jsons/' + request.form['data'] + '.json', 'r'\n ) as file:\n jsonStr = file.read()\n jsonStr = json.loads(jsonStr)\n return jsonify(jsonStr)\n else:\n return '<h1>404 NOT FOUND'\n else:\n return '<h1>400 BAD REQUEST'\n elif 'graph' in request.args:\n if request.args['graph'] == 'pagaOra':\n return make_response(render_template('graphs/pagaOra.html'))\n elif request.args['graph'] == 'iscrittiAtn':\n if 'sex' in request.args:\n return make_response(render_template(\n 'graphs/iscrittiAtn.html', sex=int(request.args['sex'])))\n else:\n return make_response(render_template(\n 'graphs/iscrittiAtn.html', sex=0))\n elif request.args['graph'] == 'disoccupati':\n return make_response(render_template(\n 'graphs/disoccupatiGraph.html'))\n elif request.args['graph'] == 'iscrittiProv':\n return make_response(render_template('graphs/iscrittiProv.html'))\n elif request.args['graph'] == 'mf' and 'atn' in request.args:\n dir = 'graphs/mf/mf' + request.args['atn'] + '.html'\n print(dir)\n if path.exists('templates/' + dir):\n if 'year' in request.args:\n return make_response(render_template(dir, year=int(\n request.args['year'])))\n else:\n return make_response(render_template(dir, year=0))\n elif request.args['graph'] == 'emig' and 'prov' in request.args:\n dir = 'graphs/emig/iscrittiEmig' + request.args['prov'] + '.html'\n if path.exists('templates/' + dir):\n return make_response(render_template(dir))\n return '<h1>400 BAD REQUEST'\n\n\n@application.route('/doUpdate')\ndef 
updateData():\n with open('static/notUpdating/iscrittiAteneo.csv', newline='') as f:\n reader = csv.reader(f)\n data = list(reader)[1:]\n iscrittiAteneo = {'Venezia CF': [], 'Verona': [], 'Venezia IUAV': [\n ], 'Padova': []}\n for row in data:\n row = row[0].split(';')\n if row[1] == 'Padova' or 'Venezia C' in row[1] or row[1\n ] == 'Venezia Iuav' or row[1] == 'Verona':\n tmp = row[1]\n if 'Venezia C' in row[1]:\n tmp = 'Venezia CF'\n if tmp == 'Venezia Iuav':\n tmp = 'Venezia IUAV'\n iscrittiAteneo[tmp].append(row[0] + ';' + row[3] + ';' + row[4]\n )\n iscrittiAteneoJson = json.dumps(iscrittiAteneo)\n open('static/jsons/iscrittiAteneo.json', 'wb').write(iscrittiAteneoJson\n .encode())\n with open('static/notUpdating/iscrittiEmig.json', newline='') as f:\n reader = json.load(f)\n iscrittiEmig = {'vicenza': [], 'verona': [], 'venezia': [],\n 'padova': [], 'treviso': [], 'belluno': [], 'rovigo': []}\n for row in reader['records']:\n if row[4].lower() == 'padova' or row[4].lower(\n ) == 'vicenza' or row[4].lower() == 'venezia' or row[4].lower(\n ) == 'verona' or row[4].lower() == 'treviso' or row[4].lower(\n ) == 'belluno' or row[4].lower() == 'rovigo':\n iscrittiEmig[row[4].lower()].append(row[1] + ';' + row[4] +\n ';' + row[2] + ';' + str(row[6]))\n lista = {'vicenza': [], 'verona': [], 'venezia': [], 'padova': [],\n 'treviso': [], 'belluno': [], 'rovigo': []}\n count = 0\n for key in iscrittiEmig.keys():\n while len(iscrittiEmig[key]) > 2:\n tmp = iscrittiEmig[key].pop(0).split(';')\n if count == 0:\n count = int(tmp[3])\n tmp2 = iscrittiEmig[key][0].split(';')[2]\n if tmp[2] == tmp2:\n count += int(tmp[3])\n else:\n lista[tmp[1].lower()].append(tmp[0] + ';' + tmp[2] +\n ';' + str(count))\n count = 0\n iscrittiEmigJson = json.dumps(lista)\n open('static/jsons/iscrittiEmig.json', 'wb').write(iscrittiEmigJson.\n encode())\n with open('static/notUpdating/retribuzioneMedia.csv', newline='') as f:\n reader = csv.reader(f)\n data = list(reader)[1:]\n retribuzione = 
{'Vicenza': [], 'Verona': [], 'Venezia': [],\n 'Padova': [], 'Treviso': [], 'Belluno': [], 'Rovigo': []}\n for row in data:\n if (row[1] == 'Padova' or row[1] == 'Vicenza' or row[1] ==\n 'Venezia' or row[1] == 'Verona' or row[1] == 'Treviso' or \n row[1] == 'Belluno' or row[1] == 'Rovigo') and row[5\n ] != 'totale' and 'media)' in row[3]:\n tmp = row[5]\n if 'nessun' in tmp:\n tmp = 'nessuno'\n retribuzione[row[1]].append(tmp + ';' + str(row[8]))\n retribuzioneMediaJson = json.dumps(retribuzione)\n open('static/jsons/retribuzioneMedia.json', 'wb').write(\n retribuzioneMediaJson.encode())\n with open('static/notUpdating/taxDisocc.csv', newline='') as f:\n reader = csv.reader(f)\n data = list(reader)[1:]\n lavoro = {'Vicenza': [], 'Verona': [], 'Venezia': [], 'Padova': [],\n 'Treviso': [], 'Belluno': [], 'Rovigo': []}\n for row in data:\n if row[7] == '15-24 anni' and row[5] != 'totale':\n if row[5] == 'femmine':\n lavoro[row[1]].append(str(row[10]))\n else:\n lavoro[row[1]].append(str(row[8]) + ';' + str(row[10]))\n for key in lavoro.keys():\n tmp = lavoro[key][0] + ';' + lavoro[key][2]\n tmp2 = lavoro[key][1] + ';' + lavoro[key][3]\n lavoro[key].clear()\n lavoro[key].append(tmp)\n lavoro[key].append(tmp2)\n disoccupazioneJson = json.dumps(lavoro)\n open('static/jsons/disoccupazione.json', 'wb').write(disoccupazioneJson\n .encode())\n iscritti = requests.get(\n 'http://dati.ustat.miur.it/dataset/3dd9ca7f-9cc9-4a1a-915c-e569b181dbd5/resource/eae4ee94-0797-41d2-b007-bc6dad3ef3e2/download/iscrittixresidenza.csv'\n , allow_redirects=True)\n open('static/iscrittiProvincia.csv', 'wb').write(iscritti.content)\n with open('static/iscrittiProvincia.csv', newline='') as f:\n reader = csv.reader(f)\n data = list(reader)[1:]\n iscrittiProvincia = {'vicenza': [], 'verona': [], 'venezia': [],\n 'padova': [], 'treviso': [], 'belluno': [], 'rovigo': []}\n for row in data:\n row = row[0].split(';')\n if row[2].lower() == 'padova' or row[2].lower(\n ) == 'vicenza' or row[2].lower() 
== 'venezia' or row[2].lower(\n ) == 'verona' or row[2].lower() == 'treviso' or row[2].lower(\n ) == 'belluno' or row[2].lower() == 'rovigo':\n iscrittiProvincia[row[2].lower()].append(str(row[0]) + ';' +\n str(int(row[3]) + int(row[4])))\n iscrittiProvinciaJson = json.dumps(iscrittiProvincia)\n open('static/jsons/iscrittiProvincia.json', 'wb').write(\n iscrittiProvinciaJson.encode())\n return '200'\n\n\nupdateData()\nif __name__ == '__main__':\n application.run(debug=True, port=80)\n",
"step-5": "from flask import Flask, render_template, jsonify, request, make_response #BSD License\r\nimport requests #Apache 2.0\r\n\r\n#StdLibs\r\nimport json\r\nfrom os import path\r\n\r\nimport csv\r\n\r\n###################################################\r\n#Programmato da Alex Prosdocimo e Matteo Mirandola#\r\n###################################################\r\n\r\napplication = Flask(__name__)\r\n\r\n\r\n@application.route(\"/\") # Index\r\ndef index():\r\n return make_response(render_template(\"index.html\"))\r\n\r\n\r\n@application.route(\"/getGraph\", methods=[\"POST\", \"GET\"])\r\ndef getgraph():\r\n #Metodo POST: responsabile di ottnere i dati in formato json dal server. \r\n #Il server si aspetta un campo data che contenga il nome di un file esistente nel server nella cartella /static/json/\r\n #Se non trova il file da un 404\r\n #Se non trova il campo data da un 400\r\n if request.method == \"POST\":\r\n if('data' in request.form):\r\n if(path.exists(\"static/jsons/\" + request.form['data'] + \".json\")):\r\n with open(\"static/jsons/\" + request.form['data'] + \".json\", \"r\") as file:\r\n jsonStr = file.read()\r\n jsonStr = json.loads(jsonStr)\r\n return jsonify(jsonStr)\r\n else:\r\n return \"<h1>404 NOT FOUND\"\r\n else:\r\n return \"<h1>400 BAD REQUEST\"\r\n else:\r\n #Metodo GET:\r\n #si aspetta un campo graph che contenga uno dei nomi sotto presenti\r\n #nel caso di mf e emig si aspetta anche un secondo campo che specifichi\r\n #l'università o la provincia-\r\n #Inoltre, iscrittiAtn e mf POSSONO (ma non devono necessariamente) avere\r\n #un campo aggiuntivo che filtri i dati di uno specifico anno o per uno specifico sesso2\r\n if 'graph' in request.args:\r\n\r\n # HBar Graph per la paga oraria provinciale a seconda del livello di istruzione\r\n if(request.args['graph'] == \"pagaOra\"):\r\n return make_response(render_template(\"graphs/pagaOra.html\"))\r\n\r\n # Line Graph per gli iscritti alle università nel veneto per anno\r\n 
elif(request.args['graph'] == \"iscrittiAtn\"):\r\n if('sex' in request.args):\r\n return make_response(render_template(\"graphs/iscrittiAtn.html\", sex=int(request.args['sex'])))\r\n else:\r\n return make_response(render_template(\"graphs/iscrittiAtn.html\", sex=0))\r\n\r\n elif(request.args['graph'] == \"disoccupati\"):\r\n return make_response(render_template(\"graphs/disoccupatiGraph.html\"))\r\n\r\n elif(request.args['graph'] == \"iscrittiProv\"):\r\n return make_response(render_template(\"graphs/iscrittiProv.html\"))\r\n\r\n # Donut Graph per la distribuzione di m/f nelle università in veneto\r\n elif(request.args['graph'] == \"mf\" and 'atn' in request.args):\r\n dir = \"graphs/mf/mf\" + request.args['atn'] + \".html\"\r\n print(dir)\r\n if(path.exists(\"templates/\" + dir)):\r\n if('year' in request.args):\r\n return make_response(render_template(dir, year=int(request.args['year'])))\r\n else:\r\n return make_response(render_template(dir, year=0))\r\n\r\n # Polar Area Graph per gli studenti emigrati in altre regioni\r\n elif(request.args['graph'] == \"emig\" and \"prov\" in request.args):\r\n dir = \"graphs/emig/iscrittiEmig\" + \\\r\n request.args['prov'] + \".html\"\r\n if(path.exists(\"templates/\" + dir)):\r\n return make_response(render_template(dir))\r\n\r\n return \"<h1>400 BAD REQUEST\"\r\n\r\n#Per aggiornare i dataset:\r\n#A causa di un errore nella creazione del file riguardante gli iscritti per ogni ateneo da parte del MIUR il file\r\n#riguardante gli iscritti per ateneo non sono scaricabili dinamicamente e va sostituito manualmente.\r\n#Allo stesso modo, i dati ottenuti tramite l'istat non sono scaricabili dinamicamente tramite la api in quanto\r\n#le sue prestazioni sono limitate (oltre a non permettere i filtri necessari per ottenere i file).\r\n#Il dataset delle provincie viene aggiornato automaticamente ogni settimana. 
Gli altri vanno sostituiti manualmente.\r\n\r\n#I dataset statici vanno inseriti nella cartella /static/notUpdating/\r\n#Il dataset riguardante gli iscritti per ateneo va scaricato a questo link http://dati.ustat.miur.it/dataset/3dd9ca7f-9cc9-4a1a-915c-e569b181dbd5/resource/32d26e28-a0b5-45f3-9152-6072164f3e63/download/iscrittixateneo.csv\r\n#e rinominato iscrittiAteneo.csv\r\n\r\n#Il dataset riguardante gli iscritti emigrati dalla regione è stato creato manualmente a partire da altri dati e non può essere aggiornato\r\n\r\n#I dataset riguardanti la percentuale di disoccupazione e la retribuzione oraria media sono reperibili a questo portale http://dati.istat.it/\r\n#Sfortunatamente la funzione di ricerca del sito è molto lenta e limitata, comunque sia i due data set sono \"Tasso di Disoccupazione - Dati Provinciali\"\r\n#e \"Retribuzione oraria media per titolo di studio\". In entrambi i casi, è necessario filtrare i risultati per le sole provincie del Veneto.\r\n#I file vanno rinominati retribuzioneMedia.csv e taxDisocc.csv\r\n\r\n#Fortunatamente, si aggiornano solo annualmente\r\n\r\n@application.route(\"/doUpdate\")\r\ndef updateData():\r\n #File iscritti per ateneo\r\n #I dati vengono inseriti in un dizionario come array, il formato è più sotto\r\n with open('static/notUpdating/iscrittiAteneo.csv', newline='') as f: #Qui si può cambiare il nome del file se necessario, basta che sia in formato csv corretto\r\n reader = csv.reader(f)\r\n data = list(reader)[1:]\r\n iscrittiAteneo = {\r\n 'Venezia CF': [],\r\n 'Verona': [],\r\n 'Venezia IUAV': [],\r\n 'Padova': []}\r\n\r\n for row in data:\r\n row = row[0].split(';')\r\n if row[1] == 'Padova' or 'Venezia C' in row[1] or row[1] == 'Venezia Iuav' or row[1] == 'Verona':\r\n tmp = row[1]\r\n if 'Venezia C' in row[1]:\r\n tmp = 'Venezia CF'\r\n if tmp == 'Venezia Iuav':\r\n tmp = 'Venezia IUAV'\r\n iscrittiAteneo[tmp].append(\r\n row[0] + ';' + row[3] + ';' + row[4])\r\n\r\n iscrittiAteneoJson = 
json.dumps(iscrittiAteneo)\r\n # Formato: {\"nomeAteneo\" : [\"annoScolastico;numeroIscrittiMaschi;numeroIscrittiFemmine\",...,...],...,...}\r\n open('static/jsons/iscrittiAteneo.json',\r\n \"wb\").write(iscrittiAteneoJson.encode())\r\n\r\n # File iscritti emigrati in altre regioni\r\n with open('static/notUpdating/iscrittiEmig.json', newline='') as f: #Qui si può cambiare il nome del file se necessario, basta che sia in formato csv corretto\r\n\r\n reader = json.load(f)\r\n iscrittiEmig = {\r\n 'vicenza': [],\r\n 'verona': [],\r\n 'venezia': [],\r\n 'padova': [],\r\n 'treviso': [],\r\n 'belluno': [],\r\n 'rovigo': []}\r\n\r\n for row in reader['records']:\r\n if row[4].lower() == 'padova' or row[4].lower() == 'vicenza' or row[4].lower() == 'venezia' or row[4].lower() == 'verona' or row[4].lower() == 'treviso' or row[4].lower() == 'belluno' or row[4].lower() == 'rovigo':\r\n iscrittiEmig[row[4].lower()].append(\r\n row[1] + ';' + row[4] + ';' + row[2] + ';' + str(row[6]))\r\n lista = {\r\n 'vicenza': [],\r\n 'verona': [],\r\n 'venezia': [],\r\n 'padova': [],\r\n 'treviso': [],\r\n 'belluno': [],\r\n 'rovigo': []\r\n }\r\n count = 0\r\n\r\n for key in iscrittiEmig.keys():\r\n while len(iscrittiEmig[key]) > 2:\r\n tmp = iscrittiEmig[key].pop(0).split(';')\r\n if count == 0:\r\n count = int(tmp[3])\r\n tmp2 = iscrittiEmig[key][0].split(';')[2]\r\n if tmp[2] == tmp2:\r\n\r\n count += int(tmp[3])\r\n\r\n else:\r\n lista[tmp[1].lower()].append(\r\n tmp[0] + ';' + tmp[2] + ';' + str(count))\r\n count = 0\r\n\r\n iscrittiEmigJson = json.dumps(lista)\r\n # Formato: {\"cittàInMinuscolo\" : [\"annoScolastico;CittàDiProvenienzaInMaiuscolo;RegioneDiEsodo;NumeroStudenti\",...,...],...,...}\r\n open('static/jsons/iscrittiEmig.json',\r\n \"wb\").write(iscrittiEmigJson.encode())\r\n # File paga media oraria per titolo di studio\r\n with open('static/notUpdating/retribuzioneMedia.csv', newline='') as f:\r\n reader = csv.reader(f)\r\n data = list(reader)[1:]\r\n retribuzione = {\r\n 
'Vicenza': [],\r\n 'Verona': [],\r\n 'Venezia': [],\r\n 'Padova': [],\r\n 'Treviso': [],\r\n 'Belluno': [],\r\n 'Rovigo': []}\r\n\r\n for row in data:\r\n if (row[1] == 'Padova' or row[1] == 'Vicenza' or row[1] == 'Venezia' or row[1] == 'Verona' or row[1] == 'Treviso' or row[1] == 'Belluno' or row[1] == 'Rovigo') and (row[5] != 'totale') and 'media)' in row[3]:\r\n # La lista è divisa in titolo di studio, reddito medio orario\r\n tmp = row[5]\r\n if 'nessun' in tmp:\r\n tmp = 'nessuno'\r\n retribuzione[row[1]].append(tmp + ';' + str(row[8]))\r\n\r\n retribuzioneMediaJson = json.dumps(retribuzione)\r\n # Formato: {\"nomeCittà\" : [\"laurea;media\", \"diploma;media\", \"nulla;media\"],...,...}\r\n open('static/jsons/retribuzioneMedia.json',\r\n \"wb\").write(retribuzioneMediaJson.encode())\r\n\r\n # File %disoccupazione\r\n with open('static/notUpdating/taxDisocc.csv', newline='') as f: #Qui si può cambiare il nome del file se necessario, basta che sia in formato csv corretto\r\n reader = csv.reader(f)\r\n data = list(reader)[1:]\r\n lavoro = {\r\n 'Vicenza': [],\r\n 'Verona': [],\r\n 'Venezia': [],\r\n 'Padova': [],\r\n 'Treviso': [],\r\n 'Belluno': [],\r\n 'Rovigo': []}\r\n\r\n for row in data:\r\n if (row[7] == '15-24 anni') and row[5] != 'totale':\r\n if row[5] == 'femmine':\r\n lavoro[row[1]].append(str(row[10]))\r\n else:\r\n lavoro[row[1]].append(str(row[8]) + ';' + str(row[10]))\r\n for key in lavoro.keys():\r\n tmp = lavoro[key][0] + ';' + lavoro[key][2]\r\n tmp2 = lavoro[key][1] + ';' + lavoro[key][3]\r\n lavoro[key].clear()\r\n lavoro[key].append(tmp)\r\n lavoro[key].append(tmp2)\r\n\r\n disoccupazioneJson = json.dumps(lavoro)\r\n # Formato: {\"nomeCittà\" : [\"anno;percMaschi;percFemmine\",\"anno;percMaschi;percFemmine\"x],...,...}\r\n open('static/jsons/disoccupazione.json',\r\n \"wb\").write(disoccupazioneJson.encode())\r\n\r\n # File iscritti totali per provincia\r\n iscritti = requests.get(\r\n 
'http://dati.ustat.miur.it/dataset/3dd9ca7f-9cc9-4a1a-915c-e569b181dbd5/resource/eae4ee94-0797-41d2-b007-bc6dad3ef3e2/download/iscrittixresidenza.csv', allow_redirects=True)\r\n open('static/iscrittiProvincia.csv', 'wb').write(iscritti.content) #Qui si può cambiare il nome del file se necessario, basta che sia in formato csv corretto\r\n with open('static/iscrittiProvincia.csv', newline='') as f:\r\n reader = csv.reader(f)\r\n data = list(reader)[1:]\r\n\r\n iscrittiProvincia = {\r\n 'vicenza': [],\r\n 'verona': [],\r\n 'venezia': [],\r\n 'padova': [],\r\n 'treviso': [],\r\n 'belluno': [],\r\n 'rovigo': []}\r\n\r\n for row in data:\r\n row = row[0].split(';')\r\n if row[2].lower() == 'padova' or row[2].lower() == 'vicenza' or row[2].lower() == 'venezia' or row[2].lower() == 'verona' or row[2].lower() == 'treviso' or row[2].lower() == 'belluno' or row[2].lower() == 'rovigo':\r\n iscrittiProvincia[row[2].lower()].append(\r\n str(row[0]) + ';' + str(int(row[3])+int(row[4])))\r\n iscrittiProvinciaJson = json.dumps(iscrittiProvincia)\r\n # Formato: {\"nomeCittà\" : [\"anno;numero\"],...,...}\r\n open('static/jsons/iscrittiProvincia.json',\r\n \"wb\").write(iscrittiProvinciaJson.encode())\r\n return \"200\"\r\n\r\n#########\r\n#Startup#\r\n#########\r\n\r\n#Ad ogni riavvio forzato dell'applicazione, i dati vengono aggiornati (ci impiega qualche secondo al maassimo)\r\n\r\nupdateData()\r\n\r\nif __name__ == '__main__':\r\n application.run(debug=True, port=80)\r\n",
"step-ids": [
2,
3,
4,
6,
7
]
}
|
[
2,
3,
4,
6,
7
] |
<|reserved_special_token_0|>
def fSet_BillDate(pMonth):
if pMonth == 13:
pMonth = 12
logging.debug('- change bill date: ' + str(pMonth) + '/27/' + Settings.
dataYear)
time.sleep(1)
myTools.getFocus()
type('b', KeyModifier.ALT)
type('d')
time.sleep(2)
type('t')
type(Key.HOME, KeyModifier.CTRL)
thisYear = date.today().year
for prevYear in range(int(Settings.dataYear), thisYear):
type(Key.PAGE_UP, KeyModifier.CTRL)
time.sleep(1)
myTools.pressDOWN(4)
myTools.pressLEFT(2)
for nextMonth in range(pMonth - 1):
type(Key.PAGE_DOWN)
time.sleep(1)
type(Key.ENTER)
time.sleep(1)
def fRemove_Sort():
time.sleep(1)
logging.debug('- remove sort')
type(Key.F6)
time.sleep(1)
click(Pattern('remove_sort-1.png').similar(0.8))
time.sleep(1)
type(Key.F6)
time.sleep(1)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def fSet_BillDate(pMonth):
if pMonth == 13:
pMonth = 12
logging.debug('- change bill date: ' + str(pMonth) + '/27/' + Settings.
dataYear)
time.sleep(1)
myTools.getFocus()
type('b', KeyModifier.ALT)
type('d')
time.sleep(2)
type('t')
type(Key.HOME, KeyModifier.CTRL)
thisYear = date.today().year
for prevYear in range(int(Settings.dataYear), thisYear):
type(Key.PAGE_UP, KeyModifier.CTRL)
time.sleep(1)
myTools.pressDOWN(4)
myTools.pressLEFT(2)
for nextMonth in range(pMonth - 1):
type(Key.PAGE_DOWN)
time.sleep(1)
type(Key.ENTER)
time.sleep(1)
def fRemove_Sort():
time.sleep(1)
logging.debug('- remove sort')
type(Key.F6)
time.sleep(1)
click(Pattern('remove_sort-1.png').similar(0.8))
time.sleep(1)
type(Key.F6)
time.sleep(1)
<|reserved_special_token_0|>
def fPrint_Bills(pMonth):
myTools.sectionStartTimeStamp('bills' + str(pMonth))
logging.debug('Print_Bills: ' + str(pMonth))
fSet_BillDate(pMonth)
fPrint_BillRun(pMonth)
myTools.sectionEndTimeStamp()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def fSet_BillDate(pMonth):
if pMonth == 13:
pMonth = 12
logging.debug('- change bill date: ' + str(pMonth) + '/27/' + Settings.
dataYear)
time.sleep(1)
myTools.getFocus()
type('b', KeyModifier.ALT)
type('d')
time.sleep(2)
type('t')
type(Key.HOME, KeyModifier.CTRL)
thisYear = date.today().year
for prevYear in range(int(Settings.dataYear), thisYear):
type(Key.PAGE_UP, KeyModifier.CTRL)
time.sleep(1)
myTools.pressDOWN(4)
myTools.pressLEFT(2)
for nextMonth in range(pMonth - 1):
type(Key.PAGE_DOWN)
time.sleep(1)
type(Key.ENTER)
time.sleep(1)
def fRemove_Sort():
time.sleep(1)
logging.debug('- remove sort')
type(Key.F6)
time.sleep(1)
click(Pattern('remove_sort-1.png').similar(0.8))
time.sleep(1)
type(Key.F6)
time.sleep(1)
def fPrint_BillRun(pMonth):
reportName = 'Bill-' + myTools.padZero(pMonth
) + '-' + Settings.tsVersion + '.txt'
logging.debug('fPrint_BillRun: ' + reportName)
type('b', KeyModifier.CTRL)
time.sleep(1)
fRemove_Sort()
myTools.enterSlipFilter(pMonth, 'n')
logging.debug('-- print')
type(Key.ENTER)
time.sleep(1)
type(Settings.repFolder + '\\' + reportName)
time.sleep(1)
type(Key.ENTER)
time.sleep(1)
if exists('replace_msg.png'):
type('y')
logging.debug('-- approve')
wait(Pattern('approve_bills-1.png').targetOffset(-100, -8), FOREVER)
click(Pattern('approve_bills-1.png').targetOffset(-100, -8))
type(Key.ENTER)
time.sleep(3)
if int(Settings.tsVersion) > 2015:
wait('approving_bills.png', FOREVER)
while exists('approving_bills.png'):
logging.debug('--- msg exists')
time.sleep(2)
else:
waitVanish('approving_statusbar.png', FOREVER)
time.sleep(1)
reports_Compare.Compare_OneReport(reportName)
logging.debug('-- close report window')
click('report_generate_bills.png')
type(Key.F4, KeyModifier.CTRL)
time.sleep(2)
type('n')
time.sleep(1)
def fPrint_Bills(pMonth):
myTools.sectionStartTimeStamp('bills' + str(pMonth))
logging.debug('Print_Bills: ' + str(pMonth))
fSet_BillDate(pMonth)
fPrint_BillRun(pMonth)
myTools.sectionEndTimeStamp()
<|reserved_special_token_1|>
from sikuli import *
import logging
import myTools
from datetime import date
import reports_Compare
def fSet_BillDate(pMonth):
if pMonth == 13:
pMonth = 12
logging.debug('- change bill date: ' + str(pMonth) + '/27/' + Settings.
dataYear)
time.sleep(1)
myTools.getFocus()
type('b', KeyModifier.ALT)
type('d')
time.sleep(2)
type('t')
type(Key.HOME, KeyModifier.CTRL)
thisYear = date.today().year
for prevYear in range(int(Settings.dataYear), thisYear):
type(Key.PAGE_UP, KeyModifier.CTRL)
time.sleep(1)
myTools.pressDOWN(4)
myTools.pressLEFT(2)
for nextMonth in range(pMonth - 1):
type(Key.PAGE_DOWN)
time.sleep(1)
type(Key.ENTER)
time.sleep(1)
def fRemove_Sort():
time.sleep(1)
logging.debug('- remove sort')
type(Key.F6)
time.sleep(1)
click(Pattern('remove_sort-1.png').similar(0.8))
time.sleep(1)
type(Key.F6)
time.sleep(1)
def fPrint_BillRun(pMonth):
reportName = 'Bill-' + myTools.padZero(pMonth
) + '-' + Settings.tsVersion + '.txt'
logging.debug('fPrint_BillRun: ' + reportName)
type('b', KeyModifier.CTRL)
time.sleep(1)
fRemove_Sort()
myTools.enterSlipFilter(pMonth, 'n')
logging.debug('-- print')
type(Key.ENTER)
time.sleep(1)
type(Settings.repFolder + '\\' + reportName)
time.sleep(1)
type(Key.ENTER)
time.sleep(1)
if exists('replace_msg.png'):
type('y')
logging.debug('-- approve')
wait(Pattern('approve_bills-1.png').targetOffset(-100, -8), FOREVER)
click(Pattern('approve_bills-1.png').targetOffset(-100, -8))
type(Key.ENTER)
time.sleep(3)
if int(Settings.tsVersion) > 2015:
wait('approving_bills.png', FOREVER)
while exists('approving_bills.png'):
logging.debug('--- msg exists')
time.sleep(2)
else:
waitVanish('approving_statusbar.png', FOREVER)
time.sleep(1)
reports_Compare.Compare_OneReport(reportName)
logging.debug('-- close report window')
click('report_generate_bills.png')
type(Key.F4, KeyModifier.CTRL)
time.sleep(2)
type('n')
time.sleep(1)
def fPrint_Bills(pMonth):
myTools.sectionStartTimeStamp('bills' + str(pMonth))
logging.debug('Print_Bills: ' + str(pMonth))
fSet_BillDate(pMonth)
fPrint_BillRun(pMonth)
myTools.sectionEndTimeStamp()
<|reserved_special_token_1|>
from sikuli import *
import logging
import myTools
from datetime import date
import reports_Compare
#---------------------------------------------------#
def fSet_BillDate(pMonth):
#---------------------------------------------------#
    """Set the Timeslips bill date to <pMonth>/27/<Settings.dataYear>.

    Drives the Bill > Date calendar dialog purely through keystrokes
    (Sikuli globals: type, Key, KeyModifier). pMonth is 1-12; 13 is
    clamped to 12 -- presumably a "13th period" bookkeeping convention,
    TODO confirm. No return value; side effect is the changed bill date.
    """
    if pMonth == 13:
        pMonth = 12
    logging.debug('- change bill date: ' + str(pMonth) + "/27/" + Settings.dataYear)
    time.sleep(1)
    # Make sure the Timeslips window has focus before sending keys.
    myTools.getFocus()
    # Open the revise-date dialog: Alt+B, then D.
    type("b",KeyModifier.ALT)
    type("d")
    time.sleep(2)
    # Jump the calendar to today's date.
    type("t")
    # Ctrl+Home: go to 01/01 of the currently shown year.
    type(Key.HOME,KeyModifier.CTRL)
    # Ctrl+PageUp once per elapsed year: back to 01/01 of the data year.
    thisYear = date.today().year
    for prevYear in range(int(Settings.dataYear),thisYear):
        type(Key.PAGE_UP,KeyModifier.CTRL)
        time.sleep(1)
    # Move from the 1st to the 27th: 4 rows down (+28 days), 2 left (-2).
    myTools.pressDOWN(4)
    myTools.pressLEFT(2)
    # PageDown once per month to land on the requested month's 27th.
    for nextMonth in range(pMonth-1):
        type(Key.PAGE_DOWN)
        time.sleep(1)
    type(Key.ENTER)
    time.sleep(1)
#---------------------------------------------------#
def fRemove_Sort():
#---------------------------------------------------#
    """Clear the active sort in the current Timeslips list.

    F6 opens a panel (presumably the sort dialog -- confirm), the
    'remove sort' control is clicked by image match at 0.80 similarity,
    and a second F6 closes the panel again.
    """
    time.sleep(1)
    logging.debug('- remove sort')
    type(Key.F6)
    time.sleep(1)
    click(Pattern("remove_sort-1.png").similar(0.80))
    time.sleep(1)
    type(Key.F6)
    time.sleep(1)
#---------------------------------------------------#
def fPrint_BillRun(pMonth):
#---------------------------------------------------#
    """Generate, save, approve, and verify the bill run for one month.

    Writes the bills for pMonth to Settings.repFolder as
    Bill-<MM>-<tsVersion>.txt, approves the run, diffs the saved report
    against the stored baseline (reports_Compare.Compare_OneReport),
    and closes the report window without saving.
    """
    reportName = "Bill-" + myTools.padZero(pMonth) + "-" + Settings.tsVersion + ".txt"
    logging.debug('fPrint_BillRun: ' + reportName)
    # Ctrl+B opens the generate-bills dialog.
    type("b",KeyModifier.CTRL)
    time.sleep(1)
    fRemove_Sort()
    # Restrict slips to the requested month; "n" is the filter mode
    # expected by myTools.enterSlipFilter (semantics defined there -- confirm).
    myTools.enterSlipFilter(pMonth,"n")
    # Print the bills to a text file.
    logging.debug('-- print')
    type(Key.ENTER)
    time.sleep(1)
    # Type the full output path, then confirm with ENTER.
    type(Settings.repFolder + "\\" + reportName)
    time.sleep(1)
    type(Key.ENTER)
    time.sleep(1)
    # If a replace-file prompt appears, answer Yes to overwrite.
    if exists("replace_msg.png"):
        type("y")
    # Approve the generated bills (button located by image, offset click).
    logging.debug('-- approve')
    wait(Pattern("approve_bills-1.png").targetOffset(-100,-8),FOREVER)
    click(Pattern("approve_bills-1.png").targetOffset(-100,-8))
    type(Key.ENTER)
    time.sleep(3)
    # Timeslips versions after 2015 show a modal "approving bills" dialog
    # to poll; older versions show a status-bar indicator to wait out.
    if int(Settings.tsVersion) > 2015:
        wait("approving_bills.png",FOREVER)
        while exists("approving_bills.png"):
            logging.debug('--- msg exists')
            time.sleep(2)
    else:
        waitVanish("approving_statusbar.png",FOREVER)
    time.sleep(1)
    # Compare the saved report with the baseline copy.
    reports_Compare.Compare_OneReport(reportName)
    # Close the report entry window (Ctrl+F4) and decline saving ('n').
    logging.debug('-- close report window')
    click("report_generate_bills.png")
    type(Key.F4,KeyModifier.CTRL)
    time.sleep(2)
    type("n")
    time.sleep(1)
#---------------------------------------------------#
def fPrint_Bills(pMonth):
#---------------------------------------------------#
    """Entry point: run and time-stamp the bill workflow for one month.

    Sets the bill date to the 27th of pMonth (fSet_BillDate), then
    generates/approves/compares that month's bills (fPrint_BillRun).
    Duration is recorded via myTools.sectionStartTimeStamp/EndTimeStamp.
    """
    myTools.sectionStartTimeStamp("bills" + str(pMonth))
    logging.debug('Print_Bills: ' + str(pMonth))
    fSet_BillDate(pMonth)
    fPrint_BillRun(pMonth)
    myTools.sectionEndTimeStamp()
|
flexible
|
{
"blob_id": "69721dca0f5d8396e330696cde52bfabad33c895",
"index": 3242,
"step-1": "<mask token>\n\n\ndef fSet_BillDate(pMonth):\n if pMonth == 13:\n pMonth = 12\n logging.debug('- change bill date: ' + str(pMonth) + '/27/' + Settings.\n dataYear)\n time.sleep(1)\n myTools.getFocus()\n type('b', KeyModifier.ALT)\n type('d')\n time.sleep(2)\n type('t')\n type(Key.HOME, KeyModifier.CTRL)\n thisYear = date.today().year\n for prevYear in range(int(Settings.dataYear), thisYear):\n type(Key.PAGE_UP, KeyModifier.CTRL)\n time.sleep(1)\n myTools.pressDOWN(4)\n myTools.pressLEFT(2)\n for nextMonth in range(pMonth - 1):\n type(Key.PAGE_DOWN)\n time.sleep(1)\n type(Key.ENTER)\n time.sleep(1)\n\n\ndef fRemove_Sort():\n time.sleep(1)\n logging.debug('- remove sort')\n type(Key.F6)\n time.sleep(1)\n click(Pattern('remove_sort-1.png').similar(0.8))\n time.sleep(1)\n type(Key.F6)\n time.sleep(1)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef fSet_BillDate(pMonth):\n if pMonth == 13:\n pMonth = 12\n logging.debug('- change bill date: ' + str(pMonth) + '/27/' + Settings.\n dataYear)\n time.sleep(1)\n myTools.getFocus()\n type('b', KeyModifier.ALT)\n type('d')\n time.sleep(2)\n type('t')\n type(Key.HOME, KeyModifier.CTRL)\n thisYear = date.today().year\n for prevYear in range(int(Settings.dataYear), thisYear):\n type(Key.PAGE_UP, KeyModifier.CTRL)\n time.sleep(1)\n myTools.pressDOWN(4)\n myTools.pressLEFT(2)\n for nextMonth in range(pMonth - 1):\n type(Key.PAGE_DOWN)\n time.sleep(1)\n type(Key.ENTER)\n time.sleep(1)\n\n\ndef fRemove_Sort():\n time.sleep(1)\n logging.debug('- remove sort')\n type(Key.F6)\n time.sleep(1)\n click(Pattern('remove_sort-1.png').similar(0.8))\n time.sleep(1)\n type(Key.F6)\n time.sleep(1)\n\n\n<mask token>\n\n\ndef fPrint_Bills(pMonth):\n myTools.sectionStartTimeStamp('bills' + str(pMonth))\n logging.debug('Print_Bills: ' + str(pMonth))\n fSet_BillDate(pMonth)\n fPrint_BillRun(pMonth)\n myTools.sectionEndTimeStamp()\n",
"step-3": "<mask token>\n\n\ndef fSet_BillDate(pMonth):\n if pMonth == 13:\n pMonth = 12\n logging.debug('- change bill date: ' + str(pMonth) + '/27/' + Settings.\n dataYear)\n time.sleep(1)\n myTools.getFocus()\n type('b', KeyModifier.ALT)\n type('d')\n time.sleep(2)\n type('t')\n type(Key.HOME, KeyModifier.CTRL)\n thisYear = date.today().year\n for prevYear in range(int(Settings.dataYear), thisYear):\n type(Key.PAGE_UP, KeyModifier.CTRL)\n time.sleep(1)\n myTools.pressDOWN(4)\n myTools.pressLEFT(2)\n for nextMonth in range(pMonth - 1):\n type(Key.PAGE_DOWN)\n time.sleep(1)\n type(Key.ENTER)\n time.sleep(1)\n\n\ndef fRemove_Sort():\n time.sleep(1)\n logging.debug('- remove sort')\n type(Key.F6)\n time.sleep(1)\n click(Pattern('remove_sort-1.png').similar(0.8))\n time.sleep(1)\n type(Key.F6)\n time.sleep(1)\n\n\ndef fPrint_BillRun(pMonth):\n reportName = 'Bill-' + myTools.padZero(pMonth\n ) + '-' + Settings.tsVersion + '.txt'\n logging.debug('fPrint_BillRun: ' + reportName)\n type('b', KeyModifier.CTRL)\n time.sleep(1)\n fRemove_Sort()\n myTools.enterSlipFilter(pMonth, 'n')\n logging.debug('-- print')\n type(Key.ENTER)\n time.sleep(1)\n type(Settings.repFolder + '\\\\' + reportName)\n time.sleep(1)\n type(Key.ENTER)\n time.sleep(1)\n if exists('replace_msg.png'):\n type('y')\n logging.debug('-- approve')\n wait(Pattern('approve_bills-1.png').targetOffset(-100, -8), FOREVER)\n click(Pattern('approve_bills-1.png').targetOffset(-100, -8))\n type(Key.ENTER)\n time.sleep(3)\n if int(Settings.tsVersion) > 2015:\n wait('approving_bills.png', FOREVER)\n while exists('approving_bills.png'):\n logging.debug('--- msg exists')\n time.sleep(2)\n else:\n waitVanish('approving_statusbar.png', FOREVER)\n time.sleep(1)\n reports_Compare.Compare_OneReport(reportName)\n logging.debug('-- close report window')\n click('report_generate_bills.png')\n type(Key.F4, KeyModifier.CTRL)\n time.sleep(2)\n type('n')\n time.sleep(1)\n\n\ndef fPrint_Bills(pMonth):\n 
myTools.sectionStartTimeStamp('bills' + str(pMonth))\n logging.debug('Print_Bills: ' + str(pMonth))\n fSet_BillDate(pMonth)\n fPrint_BillRun(pMonth)\n myTools.sectionEndTimeStamp()\n",
"step-4": "from sikuli import *\nimport logging\nimport myTools\nfrom datetime import date\nimport reports_Compare\n\n\ndef fSet_BillDate(pMonth):\n if pMonth == 13:\n pMonth = 12\n logging.debug('- change bill date: ' + str(pMonth) + '/27/' + Settings.\n dataYear)\n time.sleep(1)\n myTools.getFocus()\n type('b', KeyModifier.ALT)\n type('d')\n time.sleep(2)\n type('t')\n type(Key.HOME, KeyModifier.CTRL)\n thisYear = date.today().year\n for prevYear in range(int(Settings.dataYear), thisYear):\n type(Key.PAGE_UP, KeyModifier.CTRL)\n time.sleep(1)\n myTools.pressDOWN(4)\n myTools.pressLEFT(2)\n for nextMonth in range(pMonth - 1):\n type(Key.PAGE_DOWN)\n time.sleep(1)\n type(Key.ENTER)\n time.sleep(1)\n\n\ndef fRemove_Sort():\n time.sleep(1)\n logging.debug('- remove sort')\n type(Key.F6)\n time.sleep(1)\n click(Pattern('remove_sort-1.png').similar(0.8))\n time.sleep(1)\n type(Key.F6)\n time.sleep(1)\n\n\ndef fPrint_BillRun(pMonth):\n reportName = 'Bill-' + myTools.padZero(pMonth\n ) + '-' + Settings.tsVersion + '.txt'\n logging.debug('fPrint_BillRun: ' + reportName)\n type('b', KeyModifier.CTRL)\n time.sleep(1)\n fRemove_Sort()\n myTools.enterSlipFilter(pMonth, 'n')\n logging.debug('-- print')\n type(Key.ENTER)\n time.sleep(1)\n type(Settings.repFolder + '\\\\' + reportName)\n time.sleep(1)\n type(Key.ENTER)\n time.sleep(1)\n if exists('replace_msg.png'):\n type('y')\n logging.debug('-- approve')\n wait(Pattern('approve_bills-1.png').targetOffset(-100, -8), FOREVER)\n click(Pattern('approve_bills-1.png').targetOffset(-100, -8))\n type(Key.ENTER)\n time.sleep(3)\n if int(Settings.tsVersion) > 2015:\n wait('approving_bills.png', FOREVER)\n while exists('approving_bills.png'):\n logging.debug('--- msg exists')\n time.sleep(2)\n else:\n waitVanish('approving_statusbar.png', FOREVER)\n time.sleep(1)\n reports_Compare.Compare_OneReport(reportName)\n logging.debug('-- close report window')\n click('report_generate_bills.png')\n type(Key.F4, KeyModifier.CTRL)\n 
time.sleep(2)\n type('n')\n time.sleep(1)\n\n\ndef fPrint_Bills(pMonth):\n myTools.sectionStartTimeStamp('bills' + str(pMonth))\n logging.debug('Print_Bills: ' + str(pMonth))\n fSet_BillDate(pMonth)\n fPrint_BillRun(pMonth)\n myTools.sectionEndTimeStamp()\n",
"step-5": "from sikuli import *\nimport logging\nimport myTools\nfrom datetime import date\nimport reports_Compare\n\n#---------------------------------------------------#\ndef fSet_BillDate(pMonth):\n#---------------------------------------------------#\n\n if pMonth == 13:\n pMonth = 12 \n\n logging.debug('- change bill date: ' + str(pMonth) + \"/27/\" + Settings.dataYear)\n time.sleep(1)\n\n # make sure timeslips has focus\n myTools.getFocus()\n\n # open revise date\n type(\"b\",KeyModifier.ALT)\n type(\"d\") \n time.sleep(2)\n\n # go to today\n type(\"t\")\n\n #get to 01/01 of current year\n type(Key.HOME,KeyModifier.CTRL) \n\n # get to 01/01 of the data year\n thisYear = date.today().year\n for prevYear in range(int(Settings.dataYear),thisYear):\n type(Key.PAGE_UP,KeyModifier.CTRL) \n time.sleep(1)\n\n # get to 01/27 of the data year\n myTools.pressDOWN(4)\n myTools.pressLEFT(2) \n\n for nextMonth in range(pMonth-1):\n type(Key.PAGE_DOWN) \n time.sleep(1)\n \n type(Key.ENTER)\n time.sleep(1) \n\n#---------------------------------------------------#\ndef fRemove_Sort():\n#---------------------------------------------------#\n\n time.sleep(1)\n logging.debug('- remove sort')\n \n type(Key.F6)\n time.sleep(1)\n\n click(Pattern(\"remove_sort-1.png\").similar(0.80))\n time.sleep(1)\n \n type(Key.F6)\n time.sleep(1)\n\n#---------------------------------------------------#\ndef fPrint_BillRun(pMonth):\n#---------------------------------------------------#\n \n reportName = \"Bill-\" + myTools.padZero(pMonth) + \"-\" + Settings.tsVersion + \".txt\" \n logging.debug('fPrint_BillRun: ' + reportName)\n\n type(\"b\",KeyModifier.CTRL)\n time.sleep(1)\n\n fRemove_Sort()\n myTools.enterSlipFilter(pMonth,\"n\")\n\n # print bills to text\n logging.debug('-- print') \n type(Key.ENTER) \n time.sleep(1)\n\n # fill in path and name; press ENTER\n type(Settings.repFolder + \"\\\\\" + reportName)\n time.sleep(1)\n type(Key.ENTER) \n time.sleep(1)\n\n if 
exists(\"replace_msg.png\"):\n type(\"y\")\n\n # approve bills\n logging.debug('-- approve') \n wait(Pattern(\"approve_bills-1.png\").targetOffset(-100,-8),FOREVER)\n click(Pattern(\"approve_bills-1.png\").targetOffset(-100,-8))\n type(Key.ENTER)\n time.sleep(3)\n\n if int(Settings.tsVersion) > 2015:\n wait(\"approving_bills.png\",FOREVER) \n while exists(\"approving_bills.png\"):\n logging.debug('--- msg exists')\n time.sleep(2)\n else:\n waitVanish(\"approving_statusbar.png\",FOREVER) \n time.sleep(1)\n\n # compare the report with baseline\n reports_Compare.Compare_OneReport(reportName)\n\n # close report entry / don't save\n logging.debug('-- close report window')\n click(\"report_generate_bills.png\")\n type(Key.F4,KeyModifier.CTRL)\n time.sleep(2)\n type(\"n\") \n time.sleep(1)\n\n#---------------------------------------------------#\ndef fPrint_Bills(pMonth):\n#---------------------------------------------------#\n\n myTools.sectionStartTimeStamp(\"bills\" + str(pMonth))\n logging.debug('Print_Bills: ' + str(pMonth))\n \n fSet_BillDate(pMonth)\n fPrint_BillRun(pMonth)\n myTools.sectionEndTimeStamp()\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
def fahrenheit_to_celsius(fahrenheit):
    """Convert a temperature in degrees Fahrenheit to degrees Celsius."""
    return (fahrenheit - 32) * 5 / 9


if __name__ == "__main__":
    # Prompt, convert, and print; user-facing text kept exactly as before.
    fahrenheit = float(input("Enter a fahrenheit degree: "))
    celcius = fahrenheit_to_celsius(fahrenheit)
    print("From fahrenheit to celcius", celcius)
|
normal
|
{
"blob_id": "2f2030107f3a23c0d2f404a838eaccc8b35ac410",
"index": 1086,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('From fahrenheit to celcius', celcius)\n",
"step-3": "fahrenheit = float(input('Enter a fahrenheit degree: '))\ncelcius = (fahrenheit - 32) * 5 / 9\nprint('From fahrenheit to celcius', celcius)\n",
"step-4": "fahrenheit = float(input(\"Enter a fahrenheit degree: \"))\ncelcius = ((fahrenheit - 32) * 5) / 9\nprint(\"From fahrenheit to celcius\", celcius)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from django.db.models import Count
from django.utils.text import slugify
from rest_framework.serializers import ModelSerializer, SerializerMethodField, Serializer
from rest_framework import serializers
from category.models import Category
from product.models import Product, GalleryProduct, Stone, Color, Size
from category.api.serializers import CategorySerializer
from extensions.calculations import calculating_gold_jewelry
from taggit_serializer.serializers import (
TagListSerializerField,
TaggitSerializer
)
def _create_custom_uuid():
    """Build a product display code: 'EUA' plus the 7-digit zero-padded
    id of the most recently created Product, or 1 when none exist.

    NOTE(review): consecutive calls before a new product is saved return
    the same code -- presumably uniqueness is enforced elsewhere; confirm.
    """
    last_product = Product.objects.last()
    # A saved row always has a non-None pk, so the old
    # `max_id if max_id is not None else 1` fallback was dead code.
    max_id = last_product.id if last_product else 1
    return 'EUA{:07d}'.format(max_id)
class ColorSerializer(ModelSerializer):
    """Serializes a Color as its primary key and color value."""


    class Meta:
        model = Color
        fields = ['id', 'color']
class SizeSerializer(ModelSerializer):
    """Serializes a Size as its primary key and size value."""


    class Meta:
        model = Size
        fields = ['id', 'size']
class StoneSerilizer(ModelSerializer):
    """Serializes every field of a Stone.

    NOTE(review): the class name misspells 'Serializer'; kept as-is
    because other modules may import it under this name.
    """


    class Meta:
        model = Stone
        fields = '__all__'
class ImageCreateProductSerializer(serializers.Serializer):
    """Intended to carry a single product image on upload.

    NOTE(review): this subclasses serializers.Serializer, so the Meta
    (model/fields) below is presumably ignored and no 'image' field is
    actually declared -- it likely was meant to be a ModelSerializer.
    Confirm with callers before changing the base class.
    """


    class Meta:
        model = GalleryProduct
        fields = ['image']
class ProductListSerializer(serializers.ModelSerializer):
    """Compact product representation for list views.

    Adds three computed fields on top of the model columns: the nested
    category, the product's image gallery, and the current price.
    """

    gallery = serializers.SerializerMethodField()
    category = serializers.SerializerMethodField()
    price = serializers.SerializerMethodField()


    class Meta:
        model = Product
        fields = [
            'id', 'rating', 'title', 'slug',
            'image', 'gallery', 'category', 'price',
        ]

    def get_category(self, obj):
        # Nested single-object serialization of the related category.
        return CategorySerializer(instance=obj.category).data

    def get_gallery(self, obj):
        # All gallery images attached to this product.
        images = GalleryProduct.objects.filter(product_id=obj)
        return ImageProductSerializer(instance=images, many=True).data

    def get_price(self, obj):
        return obj.price
class ProductsOrderCartSerializer(ModelSerializer):
    """Minimal product payload (id/title/slug/image), named for use in
    order-cart contexts."""


    class Meta:
        model = Product
        fields = ['id', 'title', 'slug', 'image']
class ProductDetailSerializer(TaggitSerializer, ModelSerializer):
    """Full product representation for the detail view.

    Extends the model columns with tags, the image gallery, the linked
    colors and sizes, the nested category, and the current price, while
    excluding internal rate/pricing columns from the payload.
    """

    tags = TagListSerializerField()
    gallery = SerializerMethodField()
    color = SerializerMethodField()
    size = SerializerMethodField()
    category = SerializerMethodField()
    price = serializers.SerializerMethodField()


    class Meta:
        model = Product
        exclude = [
            'site_rate', 'is_rate_fixed',
            'provider_gold_rate', 'provider_diamond_price',
        ]

    def get_color(self, obj):
        # Every color linked to this product, as a serialized list.
        return ColorSerializer(instance=obj.color.all(), many=True).data

    def get_size(self, obj):
        # Every size linked to this product, as a serialized list.
        return SizeSerializer(instance=obj.size.all(), many=True).data

    def get_category(self, obj):
        return CategorySerializer(instance=obj.category).data

    def get_gallery(self, obj):
        # All gallery images attached to this product.
        images = GalleryProduct.objects.filter(product_id=obj)
        return ImageProductSerializer(instance=images, many=True).data

    def get_price(self, obj):
        return obj.price
class ImageProductSerializer(ModelSerializer):
    """Serializes one GalleryProduct entry: the image plus its product FK."""


    class Meta:
        model = GalleryProduct
        fields = ['image', 'product']
|
normal
|
{
"blob_id": "8be6031caad26ec6b6b99b8d8b8f80d16ad243d4",
"index": 7706,
"step-1": "<mask token>\n\n\nclass ProductsOrderCartSerializer(ModelSerializer):\n\n\n class Meta:\n model = Product\n fields = ['id', 'title', 'slug', 'image']\n\n\nclass ProductDetailSerializer(TaggitSerializer, ModelSerializer):\n tags = TagListSerializerField()\n gallery = SerializerMethodField()\n color = SerializerMethodField()\n size = SerializerMethodField()\n category = SerializerMethodField()\n price = serializers.SerializerMethodField()\n\n\n class Meta:\n model = Product\n exclude = ['site_rate', 'is_rate_fixed', 'provider_gold_rate',\n 'provider_diamond_price']\n\n def get_color(self, obj):\n result = obj.color.all()\n return ColorSerializer(instance=result, many=True).data\n\n def get_size(self, obj):\n result = obj.size.all()\n return SizeSerializer(instance=result, many=True).data\n\n def get_category(self, obj):\n return CategorySerializer(instance=obj.category).data\n\n def get_gallery(self, obj):\n result = GalleryProduct.objects.filter(product_id=obj)\n return ImageProductSerializer(instance=result, many=True).data\n\n def get_price(self, obj):\n return obj.price\n\n\nclass ImageProductSerializer(ModelSerializer):\n\n\n class Meta:\n model = GalleryProduct\n fields = ['image', 'product']\n",
"step-2": "<mask token>\n\n\nclass ImageCreateProductSerializer(serializers.Serializer):\n\n\n class Meta:\n model = GalleryProduct\n fields = ['image']\n\n\nclass ProductListSerializer(serializers.ModelSerializer):\n gallery = serializers.SerializerMethodField()\n category = serializers.SerializerMethodField()\n price = serializers.SerializerMethodField()\n\n\n class Meta:\n model = Product\n fields = ['id', 'rating', 'title', 'slug', 'image', 'gallery',\n 'category', 'price']\n\n def get_category(self, obj):\n result = obj.category\n return CategorySerializer(instance=result).data\n\n def get_gallery(self, obj):\n result = GalleryProduct.objects.filter(product_id=obj)\n return ImageProductSerializer(instance=result, many=True).data\n\n def get_price(self, obj):\n return obj.price\n\n\nclass ProductsOrderCartSerializer(ModelSerializer):\n\n\n class Meta:\n model = Product\n fields = ['id', 'title', 'slug', 'image']\n\n\nclass ProductDetailSerializer(TaggitSerializer, ModelSerializer):\n tags = TagListSerializerField()\n gallery = SerializerMethodField()\n color = SerializerMethodField()\n size = SerializerMethodField()\n category = SerializerMethodField()\n price = serializers.SerializerMethodField()\n\n\n class Meta:\n model = Product\n exclude = ['site_rate', 'is_rate_fixed', 'provider_gold_rate',\n 'provider_diamond_price']\n\n def get_color(self, obj):\n result = obj.color.all()\n return ColorSerializer(instance=result, many=True).data\n\n def get_size(self, obj):\n result = obj.size.all()\n return SizeSerializer(instance=result, many=True).data\n\n def get_category(self, obj):\n return CategorySerializer(instance=obj.category).data\n\n def get_gallery(self, obj):\n result = GalleryProduct.objects.filter(product_id=obj)\n return ImageProductSerializer(instance=result, many=True).data\n\n def get_price(self, obj):\n return obj.price\n\n\nclass ImageProductSerializer(ModelSerializer):\n\n\n class Meta:\n model = GalleryProduct\n fields = ['image', 'product']\n",
"step-3": "<mask token>\n\n\nclass StoneSerilizer(ModelSerializer):\n\n\n class Meta:\n model = Stone\n fields = '__all__'\n\n\nclass ImageCreateProductSerializer(serializers.Serializer):\n\n\n class Meta:\n model = GalleryProduct\n fields = ['image']\n\n\nclass ProductListSerializer(serializers.ModelSerializer):\n gallery = serializers.SerializerMethodField()\n category = serializers.SerializerMethodField()\n price = serializers.SerializerMethodField()\n\n\n class Meta:\n model = Product\n fields = ['id', 'rating', 'title', 'slug', 'image', 'gallery',\n 'category', 'price']\n\n def get_category(self, obj):\n result = obj.category\n return CategorySerializer(instance=result).data\n\n def get_gallery(self, obj):\n result = GalleryProduct.objects.filter(product_id=obj)\n return ImageProductSerializer(instance=result, many=True).data\n\n def get_price(self, obj):\n return obj.price\n\n\nclass ProductsOrderCartSerializer(ModelSerializer):\n\n\n class Meta:\n model = Product\n fields = ['id', 'title', 'slug', 'image']\n\n\nclass ProductDetailSerializer(TaggitSerializer, ModelSerializer):\n tags = TagListSerializerField()\n gallery = SerializerMethodField()\n color = SerializerMethodField()\n size = SerializerMethodField()\n category = SerializerMethodField()\n price = serializers.SerializerMethodField()\n\n\n class Meta:\n model = Product\n exclude = ['site_rate', 'is_rate_fixed', 'provider_gold_rate',\n 'provider_diamond_price']\n\n def get_color(self, obj):\n result = obj.color.all()\n return ColorSerializer(instance=result, many=True).data\n\n def get_size(self, obj):\n result = obj.size.all()\n return SizeSerializer(instance=result, many=True).data\n\n def get_category(self, obj):\n return CategorySerializer(instance=obj.category).data\n\n def get_gallery(self, obj):\n result = GalleryProduct.objects.filter(product_id=obj)\n return ImageProductSerializer(instance=result, many=True).data\n\n def get_price(self, obj):\n return obj.price\n\n\nclass 
ImageProductSerializer(ModelSerializer):\n\n\n class Meta:\n model = GalleryProduct\n fields = ['image', 'product']\n",
"step-4": "<mask token>\n\n\nclass SizeSerializer(ModelSerializer):\n\n\n class Meta:\n model = Size\n fields = ['id', 'size']\n\n\nclass StoneSerilizer(ModelSerializer):\n\n\n class Meta:\n model = Stone\n fields = '__all__'\n\n\nclass ImageCreateProductSerializer(serializers.Serializer):\n\n\n class Meta:\n model = GalleryProduct\n fields = ['image']\n\n\nclass ProductListSerializer(serializers.ModelSerializer):\n gallery = serializers.SerializerMethodField()\n category = serializers.SerializerMethodField()\n price = serializers.SerializerMethodField()\n\n\n class Meta:\n model = Product\n fields = ['id', 'rating', 'title', 'slug', 'image', 'gallery',\n 'category', 'price']\n\n def get_category(self, obj):\n result = obj.category\n return CategorySerializer(instance=result).data\n\n def get_gallery(self, obj):\n result = GalleryProduct.objects.filter(product_id=obj)\n return ImageProductSerializer(instance=result, many=True).data\n\n def get_price(self, obj):\n return obj.price\n\n\nclass ProductsOrderCartSerializer(ModelSerializer):\n\n\n class Meta:\n model = Product\n fields = ['id', 'title', 'slug', 'image']\n\n\nclass ProductDetailSerializer(TaggitSerializer, ModelSerializer):\n tags = TagListSerializerField()\n gallery = SerializerMethodField()\n color = SerializerMethodField()\n size = SerializerMethodField()\n category = SerializerMethodField()\n price = serializers.SerializerMethodField()\n\n\n class Meta:\n model = Product\n exclude = ['site_rate', 'is_rate_fixed', 'provider_gold_rate',\n 'provider_diamond_price']\n\n def get_color(self, obj):\n result = obj.color.all()\n return ColorSerializer(instance=result, many=True).data\n\n def get_size(self, obj):\n result = obj.size.all()\n return SizeSerializer(instance=result, many=True).data\n\n def get_category(self, obj):\n return CategorySerializer(instance=obj.category).data\n\n def get_gallery(self, obj):\n result = GalleryProduct.objects.filter(product_id=obj)\n return 
ImageProductSerializer(instance=result, many=True).data\n\n def get_price(self, obj):\n return obj.price\n\n\nclass ImageProductSerializer(ModelSerializer):\n\n\n class Meta:\n model = GalleryProduct\n fields = ['image', 'product']\n",
"step-5": "from django.db.models import Count\r\nfrom django.utils.text import slugify\r\n\r\nfrom rest_framework.serializers import ModelSerializer, SerializerMethodField, Serializer\r\nfrom rest_framework import serializers\r\n\r\nfrom category.models import Category\r\nfrom product.models import Product, GalleryProduct, Stone, Color, Size\r\nfrom category.api.serializers import CategorySerializer\r\nfrom extensions.calculations import calculating_gold_jewelry\r\nfrom taggit_serializer.serializers import (\r\n\tTagListSerializerField,\r\n\tTaggitSerializer\r\n\t)\r\n\r\n\r\ndef _create_custom_uuid():\r\n\tmax_id = 1\r\n\tex_last_product = Product.objects.last()\r\n\tif ex_last_product:\r\n\t\tmax_id = ex_last_product.id\r\n\r\n\tmy_id = '{}{:07d}'.format('EUA', max_id if max_id is not None else 1)\r\n\treturn my_id\r\n\r\n\r\nclass ColorSerializer(ModelSerializer):\r\n\tclass Meta:\r\n\t\tmodel = Color\r\n\t\tfields = ['id', 'color']\r\n\r\n\r\nclass SizeSerializer(ModelSerializer):\r\n\tclass Meta:\r\n\t\tmodel = Size\r\n\t\tfields = ['id', 'size']\r\n\r\n\r\nclass StoneSerilizer(ModelSerializer):\r\n\tclass Meta:\r\n\t\tmodel = Stone\r\n\t\tfields = '__all__'\r\n\t\t\r\n\r\nclass ImageCreateProductSerializer(serializers.Serializer):\r\n\tclass Meta:\r\n\t\tmodel = GalleryProduct\r\n\t\tfields = ['image']\r\n\t\r\n\r\nclass ProductListSerializer(serializers.ModelSerializer):\r\n\tgallery = serializers.SerializerMethodField()\r\n\tcategory = serializers.SerializerMethodField()\r\n\tprice = serializers.SerializerMethodField()\r\n\r\n\tclass Meta:\r\n\t\tmodel = Product\r\n\t\tfields = [\r\n\t\t\t'id',\r\n\t\t\t'rating',\r\n\t\t\t'title',\r\n\t\t\t'slug',\r\n\t\t\t'image',\r\n\t\t\t'gallery',\r\n\t\t\t'category',\r\n\t\t\t'price'\r\n\t\t]\r\n\r\n\tdef get_category(self, obj):\r\n\t\tresult = obj.category\r\n\t\treturn CategorySerializer(instance=result).data\r\n\r\n\tdef get_gallery(self, obj):\r\n\t\tresult = 
GalleryProduct.objects.filter(product_id=obj)\r\n\t\treturn ImageProductSerializer(instance=result, many=True).data\r\n\r\n\tdef get_price(self, obj):\r\n\t\treturn obj.price\r\n\r\n\r\nclass ProductsOrderCartSerializer(ModelSerializer):\r\n\r\n\tclass Meta:\r\n\t\tmodel = Product\r\n\t\tfields = ['id', 'title', 'slug', 'image']\r\n\r\n\r\nclass ProductDetailSerializer(TaggitSerializer, ModelSerializer):\r\n\ttags = TagListSerializerField()\r\n\tgallery = SerializerMethodField()\r\n\tcolor = SerializerMethodField()\r\n\tsize = SerializerMethodField()\r\n\tcategory = SerializerMethodField()\r\n\tprice = serializers.SerializerMethodField()\r\n\r\n\tclass Meta:\r\n\t\tmodel = Product\r\n\t\texclude = [\r\n\t\t\t'site_rate',\r\n\t\t\t'is_rate_fixed',\r\n\t\t\t'provider_gold_rate',\r\n\t\t\t'provider_diamond_price',\r\n\t\t]\r\n\r\n\tdef get_color(self, obj):\r\n\t\tresult = obj.color.all()\r\n\t\treturn ColorSerializer(instance=result, many=True).data\r\n\r\n\tdef get_size(self, obj):\r\n\t\tresult = obj.size.all()\r\n\t\treturn SizeSerializer(instance=result, many=True).data\r\n\r\n\tdef get_category(self, obj):\r\n\t\treturn CategorySerializer(instance=obj.category).data\r\n\r\n\tdef get_gallery(self, obj):\r\n\t\tresult = GalleryProduct.objects.filter(product_id=obj)\r\n\t\treturn ImageProductSerializer(instance=result, many=True).data\r\n\r\n\tdef get_price(self, obj):\r\n\t\treturn obj.price\r\n\r\n\r\nclass ImageProductSerializer(ModelSerializer):\r\n\tclass Meta:\r\n\t\tmodel = GalleryProduct\r\n\t\tfields = ['image', 'product']\r\n",
"step-ids": [
9,
15,
16,
17,
21
]
}
|
[
9,
15,
16,
17,
21
] |
from django.shortcuts import render
from rest_framework import generics
from rest_framework import mixins
from django.contrib.auth.models import User
from rest_framework import permissions
from rest_framework.decorators import api_view
from rest_framework.response import Response
from rest_framework.request import Request
from rest_framework.reverse import reverse
from rest_framework import renderers
from rest_framework import viewsets
# Create your views here.
from rest_framework.decorators import action
from community.csrfsession import CsrfExemptSessionAuthentication
from .serializers import InstitutionSerializer, UserSerializer
from .models import Institution
from rest_framework.exceptions import PermissionDenied
from community.permissions import isInstitutionAdmin, getUserInstitution, belongsToInstitution, canUpdateProfile
from community.filters import applyUserFilters, applyInstitutionFilters
from community.mappings import generateKeys
from django.db.models import Q
class InstitutionViewSet(viewsets.ModelViewSet):
    """
    This viewset automatically provides `list`, `create`, `retrieve`,
    `update` and `destroy` actions for Institution objects, with
    membership/admin checks layered on top of the generic behavior.
    """
    queryset = Institution.objects.all()
    serializer_class = InstitutionSerializer
    permission_classes = (permissions.IsAuthenticatedOrReadOnly, )
    authentication_classes = (CsrfExemptSessionAuthentication, )

    def list(self, request, *args, **kwargs):
        """List institutions; superusers get query-string filters applied."""
        if request.user.is_superuser:
            self.queryset = applyInstitutionFilters(request, Institution, *args, **kwargs)
        response = super(InstitutionViewSet, self).list(request, *args, **kwargs)
        # Re-map the serialized payload keys via the project's key mapping.
        response = generateKeys(response, self.serializer_class)
        return response

    def retrieve(self, request, *args, **kwargs):
        """Return one institution; only members of it may view it."""
        if not belongsToInstitution(request, self.get_object()):
            raise PermissionDenied(detail='User does not belong to the institution', code=None)
        return super(InstitutionViewSet, self).retrieve(request, *args, **kwargs)

    def update(self, request, *args, **kwargs):
        """Update an institution; only its admin users may do so.

        BUG FIX: this previously delegated to ``super().retrieve(...)``,
        so PUT/PATCH requests passed the admin check but never persisted
        any changes. It now delegates to the real ``update``.
        """
        if not isInstitutionAdmin(request, self.get_object()):
            raise PermissionDenied(detail='User is not an admin_user', code=None)
        return super(InstitutionViewSet, self).update(request, *args, **kwargs)

    def get_permissions(self):
        """
        Instantiates and returns the list of permissions that this view requires.
        """
        from rest_framework.permissions import IsAuthenticated, IsAdminUser
        if self.action == 'retrieve' or self.action == 'update':
            permission_classes = [IsAuthenticated]
        else:
            permission_classes = [IsAdminUser]
        return [permission() for permission in permission_classes]
class UserViewSet(viewsets.ReadOnlyModelViewSet):
    """Read-only endpoint exposing the `list` and `detail` actions for users."""

    serializer_class = UserSerializer
    queryset = User.objects.all()
@api_view(['GET'])
def api_root(request, format=None):
    """API entry point: hyperlinks to the top-level list endpoints.

    NOTE(review): the original body assigned a local
    ``authentication_classes = []`` which has no effect on a function-based
    view (that attribute only works on class-based views; the decorator
    ``@authentication_classes([])`` is the function-view equivalent). The
    dead assignment has been removed without changing behavior.
    """
    return Response({
        'users': reverse('user-list', request=request, format=format),
        'institutions': reverse('institution-list', request=request, format=format)
    })
|
normal
|
{
"blob_id": "4c43c181dbba1680e036750a2a2ea1185bbe91da",
"index": 3218,
"step-1": "<mask token>\n\n\nclass InstitutionViewSet(viewsets.ModelViewSet):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def get_permissions(self):\n \"\"\"\n Instantiates and returns the list of permissions that this view requires.\n \"\"\"\n from rest_framework.permissions import IsAuthenticated, IsAdminUser\n if self.action == 'retrieve' or self.action == 'update':\n permission_classes = [IsAuthenticated]\n else:\n permission_classes = [IsAdminUser]\n return [permission() for permission in permission_classes]\n\n\nclass UserViewSet(viewsets.ReadOnlyModelViewSet):\n \"\"\"\n This viewset automatically provides `list` and `detail` actions.\n \"\"\"\n queryset = User.objects.all()\n serializer_class = UserSerializer\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass InstitutionViewSet(viewsets.ModelViewSet):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def list(self, request, *args, **kwargs):\n if request.user.is_superuser:\n self.queryset = applyInstitutionFilters(request, Institution, *\n args, **kwargs)\n response = super(InstitutionViewSet, self).list(request, *args, **\n kwargs)\n response = generateKeys(response, self.serializer_class)\n return response\n\n def retrieve(self, request, *args, **kwargs):\n if not belongsToInstitution(request, self.get_object()):\n raise PermissionDenied(detail=\n 'User does not belong to the institution', code=None)\n return super(InstitutionViewSet, self).retrieve(request, *args, **\n kwargs)\n\n def update(self, request, *args, **kwargs):\n if not isInstitutionAdmin(request, self.get_object()):\n raise PermissionDenied(detail='User is not an admin_user', code\n =None)\n return super(InstitutionViewSet, self).retrieve(request, *args, **\n kwargs)\n\n def get_permissions(self):\n \"\"\"\n Instantiates and returns the list of permissions that this view requires.\n \"\"\"\n from rest_framework.permissions import IsAuthenticated, IsAdminUser\n if self.action == 'retrieve' or self.action == 'update':\n permission_classes = [IsAuthenticated]\n else:\n permission_classes = [IsAdminUser]\n return [permission() for permission in permission_classes]\n\n\nclass UserViewSet(viewsets.ReadOnlyModelViewSet):\n \"\"\"\n This viewset automatically provides `list` and `detail` actions.\n \"\"\"\n queryset = User.objects.all()\n serializer_class = UserSerializer\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass InstitutionViewSet(viewsets.ModelViewSet):\n \"\"\"\n This viewset automatically provides `list`, `create`, `retrieve`,\n `update` and `destroy` actions.\n\n Additionally we also provide an extra `highlight` action.\n \"\"\"\n queryset = Institution.objects.all()\n serializer_class = InstitutionSerializer\n permission_classes = permissions.IsAuthenticatedOrReadOnly,\n authentication_classes = CsrfExemptSessionAuthentication,\n\n def list(self, request, *args, **kwargs):\n if request.user.is_superuser:\n self.queryset = applyInstitutionFilters(request, Institution, *\n args, **kwargs)\n response = super(InstitutionViewSet, self).list(request, *args, **\n kwargs)\n response = generateKeys(response, self.serializer_class)\n return response\n\n def retrieve(self, request, *args, **kwargs):\n if not belongsToInstitution(request, self.get_object()):\n raise PermissionDenied(detail=\n 'User does not belong to the institution', code=None)\n return super(InstitutionViewSet, self).retrieve(request, *args, **\n kwargs)\n\n def update(self, request, *args, **kwargs):\n if not isInstitutionAdmin(request, self.get_object()):\n raise PermissionDenied(detail='User is not an admin_user', code\n =None)\n return super(InstitutionViewSet, self).retrieve(request, *args, **\n kwargs)\n\n def get_permissions(self):\n \"\"\"\n Instantiates and returns the list of permissions that this view requires.\n \"\"\"\n from rest_framework.permissions import IsAuthenticated, IsAdminUser\n if self.action == 'retrieve' or self.action == 'update':\n permission_classes = [IsAuthenticated]\n else:\n permission_classes = [IsAdminUser]\n return [permission() for permission in permission_classes]\n\n\nclass UserViewSet(viewsets.ReadOnlyModelViewSet):\n \"\"\"\n This viewset automatically provides `list` and `detail` actions.\n \"\"\"\n queryset = User.objects.all()\n serializer_class = UserSerializer\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass InstitutionViewSet(viewsets.ModelViewSet):\n \"\"\"\n This viewset automatically provides `list`, `create`, `retrieve`,\n `update` and `destroy` actions.\n\n Additionally we also provide an extra `highlight` action.\n \"\"\"\n queryset = Institution.objects.all()\n serializer_class = InstitutionSerializer\n permission_classes = permissions.IsAuthenticatedOrReadOnly,\n authentication_classes = CsrfExemptSessionAuthentication,\n\n def list(self, request, *args, **kwargs):\n if request.user.is_superuser:\n self.queryset = applyInstitutionFilters(request, Institution, *\n args, **kwargs)\n response = super(InstitutionViewSet, self).list(request, *args, **\n kwargs)\n response = generateKeys(response, self.serializer_class)\n return response\n\n def retrieve(self, request, *args, **kwargs):\n if not belongsToInstitution(request, self.get_object()):\n raise PermissionDenied(detail=\n 'User does not belong to the institution', code=None)\n return super(InstitutionViewSet, self).retrieve(request, *args, **\n kwargs)\n\n def update(self, request, *args, **kwargs):\n if not isInstitutionAdmin(request, self.get_object()):\n raise PermissionDenied(detail='User is not an admin_user', code\n =None)\n return super(InstitutionViewSet, self).retrieve(request, *args, **\n kwargs)\n\n def get_permissions(self):\n \"\"\"\n Instantiates and returns the list of permissions that this view requires.\n \"\"\"\n from rest_framework.permissions import IsAuthenticated, IsAdminUser\n if self.action == 'retrieve' or self.action == 'update':\n permission_classes = [IsAuthenticated]\n else:\n permission_classes = [IsAdminUser]\n return [permission() for permission in permission_classes]\n\n\nclass UserViewSet(viewsets.ReadOnlyModelViewSet):\n \"\"\"\n This viewset automatically provides `list` and `detail` actions.\n \"\"\"\n queryset = User.objects.all()\n serializer_class = UserSerializer\n\n\n@api_view(['GET'])\ndef api_root(request, format=None):\n 
authentication_classes = []\n return Response({'users': reverse('user-list', request=request, format=\n format), 'institutions': reverse('institution-list', request=\n request, format=format)})\n",
"step-5": "from django.shortcuts import render\nfrom rest_framework import generics\nfrom rest_framework import mixins\nfrom django.contrib.auth.models import User\nfrom rest_framework import permissions\n\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\nfrom rest_framework.request import Request\nfrom rest_framework.reverse import reverse\nfrom rest_framework import renderers\nfrom rest_framework import viewsets\n# Create your views here.\n\nfrom rest_framework.decorators import action\nfrom community.csrfsession import CsrfExemptSessionAuthentication\nfrom .serializers import InstitutionSerializer, UserSerializer\nfrom .models import Institution\nfrom rest_framework.exceptions import PermissionDenied\nfrom community.permissions import isInstitutionAdmin, getUserInstitution, belongsToInstitution, canUpdateProfile\nfrom community.filters import applyUserFilters, applyInstitutionFilters\nfrom community.mappings import generateKeys\nfrom django.db.models import Q\n\nclass InstitutionViewSet(viewsets.ModelViewSet):\n \"\"\"\n This viewset automatically provides `list`, `create`, `retrieve`,\n `update` and `destroy` actions.\n\n Additionally we also provide an extra `highlight` action.\n \"\"\"\n queryset = Institution.objects.all()\n serializer_class = InstitutionSerializer\n permission_classes = (permissions.IsAuthenticatedOrReadOnly, )\n authentication_classes = (CsrfExemptSessionAuthentication, )\n\n def list(self, request, *args, **kwargs):\n if request.user.is_superuser:\n self.queryset = applyInstitutionFilters(request, Institution, *args, **kwargs)\n response = super(InstitutionViewSet, self).list(request, *args, **kwargs)\n response = generateKeys(response, self.serializer_class)\n return response\n\n def retrieve(self, request, *args, **kwargs):\n if not belongsToInstitution(request, self.get_object()):\n raise PermissionDenied(detail='User does not belong to the institution', code=None)\n return 
super(InstitutionViewSet, self).retrieve(request, *args, **kwargs)\n\n def update(self, request, *args, **kwargs):\n if not isInstitutionAdmin(request, self.get_object()):\n raise PermissionDenied(detail='User is not an admin_user', code=None)\n return super(InstitutionViewSet, self).retrieve(request, *args, **kwargs)\n\n\n def get_permissions(self):\n \"\"\"\n Instantiates and returns the list of permissions that this view requires.\n \"\"\"\n from rest_framework.permissions import IsAuthenticated, IsAdminUser\n if self.action =='retrieve' or self.action == 'update':\n permission_classes = [IsAuthenticated]\n else:\n permission_classes = [IsAdminUser]\n return [permission() for permission in permission_classes]\n\nclass UserViewSet(viewsets.ReadOnlyModelViewSet):\n \"\"\"\n This viewset automatically provides `list` and `detail` actions.\n \"\"\"\n queryset = User.objects.all()\n serializer_class = UserSerializer\n\n@api_view(['GET'])\ndef api_root(request, format=None):\n authentication_classes = []\n return Response({\n 'users': reverse('user-list', request=request, format=format),\n 'institutions': reverse('institution-list', request=request, format=format)\n })",
"step-ids": [
5,
8,
10,
11,
13
]
}
|
[
5,
8,
10,
11,
13
] |
import tensorflow as tf
from sklearn.cluster import KMeans
import tensorflow.keras as keras
from copy import deepcopy
import numpy as np
import h5py
from collections import defaultdict, namedtuple
from heapq import heappush, heappop, heapify
import struct
# Enable eager execution (TF 1.x API) so tensors evaluate immediately.
tf.enable_eager_execution()
mnist = tf.keras.datasets.mnist
(x_train, y_train),(x_test, y_test) = mnist.load_data()
# Add a channel axis and convert to float32 for the Conv2D layers.
x_train = x_train.reshape(-1, 28, 28, 1).astype(np.float32)
x_test = x_test.reshape(-1, 28, 28, 1).astype(np.float32)
# Scale pixel values from [0, 255] to [0, 1].
x_train, x_test = x_train / 255.0, x_test / 255.0
# One-hot encode the 10 digit classes.
y_train = keras.utils.to_categorical(y_train, 10)
y_test = keras.utils.to_categorical(y_test, 10)
print(x_test.shape)
# Fraction of weights pruned away per output slice in prune_weights().
COMPRESSION_RATE = 0.9
BATCH_SIZE = 50
NUM_BATCHES = 1000
NUM_EPOCH = 1
# Bits per weight-sharing cluster index (2**BITS k-means centroids).
BITS = 5
# Largest gap representable by the relative (run-length) index encoding.
MAX_SPAN = 2 ** BITS
LEARNING_RATE = 0.001
# Small LeNet-style CNN for MNIST classification.
model = tf.keras.models.Sequential([
    tf.keras.layers.Conv2D(32, (5, 5), activation='relu', input_shape=[28, 28, 1]),
    tf.keras.layers.Conv2D(64, (5, 5), activation='relu'),
    tf.keras.layers.MaxPool2D(pool_size=(2, 2)),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dropout(0.5),
    tf.keras.layers.Dense(128, activation=tf.nn.relu),
    tf.keras.layers.Dropout(0.5),
    tf.keras.layers.Dense(10, activation=tf.nn.softmax)
])
model.compile(optimizer=tf.train.AdamOptimizer(learning_rate=LEARNING_RATE),
              loss='categorical_crossentropy',
              metrics=['accuracy'])
# history = model.fit(x_train, y_train, validation_split=0.2, epochs=5, batch_size=50)
# score = model.evaluate(x_test, y_test)
# print(score[1])
# model.save_weights('./result/my_model.h5', save_format='h5')
# Load previously trained weights instead of retraining from scratch.
model.load_weights('./result/my_model.h5')
score = model.evaluate(x_test, y_test)
print(score[1])
def get_batch(batch_size):
    """Sample a random mini-batch of ``batch_size`` training examples (with replacement)."""
    num_examples = np.shape(x_train)[0]
    chosen = np.random.randint(0, num_examples, batch_size)
    return x_train[chosen, :], y_train[chosen]
def prune_weights(weight, compression_rate=None):
    """Prune the smallest-magnitude weights of each output slice, in place.

    For every slice along the last axis (one per output unit / filter), the
    fraction ``compression_rate`` of entries with the smallest absolute
    values is set to zero.

    Args:
        weight: ndarray of layer weights; the last axis indexes output units.
        compression_rate: fraction of weights to zero out per slice.
            Defaults to the module-level COMPRESSION_RATE (backward
            compatible with the original zero-argument call sites).

    Returns:
        (weight, sparse_matrix): the pruned array (modified in place) and a
        same-shaped 0/1 mask marking the surviving connections.
    """
    rate = compression_rate if compression_rate is not None else COMPRESSION_RATE
    for i in range(weight.shape[-1]):
        # BUG FIX: flatten the slice before sorting/indexing. For >1-D
        # slices (e.g. 4-D conv kernels) the original `tmp[k]` after
        # np.sort picked out a whole row of per-column values instead of
        # a single scalar threshold, so pruning was not magnitude-ranked.
        magnitudes = np.sort(np.abs(weight[..., i]).flatten())
        threshold = magnitudes[int(magnitudes.shape[0] * rate)]
        weight[..., i][np.abs(weight[..., i]) < threshold] = 0
    sparse_matrix = deepcopy(weight)
    sparse_matrix[sparse_matrix != 0] = 1
    return weight, sparse_matrix
# Maps layer index -> 0/1 sparsity mask for every pruned layer.
Sparse_layer = {}
# Pruning
# For every layer with trainable weights except the first one, zero the
# smallest-magnitude weights and remember the mask so retraining can keep
# the pruned connections at exactly zero.
for layer_id in range(len(model.layers)):
    layer = model.layers[layer_id]
    weight = layer.get_weights()
    # weight:weight[0]
    # bias:weight[1]
    if len(weight) > 0:
        if layer_id != 0:
            w = deepcopy(weight)
            new_weight, sparse_matrix = prune_weights(w[0])
            Sparse_layer[layer_id] = sparse_matrix
            w[0] = new_weight
            layer.set_weights(w)
score = model.evaluate(x_test, y_test, verbose=0)
print(score[1])
# Retrain
# Fine-tune the pruned network. After every batch, the sparsity masks are
# re-applied so weights that were pruned stay exactly zero.
for epoch in range(NUM_EPOCH):
    for j in range(x_train.shape[0] // BATCH_SIZE):
        begin = j*BATCH_SIZE
        # Clamp the final batch to the end of the training set.
        if j*BATCH_SIZE + BATCH_SIZE > x_train.shape[0]:
            end = x_train.shape[0]
        else:
            end = j*BATCH_SIZE + BATCH_SIZE
        X, Y = x_train[begin:end], y_train[begin:end]
        # train on each batch
        model.train_on_batch(X, Y)
        # apply Sparse connection
        for layer_id in Sparse_layer:
            w = model.layers[layer_id].get_weights()
            w[0] = w[0] * Sparse_layer[layer_id]
            model.layers[layer_id].set_weights(w)
        # NOTE(review): this evaluation runs inside the per-batch loop,
        # i.e. the full test set is scored after every single batch —
        # extremely slow; confirm it wasn't meant to sit at epoch level.
        score = model.evaluate(x_test, y_test, verbose=0)
        print('val loss: {}'.format(score[0]))
        print('val acc: {}'.format(score[1]))
score = model.evaluate(x_test, y_test, verbose=0)
print(score[1])
# Maps layer index -> per-weight cluster labels / centroid values.
cluster_index = dict()
cluster_centroids = dict()
# Weight Share and Quantization
# Cluster each pruned layer's surviving (nonzero) weights into 2**BITS
# centroids using k-means initialized on a linear span of the weight range,
# then replace every nonzero weight with its cluster centroid.
for layer_id in Sparse_layer:
    layer = model.layers[layer_id]
    weight = layer.get_weights()
    w = deepcopy(weight)
    shape = w[0].shape
    weight_array = w[0].flatten()
    nonzero_weight = w[0][Sparse_layer[layer_id] != 0].flatten()
    # Flat positions of the surviving weights within the layer.
    nonzero_index = np.where(Sparse_layer[layer_id].flatten() != 0)[0]
    max_weight = max(nonzero_weight)
    min_weight = min(nonzero_weight)
    # Linear initialization of the 2**BITS centroids over [min, max].
    space = np.linspace(min_weight, max_weight, num=2 ** BITS)
    kmeans = KMeans(n_clusters=len(space), init=space.reshape(-1, 1), n_init=1, precompute_distances=True,
                    algorithm="full")
    kmeans.fit(nonzero_weight.reshape(-1, 1))
    # cluster index of each weight
    layer_cluster_index = kmeans.labels_
    # value of the centroids
    layer_centroids = kmeans.cluster_centers_.flatten()
    # Add to dict
    cluster_index[layer_id] = layer_cluster_index
    cluster_centroids[layer_id] = layer_centroids
    # set new weight
    new_weight = kmeans.cluster_centers_[kmeans.labels_].flatten()
    # Write the quantized values back into their original flat positions.
    for idx in range(len(nonzero_index)):
        index = nonzero_index[idx]
        weight_array[index] = new_weight[idx]
    # new_weight = kmeans.cluster_centers_[kmeans.labels_].reshape(shape)
    # w[0] = new_weight
    w[0] = weight_array.reshape(shape)
    layer.set_weights(w)
score = model.evaluate(x_test, y_test, verbose=0)
print(score[1])
# calculate gradient and get the fine-tuned centroids
# for epoch in range(NUM_EPOCH):
# for j in range(x_train.shape[0] // BATCH_SIZE):
# begin = j * BATCH_SIZE
# if j * BATCH_SIZE + BATCH_SIZE > x_train.shape[0]:
# end = x_train.shape[0]
# else:
# end = j * BATCH_SIZE + BATCH_SIZE
# X, Y = x_train[begin:end], y_train[begin:end]
# with tf.GradientTape() as tape:
# y_predict = model(X)
# loss = tf.losses.softmax_cross_entropy(onehot_labels=Y, logits=y_predict)
# grads = tape.gradient(loss, model.variables)
# gradient_num = 0
# for layer_id in Sparse_layer:
# gradient_num += 2
# gradient = grads[gradient_num].numpy().flatten()
#
# # Get the gradient of the nonzero position
# nonzero_gradient = gradient[Sparse_layer[layer_id].flatten() != 0].flatten()
# nonzero_index = np.where(Sparse_layer[layer_id].flatten() != 0)[0]
# # print(len(nonzero_gradient))
#
# gradient_index = np.zeros(2 ** BITS)
# # Calculate the sum of gradient of the same cluster
# for i in range(len(nonzero_gradient)):
# gradient_index[cluster_index[layer_id][i]] += gradient[i]
# # Update centroid
# fine_tuned_centroids = cluster_centroids[layer_id]-LEARNING_RATE*gradient_index
# cluster_centroids[layer_id] = fine_tuned_centroids
#
# w = model.layers[layer_id].get_weights()
# shape = w[0].shape
# weight_array = w[0].flatten()
# new_weight = fine_tuned_centroids[cluster_index[layer_id]]
# for idx in range(len(nonzero_index)):
# index = nonzero_index[idx]
# weight_array[index] = new_weight[idx]
#
# w[0] = weight_array.reshape(shape)
# model.layers[layer_id].set_weights(w)
# score = model.evaluate(x_test, y_test, verbose=0)
# print('val loss: {}'.format(score[0]))
# print('val acc: {}'.format(score[1]))
print('-------------------')
score = model.evaluate(x_test, y_test, verbose=0)
print(score[1])
# Per-layer containers for the relative (gap) indices and the
# huffman-encoded cluster indices built below.
layer_relative_index = dict()
layer_weight_cluster_index = dict()
# Huffman-tree node: leaves carry a `value`; internal nodes have children.
Node = namedtuple('Node', ['frequency', 'value', 'left', 'right'])
# Order heap entries by frequency so heapq pops the rarest node first.
Node.__lt__ = lambda x, y: x.frequency < y.frequency
def encode_huffman_tree(root):
    """
    Serialize a huffman tree to a string of '0's and '1's via pre-order
    traversal: '1' followed by the 32-bit value for a leaf, '0' followed
    by both subtrees for an internal node.
    """
    pieces = []

    def walk(node):
        if node.value is None:
            # Internal node: marker bit, then left and right subtrees.
            pieces.append('0')
            walk(node.left)
            walk(node.right)
        else:
            # Leaf: marker bit, then the value's 32-bit representation.
            pieces.append('1')
            pieces.extend(int2bitstr(node.value))

    walk(root)
    return ''.join(pieces)
def int2bitstr(integer):
    """Render ``integer`` as the 32 bits of its big-endian unsigned encoding."""
    packed = struct.pack('>I', integer)
    bits = [format(byte, '08b') for byte in packed]
    return ''.join(bits)
def bitstr2int(bitstr):
    """Inverse of int2bitstr: parse a 32-character bit string back to an int."""
    raw = bytearray()
    for start in range(0, len(bitstr), 8):
        raw.append(int(bitstr[start:start + 8], 2))
    return struct.unpack('>I', raw)[0]
def huffman_encode(arr):
    """
    Huffman-encode the integers in ``arr``.

    Returns:
        (data_encoding, codebook_encoding): the concatenated bit codes of
        every element, and the serialized huffman tree.

    NOTE(review): if ``arr`` holds a single distinct value the root is a
    leaf whose code is the empty string, so data_encoding is '' — confirm
    the (unseen) decoder handles that degenerate case.
    """
    # Tally how often each integer occurs.
    counts = defaultdict(int)
    for element in np.nditer(arr):
        counts[int(element)] += 1

    # Build the tree by repeatedly merging the two rarest nodes.
    heap = [Node(freq, val, None, None) for val, freq in counts.items()]
    heapify(heap)
    while len(heap) > 1:
        left = heappop(heap)
        right = heappop(heap)
        heappush(heap, Node(left.frequency + right.frequency, None, left, right))
    root = heappop(heap)

    # Walk the tree assigning '0'/'1' path strings to each leaf value.
    value2code = dict()

    def assign(node, prefix):
        if node is None:
            return
        if node.value is not None:
            value2code[node.value] = prefix
            return
        assign(node.left, prefix + '0')
        assign(node.right, prefix + '1')

    assign(root, '')

    data_encoding = ''.join(value2code[int(element)] for element in np.nditer(arr))
    codebook_encoding = encode_huffman_tree(root)
    return data_encoding, codebook_encoding
# Matrix sparsity with relative index
# Store each layer's surviving weights as gaps between successive nonzero
# positions; gaps larger than MAX_SPAN are split by inserting filler
# entries so every stored gap fits the encoding.
for layer_id in Sparse_layer:
    layer = model.layers[layer_id]
    weight = layer.get_weights()
    w = deepcopy(weight)
    shape = w[0].shape
    weight_array = w[0].flatten()
    # nonzero_weight = w[0][Sparse_layer[layer_id] != 0].flatten()
    # print(len(nonzero_weight))
    nonzero_weight_cluster_index = cluster_index[layer_id]
    print(len(nonzero_weight_cluster_index))
    nonzero_index = np.where(Sparse_layer[layer_id].flatten() != 0)[0]
    # First entry is the absolute position; the rest are successive gaps.
    first = nonzero_index[0]
    relative = np.insert(np.diff(nonzero_index), 0, first)
    relative_diff_index = relative.tolist()
    weight_cluster_index = nonzero_weight_cluster_index.tolist()
    # `shift` tracks how many filler entries have been inserted so far,
    # keeping `i` (an index into the original `relative`) aligned.
    shift = 0
    for i in np.where(relative > MAX_SPAN)[0].tolist():
        while relative_diff_index[i + shift] > MAX_SPAN:
            # Insert a filler gap of MAX_SPAN with cluster index 0.
            # NOTE(review): index 0 maps to a real centroid; presumably
            # the decoder treats filler positions as zero weights —
            # confirm against the decoding side.
            relative_diff_index.insert(i + shift, MAX_SPAN)
            weight_cluster_index.insert(i + shift, 0)
            shift += 1
            relative_diff_index[i + shift] -= MAX_SPAN
    layer_relative_index[layer_id] = np.array(relative_diff_index)
    data_encoding, codebook_encoding = huffman_encode(np.array(weight_cluster_index))
    # layer_weight_cluster_index[layer_id] = np.array(weight_cluster_index)
    layer_weight_cluster_index[layer_id] = np.array([data_encoding, codebook_encoding])
print('----------------')
# print(layer_weight_value[5])
# encode
# Write the compressed model to HDF5: pruned layers store their shape,
# relative indices, huffman-encoded cluster indices and centroids; the
# first layer stores its raw weight and bias.
file_name = './result/compressed_model2'
file = h5py.File('{}.h5'.format(file_name), mode='w')
for layer_id in range(len(model.layers)):
    layer = model.layers[layer_id]
    weight = layer.get_weights()
    if len(weight) > 0:
        file_layer = file.create_group(layer.name)
        shape = weight[0].shape
        if layer_id != 0:
            print(len(weight[0].shape))
            pshape = file_layer.create_dataset('shape', np.array(shape).shape, dtype='int32')
            pindex = file_layer.create_dataset('index', layer_relative_index[layer_id].shape, dtype='int32')
            # pcluster_index = file_layer.create_dataset('cluster_index', layer_weight_cluster_index[layer_id].shape,
            #                                            dtype='int32')
            # Variable-length strings hold the huffman bit strings.
            pcluster_index = file_layer.create_dataset('cluster_index', layer_weight_cluster_index[layer_id].shape,
                                                       dtype=h5py.special_dtype(vlen=str))
            pcentroid = file_layer.create_dataset('centroid', cluster_centroids[layer_id].shape, dtype='float32')
            pshape[:] = np.array(shape)
            pindex[:] = layer_relative_index[layer_id]
            pcluster_index[:] = layer_weight_cluster_index[layer_id]
            pcentroid[:] = cluster_centroids[layer_id]
            # NOTE(review): the bias (weight[1]) of pruned layers is not
            # written here — confirm the decoder can reconstruct it, or
            # that this omission is intentional.
        else:
            pweight = file_layer.create_dataset('weight', weight[0].shape, dtype='float32')
            pweight[:] = weight[0]
            pbias = file_layer.create_dataset('bias', weight[1].shape, dtype='float32')
            pbias[:] = weight[1]
file.flush()
file.close()
|
normal
|
{
"blob_id": "086aefaad7a4b743e5a05b3a44db971dbdbf16b6",
"index": 8299,
"step-1": "<mask token>\n\n\ndef prune_weights(weight):\n for i in range(weight.shape[-1]):\n tmp = deepcopy(weight[..., i])\n tmp = np.abs(tmp)\n tmp = np.sort(np.array(tmp))\n threshold = tmp[int(tmp.shape[0] * COMPRESSION_RATE)]\n weight[..., i][np.abs(weight[..., i]) < threshold] = 0\n sparse_matrix = deepcopy(weight)\n sparse_matrix[sparse_matrix != 0] = 1\n return weight, sparse_matrix\n\n\n<mask token>\n\n\ndef huffman_encode(arr):\n frequency_map = defaultdict(int)\n for value in np.nditer(arr):\n value = int(value)\n frequency_map[value] += 1\n heap = [Node(frequency, value, None, None) for value, frequency in\n frequency_map.items()]\n heapify(heap)\n while len(heap) > 1:\n node1 = heappop(heap)\n node2 = heappop(heap)\n merged = Node(node1.frequency + node2.frequency, None, node1, node2)\n heappush(heap, merged)\n value2code = dict()\n\n def generate_code(node, code):\n if node is None:\n return\n if node.value is not None:\n value2code[node.value] = code\n return\n generate_code(node.left, code + '0')\n generate_code(node.right, code + '1')\n root = heappop(heap)\n generate_code(root, '')\n data_encoding = ''.join(value2code[int(value)] for value in np.nditer(arr))\n codebook_encoding = encode_huffman_tree(root)\n return data_encoding, codebook_encoding\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef prune_weights(weight):\n for i in range(weight.shape[-1]):\n tmp = deepcopy(weight[..., i])\n tmp = np.abs(tmp)\n tmp = np.sort(np.array(tmp))\n threshold = tmp[int(tmp.shape[0] * COMPRESSION_RATE)]\n weight[..., i][np.abs(weight[..., i]) < threshold] = 0\n sparse_matrix = deepcopy(weight)\n sparse_matrix[sparse_matrix != 0] = 1\n return weight, sparse_matrix\n\n\n<mask token>\n\n\ndef int2bitstr(integer):\n four_bytes = struct.pack('>I', integer)\n return ''.join(f'{byte:08b}' for byte in four_bytes)\n\n\ndef bitstr2int(bitstr):\n byte_arr = bytearray(int(bitstr[i:i + 8], 2) for i in range(0, len(\n bitstr), 8))\n return struct.unpack('>I', byte_arr)[0]\n\n\ndef huffman_encode(arr):\n frequency_map = defaultdict(int)\n for value in np.nditer(arr):\n value = int(value)\n frequency_map[value] += 1\n heap = [Node(frequency, value, None, None) for value, frequency in\n frequency_map.items()]\n heapify(heap)\n while len(heap) > 1:\n node1 = heappop(heap)\n node2 = heappop(heap)\n merged = Node(node1.frequency + node2.frequency, None, node1, node2)\n heappush(heap, merged)\n value2code = dict()\n\n def generate_code(node, code):\n if node is None:\n return\n if node.value is not None:\n value2code[node.value] = code\n return\n generate_code(node.left, code + '0')\n generate_code(node.right, code + '1')\n root = heappop(heap)\n generate_code(root, '')\n data_encoding = ''.join(value2code[int(value)] for value in np.nditer(arr))\n codebook_encoding = encode_huffman_tree(root)\n return data_encoding, codebook_encoding\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef get_batch(batch_size):\n index = np.random.randint(0, np.shape(x_train)[0], batch_size)\n return x_train[index, :], y_train[index]\n\n\ndef prune_weights(weight):\n for i in range(weight.shape[-1]):\n tmp = deepcopy(weight[..., i])\n tmp = np.abs(tmp)\n tmp = np.sort(np.array(tmp))\n threshold = tmp[int(tmp.shape[0] * COMPRESSION_RATE)]\n weight[..., i][np.abs(weight[..., i]) < threshold] = 0\n sparse_matrix = deepcopy(weight)\n sparse_matrix[sparse_matrix != 0] = 1\n return weight, sparse_matrix\n\n\n<mask token>\n\n\ndef int2bitstr(integer):\n four_bytes = struct.pack('>I', integer)\n return ''.join(f'{byte:08b}' for byte in four_bytes)\n\n\ndef bitstr2int(bitstr):\n byte_arr = bytearray(int(bitstr[i:i + 8], 2) for i in range(0, len(\n bitstr), 8))\n return struct.unpack('>I', byte_arr)[0]\n\n\ndef huffman_encode(arr):\n frequency_map = defaultdict(int)\n for value in np.nditer(arr):\n value = int(value)\n frequency_map[value] += 1\n heap = [Node(frequency, value, None, None) for value, frequency in\n frequency_map.items()]\n heapify(heap)\n while len(heap) > 1:\n node1 = heappop(heap)\n node2 = heappop(heap)\n merged = Node(node1.frequency + node2.frequency, None, node1, node2)\n heappush(heap, merged)\n value2code = dict()\n\n def generate_code(node, code):\n if node is None:\n return\n if node.value is not None:\n value2code[node.value] = code\n return\n generate_code(node.left, code + '0')\n generate_code(node.right, code + '1')\n root = heappop(heap)\n generate_code(root, '')\n data_encoding = ''.join(value2code[int(value)] for value in np.nditer(arr))\n codebook_encoding = encode_huffman_tree(root)\n return data_encoding, codebook_encoding\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef get_batch(batch_size):\n index = np.random.randint(0, np.shape(x_train)[0], batch_size)\n return x_train[index, :], y_train[index]\n\n\ndef prune_weights(weight):\n for i in range(weight.shape[-1]):\n tmp = deepcopy(weight[..., i])\n tmp = np.abs(tmp)\n tmp = np.sort(np.array(tmp))\n threshold = tmp[int(tmp.shape[0] * COMPRESSION_RATE)]\n weight[..., i][np.abs(weight[..., i]) < threshold] = 0\n sparse_matrix = deepcopy(weight)\n sparse_matrix[sparse_matrix != 0] = 1\n return weight, sparse_matrix\n\n\n<mask token>\n\n\ndef encode_huffman_tree(root):\n \"\"\"\n Encodes a huffman tree to string of '0's and '1's\n \"\"\"\n code_list = []\n\n def encode_node(node):\n if node.value is not None:\n code_list.append('1')\n lst = list(int2bitstr(node.value))\n code_list.extend(lst)\n else:\n code_list.append('0')\n encode_node(node.left)\n encode_node(node.right)\n encode_node(root)\n return ''.join(code_list)\n\n\ndef int2bitstr(integer):\n four_bytes = struct.pack('>I', integer)\n return ''.join(f'{byte:08b}' for byte in four_bytes)\n\n\ndef bitstr2int(bitstr):\n byte_arr = bytearray(int(bitstr[i:i + 8], 2) for i in range(0, len(\n bitstr), 8))\n return struct.unpack('>I', byte_arr)[0]\n\n\ndef huffman_encode(arr):\n frequency_map = defaultdict(int)\n for value in np.nditer(arr):\n value = int(value)\n frequency_map[value] += 1\n heap = [Node(frequency, value, None, None) for value, frequency in\n frequency_map.items()]\n heapify(heap)\n while len(heap) > 1:\n node1 = heappop(heap)\n node2 = heappop(heap)\n merged = Node(node1.frequency + node2.frequency, None, node1, node2)\n heappush(heap, merged)\n value2code = dict()\n\n def generate_code(node, code):\n if node is None:\n return\n if node.value is not None:\n value2code[node.value] = code\n return\n generate_code(node.left, code + '0')\n generate_code(node.right, code + '1')\n root = heappop(heap)\n generate_code(root, '')\n data_encoding = ''.join(value2code[int(value)] for value in 
np.nditer(arr))\n codebook_encoding = encode_huffman_tree(root)\n return data_encoding, codebook_encoding\n\n\n<mask token>\n",
"step-5": "import tensorflow as tf\nfrom sklearn.cluster import KMeans\nimport tensorflow.keras as keras\nfrom copy import deepcopy\nimport numpy as np\nimport h5py\nfrom collections import defaultdict, namedtuple\nfrom heapq import heappush, heappop, heapify\nimport struct\ntf.enable_eager_execution()\n\nmnist = tf.keras.datasets.mnist\n\n(x_train, y_train),(x_test, y_test) = mnist.load_data()\nx_train = x_train.reshape(-1, 28, 28, 1).astype(np.float32)\nx_test = x_test.reshape(-1, 28, 28, 1).astype(np.float32)\nx_train, x_test = x_train / 255.0, x_test / 255.0\ny_train = keras.utils.to_categorical(y_train, 10)\ny_test = keras.utils.to_categorical(y_test, 10)\nprint(x_test.shape)\n\nCOMPRESSION_RATE = 0.9\nBATCH_SIZE = 50\nNUM_BATCHES = 1000\nNUM_EPOCH = 1\nBITS = 5\nMAX_SPAN = 2 ** BITS\nLEARNING_RATE = 0.001\n\n\nmodel = tf.keras.models.Sequential([\n tf.keras.layers.Conv2D(32, (5, 5), activation='relu', input_shape=[28, 28, 1]),\n tf.keras.layers.Conv2D(64, (5, 5), activation='relu'),\n tf.keras.layers.MaxPool2D(pool_size=(2, 2)),\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dropout(0.5),\n tf.keras.layers.Dense(128, activation=tf.nn.relu),\n tf.keras.layers.Dropout(0.5),\n tf.keras.layers.Dense(10, activation=tf.nn.softmax)\n])\n\nmodel.compile(optimizer=tf.train.AdamOptimizer(learning_rate=LEARNING_RATE),\n loss='categorical_crossentropy',\n metrics=['accuracy'])\n\n\n# history = model.fit(x_train, y_train, validation_split=0.2, epochs=5, batch_size=50)\n# score = model.evaluate(x_test, y_test)\n# print(score[1])\n\n# model.save_weights('./result/my_model.h5', save_format='h5')\n\nmodel.load_weights('./result/my_model.h5')\nscore = model.evaluate(x_test, y_test)\nprint(score[1])\n\n\ndef get_batch(batch_size):\n index = np.random.randint(0, np.shape(x_train)[0], batch_size)\n return x_train[index, :], y_train[index]\n\n\ndef prune_weights(weight):\n for i in range(weight.shape[-1]):\n tmp = deepcopy(weight[..., i])\n tmp = np.abs(tmp)\n tmp = 
np.sort(np.array(tmp))\n # compute threshold\n threshold = tmp[int(tmp.shape[0] * COMPRESSION_RATE)]\n weight[..., i][np.abs(weight[..., i]) < threshold] = 0\n sparse_matrix = deepcopy(weight)\n sparse_matrix[sparse_matrix != 0] = 1\n return weight, sparse_matrix\n\n\nSparse_layer = {}\n\n# Pruning\nfor layer_id in range(len(model.layers)):\n layer = model.layers[layer_id]\n weight = layer.get_weights()\n # weight:weight[0]\n # bias:weight[1]\n if len(weight) > 0:\n if layer_id != 0:\n w = deepcopy(weight)\n new_weight, sparse_matrix = prune_weights(w[0])\n Sparse_layer[layer_id] = sparse_matrix\n w[0] = new_weight\n layer.set_weights(w)\n\n\nscore = model.evaluate(x_test, y_test, verbose=0)\nprint(score[1])\n\n# Retrain\nfor epoch in range(NUM_EPOCH):\n for j in range(x_train.shape[0] // BATCH_SIZE):\n begin = j*BATCH_SIZE\n if j*BATCH_SIZE + BATCH_SIZE > x_train.shape[0]:\n end = x_train.shape[0]\n else:\n end = j*BATCH_SIZE + BATCH_SIZE\n X, Y = x_train[begin:end], y_train[begin:end]\n # train on each batch\n model.train_on_batch(X, Y)\n # apply Sparse connection\n for layer_id in Sparse_layer:\n w = model.layers[layer_id].get_weights()\n w[0] = w[0] * Sparse_layer[layer_id]\n model.layers[layer_id].set_weights(w)\n score = model.evaluate(x_test, y_test, verbose=0)\n print('val loss: {}'.format(score[0]))\n print('val acc: {}'.format(score[1]))\n\n\nscore = model.evaluate(x_test, y_test, verbose=0)\nprint(score[1])\n\ncluster_index = dict()\ncluster_centroids = dict()\n\n\n# Weight Share and Quantization\nfor layer_id in Sparse_layer:\n layer = model.layers[layer_id]\n weight = layer.get_weights()\n w = deepcopy(weight)\n shape = w[0].shape\n\n weight_array = w[0].flatten()\n nonzero_weight = w[0][Sparse_layer[layer_id] != 0].flatten()\n nonzero_index = np.where(Sparse_layer[layer_id].flatten() != 0)[0]\n\n max_weight = max(nonzero_weight)\n min_weight = min(nonzero_weight)\n space = np.linspace(min_weight, max_weight, num=2 ** BITS)\n kmeans = 
KMeans(n_clusters=len(space), init=space.reshape(-1, 1), n_init=1, precompute_distances=True,\n algorithm=\"full\")\n kmeans.fit(nonzero_weight.reshape(-1, 1))\n # cluster index of each weight\n layer_cluster_index = kmeans.labels_\n # value of the centroids\n layer_centroids = kmeans.cluster_centers_.flatten()\n # Add to dict\n cluster_index[layer_id] = layer_cluster_index\n cluster_centroids[layer_id] = layer_centroids\n\n # set new weight\n new_weight = kmeans.cluster_centers_[kmeans.labels_].flatten()\n for idx in range(len(nonzero_index)):\n index = nonzero_index[idx]\n weight_array[index] = new_weight[idx]\n # new_weight = kmeans.cluster_centers_[kmeans.labels_].reshape(shape)\n # w[0] = new_weight\n w[0] = weight_array.reshape(shape)\n layer.set_weights(w)\n\nscore = model.evaluate(x_test, y_test, verbose=0)\nprint(score[1])\n\n\n# calculate gradient and get the fine-tuned centroids\n# for epoch in range(NUM_EPOCH):\n# for j in range(x_train.shape[0] // BATCH_SIZE):\n# begin = j * BATCH_SIZE\n# if j * BATCH_SIZE + BATCH_SIZE > x_train.shape[0]:\n# end = x_train.shape[0]\n# else:\n# end = j * BATCH_SIZE + BATCH_SIZE\n# X, Y = x_train[begin:end], y_train[begin:end]\n# with tf.GradientTape() as tape:\n# y_predict = model(X)\n# loss = tf.losses.softmax_cross_entropy(onehot_labels=Y, logits=y_predict)\n# grads = tape.gradient(loss, model.variables)\n# gradient_num = 0\n# for layer_id in Sparse_layer:\n# gradient_num += 2\n# gradient = grads[gradient_num].numpy().flatten()\n#\n# # Get the gradient of the nonzero position\n# nonzero_gradient = gradient[Sparse_layer[layer_id].flatten() != 0].flatten()\n# nonzero_index = np.where(Sparse_layer[layer_id].flatten() != 0)[0]\n# # print(len(nonzero_gradient))\n#\n# gradient_index = np.zeros(2 ** BITS)\n# # Calculate the sum of gradient of the same cluster\n# for i in range(len(nonzero_gradient)):\n# gradient_index[cluster_index[layer_id][i]] += gradient[i]\n# # Update centroid\n# fine_tuned_centroids = 
cluster_centroids[layer_id]-LEARNING_RATE*gradient_index\n# cluster_centroids[layer_id] = fine_tuned_centroids\n#\n# w = model.layers[layer_id].get_weights()\n# shape = w[0].shape\n# weight_array = w[0].flatten()\n# new_weight = fine_tuned_centroids[cluster_index[layer_id]]\n# for idx in range(len(nonzero_index)):\n# index = nonzero_index[idx]\n# weight_array[index] = new_weight[idx]\n#\n# w[0] = weight_array.reshape(shape)\n# model.layers[layer_id].set_weights(w)\n# score = model.evaluate(x_test, y_test, verbose=0)\n# print('val loss: {}'.format(score[0]))\n# print('val acc: {}'.format(score[1]))\n\n\nprint('-------------------')\nscore = model.evaluate(x_test, y_test, verbose=0)\nprint(score[1])\n\n\nlayer_relative_index = dict()\nlayer_weight_cluster_index = dict()\n\nNode = namedtuple('Node', ['frequency', 'value', 'left', 'right'])\nNode.__lt__ = lambda x, y: x.frequency < y.frequency\n\n\ndef encode_huffman_tree(root):\n \"\"\"\n Encodes a huffman tree to string of '0's and '1's\n \"\"\"\n # converter = {'float32':float2bitstr, 'int32':int2bitstr}\n code_list = []\n\n def encode_node(node):\n if node.value is not None: # node is leaf node\n code_list.append('1')\n lst = list(int2bitstr(node.value))\n code_list.extend(lst)\n else:\n code_list.append('0')\n encode_node(node.left)\n encode_node(node.right)\n\n encode_node(root)\n return ''.join(code_list)\n\n\ndef int2bitstr(integer):\n four_bytes = struct.pack('>I', integer) # bytes\n return ''.join(f'{byte:08b}' for byte in four_bytes) # string of '0's and '1's\n\n\ndef bitstr2int(bitstr):\n byte_arr = bytearray(int(bitstr[i:i + 8], 2) for i in range(0, len(bitstr), 8))\n return struct.unpack('>I', byte_arr)[0]\n\n\ndef huffman_encode(arr):\n # count the frequency of each number in array\n frequency_map = defaultdict(int)\n for value in np.nditer(arr):\n value = int(value)\n frequency_map[value] += 1\n\n heap = [Node(frequency, value, None, None) for value, frequency in frequency_map.items()]\n 
heapify(heap)\n\n # Merge nodes\n while len(heap) > 1:\n node1 = heappop(heap)\n node2 = heappop(heap)\n merged = Node(node1.frequency + node2.frequency, None, node1, node2)\n heappush(heap, merged)\n\n # Generate code value mapping\n value2code = dict()\n\n def generate_code(node, code):\n if node is None:\n return\n if node.value is not None:\n value2code[node.value] = code\n return\n generate_code(node.left, code + '0')\n generate_code(node.right, code + '1')\n\n root = heappop(heap)\n generate_code(root, '')\n\n data_encoding = ''.join(value2code[int(value)] for value in np.nditer(arr))\n\n codebook_encoding = encode_huffman_tree(root)\n\n return data_encoding, codebook_encoding\n\n\n# Matrix sparsity with relative index\nfor layer_id in Sparse_layer:\n layer = model.layers[layer_id]\n weight = layer.get_weights()\n w = deepcopy(weight)\n shape = w[0].shape\n\n weight_array = w[0].flatten()\n # nonzero_weight = w[0][Sparse_layer[layer_id] != 0].flatten()\n # print(len(nonzero_weight))\n nonzero_weight_cluster_index = cluster_index[layer_id]\n print(len(nonzero_weight_cluster_index))\n nonzero_index = np.where(Sparse_layer[layer_id].flatten() != 0)[0]\n\n first = nonzero_index[0]\n\n relative = np.insert(np.diff(nonzero_index), 0, first)\n\n relative_diff_index = relative.tolist()\n\n weight_cluster_index = nonzero_weight_cluster_index.tolist()\n\n shift = 0\n for i in np.where(relative > MAX_SPAN)[0].tolist():\n while relative_diff_index[i + shift] > MAX_SPAN:\n relative_diff_index.insert(i + shift, MAX_SPAN)\n weight_cluster_index.insert(i + shift, 0)\n shift += 1\n relative_diff_index[i + shift] -= MAX_SPAN\n\n layer_relative_index[layer_id] = np.array(relative_diff_index)\n data_encoding, codebook_encoding = huffman_encode(np.array(weight_cluster_index))\n # layer_weight_cluster_index[layer_id] = np.array(weight_cluster_index)\n layer_weight_cluster_index[layer_id] = np.array([data_encoding, codebook_encoding])\n print('----------------')\n\n# 
print(layer_weight_value[5])\n\n# encode\nfile_name = './result/compressed_model2'\nfile = h5py.File('{}.h5'.format(file_name), mode='w')\n\nfor layer_id in range(len(model.layers)):\n layer = model.layers[layer_id]\n weight = layer.get_weights()\n if len(weight) > 0:\n file_layer = file.create_group(layer.name)\n shape = weight[0].shape\n if layer_id != 0:\n print(len(weight[0].shape))\n pshape = file_layer.create_dataset('shape', np.array(shape).shape, dtype='int32')\n pindex = file_layer.create_dataset('index', layer_relative_index[layer_id].shape, dtype='int32')\n # pcluster_index = file_layer.create_dataset('cluster_index', layer_weight_cluster_index[layer_id].shape,\n # dtype='int32')\n pcluster_index = file_layer.create_dataset('cluster_index', layer_weight_cluster_index[layer_id].shape,\n dtype=h5py.special_dtype(vlen=str))\n\n pcentroid = file_layer.create_dataset('centroid', cluster_centroids[layer_id].shape, dtype='float32')\n pshape[:] = np.array(shape)\n pindex[:] = layer_relative_index[layer_id]\n pcluster_index[:] = layer_weight_cluster_index[layer_id]\n pcentroid[:] = cluster_centroids[layer_id]\n else:\n pweight = file_layer.create_dataset('weight', weight[0].shape, dtype='float32')\n pweight[:] = weight[0]\n pbias = file_layer.create_dataset('bias', weight[1].shape, dtype='float32')\n pbias[:] = weight[1]\n\nfile.flush()\nfile.close()\n\n\n\n",
"step-ids": [
2,
4,
5,
6,
10
]
}
|
[
2,
4,
5,
6,
10
] |
from django.db.models import Model, CharField, IntegerField, ManyToManyField, ForeignKey, PROTECT
from django.core.validators import MaxValueValidator, MinValueValidator
from polymorphic.models import PolymorphicModel
from model_utils import Choices
class Tendencia(Model):
    """Alignment ("tendência") lookup table."""
    # Human-readable alignment name; unique across the table.
    valor = CharField(max_length=16, unique=True)
    # Short (<= 3 chars) unique code for the alignment.
    slug = CharField(max_length=3, unique=True)

    def __str__(self):
        return self.valor
class Bba(Model):
    """Base attack bonus ("BBA") value for one character level."""
    # Character level this BBA entry applies to (1-20).
    nivel = IntegerField(
        validators=[
            MaxValueValidator(20),
            MinValueValidator(1)
        ]
    )
    # The attack-bonus value itself (0-20).
    valor = IntegerField(
        validators=[
            MaxValueValidator(20),
            MinValueValidator(0)
        ]
    )
    # Progression quality: "boa" (good) vs "ruim" (poor) BBA tables.
    QUALIDADE = Choices(('boa', ('BBA Boa')), ('ruim', ('BBA Ruim')))
    qualidade = CharField(choices=QUALIDADE, max_length=4)

    class Meta:
        # One row per (quality, level, value) combination.
        unique_together = ('qualidade', 'nivel', 'valor')

    def __str__(self):
        return 'BBA {} nível {}'.format(self.qualidade, self.nivel)
class Atributo(Model):
    """One of the six ability scores (Força, Destreza, ...)."""
    NOME = Choices(('Força'), ('Destreza'), ('Constituição'),
                   ('Inteligência'), ('Sabedoria'), ('Carisma'))
    nome = CharField(choices=NOME, max_length=12, unique=True)
    # Three-letter abbreviation; presumably matches NOME entry-for-entry -- verify against data.
    SLUG = Choices(('for'), ('des'), ('con'),
                   ('int'), ('sab'), ('car'))
    slug = CharField(choices=SLUG, max_length=3, unique=True)

    def __str__(self):
        return self.nome
class Resistencia(Model):
    """Saving-throw ("resistência") progression entry for one level."""
    # Character level the entry applies to (1-20).
    nivel = IntegerField(
        validators=[
            MaxValueValidator(20),
            MinValueValidator(1)
        ]
    )
    # Saving-throw bonus at that level (0-20).
    valor = IntegerField(
        validators=[
            MaxValueValidator(20),
            MinValueValidator(0)
        ]
    )
    NOME = Choices(('Fortitude'), ('Reflexo'), ('Vontade'))
    nome = CharField(choices=NOME, max_length=9)
    SLUG = Choices(('fort'), ('ref'), ('von'))
    slug = CharField(choices=SLUG, max_length=4)
    # Progression quality: good vs poor save tables.
    QUALIDADE = Choices(('boa', ('Resistencia Boa')), ('ruim', ('Resistencia Ruim')))
    qualidade = CharField(choices=QUALIDADE, max_length=4)
    # Ability score associated with this saving throw.
    atributo = ForeignKey(Atributo, related_name='+', on_delete=PROTECT)

    class Meta:
        unique_together = ('slug', 'qualidade', 'nivel', 'valor')

    def __str__(self):
        return '{} {} nivel {}'.format(self.nome, self.qualidade, self.nivel)
class Pericia(Model):
    """A skill ("perícia") keyed by its governing ability score."""
    nome = CharField(max_length=37)
    slug = CharField(max_length=37, unique=True)
    # Ability score that governs this skill's checks.
    atributo = ForeignKey(Atributo, related_name='+', on_delete=PROTECT)

    def __str__(self):
        return self.nome
class Classe(PolymorphicModel):
    """A character class: skills, BBA/save progressions and allowed alignments."""
    nome = CharField(max_length=20)
    slug = CharField(max_length=20, unique=True)
    pericias = ManyToManyField(Pericia, related_name='+')
    # Number of class skills a character may pick per level (>= 1).
    quantidade_pericias_por_nivel = IntegerField(
        validators=[
            MinValueValidator(1)
        ]
    )
    bbas = ManyToManyField(Bba, related_name='+')
    resistencias = ManyToManyField(Resistencia, related_name='+')
    tendencias = ManyToManyField(Tendencia, related_name='+')
    # Hit die size for the class.
    DV = Choices((4, ('d4')), (6, ('d6')), (8, ('d8')), (10, ('d10')), (12, ('d12')))
    dv = IntegerField(choices=DV)
    CONJURADOR = Choices(('div', ('Divino')), ('arc', ('Arcano')), ('nan', ('Não conjurador')))
    conjurador = CharField(choices=CONJURADOR, default=CONJURADOR.nan, max_length=3)

    # BUGFIX: many-to-many managers expose add(), not append(); the original
    # add_* helpers raised AttributeError on every call.
    def add_tendencia(self, tendencia):
        self.tendencias.add(tendencia)

    def add_pericia(self, pericia):
        self.pericias.add(pericia)

    def add_bba(self, bba):
        self.bbas.add(bba)

    def add_resistencia(self, resistencia):
        self.resistencias.add(resistencia)

    def get_bba_nivel(self, nivel):
        """Return the BBA value for *nivel*, or 0 if no entry matches."""
        for bba in self.bbas.all():
            assert isinstance(bba, Bba)
            if bba.nivel == nivel:
                return bba.valor
        return 0

    def __str__(self):
        return self.nome
class ClassePrestigio(Classe):
    """Prestige class; a polymorphic subtype marker with Classe's behavior."""
    pass
class Tipo(Model):
    """Creature type lookup (name + unique slug)."""
    nome = CharField(max_length=14)
    slug = CharField(max_length=14, unique=True)

    def __str__(self):
        return self.nome
class Raca(Model):
    """Race ("raça") lookup (name + unique slug)."""
    nome = CharField(max_length=14)
    slug = CharField(max_length=14, unique=True)

    def __str__(self):
        return self.nome
class Modelo(Model):
    """Template ("modelo") lookup (name + unique slug)."""
    nome = CharField(max_length=20)
    slug = CharField(max_length=20, unique=True)

    def __str__(self):
        return self.nome
|
normal
|
{
"blob_id": "c07454dfb9dabb89c86f63063231ae9cf915aa38",
"index": 4116,
"step-1": "<mask token>\n\n\nclass Classe(PolymorphicModel):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def add_tendencia(self, tendencia):\n self.tendencias.append(tendencia)\n <mask token>\n\n def add_bba(self, bba):\n self.bbas.append(bba)\n\n def add_resistencia(self, resistencia):\n self.resistencias.append(resistencia)\n <mask token>\n\n def __str__(self):\n return self.nome\n\n\nclass ClassePrestigio(Classe):\n pass\n\n\nclass Tipo(Model):\n nome = CharField(max_length=14)\n slug = CharField(max_length=14, unique=True)\n\n def __str__(self):\n return self.nome\n\n\nclass Raca(Model):\n nome = CharField(max_length=14)\n slug = CharField(max_length=14, unique=True)\n\n def __str__(self):\n return self.nome\n\n\nclass Modelo(Model):\n nome = CharField(max_length=20)\n slug = CharField(max_length=20, unique=True)\n\n def __str__(self):\n return self.nome\n",
"step-2": "<mask token>\n\n\nclass Pericia(Model):\n <mask token>\n <mask token>\n <mask token>\n\n def __str__(self):\n return self.nome\n\n\nclass Classe(PolymorphicModel):\n nome = CharField(max_length=20)\n slug = CharField(max_length=20, unique=True)\n pericias = ManyToManyField(Pericia, related_name='+')\n quantidade_pericias_por_nivel = IntegerField(validators=[\n MinValueValidator(1)])\n bbas = ManyToManyField(Bba, related_name='+')\n resistencias = ManyToManyField(Resistencia, related_name='+')\n tendencias = ManyToManyField(Tendencia, related_name='+')\n DV = Choices((4, 'd4'), (6, 'd6'), (8, 'd8'), (10, 'd10'), (12, 'd12'))\n dv = IntegerField(choices=DV)\n CONJURADOR = Choices(('div', 'Divino'), ('arc', 'Arcano'), ('nan',\n 'Não conjurador'))\n conjurador = CharField(choices=CONJURADOR, default=CONJURADOR.nan,\n max_length=3)\n\n def add_tendencia(self, tendencia):\n self.tendencias.append(tendencia)\n\n def add_pericia(self, pericia):\n self.pericias.append(pericia)\n\n def add_bba(self, bba):\n self.bbas.append(bba)\n\n def add_resistencia(self, resistencia):\n self.resistencias.append(resistencia)\n\n def get_bba_nivel(self, nivel):\n for bba in self.bbas.all():\n assert isinstance(bba, Bba)\n if bba.nivel == nivel:\n return bba.valor\n return 0\n\n def __str__(self):\n return self.nome\n\n\nclass ClassePrestigio(Classe):\n pass\n\n\nclass Tipo(Model):\n nome = CharField(max_length=14)\n slug = CharField(max_length=14, unique=True)\n\n def __str__(self):\n return self.nome\n\n\nclass Raca(Model):\n nome = CharField(max_length=14)\n slug = CharField(max_length=14, unique=True)\n\n def __str__(self):\n return self.nome\n\n\nclass Modelo(Model):\n nome = CharField(max_length=20)\n slug = CharField(max_length=20, unique=True)\n\n def __str__(self):\n return self.nome\n",
"step-3": "<mask token>\n\n\nclass Resistencia(Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n class Meta:\n unique_together = 'slug', 'qualidade', 'nivel', 'valor'\n\n def __str__(self):\n return '{} {} nivel {}'.format(self.nome, self.qualidade, self.nivel)\n\n\nclass Pericia(Model):\n nome = CharField(max_length=37)\n slug = CharField(max_length=37, unique=True)\n atributo = ForeignKey(Atributo, related_name='+', on_delete=PROTECT)\n\n def __str__(self):\n return self.nome\n\n\nclass Classe(PolymorphicModel):\n nome = CharField(max_length=20)\n slug = CharField(max_length=20, unique=True)\n pericias = ManyToManyField(Pericia, related_name='+')\n quantidade_pericias_por_nivel = IntegerField(validators=[\n MinValueValidator(1)])\n bbas = ManyToManyField(Bba, related_name='+')\n resistencias = ManyToManyField(Resistencia, related_name='+')\n tendencias = ManyToManyField(Tendencia, related_name='+')\n DV = Choices((4, 'd4'), (6, 'd6'), (8, 'd8'), (10, 'd10'), (12, 'd12'))\n dv = IntegerField(choices=DV)\n CONJURADOR = Choices(('div', 'Divino'), ('arc', 'Arcano'), ('nan',\n 'Não conjurador'))\n conjurador = CharField(choices=CONJURADOR, default=CONJURADOR.nan,\n max_length=3)\n\n def add_tendencia(self, tendencia):\n self.tendencias.append(tendencia)\n\n def add_pericia(self, pericia):\n self.pericias.append(pericia)\n\n def add_bba(self, bba):\n self.bbas.append(bba)\n\n def add_resistencia(self, resistencia):\n self.resistencias.append(resistencia)\n\n def get_bba_nivel(self, nivel):\n for bba in self.bbas.all():\n assert isinstance(bba, Bba)\n if bba.nivel == nivel:\n return bba.valor\n return 0\n\n def __str__(self):\n return self.nome\n\n\nclass ClassePrestigio(Classe):\n pass\n\n\nclass Tipo(Model):\n nome = CharField(max_length=14)\n slug = CharField(max_length=14, unique=True)\n\n def __str__(self):\n return self.nome\n\n\nclass Raca(Model):\n nome = 
CharField(max_length=14)\n slug = CharField(max_length=14, unique=True)\n\n def __str__(self):\n return self.nome\n\n\nclass Modelo(Model):\n nome = CharField(max_length=20)\n slug = CharField(max_length=20, unique=True)\n\n def __str__(self):\n return self.nome\n",
"step-4": "<mask token>\n\n\nclass Atributo(Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass Resistencia(Model):\n nivel = IntegerField(validators=[MaxValueValidator(20),\n MinValueValidator(1)])\n valor = IntegerField(validators=[MaxValueValidator(20),\n MinValueValidator(0)])\n NOME = Choices('Fortitude', 'Reflexo', 'Vontade')\n nome = CharField(choices=NOME, max_length=9)\n SLUG = Choices('fort', 'ref', 'von')\n slug = CharField(choices=SLUG, max_length=4)\n QUALIDADE = Choices(('boa', 'Resistencia Boa'), ('ruim',\n 'Resistencia Ruim'))\n qualidade = CharField(choices=QUALIDADE, max_length=4)\n atributo = ForeignKey(Atributo, related_name='+', on_delete=PROTECT)\n\n\n class Meta:\n unique_together = 'slug', 'qualidade', 'nivel', 'valor'\n\n def __str__(self):\n return '{} {} nivel {}'.format(self.nome, self.qualidade, self.nivel)\n\n\nclass Pericia(Model):\n nome = CharField(max_length=37)\n slug = CharField(max_length=37, unique=True)\n atributo = ForeignKey(Atributo, related_name='+', on_delete=PROTECT)\n\n def __str__(self):\n return self.nome\n\n\nclass Classe(PolymorphicModel):\n nome = CharField(max_length=20)\n slug = CharField(max_length=20, unique=True)\n pericias = ManyToManyField(Pericia, related_name='+')\n quantidade_pericias_por_nivel = IntegerField(validators=[\n MinValueValidator(1)])\n bbas = ManyToManyField(Bba, related_name='+')\n resistencias = ManyToManyField(Resistencia, related_name='+')\n tendencias = ManyToManyField(Tendencia, related_name='+')\n DV = Choices((4, 'd4'), (6, 'd6'), (8, 'd8'), (10, 'd10'), (12, 'd12'))\n dv = IntegerField(choices=DV)\n CONJURADOR = Choices(('div', 'Divino'), ('arc', 'Arcano'), ('nan',\n 'Não conjurador'))\n conjurador = CharField(choices=CONJURADOR, default=CONJURADOR.nan,\n max_length=3)\n\n def add_tendencia(self, tendencia):\n self.tendencias.append(tendencia)\n\n def add_pericia(self, pericia):\n self.pericias.append(pericia)\n\n def add_bba(self, bba):\n 
self.bbas.append(bba)\n\n def add_resistencia(self, resistencia):\n self.resistencias.append(resistencia)\n\n def get_bba_nivel(self, nivel):\n for bba in self.bbas.all():\n assert isinstance(bba, Bba)\n if bba.nivel == nivel:\n return bba.valor\n return 0\n\n def __str__(self):\n return self.nome\n\n\nclass ClassePrestigio(Classe):\n pass\n\n\nclass Tipo(Model):\n nome = CharField(max_length=14)\n slug = CharField(max_length=14, unique=True)\n\n def __str__(self):\n return self.nome\n\n\nclass Raca(Model):\n nome = CharField(max_length=14)\n slug = CharField(max_length=14, unique=True)\n\n def __str__(self):\n return self.nome\n\n\nclass Modelo(Model):\n nome = CharField(max_length=20)\n slug = CharField(max_length=20, unique=True)\n\n def __str__(self):\n return self.nome\n",
"step-5": "from django.db.models import Model, CharField, IntegerField, ManyToManyField, ForeignKey, PROTECT\nfrom django.core.validators import MaxValueValidator, MinValueValidator\nfrom polymorphic.models import PolymorphicModel\nfrom model_utils import Choices\n\n\nclass Tendencia(Model):\n valor = CharField(max_length=16, unique=True)\n slug = CharField(max_length=3, unique=True)\n\n def __str__(self):\n return self.valor\n\n\nclass Bba(Model):\n nivel = IntegerField(\n validators=[\n MaxValueValidator(20),\n MinValueValidator(1)\n ]\n )\n valor = IntegerField(\n validators=[\n MaxValueValidator(20),\n MinValueValidator(0)\n ]\n )\n QUALIDADE = Choices(('boa', ('BBA Boa')), ('ruim', ('BBA Ruim')))\n qualidade = CharField(choices=QUALIDADE, max_length=4)\n\n class Meta:\n unique_together = ('qualidade', 'nivel', 'valor')\n\n def __str__(self):\n return 'BBA {} nível {}'.format(self.qualidade, self.nivel)\n\n\nclass Atributo(Model):\n NOME = Choices(('Força'), ('Destreza'), ('Constituição'),\n ('Inteligência'), ('Sabedoria'), ('Carisma'))\n nome = CharField(choices=NOME, max_length=12, unique=True)\n SLUG = Choices(('for'), ('des'), ('con'),\n ('int'), ('sab'), ('car'))\n slug = CharField(choices=SLUG, max_length=3, unique=True)\n\n def __str__(self):\n return self.nome\n\n\nclass Resistencia(Model):\n nivel = IntegerField(\n validators=[\n MaxValueValidator(20),\n MinValueValidator(1)\n ]\n )\n valor = IntegerField(\n validators=[\n MaxValueValidator(20),\n MinValueValidator(0)\n ]\n )\n NOME = Choices(('Fortitude'), ('Reflexo'), ('Vontade'))\n nome = CharField(choices=NOME, max_length=9)\n SLUG = Choices(('fort'), ('ref'), ('von'))\n slug = CharField(choices=SLUG, max_length=4)\n QUALIDADE = Choices(('boa', ('Resistencia Boa')), ('ruim', ('Resistencia Ruim')))\n qualidade = CharField(choices=QUALIDADE, max_length=4)\n atributo = ForeignKey(Atributo, related_name='+', on_delete=PROTECT)\n\n class Meta:\n unique_together = ('slug', 'qualidade', 'nivel', 
'valor')\n\n def __str__(self):\n return '{} {} nivel {}'.format(self.nome, self.qualidade, self.nivel)\n\n\nclass Pericia(Model):\n nome = CharField(max_length=37)\n slug = CharField(max_length=37, unique=True)\n atributo = ForeignKey(Atributo, related_name='+', on_delete=PROTECT)\n\n def __str__(self):\n return self.nome\n\n\nclass Classe(PolymorphicModel):\n nome = CharField(max_length=20)\n slug = CharField(max_length=20, unique=True)\n pericias = ManyToManyField(Pericia, related_name='+')\n quantidade_pericias_por_nivel = IntegerField(\n validators=[\n MinValueValidator(1)\n ]\n )\n bbas = ManyToManyField(Bba, related_name='+')\n resistencias = ManyToManyField(Resistencia, related_name='+')\n tendencias = ManyToManyField(Tendencia, related_name='+')\n DV = Choices((4, ('d4')), (6, ('d6')), (8, ('d8')), (10, ('d10')), (12, ('d12')))\n dv = IntegerField(choices=DV)\n CONJURADOR = Choices(('div', ('Divino')), ('arc', ('Arcano')), ('nan', ('Não conjurador')))\n conjurador = CharField(choices=CONJURADOR, default=CONJURADOR.nan, max_length=3)\n\n # conjurador_completo = BooleanField(default=True)\n\n def add_tendencia(self, tendencia):\n self.tendencias.append(tendencia)\n\n def add_pericia(self, pericia):\n self.pericias.append(pericia)\n\n def add_bba(self, bba):\n self.bbas.append(bba)\n\n def add_resistencia(self, resistencia):\n self.resistencias.append(resistencia)\n\n def get_bba_nivel(self, nivel):\n for bba in self.bbas.all():\n assert isinstance(bba, Bba)\n if bba.nivel == nivel:\n return bba.valor\n return 0\n\n def __str__(self):\n return self.nome\n\n\nclass ClassePrestigio(Classe):\n pass\n\n\nclass Tipo(Model):\n nome = CharField(max_length=14)\n slug = CharField(max_length=14, unique=True)\n\n def __str__(self):\n return self.nome\n\n\nclass Raca(Model):\n nome = CharField(max_length=14)\n slug = CharField(max_length=14, unique=True)\n\n def __str__(self):\n return self.nome\n\n\nclass Modelo(Model):\n nome = CharField(max_length=20)\n slug = 
CharField(max_length=20, unique=True)\n\n def __str__(self):\n return self.nome\n",
"step-ids": [
15,
20,
23,
25,
35
]
}
|
[
15,
20,
23,
25,
35
] |
<|reserved_special_token_0|>
def loadData(fileDj):
data = []
fid = open(fileDj)
for line in fid:
line = line.strip()
m = [float(x) for x in line.split(' ')]
data.append(m)
return data
def getInitialCentroids(X, k):
initialCentroids = []
for i in range(k):
index = random.randint(0, len(X))
initialCentroids.append(X[index])
return initialCentroids
def visualizeClusters(clusters):
for i in range(len(clusters)):
clusters[i] = np.array(clusters[i])
plt.plot(clusters[0][:, 0], clusters[0][:, 1], 'rs', clusters[1][:, 0],
clusters[1][:, 1], 'bs')
plt.show()
return
<|reserved_special_token_0|>
def euclidean_dist(data, centroids, clusters):
centroids = np.array(centroids)
for instance in data:
instance = np.array(instance)
mu_index = min([(i[0], np.linalg.norm(instance - centroids[i[0]])) for
i in enumerate(centroids)], key=lambda t: t[1])[0]
try:
clusters[mu_index].append(instance)
except KeyError:
clusters[mu_index] = [instance]
for cluster in clusters:
if not cluster:
cluster.append(data[np.random.randint(0, len(data), size=1)].
flatten().tolist())
return clusters
def kmeans(X, k, maxIter=1000):
centroids = getInitialCentroids(X, k)
old_centroids = [[] for i in range(k)]
iterations = 0
while not has_converged(centroids, old_centroids, iterations):
iterations += 1
clusters = [[] for i in range(k)]
clusters = euclidean_dist(X, centroids, clusters)
index = 0
for cluster in clusters:
old_centroids[index] = centroids[index]
centroids[index] = np.mean(cluster, axis=0).tolist()
index += 1
visualizeClusters(clusters)
return clusters
def kmeans_(X, k, maxIter=1000):
centroids = getInitialCentroids(X, k)
old_centroids = [[] for i in range(k)]
iterations = 0
while not has_converged(centroids, old_centroids, iterations):
iterations += 1
clusters = [[] for i in range(k)]
clusters = euclidean_dist(X, centroids, clusters)
index = 0
for cluster in clusters:
old_centroids[index] = centroids[index]
centroids[index] = np.mean(cluster, axis=0).tolist()
index += 1
return clusters
def Func(clusters):
center = []
for i in range(len(clusters)):
center.append(clusters[i][0])
distSum = 0
for i in range(len(clusters)):
for j in range(1, len(clusters[i])):
distSum += np.linalg.norm(center[i] - clusters[i][j])
return distSum
def kneeFinding(X, kList):
obj = []
for i in kList:
obj.append(Func(kmeans_(X, i)))
plt.plot(range(1, 7), obj)
plt.show()
return
def purity(X, clusters):
purities = []
for i in range(2):
count = 0
for idx in range(len(clusters[i])):
if int(clusters[i][idx][2]) == 1:
count += 1
purity = count * 1.0 / len(clusters[i])
if purity > 0.5:
purities.append(purity)
else:
purities.append(1 - purity)
return purities
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def loadData(fileDj):
data = []
fid = open(fileDj)
for line in fid:
line = line.strip()
m = [float(x) for x in line.split(' ')]
data.append(m)
return data
def getInitialCentroids(X, k):
initialCentroids = []
for i in range(k):
index = random.randint(0, len(X))
initialCentroids.append(X[index])
return initialCentroids
def visualizeClusters(clusters):
for i in range(len(clusters)):
clusters[i] = np.array(clusters[i])
plt.plot(clusters[0][:, 0], clusters[0][:, 1], 'rs', clusters[1][:, 0],
clusters[1][:, 1], 'bs')
plt.show()
return
<|reserved_special_token_0|>
def euclidean_dist(data, centroids, clusters):
centroids = np.array(centroids)
for instance in data:
instance = np.array(instance)
mu_index = min([(i[0], np.linalg.norm(instance - centroids[i[0]])) for
i in enumerate(centroids)], key=lambda t: t[1])[0]
try:
clusters[mu_index].append(instance)
except KeyError:
clusters[mu_index] = [instance]
for cluster in clusters:
if not cluster:
cluster.append(data[np.random.randint(0, len(data), size=1)].
flatten().tolist())
return clusters
def kmeans(X, k, maxIter=1000):
centroids = getInitialCentroids(X, k)
old_centroids = [[] for i in range(k)]
iterations = 0
while not has_converged(centroids, old_centroids, iterations):
iterations += 1
clusters = [[] for i in range(k)]
clusters = euclidean_dist(X, centroids, clusters)
index = 0
for cluster in clusters:
old_centroids[index] = centroids[index]
centroids[index] = np.mean(cluster, axis=0).tolist()
index += 1
visualizeClusters(clusters)
return clusters
def kmeans_(X, k, maxIter=1000):
centroids = getInitialCentroids(X, k)
old_centroids = [[] for i in range(k)]
iterations = 0
while not has_converged(centroids, old_centroids, iterations):
iterations += 1
clusters = [[] for i in range(k)]
clusters = euclidean_dist(X, centroids, clusters)
index = 0
for cluster in clusters:
old_centroids[index] = centroids[index]
centroids[index] = np.mean(cluster, axis=0).tolist()
index += 1
return clusters
def Func(clusters):
center = []
for i in range(len(clusters)):
center.append(clusters[i][0])
distSum = 0
for i in range(len(clusters)):
for j in range(1, len(clusters[i])):
distSum += np.linalg.norm(center[i] - clusters[i][j])
return distSum
def kneeFinding(X, kList):
obj = []
for i in kList:
obj.append(Func(kmeans_(X, i)))
plt.plot(range(1, 7), obj)
plt.show()
return
def purity(X, clusters):
purities = []
for i in range(2):
count = 0
for idx in range(len(clusters[i])):
if int(clusters[i][idx][2]) == 1:
count += 1
purity = count * 1.0 / len(clusters[i])
if purity > 0.5:
purities.append(purity)
else:
purities.append(1 - purity)
return purities
<|reserved_special_token_0|>
def main():
datadir = ''
pathDataset1 = datadir + 'humanData.txt'
dataset1 = loadData(pathDataset1)
kneeFinding(dataset1, range(1, 7))
clusters = kmeans(dataset1, 2, maxIter=1000)
purity(dataset1, clusters)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def loadData(fileDj):
    """Load a whitespace-separated numeric dataset: one list of floats per line.

    Parameters: fileDj -- path to the text file.
    Returns: list of rows, each a list of floats.
    """
    data = []
    # Context manager so the descriptor is always closed
    # (the original opened the file and never closed it).
    with open(fileDj) as fid:
        for line in fid:
            line = line.strip()
            if not line:
                continue  # skip blank lines instead of crashing on float('')
            data.append([float(x) for x in line.split()])
    return data
def getInitialCentroids(X, k):
    """Pick k starting centroids by sampling random points of X (with replacement)."""
    initialCentroids = []
    for _ in range(k):
        # BUGFIX: random.randint is inclusive on both ends, so the original
        # randint(0, len(X)) could yield len(X) and raise IndexError.
        # randrange yields a valid index in [0, len(X)).
        index = random.randrange(len(X))
        initialCentroids.append(X[index])
    return initialCentroids
def visualizeClusters(clusters):
    """Scatter-plot the first two clusters (red vs. blue squares).

    NOTE(review): mutates *clusters* in place, replacing each member list
    with a numpy array, and plots only clusters[0] and clusters[1] even if
    more are supplied -- presumably callers always pass k == 2; confirm.
    """
    for i in range(len(clusters)):
        clusters[i] = np.array(clusters[i])
    # Points are assumed 2-D: column 0 on the x axis, column 1 on the y axis.
    plt.plot(clusters[0][:, 0], clusters[0][:, 1], 'rs', clusters[1][:, 0],
             clusters[1][:, 1], 'bs')
    plt.show()
    return
def has_converged(centroids, old_centroids, iterations):
    """True when the centroids stopped moving or the iteration cap is exceeded."""
    MAX_ITERATIONS = 100
    return iterations > MAX_ITERATIONS or old_centroids == centroids
def euclidean_dist(data, centroids, clusters):
    """Assign every point in *data* to its nearest centroid (L2 distance).

    Appends each point (as a numpy array) to the matching entry of
    *clusters*, reseeds any cluster that ended up empty with a random data
    point, and returns the updated *clusters*.
    """
    centroids = np.array(centroids)
    for instance in data:
        instance = np.array(instance)
        # Index of the centroid closest to this point.
        mu_index = min([(i[0], np.linalg.norm(instance - centroids[i[0]]))
                        for i in enumerate(centroids)], key=lambda t: t[1])[0]
        try:
            clusters[mu_index].append(instance)
        except KeyError:
            clusters[mu_index] = [instance]
    for cluster in clusters:
        if not cluster:
            # BUGFIX: the original indexed the Python list *data* with a
            # size-1 ndarray (np.random.randint(..., size=1)), which raises
            # TypeError; use a plain integer index instead.
            cluster.append(np.array(data[np.random.randint(0, len(data))]))
    return clusters
def kmeans(X, k, maxIter=1000):
    """Lloyd's k-means; plots the final clusters before returning them.

    NOTE(review): maxIter is accepted but never used — the stop
    condition lives entirely in has_converged (100-iteration cap).
    """
    centroids = getInitialCentroids(X, k)
    old_centroids = [[] for i in range(k)]
    iterations = 0
    while not has_converged(centroids, old_centroids, iterations):
        iterations += 1
        clusters = [[] for i in range(k)]
        # Assignment step: each point joins its nearest centroid's cluster.
        clusters = euclidean_dist(X, centroids, clusters)
        # Update step: each centroid moves to the mean of its members.
        index = 0
        for cluster in clusters:
            old_centroids[index] = centroids[index]
            centroids[index] = np.mean(cluster, axis=0).tolist()
            index += 1
    visualizeClusters(clusters)
    return clusters
def kmeans_(X, k, maxIter=1000):
    """Same as kmeans() but without the plot (used by kneeFinding).

    NOTE(review): maxIter is accepted but never used.
    """
    centroids = getInitialCentroids(X, k)
    old_centroids = [[] for i in range(k)]
    iterations = 0
    while not has_converged(centroids, old_centroids, iterations):
        iterations += 1
        clusters = [[] for i in range(k)]
        # Assignment step.
        clusters = euclidean_dist(X, centroids, clusters)
        # Update step: recompute each centroid as its cluster mean.
        index = 0
        for cluster in clusters:
            old_centroids[index] = centroids[index]
            centroids[index] = np.mean(cluster, axis=0).tolist()
            index += 1
    return clusters
def Func(clusters):
    """Objective: sum of distances from members to each cluster's first
    point (used as a stand-in for the cluster center).

    NOTE(review): a textbook distortion would measure against the mean
    of each cluster, not its first member — confirm intent.
    """
    center = []
    for i in range(len(clusters)):
        center.append(clusters[i][0])
    distSum = 0
    for i in range(len(clusters)):
        for j in range(1, len(clusters[i])):
            distSum += np.linalg.norm(center[i] - clusters[i][j])
    return distSum
def kneeFinding(X, kList):
    """Elbow plot of the clustering objective for each k in kList.

    NOTE(review): the x-axis is hard-coded to range(1, 7) and will not
    match a different kList.
    """
    obj = []
    for i in kList:
        obj.append(Func(kmeans_(X, i)))
    plt.plot(range(1, 7), obj)
    plt.show()
    return
def purity(X, clusters):
    """Majority-label share per cluster (labels live at column index 2).

    NOTE(review): X is unused and exactly two clusters are assumed.
    """
    purities = []
    for i in range(2):
        count = 0
        for idx in range(len(clusters[i])):
            if int(clusters[i][idx][2]) == 1:
                count += 1
        # Report the dominant label's share: max(p, 1 - p).
        purity = count * 1.0 / len(clusters[i])
        if purity > 0.5:
            purities.append(purity)
        else:
            purities.append(1 - purity)
    return purities
<|reserved_special_token_0|>
def main():
    """Load humanData.txt, run the elbow analysis, then cluster with k=2."""
    datadir = ''
    pathDataset1 = datadir + 'humanData.txt'
    dataset1 = loadData(pathDataset1)
    # Elbow plot over k = 1..6 to pick a cluster count.
    kneeFinding(dataset1, range(1, 7))
    # Cluster with k = 2 and measure per-cluster label purity.
    clusters = kmeans(dataset1, 2, maxIter=1000)
    purity(dataset1, clusters)
<|reserved_special_token_0|>
# Script entry point: only run when executed directly.
if __name__ == '__main__':
    main()
<|reserved_special_token_1|>
import sys
import numpy as np
import random
import matplotlib.pyplot as plt
def loadData(fileDj):
    """Load a whitespace-separated numeric dataset.

    Each non-blank line of *fileDj* becomes one row: a list of floats,
    one per whitespace-separated token.

    :param fileDj: path to the text data file
    :return: list of rows, each a list of floats
    """
    data = []
    # 'with' guarantees the handle is closed even on parse errors
    # (the original left the file open).
    with open(fileDj) as fid:
        for line in fid:
            line = line.strip()
            if not line:
                continue  # skip blank lines instead of crashing on float('')
            data.append([float(x) for x in line.split()])
    return data
def getInitialCentroids(X, k):
    """Pick k distinct data points from X as the initial centroids.

    Fixes an off-by-one in the original (random.randint(0, len(X)) is
    inclusive, so it can return len(X) and raise IndexError) and avoids
    drawing the same point twice, which would seed k-means with
    duplicate centroids.

    :param X: list of data points (each a list of floats)
    :param k: number of centroids to draw (must be <= len(X))
    :return: list of k distinct rows of X
    """
    indices = random.sample(range(len(X)), k)
    return [X[i] for i in indices]
def visualizeClusters(clusters):
    """Scatter-plot the clusters, one marker color per cluster.

    NOTE(review): mutates *clusters* in place by converting each member
    list to a numpy array — preserved because callers may rely on it.
    """
    for i in range(len(clusters)):
        clusters[i] = np.array(clusters[i])
    # Generalized from the original, which only plotted clusters[0] and
    # clusters[1]; cycle through marker styles for any cluster count.
    styles = ['rs', 'bs', 'gs', 'ms', 'cs', 'ys', 'ks']
    for i, cluster in enumerate(clusters):
        plt.plot(cluster[:, 0], cluster[:, 1], styles[i % len(styles)])
    plt.show()
    return
def has_converged(centroids, old_centroids, iterations):
    """Stop criterion: centroids stopped moving, or 100 iterations passed.

    The hard cap guards against oscillating assignments; note the
    caller's maxIter argument is not consulted here.
    """
    MAX_ITERATIONS = 100
    return iterations > MAX_ITERATIONS or centroids == old_centroids
def euclidean_dist(data, centroids, clusters):
    """Assign each point in *data* to its nearest centroid (L2 norm).

    :param data: iterable of data points (lists of floats)
    :param centroids: current centroid positions
    :param clusters: list of k lists to fill with assigned points
    :return: *clusters*, where clusters[i] holds the numpy rows nearest
        centroids[i]; any empty cluster is re-seeded with one random
        data point so the mean computed by the caller stays defined
    """
    centroids = np.array(centroids)
    for instance in data:
        instance = np.array(instance)
        # Index of the closest centroid. (The dead except-KeyError branch
        # from the original is dropped: list.append cannot raise KeyError.)
        mu_index = min(
            ((i, np.linalg.norm(instance - c)) for i, c in enumerate(centroids)),
            key=lambda t: t[1],
        )[0]
        clusters[mu_index].append(instance)
    for cluster in clusters:
        if not cluster:
            # Bug fix: the original indexed the Python list `data` with a
            # size-1 ndarray (TypeError). Use a plain integer index.
            cluster.append(np.array(data[np.random.randint(len(data))]))
    return clusters
def kmeans(X, k, maxIter=1000):
    """Cluster X into k groups with Lloyd's algorithm and plot the result.

    :param X: list of data points
    :param k: number of clusters
    :param maxIter: iteration cap; honored now (the original accepted it
        but never used it), in addition to has_converged's internal
        100-iteration cap. Must be >= 1.
    :return: list of k clusters (lists of numpy rows)
    """
    centroids = getInitialCentroids(X, k)
    old_centroids = [[] for _ in range(k)]
    clusters = [[] for _ in range(k)]
    iterations = 0
    while iterations < maxIter and not has_converged(centroids, old_centroids, iterations):
        iterations += 1
        clusters = [[] for _ in range(k)]
        # Assignment step: each point joins its nearest centroid's cluster.
        clusters = euclidean_dist(X, centroids, clusters)
        # Update step: each centroid moves to the mean of its members.
        for index, cluster in enumerate(clusters):
            old_centroids[index] = centroids[index]
            centroids[index] = np.mean(cluster, axis=0).tolist()
    visualizeClusters(clusters)
    return clusters
def kmeans_(X, k, maxIter=1000):
    """Lloyd's k-means without the final plot (used by kneeFinding).

    :param X: list of data points
    :param k: number of clusters
    :param maxIter: iteration cap; honored now (the original accepted it
        but never used it), in addition to has_converged's internal
        100-iteration cap. Must be >= 1.
    :return: list of k clusters (lists of numpy rows)
    """
    centroids = getInitialCentroids(X, k)
    old_centroids = [[] for _ in range(k)]
    clusters = [[] for _ in range(k)]
    iterations = 0
    while iterations < maxIter and not has_converged(centroids, old_centroids, iterations):
        iterations += 1
        clusters = [[] for _ in range(k)]
        # Assignment step.
        clusters = euclidean_dist(X, centroids, clusters)
        # Update step: recompute each centroid as its cluster mean.
        for index, cluster in enumerate(clusters):
            old_centroids[index] = centroids[index]
            centroids[index] = np.mean(cluster, axis=0).tolist()
    return clusters
def Func(clusters):
    """Clustering objective: sum of distances from every member to its
    cluster's first point (used as a proxy for the cluster center).

    NOTE(review): a textbook distortion would measure against each
    cluster's mean, not its first member — kept as-is to preserve the
    original objective.
    """
    total = 0
    for cluster in clusters:
        anchor = cluster[0]
        for member in cluster[1:]:
            total += np.linalg.norm(anchor - member)
    return total
def kneeFinding(X, kList):
    """Plot the clustering objective for each k in kList (elbow method).

    :param X: dataset (list of float rows)
    :param kList: iterable of candidate cluster counts
    """
    kValues = list(kList)  # kList may be a one-shot iterator; snapshot it
    objectives = [Func(kmeans_(X, k)) for k in kValues]
    # Bug fix: the original plotted against a hard-coded range(1, 7)
    # regardless of which kList was actually evaluated.
    plt.plot(kValues, objectives)
    plt.show()
    return
def purity(X, clusters):
    """Fraction of the majority class in each cluster.

    Counts how many members of each cluster carry label 1 (stored at
    column index 2 of every row) and reports max(p, 1 - p), i.e. the
    share of the dominant label.

    :param X: unused; kept for interface compatibility
    :param clusters: list of clusters, each a list of rows whose third
        column is a 0/1 class label
    :return: list of per-cluster purities in [0.5, 1.0]
    """
    purities = []
    # Generalized from the original hard-coded range(2) to any number of
    # clusters; identical results for the k=2 case.
    for cluster in clusters:
        count = sum(1 for row in cluster if int(row[2]) == 1)
        p = count * 1.0 / len(cluster)
        purities.append(p if p > 0.5 else 1 - p)
    return purities
<|reserved_special_token_0|>
def main():
    """Run the elbow analysis and a k=2 clustering on humanData.txt."""
    datadir = ''
    dataset_path = datadir + 'humanData.txt'
    points = loadData(dataset_path)
    # Elbow plot over k = 1..6 to pick a cluster count.
    kneeFinding(points, range(1, 7))
    # Cluster with k = 2 and measure per-cluster label purity.
    clusters = kmeans(points, 2, maxIter=1000)
    purity(points, clusters)
<|reserved_special_token_0|>
# Script entry point: only run when executed directly.
if __name__ == '__main__':
    main()
<|reserved_special_token_1|>
#!/usr/bin/python
import sys
import numpy as np
import random
import matplotlib.pyplot as plt
#Your code here
def loadData(fileDj):
    """Read a space-separated numeric text file into a list of float rows.

    NOTE(review): the file handle is never closed; a 'with' block would
    be safer. A blank line would crash float('').
    """
    data = []
    fid = open(fileDj)
    for line in fid:
        line = line.strip()
        m = [float(x) for x in line.split(' ')]
        data.append(m)
    return data
## K-means functions
def getInitialCentroids(X, k):
    """Draw k random rows of X as starting centroids.

    NOTE(review): random.randint(0, len(X)) is inclusive on both ends,
    so index == len(X) is possible and raises IndexError; duplicate
    centroids are also possible.
    """
    initialCentroids = []
    for i in range(k):
        index = random.randint(0, len(X))
        initialCentroids.append(X[index])
    return initialCentroids
def visualizeClusters(clusters):
    """Scatter-plot the first two clusters (red/blue squares).

    NOTE(review): hard-coded to exactly clusters[0] and clusters[1], and
    mutates *clusters* in place by converting each member to a numpy array.
    """
    for i in range(len(clusters)):
        clusters[i] = np.array(clusters[i])
    plt.plot(clusters[0][:,0], clusters[0][:,1], 'rs', clusters[1][:,0], clusters[1][:,1], 'bs')
    plt.show()
    return
def has_converged(centroids, old_centroids, iterations):
    """True once the centroids stop moving or after 100 iterations."""
    MAX_ITERATIONS = 100  # hard cap; the caller's maxIter is not consulted
    if iterations > MAX_ITERATIONS:
        return True
    return old_centroids == centroids
def euclidean_dist(data, centroids, clusters):
    """Assign every point in *data* to the cluster of its nearest centroid.

    Returns *clusters* with numpy rows appended; an empty cluster is
    re-seeded with a random data point so the caller's mean stays defined.
    """
    centroids = np.array(centroids)
    for instance in data:
        instance = np.array(instance)
        # argmin over L2 distances to every centroid
        mu_index = min([(i[0], np.linalg.norm(instance - centroids[i[0]])) \
                for i in enumerate(centroids)], key=lambda t: t[1])[0]
        try:
            clusters[mu_index].append(instance)
        # Dead branch for list-typed clusters (lists raise IndexError,
        # not KeyError); kept for byte-identical behavior.
        except KeyError:
            clusters[mu_index] = [instance]
    for cluster in clusters:
        if not cluster:
            # NOTE(review): indexing the Python list `data` with a size-1
            # ndarray raises TypeError here — latent bug.
            cluster.append(data[np.random.randint(0, len(data), size=1)].flatten().tolist())
    return clusters
def kmeans(X, k, maxIter=1000):
    """Lloyd's k-means; plots the final clusters before returning them.

    NOTE(review): maxIter is accepted but never used — the stop
    condition lives entirely in has_converged (100-iteration cap).
    """
    centroids = getInitialCentroids(X,k)
    old_centroids = [[] for i in range(k)]
    iterations = 0
    while not (has_converged(centroids, old_centroids, iterations)):
        iterations += 1
        clusters = [[] for i in range(k)]
        # assign data points to clusters
        clusters = euclidean_dist(X, centroids, clusters)
        # recalculate centroids as the mean of each cluster's members
        index = 0
        for cluster in clusters:
            old_centroids[index] = centroids[index]
            centroids[index] = np.mean(cluster, axis=0).tolist()
            index += 1
    visualizeClusters(clusters)
    return clusters
def kmeans_(X, k, maxIter=1000):
    """Same as kmeans() but without the plot (used by kneeFinding).

    NOTE(review): maxIter is accepted but never used.
    """
    centroids = getInitialCentroids(X,k)
    old_centroids = [[] for i in range(k)]
    iterations = 0
    while not (has_converged(centroids, old_centroids, iterations)):
        iterations += 1
        clusters = [[] for i in range(k)]
        # assign data points to clusters
        clusters = euclidean_dist(X, centroids, clusters)
        # recalculate centroids as the mean of each cluster's members
        index = 0
        for cluster in clusters:
            old_centroids[index] = centroids[index]
            centroids[index] = np.mean(cluster, axis=0).tolist()
            index += 1
    #visualizeClusters(clusters)
    return clusters
def Func(clusters):
    """Objective: sum of distances from members to each cluster's first
    point (used as a stand-in for the cluster center).

    NOTE(review): a textbook distortion would measure against the mean
    of each cluster, not its first member — confirm intent.
    """
    center = []
    for i in range(len(clusters)):
        center.append(clusters[i][0])
    distSum = 0
    for i in range(len(clusters)):
        for j in range(1, len(clusters[i])):
            distSum += np.linalg.norm(center[i] - clusters[i][j])
    return distSum
def kneeFinding(X,kList):
    """Elbow plot of the clustering objective for each k in kList.

    NOTE(review): the x-axis is hard-coded to range(1, 7) and will not
    match a different kList.
    """
    obj = []
    for i in kList:
        obj.append(Func(kmeans_(X, i)))
    plt.plot(range(1,7), obj)
    plt.show()
    return
def purity(X, clusters):
    """Majority-label share per cluster (labels live at column index 2).

    NOTE(review): X is unused and exactly two clusters are assumed.
    """
    purities = []
    for i in range(2):
        count = 0
        for idx in range(len(clusters[i])):
            if(int(clusters[i][idx][2]) == 1):
                count += 1
        # Report the dominant label's share: max(p, 1 - p).
        purity = count*1.0 / len(clusters[i])
        if purity > 0.5:
            purities.append(purity)
        else:
            purities.append(1-purity)
    # Example output observed on humanData.txt:
    #<type 'list'>: [0.9724249797242498, 0.999000999000999]
    return purities
'''
## GMM functions
#calculate the initial covariance matrix
#covType: diag, full
def getInitialsGMM(X,k,covType):
if covType == 'full':
dataArray = np.transpose(np.array([pt[0:-1] for pt in X]))
covMat = np.cov(dataArray)
else:
covMatList = []
for i in range(len(X[0])-1):
data = [pt[i] for pt in X]
cov = np.asscalar(np.cov(data))
covMatList.append(cov)
covMat = np.diag(covMatList)
initialClusters = {}
#Your code here
return initialClusters
def calcLogLikelihood(X,clusters,k):
loglikelihood = 0
#Your code here
return loglikelihood
#E-step
def updateEStep(X,clusters,k):
EMatrix = []
#Your code here
return EMatrix
#M-step
def updateMStep(X,clusters,EMatrix):
#Your code here
return clusters
def visualizeClustersGMM(X,labels,clusters,covType):
#Your code here
def gmmCluster(X, k, covType, maxIter=1000):
#initial clusters
clustersGMM = getInitialsGMM(X,k,covType)
labels = []
#Your code here
visualizeClustersGMM(X,labels,clustersGMM,covType)
return labels,clustersGMM
def purityGMM(X, clusters, labels):
purities = []
#Your code here
return purities
'''
def main():
    """Load humanData.txt, run the elbow analysis, then cluster with k=2."""
    #######dataset path
    #datadir = sys.argv[1]
    datadir = ''
    pathDataset1 = datadir+'humanData.txt'
    #pathDataset2 = datadir+'/audioData.txt'
    dataset1 = loadData(pathDataset1)
    #dataset2 = loadData(pathDataset2)
    #Q4: elbow plot over k = 1..6
    kneeFinding(dataset1,range(1,7))
    #Q5: cluster with k = 2 and measure per-cluster label purity
    clusters = kmeans(dataset1, 2, maxIter=1000)
    purity(dataset1,clusters)
'''
#Q7
labels11,clustersGMM11 = gmmCluster(dataset1, 2, 'diag')
labels12,clustersGMM12 = gmmCluster(dataset1, 2, 'full')
#Q8
labels21,clustersGMM21 = gmmCluster(dataset2, 2, 'diag')
labels22,clustersGMM22 = gmmCluster(dataset2, 2, 'full')
#Q9
purities11 = purityGMM(dataset1, clustersGMM11, labels11)
purities12 = purityGMM(dataset1, clustersGMM12, labels12)
purities21 = purityGMM(dataset2, clustersGMM21, labels21)
purities22 = purityGMM(dataset2, clustersGMM22, labels22)
'''
# Script entry point: only run when executed directly.
if __name__ == "__main__":
    main()
|
flexible
|
{
"blob_id": "000dd63089fd0c6184fd032fe75ccc920beee7a8",
"index": 127,
"step-1": "<mask token>\n\n\ndef loadData(fileDj):\n data = []\n fid = open(fileDj)\n for line in fid:\n line = line.strip()\n m = [float(x) for x in line.split(' ')]\n data.append(m)\n return data\n\n\ndef getInitialCentroids(X, k):\n initialCentroids = []\n for i in range(k):\n index = random.randint(0, len(X))\n initialCentroids.append(X[index])\n return initialCentroids\n\n\ndef visualizeClusters(clusters):\n for i in range(len(clusters)):\n clusters[i] = np.array(clusters[i])\n plt.plot(clusters[0][:, 0], clusters[0][:, 1], 'rs', clusters[1][:, 0],\n clusters[1][:, 1], 'bs')\n plt.show()\n return\n\n\n<mask token>\n\n\ndef euclidean_dist(data, centroids, clusters):\n centroids = np.array(centroids)\n for instance in data:\n instance = np.array(instance)\n mu_index = min([(i[0], np.linalg.norm(instance - centroids[i[0]])) for\n i in enumerate(centroids)], key=lambda t: t[1])[0]\n try:\n clusters[mu_index].append(instance)\n except KeyError:\n clusters[mu_index] = [instance]\n for cluster in clusters:\n if not cluster:\n cluster.append(data[np.random.randint(0, len(data), size=1)].\n flatten().tolist())\n return clusters\n\n\ndef kmeans(X, k, maxIter=1000):\n centroids = getInitialCentroids(X, k)\n old_centroids = [[] for i in range(k)]\n iterations = 0\n while not has_converged(centroids, old_centroids, iterations):\n iterations += 1\n clusters = [[] for i in range(k)]\n clusters = euclidean_dist(X, centroids, clusters)\n index = 0\n for cluster in clusters:\n old_centroids[index] = centroids[index]\n centroids[index] = np.mean(cluster, axis=0).tolist()\n index += 1\n visualizeClusters(clusters)\n return clusters\n\n\ndef kmeans_(X, k, maxIter=1000):\n centroids = getInitialCentroids(X, k)\n old_centroids = [[] for i in range(k)]\n iterations = 0\n while not has_converged(centroids, old_centroids, iterations):\n iterations += 1\n clusters = [[] for i in range(k)]\n clusters = euclidean_dist(X, centroids, clusters)\n index = 0\n for cluster in clusters:\n 
old_centroids[index] = centroids[index]\n centroids[index] = np.mean(cluster, axis=0).tolist()\n index += 1\n return clusters\n\n\ndef Func(clusters):\n center = []\n for i in range(len(clusters)):\n center.append(clusters[i][0])\n distSum = 0\n for i in range(len(clusters)):\n for j in range(1, len(clusters[i])):\n distSum += np.linalg.norm(center[i] - clusters[i][j])\n return distSum\n\n\ndef kneeFinding(X, kList):\n obj = []\n for i in kList:\n obj.append(Func(kmeans_(X, i)))\n plt.plot(range(1, 7), obj)\n plt.show()\n return\n\n\ndef purity(X, clusters):\n purities = []\n for i in range(2):\n count = 0\n for idx in range(len(clusters[i])):\n if int(clusters[i][idx][2]) == 1:\n count += 1\n purity = count * 1.0 / len(clusters[i])\n if purity > 0.5:\n purities.append(purity)\n else:\n purities.append(1 - purity)\n return purities\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef loadData(fileDj):\n data = []\n fid = open(fileDj)\n for line in fid:\n line = line.strip()\n m = [float(x) for x in line.split(' ')]\n data.append(m)\n return data\n\n\ndef getInitialCentroids(X, k):\n initialCentroids = []\n for i in range(k):\n index = random.randint(0, len(X))\n initialCentroids.append(X[index])\n return initialCentroids\n\n\ndef visualizeClusters(clusters):\n for i in range(len(clusters)):\n clusters[i] = np.array(clusters[i])\n plt.plot(clusters[0][:, 0], clusters[0][:, 1], 'rs', clusters[1][:, 0],\n clusters[1][:, 1], 'bs')\n plt.show()\n return\n\n\n<mask token>\n\n\ndef euclidean_dist(data, centroids, clusters):\n centroids = np.array(centroids)\n for instance in data:\n instance = np.array(instance)\n mu_index = min([(i[0], np.linalg.norm(instance - centroids[i[0]])) for\n i in enumerate(centroids)], key=lambda t: t[1])[0]\n try:\n clusters[mu_index].append(instance)\n except KeyError:\n clusters[mu_index] = [instance]\n for cluster in clusters:\n if not cluster:\n cluster.append(data[np.random.randint(0, len(data), size=1)].\n flatten().tolist())\n return clusters\n\n\ndef kmeans(X, k, maxIter=1000):\n centroids = getInitialCentroids(X, k)\n old_centroids = [[] for i in range(k)]\n iterations = 0\n while not has_converged(centroids, old_centroids, iterations):\n iterations += 1\n clusters = [[] for i in range(k)]\n clusters = euclidean_dist(X, centroids, clusters)\n index = 0\n for cluster in clusters:\n old_centroids[index] = centroids[index]\n centroids[index] = np.mean(cluster, axis=0).tolist()\n index += 1\n visualizeClusters(clusters)\n return clusters\n\n\ndef kmeans_(X, k, maxIter=1000):\n centroids = getInitialCentroids(X, k)\n old_centroids = [[] for i in range(k)]\n iterations = 0\n while not has_converged(centroids, old_centroids, iterations):\n iterations += 1\n clusters = [[] for i in range(k)]\n clusters = euclidean_dist(X, centroids, clusters)\n index = 0\n for cluster in clusters:\n 
old_centroids[index] = centroids[index]\n centroids[index] = np.mean(cluster, axis=0).tolist()\n index += 1\n return clusters\n\n\ndef Func(clusters):\n center = []\n for i in range(len(clusters)):\n center.append(clusters[i][0])\n distSum = 0\n for i in range(len(clusters)):\n for j in range(1, len(clusters[i])):\n distSum += np.linalg.norm(center[i] - clusters[i][j])\n return distSum\n\n\ndef kneeFinding(X, kList):\n obj = []\n for i in kList:\n obj.append(Func(kmeans_(X, i)))\n plt.plot(range(1, 7), obj)\n plt.show()\n return\n\n\ndef purity(X, clusters):\n purities = []\n for i in range(2):\n count = 0\n for idx in range(len(clusters[i])):\n if int(clusters[i][idx][2]) == 1:\n count += 1\n purity = count * 1.0 / len(clusters[i])\n if purity > 0.5:\n purities.append(purity)\n else:\n purities.append(1 - purity)\n return purities\n\n\n<mask token>\n\n\ndef main():\n datadir = ''\n pathDataset1 = datadir + 'humanData.txt'\n dataset1 = loadData(pathDataset1)\n kneeFinding(dataset1, range(1, 7))\n clusters = kmeans(dataset1, 2, maxIter=1000)\n purity(dataset1, clusters)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef loadData(fileDj):\n data = []\n fid = open(fileDj)\n for line in fid:\n line = line.strip()\n m = [float(x) for x in line.split(' ')]\n data.append(m)\n return data\n\n\ndef getInitialCentroids(X, k):\n initialCentroids = []\n for i in range(k):\n index = random.randint(0, len(X))\n initialCentroids.append(X[index])\n return initialCentroids\n\n\ndef visualizeClusters(clusters):\n for i in range(len(clusters)):\n clusters[i] = np.array(clusters[i])\n plt.plot(clusters[0][:, 0], clusters[0][:, 1], 'rs', clusters[1][:, 0],\n clusters[1][:, 1], 'bs')\n plt.show()\n return\n\n\ndef has_converged(centroids, old_centroids, iterations):\n MAX_ITERATIONS = 100\n if iterations > MAX_ITERATIONS:\n return True\n return old_centroids == centroids\n\n\ndef euclidean_dist(data, centroids, clusters):\n centroids = np.array(centroids)\n for instance in data:\n instance = np.array(instance)\n mu_index = min([(i[0], np.linalg.norm(instance - centroids[i[0]])) for\n i in enumerate(centroids)], key=lambda t: t[1])[0]\n try:\n clusters[mu_index].append(instance)\n except KeyError:\n clusters[mu_index] = [instance]\n for cluster in clusters:\n if not cluster:\n cluster.append(data[np.random.randint(0, len(data), size=1)].\n flatten().tolist())\n return clusters\n\n\ndef kmeans(X, k, maxIter=1000):\n centroids = getInitialCentroids(X, k)\n old_centroids = [[] for i in range(k)]\n iterations = 0\n while not has_converged(centroids, old_centroids, iterations):\n iterations += 1\n clusters = [[] for i in range(k)]\n clusters = euclidean_dist(X, centroids, clusters)\n index = 0\n for cluster in clusters:\n old_centroids[index] = centroids[index]\n centroids[index] = np.mean(cluster, axis=0).tolist()\n index += 1\n visualizeClusters(clusters)\n return clusters\n\n\ndef kmeans_(X, k, maxIter=1000):\n centroids = getInitialCentroids(X, k)\n old_centroids = [[] for i in range(k)]\n iterations = 0\n while not has_converged(centroids, old_centroids, iterations):\n 
iterations += 1\n clusters = [[] for i in range(k)]\n clusters = euclidean_dist(X, centroids, clusters)\n index = 0\n for cluster in clusters:\n old_centroids[index] = centroids[index]\n centroids[index] = np.mean(cluster, axis=0).tolist()\n index += 1\n return clusters\n\n\ndef Func(clusters):\n center = []\n for i in range(len(clusters)):\n center.append(clusters[i][0])\n distSum = 0\n for i in range(len(clusters)):\n for j in range(1, len(clusters[i])):\n distSum += np.linalg.norm(center[i] - clusters[i][j])\n return distSum\n\n\ndef kneeFinding(X, kList):\n obj = []\n for i in kList:\n obj.append(Func(kmeans_(X, i)))\n plt.plot(range(1, 7), obj)\n plt.show()\n return\n\n\ndef purity(X, clusters):\n purities = []\n for i in range(2):\n count = 0\n for idx in range(len(clusters[i])):\n if int(clusters[i][idx][2]) == 1:\n count += 1\n purity = count * 1.0 / len(clusters[i])\n if purity > 0.5:\n purities.append(purity)\n else:\n purities.append(1 - purity)\n return purities\n\n\n<mask token>\n\n\ndef main():\n datadir = ''\n pathDataset1 = datadir + 'humanData.txt'\n dataset1 = loadData(pathDataset1)\n kneeFinding(dataset1, range(1, 7))\n clusters = kmeans(dataset1, 2, maxIter=1000)\n purity(dataset1, clusters)\n\n\n<mask token>\nif __name__ == '__main__':\n main()\n",
"step-4": "import sys\nimport numpy as np\nimport random\nimport matplotlib.pyplot as plt\n\n\ndef loadData(fileDj):\n data = []\n fid = open(fileDj)\n for line in fid:\n line = line.strip()\n m = [float(x) for x in line.split(' ')]\n data.append(m)\n return data\n\n\ndef getInitialCentroids(X, k):\n initialCentroids = []\n for i in range(k):\n index = random.randint(0, len(X))\n initialCentroids.append(X[index])\n return initialCentroids\n\n\ndef visualizeClusters(clusters):\n for i in range(len(clusters)):\n clusters[i] = np.array(clusters[i])\n plt.plot(clusters[0][:, 0], clusters[0][:, 1], 'rs', clusters[1][:, 0],\n clusters[1][:, 1], 'bs')\n plt.show()\n return\n\n\ndef has_converged(centroids, old_centroids, iterations):\n MAX_ITERATIONS = 100\n if iterations > MAX_ITERATIONS:\n return True\n return old_centroids == centroids\n\n\ndef euclidean_dist(data, centroids, clusters):\n centroids = np.array(centroids)\n for instance in data:\n instance = np.array(instance)\n mu_index = min([(i[0], np.linalg.norm(instance - centroids[i[0]])) for\n i in enumerate(centroids)], key=lambda t: t[1])[0]\n try:\n clusters[mu_index].append(instance)\n except KeyError:\n clusters[mu_index] = [instance]\n for cluster in clusters:\n if not cluster:\n cluster.append(data[np.random.randint(0, len(data), size=1)].\n flatten().tolist())\n return clusters\n\n\ndef kmeans(X, k, maxIter=1000):\n centroids = getInitialCentroids(X, k)\n old_centroids = [[] for i in range(k)]\n iterations = 0\n while not has_converged(centroids, old_centroids, iterations):\n iterations += 1\n clusters = [[] for i in range(k)]\n clusters = euclidean_dist(X, centroids, clusters)\n index = 0\n for cluster in clusters:\n old_centroids[index] = centroids[index]\n centroids[index] = np.mean(cluster, axis=0).tolist()\n index += 1\n visualizeClusters(clusters)\n return clusters\n\n\ndef kmeans_(X, k, maxIter=1000):\n centroids = getInitialCentroids(X, k)\n old_centroids = [[] for i in range(k)]\n iterations = 0\n 
while not has_converged(centroids, old_centroids, iterations):\n iterations += 1\n clusters = [[] for i in range(k)]\n clusters = euclidean_dist(X, centroids, clusters)\n index = 0\n for cluster in clusters:\n old_centroids[index] = centroids[index]\n centroids[index] = np.mean(cluster, axis=0).tolist()\n index += 1\n return clusters\n\n\ndef Func(clusters):\n center = []\n for i in range(len(clusters)):\n center.append(clusters[i][0])\n distSum = 0\n for i in range(len(clusters)):\n for j in range(1, len(clusters[i])):\n distSum += np.linalg.norm(center[i] - clusters[i][j])\n return distSum\n\n\ndef kneeFinding(X, kList):\n obj = []\n for i in kList:\n obj.append(Func(kmeans_(X, i)))\n plt.plot(range(1, 7), obj)\n plt.show()\n return\n\n\ndef purity(X, clusters):\n purities = []\n for i in range(2):\n count = 0\n for idx in range(len(clusters[i])):\n if int(clusters[i][idx][2]) == 1:\n count += 1\n purity = count * 1.0 / len(clusters[i])\n if purity > 0.5:\n purities.append(purity)\n else:\n purities.append(1 - purity)\n return purities\n\n\n<mask token>\n\n\ndef main():\n datadir = ''\n pathDataset1 = datadir + 'humanData.txt'\n dataset1 = loadData(pathDataset1)\n kneeFinding(dataset1, range(1, 7))\n clusters = kmeans(dataset1, 2, maxIter=1000)\n purity(dataset1, clusters)\n\n\n<mask token>\nif __name__ == '__main__':\n main()\n",
"step-5": "#!/usr/bin/python\n\nimport sys\nimport numpy as np\nimport random\nimport matplotlib.pyplot as plt\n#Your code here\n\ndef loadData(fileDj):\n data = []\n fid = open(fileDj)\n for line in fid:\n line = line.strip()\n m = [float(x) for x in line.split(' ')]\n data.append(m)\n\n\n return data\n\n## K-means functions \n\ndef getInitialCentroids(X, k):\n initialCentroids = []\n\n for i in range(k):\n index = random.randint(0, len(X))\n initialCentroids.append(X[index])\n\n #Your code here\n return initialCentroids\n\n\ndef visualizeClusters(clusters):\n\n for i in range(len(clusters)):\n clusters[i] = np.array(clusters[i])\n\n plt.plot(clusters[0][:,0], clusters[0][:,1], 'rs', clusters[1][:,0], clusters[1][:,1], 'bs')\n plt.show()\n return\n\ndef has_converged(centroids, old_centroids, iterations):\n MAX_ITERATIONS = 100\n if iterations > MAX_ITERATIONS:\n return True\n return old_centroids == centroids\n\ndef euclidean_dist(data, centroids, clusters):\n centroids = np.array(centroids)\n for instance in data:\n instance = np.array(instance)\n\n mu_index = min([(i[0], np.linalg.norm(instance - centroids[i[0]])) \\\n for i in enumerate(centroids)], key=lambda t: t[1])[0]\n try:\n clusters[mu_index].append(instance)\n except KeyError:\n clusters[mu_index] = [instance]\n\n for cluster in clusters:\n if not cluster:\n cluster.append(data[np.random.randint(0, len(data), size=1)].flatten().tolist())\n\n return clusters\n\n\ndef kmeans(X, k, maxIter=1000):\n\n centroids = getInitialCentroids(X,k)\n\n old_centroids = [[] for i in range(k)]\n\n iterations = 0\n while not (has_converged(centroids, old_centroids, iterations)):\n iterations += 1\n\n clusters = [[] for i in range(k)]\n\n # assign data points to clusters\n clusters = euclidean_dist(X, centroids, clusters)\n\n # recalculate centroids\n index = 0\n for cluster in clusters:\n old_centroids[index] = centroids[index]\n centroids[index] = np.mean(cluster, axis=0).tolist()\n index += 1\n\n 
visualizeClusters(clusters)\n\n return clusters\n\ndef kmeans_(X, k, maxIter=1000):\n\n centroids = getInitialCentroids(X,k)\n\n old_centroids = [[] for i in range(k)]\n\n iterations = 0\n while not (has_converged(centroids, old_centroids, iterations)):\n iterations += 1\n\n clusters = [[] for i in range(k)]\n\n # assign data points to clusters\n clusters = euclidean_dist(X, centroids, clusters)\n\n # recalculate centroids\n index = 0\n for cluster in clusters:\n old_centroids[index] = centroids[index]\n centroids[index] = np.mean(cluster, axis=0).tolist()\n index += 1\n\n #visualizeClusters(clusters)\n\n return clusters\n\n\ndef Func(clusters):\n center = []\n for i in range(len(clusters)):\n center.append(clusters[i][0])\n\n distSum = 0\n\n for i in range(len(clusters)):\n for j in range(1, len(clusters[i])):\n distSum += np.linalg.norm(center[i] - clusters[i][j])\n\n return distSum\n\ndef kneeFinding(X,kList):\n obj = []\n\n for i in kList:\n obj.append(Func(kmeans_(X, i)))\n\n plt.plot(range(1,7), obj)\n plt.show()\n\n return\n\ndef purity(X, clusters):\n purities = []\n #Your code\n for i in range(2):\n count = 0\n for idx in range(len(clusters[i])):\n if(int(clusters[i][idx][2]) == 1):\n count += 1\n\n purity = count*1.0 / len(clusters[i])\n if purity > 0.5:\n purities.append(purity)\n else:\n purities.append(1-purity)\n\n #<type 'list'>: [0.9724249797242498, 0.999000999000999]\n return purities\n\n'''\n\n## GMM functions \n\n#calculate the initial covariance matrix\n#covType: diag, full\ndef getInitialsGMM(X,k,covType):\n if covType == 'full':\n dataArray = np.transpose(np.array([pt[0:-1] for pt in X]))\n covMat = np.cov(dataArray)\n else:\n covMatList = []\n for i in range(len(X[0])-1):\n data = [pt[i] for pt in X]\n cov = np.asscalar(np.cov(data))\n covMatList.append(cov)\n covMat = np.diag(covMatList)\n\n initialClusters = {}\n #Your code here\n return initialClusters\n\n\ndef calcLogLikelihood(X,clusters,k):\n loglikelihood = 0\n #Your code here\n return 
loglikelihood\n\n#E-step\ndef updateEStep(X,clusters,k):\n EMatrix = []\n #Your code here\n return EMatrix\n\n#M-step\ndef updateMStep(X,clusters,EMatrix):\n #Your code here\n return clusters\n\ndef visualizeClustersGMM(X,labels,clusters,covType):\n #Your code here\n\n\ndef gmmCluster(X, k, covType, maxIter=1000):\n #initial clusters\n clustersGMM = getInitialsGMM(X,k,covType)\n labels = []\n #Your code here\n visualizeClustersGMM(X,labels,clustersGMM,covType)\n return labels,clustersGMM\n\n\ndef purityGMM(X, clusters, labels):\n purities = []\n #Your code here\n return purities\n\n\n'''\n\ndef main():\n #######dataset path\n #datadir = sys.argv[1]\n datadir = ''\n pathDataset1 = datadir+'humanData.txt'\n #pathDataset2 = datadir+'/audioData.txt'\n dataset1 = loadData(pathDataset1)\n #dataset2 = loadData(pathDataset2)\n\n\n #Q4\n kneeFinding(dataset1,range(1,7))\n\n #Q5\n clusters = kmeans(dataset1, 2, maxIter=1000)\n purity(dataset1,clusters)\n'''\n #Q7\n labels11,clustersGMM11 = gmmCluster(dataset1, 2, 'diag')\n labels12,clustersGMM12 = gmmCluster(dataset1, 2, 'full')\n\n #Q8\n labels21,clustersGMM21 = gmmCluster(dataset2, 2, 'diag')\n labels22,clustersGMM22 = gmmCluster(dataset2, 2, 'full')\n\n #Q9\n purities11 = purityGMM(dataset1, clustersGMM11, labels11)\n purities12 = purityGMM(dataset1, clustersGMM12, labels12)\n purities21 = purityGMM(dataset2, clustersGMM21, labels21)\n purities22 = purityGMM(dataset2, clustersGMM22, labels22)\n'''\nif __name__ == \"__main__\":\n main()",
"step-ids": [
9,
10,
12,
13,
14
]
}
|
[
9,
10,
12,
13,
14
] |
# -*- coding: utf-8 -*-
import sys
from os import path
try:
import DMP
except ImportError:
sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
from DMP.modeling.vectorMaker import VectorMaker
from DMP.modeling.variables import KEY_TOTAL, KEY_TRAIN, KEY_VALID, KEY_TEST
from DMP.dataset.dataHandler import DataHandler
from DMP.utils.arg_encoding import *
if __name__ == '__main__':
    # Map each dataset split to the CSV file it is loaded from.
    split_files = {
        KEY_TOTAL: SAVE_FILE_TOTAL,
        KEY_TRAIN: SAVE_FILE_TRAIN,
        KEY_VALID: SAVE_FILE_VALID,
        KEY_TEST: SAVE_FILE_TEST,
    }

    # Load every split through its own DataHandler.
    handlers = {}
    for split_key, csv_path in split_files.items():
        handler = DataHandler(csv_path, column_target=COLUMN_TARGET,
                              eliminate_target=True)
        handler.load()
        handlers[split_key] = handler

    # Encode the loaded splits and persist every output artifact.
    vector_maker = VectorMaker(handlers)
    vector_maker.encoding()
    vector_maker.show_vector_info()
    vector_maker.build_tf_records()
    vector_maker.build_pillow_img()
    vector_maker.dump()
|
normal
|
{
"blob_id": "ca25739583d3b7ff449fbd2f56a96631981c815d",
"index": 5986,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ntry:\n import DMP\nexcept ImportError:\n sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))\n<mask token>\nif __name__ == '__main__':\n file_dict = {KEY_TOTAL: SAVE_FILE_TOTAL, KEY_TRAIN: SAVE_FILE_TRAIN,\n KEY_VALID: SAVE_FILE_VALID, KEY_TEST: SAVE_FILE_TEST}\n dataHandler_dict = dict()\n for key, read_csv in file_dict.items():\n dataHandler = DataHandler(read_csv, column_target=COLUMN_TARGET,\n eliminate_target=True)\n dataHandler.load()\n dataHandler_dict[key] = dataHandler\n vectorMaker = VectorMaker(dataHandler_dict)\n vectorMaker.encoding()\n vectorMaker.show_vector_info()\n vectorMaker.build_tf_records()\n vectorMaker.build_pillow_img()\n vectorMaker.dump()\n",
"step-3": "import sys\nfrom os import path\ntry:\n import DMP\nexcept ImportError:\n sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))\nfrom DMP.modeling.vectorMaker import VectorMaker\nfrom DMP.modeling.variables import KEY_TOTAL, KEY_TRAIN, KEY_VALID, KEY_TEST\nfrom DMP.dataset.dataHandler import DataHandler\nfrom DMP.utils.arg_encoding import *\nif __name__ == '__main__':\n file_dict = {KEY_TOTAL: SAVE_FILE_TOTAL, KEY_TRAIN: SAVE_FILE_TRAIN,\n KEY_VALID: SAVE_FILE_VALID, KEY_TEST: SAVE_FILE_TEST}\n dataHandler_dict = dict()\n for key, read_csv in file_dict.items():\n dataHandler = DataHandler(read_csv, column_target=COLUMN_TARGET,\n eliminate_target=True)\n dataHandler.load()\n dataHandler_dict[key] = dataHandler\n vectorMaker = VectorMaker(dataHandler_dict)\n vectorMaker.encoding()\n vectorMaker.show_vector_info()\n vectorMaker.build_tf_records()\n vectorMaker.build_pillow_img()\n vectorMaker.dump()\n",
"step-4": "# -*- coding: utf-8 -*-\n\nimport sys\nfrom os import path\n\ntry:\n import DMP\nexcept ImportError:\n sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))\n\nfrom DMP.modeling.vectorMaker import VectorMaker\nfrom DMP.modeling.variables import KEY_TOTAL, KEY_TRAIN, KEY_VALID, KEY_TEST\nfrom DMP.dataset.dataHandler import DataHandler\nfrom DMP.utils.arg_encoding import *\n\n\nif __name__ == '__main__':\n file_dict = {\n KEY_TOTAL: SAVE_FILE_TOTAL,\n KEY_TRAIN: SAVE_FILE_TRAIN,\n KEY_VALID: SAVE_FILE_VALID,\n KEY_TEST: SAVE_FILE_TEST\n }\n dataHandler_dict = dict()\n\n # loading data\n for key, read_csv in file_dict.items():\n dataHandler = DataHandler(read_csv, column_target=COLUMN_TARGET, eliminate_target=True)\n dataHandler.load()\n dataHandler_dict[key] = dataHandler\n\n # encoding data using dataHandler\n vectorMaker = VectorMaker(dataHandler_dict)\n\n vectorMaker.encoding()\n vectorMaker.show_vector_info()\n vectorMaker.build_tf_records()\n vectorMaker.build_pillow_img()\n vectorMaker.dump()\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# -*- coding: utf-8 -*-
# Standard library
import urllib
import urllib.parse
import urllib.request
from collections import OrderedDict
from datetime import (
    datetime,
)
from decimal import Decimal
from urllib.parse import urlencode

# Third-party
from django.core.exceptions import ObjectDoesNotExist
from django.db import transaction
from django.http import HttpResponse, Http404
from django.shortcuts import render
from django.utils.dateparse import parse_datetime
from matplotlib import pyplot

# Local application
from estacionamientos.controller import (
    HorarioEstacionamiento,
    validarHorarioReserva,
    marzullo,
    get_client_ip,
    tasa_reservaciones,
    calcular_porcentaje_de_tasa,
    consultar_ingresos,
)
from estacionamientos.forms import (
    EstacionamientoExtendedForm,
    EstacionamientoForm,
    ReservaForm,
    PagoForm,
    RifForm,
    CedulaForm,
)
from estacionamientos.models import (
    Estacionamiento,
    Reserva,
    Pago,
    TarifaHora,
    TarifaMinuto,
    TarifaHorayFraccion,
    TarifaFinDeSemana,
    TarifaHoraPico
)
# Usamos esta vista para procesar todos los estacionamientos
def estacionamientos_all(request):
    """List every parking lot and handle creation of new ones.

    GET renders the catalogue with an empty creation form; POST validates
    the submitted data and, when fewer than 5 lots exist, registers a new
    one and resets the form.
    """
    estacionamientos = Estacionamiento.objects.all()
    if request.method == 'GET':
        # Nothing submitted yet: show an empty creation form
        form = EstacionamientoForm()
    elif request.method == 'POST':
        form = EstacionamientoForm(request.POST)
        # The assignment caps the number of parking lots at 5
        if len(estacionamientos) >= 5:
            return render(
                request, 'template-mensaje.html',
                {'color': 'red',
                 'mensaje': 'No se pueden agregar más estacionamientos'},
            )
        if form.is_valid():
            datos = form.cleaned_data
            Estacionamiento(
                propietario=datos['propietario'],
                nombre=datos['nombre'],
                direccion=datos['direccion'],
                rif=datos['rif'],
                telefono1=datos['telefono_1'],
                telefono2=datos['telefono_2'],
                telefono3=datos['telefono_3'],
                email1=datos['email_1'],
                email2=datos['email_2'],
            ).save()
            # Reload the catalogue (we just added one) and reset the form
            estacionamientos = Estacionamiento.objects.all()
            form = EstacionamientoForm()
    return render(
        request,
        'catalogo-estacionamientos.html',
        {'form': form,
         'estacionamientos': estacionamientos},
    )
# Maps the scheme name coming from the form to the tariff model class.
# This replaces eval() on user-supplied input, which could execute
# arbitrary Python expressions.
ESQUEMAS_TARIFA = {
    cls.__name__: cls
    for cls in (TarifaHora, TarifaMinuto, TarifaHorayFraccion,
                TarifaFinDeSemana, TarifaHoraPico)
}


def estacionamiento_detail(request, _id):
    """Show and update the schedule, capacity and tariff scheme of one lot.

    GET renders the parametrization form (pre-filled when the lot already
    has a tariff); POST validates and persists the new configuration.
    Raises Http404 when the parking lot does not exist.
    """
    _id = int(_id)
    # Make sure the object exists before doing anything else
    try:
        estacionamiento = Estacionamiento.objects.get(id=_id)
    except ObjectDoesNotExist:
        raise Http404
    if request.method == 'GET':
        if estacionamiento.tarifa:
            # Pre-fill the form with the current parametrization
            form_data = {
                'horarioin': estacionamiento.apertura,
                'horarioout': estacionamiento.cierre,
                'tarifa': estacionamiento.tarifa.tarifa,
                'tarifa2': estacionamiento.tarifa.tarifa2,
                'inicioTarifa2': estacionamiento.tarifa.inicioEspecial,
                'finTarifa2': estacionamiento.tarifa.finEspecial,
                'puestos': estacionamiento.capacidad,
                'esquema': estacionamiento.tarifa.__class__.__name__,
            }
            form = EstacionamientoExtendedForm(data=form_data)
        else:
            form = EstacionamientoExtendedForm()
    elif request.method == 'POST':
        form = EstacionamientoExtendedForm(request.POST)
        if form.is_valid():
            horaIn = form.cleaned_data['horarioin']
            horaOut = form.cleaned_data['horarioout']
            # Validate the schedule BEFORE creating the tariff row, so an
            # invalid schedule no longer leaves an orphan tariff in the DB
            # (previously the tariff was saved first and then discarded).
            if not HorarioEstacionamiento(horaIn, horaOut):
                return render(
                    request,
                    'template-mensaje.html',
                    {'color': 'red',
                     'mensaje': 'El horario de apertura debe ser menor al horario de cierre'},
                )
            tipo = form.cleaned_data['esquema']
            esquema_cls = ESQUEMAS_TARIFA.get(tipo)
            if esquema_cls is None:
                # Unknown scheme name: reject it instead of eval()-ing it
                return render(
                    request,
                    'template-mensaje.html',
                    {'color': 'red',
                     'mensaje': 'Esquema tarifario inválido'},
                )
            esquemaTarifa = esquema_cls(
                tarifa=form.cleaned_data['tarifa'],
                tarifa2=form.cleaned_data['tarifa2'],
                inicioEspecial=form.cleaned_data['inicioTarifa2'],
                finEspecial=form.cleaned_data['finTarifa2'],
            )
            esquemaTarifa.save()
            estacionamiento.tarifa = esquemaTarifa
            estacionamiento.apertura = horaIn
            estacionamiento.cierre = horaOut
            estacionamiento.capacidad = form.cleaned_data['puestos']
            estacionamiento.save()
            form = EstacionamientoExtendedForm()
    return render(
        request,
        'detalle-estacionamiento.html',
        {'form': form,
         'estacionamiento': estacionamiento},
    )
def estacionamiento_reserva(request, _id):
    """Check availability and pre-compute the price of a reservation.

    GET renders an empty reservation form; POST validates the requested
    time window, checks for a free spot and, when one exists, stashes the
    reservation data in the session so the payment view can persist it
    later.  The reservation itself is NOT saved here.
    Raises Http404 when the parking lot does not exist; returns 403 when
    the lot has not been parametrized yet.
    """
    _id = int(_id)
    # Make sure the object exists before continuing
    try:
        estacionamiento = Estacionamiento.objects.get(id = _id)
    except ObjectDoesNotExist:
        raise Http404
    # The lot must be parametrized (schedule/tariff set) before reserving
    if (estacionamiento.apertura is None):
        return HttpResponse(status = 403) # Not allowed in yet
    # On GET just render the empty reservation form
    if request.method == 'GET':
        form = ReservaForm()
    # On POST a reservation request is being submitted
    elif request.method == 'POST':
        form = ReservaForm(request.POST)
        # Run the form's own validators first
        if form.is_valid():
            inicioReserva = form.cleaned_data['inicio']
            finalReserva = form.cleaned_data['final']
            # TODO(review): this should work with exceptions, and the
            # message should be shown in the same form
            m_validado = validarHorarioReserva(
                inicioReserva,
                finalReserva,
                estacionamiento.apertura,
                estacionamiento.cierre,
            )
            # Invalid window: report the reason returned by the validator
            if not m_validado[0]:
                return render(
                    request,
                    'template-mensaje.html',
                    { 'color' :'red'
                    , 'mensaje': m_validado[1]
                    }
                )
            if marzullo(_id, inicioReserva, finalReserva):
                # A spot is free: build the (unsaved) reservation and price it
                reservaFinal = Reserva(
                    estacionamiento = estacionamiento,
                    inicioReserva = inicioReserva,
                    finalReserva = finalReserva,
                )
                # Price shown on the confirmation page (Decimal for display)
                monto = Decimal(
                    estacionamiento.tarifa.calcularPrecio(
                        inicioReserva,finalReserva
                    )
                )
                # Sessions can't hold Decimal, so the amount is stored as float
                request.session['monto'] = float(
                    estacionamiento.tarifa.calcularPrecio(
                        inicioReserva,
                        finalReserva
                    )
                )
                # Stash the window piecewise (datetimes aren't JSON-serializable);
                # estacionamiento_pago rebuilds the datetimes from these keys
                request.session['finalReservaHora'] = finalReserva.hour
                request.session['finalReservaMinuto'] = finalReserva.minute
                request.session['inicioReservaHora'] = inicioReserva.hour
                request.session['inicioReservaMinuto'] = inicioReserva.minute
                request.session['anioinicial'] = inicioReserva.year
                request.session['mesinicial'] = inicioReserva.month
                request.session['diainicial'] = inicioReserva.day
                request.session['aniofinal'] = finalReserva.year
                request.session['mesfinal'] = finalReserva.month
                request.session['diafinal'] = finalReserva.day
                return render(
                    request,
                    'confirmar.html',
                    { 'id' : _id
                    , 'monto' : monto
                    , 'reserva' : reservaFinal
                    , 'color' : 'green'
                    , 'mensaje' : 'Existe un puesto disponible'
                    }
                )
            else:
                # TODO(review): message pending change (original note)
                return render(
                    request,
                    'template-mensaje.html',
                    {'color' : 'red'
                    , 'mensaje' : 'No hay un puesto disponible para ese horario'
                    }
                )
    return render(
        request,
        'reserva.html',
        { 'form': form
        , 'estacionamiento': estacionamiento
        }
    )
def estacionamiento_pago(request, _id):
    """Confirm a reservation previously prepared by estacionamiento_reserva.

    Rebuilds the reservation from the data stashed in the session, persists
    it together with its payment receipt, and renders the receipt page.
    Raises Http404 when the parking lot does not exist; returns 403 when
    the lot is not parametrized or the session carries no pending
    reservation.
    """
    form = PagoForm()
    try:
        estacionamiento = Estacionamiento.objects.get(id=_id)
    except ObjectDoesNotExist:
        raise Http404
    if estacionamiento.apertura is None:
        return HttpResponse(status=403)  # This view is not allowed yet
    if request.method == 'POST':
        form = PagoForm(request.POST)
        if form.is_valid():
            # The reservation data must have been stashed by the reserva
            # view; without it a missing key used to raise a raw 500.
            claves = (
                'anioinicial', 'mesinicial', 'diainicial',
                'inicioReservaHora', 'inicioReservaMinuto',
                'aniofinal', 'mesfinal', 'diafinal',
                'finalReservaHora', 'finalReservaMinuto', 'monto',
            )
            if any(clave not in request.session for clave in claves):
                return HttpResponse(status=403)
            inicioReserva = datetime(
                year=request.session['anioinicial'],
                month=request.session['mesinicial'],
                day=request.session['diainicial'],
                hour=request.session['inicioReservaHora'],
                minute=request.session['inicioReservaMinuto'],
            )
            finalReserva = datetime(
                year=request.session['aniofinal'],
                month=request.session['mesfinal'],
                day=request.session['diafinal'],
                hour=request.session['finalReservaHora'],
                minute=request.session['finalReservaMinuto'],
            )
            monto = Decimal(request.session['monto']).quantize(Decimal('1.00'))
            # Persist reservation and receipt atomically so a failure while
            # saving the payment no longer leaves an unpaid reservation.
            with transaction.atomic():
                reservaFinal = Reserva(
                    estacionamiento=estacionamiento,
                    inicioReserva=inicioReserva,
                    finalReserva=finalReserva,
                )
                reservaFinal.save()
                pago = Pago(
                    fechaTransaccion=datetime.now(),
                    cedula=form.cleaned_data['cedula'],
                    cedulaTipo=form.cleaned_data['cedulaTipo'],
                    monto=monto,
                    tarjetaTipo=form.cleaned_data['tarjetaTipo'],
                    reserva=reservaFinal,
                )
                pago.save()
            return render(
                request,
                'pago.html',
                {'id': _id,
                 'pago': pago,
                 'color': 'green',
                 'mensaje': 'Se realizo el pago de reserva satisfactoriamente.'},
            )
    return render(
        request,
        'pago.html',
        {'form': form},
    )
def estacionamiento_ingreso(request):
    """Query the accumulated income of a parking-lot owner by RIF.

    GET shows the empty RIF form; POST looks up the income breakdown and
    total for the submitted RIF and renders them.
    """
    if request.method != 'POST':
        # No submission yet: show the empty RIF form
        return render(request, 'consultar-ingreso.html', {'form': RifForm()})
    form = RifForm(request.POST)
    if not form.is_valid():
        # Re-render the bound form so its validation errors are displayed
        return render(request, 'consultar-ingreso.html', {'form': form})
    listaIngresos, ingresoTotal = consultar_ingresos(form.cleaned_data['rif'])
    return render(
        request,
        'consultar-ingreso.html',
        {'ingresoTotal': ingresoTotal,
         'listaIngresos': listaIngresos,
         'form': form},
    )
def estacionamiento_consulta_reserva(request):
    """Look up the payment receipts (and their reservations) for a cedula.

    GET shows the empty lookup form; POST filters Pago by the submitted
    cedula and lists the receipts ordered by reservation start time.
    """
    form = CedulaForm()
    if request.method == 'POST':
        form = CedulaForm(request.POST)
        if form.is_valid():
            cedula = form.cleaned_data['cedula']
            facturas = Pago.objects.filter(cedula=cedula)
            # Sort in Python by the related reservation's start time.
            # (Removed a dead "listaFacturas = []" that was immediately
            # overwritten, and the redundant list() around the queryset.)
            listaFacturas = sorted(
                facturas,
                key=lambda factura: factura.reserva.inicioReserva,
            )
            return render(
                request,
                'consultar-reservas.html',
                {'listaFacturas': listaFacturas,
                 'form': form},
            )
    return render(
        request,
        'consultar-reservas.html',
        {'form': form},
    )
def _send_sms(ip, port, phone, text):
    """Send *text* to *phone* through the SMS gateway listening at ip:port."""
    text = urllib.parse.quote(str(text))
    urllib.request.urlopen(
        'http://{0}:{1}/sendsms?phone={2}&text={3}&password='.format(
            ip, port, phone, text))


def receive_sms(request):
    """Handle a reservation request received as an SMS through the gateway.

    Expected message: "<lot id> <start date> <start time> <end date> <end time>".
    Replies to the sender through the gateway; returns 400 on malformed
    input.  NOTE(review): the reservation itself is still commented out —
    only the validation and the reply are performed.
    """
    ip = get_client_ip(request)  # IP of the phone running the SMS gateway
    port = '8000'                # port of the SMS gateway on that phone
    phone = request.GET.get('phone', False)
    sms = request.GET.get('text', False)
    if (not sms or not phone):
        return HttpResponse(status=400)  # Bad request
    phone = urllib.parse.quote(str(phone))  # percent-encode the sender number
    # Parse the received text; each datetime spans two space-separated words
    try:
        sms = sms.split(' ')
        id_sms = int(sms[0])
        inicio_reserva = parse_datetime(sms[1] + ' ' + sms[2])
        final_reserva = parse_datetime(sms[3] + ' ' + sms[4])
    except (IndexError, ValueError):
        # Narrowed from a bare except: only parsing failures mean bad input
        return HttpResponse(status=400)  # Bad request
    # parse_datetime returns None (does not raise) on non-matching text;
    # previously a None slipped through to the validator
    if inicio_reserva is None or final_reserva is None:
        return HttpResponse(status=400)  # Bad request
    # Validate the parking-lot id received by SMS
    try:
        estacionamiento = Estacionamiento.objects.get(id=id_sms)
    except ObjectDoesNotExist:
        mensaje = 'No existe el estacionamiento ' + str(id_sms) + '.'
        _send_sms(ip, port, phone, mensaje)
        return HttpResponse(mensaje)
    # Validate the two datetimes against the lot's schedule
    m_validado = validarHorarioReserva(
        inicio_reserva,
        final_reserva,
        estacionamiento.apertura,
        estacionamiento.cierre,
    )
    if m_validado[0]:
        '''reserva_sms = Reserva(
            estacionamiento = estacionamiento,
            inicioReserva = inicio_reserva,
            finalReserva = final_reserva,
        )
        reserva_sms.save()'''
        _send_sms(ip, port, phone, 'Se realizó la reserva satisfactoriamente.')
    else:
        _send_sms(ip, port, phone, m_validado[1])
        return HttpResponse(m_validado[1])
    return HttpResponse('')
def tasa_de_reservacion(request, _id):
    """Render the per-day reservation rate of one parking lot.

    Raises Http404 when the lot does not exist; shows an error message
    when the lot has not been parametrized yet.
    """
    _id = int(_id)
    # Make sure the object exists before continuing
    try:
        estacionamiento = Estacionamiento.objects.get(id=_id)
    except ObjectDoesNotExist:
        raise Http404
    if estacionamiento.apertura is None:
        # The rate is meaningless until the lot has been parametrized
        return render(
            request, 'template-mensaje.html',
            {'color': 'red',
             'mensaje': 'Se debe parametrizar el estacionamiento primero.'},
        )
    ocupacion = tasa_reservaciones(_id)
    # Presumably converts the raw counts into percentages in place — its
    # return value is discarded (TODO confirm against the controller).
    calcular_porcentaje_de_tasa(
        estacionamiento.apertura,
        estacionamiento.cierre,
        estacionamiento.capacidad,
        ocupacion,
    )
    # Serialize as key1=value1&key2=value2&... for the chart view's query string
    datos_ocupacion = urlencode(ocupacion)
    return render(
        request,
        'tasa-reservacion.html',
        {'ocupacion': ocupacion,
         'datos_ocupacion': datos_ocupacion},
    )
def grafica_tasa_de_reservacion(request):
    """Render the reservation-rate bar chart as a PNG image.

    The data arrives in the query string as date=percentage pairs (built
    by tasa_de_reservacion).  Returns 400 when the query string is empty
    or contains non-numeric values.
    """
    # Recover the {date: percentage} dictionary from the query string
    try:
        datos_ocupacion = request.GET.dict()
        datos_ocupacion = OrderedDict(
            sorted((k, float(v)) for k, v in datos_ocupacion.items()))
    except (TypeError, ValueError):
        # Narrowed from a bare except: only bad values mean "bad request"
        return HttpResponse(status=400)  # Bad request
    # The request carried no data at all
    if not datos_ocupacion:
        return HttpResponse(status=400)  # Bad request
    # The response creation cannot fail, so it no longer sits inside the try
    response = HttpResponse(content_type='image/png')
    # Build the bar chart with pyplot
    pyplot.switch_backend('Agg')  # headless backend: avoids Tk threading issues
    # NOTE(review): the `hold` kwarg was removed in matplotlib >= 3.0 —
    # confirm the pinned matplotlib version still accepts it.
    pyplot.bar(range(len(datos_ocupacion)), datos_ocupacion.values(),
               hold=False, color='#6495ed')
    pyplot.ylim([0, 100])
    pyplot.title('Distribución de los porcentajes por fecha')
    pyplot.xticks(range(len(datos_ocupacion)),
                  list(datos_ocupacion.keys()), rotation=20)
    pyplot.ylabel('Porcentaje (%)')
    pyplot.grid(True, 'major', 'both')
    pyplot.savefig(response, format='png')  # write the PNG into the response
    pyplot.close()
    return response
|
normal
|
{
"blob_id": "1a46752a2d1c72ec6084e7af3694a3969e2d1b4c",
"index": 1772,
"step-1": "<mask token>\n\n\ndef estacionamiento_reserva(request, _id):\n _id = int(_id)\n try:\n estacionamiento = Estacionamiento.objects.get(id=_id)\n except ObjectDoesNotExist:\n raise Http404\n if estacionamiento.apertura is None:\n return HttpResponse(status=403)\n if request.method == 'GET':\n form = ReservaForm()\n elif request.method == 'POST':\n form = ReservaForm(request.POST)\n if form.is_valid():\n inicioReserva = form.cleaned_data['inicio']\n finalReserva = form.cleaned_data['final']\n m_validado = validarHorarioReserva(inicioReserva, finalReserva,\n estacionamiento.apertura, estacionamiento.cierre)\n if not m_validado[0]:\n return render(request, 'template-mensaje.html', {'color':\n 'red', 'mensaje': m_validado[1]})\n if marzullo(_id, inicioReserva, finalReserva):\n reservaFinal = Reserva(estacionamiento=estacionamiento,\n inicioReserva=inicioReserva, finalReserva=finalReserva)\n monto = Decimal(estacionamiento.tarifa.calcularPrecio(\n inicioReserva, finalReserva))\n request.session['monto'] = float(estacionamiento.tarifa.\n calcularPrecio(inicioReserva, finalReserva))\n request.session['finalReservaHora'] = finalReserva.hour\n request.session['finalReservaMinuto'] = finalReserva.minute\n request.session['inicioReservaHora'] = inicioReserva.hour\n request.session['inicioReservaMinuto'] = inicioReserva.minute\n request.session['anioinicial'] = inicioReserva.year\n request.session['mesinicial'] = inicioReserva.month\n request.session['diainicial'] = inicioReserva.day\n request.session['aniofinal'] = finalReserva.year\n request.session['mesfinal'] = finalReserva.month\n request.session['diafinal'] = finalReserva.day\n return render(request, 'confirmar.html', {'id': _id,\n 'monto': monto, 'reserva': reservaFinal, 'color':\n 'green', 'mensaje': 'Existe un puesto disponible'})\n else:\n return render(request, 'template-mensaje.html', {'color':\n 'red', 'mensaje':\n 'No hay un puesto disponible para ese horario'})\n return render(request, 'reserva.html', 
{'form': form, 'estacionamiento':\n estacionamiento})\n\n\ndef estacionamiento_pago(request, _id):\n form = PagoForm()\n try:\n estacionamiento = Estacionamiento.objects.get(id=_id)\n except ObjectDoesNotExist:\n raise Http404\n if estacionamiento.apertura is None:\n return HttpResponse(status=403)\n if request.method == 'POST':\n form = PagoForm(request.POST)\n if form.is_valid():\n inicioReserva = datetime(year=request.session['anioinicial'],\n month=request.session['mesinicial'], day=request.session[\n 'diainicial'], hour=request.session['inicioReservaHora'],\n minute=request.session['inicioReservaMinuto'])\n finalReserva = datetime(year=request.session['aniofinal'],\n month=request.session['mesfinal'], day=request.session[\n 'diafinal'], hour=request.session['finalReservaHora'],\n minute=request.session['finalReservaMinuto'])\n reservaFinal = Reserva(estacionamiento=estacionamiento,\n inicioReserva=inicioReserva, finalReserva=finalReserva)\n reservaFinal.save()\n monto = Decimal(request.session['monto']).quantize(Decimal('1.00'))\n pago = Pago(fechaTransaccion=datetime.now(), cedula=form.\n cleaned_data['cedula'], cedulaTipo=form.cleaned_data[\n 'cedulaTipo'], monto=monto, tarjetaTipo=form.cleaned_data[\n 'tarjetaTipo'], reserva=reservaFinal)\n pago.save()\n return render(request, 'pago.html', {'id': _id, 'pago': pago,\n 'color': 'green', 'mensaje':\n 'Se realizo el pago de reserva satisfactoriamente.'})\n return render(request, 'pago.html', {'form': form})\n\n\ndef estacionamiento_ingreso(request):\n form = RifForm()\n if request.method == 'POST':\n form = RifForm(request.POST)\n if form.is_valid():\n rif = form.cleaned_data['rif']\n listaIngresos, ingresoTotal = consultar_ingresos(rif)\n return render(request, 'consultar-ingreso.html', {\n 'ingresoTotal': ingresoTotal, 'listaIngresos':\n listaIngresos, 'form': form})\n return render(request, 'consultar-ingreso.html', {'form': form})\n\n\ndef estacionamiento_consulta_reserva(request):\n form = CedulaForm()\n 
if request.method == 'POST':\n form = CedulaForm(request.POST)\n if form.is_valid():\n cedula = form.cleaned_data['cedula']\n facturas = Pago.objects.filter(cedula=cedula)\n listaFacturas = []\n listaFacturas = sorted(list(facturas), key=lambda r: r.reserva.\n inicioReserva)\n return render(request, 'consultar-reservas.html', {\n 'listaFacturas': listaFacturas, 'form': form})\n return render(request, 'consultar-reservas.html', {'form': form})\n\n\ndef receive_sms(request):\n ip = get_client_ip(request)\n port = '8000'\n phone = request.GET.get('phone', False)\n sms = request.GET.get('text', False)\n if not sms or not phone:\n return HttpResponse(status=400)\n phone = urllib.parse.quote(str(phone))\n try:\n sms = sms.split(' ')\n id_sms = int(sms[0])\n inicio_reserva = sms[1] + ' ' + sms[2]\n final_reserva = sms[3] + ' ' + sms[4]\n inicio_reserva = parse_datetime(inicio_reserva)\n final_reserva = parse_datetime(final_reserva)\n except:\n return HttpResponse(status=400)\n try:\n estacionamiento = Estacionamiento.objects.get(id=id_sms)\n except ObjectDoesNotExist:\n text = 'No existe el estacionamiento ' + str(id_sms) + '.'\n text = urllib.parse.quote(str(text))\n urllib.request.urlopen(\n 'http://{0}:{1}/sendsms?phone={2}&text={3}&password='.format(ip,\n port, phone, text))\n return HttpResponse('No existe el estacionamiento ' + str(id_sms) + '.'\n )\n m_validado = validarHorarioReserva(inicio_reserva, final_reserva,\n estacionamiento.apertura, estacionamiento.cierre)\n if m_validado[0]:\n \"\"\"reserva_sms = Reserva(\n estacionamiento = estacionamiento,\n inicioReserva = inicio_reserva,\n finalReserva = final_reserva,\n )\n reserva_sms.save()\"\"\"\n text = 'Se realizó la reserva satisfactoriamente.'\n text = urllib.parse.quote(str(text))\n urllib.request.urlopen(\n 'http://{0}:{1}/sendsms?phone={2}&text={3}&password='.format(ip,\n port, phone, text))\n else:\n text = m_validado[1]\n text = urllib.parse.quote(str(text))\n urllib.request.urlopen(\n 
'http://{0}:{1}/sendsms?phone={2}&text={3}&password='.format(ip,\n port, phone, text))\n return HttpResponse(m_validado[1])\n return HttpResponse('')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef estacionamientos_all(request):\n estacionamientos = Estacionamiento.objects.all()\n if request.method == 'GET':\n form = EstacionamientoForm()\n elif request.method == 'POST':\n form = EstacionamientoForm(request.POST)\n if len(estacionamientos) >= 5:\n return render(request, 'template-mensaje.html', {'color': 'red',\n 'mensaje': 'No se pueden agregar más estacionamientos'})\n if form.is_valid():\n obj = Estacionamiento(propietario=form.cleaned_data[\n 'propietario'], nombre=form.cleaned_data['nombre'],\n direccion=form.cleaned_data['direccion'], rif=form.\n cleaned_data['rif'], telefono1=form.cleaned_data[\n 'telefono_1'], telefono2=form.cleaned_data['telefono_2'],\n telefono3=form.cleaned_data['telefono_3'], email1=form.\n cleaned_data['email_1'], email2=form.cleaned_data['email_2'])\n obj.save()\n estacionamientos = Estacionamiento.objects.all()\n form = EstacionamientoForm()\n return render(request, 'catalogo-estacionamientos.html', {'form': form,\n 'estacionamientos': estacionamientos})\n\n\n<mask token>\n\n\ndef estacionamiento_reserva(request, _id):\n _id = int(_id)\n try:\n estacionamiento = Estacionamiento.objects.get(id=_id)\n except ObjectDoesNotExist:\n raise Http404\n if estacionamiento.apertura is None:\n return HttpResponse(status=403)\n if request.method == 'GET':\n form = ReservaForm()\n elif request.method == 'POST':\n form = ReservaForm(request.POST)\n if form.is_valid():\n inicioReserva = form.cleaned_data['inicio']\n finalReserva = form.cleaned_data['final']\n m_validado = validarHorarioReserva(inicioReserva, finalReserva,\n estacionamiento.apertura, estacionamiento.cierre)\n if not m_validado[0]:\n return render(request, 'template-mensaje.html', {'color':\n 'red', 'mensaje': m_validado[1]})\n if marzullo(_id, inicioReserva, finalReserva):\n reservaFinal = Reserva(estacionamiento=estacionamiento,\n inicioReserva=inicioReserva, finalReserva=finalReserva)\n monto = Decimal(estacionamiento.tarifa.calcularPrecio(\n 
inicioReserva, finalReserva))\n request.session['monto'] = float(estacionamiento.tarifa.\n calcularPrecio(inicioReserva, finalReserva))\n request.session['finalReservaHora'] = finalReserva.hour\n request.session['finalReservaMinuto'] = finalReserva.minute\n request.session['inicioReservaHora'] = inicioReserva.hour\n request.session['inicioReservaMinuto'] = inicioReserva.minute\n request.session['anioinicial'] = inicioReserva.year\n request.session['mesinicial'] = inicioReserva.month\n request.session['diainicial'] = inicioReserva.day\n request.session['aniofinal'] = finalReserva.year\n request.session['mesfinal'] = finalReserva.month\n request.session['diafinal'] = finalReserva.day\n return render(request, 'confirmar.html', {'id': _id,\n 'monto': monto, 'reserva': reservaFinal, 'color':\n 'green', 'mensaje': 'Existe un puesto disponible'})\n else:\n return render(request, 'template-mensaje.html', {'color':\n 'red', 'mensaje':\n 'No hay un puesto disponible para ese horario'})\n return render(request, 'reserva.html', {'form': form, 'estacionamiento':\n estacionamiento})\n\n\ndef estacionamiento_pago(request, _id):\n form = PagoForm()\n try:\n estacionamiento = Estacionamiento.objects.get(id=_id)\n except ObjectDoesNotExist:\n raise Http404\n if estacionamiento.apertura is None:\n return HttpResponse(status=403)\n if request.method == 'POST':\n form = PagoForm(request.POST)\n if form.is_valid():\n inicioReserva = datetime(year=request.session['anioinicial'],\n month=request.session['mesinicial'], day=request.session[\n 'diainicial'], hour=request.session['inicioReservaHora'],\n minute=request.session['inicioReservaMinuto'])\n finalReserva = datetime(year=request.session['aniofinal'],\n month=request.session['mesfinal'], day=request.session[\n 'diafinal'], hour=request.session['finalReservaHora'],\n minute=request.session['finalReservaMinuto'])\n reservaFinal = Reserva(estacionamiento=estacionamiento,\n inicioReserva=inicioReserva, finalReserva=finalReserva)\n 
reservaFinal.save()\n monto = Decimal(request.session['monto']).quantize(Decimal('1.00'))\n pago = Pago(fechaTransaccion=datetime.now(), cedula=form.\n cleaned_data['cedula'], cedulaTipo=form.cleaned_data[\n 'cedulaTipo'], monto=monto, tarjetaTipo=form.cleaned_data[\n 'tarjetaTipo'], reserva=reservaFinal)\n pago.save()\n return render(request, 'pago.html', {'id': _id, 'pago': pago,\n 'color': 'green', 'mensaje':\n 'Se realizo el pago de reserva satisfactoriamente.'})\n return render(request, 'pago.html', {'form': form})\n\n\ndef estacionamiento_ingreso(request):\n form = RifForm()\n if request.method == 'POST':\n form = RifForm(request.POST)\n if form.is_valid():\n rif = form.cleaned_data['rif']\n listaIngresos, ingresoTotal = consultar_ingresos(rif)\n return render(request, 'consultar-ingreso.html', {\n 'ingresoTotal': ingresoTotal, 'listaIngresos':\n listaIngresos, 'form': form})\n return render(request, 'consultar-ingreso.html', {'form': form})\n\n\ndef estacionamiento_consulta_reserva(request):\n form = CedulaForm()\n if request.method == 'POST':\n form = CedulaForm(request.POST)\n if form.is_valid():\n cedula = form.cleaned_data['cedula']\n facturas = Pago.objects.filter(cedula=cedula)\n listaFacturas = []\n listaFacturas = sorted(list(facturas), key=lambda r: r.reserva.\n inicioReserva)\n return render(request, 'consultar-reservas.html', {\n 'listaFacturas': listaFacturas, 'form': form})\n return render(request, 'consultar-reservas.html', {'form': form})\n\n\ndef receive_sms(request):\n ip = get_client_ip(request)\n port = '8000'\n phone = request.GET.get('phone', False)\n sms = request.GET.get('text', False)\n if not sms or not phone:\n return HttpResponse(status=400)\n phone = urllib.parse.quote(str(phone))\n try:\n sms = sms.split(' ')\n id_sms = int(sms[0])\n inicio_reserva = sms[1] + ' ' + sms[2]\n final_reserva = sms[3] + ' ' + sms[4]\n inicio_reserva = parse_datetime(inicio_reserva)\n final_reserva = parse_datetime(final_reserva)\n except:\n return 
HttpResponse(status=400)\n try:\n estacionamiento = Estacionamiento.objects.get(id=id_sms)\n except ObjectDoesNotExist:\n text = 'No existe el estacionamiento ' + str(id_sms) + '.'\n text = urllib.parse.quote(str(text))\n urllib.request.urlopen(\n 'http://{0}:{1}/sendsms?phone={2}&text={3}&password='.format(ip,\n port, phone, text))\n return HttpResponse('No existe el estacionamiento ' + str(id_sms) + '.'\n )\n m_validado = validarHorarioReserva(inicio_reserva, final_reserva,\n estacionamiento.apertura, estacionamiento.cierre)\n if m_validado[0]:\n \"\"\"reserva_sms = Reserva(\n estacionamiento = estacionamiento,\n inicioReserva = inicio_reserva,\n finalReserva = final_reserva,\n )\n reserva_sms.save()\"\"\"\n text = 'Se realizó la reserva satisfactoriamente.'\n text = urllib.parse.quote(str(text))\n urllib.request.urlopen(\n 'http://{0}:{1}/sendsms?phone={2}&text={3}&password='.format(ip,\n port, phone, text))\n else:\n text = m_validado[1]\n text = urllib.parse.quote(str(text))\n urllib.request.urlopen(\n 'http://{0}:{1}/sendsms?phone={2}&text={3}&password='.format(ip,\n port, phone, text))\n return HttpResponse(m_validado[1])\n return HttpResponse('')\n\n\n<mask token>\n\n\ndef grafica_tasa_de_reservacion(request):\n try:\n datos_ocupacion = request.GET.dict()\n datos_ocupacion = OrderedDict(sorted((k, float(v)) for k, v in\n datos_ocupacion.items()))\n response = HttpResponse(content_type='image/png')\n except:\n return HttpResponse(status=400)\n if not datos_ocupacion:\n return HttpResponse(status=400)\n pyplot.switch_backend('Agg')\n pyplot.bar(range(len(datos_ocupacion)), datos_ocupacion.values(), hold=\n False, color='#6495ed')\n pyplot.ylim([0, 100])\n pyplot.title('Distribución de los porcentajes por fecha')\n pyplot.xticks(range(len(datos_ocupacion)), list(datos_ocupacion.keys()),\n rotation=20)\n pyplot.ylabel('Porcentaje (%)')\n pyplot.grid(True, 'major', 'both')\n pyplot.savefig(response, format='png')\n pyplot.close()\n return response\n",
"step-3": "<mask token>\n\n\ndef estacionamientos_all(request):\n estacionamientos = Estacionamiento.objects.all()\n if request.method == 'GET':\n form = EstacionamientoForm()\n elif request.method == 'POST':\n form = EstacionamientoForm(request.POST)\n if len(estacionamientos) >= 5:\n return render(request, 'template-mensaje.html', {'color': 'red',\n 'mensaje': 'No se pueden agregar más estacionamientos'})\n if form.is_valid():\n obj = Estacionamiento(propietario=form.cleaned_data[\n 'propietario'], nombre=form.cleaned_data['nombre'],\n direccion=form.cleaned_data['direccion'], rif=form.\n cleaned_data['rif'], telefono1=form.cleaned_data[\n 'telefono_1'], telefono2=form.cleaned_data['telefono_2'],\n telefono3=form.cleaned_data['telefono_3'], email1=form.\n cleaned_data['email_1'], email2=form.cleaned_data['email_2'])\n obj.save()\n estacionamientos = Estacionamiento.objects.all()\n form = EstacionamientoForm()\n return render(request, 'catalogo-estacionamientos.html', {'form': form,\n 'estacionamientos': estacionamientos})\n\n\ndef estacionamiento_detail(request, _id):\n _id = int(_id)\n try:\n estacionamiento = Estacionamiento.objects.get(id=_id)\n except ObjectDoesNotExist:\n raise Http404\n if request.method == 'GET':\n if estacionamiento.tarifa:\n form_data = {'horarioin': estacionamiento.apertura,\n 'horarioout': estacionamiento.cierre, 'tarifa':\n estacionamiento.tarifa.tarifa, 'tarifa2': estacionamiento.\n tarifa.tarifa2, 'inicioTarifa2': estacionamiento.tarifa.\n inicioEspecial, 'finTarifa2': estacionamiento.tarifa.\n finEspecial, 'puestos': estacionamiento.capacidad,\n 'esquema': estacionamiento.tarifa.__class__.__name__}\n form = EstacionamientoExtendedForm(data=form_data)\n else:\n form = EstacionamientoExtendedForm()\n elif request.method == 'POST':\n form = EstacionamientoExtendedForm(request.POST)\n if form.is_valid():\n horaIn = form.cleaned_data['horarioin']\n horaOut = form.cleaned_data['horarioout']\n tarifa = form.cleaned_data['tarifa']\n tipo = 
form.cleaned_data['esquema']\n inicioTarifa2 = form.cleaned_data['inicioTarifa2']\n finTarifa2 = form.cleaned_data['finTarifa2']\n tarifa2 = form.cleaned_data['tarifa2']\n esquemaTarifa = eval(tipo)(tarifa=tarifa, tarifa2=tarifa2,\n inicioEspecial=inicioTarifa2, finEspecial=finTarifa2)\n esquemaTarifa.save()\n if not HorarioEstacionamiento(horaIn, horaOut):\n return render(request, 'template-mensaje.html', {'color':\n 'red', 'mensaje':\n 'El horario de apertura debe ser menor al horario de cierre'\n })\n estacionamiento.tarifa = esquemaTarifa\n estacionamiento.apertura = horaIn\n estacionamiento.cierre = horaOut\n estacionamiento.capacidad = form.cleaned_data['puestos']\n estacionamiento.save()\n form = EstacionamientoExtendedForm()\n return render(request, 'detalle-estacionamiento.html', {'form': form,\n 'estacionamiento': estacionamiento})\n\n\ndef estacionamiento_reserva(request, _id):\n _id = int(_id)\n try:\n estacionamiento = Estacionamiento.objects.get(id=_id)\n except ObjectDoesNotExist:\n raise Http404\n if estacionamiento.apertura is None:\n return HttpResponse(status=403)\n if request.method == 'GET':\n form = ReservaForm()\n elif request.method == 'POST':\n form = ReservaForm(request.POST)\n if form.is_valid():\n inicioReserva = form.cleaned_data['inicio']\n finalReserva = form.cleaned_data['final']\n m_validado = validarHorarioReserva(inicioReserva, finalReserva,\n estacionamiento.apertura, estacionamiento.cierre)\n if not m_validado[0]:\n return render(request, 'template-mensaje.html', {'color':\n 'red', 'mensaje': m_validado[1]})\n if marzullo(_id, inicioReserva, finalReserva):\n reservaFinal = Reserva(estacionamiento=estacionamiento,\n inicioReserva=inicioReserva, finalReserva=finalReserva)\n monto = Decimal(estacionamiento.tarifa.calcularPrecio(\n inicioReserva, finalReserva))\n request.session['monto'] = float(estacionamiento.tarifa.\n calcularPrecio(inicioReserva, finalReserva))\n request.session['finalReservaHora'] = finalReserva.hour\n 
request.session['finalReservaMinuto'] = finalReserva.minute\n request.session['inicioReservaHora'] = inicioReserva.hour\n request.session['inicioReservaMinuto'] = inicioReserva.minute\n request.session['anioinicial'] = inicioReserva.year\n request.session['mesinicial'] = inicioReserva.month\n request.session['diainicial'] = inicioReserva.day\n request.session['aniofinal'] = finalReserva.year\n request.session['mesfinal'] = finalReserva.month\n request.session['diafinal'] = finalReserva.day\n return render(request, 'confirmar.html', {'id': _id,\n 'monto': monto, 'reserva': reservaFinal, 'color':\n 'green', 'mensaje': 'Existe un puesto disponible'})\n else:\n return render(request, 'template-mensaje.html', {'color':\n 'red', 'mensaje':\n 'No hay un puesto disponible para ese horario'})\n return render(request, 'reserva.html', {'form': form, 'estacionamiento':\n estacionamiento})\n\n\ndef estacionamiento_pago(request, _id):\n form = PagoForm()\n try:\n estacionamiento = Estacionamiento.objects.get(id=_id)\n except ObjectDoesNotExist:\n raise Http404\n if estacionamiento.apertura is None:\n return HttpResponse(status=403)\n if request.method == 'POST':\n form = PagoForm(request.POST)\n if form.is_valid():\n inicioReserva = datetime(year=request.session['anioinicial'],\n month=request.session['mesinicial'], day=request.session[\n 'diainicial'], hour=request.session['inicioReservaHora'],\n minute=request.session['inicioReservaMinuto'])\n finalReserva = datetime(year=request.session['aniofinal'],\n month=request.session['mesfinal'], day=request.session[\n 'diafinal'], hour=request.session['finalReservaHora'],\n minute=request.session['finalReservaMinuto'])\n reservaFinal = Reserva(estacionamiento=estacionamiento,\n inicioReserva=inicioReserva, finalReserva=finalReserva)\n reservaFinal.save()\n monto = Decimal(request.session['monto']).quantize(Decimal('1.00'))\n pago = Pago(fechaTransaccion=datetime.now(), cedula=form.\n cleaned_data['cedula'], 
cedulaTipo=form.cleaned_data[\n 'cedulaTipo'], monto=monto, tarjetaTipo=form.cleaned_data[\n 'tarjetaTipo'], reserva=reservaFinal)\n pago.save()\n return render(request, 'pago.html', {'id': _id, 'pago': pago,\n 'color': 'green', 'mensaje':\n 'Se realizo el pago de reserva satisfactoriamente.'})\n return render(request, 'pago.html', {'form': form})\n\n\ndef estacionamiento_ingreso(request):\n form = RifForm()\n if request.method == 'POST':\n form = RifForm(request.POST)\n if form.is_valid():\n rif = form.cleaned_data['rif']\n listaIngresos, ingresoTotal = consultar_ingresos(rif)\n return render(request, 'consultar-ingreso.html', {\n 'ingresoTotal': ingresoTotal, 'listaIngresos':\n listaIngresos, 'form': form})\n return render(request, 'consultar-ingreso.html', {'form': form})\n\n\ndef estacionamiento_consulta_reserva(request):\n form = CedulaForm()\n if request.method == 'POST':\n form = CedulaForm(request.POST)\n if form.is_valid():\n cedula = form.cleaned_data['cedula']\n facturas = Pago.objects.filter(cedula=cedula)\n listaFacturas = []\n listaFacturas = sorted(list(facturas), key=lambda r: r.reserva.\n inicioReserva)\n return render(request, 'consultar-reservas.html', {\n 'listaFacturas': listaFacturas, 'form': form})\n return render(request, 'consultar-reservas.html', {'form': form})\n\n\ndef receive_sms(request):\n ip = get_client_ip(request)\n port = '8000'\n phone = request.GET.get('phone', False)\n sms = request.GET.get('text', False)\n if not sms or not phone:\n return HttpResponse(status=400)\n phone = urllib.parse.quote(str(phone))\n try:\n sms = sms.split(' ')\n id_sms = int(sms[0])\n inicio_reserva = sms[1] + ' ' + sms[2]\n final_reserva = sms[3] + ' ' + sms[4]\n inicio_reserva = parse_datetime(inicio_reserva)\n final_reserva = parse_datetime(final_reserva)\n except:\n return HttpResponse(status=400)\n try:\n estacionamiento = Estacionamiento.objects.get(id=id_sms)\n except ObjectDoesNotExist:\n text = 'No existe el estacionamiento ' + str(id_sms) + 
'.'\n text = urllib.parse.quote(str(text))\n urllib.request.urlopen(\n 'http://{0}:{1}/sendsms?phone={2}&text={3}&password='.format(ip,\n port, phone, text))\n return HttpResponse('No existe el estacionamiento ' + str(id_sms) + '.'\n )\n m_validado = validarHorarioReserva(inicio_reserva, final_reserva,\n estacionamiento.apertura, estacionamiento.cierre)\n if m_validado[0]:\n \"\"\"reserva_sms = Reserva(\n estacionamiento = estacionamiento,\n inicioReserva = inicio_reserva,\n finalReserva = final_reserva,\n )\n reserva_sms.save()\"\"\"\n text = 'Se realizó la reserva satisfactoriamente.'\n text = urllib.parse.quote(str(text))\n urllib.request.urlopen(\n 'http://{0}:{1}/sendsms?phone={2}&text={3}&password='.format(ip,\n port, phone, text))\n else:\n text = m_validado[1]\n text = urllib.parse.quote(str(text))\n urllib.request.urlopen(\n 'http://{0}:{1}/sendsms?phone={2}&text={3}&password='.format(ip,\n port, phone, text))\n return HttpResponse(m_validado[1])\n return HttpResponse('')\n\n\ndef tasa_de_reservacion(request, _id):\n _id = int(_id)\n try:\n estacionamiento = Estacionamiento.objects.get(id=_id)\n except ObjectDoesNotExist:\n raise Http404\n if estacionamiento.apertura is None:\n return render(request, 'template-mensaje.html', {'color': 'red',\n 'mensaje': 'Se debe parametrizar el estacionamiento primero.'})\n ocupacion = tasa_reservaciones(_id)\n calcular_porcentaje_de_tasa(estacionamiento.apertura, estacionamiento.\n cierre, estacionamiento.capacidad, ocupacion)\n datos_ocupacion = urlencode(ocupacion)\n return render(request, 'tasa-reservacion.html', {'ocupacion': ocupacion,\n 'datos_ocupacion': datos_ocupacion})\n\n\ndef grafica_tasa_de_reservacion(request):\n try:\n datos_ocupacion = request.GET.dict()\n datos_ocupacion = OrderedDict(sorted((k, float(v)) for k, v in\n datos_ocupacion.items()))\n response = HttpResponse(content_type='image/png')\n except:\n return HttpResponse(status=400)\n if not datos_ocupacion:\n return HttpResponse(status=400)\n 
pyplot.switch_backend('Agg')\n pyplot.bar(range(len(datos_ocupacion)), datos_ocupacion.values(), hold=\n False, color='#6495ed')\n pyplot.ylim([0, 100])\n pyplot.title('Distribución de los porcentajes por fecha')\n pyplot.xticks(range(len(datos_ocupacion)), list(datos_ocupacion.keys()),\n rotation=20)\n pyplot.ylabel('Porcentaje (%)')\n pyplot.grid(True, 'major', 'both')\n pyplot.savefig(response, format='png')\n pyplot.close()\n return response\n",
"step-4": "from django.core.exceptions import ObjectDoesNotExist\nfrom django.shortcuts import render\nimport urllib\nfrom django.http import HttpResponse, Http404\nfrom django.utils.dateparse import parse_datetime\nfrom urllib.parse import urlencode\nfrom matplotlib import pyplot\nfrom decimal import Decimal\nfrom collections import OrderedDict\nfrom datetime import datetime\nfrom estacionamientos.controller import HorarioEstacionamiento, validarHorarioReserva, marzullo, get_client_ip, tasa_reservaciones, calcular_porcentaje_de_tasa, consultar_ingresos\nfrom estacionamientos.forms import EstacionamientoExtendedForm, EstacionamientoForm, ReservaForm, PagoForm, RifForm, CedulaForm\nfrom estacionamientos.models import Estacionamiento, Reserva, Pago, TarifaHora, TarifaMinuto, TarifaHorayFraccion, TarifaFinDeSemana, TarifaHoraPico\n\n\ndef estacionamientos_all(request):\n estacionamientos = Estacionamiento.objects.all()\n if request.method == 'GET':\n form = EstacionamientoForm()\n elif request.method == 'POST':\n form = EstacionamientoForm(request.POST)\n if len(estacionamientos) >= 5:\n return render(request, 'template-mensaje.html', {'color': 'red',\n 'mensaje': 'No se pueden agregar más estacionamientos'})\n if form.is_valid():\n obj = Estacionamiento(propietario=form.cleaned_data[\n 'propietario'], nombre=form.cleaned_data['nombre'],\n direccion=form.cleaned_data['direccion'], rif=form.\n cleaned_data['rif'], telefono1=form.cleaned_data[\n 'telefono_1'], telefono2=form.cleaned_data['telefono_2'],\n telefono3=form.cleaned_data['telefono_3'], email1=form.\n cleaned_data['email_1'], email2=form.cleaned_data['email_2'])\n obj.save()\n estacionamientos = Estacionamiento.objects.all()\n form = EstacionamientoForm()\n return render(request, 'catalogo-estacionamientos.html', {'form': form,\n 'estacionamientos': estacionamientos})\n\n\ndef estacionamiento_detail(request, _id):\n _id = int(_id)\n try:\n estacionamiento = Estacionamiento.objects.get(id=_id)\n except 
ObjectDoesNotExist:\n raise Http404\n if request.method == 'GET':\n if estacionamiento.tarifa:\n form_data = {'horarioin': estacionamiento.apertura,\n 'horarioout': estacionamiento.cierre, 'tarifa':\n estacionamiento.tarifa.tarifa, 'tarifa2': estacionamiento.\n tarifa.tarifa2, 'inicioTarifa2': estacionamiento.tarifa.\n inicioEspecial, 'finTarifa2': estacionamiento.tarifa.\n finEspecial, 'puestos': estacionamiento.capacidad,\n 'esquema': estacionamiento.tarifa.__class__.__name__}\n form = EstacionamientoExtendedForm(data=form_data)\n else:\n form = EstacionamientoExtendedForm()\n elif request.method == 'POST':\n form = EstacionamientoExtendedForm(request.POST)\n if form.is_valid():\n horaIn = form.cleaned_data['horarioin']\n horaOut = form.cleaned_data['horarioout']\n tarifa = form.cleaned_data['tarifa']\n tipo = form.cleaned_data['esquema']\n inicioTarifa2 = form.cleaned_data['inicioTarifa2']\n finTarifa2 = form.cleaned_data['finTarifa2']\n tarifa2 = form.cleaned_data['tarifa2']\n esquemaTarifa = eval(tipo)(tarifa=tarifa, tarifa2=tarifa2,\n inicioEspecial=inicioTarifa2, finEspecial=finTarifa2)\n esquemaTarifa.save()\n if not HorarioEstacionamiento(horaIn, horaOut):\n return render(request, 'template-mensaje.html', {'color':\n 'red', 'mensaje':\n 'El horario de apertura debe ser menor al horario de cierre'\n })\n estacionamiento.tarifa = esquemaTarifa\n estacionamiento.apertura = horaIn\n estacionamiento.cierre = horaOut\n estacionamiento.capacidad = form.cleaned_data['puestos']\n estacionamiento.save()\n form = EstacionamientoExtendedForm()\n return render(request, 'detalle-estacionamiento.html', {'form': form,\n 'estacionamiento': estacionamiento})\n\n\ndef estacionamiento_reserva(request, _id):\n _id = int(_id)\n try:\n estacionamiento = Estacionamiento.objects.get(id=_id)\n except ObjectDoesNotExist:\n raise Http404\n if estacionamiento.apertura is None:\n return HttpResponse(status=403)\n if request.method == 'GET':\n form = ReservaForm()\n elif request.method 
== 'POST':\n form = ReservaForm(request.POST)\n if form.is_valid():\n inicioReserva = form.cleaned_data['inicio']\n finalReserva = form.cleaned_data['final']\n m_validado = validarHorarioReserva(inicioReserva, finalReserva,\n estacionamiento.apertura, estacionamiento.cierre)\n if not m_validado[0]:\n return render(request, 'template-mensaje.html', {'color':\n 'red', 'mensaje': m_validado[1]})\n if marzullo(_id, inicioReserva, finalReserva):\n reservaFinal = Reserva(estacionamiento=estacionamiento,\n inicioReserva=inicioReserva, finalReserva=finalReserva)\n monto = Decimal(estacionamiento.tarifa.calcularPrecio(\n inicioReserva, finalReserva))\n request.session['monto'] = float(estacionamiento.tarifa.\n calcularPrecio(inicioReserva, finalReserva))\n request.session['finalReservaHora'] = finalReserva.hour\n request.session['finalReservaMinuto'] = finalReserva.minute\n request.session['inicioReservaHora'] = inicioReserva.hour\n request.session['inicioReservaMinuto'] = inicioReserva.minute\n request.session['anioinicial'] = inicioReserva.year\n request.session['mesinicial'] = inicioReserva.month\n request.session['diainicial'] = inicioReserva.day\n request.session['aniofinal'] = finalReserva.year\n request.session['mesfinal'] = finalReserva.month\n request.session['diafinal'] = finalReserva.day\n return render(request, 'confirmar.html', {'id': _id,\n 'monto': monto, 'reserva': reservaFinal, 'color':\n 'green', 'mensaje': 'Existe un puesto disponible'})\n else:\n return render(request, 'template-mensaje.html', {'color':\n 'red', 'mensaje':\n 'No hay un puesto disponible para ese horario'})\n return render(request, 'reserva.html', {'form': form, 'estacionamiento':\n estacionamiento})\n\n\ndef estacionamiento_pago(request, _id):\n form = PagoForm()\n try:\n estacionamiento = Estacionamiento.objects.get(id=_id)\n except ObjectDoesNotExist:\n raise Http404\n if estacionamiento.apertura is None:\n return HttpResponse(status=403)\n if request.method == 'POST':\n form = 
PagoForm(request.POST)\n if form.is_valid():\n inicioReserva = datetime(year=request.session['anioinicial'],\n month=request.session['mesinicial'], day=request.session[\n 'diainicial'], hour=request.session['inicioReservaHora'],\n minute=request.session['inicioReservaMinuto'])\n finalReserva = datetime(year=request.session['aniofinal'],\n month=request.session['mesfinal'], day=request.session[\n 'diafinal'], hour=request.session['finalReservaHora'],\n minute=request.session['finalReservaMinuto'])\n reservaFinal = Reserva(estacionamiento=estacionamiento,\n inicioReserva=inicioReserva, finalReserva=finalReserva)\n reservaFinal.save()\n monto = Decimal(request.session['monto']).quantize(Decimal('1.00'))\n pago = Pago(fechaTransaccion=datetime.now(), cedula=form.\n cleaned_data['cedula'], cedulaTipo=form.cleaned_data[\n 'cedulaTipo'], monto=monto, tarjetaTipo=form.cleaned_data[\n 'tarjetaTipo'], reserva=reservaFinal)\n pago.save()\n return render(request, 'pago.html', {'id': _id, 'pago': pago,\n 'color': 'green', 'mensaje':\n 'Se realizo el pago de reserva satisfactoriamente.'})\n return render(request, 'pago.html', {'form': form})\n\n\ndef estacionamiento_ingreso(request):\n form = RifForm()\n if request.method == 'POST':\n form = RifForm(request.POST)\n if form.is_valid():\n rif = form.cleaned_data['rif']\n listaIngresos, ingresoTotal = consultar_ingresos(rif)\n return render(request, 'consultar-ingreso.html', {\n 'ingresoTotal': ingresoTotal, 'listaIngresos':\n listaIngresos, 'form': form})\n return render(request, 'consultar-ingreso.html', {'form': form})\n\n\ndef estacionamiento_consulta_reserva(request):\n form = CedulaForm()\n if request.method == 'POST':\n form = CedulaForm(request.POST)\n if form.is_valid():\n cedula = form.cleaned_data['cedula']\n facturas = Pago.objects.filter(cedula=cedula)\n listaFacturas = []\n listaFacturas = sorted(list(facturas), key=lambda r: r.reserva.\n inicioReserva)\n return render(request, 'consultar-reservas.html', {\n 
'listaFacturas': listaFacturas, 'form': form})\n return render(request, 'consultar-reservas.html', {'form': form})\n\n\ndef receive_sms(request):\n ip = get_client_ip(request)\n port = '8000'\n phone = request.GET.get('phone', False)\n sms = request.GET.get('text', False)\n if not sms or not phone:\n return HttpResponse(status=400)\n phone = urllib.parse.quote(str(phone))\n try:\n sms = sms.split(' ')\n id_sms = int(sms[0])\n inicio_reserva = sms[1] + ' ' + sms[2]\n final_reserva = sms[3] + ' ' + sms[4]\n inicio_reserva = parse_datetime(inicio_reserva)\n final_reserva = parse_datetime(final_reserva)\n except:\n return HttpResponse(status=400)\n try:\n estacionamiento = Estacionamiento.objects.get(id=id_sms)\n except ObjectDoesNotExist:\n text = 'No existe el estacionamiento ' + str(id_sms) + '.'\n text = urllib.parse.quote(str(text))\n urllib.request.urlopen(\n 'http://{0}:{1}/sendsms?phone={2}&text={3}&password='.format(ip,\n port, phone, text))\n return HttpResponse('No existe el estacionamiento ' + str(id_sms) + '.'\n )\n m_validado = validarHorarioReserva(inicio_reserva, final_reserva,\n estacionamiento.apertura, estacionamiento.cierre)\n if m_validado[0]:\n \"\"\"reserva_sms = Reserva(\n estacionamiento = estacionamiento,\n inicioReserva = inicio_reserva,\n finalReserva = final_reserva,\n )\n reserva_sms.save()\"\"\"\n text = 'Se realizó la reserva satisfactoriamente.'\n text = urllib.parse.quote(str(text))\n urllib.request.urlopen(\n 'http://{0}:{1}/sendsms?phone={2}&text={3}&password='.format(ip,\n port, phone, text))\n else:\n text = m_validado[1]\n text = urllib.parse.quote(str(text))\n urllib.request.urlopen(\n 'http://{0}:{1}/sendsms?phone={2}&text={3}&password='.format(ip,\n port, phone, text))\n return HttpResponse(m_validado[1])\n return HttpResponse('')\n\n\ndef tasa_de_reservacion(request, _id):\n _id = int(_id)\n try:\n estacionamiento = Estacionamiento.objects.get(id=_id)\n except ObjectDoesNotExist:\n raise Http404\n if estacionamiento.apertura 
is None:\n return render(request, 'template-mensaje.html', {'color': 'red',\n 'mensaje': 'Se debe parametrizar el estacionamiento primero.'})\n ocupacion = tasa_reservaciones(_id)\n calcular_porcentaje_de_tasa(estacionamiento.apertura, estacionamiento.\n cierre, estacionamiento.capacidad, ocupacion)\n datos_ocupacion = urlencode(ocupacion)\n return render(request, 'tasa-reservacion.html', {'ocupacion': ocupacion,\n 'datos_ocupacion': datos_ocupacion})\n\n\ndef grafica_tasa_de_reservacion(request):\n try:\n datos_ocupacion = request.GET.dict()\n datos_ocupacion = OrderedDict(sorted((k, float(v)) for k, v in\n datos_ocupacion.items()))\n response = HttpResponse(content_type='image/png')\n except:\n return HttpResponse(status=400)\n if not datos_ocupacion:\n return HttpResponse(status=400)\n pyplot.switch_backend('Agg')\n pyplot.bar(range(len(datos_ocupacion)), datos_ocupacion.values(), hold=\n False, color='#6495ed')\n pyplot.ylim([0, 100])\n pyplot.title('Distribución de los porcentajes por fecha')\n pyplot.xticks(range(len(datos_ocupacion)), list(datos_ocupacion.keys()),\n rotation=20)\n pyplot.ylabel('Porcentaje (%)')\n pyplot.grid(True, 'major', 'both')\n pyplot.savefig(response, format='png')\n pyplot.close()\n return response\n",
"step-5": "# -*- coding: utf-8 -*-\n\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.shortcuts import render\nimport urllib\nfrom django.http import HttpResponse, Http404\nfrom django.utils.dateparse import parse_datetime\nfrom urllib.parse import urlencode\nfrom matplotlib import pyplot\nfrom decimal import Decimal\nfrom collections import OrderedDict\n\nfrom datetime import (\n datetime,\n)\n\nfrom estacionamientos.controller import (\n HorarioEstacionamiento,\n validarHorarioReserva,\n marzullo,\n get_client_ip,\n tasa_reservaciones,\n calcular_porcentaje_de_tasa,\n consultar_ingresos,\n)\n\nfrom estacionamientos.forms import (\n EstacionamientoExtendedForm,\n EstacionamientoForm,\n ReservaForm,\n PagoForm,\n RifForm,\n CedulaForm,\n)\nfrom estacionamientos.models import (\n Estacionamiento,\n Reserva,\n Pago,\n TarifaHora,\n TarifaMinuto,\n TarifaHorayFraccion,\n TarifaFinDeSemana,\n TarifaHoraPico\n)\n\n# Usamos esta vista para procesar todos los estacionamientos\ndef estacionamientos_all(request):\n estacionamientos = Estacionamiento.objects.all()\n\n # Si es un GET, mandamos un formulario vacio\n if request.method == 'GET':\n form = EstacionamientoForm()\n\n # Si es POST, se verifica la información recibida\n elif request.method == 'POST':\n # Creamos un formulario con los datos que recibimos\n form = EstacionamientoForm(request.POST)\n\n # Parte de la entrega era limitar la cantidad maxima de\n # estacionamientos a 5\n if len(estacionamientos) >= 5:\n return render(\n request, 'template-mensaje.html',\n { 'color' : 'red'\n , 'mensaje' : 'No se pueden agregar más estacionamientos'\n }\n )\n\n # Si el formulario es valido, entonces creamos un objeto con\n # el constructor del modelo\n if form.is_valid():\n obj = Estacionamiento(\n propietario = form.cleaned_data['propietario'],\n nombre = form.cleaned_data['nombre'],\n direccion = form.cleaned_data['direccion'],\n rif = form.cleaned_data['rif'],\n telefono1 = 
form.cleaned_data['telefono_1'],\n telefono2 = form.cleaned_data['telefono_2'],\n telefono3 = form.cleaned_data['telefono_3'],\n email1 = form.cleaned_data['email_1'],\n email2 = form.cleaned_data['email_2']\n )\n obj.save()\n # Recargamos los estacionamientos ya que acabamos de agregar\n estacionamientos = Estacionamiento.objects.all()\n form = EstacionamientoForm()\n\n return render(\n request,\n 'catalogo-estacionamientos.html',\n { 'form': form\n , 'estacionamientos': estacionamientos\n }\n )\n\ndef estacionamiento_detail(request, _id):\n _id = int(_id)\n # Verificamos que el objeto exista antes de continuar\n try:\n estacionamiento = Estacionamiento.objects.get(id = _id)\n except ObjectDoesNotExist:\n raise Http404\n\n if request.method == 'GET':\n if estacionamiento.tarifa:\n \n form_data = {\n 'horarioin' : estacionamiento.apertura,\n 'horarioout' : estacionamiento.cierre,\n 'tarifa' : estacionamiento.tarifa.tarifa,\n 'tarifa2' : estacionamiento.tarifa.tarifa2,\n 'inicioTarifa2' : estacionamiento.tarifa.inicioEspecial,\n 'finTarifa2' : estacionamiento.tarifa.finEspecial,\n 'puestos' : estacionamiento.capacidad,\n 'esquema' : estacionamiento.tarifa.__class__.__name__\n }\n form = EstacionamientoExtendedForm(data = form_data)\n else:\n form = EstacionamientoExtendedForm()\n\n elif request.method == 'POST':\n # Leemos el formulario\n form = EstacionamientoExtendedForm(request.POST)\n # Si el formulario\n if form.is_valid():\n horaIn = form.cleaned_data['horarioin']\n horaOut = form.cleaned_data['horarioout']\n tarifa = form.cleaned_data['tarifa']\n tipo = form.cleaned_data['esquema']\n inicioTarifa2 = form.cleaned_data['inicioTarifa2']\n finTarifa2 = form.cleaned_data['finTarifa2']\n tarifa2 = form.cleaned_data['tarifa2']\n\n esquemaTarifa = eval(tipo)(\n tarifa = tarifa,\n tarifa2 = tarifa2,\n inicioEspecial = inicioTarifa2,\n finEspecial = finTarifa2\n )\n\n esquemaTarifa.save()\n # debería funcionar con excepciones, y el mensaje debe ser mostrado\n # en el 
mismo formulario\n if not HorarioEstacionamiento(horaIn, horaOut):\n return render(\n request,\n 'template-mensaje.html',\n { 'color':'red'\n , 'mensaje': 'El horario de apertura debe ser menor al horario de cierre'\n }\n )\n # debería funcionar con excepciones\n estacionamiento.tarifa = esquemaTarifa\n estacionamiento.apertura = horaIn\n estacionamiento.cierre = horaOut\n estacionamiento.capacidad = form.cleaned_data['puestos']\n\n estacionamiento.save()\n form = EstacionamientoExtendedForm()\n\n return render(\n request,\n 'detalle-estacionamiento.html',\n { 'form': form\n , 'estacionamiento': estacionamiento\n }\n )\n\n\ndef estacionamiento_reserva(request, _id):\n _id = int(_id)\n # Verificamos que el objeto exista antes de continuar\n try:\n estacionamiento = Estacionamiento.objects.get(id = _id)\n except ObjectDoesNotExist:\n raise Http404\n\n # Verificamos que el estacionamiento este parametrizado\n if (estacionamiento.apertura is None):\n return HttpResponse(status = 403) # Esta prohibido entrar aun\n\n # Si se hace un GET renderizamos los estacionamientos con su formulario\n if request.method == 'GET':\n form = ReservaForm()\n\n # Si es un POST estan mandando un request\n elif request.method == 'POST':\n form = ReservaForm(request.POST)\n # Verificamos si es valido con los validadores del formulario\n if form.is_valid():\n\n inicioReserva = form.cleaned_data['inicio']\n finalReserva = form.cleaned_data['final']\n\n # debería funcionar con excepciones, y el mensaje debe ser mostrado\n # en el mismo formulario\n m_validado = validarHorarioReserva(\n inicioReserva,\n finalReserva,\n estacionamiento.apertura,\n estacionamiento.cierre,\n )\n\n # Si no es valido devolvemos el request\n if not m_validado[0]:\n return render(\n request,\n 'template-mensaje.html',\n { 'color' :'red'\n , 'mensaje': m_validado[1]\n }\n )\n\n if marzullo(_id, inicioReserva, finalReserva):\n reservaFinal = Reserva(\n estacionamiento = estacionamiento,\n inicioReserva = inicioReserva,\n 
finalReserva = finalReserva,\n )\n\n monto = Decimal(\n estacionamiento.tarifa.calcularPrecio(\n inicioReserva,finalReserva\n )\n )\n\n request.session['monto'] = float(\n estacionamiento.tarifa.calcularPrecio(\n inicioReserva,\n finalReserva\n )\n )\n request.session['finalReservaHora'] = finalReserva.hour\n request.session['finalReservaMinuto'] = finalReserva.minute\n request.session['inicioReservaHora'] = inicioReserva.hour\n request.session['inicioReservaMinuto'] = inicioReserva.minute\n request.session['anioinicial'] = inicioReserva.year\n request.session['mesinicial'] = inicioReserva.month\n request.session['diainicial'] = inicioReserva.day\n request.session['aniofinal'] = finalReserva.year\n request.session['mesfinal'] = finalReserva.month\n request.session['diafinal'] = finalReserva.day\n return render(\n request,\n 'confirmar.html',\n { 'id' : _id\n , 'monto' : monto\n , 'reserva' : reservaFinal\n , 'color' : 'green'\n , 'mensaje' : 'Existe un puesto disponible'\n }\n )\n else:\n # Cambiar mensaje\n return render(\n request,\n 'template-mensaje.html',\n {'color' : 'red'\n , 'mensaje' : 'No hay un puesto disponible para ese horario'\n }\n )\n\n return render(\n request,\n 'reserva.html',\n { 'form': form\n , 'estacionamiento': estacionamiento\n }\n )\n\ndef estacionamiento_pago(request,_id):\n form = PagoForm()\n \n try:\n estacionamiento = Estacionamiento.objects.get(id = _id)\n except ObjectDoesNotExist:\n raise Http404\n \n if (estacionamiento.apertura is None):\n return HttpResponse(status = 403) # No esta permitido acceder a esta vista aun\n \n if request.method == 'POST':\n form = PagoForm(request.POST)\n if form.is_valid():\n \n inicioReserva = datetime(\n year = request.session['anioinicial'],\n month = request.session['mesinicial'],\n day = request.session['diainicial'],\n hour = request.session['inicioReservaHora'],\n minute = request.session['inicioReservaMinuto']\n )\n\n finalReserva = datetime(\n year = request.session['aniofinal'],\n month = 
request.session['mesfinal'],\n day = request.session['diafinal'],\n hour = request.session['finalReservaHora'],\n minute = request.session['finalReservaMinuto']\n )\n\n reservaFinal = Reserva(\n estacionamiento = estacionamiento,\n inicioReserva = inicioReserva,\n finalReserva = finalReserva,\n )\n\n # Se guarda la reserva en la base de datos\n reservaFinal.save()\n\n monto = Decimal(request.session['monto']).quantize(Decimal('1.00'))\n pago = Pago(\n fechaTransaccion = datetime.now(),\n cedula = form.cleaned_data['cedula'],\n cedulaTipo = form.cleaned_data['cedulaTipo'],\n monto = monto,\n tarjetaTipo = form.cleaned_data['tarjetaTipo'],\n reserva = reservaFinal,\n )\n\n\n # Se guarda el recibo de pago en la base de datos\n pago.save()\n\n return render(\n request,\n 'pago.html',\n { \"id\" : _id\n , \"pago\" : pago\n , \"color\" : \"green\"\n , 'mensaje' : \"Se realizo el pago de reserva satisfactoriamente.\"\n }\n )\n\n return render(\n request,\n 'pago.html',\n { 'form' : form }\n )\n\ndef estacionamiento_ingreso(request):\n form = RifForm()\n if request.method == 'POST':\n form = RifForm(request.POST)\n if form.is_valid():\n\n rif = form.cleaned_data['rif']\n listaIngresos, ingresoTotal = consultar_ingresos(rif)\n\n return render(\n request,\n 'consultar-ingreso.html',\n { \"ingresoTotal\" : ingresoTotal\n , \"listaIngresos\" : listaIngresos\n , \"form\" : form\n }\n )\n\n return render(\n request,\n 'consultar-ingreso.html',\n { \"form\" : form }\n )\n\ndef estacionamiento_consulta_reserva(request):\n form = CedulaForm()\n if request.method == 'POST':\n form = CedulaForm(request.POST)\n if form.is_valid():\n\n cedula = form.cleaned_data['cedula']\n facturas = Pago.objects.filter(cedula = cedula)\n listaFacturas = []\n\n listaFacturas = sorted(\n list(facturas),\n key = lambda r: r.reserva.inicioReserva\n )\n return render(\n request,\n 'consultar-reservas.html',\n { \"listaFacturas\" : listaFacturas\n , \"form\" : form\n }\n )\n return render(\n request,\n 
'consultar-reservas.html',\n { \"form\" : form }\n )\n\ndef receive_sms(request):\n ip = get_client_ip(request) # Busca el IP del telefono donde esta montado el SMS Gateway\n port = '8000' # Puerto del telefono donde esta montado el SMS Gateway\n phone = request.GET.get('phone', False)\n sms = request.GET.get('text', False)\n if (not sms or not phone):\n return HttpResponse(status=400) # Bad request\n \n phone = urllib.parse.quote(str(phone)) # Codificacion porcentaje del numero de telefono recibido\n \n # Tratamiento del texto recibido\n try:\n sms = sms.split(' ')\n id_sms = int(sms[0])\n inicio_reserva = sms[1] + ' ' + sms[2]\n final_reserva = sms[3] + ' ' + sms[4]\n inicio_reserva = parse_datetime(inicio_reserva)\n final_reserva = parse_datetime(final_reserva)\n except:\n return HttpResponse(status=400) # Bad request\n \n # Validacion del id de estacionamiento recibido por SMS\n try:\n estacionamiento = Estacionamiento.objects.get(id = id_sms)\n except ObjectDoesNotExist:\n text = 'No existe el estacionamiento ' + str(id_sms) + '.'\n text = urllib.parse.quote(str(text))\n urllib.request.urlopen('http://{0}:{1}/sendsms?phone={2}&text={3}&password='.format(ip, port, phone, text))\n return HttpResponse('No existe el estacionamiento ' + str(id_sms) + '.')\n \n # Validacion de las dos fechas recibidas por SMS\n m_validado = validarHorarioReserva(\n inicio_reserva,\n final_reserva,\n estacionamiento.apertura,\n estacionamiento.cierre,\n )\n if m_validado[0]:\n '''reserva_sms = Reserva(\n estacionamiento = estacionamiento,\n inicioReserva = inicio_reserva,\n finalReserva = final_reserva,\n )\n reserva_sms.save()'''\n text = 'Se realizó la reserva satisfactoriamente.'\n text = urllib.parse.quote(str(text))\n urllib.request.urlopen('http://{0}:{1}/sendsms?phone={2}&text={3}&password='.format(ip, port, phone, text))\n else:\n text = m_validado[1]\n text = urllib.parse.quote(str(text))\n 
urllib.request.urlopen('http://{0}:{1}/sendsms?phone={2}&text={3}&password='.format(ip, port, phone, text))\n return HttpResponse(m_validado[1])\n \n return HttpResponse('')\n \ndef tasa_de_reservacion(request, _id):\n _id = int(_id)\n # Verificamos que el objeto exista antes de continuar\n try:\n estacionamiento = Estacionamiento.objects.get(id = _id)\n except ObjectDoesNotExist:\n raise Http404\n if (estacionamiento.apertura is None):\n return render(\n request, 'template-mensaje.html',\n { 'color' : 'red'\n , 'mensaje' : 'Se debe parametrizar el estacionamiento primero.'\n }\n )\n ocupacion = tasa_reservaciones(_id)\n calcular_porcentaje_de_tasa(estacionamiento.apertura, estacionamiento.cierre, estacionamiento.capacidad, ocupacion)\n datos_ocupacion = urlencode(ocupacion) # Se convierten los datos del diccionario en el formato key1=value1&key2=value2&...\n return render(\n request,\n 'tasa-reservacion.html',\n { \"ocupacion\" : ocupacion\n , \"datos_ocupacion\": datos_ocupacion\n }\n )\n\ndef grafica_tasa_de_reservacion(request):\n \n # Recuperacion del diccionario para crear el grafico\n try:\n datos_ocupacion = request.GET.dict()\n datos_ocupacion = OrderedDict(sorted((k, float(v)) for k, v in datos_ocupacion.items())) \n response = HttpResponse(content_type='image/png')\n except:\n return HttpResponse(status=400) # Bad request\n \n # Si el request no viene con algun diccionario\n if (not datos_ocupacion):\n return HttpResponse(status=400) # Bad request\n \n # Configuracion y creacion del grafico de barras con la biblioteca pyplot\n pyplot.switch_backend('Agg') # Para que no use Tk y aparezcan problemas con hilos\n pyplot.bar(range(len(datos_ocupacion)), datos_ocupacion.values(), hold = False, color = '#6495ed')\n pyplot.ylim([0,100])\n pyplot.title('Distribución de los porcentajes por fecha')\n pyplot.xticks(range(len(datos_ocupacion)), list(datos_ocupacion.keys()), rotation=20)\n pyplot.ylabel('Porcentaje (%)')\n pyplot.grid(True, 'major', 'both')\n 
pyplot.savefig(response, format='png') # Guarda la imagen creada en el HttpResponse creado\n pyplot.close()\n \n return response\n",
"step-ids": [
5,
7,
9,
10,
11
]
}
|
[
5,
7,
9,
10,
11
] |
import cv2
import glob
import numpy as np
import csv
import matplotlib.pyplot as plt
from pydarknet import Detector,Image
def getAP(prec, rec):
    """Compute the average precision (AP) from precision/recall curves.

    `prec` and `rec` are parallel sequences as produced by
    getPrecisionRecall. The precision curve is smoothed (each point takes
    the best precision at any recall >= that point) and sampled at 101
    evenly spaced recall thresholds (0.00, 0.01, ..., 1.00).

    Returns a tuple (smoothprec, ap) where `smoothprec` is the 101-point
    interpolated precision curve and `ap` is its mean, the AP value.
    """
    # Sentinel 0.0 so the tail max below always has at least one element.
    prec0 = list(prec) + [0.0]
    smoothprec = np.zeros(101)  # smoothed, 101-pt interpolated curve
    for idx in range(101):
        threshold = idx / 100.0
        val = 0
        # Find the first index whose recall reaches the threshold; the
        # smoothed precision is the best precision from that point on.
        # Equivalent to the original back-to-front scan (the last
        # assignment there was the smallest qualifying index), but O(n)
        # per threshold instead of O(n^2).
        for j in range(len(rec)):
            if rec[j] >= threshold:
                val = max(prec0[j:])
                break
        smoothprec[idx] = val
    # Quick 101-pt interpolation: AP is the mean of the sampled curve.
    ap = np.mean(smoothprec)
    return (smoothprec, ap)
def getIntersection(a, b):
    """Return the overlap area (in pixels) of two axis-aligned boxes.

    Both `a` and `b` are in the format (x1, y1, x2, y2), where (x1, y1)
    is the upmost-left corner and (x2, y2) the downmost-right corner.

    Returns 0 when the boxes do not overlap (touching edges count as an
    overlap of area 0, matching the original's `<=` comparisons).
    """
    # The intersection rectangle is bounded by the larger of the two
    # lower bounds and the smaller of the two upper bounds on each axis.
    left = max(a[0], b[0])
    bottom = max(a[1], b[1])
    right = min(a[2], b[2])
    top = min(a[3], b[3])
    # Empty overlap on either axis -> no intersection. (The original's
    # "up" edge check compared x-coordinates b[0]/a[0] against the
    # y-coordinates a[3]/b[3], wrongly reporting 0 for some genuinely
    # overlapping boxes; this formulation fixes that.)
    if right < left or top < bottom:
        return 0
    return (right - left) * (top - bottom)
def getIoU(a, b):
    """Compute the Intersection over Union (IoU) of two boxes.

    Both `a` and `b` are in the format (x1, y1, x2, y2): the upmost-left
    and downmost-right corners. Returns a single IoU value.
    """
    box_a = np.array(a, np.float32)
    box_b = np.array(b, np.float32)
    inter = getIntersection(box_a, box_b)
    area_a = (box_a[2] - box_a[0]) * (box_a[3] - box_a[1])
    area_b = (box_b[2] - box_b[0]) * (box_b[3] - box_b[1])
    # Union of the two boxes; the overlap is only subtracted once when it
    # is actually positive.
    union = area_a + area_b
    if inter > 0:
        union = union - inter
    return inter / union
def getPrecisionRecall(plist, positives):
    """Build precision and recall curves from ordered predictions.

    `plist` contains only "TP" and "FP" entries, ordered by prediction
    score; `positives` is the total number of ground-truth positives.

    Returns a tuple of lists (precisions, recalls), one entry per
    prediction.
    """
    true_pos = 0
    false_pos = 0
    precisions = []
    recalls = []
    for label in plist:
        if label == "TP":
            true_pos += 1
        elif label == "FP":
            false_pos += 1
        # Running precision/recall after each successive prediction.
        precisions.append(true_pos / (true_pos + false_pos))
        recalls.append(true_pos / positives)
    return (precisions, recalls)
def readResults(filename):
    """Read a comma-separated results file and return its rows.

    Each row is returned as a list of string fields, in file order.
    """
    with open(filename) as handle:
        return [record for record in csv.reader(handle, delimiter=',')]
def relativeToAbsolute(x, y, w, h, o_x, o_y):
    """Convert relative box coordinates to absolute pixel coordinates.

    ``x``/``y`` are the box position and ``w``/``h`` its size, all
    relative (fractions of the image); ``o_x``/``o_y`` are the original
    image width and height.

    Returns:
        A tuple ``(abs_x, abs_y, abs_w, abs_h)`` of floats.
    """
    scale_x = float(o_x)
    scale_y = float(o_y)
    return (float(x) * scale_x,
            float(y) * scale_y,
            float(w) * scale_x,
            float(h) * scale_y)
|
normal
|
{
"blob_id": "f8a31cdf5f55b5aed33a407d2c008ba9b969d655",
"index": 9493,
"step-1": "<mask token>\n\n\ndef getIntersection(a, b):\n intersection = [0, 0, 0, 0]\n if b[0] <= a[0] and a[0] <= b[2]:\n intersection[0] = a[0]\n elif a[0] <= b[0] and b[0] <= a[2]:\n intersection[0] = b[0]\n else:\n return 0\n if b[1] <= a[1] and a[1] <= b[3]:\n intersection[1] = a[1]\n elif a[1] <= b[1] and b[1] <= a[3]:\n intersection[1] = b[1]\n else:\n return 0\n if b[0] <= a[2] and a[2] <= b[2]:\n intersection[2] = a[2]\n elif a[0] <= b[2] and b[2] <= a[2]:\n intersection[2] = b[2]\n else:\n return 0\n if b[0] <= a[3] and a[3] <= b[3]:\n intersection[3] = a[3]\n elif a[0] <= b[3] and b[3] <= a[3]:\n intersection[3] = b[3]\n else:\n return 0\n i1 = intersection[3] - intersection[1]\n i2 = intersection[2] - intersection[0]\n i = i1 * i2\n return i\n\n\n<mask token>\n\n\ndef readResults(filename):\n file = []\n with open(filename) as csvfile:\n reader = csv.reader(csvfile, delimiter=',')\n for row in reader:\n file.append(row)\n return file\n\n\n<mask token>\n\n\ndef relativeToAbsolute(x, y, w, h, o_x, o_y):\n n_x = float(x) * float(o_x)\n n_y = float(y) * float(o_y)\n n_w = float(w) * float(o_x)\n n_h = float(h) * float(o_y)\n return n_x, n_y, n_w, n_h\n",
"step-2": "<mask token>\n\n\ndef getAP(prec, rec):\n prec0 = prec.copy()\n prec0.append(0.0)\n smoothprec = np.zeros(101)\n for idx in range(101):\n i = (100 - idx) / 100.0\n val = 0\n for re_idx in range(len(rec)):\n re_i = len(rec) - re_idx - 1\n if rec[re_i] >= i:\n val = max(prec0[re_i:])\n smoothprec[100 - idx] = val\n ap = np.mean(smoothprec)\n return smoothprec, ap\n\n\n<mask token>\n\n\ndef getIntersection(a, b):\n intersection = [0, 0, 0, 0]\n if b[0] <= a[0] and a[0] <= b[2]:\n intersection[0] = a[0]\n elif a[0] <= b[0] and b[0] <= a[2]:\n intersection[0] = b[0]\n else:\n return 0\n if b[1] <= a[1] and a[1] <= b[3]:\n intersection[1] = a[1]\n elif a[1] <= b[1] and b[1] <= a[3]:\n intersection[1] = b[1]\n else:\n return 0\n if b[0] <= a[2] and a[2] <= b[2]:\n intersection[2] = a[2]\n elif a[0] <= b[2] and b[2] <= a[2]:\n intersection[2] = b[2]\n else:\n return 0\n if b[0] <= a[3] and a[3] <= b[3]:\n intersection[3] = a[3]\n elif a[0] <= b[3] and b[3] <= a[3]:\n intersection[3] = b[3]\n else:\n return 0\n i1 = intersection[3] - intersection[1]\n i2 = intersection[2] - intersection[0]\n i = i1 * i2\n return i\n\n\n<mask token>\n\n\ndef getPrecisionRecall(plist, positives):\n tp = 0\n fp = 0\n precs = []\n recs = []\n for e in plist:\n if e == 'TP':\n tp += 1\n elif e == 'FP':\n fp += 1\n precision = tp / (tp + fp)\n precs.append(precision)\n recall = tp / positives\n recs.append(recall)\n return precs, recs\n\n\ndef readResults(filename):\n file = []\n with open(filename) as csvfile:\n reader = csv.reader(csvfile, delimiter=',')\n for row in reader:\n file.append(row)\n return file\n\n\n<mask token>\n\n\ndef relativeToAbsolute(x, y, w, h, o_x, o_y):\n n_x = float(x) * float(o_x)\n n_y = float(y) * float(o_y)\n n_w = float(w) * float(o_x)\n n_h = float(h) * float(o_y)\n return n_x, n_y, n_w, n_h\n",
"step-3": "<mask token>\n\n\ndef getAP(prec, rec):\n prec0 = prec.copy()\n prec0.append(0.0)\n smoothprec = np.zeros(101)\n for idx in range(101):\n i = (100 - idx) / 100.0\n val = 0\n for re_idx in range(len(rec)):\n re_i = len(rec) - re_idx - 1\n if rec[re_i] >= i:\n val = max(prec0[re_i:])\n smoothprec[100 - idx] = val\n ap = np.mean(smoothprec)\n return smoothprec, ap\n\n\n<mask token>\n\n\ndef getIntersection(a, b):\n intersection = [0, 0, 0, 0]\n if b[0] <= a[0] and a[0] <= b[2]:\n intersection[0] = a[0]\n elif a[0] <= b[0] and b[0] <= a[2]:\n intersection[0] = b[0]\n else:\n return 0\n if b[1] <= a[1] and a[1] <= b[3]:\n intersection[1] = a[1]\n elif a[1] <= b[1] and b[1] <= a[3]:\n intersection[1] = b[1]\n else:\n return 0\n if b[0] <= a[2] and a[2] <= b[2]:\n intersection[2] = a[2]\n elif a[0] <= b[2] and b[2] <= a[2]:\n intersection[2] = b[2]\n else:\n return 0\n if b[0] <= a[3] and a[3] <= b[3]:\n intersection[3] = a[3]\n elif a[0] <= b[3] and b[3] <= a[3]:\n intersection[3] = b[3]\n else:\n return 0\n i1 = intersection[3] - intersection[1]\n i2 = intersection[2] - intersection[0]\n i = i1 * i2\n return i\n\n\n<mask token>\n\n\ndef getIoU(a, b):\n a = np.array(a, np.float32)\n b = np.array(b, np.float32)\n intersection = getIntersection(a, b)\n asize = (a[2] - a[0]) * (a[3] - a[1])\n bsize = (b[2] - b[0]) * (b[3] - b[1])\n if intersection > 0:\n union = asize + bsize - intersection\n else:\n union = asize + bsize\n return intersection / union\n\n\n<mask token>\n\n\ndef getPrecisionRecall(plist, positives):\n tp = 0\n fp = 0\n precs = []\n recs = []\n for e in plist:\n if e == 'TP':\n tp += 1\n elif e == 'FP':\n fp += 1\n precision = tp / (tp + fp)\n precs.append(precision)\n recall = tp / positives\n recs.append(recall)\n return precs, recs\n\n\ndef readResults(filename):\n file = []\n with open(filename) as csvfile:\n reader = csv.reader(csvfile, delimiter=',')\n for row in reader:\n file.append(row)\n return file\n\n\n<mask token>\n\n\ndef 
relativeToAbsolute(x, y, w, h, o_x, o_y):\n n_x = float(x) * float(o_x)\n n_y = float(y) * float(o_y)\n n_w = float(w) * float(o_x)\n n_h = float(h) * float(o_y)\n return n_x, n_y, n_w, n_h\n",
"step-4": "import cv2\nimport glob\nimport numpy as np\nimport csv\nimport matplotlib.pyplot as plt\nfrom pydarknet import Detector, Image\n<mask token>\n\n\ndef getAP(prec, rec):\n prec0 = prec.copy()\n prec0.append(0.0)\n smoothprec = np.zeros(101)\n for idx in range(101):\n i = (100 - idx) / 100.0\n val = 0\n for re_idx in range(len(rec)):\n re_i = len(rec) - re_idx - 1\n if rec[re_i] >= i:\n val = max(prec0[re_i:])\n smoothprec[100 - idx] = val\n ap = np.mean(smoothprec)\n return smoothprec, ap\n\n\n<mask token>\n\n\ndef getIntersection(a, b):\n intersection = [0, 0, 0, 0]\n if b[0] <= a[0] and a[0] <= b[2]:\n intersection[0] = a[0]\n elif a[0] <= b[0] and b[0] <= a[2]:\n intersection[0] = b[0]\n else:\n return 0\n if b[1] <= a[1] and a[1] <= b[3]:\n intersection[1] = a[1]\n elif a[1] <= b[1] and b[1] <= a[3]:\n intersection[1] = b[1]\n else:\n return 0\n if b[0] <= a[2] and a[2] <= b[2]:\n intersection[2] = a[2]\n elif a[0] <= b[2] and b[2] <= a[2]:\n intersection[2] = b[2]\n else:\n return 0\n if b[0] <= a[3] and a[3] <= b[3]:\n intersection[3] = a[3]\n elif a[0] <= b[3] and b[3] <= a[3]:\n intersection[3] = b[3]\n else:\n return 0\n i1 = intersection[3] - intersection[1]\n i2 = intersection[2] - intersection[0]\n i = i1 * i2\n return i\n\n\n<mask token>\n\n\ndef getIoU(a, b):\n a = np.array(a, np.float32)\n b = np.array(b, np.float32)\n intersection = getIntersection(a, b)\n asize = (a[2] - a[0]) * (a[3] - a[1])\n bsize = (b[2] - b[0]) * (b[3] - b[1])\n if intersection > 0:\n union = asize + bsize - intersection\n else:\n union = asize + bsize\n return intersection / union\n\n\n<mask token>\n\n\ndef getPrecisionRecall(plist, positives):\n tp = 0\n fp = 0\n precs = []\n recs = []\n for e in plist:\n if e == 'TP':\n tp += 1\n elif e == 'FP':\n fp += 1\n precision = tp / (tp + fp)\n precs.append(precision)\n recall = tp / positives\n recs.append(recall)\n return precs, recs\n\n\ndef readResults(filename):\n file = []\n with open(filename) as csvfile:\n reader = 
csv.reader(csvfile, delimiter=',')\n for row in reader:\n file.append(row)\n return file\n\n\n<mask token>\n\n\ndef relativeToAbsolute(x, y, w, h, o_x, o_y):\n n_x = float(x) * float(o_x)\n n_y = float(y) * float(o_y)\n n_w = float(w) * float(o_x)\n n_h = float(h) * float(o_y)\n return n_x, n_y, n_w, n_h\n",
"step-5": "import cv2\r\nimport glob\r\nimport numpy as np\r\nimport csv\r\nimport matplotlib.pyplot as plt\r\nfrom pydarknet import Detector,Image\r\n\r\n\"\"\"\r\nCalculates the average precision based on the precision and recall values,\r\nwhich are essentially the output of getPrecisionRecall\r\nReturns the 101pt interpolation curve and a single average precision value\r\n\"\"\"\r\ndef getAP(prec,rec):\r\n #smooth\r\n prec0 = prec.copy()\r\n prec0.append(0.0)\r\n smoothprec = np.zeros(101) #smoothed and ready for easy 101pt interpolation\r\n for idx in range(101):\r\n i = (100-idx)/100.\r\n val = 0\r\n for re_idx in range(len(rec)): #go through recs\r\n re_i = len(rec)-re_idx-1 #from back to front\r\n if rec[re_i] >= i: # if value there is larger than i\r\n val = max(prec0[re_i:])\r\n #break\r\n smoothprec[100-idx] = val\r\n #quick 101 pt interpolation\r\n ap = np.mean(smoothprec)\r\n return(smoothprec,ap)\r\n\r\n\"\"\"\r\nCalculates the intersection of two boxes a and b,\r\nboth arrays are in the format x1,y1,x2,y2, where x1,y1 and x2,y2 are \r\nthe upmost left and downmost right corner\r\nReturns a single value for the Intersection amount in pixels\r\n\"\"\"\r\ndef getIntersection(a,b): #each in format x1,y1,x2,y2\r\n intersection = [0,0,0,0]\r\n #left -> \r\n if b[0] <= a[0] and a[0] <= b[2]:\r\n intersection[0] = a[0]\r\n elif a[0] <= b[0] and b[0] <= a[2]:\r\n intersection[0] = b[0]\r\n else: \r\n return 0\r\n #down ->\r\n if b[1] <= a[1] and a[1] <= b[3]:\r\n intersection[1] = a[1]\r\n elif a[1] <= b[1] and b[1] <= a[3]:\r\n intersection[1] = b[1]\r\n else:\r\n return 0\r\n #right ->\r\n if b[0] <= a[2] and a[2] <= b[2]: \r\n intersection[2] = a[2]\r\n elif a[0] <= b[2] and b[2] <= a[2]:\r\n intersection[2] = b[2]\r\n else:\r\n return 0\r\n #up ->\r\n if b[0] <= a[3] and a[3] <= b[3]: #up\r\n intersection[3] = a[3]\r\n elif a[0] <= b[3] and b[3] <= a[3]:\r\n intersection[3] = b[3] \r\n else:\r\n return 0\r\n i1 = intersection[3]-intersection[1]\r\n i2 = 
intersection[2]-intersection[0]\r\n i = i1*i2 \r\n return i\r\n\r\n\"\"\"\r\nCalculates the IoU Intersection over Union for the two boxes a and b,\r\nboth arrays are in the format x1,y1,x2,y2, where x1,y1 and x2,y2 are \r\nthe upmost left and downmost right corner\r\nReturns a single IoU value\r\n\"\"\"\r\ndef getIoU(a,b): #format of a and b is x1,y1,x2,y2\r\n a = np.array(a, np.float32)\r\n b = np.array(b, np.float32)\r\n intersection = getIntersection(a,b)\r\n asize = (a[2]-a[0])*(a[3]-a[1])\r\n bsize = (b[2]-b[0])*(b[3]-b[1])\r\n if intersection > 0:#\r\n union = asize + bsize - intersection\r\n else:\r\n union = asize + bsize\r\n return(intersection/union)\r\n\r\n\"\"\"\r\nCalculates the precision and recall values/curve given plist that contains only \"TP\" and \"FP\" items\r\nthis list was created by predictions that are ordered based on score\r\nand positives, the number of all positives based on the ground truth\r\nReturns tuple of lists for precisions and recalls\r\n\"\"\"\r\ndef getPrecisionRecall(plist,positives):\r\n tp = 0\r\n fp = 0\r\n precs = []\r\n recs = []\r\n for e in plist:\r\n if e == \"TP\":\r\n tp += 1\r\n elif e == \"FP\":\r\n fp += 1\r\n precision = tp/(tp+fp)\r\n precs.append(precision)\r\n recall = tp/(positives)\r\n recs.append(recall)\r\n return(precs,recs)\r\n\r\ndef readResults(filename):\r\n\tfile = []\r\n\twith open(filename) as csvfile:\r\n\t reader = csv.reader(csvfile, delimiter=',')\r\n\t for row in reader:\r\n\t \tfile.append(row)\r\n\treturn file\r\n\r\n\"\"\"\r\nconverts relative to absolute coordinates,\r\nx = point of box (relative), y = point of box (relative)\r\nw = width of box (relative), h = height of box (relative)\r\no_x = original width of image, o_y = original height of image\r\n\"\"\"\r\ndef relativeToAbsolute(x,y,w,h,o_x,o_y):\r\n n_x = float(x)*float(o_x)\r\n n_y = float(y)*float(o_y)\r\n n_w = float(w)*float(o_x)\r\n n_h = float(h)*float(o_y)\r\n return(n_x,n_y,n_w,n_h)\r\n\r\n",
"step-ids": [
3,
5,
6,
7,
8
]
}
|
[
3,
5,
6,
7,
8
] |
from app import create_app
__author__ = '七月'
app = create_app()
if __name__ == '__main__':
app.run(debug=app.config['DEBUG'])
|
normal
|
{
"blob_id": "9a6d6637cd4ecf2f6e9c8eb8e702be06e83beea4",
"index": 998,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n app.run(debug=app.config['DEBUG'])\n",
"step-3": "<mask token>\n__author__ = '七月'\napp = create_app()\nif __name__ == '__main__':\n app.run(debug=app.config['DEBUG'])\n",
"step-4": "from app import create_app\n__author__ = '七月'\napp = create_app()\nif __name__ == '__main__':\n app.run(debug=app.config['DEBUG'])\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def make_model():
corpus = open(corpusFile).read()
text_model = markovify.Text(corpus, state_size=4)
model_json = text_model.to_json()
f = open(modelFile, mode='w')
f.write(model_json)
f.close()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def generate():
generate_count = 168
model_json = open(modelFile, 'r').read()
model = markovify.Text.from_json(model_json)
conn = sqlite3.connect(dbFile)
c = conn.cursor()
for i in range(generate_count):
content = model.make_short_sentence(140)
generated_timestamp = int(time.time())
if content:
c.execute(
'INSERT INTO tweets (content,generated_timestamp) VALUES (?,?)'
, (content, generated_timestamp))
print(content)
print(generated_timestamp)
print('----------')
conn.commit()
conn.close()
def make_model():
corpus = open(corpusFile).read()
text_model = markovify.Text(corpus, state_size=4)
model_json = text_model.to_json()
f = open(modelFile, mode='w')
f.write(model_json)
f.close()
def full_gen():
corpus = open(corpusFile).read()
model = markovify.Text(corpus, state_size=4)
generate_count = 168
conn = sqlite3.connect(dbFile)
c = conn.cursor()
for i in range(generate_count):
content = model.make_short_sentence(140, max_overlap_ratio=0.8)
generated_timestamp = int(time.time())
if content:
c.execute(
'INSERT INTO tweets (content,generated_timestamp) VALUES (?,?)'
, (content, generated_timestamp))
print(content)
print(generated_timestamp)
print('----------')
conn.commit()
conn.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--model', action='store_true', default=False, help
='Create Model JSON')
parser.add_argument('--gen', action='store_true', default=False, help=
'Generate from stored Model')
parser.add_argument('--full', action='store_true', default=False, help=
'Full Geneate')
args = parser.parse_args()
if args.gen:
generate()
elif args.model:
make_model()
else:
full_gen()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
modelFile = './data/model.json'
corpusFile = './data/corpus.txt'
dbFile = './data/tweets.sqlite3'
def generate():
generate_count = 168
model_json = open(modelFile, 'r').read()
model = markovify.Text.from_json(model_json)
conn = sqlite3.connect(dbFile)
c = conn.cursor()
for i in range(generate_count):
content = model.make_short_sentence(140)
generated_timestamp = int(time.time())
if content:
c.execute(
'INSERT INTO tweets (content,generated_timestamp) VALUES (?,?)'
, (content, generated_timestamp))
print(content)
print(generated_timestamp)
print('----------')
conn.commit()
conn.close()
def make_model():
corpus = open(corpusFile).read()
text_model = markovify.Text(corpus, state_size=4)
model_json = text_model.to_json()
f = open(modelFile, mode='w')
f.write(model_json)
f.close()
def full_gen():
corpus = open(corpusFile).read()
model = markovify.Text(corpus, state_size=4)
generate_count = 168
conn = sqlite3.connect(dbFile)
c = conn.cursor()
for i in range(generate_count):
content = model.make_short_sentence(140, max_overlap_ratio=0.8)
generated_timestamp = int(time.time())
if content:
c.execute(
'INSERT INTO tweets (content,generated_timestamp) VALUES (?,?)'
, (content, generated_timestamp))
print(content)
print(generated_timestamp)
print('----------')
conn.commit()
conn.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--model', action='store_true', default=False, help
='Create Model JSON')
parser.add_argument('--gen', action='store_true', default=False, help=
'Generate from stored Model')
parser.add_argument('--full', action='store_true', default=False, help=
'Full Geneate')
args = parser.parse_args()
if args.gen:
generate()
elif args.model:
make_model()
else:
full_gen()
<|reserved_special_token_1|>
import markovify
import argparse
import sqlite3
import time
modelFile = './data/model.json'
corpusFile = './data/corpus.txt'
dbFile = './data/tweets.sqlite3'
def generate():
generate_count = 168
model_json = open(modelFile, 'r').read()
model = markovify.Text.from_json(model_json)
conn = sqlite3.connect(dbFile)
c = conn.cursor()
for i in range(generate_count):
content = model.make_short_sentence(140)
generated_timestamp = int(time.time())
if content:
c.execute(
'INSERT INTO tweets (content,generated_timestamp) VALUES (?,?)'
, (content, generated_timestamp))
print(content)
print(generated_timestamp)
print('----------')
conn.commit()
conn.close()
def make_model():
corpus = open(corpusFile).read()
text_model = markovify.Text(corpus, state_size=4)
model_json = text_model.to_json()
f = open(modelFile, mode='w')
f.write(model_json)
f.close()
def full_gen():
corpus = open(corpusFile).read()
model = markovify.Text(corpus, state_size=4)
generate_count = 168
conn = sqlite3.connect(dbFile)
c = conn.cursor()
for i in range(generate_count):
content = model.make_short_sentence(140, max_overlap_ratio=0.8)
generated_timestamp = int(time.time())
if content:
c.execute(
'INSERT INTO tweets (content,generated_timestamp) VALUES (?,?)'
, (content, generated_timestamp))
print(content)
print(generated_timestamp)
print('----------')
conn.commit()
conn.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--model', action='store_true', default=False, help
='Create Model JSON')
parser.add_argument('--gen', action='store_true', default=False, help=
'Generate from stored Model')
parser.add_argument('--full', action='store_true', default=False, help=
'Full Geneate')
args = parser.parse_args()
if args.gen:
generate()
elif args.model:
make_model()
else:
full_gen()
<|reserved_special_token_1|>
import markovify
import argparse
import sqlite3
import time
modelFile = './data/model.json'
corpusFile = './data/corpus.txt'
dbFile = './data/tweets.sqlite3'
def generate():
generate_count = 168
model_json = open(modelFile, 'r').read()
model = markovify.Text.from_json(model_json)
conn = sqlite3.connect(dbFile)
c = conn.cursor()
for i in range(generate_count):
content = model.make_short_sentence(140)
generated_timestamp = int(time.time())
if content:
c.execute('INSERT INTO tweets (content,generated_timestamp) VALUES (?,?)', (content,generated_timestamp))
print(content)
print(generated_timestamp)
print('----------')
conn.commit()
conn.close()
def make_model():
corpus = open(corpusFile).read()
text_model = markovify.Text(corpus, state_size=4)
model_json = text_model.to_json()
f = open(modelFile, mode='w')
f.write(model_json)
f.close()
def full_gen():
corpus = open(corpusFile).read()
model = markovify.Text(corpus, state_size=4)
generate_count = 168
conn = sqlite3.connect(dbFile)
c = conn.cursor()
for i in range(generate_count):
content = model.make_short_sentence(140, max_overlap_ratio=.8)
generated_timestamp = int(time.time())
if content:
c.execute('INSERT INTO tweets (content,generated_timestamp) VALUES (?,?)', (content,generated_timestamp))
print(content)
print(generated_timestamp)
print('----------')
conn.commit()
conn.close()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--model", action="store_true", default=False, help="Create Model JSON")
parser.add_argument("--gen", action="store_true", default=False, help="Generate from stored Model")
parser.add_argument("--full", action="store_true", default=False, help="Full Geneate")
args = parser.parse_args()
if args.gen:
generate()
elif args.model:
make_model()
else:
full_gen()
|
flexible
|
{
"blob_id": "cc71c0cc1ec21dc465486fb5894c4d389c39bd62",
"index": 8164,
"step-1": "<mask token>\n\n\ndef make_model():\n corpus = open(corpusFile).read()\n text_model = markovify.Text(corpus, state_size=4)\n model_json = text_model.to_json()\n f = open(modelFile, mode='w')\n f.write(model_json)\n f.close()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef generate():\n generate_count = 168\n model_json = open(modelFile, 'r').read()\n model = markovify.Text.from_json(model_json)\n conn = sqlite3.connect(dbFile)\n c = conn.cursor()\n for i in range(generate_count):\n content = model.make_short_sentence(140)\n generated_timestamp = int(time.time())\n if content:\n c.execute(\n 'INSERT INTO tweets (content,generated_timestamp) VALUES (?,?)'\n , (content, generated_timestamp))\n print(content)\n print(generated_timestamp)\n print('----------')\n conn.commit()\n conn.close()\n\n\ndef make_model():\n corpus = open(corpusFile).read()\n text_model = markovify.Text(corpus, state_size=4)\n model_json = text_model.to_json()\n f = open(modelFile, mode='w')\n f.write(model_json)\n f.close()\n\n\ndef full_gen():\n corpus = open(corpusFile).read()\n model = markovify.Text(corpus, state_size=4)\n generate_count = 168\n conn = sqlite3.connect(dbFile)\n c = conn.cursor()\n for i in range(generate_count):\n content = model.make_short_sentence(140, max_overlap_ratio=0.8)\n generated_timestamp = int(time.time())\n if content:\n c.execute(\n 'INSERT INTO tweets (content,generated_timestamp) VALUES (?,?)'\n , (content, generated_timestamp))\n print(content)\n print(generated_timestamp)\n print('----------')\n conn.commit()\n conn.close()\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--model', action='store_true', default=False, help\n ='Create Model JSON')\n parser.add_argument('--gen', action='store_true', default=False, help=\n 'Generate from stored Model')\n parser.add_argument('--full', action='store_true', default=False, help=\n 'Full Geneate')\n args = parser.parse_args()\n if args.gen:\n generate()\n elif args.model:\n make_model()\n else:\n full_gen()\n",
"step-3": "<mask token>\nmodelFile = './data/model.json'\ncorpusFile = './data/corpus.txt'\ndbFile = './data/tweets.sqlite3'\n\n\ndef generate():\n generate_count = 168\n model_json = open(modelFile, 'r').read()\n model = markovify.Text.from_json(model_json)\n conn = sqlite3.connect(dbFile)\n c = conn.cursor()\n for i in range(generate_count):\n content = model.make_short_sentence(140)\n generated_timestamp = int(time.time())\n if content:\n c.execute(\n 'INSERT INTO tweets (content,generated_timestamp) VALUES (?,?)'\n , (content, generated_timestamp))\n print(content)\n print(generated_timestamp)\n print('----------')\n conn.commit()\n conn.close()\n\n\ndef make_model():\n corpus = open(corpusFile).read()\n text_model = markovify.Text(corpus, state_size=4)\n model_json = text_model.to_json()\n f = open(modelFile, mode='w')\n f.write(model_json)\n f.close()\n\n\ndef full_gen():\n corpus = open(corpusFile).read()\n model = markovify.Text(corpus, state_size=4)\n generate_count = 168\n conn = sqlite3.connect(dbFile)\n c = conn.cursor()\n for i in range(generate_count):\n content = model.make_short_sentence(140, max_overlap_ratio=0.8)\n generated_timestamp = int(time.time())\n if content:\n c.execute(\n 'INSERT INTO tweets (content,generated_timestamp) VALUES (?,?)'\n , (content, generated_timestamp))\n print(content)\n print(generated_timestamp)\n print('----------')\n conn.commit()\n conn.close()\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--model', action='store_true', default=False, help\n ='Create Model JSON')\n parser.add_argument('--gen', action='store_true', default=False, help=\n 'Generate from stored Model')\n parser.add_argument('--full', action='store_true', default=False, help=\n 'Full Geneate')\n args = parser.parse_args()\n if args.gen:\n generate()\n elif args.model:\n make_model()\n else:\n full_gen()\n",
"step-4": "import markovify\nimport argparse\nimport sqlite3\nimport time\nmodelFile = './data/model.json'\ncorpusFile = './data/corpus.txt'\ndbFile = './data/tweets.sqlite3'\n\n\ndef generate():\n generate_count = 168\n model_json = open(modelFile, 'r').read()\n model = markovify.Text.from_json(model_json)\n conn = sqlite3.connect(dbFile)\n c = conn.cursor()\n for i in range(generate_count):\n content = model.make_short_sentence(140)\n generated_timestamp = int(time.time())\n if content:\n c.execute(\n 'INSERT INTO tweets (content,generated_timestamp) VALUES (?,?)'\n , (content, generated_timestamp))\n print(content)\n print(generated_timestamp)\n print('----------')\n conn.commit()\n conn.close()\n\n\ndef make_model():\n corpus = open(corpusFile).read()\n text_model = markovify.Text(corpus, state_size=4)\n model_json = text_model.to_json()\n f = open(modelFile, mode='w')\n f.write(model_json)\n f.close()\n\n\ndef full_gen():\n corpus = open(corpusFile).read()\n model = markovify.Text(corpus, state_size=4)\n generate_count = 168\n conn = sqlite3.connect(dbFile)\n c = conn.cursor()\n for i in range(generate_count):\n content = model.make_short_sentence(140, max_overlap_ratio=0.8)\n generated_timestamp = int(time.time())\n if content:\n c.execute(\n 'INSERT INTO tweets (content,generated_timestamp) VALUES (?,?)'\n , (content, generated_timestamp))\n print(content)\n print(generated_timestamp)\n print('----------')\n conn.commit()\n conn.close()\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--model', action='store_true', default=False, help\n ='Create Model JSON')\n parser.add_argument('--gen', action='store_true', default=False, help=\n 'Generate from stored Model')\n parser.add_argument('--full', action='store_true', default=False, help=\n 'Full Geneate')\n args = parser.parse_args()\n if args.gen:\n generate()\n elif args.model:\n make_model()\n else:\n full_gen()\n",
"step-5": "import markovify\nimport argparse\nimport sqlite3\nimport time\n\nmodelFile = './data/model.json'\ncorpusFile = './data/corpus.txt'\ndbFile = './data/tweets.sqlite3'\n\ndef generate():\n generate_count = 168\n model_json = open(modelFile, 'r').read()\n model = markovify.Text.from_json(model_json)\n\n conn = sqlite3.connect(dbFile)\n c = conn.cursor()\n\n for i in range(generate_count):\n content = model.make_short_sentence(140)\n generated_timestamp = int(time.time())\n\n if content:\n c.execute('INSERT INTO tweets (content,generated_timestamp) VALUES (?,?)', (content,generated_timestamp))\n print(content)\n print(generated_timestamp)\n print('----------')\n\n conn.commit()\n\n conn.close()\n\ndef make_model():\n corpus = open(corpusFile).read()\n\n text_model = markovify.Text(corpus, state_size=4)\n model_json = text_model.to_json()\n\n f = open(modelFile, mode='w')\n f.write(model_json)\n f.close()\n\n\ndef full_gen():\n corpus = open(corpusFile).read()\n\n model = markovify.Text(corpus, state_size=4)\n\n generate_count = 168\n\n conn = sqlite3.connect(dbFile)\n c = conn.cursor()\n\n for i in range(generate_count):\n content = model.make_short_sentence(140, max_overlap_ratio=.8)\n generated_timestamp = int(time.time())\n\n if content:\n c.execute('INSERT INTO tweets (content,generated_timestamp) VALUES (?,?)', (content,generated_timestamp))\n print(content)\n print(generated_timestamp)\n print('----------')\n\n conn.commit()\n\n conn.close()\n\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--model\", action=\"store_true\", default=False, help=\"Create Model JSON\")\n parser.add_argument(\"--gen\", action=\"store_true\", default=False, help=\"Generate from stored Model\")\n parser.add_argument(\"--full\", action=\"store_true\", default=False, help=\"Full Geneate\")\n args = parser.parse_args()\n if args.gen:\n generate()\n elif args.model:\n make_model()\n else:\n full_gen()\n",
"step-ids": [
1,
4,
5,
6,
7
]
}
|
[
1,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
dependencies = [('auth', '0001_initial'), ('c4c_app',
'0006_c4cjob_complete')]
operations = [migrations.AlterModelOptions(name='c4cbranch', options={
'verbose_name': 'Branch', 'verbose_name_plural': 'Branches'}),
migrations.AlterModelOptions(name='c4cdonation', options={
'verbose_name': 'Donation', 'verbose_name_plural': 'Donations'}),
migrations.AlterModelOptions(name='c4cevent', options={
'verbose_name': 'Event', 'verbose_name_plural': 'Events'}),
migrations.AlterModelOptions(name='c4cjob', options={'verbose_name':
'Job', 'verbose_name_plural': 'Jobs'}), migrations.
AlterModelOptions(name='c4cuser', options={'verbose_name':
'C4C User', 'verbose_name_plural': 'C4C Users'}), migrations.
RemoveField(model_name='c4cbranch', name='officers'), migrations.
AddField(model_name='c4cbranch', name='group', field=models.
OneToOneField(related_name='in_branches', default=None, to=
'auth.Group'), preserve_default=False), migrations.AddField(
model_name='c4cbranch', name='officers_group', field=models.
OneToOneField(related_name='is_branch_officer_of', default=None, to
='auth.Group'), preserve_default=False), migrations.AddField(
model_name='c4cjob', name='offer', field=models.BooleanField(
default=False), preserve_default=True), migrations.AlterField(
model_name='c4cjob', name='duration', field=models.IntegerField(
null=True), preserve_default=True)]
<|reserved_special_token_1|>
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [('auth', '0001_initial'), ('c4c_app',
'0006_c4cjob_complete')]
operations = [migrations.AlterModelOptions(name='c4cbranch', options={
'verbose_name': 'Branch', 'verbose_name_plural': 'Branches'}),
migrations.AlterModelOptions(name='c4cdonation', options={
'verbose_name': 'Donation', 'verbose_name_plural': 'Donations'}),
migrations.AlterModelOptions(name='c4cevent', options={
'verbose_name': 'Event', 'verbose_name_plural': 'Events'}),
migrations.AlterModelOptions(name='c4cjob', options={'verbose_name':
'Job', 'verbose_name_plural': 'Jobs'}), migrations.
AlterModelOptions(name='c4cuser', options={'verbose_name':
'C4C User', 'verbose_name_plural': 'C4C Users'}), migrations.
RemoveField(model_name='c4cbranch', name='officers'), migrations.
AddField(model_name='c4cbranch', name='group', field=models.
OneToOneField(related_name='in_branches', default=None, to=
'auth.Group'), preserve_default=False), migrations.AddField(
model_name='c4cbranch', name='officers_group', field=models.
OneToOneField(related_name='is_branch_officer_of', default=None, to
='auth.Group'), preserve_default=False), migrations.AddField(
model_name='c4cjob', name='offer', field=models.BooleanField(
default=False), preserve_default=True), migrations.AlterField(
model_name='c4cjob', name='duration', field=models.IntegerField(
null=True), preserve_default=True)]
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Auto-generated Django schema migration for c4c_app.

    Changes (see ``operations``):
    - Sets human-readable verbose names on five models.
    - Removes ``c4cbranch.officers`` and adds two one-to-one links to
      ``auth.Group`` (``group``, ``officers_group``) — presumably the
      groups supersede the removed field; no data migration is visible
      here, so any data copy must happen elsewhere (TODO confirm).
    - Adds a boolean ``offer`` flag to ``c4cjob``.
    - Makes ``c4cjob.duration`` nullable.
    """

    # Needs auth.Group (from auth's initial migration) and the previous
    # c4c_app migration to have been applied first.
    dependencies = [
        ('auth', '0001_initial'),
        ('c4c_app', '0006_c4cjob_complete'),
    ]

    operations = [
        # Human-readable singular/plural names for each model.
        migrations.AlterModelOptions(
            name='c4cbranch',
            options={'verbose_name': 'Branch', 'verbose_name_plural': 'Branches'},
        ),
        migrations.AlterModelOptions(
            name='c4cdonation',
            options={'verbose_name': 'Donation', 'verbose_name_plural': 'Donations'},
        ),
        migrations.AlterModelOptions(
            name='c4cevent',
            options={'verbose_name': 'Event', 'verbose_name_plural': 'Events'},
        ),
        migrations.AlterModelOptions(
            name='c4cjob',
            options={'verbose_name': 'Job', 'verbose_name_plural': 'Jobs'},
        ),
        migrations.AlterModelOptions(
            name='c4cuser',
            options={'verbose_name': 'C4C User', 'verbose_name_plural': 'C4C Users'},
        ),
        # Drop the old 'officers' field.
        migrations.RemoveField(
            model_name='c4cbranch',
            name='officers',
        ),
        # One-to-one link from a branch to its membership group.
        # default=None is only used to populate existing rows during the
        # migration; preserve_default=False removes it afterwards.
        migrations.AddField(
            model_name='c4cbranch',
            name='group',
            field=models.OneToOneField(related_name='in_branches', default=None, to='auth.Group'),
            preserve_default=False,
        ),
        # One-to-one link from a branch to its officers' group.
        migrations.AddField(
            model_name='c4cbranch',
            name='officers_group',
            field=models.OneToOneField(related_name='is_branch_officer_of', default=None, to='auth.Group'),
            preserve_default=False,
        ),
        # New boolean flag on jobs; defaults to False.
        migrations.AddField(
            model_name='c4cjob',
            name='offer',
            field=models.BooleanField(default=False),
            preserve_default=True,
        ),
        # Duration may now be NULL.
        migrations.AlterField(
            model_name='c4cjob',
            name='duration',
            field=models.IntegerField(null=True),
            preserve_default=True,
        ),
    ]
|
flexible
|
{
"blob_id": "30986eb0a6cd82f837dd14fb383529a6a41def9a",
"index": 8338,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('auth', '0001_initial'), ('c4c_app',\n '0006_c4cjob_complete')]\n operations = [migrations.AlterModelOptions(name='c4cbranch', options={\n 'verbose_name': 'Branch', 'verbose_name_plural': 'Branches'}),\n migrations.AlterModelOptions(name='c4cdonation', options={\n 'verbose_name': 'Donation', 'verbose_name_plural': 'Donations'}),\n migrations.AlterModelOptions(name='c4cevent', options={\n 'verbose_name': 'Event', 'verbose_name_plural': 'Events'}),\n migrations.AlterModelOptions(name='c4cjob', options={'verbose_name':\n 'Job', 'verbose_name_plural': 'Jobs'}), migrations.\n AlterModelOptions(name='c4cuser', options={'verbose_name':\n 'C4C User', 'verbose_name_plural': 'C4C Users'}), migrations.\n RemoveField(model_name='c4cbranch', name='officers'), migrations.\n AddField(model_name='c4cbranch', name='group', field=models.\n OneToOneField(related_name='in_branches', default=None, to=\n 'auth.Group'), preserve_default=False), migrations.AddField(\n model_name='c4cbranch', name='officers_group', field=models.\n OneToOneField(related_name='is_branch_officer_of', default=None, to\n ='auth.Group'), preserve_default=False), migrations.AddField(\n model_name='c4cjob', name='offer', field=models.BooleanField(\n default=False), preserve_default=True), migrations.AlterField(\n model_name='c4cjob', name='duration', field=models.IntegerField(\n null=True), preserve_default=True)]\n",
"step-4": "from __future__ import unicode_literals\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n dependencies = [('auth', '0001_initial'), ('c4c_app',\n '0006_c4cjob_complete')]\n operations = [migrations.AlterModelOptions(name='c4cbranch', options={\n 'verbose_name': 'Branch', 'verbose_name_plural': 'Branches'}),\n migrations.AlterModelOptions(name='c4cdonation', options={\n 'verbose_name': 'Donation', 'verbose_name_plural': 'Donations'}),\n migrations.AlterModelOptions(name='c4cevent', options={\n 'verbose_name': 'Event', 'verbose_name_plural': 'Events'}),\n migrations.AlterModelOptions(name='c4cjob', options={'verbose_name':\n 'Job', 'verbose_name_plural': 'Jobs'}), migrations.\n AlterModelOptions(name='c4cuser', options={'verbose_name':\n 'C4C User', 'verbose_name_plural': 'C4C Users'}), migrations.\n RemoveField(model_name='c4cbranch', name='officers'), migrations.\n AddField(model_name='c4cbranch', name='group', field=models.\n OneToOneField(related_name='in_branches', default=None, to=\n 'auth.Group'), preserve_default=False), migrations.AddField(\n model_name='c4cbranch', name='officers_group', field=models.\n OneToOneField(related_name='is_branch_officer_of', default=None, to\n ='auth.Group'), preserve_default=False), migrations.AddField(\n model_name='c4cjob', name='offer', field=models.BooleanField(\n default=False), preserve_default=True), migrations.AlterField(\n model_name='c4cjob', name='duration', field=models.IntegerField(\n null=True), preserve_default=True)]\n",
"step-5": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('auth', '0001_initial'),\n ('c4c_app', '0006_c4cjob_complete'),\n ]\n\n operations = [\n migrations.AlterModelOptions(\n name='c4cbranch',\n options={'verbose_name': 'Branch', 'verbose_name_plural': 'Branches'},\n ),\n migrations.AlterModelOptions(\n name='c4cdonation',\n options={'verbose_name': 'Donation', 'verbose_name_plural': 'Donations'},\n ),\n migrations.AlterModelOptions(\n name='c4cevent',\n options={'verbose_name': 'Event', 'verbose_name_plural': 'Events'},\n ),\n migrations.AlterModelOptions(\n name='c4cjob',\n options={'verbose_name': 'Job', 'verbose_name_plural': 'Jobs'},\n ),\n migrations.AlterModelOptions(\n name='c4cuser',\n options={'verbose_name': 'C4C User', 'verbose_name_plural': 'C4C Users'},\n ),\n migrations.RemoveField(\n model_name='c4cbranch',\n name='officers',\n ),\n migrations.AddField(\n model_name='c4cbranch',\n name='group',\n field=models.OneToOneField(related_name='in_branches', default=None, to='auth.Group'),\n preserve_default=False,\n ),\n migrations.AddField(\n model_name='c4cbranch',\n name='officers_group',\n field=models.OneToOneField(related_name='is_branch_officer_of', default=None, to='auth.Group'),\n preserve_default=False,\n ),\n migrations.AddField(\n model_name='c4cjob',\n name='offer',\n field=models.BooleanField(default=False),\n preserve_default=True,\n ),\n migrations.AlterField(\n model_name='c4cjob',\n name='duration',\n field=models.IntegerField(null=True),\n preserve_default=True,\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import ConfigParser
''' Merge as many as ConfigParser as you want'''
def Config_Append(SRC_Config, DST_Config):
    """Merge SRC_Config into DST_Config in place and return DST_Config.

    Existing DST values (including DEFAULT-section values) take precedence;
    SRC only fills in sections/options that DST lacks.  This matches the
    original temp-file implementation, where DST's dump was read last and
    therefore overwrote SRC's values.

    Returning DST_Config makes the function usable as the folding step of
    reduce() over any number of parsers.

    Note: the previous implementation serialized both parsers to
    NamedTemporaryFile objects and re-read them.  That was fragile
    (re-opening a delete=True temp file by name fails on Windows, and the
    'wb' mode breaks under Python 3) and unnecessary; copying options
    directly avoids disk I/O entirely.
    """
    # Merge the [DEFAULT] section first, so that the per-section
    # has_option() checks below treat inherited defaults as "present".
    dst_defaults = DST_Config.defaults()
    for key, value in SRC_Config.defaults().items():
        if key not in dst_defaults:
            DST_Config.set('DEFAULT', key, value)

    for section in SRC_Config.sections():
        if not DST_Config.has_section(section):
            DST_Config.add_section(section)
        for key, value in SRC_Config.items(section):
            # Only fill gaps: existing DST options (and defaults) win.
            if not DST_Config.has_option(section, key):
                DST_Config.set(section, key, value)
    return DST_Config
if __name__ == '__main__':
    # NOTE(review): the original demo block contained placeholder lines
    # (bare ':' and 'blah blah') that were not valid Python; this is the
    # runnable equivalent of the intended example.

    # functools.reduce works on both Python 2.6+ and Python 3
    # (on Python 2 the builtin reduce would also do).
    from functools import reduce

    # One parser per input file.
    config_one = ConfigParser.RawConfigParser()
    config_two = ConfigParser.RawConfigParser()
    config_three = ConfigParser.RawConfigParser()

    # Load each configuration file from disk.
    config_one.read('one.ini')
    config_two.read('two.ini')
    config_three.read('three.ini')

    # Fold all parsers into one.  Config_Append keeps existing destination
    # values, so with reduce() the *later* files in the tuple take
    # precedence on conflicting options.
    config_final = reduce(Config_Append, (config_one, config_two, config_three))

    # Dump the merged configuration.
    for section in config_final.sections():
        print('[%s]' % section)
        print(config_final.items(section))
normal
|
{
"blob_id": "d17f1176ac60a3f6836c706883ab1847b61f50bf",
"index": 1857,
"step-1": "import ConfigParser\n''' Merge as many as ConfigParser as you want'''\n\ndef Config_Append(SRC_Config ,DST_Config):\n import tempfile\n temp_src = tempfile.NamedTemporaryFile(delete=True)\n temp_dst = tempfile.NamedTemporaryFile(delete=True)\n with open(temp_src.name,'wb') as src, open(temp_dst.name,'wb') as dst:\n SRC_Config.write(src)\n DST_Config.write(dst)\n DST_Config.read([temp_src.name,temp_dst.name])\n return DST_Config\n\n\nif __name__ == '__main__':\n # initial\n config_one = ConfigParser.RawConfigParser()\n config_two = ConfigParser.RawConfigParser()\n config_three = ConfigParser.RawConfigParser()\n :\n :\n :\n # read config \n config_one.read('one.ini')\n config_two.read('two.ini')\n config_three.read('three.ini')\n :\n :\n :\n \n # data manipulation\n blah blah\n \n # config merge\n config_final = reduce(Config_Append, (config_one ,config_two, config_three, ...))\n \n # show\n for i in config_final.sections():\n print '[',i,']'\n print config_final.items(i)\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
"""Test the init file of Mailgun."""
import hashlib
import hmac
import pytest
from homeassistant import config_entries, data_entry_flow
from homeassistant.components import mailgun, webhook
from homeassistant.config import async_process_ha_core_config
from homeassistant.const import CONF_API_KEY, CONF_DOMAIN
from homeassistant.core import callback
from homeassistant.setup import async_setup_component
API_KEY = "abc123"
@pytest.fixture
async def http_client(hass, hass_client_no_auth):
    """Return an HTTP test client with the webhook component loaded."""
    await async_setup_component(hass, webhook.DOMAIN, {})
    client = await hass_client_no_auth()
    return client
@pytest.fixture
async def webhook_id_with_api_key(hass):
    """Set up Mailgun with an API key and return the created webhook_id."""
    conf = {mailgun.DOMAIN: {CONF_API_KEY: API_KEY, CONF_DOMAIN: "example.com"}}
    await async_setup_component(hass, mailgun.DOMAIN, conf)
    await async_process_ha_core_config(
        hass,
        {"internal_url": "http://example.local:8123"},
    )

    # Drive the user config flow to completion: form step, then entry.
    flow = await hass.config_entries.flow.async_init(
        "mailgun", context={"source": config_entries.SOURCE_USER}
    )
    assert flow["type"] == data_entry_flow.FlowResultType.FORM, flow

    flow = await hass.config_entries.flow.async_configure(flow["flow_id"], {})
    assert flow["type"] == data_entry_flow.FlowResultType.CREATE_ENTRY

    return flow["result"].data["webhook_id"]
@pytest.fixture
async def webhook_id_without_api_key(hass):
    """Set up Mailgun without an API key and return the webhook_id."""
    await async_setup_component(hass, mailgun.DOMAIN, {})
    await async_process_ha_core_config(
        hass,
        {"internal_url": "http://example.local:8123"},
    )

    # Drive the user config flow to completion: form step, then entry.
    flow = await hass.config_entries.flow.async_init(
        "mailgun", context={"source": config_entries.SOURCE_USER}
    )
    assert flow["type"] == data_entry_flow.FlowResultType.FORM, flow

    flow = await hass.config_entries.flow.async_configure(flow["flow_id"], {})
    assert flow["type"] == data_entry_flow.FlowResultType.CREATE_ENTRY

    return flow["result"].data["webhook_id"]
@pytest.fixture
async def mailgun_events(hass):
    """Return a list that collects MESSAGE_RECEIVED events from the bus."""
    captured = []

    @callback
    def _capture(event):
        """Record each Mailgun event as it fires."""
        captured.append(event)

    hass.bus.async_listen(mailgun.MESSAGE_RECEIVED, _capture)
    return captured
async def test_mailgun_webhook_with_missing_signature(
    http_client, webhook_id_with_api_key, mailgun_events
) -> None:
    """Test that webhook doesn't trigger an event without a signature."""
    url = f"/api/webhook/{webhook_id_with_api_key}"
    before = len(mailgun_events)

    # Empty signature object: must be rejected.
    await http_client.post(url, json={"hello": "mailgun", "signature": {}})
    assert len(mailgun_events) == before

    # No signature key at all: must also be rejected.
    await http_client.post(url, json={"hello": "mailgun"})
    assert len(mailgun_events) == before
async def test_mailgun_webhook_with_different_api_key(
    http_client, webhook_id_with_api_key, mailgun_events
) -> None:
    """Test that webhook doesn't trigger an event with a wrong signature."""
    timestamp = "1529006854"
    token = "a8ce0edb2dd8301dee6c2405235584e45aa91d1e9f979f3de0"
    # HMAC computed with a key that differs from the configured API key.
    bad_signature = hmac.new(
        key=b"random_api_key",
        msg=bytes(f"{timestamp}{token}", "utf-8"),
        digestmod=hashlib.sha256,
    ).hexdigest()
    before = len(mailgun_events)

    await http_client.post(
        f"/api/webhook/{webhook_id_with_api_key}",
        json={
            "hello": "mailgun",
            "signature": {
                "signature": bad_signature,
                "timestamp": timestamp,
                "token": token,
            },
        },
    )

    assert len(mailgun_events) == before
async def test_mailgun_webhook_event_with_correct_api_key(
    http_client, webhook_id_with_api_key, mailgun_events
) -> None:
    """Test that webhook triggers an event after validating a signature."""
    timestamp = "1529006854"
    token = "a8ce0edb2dd8301dee6c2405235584e45aa91d1e9f979f3de0"
    # HMAC computed with the configured API key, so validation succeeds.
    good_signature = hmac.new(
        key=bytes(API_KEY, "utf-8"),
        msg=bytes(f"{timestamp}{token}", "utf-8"),
        digestmod=hashlib.sha256,
    ).hexdigest()
    before = len(mailgun_events)

    await http_client.post(
        f"/api/webhook/{webhook_id_with_api_key}",
        json={
            "hello": "mailgun",
            "signature": {
                "signature": good_signature,
                "timestamp": timestamp,
                "token": token,
            },
        },
    )

    assert len(mailgun_events) == before + 1
    event = mailgun_events[-1]
    assert event.data["webhook_id"] == webhook_id_with_api_key
    assert event.data["hello"] == "mailgun"
async def test_mailgun_webhook_with_missing_signature_without_api_key(
    http_client, webhook_id_without_api_key, mailgun_events
) -> None:
    """Test that webhook triggers an event without a signature w/o API key."""
    url = f"/api/webhook/{webhook_id_without_api_key}"
    before = len(mailgun_events)

    # With no API key configured, an empty signature is accepted.
    await http_client.post(url, json={"hello": "mailgun", "signature": {}})
    assert len(mailgun_events) == before + 1
    assert mailgun_events[-1].data["webhook_id"] == webhook_id_without_api_key
    assert mailgun_events[-1].data["hello"] == "mailgun"

    # A payload without any signature key is accepted too.
    await http_client.post(url, json={"hello": "mailgun"})
    assert len(mailgun_events) == before + 1
    assert mailgun_events[-1].data["webhook_id"] == webhook_id_without_api_key
    assert mailgun_events[-1].data["hello"] == "mailgun"
async def test_mailgun_webhook_event_without_an_api_key(
    http_client, webhook_id_without_api_key, mailgun_events
) -> None:
    """Test that webhook triggers an event if there is no api key."""
    timestamp = "1529006854"
    token = "a8ce0edb2dd8301dee6c2405235584e45aa91d1e9f979f3de0"
    # Signature is present but irrelevant: no API key is configured.
    signature = hmac.new(
        key=bytes(API_KEY, "utf-8"),
        msg=bytes(f"{timestamp}{token}", "utf-8"),
        digestmod=hashlib.sha256,
    ).hexdigest()
    before = len(mailgun_events)

    await http_client.post(
        f"/api/webhook/{webhook_id_without_api_key}",
        json={
            "hello": "mailgun",
            "signature": {
                "signature": signature,
                "timestamp": timestamp,
                "token": token,
            },
        },
    )

    assert len(mailgun_events) == before + 1
    event = mailgun_events[-1]
    assert event.data["webhook_id"] == webhook_id_without_api_key
    assert event.data["hello"] == "mailgun"
|
normal
|
{
"blob_id": "a55024f0e5edec22125ce53ef54ee364be185cb8",
"index": 1099,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\n@pytest.fixture\nasync def http_client(hass, hass_client_no_auth):\n \"\"\"Initialize a Home Assistant Server for testing this module.\"\"\"\n await async_setup_component(hass, webhook.DOMAIN, {})\n return await hass_client_no_auth()\n\n\n@pytest.fixture\nasync def webhook_id_with_api_key(hass):\n \"\"\"Initialize the Mailgun component and get the webhook_id.\"\"\"\n await async_setup_component(hass, mailgun.DOMAIN, {mailgun.DOMAIN: {\n CONF_API_KEY: API_KEY, CONF_DOMAIN: 'example.com'}})\n await async_process_ha_core_config(hass, {'internal_url':\n 'http://example.local:8123'})\n result = await hass.config_entries.flow.async_init('mailgun', context={\n 'source': config_entries.SOURCE_USER})\n assert result['type'] == data_entry_flow.FlowResultType.FORM, result\n result = await hass.config_entries.flow.async_configure(result[\n 'flow_id'], {})\n assert result['type'] == data_entry_flow.FlowResultType.CREATE_ENTRY\n return result['result'].data['webhook_id']\n\n\n@pytest.fixture\nasync def webhook_id_without_api_key(hass):\n \"\"\"Initialize the Mailgun component and get the webhook_id w/o API key.\"\"\"\n await async_setup_component(hass, mailgun.DOMAIN, {})\n await async_process_ha_core_config(hass, {'internal_url':\n 'http://example.local:8123'})\n result = await hass.config_entries.flow.async_init('mailgun', context={\n 'source': config_entries.SOURCE_USER})\n assert result['type'] == data_entry_flow.FlowResultType.FORM, result\n result = await hass.config_entries.flow.async_configure(result[\n 'flow_id'], {})\n assert result['type'] == data_entry_flow.FlowResultType.CREATE_ENTRY\n return result['result'].data['webhook_id']\n\n\n@pytest.fixture\nasync def mailgun_events(hass):\n \"\"\"Return a list of mailgun_events triggered.\"\"\"\n events = []\n\n @callback\n def handle_event(event):\n \"\"\"Handle Mailgun event.\"\"\"\n events.append(event)\n hass.bus.async_listen(mailgun.MESSAGE_RECEIVED, handle_event)\n return events\n\n\nasync 
def test_mailgun_webhook_with_missing_signature(http_client,\n webhook_id_with_api_key, mailgun_events) ->None:\n \"\"\"Test that webhook doesn't trigger an event without a signature.\"\"\"\n event_count = len(mailgun_events)\n await http_client.post(f'/api/webhook/{webhook_id_with_api_key}', json=\n {'hello': 'mailgun', 'signature': {}})\n assert len(mailgun_events) == event_count\n await http_client.post(f'/api/webhook/{webhook_id_with_api_key}', json=\n {'hello': 'mailgun'})\n assert len(mailgun_events) == event_count\n\n\nasync def test_mailgun_webhook_with_different_api_key(http_client,\n webhook_id_with_api_key, mailgun_events) ->None:\n \"\"\"Test that webhook doesn't trigger an event with a wrong signature.\"\"\"\n timestamp = '1529006854'\n token = 'a8ce0edb2dd8301dee6c2405235584e45aa91d1e9f979f3de0'\n event_count = len(mailgun_events)\n await http_client.post(f'/api/webhook/{webhook_id_with_api_key}', json=\n {'hello': 'mailgun', 'signature': {'signature': hmac.new(key=\n b'random_api_key', msg=bytes(f'{timestamp}{token}', 'utf-8'),\n digestmod=hashlib.sha256).hexdigest(), 'timestamp': timestamp,\n 'token': token}})\n assert len(mailgun_events) == event_count\n\n\nasync def test_mailgun_webhook_event_with_correct_api_key(http_client,\n webhook_id_with_api_key, mailgun_events) ->None:\n \"\"\"Test that webhook triggers an event after validating a signature.\"\"\"\n timestamp = '1529006854'\n token = 'a8ce0edb2dd8301dee6c2405235584e45aa91d1e9f979f3de0'\n event_count = len(mailgun_events)\n await http_client.post(f'/api/webhook/{webhook_id_with_api_key}', json=\n {'hello': 'mailgun', 'signature': {'signature': hmac.new(key=bytes(\n API_KEY, 'utf-8'), msg=bytes(f'{timestamp}{token}', 'utf-8'),\n digestmod=hashlib.sha256).hexdigest(), 'timestamp': timestamp,\n 'token': token}})\n assert len(mailgun_events) == event_count + 1\n assert mailgun_events[-1].data['webhook_id'] == webhook_id_with_api_key\n assert mailgun_events[-1].data['hello'] == 
'mailgun'\n\n\nasync def test_mailgun_webhook_with_missing_signature_without_api_key(\n http_client, webhook_id_without_api_key, mailgun_events) ->None:\n \"\"\"Test that webhook triggers an event without a signature w/o API key.\"\"\"\n event_count = len(mailgun_events)\n await http_client.post(f'/api/webhook/{webhook_id_without_api_key}',\n json={'hello': 'mailgun', 'signature': {}})\n assert len(mailgun_events) == event_count + 1\n assert mailgun_events[-1].data['webhook_id'] == webhook_id_without_api_key\n assert mailgun_events[-1].data['hello'] == 'mailgun'\n await http_client.post(f'/api/webhook/{webhook_id_without_api_key}',\n json={'hello': 'mailgun'})\n assert len(mailgun_events) == event_count + 1\n assert mailgun_events[-1].data['webhook_id'] == webhook_id_without_api_key\n assert mailgun_events[-1].data['hello'] == 'mailgun'\n\n\nasync def test_mailgun_webhook_event_without_an_api_key(http_client,\n webhook_id_without_api_key, mailgun_events) ->None:\n \"\"\"Test that webhook triggers an event if there is no api key.\"\"\"\n timestamp = '1529006854'\n token = 'a8ce0edb2dd8301dee6c2405235584e45aa91d1e9f979f3de0'\n event_count = len(mailgun_events)\n await http_client.post(f'/api/webhook/{webhook_id_without_api_key}',\n json={'hello': 'mailgun', 'signature': {'signature': hmac.new(key=\n bytes(API_KEY, 'utf-8'), msg=bytes(f'{timestamp}{token}', 'utf-8'),\n digestmod=hashlib.sha256).hexdigest(), 'timestamp': timestamp,\n 'token': token}})\n assert len(mailgun_events) == event_count + 1\n assert mailgun_events[-1].data['webhook_id'] == webhook_id_without_api_key\n assert mailgun_events[-1].data['hello'] == 'mailgun'\n",
"step-3": "<mask token>\nAPI_KEY = 'abc123'\n\n\n@pytest.fixture\nasync def http_client(hass, hass_client_no_auth):\n \"\"\"Initialize a Home Assistant Server for testing this module.\"\"\"\n await async_setup_component(hass, webhook.DOMAIN, {})\n return await hass_client_no_auth()\n\n\n@pytest.fixture\nasync def webhook_id_with_api_key(hass):\n \"\"\"Initialize the Mailgun component and get the webhook_id.\"\"\"\n await async_setup_component(hass, mailgun.DOMAIN, {mailgun.DOMAIN: {\n CONF_API_KEY: API_KEY, CONF_DOMAIN: 'example.com'}})\n await async_process_ha_core_config(hass, {'internal_url':\n 'http://example.local:8123'})\n result = await hass.config_entries.flow.async_init('mailgun', context={\n 'source': config_entries.SOURCE_USER})\n assert result['type'] == data_entry_flow.FlowResultType.FORM, result\n result = await hass.config_entries.flow.async_configure(result[\n 'flow_id'], {})\n assert result['type'] == data_entry_flow.FlowResultType.CREATE_ENTRY\n return result['result'].data['webhook_id']\n\n\n@pytest.fixture\nasync def webhook_id_without_api_key(hass):\n \"\"\"Initialize the Mailgun component and get the webhook_id w/o API key.\"\"\"\n await async_setup_component(hass, mailgun.DOMAIN, {})\n await async_process_ha_core_config(hass, {'internal_url':\n 'http://example.local:8123'})\n result = await hass.config_entries.flow.async_init('mailgun', context={\n 'source': config_entries.SOURCE_USER})\n assert result['type'] == data_entry_flow.FlowResultType.FORM, result\n result = await hass.config_entries.flow.async_configure(result[\n 'flow_id'], {})\n assert result['type'] == data_entry_flow.FlowResultType.CREATE_ENTRY\n return result['result'].data['webhook_id']\n\n\n@pytest.fixture\nasync def mailgun_events(hass):\n \"\"\"Return a list of mailgun_events triggered.\"\"\"\n events = []\n\n @callback\n def handle_event(event):\n \"\"\"Handle Mailgun event.\"\"\"\n events.append(event)\n hass.bus.async_listen(mailgun.MESSAGE_RECEIVED, handle_event)\n 
return events\n\n\nasync def test_mailgun_webhook_with_missing_signature(http_client,\n webhook_id_with_api_key, mailgun_events) ->None:\n \"\"\"Test that webhook doesn't trigger an event without a signature.\"\"\"\n event_count = len(mailgun_events)\n await http_client.post(f'/api/webhook/{webhook_id_with_api_key}', json=\n {'hello': 'mailgun', 'signature': {}})\n assert len(mailgun_events) == event_count\n await http_client.post(f'/api/webhook/{webhook_id_with_api_key}', json=\n {'hello': 'mailgun'})\n assert len(mailgun_events) == event_count\n\n\nasync def test_mailgun_webhook_with_different_api_key(http_client,\n webhook_id_with_api_key, mailgun_events) ->None:\n \"\"\"Test that webhook doesn't trigger an event with a wrong signature.\"\"\"\n timestamp = '1529006854'\n token = 'a8ce0edb2dd8301dee6c2405235584e45aa91d1e9f979f3de0'\n event_count = len(mailgun_events)\n await http_client.post(f'/api/webhook/{webhook_id_with_api_key}', json=\n {'hello': 'mailgun', 'signature': {'signature': hmac.new(key=\n b'random_api_key', msg=bytes(f'{timestamp}{token}', 'utf-8'),\n digestmod=hashlib.sha256).hexdigest(), 'timestamp': timestamp,\n 'token': token}})\n assert len(mailgun_events) == event_count\n\n\nasync def test_mailgun_webhook_event_with_correct_api_key(http_client,\n webhook_id_with_api_key, mailgun_events) ->None:\n \"\"\"Test that webhook triggers an event after validating a signature.\"\"\"\n timestamp = '1529006854'\n token = 'a8ce0edb2dd8301dee6c2405235584e45aa91d1e9f979f3de0'\n event_count = len(mailgun_events)\n await http_client.post(f'/api/webhook/{webhook_id_with_api_key}', json=\n {'hello': 'mailgun', 'signature': {'signature': hmac.new(key=bytes(\n API_KEY, 'utf-8'), msg=bytes(f'{timestamp}{token}', 'utf-8'),\n digestmod=hashlib.sha256).hexdigest(), 'timestamp': timestamp,\n 'token': token}})\n assert len(mailgun_events) == event_count + 1\n assert mailgun_events[-1].data['webhook_id'] == webhook_id_with_api_key\n assert 
mailgun_events[-1].data['hello'] == 'mailgun'\n\n\nasync def test_mailgun_webhook_with_missing_signature_without_api_key(\n http_client, webhook_id_without_api_key, mailgun_events) ->None:\n \"\"\"Test that webhook triggers an event without a signature w/o API key.\"\"\"\n event_count = len(mailgun_events)\n await http_client.post(f'/api/webhook/{webhook_id_without_api_key}',\n json={'hello': 'mailgun', 'signature': {}})\n assert len(mailgun_events) == event_count + 1\n assert mailgun_events[-1].data['webhook_id'] == webhook_id_without_api_key\n assert mailgun_events[-1].data['hello'] == 'mailgun'\n await http_client.post(f'/api/webhook/{webhook_id_without_api_key}',\n json={'hello': 'mailgun'})\n assert len(mailgun_events) == event_count + 1\n assert mailgun_events[-1].data['webhook_id'] == webhook_id_without_api_key\n assert mailgun_events[-1].data['hello'] == 'mailgun'\n\n\nasync def test_mailgun_webhook_event_without_an_api_key(http_client,\n webhook_id_without_api_key, mailgun_events) ->None:\n \"\"\"Test that webhook triggers an event if there is no api key.\"\"\"\n timestamp = '1529006854'\n token = 'a8ce0edb2dd8301dee6c2405235584e45aa91d1e9f979f3de0'\n event_count = len(mailgun_events)\n await http_client.post(f'/api/webhook/{webhook_id_without_api_key}',\n json={'hello': 'mailgun', 'signature': {'signature': hmac.new(key=\n bytes(API_KEY, 'utf-8'), msg=bytes(f'{timestamp}{token}', 'utf-8'),\n digestmod=hashlib.sha256).hexdigest(), 'timestamp': timestamp,\n 'token': token}})\n assert len(mailgun_events) == event_count + 1\n assert mailgun_events[-1].data['webhook_id'] == webhook_id_without_api_key\n assert mailgun_events[-1].data['hello'] == 'mailgun'\n",
"step-4": "<mask token>\nimport hashlib\nimport hmac\nimport pytest\nfrom homeassistant import config_entries, data_entry_flow\nfrom homeassistant.components import mailgun, webhook\nfrom homeassistant.config import async_process_ha_core_config\nfrom homeassistant.const import CONF_API_KEY, CONF_DOMAIN\nfrom homeassistant.core import callback\nfrom homeassistant.setup import async_setup_component\nAPI_KEY = 'abc123'\n\n\n@pytest.fixture\nasync def http_client(hass, hass_client_no_auth):\n \"\"\"Initialize a Home Assistant Server for testing this module.\"\"\"\n await async_setup_component(hass, webhook.DOMAIN, {})\n return await hass_client_no_auth()\n\n\n@pytest.fixture\nasync def webhook_id_with_api_key(hass):\n \"\"\"Initialize the Mailgun component and get the webhook_id.\"\"\"\n await async_setup_component(hass, mailgun.DOMAIN, {mailgun.DOMAIN: {\n CONF_API_KEY: API_KEY, CONF_DOMAIN: 'example.com'}})\n await async_process_ha_core_config(hass, {'internal_url':\n 'http://example.local:8123'})\n result = await hass.config_entries.flow.async_init('mailgun', context={\n 'source': config_entries.SOURCE_USER})\n assert result['type'] == data_entry_flow.FlowResultType.FORM, result\n result = await hass.config_entries.flow.async_configure(result[\n 'flow_id'], {})\n assert result['type'] == data_entry_flow.FlowResultType.CREATE_ENTRY\n return result['result'].data['webhook_id']\n\n\n@pytest.fixture\nasync def webhook_id_without_api_key(hass):\n \"\"\"Initialize the Mailgun component and get the webhook_id w/o API key.\"\"\"\n await async_setup_component(hass, mailgun.DOMAIN, {})\n await async_process_ha_core_config(hass, {'internal_url':\n 'http://example.local:8123'})\n result = await hass.config_entries.flow.async_init('mailgun', context={\n 'source': config_entries.SOURCE_USER})\n assert result['type'] == data_entry_flow.FlowResultType.FORM, result\n result = await hass.config_entries.flow.async_configure(result[\n 'flow_id'], {})\n assert result['type'] == 
data_entry_flow.FlowResultType.CREATE_ENTRY\n return result['result'].data['webhook_id']\n\n\n@pytest.fixture\nasync def mailgun_events(hass):\n \"\"\"Return a list of mailgun_events triggered.\"\"\"\n events = []\n\n @callback\n def handle_event(event):\n \"\"\"Handle Mailgun event.\"\"\"\n events.append(event)\n hass.bus.async_listen(mailgun.MESSAGE_RECEIVED, handle_event)\n return events\n\n\nasync def test_mailgun_webhook_with_missing_signature(http_client,\n webhook_id_with_api_key, mailgun_events) ->None:\n \"\"\"Test that webhook doesn't trigger an event without a signature.\"\"\"\n event_count = len(mailgun_events)\n await http_client.post(f'/api/webhook/{webhook_id_with_api_key}', json=\n {'hello': 'mailgun', 'signature': {}})\n assert len(mailgun_events) == event_count\n await http_client.post(f'/api/webhook/{webhook_id_with_api_key}', json=\n {'hello': 'mailgun'})\n assert len(mailgun_events) == event_count\n\n\nasync def test_mailgun_webhook_with_different_api_key(http_client,\n webhook_id_with_api_key, mailgun_events) ->None:\n \"\"\"Test that webhook doesn't trigger an event with a wrong signature.\"\"\"\n timestamp = '1529006854'\n token = 'a8ce0edb2dd8301dee6c2405235584e45aa91d1e9f979f3de0'\n event_count = len(mailgun_events)\n await http_client.post(f'/api/webhook/{webhook_id_with_api_key}', json=\n {'hello': 'mailgun', 'signature': {'signature': hmac.new(key=\n b'random_api_key', msg=bytes(f'{timestamp}{token}', 'utf-8'),\n digestmod=hashlib.sha256).hexdigest(), 'timestamp': timestamp,\n 'token': token}})\n assert len(mailgun_events) == event_count\n\n\nasync def test_mailgun_webhook_event_with_correct_api_key(http_client,\n webhook_id_with_api_key, mailgun_events) ->None:\n \"\"\"Test that webhook triggers an event after validating a signature.\"\"\"\n timestamp = '1529006854'\n token = 'a8ce0edb2dd8301dee6c2405235584e45aa91d1e9f979f3de0'\n event_count = len(mailgun_events)\n await http_client.post(f'/api/webhook/{webhook_id_with_api_key}', 
json=\n {'hello': 'mailgun', 'signature': {'signature': hmac.new(key=bytes(\n API_KEY, 'utf-8'), msg=bytes(f'{timestamp}{token}', 'utf-8'),\n digestmod=hashlib.sha256).hexdigest(), 'timestamp': timestamp,\n 'token': token}})\n assert len(mailgun_events) == event_count + 1\n assert mailgun_events[-1].data['webhook_id'] == webhook_id_with_api_key\n assert mailgun_events[-1].data['hello'] == 'mailgun'\n\n\nasync def test_mailgun_webhook_with_missing_signature_without_api_key(\n http_client, webhook_id_without_api_key, mailgun_events) ->None:\n \"\"\"Test that webhook triggers an event without a signature w/o API key.\"\"\"\n event_count = len(mailgun_events)\n await http_client.post(f'/api/webhook/{webhook_id_without_api_key}',\n json={'hello': 'mailgun', 'signature': {}})\n assert len(mailgun_events) == event_count + 1\n assert mailgun_events[-1].data['webhook_id'] == webhook_id_without_api_key\n assert mailgun_events[-1].data['hello'] == 'mailgun'\n await http_client.post(f'/api/webhook/{webhook_id_without_api_key}',\n json={'hello': 'mailgun'})\n assert len(mailgun_events) == event_count + 1\n assert mailgun_events[-1].data['webhook_id'] == webhook_id_without_api_key\n assert mailgun_events[-1].data['hello'] == 'mailgun'\n\n\nasync def test_mailgun_webhook_event_without_an_api_key(http_client,\n webhook_id_without_api_key, mailgun_events) ->None:\n \"\"\"Test that webhook triggers an event if there is no api key.\"\"\"\n timestamp = '1529006854'\n token = 'a8ce0edb2dd8301dee6c2405235584e45aa91d1e9f979f3de0'\n event_count = len(mailgun_events)\n await http_client.post(f'/api/webhook/{webhook_id_without_api_key}',\n json={'hello': 'mailgun', 'signature': {'signature': hmac.new(key=\n bytes(API_KEY, 'utf-8'), msg=bytes(f'{timestamp}{token}', 'utf-8'),\n digestmod=hashlib.sha256).hexdigest(), 'timestamp': timestamp,\n 'token': token}})\n assert len(mailgun_events) == event_count + 1\n assert mailgun_events[-1].data['webhook_id'] == webhook_id_without_api_key\n assert 
mailgun_events[-1].data['hello'] == 'mailgun'\n",
"step-5": "\"\"\"Test the init file of Mailgun.\"\"\"\nimport hashlib\nimport hmac\n\nimport pytest\n\nfrom homeassistant import config_entries, data_entry_flow\nfrom homeassistant.components import mailgun, webhook\nfrom homeassistant.config import async_process_ha_core_config\nfrom homeassistant.const import CONF_API_KEY, CONF_DOMAIN\nfrom homeassistant.core import callback\nfrom homeassistant.setup import async_setup_component\n\nAPI_KEY = \"abc123\"\n\n\n@pytest.fixture\nasync def http_client(hass, hass_client_no_auth):\n \"\"\"Initialize a Home Assistant Server for testing this module.\"\"\"\n await async_setup_component(hass, webhook.DOMAIN, {})\n return await hass_client_no_auth()\n\n\n@pytest.fixture\nasync def webhook_id_with_api_key(hass):\n \"\"\"Initialize the Mailgun component and get the webhook_id.\"\"\"\n await async_setup_component(\n hass,\n mailgun.DOMAIN,\n {mailgun.DOMAIN: {CONF_API_KEY: API_KEY, CONF_DOMAIN: \"example.com\"}},\n )\n\n await async_process_ha_core_config(\n hass,\n {\"internal_url\": \"http://example.local:8123\"},\n )\n result = await hass.config_entries.flow.async_init(\n \"mailgun\", context={\"source\": config_entries.SOURCE_USER}\n )\n assert result[\"type\"] == data_entry_flow.FlowResultType.FORM, result\n\n result = await hass.config_entries.flow.async_configure(result[\"flow_id\"], {})\n assert result[\"type\"] == data_entry_flow.FlowResultType.CREATE_ENTRY\n\n return result[\"result\"].data[\"webhook_id\"]\n\n\n@pytest.fixture\nasync def webhook_id_without_api_key(hass):\n \"\"\"Initialize the Mailgun component and get the webhook_id w/o API key.\"\"\"\n await async_setup_component(hass, mailgun.DOMAIN, {})\n\n await async_process_ha_core_config(\n hass,\n {\"internal_url\": \"http://example.local:8123\"},\n )\n result = await hass.config_entries.flow.async_init(\n \"mailgun\", context={\"source\": config_entries.SOURCE_USER}\n )\n assert result[\"type\"] == data_entry_flow.FlowResultType.FORM, result\n\n result = await 
hass.config_entries.flow.async_configure(result[\"flow_id\"], {})\n assert result[\"type\"] == data_entry_flow.FlowResultType.CREATE_ENTRY\n\n return result[\"result\"].data[\"webhook_id\"]\n\n\n@pytest.fixture\nasync def mailgun_events(hass):\n \"\"\"Return a list of mailgun_events triggered.\"\"\"\n events = []\n\n @callback\n def handle_event(event):\n \"\"\"Handle Mailgun event.\"\"\"\n events.append(event)\n\n hass.bus.async_listen(mailgun.MESSAGE_RECEIVED, handle_event)\n\n return events\n\n\nasync def test_mailgun_webhook_with_missing_signature(\n http_client, webhook_id_with_api_key, mailgun_events\n) -> None:\n \"\"\"Test that webhook doesn't trigger an event without a signature.\"\"\"\n event_count = len(mailgun_events)\n\n await http_client.post(\n f\"/api/webhook/{webhook_id_with_api_key}\",\n json={\"hello\": \"mailgun\", \"signature\": {}},\n )\n\n assert len(mailgun_events) == event_count\n\n await http_client.post(\n f\"/api/webhook/{webhook_id_with_api_key}\", json={\"hello\": \"mailgun\"}\n )\n\n assert len(mailgun_events) == event_count\n\n\nasync def test_mailgun_webhook_with_different_api_key(\n http_client, webhook_id_with_api_key, mailgun_events\n) -> None:\n \"\"\"Test that webhook doesn't trigger an event with a wrong signature.\"\"\"\n timestamp = \"1529006854\"\n token = \"a8ce0edb2dd8301dee6c2405235584e45aa91d1e9f979f3de0\"\n\n event_count = len(mailgun_events)\n\n await http_client.post(\n f\"/api/webhook/{webhook_id_with_api_key}\",\n json={\n \"hello\": \"mailgun\",\n \"signature\": {\n \"signature\": hmac.new(\n key=b\"random_api_key\",\n msg=bytes(f\"{timestamp}{token}\", \"utf-8\"),\n digestmod=hashlib.sha256,\n ).hexdigest(),\n \"timestamp\": timestamp,\n \"token\": token,\n },\n },\n )\n\n assert len(mailgun_events) == event_count\n\n\nasync def test_mailgun_webhook_event_with_correct_api_key(\n http_client, webhook_id_with_api_key, mailgun_events\n) -> None:\n \"\"\"Test that webhook triggers an event after validating a 
signature.\"\"\"\n timestamp = \"1529006854\"\n token = \"a8ce0edb2dd8301dee6c2405235584e45aa91d1e9f979f3de0\"\n\n event_count = len(mailgun_events)\n\n await http_client.post(\n f\"/api/webhook/{webhook_id_with_api_key}\",\n json={\n \"hello\": \"mailgun\",\n \"signature\": {\n \"signature\": hmac.new(\n key=bytes(API_KEY, \"utf-8\"),\n msg=bytes(f\"{timestamp}{token}\", \"utf-8\"),\n digestmod=hashlib.sha256,\n ).hexdigest(),\n \"timestamp\": timestamp,\n \"token\": token,\n },\n },\n )\n\n assert len(mailgun_events) == event_count + 1\n assert mailgun_events[-1].data[\"webhook_id\"] == webhook_id_with_api_key\n assert mailgun_events[-1].data[\"hello\"] == \"mailgun\"\n\n\nasync def test_mailgun_webhook_with_missing_signature_without_api_key(\n http_client, webhook_id_without_api_key, mailgun_events\n) -> None:\n \"\"\"Test that webhook triggers an event without a signature w/o API key.\"\"\"\n event_count = len(mailgun_events)\n\n await http_client.post(\n f\"/api/webhook/{webhook_id_without_api_key}\",\n json={\"hello\": \"mailgun\", \"signature\": {}},\n )\n\n assert len(mailgun_events) == event_count + 1\n assert mailgun_events[-1].data[\"webhook_id\"] == webhook_id_without_api_key\n assert mailgun_events[-1].data[\"hello\"] == \"mailgun\"\n\n await http_client.post(\n f\"/api/webhook/{webhook_id_without_api_key}\", json={\"hello\": \"mailgun\"}\n )\n\n assert len(mailgun_events) == event_count + 1\n assert mailgun_events[-1].data[\"webhook_id\"] == webhook_id_without_api_key\n assert mailgun_events[-1].data[\"hello\"] == \"mailgun\"\n\n\nasync def test_mailgun_webhook_event_without_an_api_key(\n http_client, webhook_id_without_api_key, mailgun_events\n) -> None:\n \"\"\"Test that webhook triggers an event if there is no api key.\"\"\"\n timestamp = \"1529006854\"\n token = \"a8ce0edb2dd8301dee6c2405235584e45aa91d1e9f979f3de0\"\n\n event_count = len(mailgun_events)\n\n await http_client.post(\n f\"/api/webhook/{webhook_id_without_api_key}\",\n json={\n 
\"hello\": \"mailgun\",\n \"signature\": {\n \"signature\": hmac.new(\n key=bytes(API_KEY, \"utf-8\"),\n msg=bytes(f\"{timestamp}{token}\", \"utf-8\"),\n digestmod=hashlib.sha256,\n ).hexdigest(),\n \"timestamp\": timestamp,\n \"token\": token,\n },\n },\n )\n\n assert len(mailgun_events) == event_count + 1\n assert mailgun_events[-1].data[\"webhook_id\"] == webhook_id_without_api_key\n assert mailgun_events[-1].data[\"hello\"] == \"mailgun\"\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/user/bin/env python
# -*- coding: utf-8 -*-
# @Author : XordenLee
# @Time : 2019/2/1 18:51
import itchat
import requests
import sys
default_api_key = 'bb495c529b0e4efebd5d2632ecac5fb8'
def send(user_id, input_text, api_key=None):
if not api_key:
api_key = default_api_key
msg = {
"reqType": 0,
"perception": {
"inputText": {
"text": input_text
},
"selfInfo": {
"location": {
"city": "北京",
"province": "北京",
}
}
},
"userInfo": {
"apiKey": api_key,
"userId": user_id
}
}
return requests.post('http://openapi.tuling123.com/openapi/api/v2',json=msg).json()
@itchat.msg_register(itchat.content.TEXT)
def text_reply(msg):
print(msg.FromUserName[-9:], msg.text)
req = send(msg.FromUserName[1: 32], msg.text)
a = req.get('results')
b = a[0]['values']['text']
print(msg.get('ToUserName')[-9:],b)
return b
itchat.auto_login(hotReload=True)
itchat.run()
|
normal
|
{
"blob_id": "15539d824490b7ae4724e7c11949aa1db25ecab2",
"index": 5112,
"step-1": "<mask token>\n\n\ndef send(user_id, input_text, api_key=None):\n if not api_key:\n api_key = default_api_key\n msg = {'reqType': 0, 'perception': {'inputText': {'text': input_text},\n 'selfInfo': {'location': {'city': '北京', 'province': '北京'}}},\n 'userInfo': {'apiKey': api_key, 'userId': user_id}}\n return requests.post('http://openapi.tuling123.com/openapi/api/v2',\n json=msg).json()\n\n\n@itchat.msg_register(itchat.content.TEXT)\ndef text_reply(msg):\n print(msg.FromUserName[-9:], msg.text)\n req = send(msg.FromUserName[1:32], msg.text)\n a = req.get('results')\n b = a[0]['values']['text']\n print(msg.get('ToUserName')[-9:], b)\n return b\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef send(user_id, input_text, api_key=None):\n if not api_key:\n api_key = default_api_key\n msg = {'reqType': 0, 'perception': {'inputText': {'text': input_text},\n 'selfInfo': {'location': {'city': '北京', 'province': '北京'}}},\n 'userInfo': {'apiKey': api_key, 'userId': user_id}}\n return requests.post('http://openapi.tuling123.com/openapi/api/v2',\n json=msg).json()\n\n\n@itchat.msg_register(itchat.content.TEXT)\ndef text_reply(msg):\n print(msg.FromUserName[-9:], msg.text)\n req = send(msg.FromUserName[1:32], msg.text)\n a = req.get('results')\n b = a[0]['values']['text']\n print(msg.get('ToUserName')[-9:], b)\n return b\n\n\nitchat.auto_login(hotReload=True)\nitchat.run()\n",
"step-3": "<mask token>\ndefault_api_key = 'bb495c529b0e4efebd5d2632ecac5fb8'\n\n\ndef send(user_id, input_text, api_key=None):\n if not api_key:\n api_key = default_api_key\n msg = {'reqType': 0, 'perception': {'inputText': {'text': input_text},\n 'selfInfo': {'location': {'city': '北京', 'province': '北京'}}},\n 'userInfo': {'apiKey': api_key, 'userId': user_id}}\n return requests.post('http://openapi.tuling123.com/openapi/api/v2',\n json=msg).json()\n\n\n@itchat.msg_register(itchat.content.TEXT)\ndef text_reply(msg):\n print(msg.FromUserName[-9:], msg.text)\n req = send(msg.FromUserName[1:32], msg.text)\n a = req.get('results')\n b = a[0]['values']['text']\n print(msg.get('ToUserName')[-9:], b)\n return b\n\n\nitchat.auto_login(hotReload=True)\nitchat.run()\n",
"step-4": "import itchat\nimport requests\nimport sys\ndefault_api_key = 'bb495c529b0e4efebd5d2632ecac5fb8'\n\n\ndef send(user_id, input_text, api_key=None):\n if not api_key:\n api_key = default_api_key\n msg = {'reqType': 0, 'perception': {'inputText': {'text': input_text},\n 'selfInfo': {'location': {'city': '北京', 'province': '北京'}}},\n 'userInfo': {'apiKey': api_key, 'userId': user_id}}\n return requests.post('http://openapi.tuling123.com/openapi/api/v2',\n json=msg).json()\n\n\n@itchat.msg_register(itchat.content.TEXT)\ndef text_reply(msg):\n print(msg.FromUserName[-9:], msg.text)\n req = send(msg.FromUserName[1:32], msg.text)\n a = req.get('results')\n b = a[0]['values']['text']\n print(msg.get('ToUserName')[-9:], b)\n return b\n\n\nitchat.auto_login(hotReload=True)\nitchat.run()\n",
"step-5": "#!/user/bin/env python\r\n# -*- coding: utf-8 -*-\r\n# @Author : XordenLee\r\n# @Time : 2019/2/1 18:51\r\n\r\n\r\nimport itchat\r\nimport requests\r\nimport sys\r\n\r\ndefault_api_key = 'bb495c529b0e4efebd5d2632ecac5fb8'\r\n\r\ndef send(user_id, input_text, api_key=None):\r\n if not api_key:\r\n api_key = default_api_key\r\n msg = {\r\n \"reqType\": 0,\r\n \"perception\": {\r\n \"inputText\": {\r\n \"text\": input_text\r\n },\r\n \"selfInfo\": {\r\n \"location\": {\r\n \"city\": \"北京\",\r\n \"province\": \"北京\",\r\n }\r\n }\r\n },\r\n \"userInfo\": {\r\n \"apiKey\": api_key,\r\n \"userId\": user_id\r\n }\r\n }\r\n\r\n return requests.post('http://openapi.tuling123.com/openapi/api/v2',json=msg).json()\r\n\r\n\r\n@itchat.msg_register(itchat.content.TEXT)\r\ndef text_reply(msg):\r\n print(msg.FromUserName[-9:], msg.text)\r\n req = send(msg.FromUserName[1: 32], msg.text)\r\n a = req.get('results')\r\n b = a[0]['values']['text']\r\n print(msg.get('ToUserName')[-9:],b)\r\n return b\r\n\r\nitchat.auto_login(hotReload=True)\r\nitchat.run()\r\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(lista)
<|reserved_special_token_0|>
print(listasemana[0])
<|reserved_special_token_0|>
print(listasemana[-1])
<|reserved_special_token_0|>
print(listasemana[0, 3])
<|reserved_special_token_0|>
print(conjunto)
<|reserved_special_token_0|>
print(lista1palabras, lista2palabras)
<|reserved_special_token_0|>
print(lista1palabras[1, 2, 4, 5], lista2palabras)
<|reserved_special_token_0|>
print(lista2palabras[0, 1, 4, 5], lista1palabras)
<|reserved_special_token_0|>
print(lista1palabras, lista2palabras[0, 3])
<|reserved_special_token_1|>
lista = []
print(lista)
listasemana = ['Lunes', 'Martes', 'Miercoles', 'Jueves', 'Viernes']
print(listasemana[0])
listasemana = ['Lunes', 'Martes', 'Miercoles', 'Jueves', 'Viernes']
print(listasemana[-1])
listasemana = ['Lunes', 'Martes', 'Miercoles', 'Jueves', 'Viernes']
print(listasemana[0, 3])
listaa = [1, 2, 3, 4, 'hola', 2, 2]
conjunto = set(listaa)
listaa = listaa(conjunto)
print(conjunto)
lista1palabras = ['Sofia', 'Karla', 'Verinica', 'Lina', 'Natalia', 'Estefania']
lista2palabras = ['Enrique', 'Erica', 'Sofia', 'Lina', 'Carlos', 'Pablo']
print(lista1palabras, lista2palabras)
lista1palabras = ['Sofia', 'Karla', 'Verinica', 'Lina', 'Natalia', 'Estefania']
lista2palabras = ['Enrique', 'Erica', 'Sofia', 'Lina', 'Carlos', 'Pablo']
print(lista1palabras[1, 2, 4, 5], lista2palabras)
lista1palabras = ['Sofia', 'Karla', 'Verinica', 'Lina', 'Natalia', 'Estefania']
lista2palabras = ['Enrique', 'Erica', 'Sofia', 'Lina', 'Carlos', 'Pablo']
print(lista2palabras[0, 1, 4, 5], lista1palabras)
lista1palabras = ['Sofia', 'Karla', 'Verinica', 'Lina', 'Natalia', 'Estefania']
lista2palabras = ['Enrique', 'Erica', 'Sofia', 'Lina', 'Carlos', 'Pablo']
print(lista1palabras, lista2palabras[0, 3])
<|reserved_special_token_1|>
# listas: small demos of Python list operations.
# The original raised TypeError in three places: tuple indexing such as
# listasemana[0,3], and calling a list object (listaa(conjunto)); fixed below.

# An empty list.
lista = []
print(lista)

# Weekday list: first element.
listasemana = ["Lunes", "Martes", "Miercoles", "Jueves", "Viernes"]
print(listasemana[0])

# Weekday list: last element.
listasemana = ["Lunes", "Martes", "Miercoles", "Jueves", "Viernes"]
print(listasemana[-1])

# Weekday list: first three days (the original wrote listasemana[0,3],
# which is a tuple index and raises TypeError; a slice was intended).
listasemana = ["Lunes", "Martes", "Miercoles", "Jueves", "Viernes"]
print(listasemana[0:3])

# Remove duplicated elements from a list by round-tripping through a set.
listaa = [1, 2, 3, 4, "hola", 2, 2]
conjunto = set(listaa)
listaa = list(conjunto)  # original called listaa(conjunto): a list is not callable
print(conjunto)

# Print two name lists side by side.
lista1palabras = ["Sofia", "Karla", "Verinica", "Lina", "Natalia", "Estefania"]
lista2palabras = ["Enrique", "Erica", "Sofia", "Lina", "Carlos", "Pablo"]
print(lista1palabras, lista2palabras)

# Names that appear only in the first list
# (the original hard-coded lista1palabras[1,2,4,5], a TypeError).
lista1palabras = ["Sofia", "Karla", "Verinica", "Lina", "Natalia", "Estefania"]
lista2palabras = ["Enrique", "Erica", "Sofia", "Lina", "Carlos", "Pablo"]
print([n for n in lista1palabras if n not in lista2palabras], lista2palabras)

# Names that appear only in the second list.
lista1palabras = ["Sofia", "Karla", "Verinica", "Lina", "Natalia", "Estefania"]
lista2palabras = ["Enrique", "Erica", "Sofia", "Lina", "Carlos", "Pablo"]
print([n for n in lista2palabras if n not in lista1palabras], lista1palabras)

# Names repeated in both lists.
lista1palabras = ["Sofia", "Karla", "Verinica", "Lina", "Natalia", "Estefania"]
lista2palabras = ["Enrique", "Erica", "Sofia", "Lina", "Carlos", "Pablo"]
print(lista1palabras, [n for n in lista1palabras if n in lista2palabras])
|
flexible
|
{
"blob_id": "37b23dc520abc7cbb6798f41063696916065626f",
"index": 2203,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(lista)\n<mask token>\nprint(listasemana[0])\n<mask token>\nprint(listasemana[-1])\n<mask token>\nprint(listasemana[0, 3])\n<mask token>\nprint(conjunto)\n<mask token>\nprint(lista1palabras, lista2palabras)\n<mask token>\nprint(lista1palabras[1, 2, 4, 5], lista2palabras)\n<mask token>\nprint(lista2palabras[0, 1, 4, 5], lista1palabras)\n<mask token>\nprint(lista1palabras, lista2palabras[0, 3])\n",
"step-3": "lista = []\nprint(lista)\nlistasemana = ['Lunes', 'Martes', 'Miercoles', 'Jueves', 'Viernes']\nprint(listasemana[0])\nlistasemana = ['Lunes', 'Martes', 'Miercoles', 'Jueves', 'Viernes']\nprint(listasemana[-1])\nlistasemana = ['Lunes', 'Martes', 'Miercoles', 'Jueves', 'Viernes']\nprint(listasemana[0, 3])\nlistaa = [1, 2, 3, 4, 'hola', 2, 2]\nconjunto = set(listaa)\nlistaa = listaa(conjunto)\nprint(conjunto)\nlista1palabras = ['Sofia', 'Karla', 'Verinica', 'Lina', 'Natalia', 'Estefania']\nlista2palabras = ['Enrique', 'Erica', 'Sofia', 'Lina', 'Carlos', 'Pablo']\nprint(lista1palabras, lista2palabras)\nlista1palabras = ['Sofia', 'Karla', 'Verinica', 'Lina', 'Natalia', 'Estefania']\nlista2palabras = ['Enrique', 'Erica', 'Sofia', 'Lina', 'Carlos', 'Pablo']\nprint(lista1palabras[1, 2, 4, 5], lista2palabras)\nlista1palabras = ['Sofia', 'Karla', 'Verinica', 'Lina', 'Natalia', 'Estefania']\nlista2palabras = ['Enrique', 'Erica', 'Sofia', 'Lina', 'Carlos', 'Pablo']\nprint(lista2palabras[0, 1, 4, 5], lista1palabras)\nlista1palabras = ['Sofia', 'Karla', 'Verinica', 'Lina', 'Natalia', 'Estefania']\nlista2palabras = ['Enrique', 'Erica', 'Sofia', 'Lina', 'Carlos', 'Pablo']\nprint(lista1palabras, lista2palabras[0, 3])\n",
"step-4": "#listas\nlista=[]\nprint(lista)\n\n#lista semana\nlistasemana=[\"Lunes\",\"Martes\",\"Miercoles\",\"Jueves\",\"Viernes\"]\nprint(listasemana[0])\n\n#lista semana\nlistasemana=[\"Lunes\",\"Martes\",\"Miercoles\",\"Jueves\",\"Viernes\"]\nprint(listasemana[-1])\n\n\n#lista semana\nlistasemana=[\"Lunes\",\"Martes\",\"Miercoles\",\"Jueves\",\"Viernes\"]\nprint(listasemana[0,3])\n\n#quitar los elementos repetidos de una lista\nlistaa=[1,2,3,4,\"hola\",2,2]\nconjunto=set(listaa)\nlistaa=listaa(conjunto)\nprint(conjunto)\n\n#listas palabras de 2 listas\nlista1palabras=[\"Sofia\",\"Karla\",\"Verinica\",\"Lina\",\"Natalia\",\"Estefania\"]\nlista2palabras=[\"Enrique\",\"Erica\",\"Sofia\",\"Lina\",\"Carlos\",\"Pablo\"]\nprint(lista1palabras,lista2palabras)\n\n#listas de palabras que aparecen en la primera lista\nlista1palabras=[\"Sofia\",\"Karla\",\"Verinica\",\"Lina\",\"Natalia\",\"Estefania\"]\nlista2palabras=[\"Enrique\",\"Erica\",\"Sofia\",\"Lina\",\"Carlos\",\"Pablo\"]\nprint(lista1palabras[1,2,4,5],lista2palabras)\n\n#listas de palabras que aparecen en la segunda lista\nlista1palabras=[\"Sofia\",\"Karla\",\"Verinica\",\"Lina\",\"Natalia\",\"Estefania\"]\nlista2palabras=[\"Enrique\",\"Erica\",\"Sofia\",\"Lina\",\"Carlos\",\"Pablo\"]\nprint(lista2palabras[0,1,4,5],lista1palabras)\n\n\n#listas palabras repetidas en ambas listas\nlista1palabras=[\"Sofia\",\"Karla\",\"Verinica\",\"Lina\",\"Natalia\",\"Estefania\"]\nlista2palabras=[\"Enrique\",\"Erica\",\"Sofia\",\"Lina\",\"Carlos\",\"Pablo\"]\nprint(lista1palabras,lista2palabras[0,3])\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import sys
import time  # NOTE(review): imported but unused in the original; kept as-is
import random

# Interactive flashcard quiz.  The input file (sys.argv[1]) alternates
# pinyin lines and character lines.  Wrong answers are re-queued until
# answered correctly, and a summary report is written next to the input
# file.  Fixes over the original: both files are closed via ``with``, and
# the wrong-answer summary lines each get their own newline.

character_dict = {}
with open(sys.argv[1], 'r') as f:
    while True:
        pinyin = f.readline().strip()
        character = f.readline().strip()
        # An empty character line means we ran out of pinyin/character pairs.
        if not character:
            break
        character_dict[pinyin] = character

with open(sys.argv[1][:-3] + "_guess_char.out", 'w') as fout:
    fout.write("-----------------------------")
    fout.write("\n")

    incorrect = []          # pinyin the user got wrong (may repeat)
    problems_left = list(character_dict.keys())
    random.shuffle(problems_left)
    additional = 0          # wrong answers are re-asked, inflating the total

    while len(problems_left) > 0:
        pinyin = problems_left.pop(0)
        chinese_character = character_dict[pinyin]
        # The guess itself is not checked programmatically; the user
        # self-grades on the next prompt.
        input("Guess for " + pinyin + "(Press Enter): ")
        print(chinese_character, "<-- Answer")
        if input("Did you get it right? y/n?") == "y":
            print("CORRECT! Nice!")
        else:
            print("WRONG!", pinyin, "==", chinese_character)
            fout.write("WRONG! " + pinyin + " == " + chinese_character)
            fout.write("\n")
            incorrect.append(pinyin)
            problems_left.append(pinyin)   # ask again later
            additional += 1

    print("Here's the ones you got wrong!")
    for key in incorrect:
        print(key, "--", character_dict[key])
        fout.write(key + "--" + character_dict[key])
        fout.write("\n")  # original omitted this, mashing summary lines together

    correct_num = len(character_dict) + additional - len(incorrect)
    total = len(character_dict) + additional
    print("ACCURACY:", correct_num, "/", total, ":", int(100 * correct_num / total), "%")
    fout.write("ACCURACY: " + str(correct_num) + "/" + str(total)
               + " : " + str(100 * correct_num / total) + "%")
    fout.write("-----------------------------")
|
normal
|
{
"blob_id": "226bb323597100b57ef83eb0d5e4a9b894b77fd2",
"index": 9830,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile True:\n pinyin = f.readline().strip()\n character = f.readline().strip()\n if not character:\n break\n character_dict[pinyin] = character\n<mask token>\nfout.write('-----------------------------')\nfout.write('\\n')\n<mask token>\nrandom.shuffle(pinyin_keys)\n<mask token>\nwhile len(problems_left) > 0:\n key = problems_left[0]\n chinese_character = character_dict[key]\n pinyin = key\n guess = input('Guess for ' + pinyin + '(Press Enter): ')\n print(chinese_character, '<-- Answer')\n correctResult = True if input('Did you get it right? y/n?'\n ) == 'y' else False\n if correctResult:\n print('CORRECT! Nice!')\n else:\n print('WRONG!', pinyin, '==', chinese_character)\n fout.write('WRONG! ' + pinyin + ' == ' + chinese_character)\n fout.write('\\n')\n incorrect.append(pinyin)\n problems_left.append(pinyin)\n additional += 1\n del problems_left[0]\nprint(\"Here's the ones you got wrong!\")\nfor key in incorrect:\n print(key, '--', character_dict[key])\n fout.write(key + '--' + character_dict[key])\n<mask token>\nprint('ACCURACY:', correct_num, '/', len(character_dict) + additional, ':',\n int(100 * correct_num / (len(character_dict) + additional)), '%')\nfout.write('ACCURACY: ' + str(correct_num) + '/' + str(len(character_dict) +\n additional) + ' : ' + str(100 * correct_num / (len(character_dict) +\n additional)) + '%')\nfout.write('-----------------------------')\n",
"step-3": "<mask token>\ncharacter_dict = {}\nf = open(sys.argv[1], 'r')\nwhile True:\n pinyin = f.readline().strip()\n character = f.readline().strip()\n if not character:\n break\n character_dict[pinyin] = character\n<mask token>\nfout = open(sys.argv[1][:-3] + '_guess_char.out', 'w')\nfout.write('-----------------------------')\nfout.write('\\n')\n<mask token>\nincorrect = []\npinyin_keys = list(character_dict.keys())\nrandom.shuffle(pinyin_keys)\nproblems_left = pinyin_keys\nadditional = 0\nwhile len(problems_left) > 0:\n key = problems_left[0]\n chinese_character = character_dict[key]\n pinyin = key\n guess = input('Guess for ' + pinyin + '(Press Enter): ')\n print(chinese_character, '<-- Answer')\n correctResult = True if input('Did you get it right? y/n?'\n ) == 'y' else False\n if correctResult:\n print('CORRECT! Nice!')\n else:\n print('WRONG!', pinyin, '==', chinese_character)\n fout.write('WRONG! ' + pinyin + ' == ' + chinese_character)\n fout.write('\\n')\n incorrect.append(pinyin)\n problems_left.append(pinyin)\n additional += 1\n del problems_left[0]\nprint(\"Here's the ones you got wrong!\")\nfor key in incorrect:\n print(key, '--', character_dict[key])\n fout.write(key + '--' + character_dict[key])\ncorrect_num = len(character_dict) + additional - len(incorrect)\nprint('ACCURACY:', correct_num, '/', len(character_dict) + additional, ':',\n int(100 * correct_num / (len(character_dict) + additional)), '%')\nfout.write('ACCURACY: ' + str(correct_num) + '/' + str(len(character_dict) +\n additional) + ' : ' + str(100 * correct_num / (len(character_dict) +\n additional)) + '%')\nfout.write('-----------------------------')\n",
"step-4": "import sys\ncharacter_dict = {}\nf = open(sys.argv[1], 'r')\nwhile True:\n pinyin = f.readline().strip()\n character = f.readline().strip()\n if not character:\n break\n character_dict[pinyin] = character\nimport time\nfout = open(sys.argv[1][:-3] + '_guess_char.out', 'w')\nfout.write('-----------------------------')\nfout.write('\\n')\nimport random\nincorrect = []\npinyin_keys = list(character_dict.keys())\nrandom.shuffle(pinyin_keys)\nproblems_left = pinyin_keys\nadditional = 0\nwhile len(problems_left) > 0:\n key = problems_left[0]\n chinese_character = character_dict[key]\n pinyin = key\n guess = input('Guess for ' + pinyin + '(Press Enter): ')\n print(chinese_character, '<-- Answer')\n correctResult = True if input('Did you get it right? y/n?'\n ) == 'y' else False\n if correctResult:\n print('CORRECT! Nice!')\n else:\n print('WRONG!', pinyin, '==', chinese_character)\n fout.write('WRONG! ' + pinyin + ' == ' + chinese_character)\n fout.write('\\n')\n incorrect.append(pinyin)\n problems_left.append(pinyin)\n additional += 1\n del problems_left[0]\nprint(\"Here's the ones you got wrong!\")\nfor key in incorrect:\n print(key, '--', character_dict[key])\n fout.write(key + '--' + character_dict[key])\ncorrect_num = len(character_dict) + additional - len(incorrect)\nprint('ACCURACY:', correct_num, '/', len(character_dict) + additional, ':',\n int(100 * correct_num / (len(character_dict) + additional)), '%')\nfout.write('ACCURACY: ' + str(correct_num) + '/' + str(len(character_dict) +\n additional) + ' : ' + str(100 * correct_num / (len(character_dict) +\n additional)) + '%')\nfout.write('-----------------------------')\n",
"step-5": "import sys\n\ncharacter_dict = {}\n\nf = open(sys.argv[1], 'r')\nwhile True:\n\tpinyin = f.readline().strip()\n\tcharacter = f.readline().strip()\n\tif not character: break\n\tcharacter_dict[pinyin] = character\nimport time\nfout = open(sys.argv[1][:-3] + \"_guess_char.out\", 'w')\nfout.write(\"-----------------------------\")\nfout.write(\"\\n\")\nimport random\n\nincorrect = []\npinyin_keys = list(character_dict.keys())\nrandom.shuffle(pinyin_keys)\nproblems_left = pinyin_keys\nadditional = 0\nwhile len(problems_left) > 0:\n\tkey = problems_left[0]\n\tchinese_character = character_dict[key]\n\tpinyin = key\n\t# result = chinese_character.encode('big5').decode('big5')\n\tguess = input(\"Guess for \" + pinyin + \"(Press Enter): \")\n\tprint(chinese_character, \"<-- Answer\")\n\tcorrectResult = True if input(\"Did you get it right? y/n?\") == \"y\" else False\n\tif correctResult:\n\t\tprint(\"CORRECT! Nice!\")\n\telse:\n\t\tprint(\"WRONG!\", pinyin,\"==\", chinese_character)\n\t\tfout.write(\"WRONG! \" + pinyin + \" == \" + chinese_character)\n\t\tfout.write(\"\\n\")\n\t\tincorrect.append(pinyin)\n\t\tproblems_left.append(pinyin)\n\t\tadditional += 1\n\tdel problems_left[0]\n\nprint(\"Here's the ones you got wrong!\")\nfor key in incorrect:\n\tprint(key, \"--\", character_dict[key])\n\tfout.write(key + \"--\" + character_dict[key])\ncorrect_num = len(character_dict) + additional - len(incorrect)\nprint(\"ACCURACY:\", correct_num, \"/\", len(character_dict) + additional, \":\", int(100 * correct_num/(len(character_dict) + additional)), \"%\")\nfout.write(\"ACCURACY: \" + str(correct_num) + \"/\" + str(len(character_dict) + additional) + \" : \" + str(100 * correct_num/(len(character_dict) + additional)) + \"%\")\nfout.write(\"-----------------------------\")",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from typing import *
class Solution:
    def isValidSudoku(self, board: List[List[str]]) -> bool:
        """Return True if no filled cell repeats a digit in its row,
        column, or 3x3 box.  Empty cells are marked with ".".
        """
        seen_rows = [set() for _ in range(9)]
        seen_cols = [set() for _ in range(9)]
        seen_boxes = [set() for _ in range(9)]

        for r in range(9):
            for c in range(9):
                digit = board[r][c]
                if digit == ".":
                    continue
                box = (r // 3) * 3 + c // 3
                if (digit in seen_rows[r]
                        or digit in seen_cols[c]
                        or digit in seen_boxes[box]):
                    return False
                seen_rows[r].add(digit)
                seen_cols[c].add(digit)
                seen_boxes[box].add(digit)

        return True
|
normal
|
{
"blob_id": "57c911c9a10f9d116f1b7099c5202377e16050f1",
"index": 7871,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Solution:\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Solution:\n\n def isValidSudoku(self, board: List[List[str]]) ->bool:\n cells = {}\n for i in range(9):\n for j in range(9):\n if board[i][j] != '.':\n val = board[i][j]\n for k in range(j - 1, -1, -1):\n if val == board[i][k]:\n return False\n for k in range(j + 1, 9):\n if val == board[i][k]:\n return False\n for k in range(i - 1, -1, -1):\n if val == board[k][j]:\n return False\n for k in range(i + 1, 9):\n if val == board[k][j]:\n return False\n idx = i // 3 * 3 + j // 3\n if idx in cells:\n if val in cells[idx]:\n return False\n else:\n cells[idx].append(val)\n else:\n cells[idx] = [val]\n return True\n",
"step-4": "from typing import *\n\n\nclass Solution:\n\n def isValidSudoku(self, board: List[List[str]]) ->bool:\n cells = {}\n for i in range(9):\n for j in range(9):\n if board[i][j] != '.':\n val = board[i][j]\n for k in range(j - 1, -1, -1):\n if val == board[i][k]:\n return False\n for k in range(j + 1, 9):\n if val == board[i][k]:\n return False\n for k in range(i - 1, -1, -1):\n if val == board[k][j]:\n return False\n for k in range(i + 1, 9):\n if val == board[k][j]:\n return False\n idx = i // 3 * 3 + j // 3\n if idx in cells:\n if val in cells[idx]:\n return False\n else:\n cells[idx].append(val)\n else:\n cells[idx] = [val]\n return True\n",
"step-5": "from typing import *\n\n\nclass Solution:\n def isValidSudoku(self, board: List[List[str]]) -> bool:\n cells = {}\n \n for i in range(9):\n for j in range(9):\n if board[i][j] != \".\":\n val = board[i][j]\n \n # is unique in row\n for k in range(j-1, -1, -1):\n if val == board[i][k]:\n return False\n \n for k in range(j+1, 9):\n if val == board[i][k]:\n return False\n \n # is unique in col\n for k in range(i-1, -1, -1):\n if val == board[k][j]:\n return False\n \n for k in range(i+1, 9):\n if val == board[k][j]:\n return False\n \n idx = i // 3 * 3 + j // 3\n \n if idx in cells:\n if val in cells[idx]:\n return False\n else:\n cells[idx].append(val)\n else:\n cells[idx] = [val]\n \n return True\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from typing import List
import uvicorn
from fastapi import Depends, FastAPI, HTTPException
from sqlalchemy.orm import Session
from . import crud, models, schemas
from .database import SessionLocal, engine
# Create any missing tables for the declarative models at import time
# (no-op for tables that already exist).
models.Base.metadata.create_all(bind=engine)

app = FastAPI()
def get_db():
    """FastAPI dependency: yield one database session per request and
    guarantee it is closed when the request finishes.
    """
    session = SessionLocal()
    try:
        yield session
    finally:
        session.close()
@app.post("/users/", response_model=schemas.User)
def create_user(user: schemas.UserCreate, db: Session = Depends(get_db)):
    """Register a new user; respond 400 if the e-mail is already taken.

    Responds with the public ``schemas.User`` schema, consistent with the
    other /users endpoints.  The original declared
    ``response_model=schemas.UserCreate`` — the input schema, which
    presumably includes the submitted password and would echo it back.
    NOTE(review): confirm ``schemas.User`` excludes credentials.
    """
    db_user = crud.get_user_by_mail(db, user.mail)
    if db_user:
        raise HTTPException(status_code=400, detail="Email already registered")
    return crud.create_user(db=db, user=user)
@app.get("/users/{user_id}", response_model=schemas.User)
def read_user(user_id: int, db: Session = Depends(get_db)):
    """Return the user with the given id, or respond 404 if absent."""
    found = crud.get_user(db, user_id)
    if found is None:
        raise HTTPException(status_code=404, detail="User not found")
    return found
@app.delete("/users/{user_id}", response_model=schemas.User)
def delete_user(user_id: int, db: Session = Depends(get_db)):
    """Delete a user by primary key; respond 404 if no such row exists."""
    # Pass the request-scoped session, matching every other crud call in
    # this module.  The original called crud.delete_user(user_id) and left
    # the injected ``db`` dependency unused.
    # NOTE(review): confirm crud.delete_user's signature is (db, user_id).
    db_user = crud.delete_user(db, user_id)
    if db_user is None:
        raise HTTPException(status_code=404, detail="User not found")
    return db_user
# @app.post("/cars/", response_model=schemas.Car)
# def create_user(car: schemas.CarCreate, db: Session = Depends(get_db)):
#
# if db_car:
# raise HTTPException(status_code=400, detail="License already registered")
# return crud.create_car(db=db, car=car)
# NOTE(review): this endpoint mutates state and takes a request body, so it
# should probably be declared with @app.post — confirm with existing callers
# before changing, since they may depend on GET.
@app.get("/demand", response_model=schemas.Demand)
def place_demand(demand: schemas.DemandCreate, db: Session = Depends(get_db)):
    # Reject the request if the user already has an open (active) demand.
    db_demand = crud.get_active_demand_user(db, demand.user_id)
    if db_demand:
        raise HTTPException(status_code=400, detail="The user already has an open demand")

    db_demand = crud.create_demand(db, demand)

    # TODO: trigger the scheduler once a demand has been created.

    return db_demand
if __name__ == "__main__":
    # Allow running this module directly with a local development server.
    uvicorn.run(app, host="0.0.0.0", port=8000)
|
normal
|
{
"blob_id": "5961c593b46a8d3a0f7c62d862cce9a2814e42f4",
"index": 9019,
"step-1": "<mask token>\n\n\ndef get_db():\n db = SessionLocal()\n try:\n yield db\n finally:\n db.close()\n\n\n@app.post('/users/', response_model=schemas.UserCreate)\ndef create_user(user: schemas.UserCreate, db: Session=Depends(get_db)):\n db_user = crud.get_user_by_mail(db, user.mail)\n if db_user:\n raise HTTPException(status_code=400, detail='Email already registered')\n return crud.create_user(db=db, user=user)\n\n\n@app.get('/users/{user_id}', response_model=schemas.User)\ndef read_user(user_id: int, db: Session=Depends(get_db)):\n db_user = crud.get_user(db, user_id)\n if db_user is None:\n raise HTTPException(status_code=404, detail='User not found')\n return db_user\n\n\n@app.delete('/users/{user_id}', response_model=schemas.User)\ndef delete_user(user_id: int, db: Session=Depends(get_db)):\n db_user = crud.delete_user(user_id)\n if db_user is None:\n raise HTTPException(status_code=404, detail='User not found')\n return db_user\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_db():\n db = SessionLocal()\n try:\n yield db\n finally:\n db.close()\n\n\n@app.post('/users/', response_model=schemas.UserCreate)\ndef create_user(user: schemas.UserCreate, db: Session=Depends(get_db)):\n db_user = crud.get_user_by_mail(db, user.mail)\n if db_user:\n raise HTTPException(status_code=400, detail='Email already registered')\n return crud.create_user(db=db, user=user)\n\n\n@app.get('/users/{user_id}', response_model=schemas.User)\ndef read_user(user_id: int, db: Session=Depends(get_db)):\n db_user = crud.get_user(db, user_id)\n if db_user is None:\n raise HTTPException(status_code=404, detail='User not found')\n return db_user\n\n\n@app.delete('/users/{user_id}', response_model=schemas.User)\ndef delete_user(user_id: int, db: Session=Depends(get_db)):\n db_user = crud.delete_user(user_id)\n if db_user is None:\n raise HTTPException(status_code=404, detail='User not found')\n return db_user\n\n\n@app.get('/demand', response_model=schemas.Demand)\ndef place_demand(demand: schemas.DemandCreate, db: Session=Depends(get_db)):\n db_demand = crud.get_active_demand_user(db, demand.user_id)\n if db_demand:\n raise HTTPException(status_code=400, detail=\n 'The user already has an open demand')\n db_demand = crud.create_demand(db, demand)\n return db_demand\n\n\n<mask token>\n",
"step-3": "<mask token>\nmodels.Base.metadata.create_all(bind=engine)\n<mask token>\n\n\ndef get_db():\n db = SessionLocal()\n try:\n yield db\n finally:\n db.close()\n\n\n@app.post('/users/', response_model=schemas.UserCreate)\ndef create_user(user: schemas.UserCreate, db: Session=Depends(get_db)):\n db_user = crud.get_user_by_mail(db, user.mail)\n if db_user:\n raise HTTPException(status_code=400, detail='Email already registered')\n return crud.create_user(db=db, user=user)\n\n\n@app.get('/users/{user_id}', response_model=schemas.User)\ndef read_user(user_id: int, db: Session=Depends(get_db)):\n db_user = crud.get_user(db, user_id)\n if db_user is None:\n raise HTTPException(status_code=404, detail='User not found')\n return db_user\n\n\n@app.delete('/users/{user_id}', response_model=schemas.User)\ndef delete_user(user_id: int, db: Session=Depends(get_db)):\n db_user = crud.delete_user(user_id)\n if db_user is None:\n raise HTTPException(status_code=404, detail='User not found')\n return db_user\n\n\n@app.get('/demand', response_model=schemas.Demand)\ndef place_demand(demand: schemas.DemandCreate, db: Session=Depends(get_db)):\n db_demand = crud.get_active_demand_user(db, demand.user_id)\n if db_demand:\n raise HTTPException(status_code=400, detail=\n 'The user already has an open demand')\n db_demand = crud.create_demand(db, demand)\n return db_demand\n\n\nif __name__ == '__main__':\n uvicorn.run(app, host='0.0.0.0', port=8000)\n",
"step-4": "<mask token>\nmodels.Base.metadata.create_all(bind=engine)\napp = FastAPI()\n\n\ndef get_db():\n db = SessionLocal()\n try:\n yield db\n finally:\n db.close()\n\n\n@app.post('/users/', response_model=schemas.UserCreate)\ndef create_user(user: schemas.UserCreate, db: Session=Depends(get_db)):\n db_user = crud.get_user_by_mail(db, user.mail)\n if db_user:\n raise HTTPException(status_code=400, detail='Email already registered')\n return crud.create_user(db=db, user=user)\n\n\n@app.get('/users/{user_id}', response_model=schemas.User)\ndef read_user(user_id: int, db: Session=Depends(get_db)):\n db_user = crud.get_user(db, user_id)\n if db_user is None:\n raise HTTPException(status_code=404, detail='User not found')\n return db_user\n\n\n@app.delete('/users/{user_id}', response_model=schemas.User)\ndef delete_user(user_id: int, db: Session=Depends(get_db)):\n db_user = crud.delete_user(user_id)\n if db_user is None:\n raise HTTPException(status_code=404, detail='User not found')\n return db_user\n\n\n@app.get('/demand', response_model=schemas.Demand)\ndef place_demand(demand: schemas.DemandCreate, db: Session=Depends(get_db)):\n db_demand = crud.get_active_demand_user(db, demand.user_id)\n if db_demand:\n raise HTTPException(status_code=400, detail=\n 'The user already has an open demand')\n db_demand = crud.create_demand(db, demand)\n return db_demand\n\n\nif __name__ == '__main__':\n uvicorn.run(app, host='0.0.0.0', port=8000)\n",
"step-5": "from typing import List\n\nimport uvicorn\nfrom fastapi import Depends, FastAPI, HTTPException\nfrom sqlalchemy.orm import Session\n\nfrom . import crud, models, schemas\nfrom .database import SessionLocal, engine\n\nmodels.Base.metadata.create_all(bind=engine)\n\napp = FastAPI()\n\ndef get_db():\n db = SessionLocal()\n\n try:\n yield db\n finally:\n db.close()\n\n@app.post(\"/users/\", response_model=schemas.UserCreate)\ndef create_user(user: schemas.UserCreate, db: Session = Depends(get_db)):\n db_user = crud.get_user_by_mail(db, user.mail)\n if db_user:\n raise HTTPException(status_code=400, detail=\"Email already registered\")\n return crud.create_user(db=db, user=user)\n\n@app.get(\"/users/{user_id}\", response_model=schemas.User)\ndef read_user(user_id: int, db: Session = Depends(get_db)):\n db_user = crud.get_user(db, user_id)\n\n if db_user is None:\n raise HTTPException(status_code=404, detail = \"User not found\")\n return db_user\n\n@app.delete(\"/users/{user_id}\", response_model=schemas.User)\ndef delete_user(user_id: int, db: Session = Depends(get_db)):\n db_user = crud.delete_user(user_id)\n\n if db_user is None:\n raise HTTPException(status_code=404, detail=\"User not found\")\n\n return db_user\n\n\n\n# @app.post(\"/cars/\", response_model=schemas.Car)\n# def create_user(car: schemas.CarCreate, db: Session = Depends(get_db)):\n#\n# if db_car:\n# raise HTTPException(status_code=400, detail=\"License already registered\")\n# return crud.create_car(db=db, car=car)\n\n\n@app.get(\"/demand\", response_model=schemas.Demand)\ndef place_demand(demand: schemas.DemandCreate, db: Session = Depends(get_db)):\n db_demand = crud.get_active_demand_user(db, demand.user_id)\n if db_demand:\n raise HTTPException(status_code=400, detail=\"The user already has an open demand\")\n\n db_demand = crud.create_demand(db, demand)\n\n #ToDo Trigger schedular\n\n return db_demand\n\nif __name__ == \"__main__\":\n uvicorn.run(app, host=\"0.0.0.0\", port=8000)",
"step-ids": [
4,
5,
6,
7,
9
]
}
|
[
4,
5,
6,
7,
9
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
z1.write('file05.txt')
z1.write('file03.txt')
z1.close()
<|reserved_special_token_0|>
z2.extractall('电影')
z2.close()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
z1 = zipfile.ZipFile('a.zip', 'w')
z1.write('file05.txt')
z1.write('file03.txt')
z1.close()
z2 = zipfile.ZipFile('a.zip', 'r')
z2.extractall('电影')
z2.close()
<|reserved_special_token_1|>
import shutil
import zipfile
z1 = zipfile.ZipFile('a.zip', 'w')
z1.write('file05.txt')
z1.write('file03.txt')
z1.close()
z2 = zipfile.ZipFile('a.zip', 'r')
z2.extractall('电影')
z2.close()
<|reserved_special_token_1|>
#coding=utf-8

import shutil
import zipfile
# shutil.copyfile("file03.txt","file05.txt") # copy a single file

# shutil.copytree("movie/大陆","电影") # copy a whole directory tree

# Ignore files that should not be copied:
# shutil.copytree("movie/大陆","电影",ignore=shutil.ignore_patterns("*.txt","*.html"))

# Compression and decompression:
# shutil.make_archive("电影/压缩","zip","movie/大陆")

# Create a zip archive and add two files to it.
z1 = zipfile.ZipFile("a.zip","w")
z1.write("file05.txt")
z1.write("file03.txt")
z1.close()


# Extract the archive into the target directory.
z2 = zipfile.ZipFile("a.zip","r")
z2.extractall("电影")
z2.close()
|
flexible
|
{
"blob_id": "81f5753e8d0004244b4ee8e26895cb2b38fbb8b6",
"index": 751,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nz1.write('file05.txt')\nz1.write('file03.txt')\nz1.close()\n<mask token>\nz2.extractall('电影')\nz2.close()\n",
"step-3": "<mask token>\nz1 = zipfile.ZipFile('a.zip', 'w')\nz1.write('file05.txt')\nz1.write('file03.txt')\nz1.close()\nz2 = zipfile.ZipFile('a.zip', 'r')\nz2.extractall('电影')\nz2.close()\n",
"step-4": "import shutil\nimport zipfile\nz1 = zipfile.ZipFile('a.zip', 'w')\nz1.write('file05.txt')\nz1.write('file03.txt')\nz1.close()\nz2 = zipfile.ZipFile('a.zip', 'r')\nz2.extractall('电影')\nz2.close()\n",
"step-5": "#coding=utf-8\n\nimport shutil\nimport zipfile\n# shutil.copyfile(\"file03.txt\",\"file05.txt\") #拷贝\n\n# shutil.copytree(\"movie/大陆\",\"电影\") #拷贝文件夹\n\n#忽略不需要拷贝的文件\n# shutil.copytree(\"movie/大陆\",\"电影\",ignore=shutil.ignore_patterns(\"*.txt\",\"*.html\"))\n\n#压缩和解压缩\n# shutil.make_archive(\"电影/压缩\",\"zip\",\"movie/大陆\")\n\n\nz1 = zipfile.ZipFile(\"a.zip\",\"w\")\nz1.write(\"file05.txt\")\nz1.write(\"file03.txt\")\nz1.close()\n\n\n#解压缩\nz2 = zipfile.ZipFile(\"a.zip\",\"r\")\nz2.extractall(\"电影\")\nz2.close()\n\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# Convert ``num`` to a 4-digit base-16 string by repeated division,
# exactly as the original did — but with the four copy-pasted division
# blocks collapsed into a loop, and the fragile chain of str.replace
# calls ("10"->"a" ... "15"->"f") replaced by a digit lookup table.
num = 15850
base = 16

# Index k holds the symbol for remainder value k.
simbolos = "0123456789abcdef"

cociente = num
bits = []
for _ in range(4):  # four divisions, matching the original's bit1..bit4
    residuo = cociente % base
    cociente = cociente // base
    bits.append(simbolos[residuo])

# Keep the original variable names: bit1 is the least-significant digit.
bit1, bit2, bit3, bit4 = bits
print("{} = {}{}{}{}".format(num, bit4, bit3, bit2, bit1))
|
normal
|
{
"blob_id": "2d72f063362aaefdc236e1240020c71bacaf51cf",
"index": 8057,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('{} = {}{}{}{}'.format(num, bit4, bit3, bit2, bit1))\n",
"step-3": "num = 15850\nbase = 16\nresiduo = num % base\ncociente = num // base\nbit1 = str(residuo)\nbit1 = bit1.replace('10', 'a')\nbit1 = bit1.replace('11', 'b')\nbit1 = bit1.replace('12', 'c')\nbit1 = bit1.replace('13', 'd')\nbit1 = bit1.replace('14', 'e')\nbit1 = bit1.replace('15', 'f')\nresiduo = cociente % base\ncociente = cociente // base\nbit2 = str(residuo)\nbit2 = bit2.replace('10', 'a')\nbit2 = bit2.replace('11', 'b')\nbit2 = bit2.replace('12', 'c')\nbit2 = bit2.replace('13', 'd')\nbit2 = bit2.replace('14', 'e')\nbit2 = bit2.replace('15', 'f')\nresiduo = cociente % base\ncociente = cociente // base\nbit3 = str(residuo)\nbit3 = bit3.replace('10', 'a')\nbit3 = bit3.replace('11', 'b')\nbit3 = bit3.replace('12', 'c')\nbit3 = bit3.replace('13', 'd')\nbit3 = bit3.replace('14', 'e')\nbit3 = bit3.replace('15', 'f')\nresiduo = cociente % base\ncociente = cociente // base\nbit4 = str(residuo)\nbit4 = bit4.replace('10', 'a')\nbit4 = bit4.replace('11', 'b')\nbit4 = bit4.replace('12', 'c')\nbit4 = bit4.replace('13', 'd')\nbit4 = bit4.replace('14', 'e')\nbit4 = bit4.replace('15', 'f')\nprint('{} = {}{}{}{}'.format(num, bit4, bit3, bit2, bit1))\n",
"step-4": "num = 15850\nbase = 16\n\n# Primera división\nresiduo = num % base\ncociente = num // base\nbit1 = str(residuo)\n\nbit1 = bit1.replace(\"10\",\"a\")\nbit1 = bit1.replace(\"11\",\"b\")\nbit1 = bit1.replace(\"12\",\"c\")\nbit1 = bit1.replace(\"13\",\"d\")\nbit1 = bit1.replace(\"14\",\"e\")\nbit1 = bit1.replace(\"15\",\"f\")\n\n\n# Segunda división\nresiduo = cociente % base\ncociente = cociente // base\nbit2 = str(residuo)\n\nbit2 = bit2.replace(\"10\",\"a\")\nbit2 = bit2.replace(\"11\",\"b\")\nbit2 = bit2.replace(\"12\",\"c\")\nbit2 = bit2.replace(\"13\",\"d\")\nbit2 = bit2.replace(\"14\",\"e\")\nbit2 = bit2.replace(\"15\",\"f\")\n\n\n# Tercera división\nresiduo = cociente % base\ncociente = cociente // base\nbit3 = str(residuo)\n\nbit3 = bit3.replace(\"10\",\"a\")\nbit3 = bit3.replace(\"11\",\"b\")\nbit3 = bit3.replace(\"12\",\"c\")\nbit3 = bit3.replace(\"13\",\"d\")\nbit3 = bit3.replace(\"14\",\"e\")\nbit3 = bit3.replace(\"15\",\"f\")\n\n\n# Cuarta división\nresiduo = cociente % base\ncociente = cociente // base\nbit4 = str(residuo)\n\nbit4 = bit4.replace(\"10\",\"a\")\nbit4 = bit4.replace(\"11\",\"b\")\nbit4 = bit4.replace(\"12\",\"c\")\nbit4 = bit4.replace(\"13\",\"d\")\nbit4 = bit4.replace(\"14\",\"e\")\nbit4 = bit4.replace(\"15\",\"f\")\n\n\nprint(\"{} = {}{}{}{}\".format(num,bit4,bit3,bit2,bit1))",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# Generated by Django 3.1.1 on 2020-10-29 13:56
from django.db import migrations, models
class Migration(migrations.Migration):
    """Schema migration: make ``user_details.dateofbirth`` nullable."""

    dependencies = [
        ('registered_user', '0005_auto_20201029_1710'),
    ]

    operations = [
        migrations.AlterField(
            model_name='user_details',
            name='dateofbirth',
            # null=True allows rows without a recorded date of birth.
            field=models.DateField(null=True),
        ),
    ]
|
normal
|
{
"blob_id": "f2c96b3133137019dc6bd462f096f3b4c5f12648",
"index": 6635,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('registered_user', '0005_auto_20201029_1710')]\n operations = [migrations.AlterField(model_name='user_details', name=\n 'dateofbirth', field=models.DateField(null=True))]\n",
"step-4": "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('registered_user', '0005_auto_20201029_1710')]\n operations = [migrations.AlterField(model_name='user_details', name=\n 'dateofbirth', field=models.DateField(null=True))]\n",
"step-5": "# Generated by Django 3.1.1 on 2020-10-29 13:56\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('registered_user', '0005_auto_20201029_1710'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='user_details',\n name='dateofbirth',\n field=models.DateField(null=True),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import http.client
from urllib.parse import urlencode
client = http.client.HTTPConnection("127.0.0.1:9000")
post_data = {
"usertag": "test",
"password": '123456',
'code': "print('Hello Web')"
}
head_dict = {'Content-Type': 'application/x-www-form-urlencoded'}
post_data = urlencode(post_data)
client.request(method="POST", url='/',
body=post_data.encode('utf-8'),
headers=head_dict)
resp = client.getresponse()
content = resp.read().decode("utf-8")
client.close()
print(content)
|
normal
|
{
"blob_id": "ee1ce3ea4b31246703530478d6550b0c8866197e",
"index": 1190,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nclient.request(method='POST', url='/', body=post_data.encode('utf-8'),\n headers=head_dict)\n<mask token>\nclient.close()\nprint(content)\n",
"step-3": "<mask token>\nclient = http.client.HTTPConnection('127.0.0.1:9000')\npost_data = {'usertag': 'test', 'password': '123456', 'code':\n \"print('Hello Web')\"}\nhead_dict = {'Content-Type': 'application/x-www-form-urlencoded'}\npost_data = urlencode(post_data)\nclient.request(method='POST', url='/', body=post_data.encode('utf-8'),\n headers=head_dict)\nresp = client.getresponse()\ncontent = resp.read().decode('utf-8')\nclient.close()\nprint(content)\n",
"step-4": "import http.client\nfrom urllib.parse import urlencode\nclient = http.client.HTTPConnection('127.0.0.1:9000')\npost_data = {'usertag': 'test', 'password': '123456', 'code':\n \"print('Hello Web')\"}\nhead_dict = {'Content-Type': 'application/x-www-form-urlencoded'}\npost_data = urlencode(post_data)\nclient.request(method='POST', url='/', body=post_data.encode('utf-8'),\n headers=head_dict)\nresp = client.getresponse()\ncontent = resp.read().decode('utf-8')\nclient.close()\nprint(content)\n",
"step-5": "import http.client\nfrom urllib.parse import urlencode\nclient = http.client.HTTPConnection(\"127.0.0.1:9000\")\npost_data = {\n \"usertag\": \"test\",\n \"password\": '123456',\n 'code': \"print('Hello Web')\"\n}\nhead_dict = {'Content-Type': 'application/x-www-form-urlencoded'}\npost_data = urlencode(post_data)\nclient.request(method=\"POST\", url='/',\n body=post_data.encode('utf-8'),\n headers=head_dict)\nresp = client.getresponse()\ncontent = resp.read().decode(\"utf-8\")\nclient.close()\nprint(content)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print('>>>> Connecting with the UAV <<<<')
<|reserved_special_token_0|>
vehicle.wait_ready('autopilot_version')
print('ready')
<|reserved_special_token_0|>
if cap.isOpened() == False:
print('Unable to read camera feed')
<|reserved_special_token_0|>
while True:
posdata = str(vehicle.location.global_relative_frame).split(':')
_, _, alt = posdata[1].split(',')
ret, frame = cap.read()
cv2.putText(frame, str(alt), (0, int(frame_height / 2.1)), cv2.
FONT_HERSHEY_SIMPLEX, 0.4, (255, 255, 255), 1)
if ret == True:
print('record..')
out.write(frame)
if cv2.waitKey(1) & 255 == ord('q'):
break
else:
break
cap.release()
out.release()
cv2.destroyAllWindows()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
connection_string = '/dev/ttyACM0'
baud_rate = 115200
print('>>>> Connecting with the UAV <<<<')
vehicle = connect(connection_string, baud=baud_rate, wait_ready=True)
vehicle.wait_ready('autopilot_version')
print('ready')
cap = cv2.VideoCapture(0)
if cap.isOpened() == False:
print('Unable to read camera feed')
frame_width = int(cap.get(3))
frame_height = int(cap.get(4))
t = str(time.time())
out = cv2.VideoWriter(t + '.avi', cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'
), 10, (frame_width, frame_height))
while True:
posdata = str(vehicle.location.global_relative_frame).split(':')
_, _, alt = posdata[1].split(',')
ret, frame = cap.read()
cv2.putText(frame, str(alt), (0, int(frame_height / 2.1)), cv2.
FONT_HERSHEY_SIMPLEX, 0.4, (255, 255, 255), 1)
if ret == True:
print('record..')
out.write(frame)
if cv2.waitKey(1) & 255 == ord('q'):
break
else:
break
cap.release()
out.release()
cv2.destroyAllWindows()
<|reserved_special_token_1|>
import cv2
import numpy as np
import time
from dronekit import connect, VehicleMode
connection_string = '/dev/ttyACM0'
baud_rate = 115200
print('>>>> Connecting with the UAV <<<<')
vehicle = connect(connection_string, baud=baud_rate, wait_ready=True)
vehicle.wait_ready('autopilot_version')
print('ready')
cap = cv2.VideoCapture(0)
if cap.isOpened() == False:
print('Unable to read camera feed')
frame_width = int(cap.get(3))
frame_height = int(cap.get(4))
t = str(time.time())
out = cv2.VideoWriter(t + '.avi', cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'
), 10, (frame_width, frame_height))
while True:
posdata = str(vehicle.location.global_relative_frame).split(':')
_, _, alt = posdata[1].split(',')
ret, frame = cap.read()
cv2.putText(frame, str(alt), (0, int(frame_height / 2.1)), cv2.
FONT_HERSHEY_SIMPLEX, 0.4, (255, 255, 255), 1)
if ret == True:
print('record..')
out.write(frame)
if cv2.waitKey(1) & 255 == ord('q'):
break
else:
break
cap.release()
out.release()
cv2.destroyAllWindows()
<|reserved_special_token_1|>
import cv2
import numpy as np
import time
from dronekit import connect, VehicleMode
connection_string = "/dev/ttyACM0"
baud_rate = 115200
print(">>>> Connecting with the UAV <<<<")
vehicle = connect(connection_string, baud=baud_rate, wait_ready=True)
vehicle.wait_ready('autopilot_version')
print('ready')
cap = cv2.VideoCapture(0)
if (cap.isOpened() == False):
print("Unable to read camera feed")
frame_width = int(cap.get(3))
frame_height = int(cap.get(4))
t = str(time.time())
out = cv2.VideoWriter(t+'.avi',cv2.VideoWriter_fourcc('M','J','P','G'), 10, (frame_width,frame_height))
while(True):
posdata = str(vehicle.location.global_relative_frame).split(':')
_, _, alt = posdata[1].split(',')
ret, frame = cap.read()
cv2.putText(frame, str(alt),(0,int(frame_height/2.1)),cv2.FONT_HERSHEY_SIMPLEX, 0.4, (255,255,255), 1)
if ret == True:
print("record..")
out.write(frame)
#cv2.imshow('frame',frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
else:
break
cap.release()
out.release()
cv2.destroyAllWindows()
|
flexible
|
{
"blob_id": "8c11463e35fb32949abbb163a89f874040a33ad0",
"index": 5415,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('>>>> Connecting with the UAV <<<<')\n<mask token>\nvehicle.wait_ready('autopilot_version')\nprint('ready')\n<mask token>\nif cap.isOpened() == False:\n print('Unable to read camera feed')\n<mask token>\nwhile True:\n posdata = str(vehicle.location.global_relative_frame).split(':')\n _, _, alt = posdata[1].split(',')\n ret, frame = cap.read()\n cv2.putText(frame, str(alt), (0, int(frame_height / 2.1)), cv2.\n FONT_HERSHEY_SIMPLEX, 0.4, (255, 255, 255), 1)\n if ret == True:\n print('record..')\n out.write(frame)\n if cv2.waitKey(1) & 255 == ord('q'):\n break\n else:\n break\ncap.release()\nout.release()\ncv2.destroyAllWindows()\n",
"step-3": "<mask token>\nconnection_string = '/dev/ttyACM0'\nbaud_rate = 115200\nprint('>>>> Connecting with the UAV <<<<')\nvehicle = connect(connection_string, baud=baud_rate, wait_ready=True)\nvehicle.wait_ready('autopilot_version')\nprint('ready')\ncap = cv2.VideoCapture(0)\nif cap.isOpened() == False:\n print('Unable to read camera feed')\nframe_width = int(cap.get(3))\nframe_height = int(cap.get(4))\nt = str(time.time())\nout = cv2.VideoWriter(t + '.avi', cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'\n ), 10, (frame_width, frame_height))\nwhile True:\n posdata = str(vehicle.location.global_relative_frame).split(':')\n _, _, alt = posdata[1].split(',')\n ret, frame = cap.read()\n cv2.putText(frame, str(alt), (0, int(frame_height / 2.1)), cv2.\n FONT_HERSHEY_SIMPLEX, 0.4, (255, 255, 255), 1)\n if ret == True:\n print('record..')\n out.write(frame)\n if cv2.waitKey(1) & 255 == ord('q'):\n break\n else:\n break\ncap.release()\nout.release()\ncv2.destroyAllWindows()\n",
"step-4": "import cv2\nimport numpy as np\nimport time\nfrom dronekit import connect, VehicleMode\nconnection_string = '/dev/ttyACM0'\nbaud_rate = 115200\nprint('>>>> Connecting with the UAV <<<<')\nvehicle = connect(connection_string, baud=baud_rate, wait_ready=True)\nvehicle.wait_ready('autopilot_version')\nprint('ready')\ncap = cv2.VideoCapture(0)\nif cap.isOpened() == False:\n print('Unable to read camera feed')\nframe_width = int(cap.get(3))\nframe_height = int(cap.get(4))\nt = str(time.time())\nout = cv2.VideoWriter(t + '.avi', cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'\n ), 10, (frame_width, frame_height))\nwhile True:\n posdata = str(vehicle.location.global_relative_frame).split(':')\n _, _, alt = posdata[1].split(',')\n ret, frame = cap.read()\n cv2.putText(frame, str(alt), (0, int(frame_height / 2.1)), cv2.\n FONT_HERSHEY_SIMPLEX, 0.4, (255, 255, 255), 1)\n if ret == True:\n print('record..')\n out.write(frame)\n if cv2.waitKey(1) & 255 == ord('q'):\n break\n else:\n break\ncap.release()\nout.release()\ncv2.destroyAllWindows()\n",
"step-5": "import cv2\nimport numpy as np\nimport time \nfrom dronekit import connect, VehicleMode\n\nconnection_string = \"/dev/ttyACM0\"\nbaud_rate = 115200\nprint(\">>>> Connecting with the UAV <<<<\")\nvehicle = connect(connection_string, baud=baud_rate, wait_ready=True)\nvehicle.wait_ready('autopilot_version')\nprint('ready')\n\ncap = cv2.VideoCapture(0)\n \nif (cap.isOpened() == False): \n print(\"Unable to read camera feed\")\n\nframe_width = int(cap.get(3))\nframe_height = int(cap.get(4))\nt = str(time.time())\nout = cv2.VideoWriter(t+'.avi',cv2.VideoWriter_fourcc('M','J','P','G'), 10, (frame_width,frame_height))\n \nwhile(True):\n posdata = str(vehicle.location.global_relative_frame).split(':')\n _, _, alt = posdata[1].split(',')\n ret, frame = cap.read()\n cv2.putText(frame, str(alt),(0,int(frame_height/2.1)),cv2.FONT_HERSHEY_SIMPLEX, 0.4, (255,255,255), 1)\n if ret == True: \n print(\"record..\")\n out.write(frame)\n #cv2.imshow('frame',frame)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n else:\n break \ncap.release()\nout.release()\ncv2.destroyAllWindows() \n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#Eyal Reis - 203249354
from view import View
def main():
"""
primary game method
"""
view = View()
view.root.mainloop()
if __name__ == "__main__":
main()
|
normal
|
{
"blob_id": "640eae824e43e394bf0624dd4cf7dcec78f43604",
"index": 4947,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef main():\n \"\"\"\n primary game method\n \"\"\"\n view = View()\n view.root.mainloop()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef main():\n \"\"\"\n primary game method\n \"\"\"\n view = View()\n view.root.mainloop()\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "from view import View\n\n\ndef main():\n \"\"\"\n primary game method\n \"\"\"\n view = View()\n view.root.mainloop()\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "#Eyal Reis - 203249354\n\nfrom view import View\n\ndef main():\n \"\"\"\n primary game method\n \"\"\"\n view = View()\n view.root.mainloop()\n \nif __name__ == \"__main__\":\n main()\n ",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def read_video(filename):
"""
将视频每秒的内容提取出来
:param filename: 视频文件路径
:return: 视频文件名,用来拼接
"""
cap = cv2.VideoCapture(filename)
rate = cap.get(cv2.CAP_PROP_FPS)
count = 0
success, frame = cap.read()
imageCount = 0
while success:
success, frame = cap.read()
count += 1
if count >= rate:
if not os.path.exists(stich_path):
os.mkdir(stich_path)
shotname, extension = os.path.splitext(filename)
shotname = shotname.split('\\')[len(shotname.split('\\')) - 1]
if not os.path.exists(stich_path + shotname):
os.mkdir(stich_path + shotname)
cv2.imencode('.jpg', frame)[1].tofile(stich_path + shotname +
'\\' + str(imageCount) + '.jpg')
imageCount += 1
count = 0
stitcher_image(shotname)
def stitcher_image(shotname):
"""
使用OpenCV的stitcher进行拼接
****需要OpenCV 3.3.0****
OpenCV 3.3.0以下的版本stitcher不能正确的运行,详情参考 https://github.com/opencv/opencv/issues/6969#issuecomment-326430615
:param shotname:
"""
imgs = []
for file in os.listdir(stich_path + shotname):
imgs.append(cv2.imread(stich_path + shotname + '\\' + file))
stitcher = cv2.createStitcher(False)
result = stitcher.stitch(imgs)
cv2.imwrite(stich_path + shotname + '\\' + 'stich_result.jpg', result[1])
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def read_video(filename):
"""
将视频每秒的内容提取出来
:param filename: 视频文件路径
:return: 视频文件名,用来拼接
"""
cap = cv2.VideoCapture(filename)
rate = cap.get(cv2.CAP_PROP_FPS)
count = 0
success, frame = cap.read()
imageCount = 0
while success:
success, frame = cap.read()
count += 1
if count >= rate:
if not os.path.exists(stich_path):
os.mkdir(stich_path)
shotname, extension = os.path.splitext(filename)
shotname = shotname.split('\\')[len(shotname.split('\\')) - 1]
if not os.path.exists(stich_path + shotname):
os.mkdir(stich_path + shotname)
cv2.imencode('.jpg', frame)[1].tofile(stich_path + shotname +
'\\' + str(imageCount) + '.jpg')
imageCount += 1
count = 0
stitcher_image(shotname)
def stitcher_image(shotname):
"""
使用OpenCV的stitcher进行拼接
****需要OpenCV 3.3.0****
OpenCV 3.3.0以下的版本stitcher不能正确的运行,详情参考 https://github.com/opencv/opencv/issues/6969#issuecomment-326430615
:param shotname:
"""
imgs = []
for file in os.listdir(stich_path + shotname):
imgs.append(cv2.imread(stich_path + shotname + '\\' + file))
stitcher = cv2.createStitcher(False)
result = stitcher.stitch(imgs)
cv2.imwrite(stich_path + shotname + '\\' + 'stich_result.jpg', result[1])
def read_file_list(path):
if os.path.isdir(path):
pathlist = os.listdir(path)
for file in pathlist:
read_video(path + '\\' + file)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
stich_path = 'stichImage\\'
def read_video(filename):
"""
将视频每秒的内容提取出来
:param filename: 视频文件路径
:return: 视频文件名,用来拼接
"""
cap = cv2.VideoCapture(filename)
rate = cap.get(cv2.CAP_PROP_FPS)
count = 0
success, frame = cap.read()
imageCount = 0
while success:
success, frame = cap.read()
count += 1
if count >= rate:
if not os.path.exists(stich_path):
os.mkdir(stich_path)
shotname, extension = os.path.splitext(filename)
shotname = shotname.split('\\')[len(shotname.split('\\')) - 1]
if not os.path.exists(stich_path + shotname):
os.mkdir(stich_path + shotname)
cv2.imencode('.jpg', frame)[1].tofile(stich_path + shotname +
'\\' + str(imageCount) + '.jpg')
imageCount += 1
count = 0
stitcher_image(shotname)
def stitcher_image(shotname):
"""
使用OpenCV的stitcher进行拼接
****需要OpenCV 3.3.0****
OpenCV 3.3.0以下的版本stitcher不能正确的运行,详情参考 https://github.com/opencv/opencv/issues/6969#issuecomment-326430615
:param shotname:
"""
imgs = []
for file in os.listdir(stich_path + shotname):
imgs.append(cv2.imread(stich_path + shotname + '\\' + file))
stitcher = cv2.createStitcher(False)
result = stitcher.stitch(imgs)
cv2.imwrite(stich_path + shotname + '\\' + 'stich_result.jpg', result[1])
def read_file_list(path):
if os.path.isdir(path):
pathlist = os.listdir(path)
for file in pathlist:
read_video(path + '\\' + file)
<|reserved_special_token_1|>
import cv2
import os
<|reserved_special_token_0|>
stich_path = 'stichImage\\'
def read_video(filename):
"""
将视频每秒的内容提取出来
:param filename: 视频文件路径
:return: 视频文件名,用来拼接
"""
cap = cv2.VideoCapture(filename)
rate = cap.get(cv2.CAP_PROP_FPS)
count = 0
success, frame = cap.read()
imageCount = 0
while success:
success, frame = cap.read()
count += 1
if count >= rate:
if not os.path.exists(stich_path):
os.mkdir(stich_path)
shotname, extension = os.path.splitext(filename)
shotname = shotname.split('\\')[len(shotname.split('\\')) - 1]
if not os.path.exists(stich_path + shotname):
os.mkdir(stich_path + shotname)
cv2.imencode('.jpg', frame)[1].tofile(stich_path + shotname +
'\\' + str(imageCount) + '.jpg')
imageCount += 1
count = 0
stitcher_image(shotname)
def stitcher_image(shotname):
"""
使用OpenCV的stitcher进行拼接
****需要OpenCV 3.3.0****
OpenCV 3.3.0以下的版本stitcher不能正确的运行,详情参考 https://github.com/opencv/opencv/issues/6969#issuecomment-326430615
:param shotname:
"""
imgs = []
for file in os.listdir(stich_path + shotname):
imgs.append(cv2.imread(stich_path + shotname + '\\' + file))
stitcher = cv2.createStitcher(False)
result = stitcher.stitch(imgs)
cv2.imwrite(stich_path + shotname + '\\' + 'stich_result.jpg', result[1])
def read_file_list(path):
if os.path.isdir(path):
pathlist = os.listdir(path)
for file in pathlist:
read_video(path + '\\' + file)
<|reserved_special_token_1|>
import cv2
import os
"""
视频场景拼接
"""
stich_path="stichImage\\"
def read_video(filename):
'''
将视频每秒的内容提取出来
:param filename: 视频文件路径
:return: 视频文件名,用来拼接
'''
cap=cv2.VideoCapture(filename)
rate = cap.get(cv2.CAP_PROP_FPS)
count=0
success, frame = cap.read()
imageCount=0
while success:
success, frame = cap.read()
count+=1
if count>=rate:
if not os.path.exists(stich_path):
os.mkdir(stich_path)
(shotname, extension)=os.path.splitext(filename)
shotname=shotname.split('\\')[len(shotname.split('\\'))-1]
if not os.path.exists(stich_path+shotname):
os.mkdir(stich_path+shotname)
# frame=cv2.resize(frame,(960,544))
cv2.imencode(".jpg", frame)[1].tofile(
stich_path+shotname+'\\'+str(imageCount)+'.jpg')
imageCount+=1
count=0
stitcher_image(shotname)
def stitcher_image(shotname):
"""
使用OpenCV的stitcher进行拼接
****需要OpenCV 3.3.0****
OpenCV 3.3.0以下的版本stitcher不能正确的运行,详情参考 https://github.com/opencv/opencv/issues/6969#issuecomment-326430615
:param shotname:
"""
imgs=[]
for file in os.listdir(stich_path+shotname):
imgs.append(cv2.imread(stich_path+shotname+'\\'+file))
stitcher = cv2.createStitcher(False)
result = stitcher.stitch(imgs)
cv2.imwrite(stich_path+shotname+'\\'+"stich_result.jpg", result[1])
def read_file_list(path):
if os.path.isdir(path):
pathlist=os.listdir(path)
for file in pathlist:
read_video(path+'\\'+file)
# read_video('E:\\2.mp4')
|
flexible
|
{
"blob_id": "a8506420b1bc558fa953f0cec3f8c16beaf44909",
"index": 9886,
"step-1": "<mask token>\n\n\ndef read_video(filename):\n \"\"\"\n 将视频每秒的内容提取出来\n :param filename: 视频文件路径\n :return: 视频文件名,用来拼接\n \"\"\"\n cap = cv2.VideoCapture(filename)\n rate = cap.get(cv2.CAP_PROP_FPS)\n count = 0\n success, frame = cap.read()\n imageCount = 0\n while success:\n success, frame = cap.read()\n count += 1\n if count >= rate:\n if not os.path.exists(stich_path):\n os.mkdir(stich_path)\n shotname, extension = os.path.splitext(filename)\n shotname = shotname.split('\\\\')[len(shotname.split('\\\\')) - 1]\n if not os.path.exists(stich_path + shotname):\n os.mkdir(stich_path + shotname)\n cv2.imencode('.jpg', frame)[1].tofile(stich_path + shotname +\n '\\\\' + str(imageCount) + '.jpg')\n imageCount += 1\n count = 0\n stitcher_image(shotname)\n\n\ndef stitcher_image(shotname):\n \"\"\"\n 使用OpenCV的stitcher进行拼接\n ****需要OpenCV 3.3.0****\n OpenCV 3.3.0以下的版本stitcher不能正确的运行,详情参考 https://github.com/opencv/opencv/issues/6969#issuecomment-326430615\n :param shotname:\n \"\"\"\n imgs = []\n for file in os.listdir(stich_path + shotname):\n imgs.append(cv2.imread(stich_path + shotname + '\\\\' + file))\n stitcher = cv2.createStitcher(False)\n result = stitcher.stitch(imgs)\n cv2.imwrite(stich_path + shotname + '\\\\' + 'stich_result.jpg', result[1])\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef read_video(filename):\n \"\"\"\n 将视频每秒的内容提取出来\n :param filename: 视频文件路径\n :return: 视频文件名,用来拼接\n \"\"\"\n cap = cv2.VideoCapture(filename)\n rate = cap.get(cv2.CAP_PROP_FPS)\n count = 0\n success, frame = cap.read()\n imageCount = 0\n while success:\n success, frame = cap.read()\n count += 1\n if count >= rate:\n if not os.path.exists(stich_path):\n os.mkdir(stich_path)\n shotname, extension = os.path.splitext(filename)\n shotname = shotname.split('\\\\')[len(shotname.split('\\\\')) - 1]\n if not os.path.exists(stich_path + shotname):\n os.mkdir(stich_path + shotname)\n cv2.imencode('.jpg', frame)[1].tofile(stich_path + shotname +\n '\\\\' + str(imageCount) + '.jpg')\n imageCount += 1\n count = 0\n stitcher_image(shotname)\n\n\ndef stitcher_image(shotname):\n \"\"\"\n 使用OpenCV的stitcher进行拼接\n ****需要OpenCV 3.3.0****\n OpenCV 3.3.0以下的版本stitcher不能正确的运行,详情参考 https://github.com/opencv/opencv/issues/6969#issuecomment-326430615\n :param shotname:\n \"\"\"\n imgs = []\n for file in os.listdir(stich_path + shotname):\n imgs.append(cv2.imread(stich_path + shotname + '\\\\' + file))\n stitcher = cv2.createStitcher(False)\n result = stitcher.stitch(imgs)\n cv2.imwrite(stich_path + shotname + '\\\\' + 'stich_result.jpg', result[1])\n\n\ndef read_file_list(path):\n if os.path.isdir(path):\n pathlist = os.listdir(path)\n for file in pathlist:\n read_video(path + '\\\\' + file)\n",
"step-3": "<mask token>\nstich_path = 'stichImage\\\\'\n\n\ndef read_video(filename):\n \"\"\"\n 将视频每秒的内容提取出来\n :param filename: 视频文件路径\n :return: 视频文件名,用来拼接\n \"\"\"\n cap = cv2.VideoCapture(filename)\n rate = cap.get(cv2.CAP_PROP_FPS)\n count = 0\n success, frame = cap.read()\n imageCount = 0\n while success:\n success, frame = cap.read()\n count += 1\n if count >= rate:\n if not os.path.exists(stich_path):\n os.mkdir(stich_path)\n shotname, extension = os.path.splitext(filename)\n shotname = shotname.split('\\\\')[len(shotname.split('\\\\')) - 1]\n if not os.path.exists(stich_path + shotname):\n os.mkdir(stich_path + shotname)\n cv2.imencode('.jpg', frame)[1].tofile(stich_path + shotname +\n '\\\\' + str(imageCount) + '.jpg')\n imageCount += 1\n count = 0\n stitcher_image(shotname)\n\n\ndef stitcher_image(shotname):\n \"\"\"\n 使用OpenCV的stitcher进行拼接\n ****需要OpenCV 3.3.0****\n OpenCV 3.3.0以下的版本stitcher不能正确的运行,详情参考 https://github.com/opencv/opencv/issues/6969#issuecomment-326430615\n :param shotname:\n \"\"\"\n imgs = []\n for file in os.listdir(stich_path + shotname):\n imgs.append(cv2.imread(stich_path + shotname + '\\\\' + file))\n stitcher = cv2.createStitcher(False)\n result = stitcher.stitch(imgs)\n cv2.imwrite(stich_path + shotname + '\\\\' + 'stich_result.jpg', result[1])\n\n\ndef read_file_list(path):\n if os.path.isdir(path):\n pathlist = os.listdir(path)\n for file in pathlist:\n read_video(path + '\\\\' + file)\n",
"step-4": "import cv2\nimport os\n<mask token>\nstich_path = 'stichImage\\\\'\n\n\ndef read_video(filename):\n \"\"\"\n 将视频每秒的内容提取出来\n :param filename: 视频文件路径\n :return: 视频文件名,用来拼接\n \"\"\"\n cap = cv2.VideoCapture(filename)\n rate = cap.get(cv2.CAP_PROP_FPS)\n count = 0\n success, frame = cap.read()\n imageCount = 0\n while success:\n success, frame = cap.read()\n count += 1\n if count >= rate:\n if not os.path.exists(stich_path):\n os.mkdir(stich_path)\n shotname, extension = os.path.splitext(filename)\n shotname = shotname.split('\\\\')[len(shotname.split('\\\\')) - 1]\n if not os.path.exists(stich_path + shotname):\n os.mkdir(stich_path + shotname)\n cv2.imencode('.jpg', frame)[1].tofile(stich_path + shotname +\n '\\\\' + str(imageCount) + '.jpg')\n imageCount += 1\n count = 0\n stitcher_image(shotname)\n\n\ndef stitcher_image(shotname):\n \"\"\"\n 使用OpenCV的stitcher进行拼接\n ****需要OpenCV 3.3.0****\n OpenCV 3.3.0以下的版本stitcher不能正确的运行,详情参考 https://github.com/opencv/opencv/issues/6969#issuecomment-326430615\n :param shotname:\n \"\"\"\n imgs = []\n for file in os.listdir(stich_path + shotname):\n imgs.append(cv2.imread(stich_path + shotname + '\\\\' + file))\n stitcher = cv2.createStitcher(False)\n result = stitcher.stitch(imgs)\n cv2.imwrite(stich_path + shotname + '\\\\' + 'stich_result.jpg', result[1])\n\n\ndef read_file_list(path):\n if os.path.isdir(path):\n pathlist = os.listdir(path)\n for file in pathlist:\n read_video(path + '\\\\' + file)\n",
"step-5": "import cv2\nimport os\n\"\"\"\n视频场景拼接\n\"\"\"\nstich_path=\"stichImage\\\\\"\n\ndef read_video(filename):\n '''\n 将视频每秒的内容提取出来\n :param filename: 视频文件路径\n :return: 视频文件名,用来拼接\n '''\n cap=cv2.VideoCapture(filename)\n rate = cap.get(cv2.CAP_PROP_FPS)\n count=0\n success, frame = cap.read()\n imageCount=0\n while success:\n success, frame = cap.read()\n count+=1\n if count>=rate:\n if not os.path.exists(stich_path):\n os.mkdir(stich_path)\n (shotname, extension)=os.path.splitext(filename)\n shotname=shotname.split('\\\\')[len(shotname.split('\\\\'))-1]\n if not os.path.exists(stich_path+shotname):\n os.mkdir(stich_path+shotname)\n # frame=cv2.resize(frame,(960,544))\n cv2.imencode(\".jpg\", frame)[1].tofile(\n stich_path+shotname+'\\\\'+str(imageCount)+'.jpg')\n imageCount+=1\n count=0\n stitcher_image(shotname)\n\n\ndef stitcher_image(shotname):\n \"\"\"\n 使用OpenCV的stitcher进行拼接\n ****需要OpenCV 3.3.0****\n OpenCV 3.3.0以下的版本stitcher不能正确的运行,详情参考 https://github.com/opencv/opencv/issues/6969#issuecomment-326430615\n :param shotname:\n \"\"\"\n imgs=[]\n for file in os.listdir(stich_path+shotname):\n imgs.append(cv2.imread(stich_path+shotname+'\\\\'+file))\n stitcher = cv2.createStitcher(False)\n result = stitcher.stitch(imgs)\n cv2.imwrite(stich_path+shotname+'\\\\'+\"stich_result.jpg\", result[1])\n\ndef read_file_list(path):\n if os.path.isdir(path):\n pathlist=os.listdir(path)\n for file in pathlist:\n read_video(path+'\\\\'+file)\n\n\n\n# read_video('E:\\\\2.mp4')\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
class Encoder(Protocol):
<|reserved_special_token_0|>
class Decoder(Protocol):
def __call__(self, packed: EncodedData, **kwargs) ->Dict[Any, Any]:
...
class DataClassYAMLMixin(DataClassDictMixin):
def to_yaml(self: T, encoder: Encoder=yaml.dump, dict_params: Mapping=
MappingProxyType({}), **encoder_kwargs) ->EncodedData:
return encoder(self.to_dict(**dict(DEFAULT_DICT_PARAMS, **
dict_params)), **encoder_kwargs)
@classmethod
def from_yaml(cls: Type[T], data: EncodedData, decoder: Decoder=yaml.
safe_load, dict_params: Mapping=MappingProxyType({}), **decoder_kwargs
) ->T:
return cls.from_dict(decoder(data, **decoder_kwargs), **dict(
DEFAULT_DICT_PARAMS, **dict_params))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Encoder(Protocol):
def __call__(self, o, **kwargs) ->EncodedData:
...
class Decoder(Protocol):
def __call__(self, packed: EncodedData, **kwargs) ->Dict[Any, Any]:
...
class DataClassYAMLMixin(DataClassDictMixin):
def to_yaml(self: T, encoder: Encoder=yaml.dump, dict_params: Mapping=
MappingProxyType({}), **encoder_kwargs) ->EncodedData:
return encoder(self.to_dict(**dict(DEFAULT_DICT_PARAMS, **
dict_params)), **encoder_kwargs)
@classmethod
def from_yaml(cls: Type[T], data: EncodedData, decoder: Decoder=yaml.
safe_load, dict_params: Mapping=MappingProxyType({}), **decoder_kwargs
) ->T:
return cls.from_dict(decoder(data, **decoder_kwargs), **dict(
DEFAULT_DICT_PARAMS, **dict_params))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
DEFAULT_DICT_PARAMS = {'use_bytes': False, 'use_enum': False,
'use_datetime': False}
EncodedData = Union[str, bytes]
T = TypeVar('T', bound='DataClassYAMLMixin')
class Encoder(Protocol):
def __call__(self, o, **kwargs) ->EncodedData:
...
class Decoder(Protocol):
def __call__(self, packed: EncodedData, **kwargs) ->Dict[Any, Any]:
...
class DataClassYAMLMixin(DataClassDictMixin):
def to_yaml(self: T, encoder: Encoder=yaml.dump, dict_params: Mapping=
MappingProxyType({}), **encoder_kwargs) ->EncodedData:
return encoder(self.to_dict(**dict(DEFAULT_DICT_PARAMS, **
dict_params)), **encoder_kwargs)
@classmethod
def from_yaml(cls: Type[T], data: EncodedData, decoder: Decoder=yaml.
safe_load, dict_params: Mapping=MappingProxyType({}), **decoder_kwargs
) ->T:
return cls.from_dict(decoder(data, **decoder_kwargs), **dict(
DEFAULT_DICT_PARAMS, **dict_params))
<|reserved_special_token_1|>
from types import MappingProxyType
from typing import Any, Dict, Mapping, Type, TypeVar, Union
import yaml
from typing_extensions import Protocol
from mashumaro.serializer.base import DataClassDictMixin
DEFAULT_DICT_PARAMS = {'use_bytes': False, 'use_enum': False,
'use_datetime': False}
EncodedData = Union[str, bytes]
T = TypeVar('T', bound='DataClassYAMLMixin')
class Encoder(Protocol):
def __call__(self, o, **kwargs) ->EncodedData:
...
class Decoder(Protocol):
def __call__(self, packed: EncodedData, **kwargs) ->Dict[Any, Any]:
...
class DataClassYAMLMixin(DataClassDictMixin):
def to_yaml(self: T, encoder: Encoder=yaml.dump, dict_params: Mapping=
MappingProxyType({}), **encoder_kwargs) ->EncodedData:
return encoder(self.to_dict(**dict(DEFAULT_DICT_PARAMS, **
dict_params)), **encoder_kwargs)
@classmethod
def from_yaml(cls: Type[T], data: EncodedData, decoder: Decoder=yaml.
safe_load, dict_params: Mapping=MappingProxyType({}), **decoder_kwargs
) ->T:
return cls.from_dict(decoder(data, **decoder_kwargs), **dict(
DEFAULT_DICT_PARAMS, **dict_params))
<|reserved_special_token_1|>
from types import MappingProxyType
from typing import Any, Dict, Mapping, Type, TypeVar, Union
import yaml
from typing_extensions import Protocol
from mashumaro.serializer.base import DataClassDictMixin
DEFAULT_DICT_PARAMS = {
"use_bytes": False,
"use_enum": False,
"use_datetime": False,
}
EncodedData = Union[str, bytes]
T = TypeVar("T", bound="DataClassYAMLMixin")
class Encoder(Protocol): # pragma no cover
def __call__(self, o, **kwargs) -> EncodedData:
...
class Decoder(Protocol): # pragma no cover
def __call__(self, packed: EncodedData, **kwargs) -> Dict[Any, Any]:
...
class DataClassYAMLMixin(DataClassDictMixin):
def to_yaml(
self: T,
encoder: Encoder = yaml.dump, # type: ignore
dict_params: Mapping = MappingProxyType({}),
**encoder_kwargs,
) -> EncodedData:
return encoder(
self.to_dict(**dict(DEFAULT_DICT_PARAMS, **dict_params)),
**encoder_kwargs,
)
@classmethod
def from_yaml(
cls: Type[T],
data: EncodedData,
decoder: Decoder = yaml.safe_load, # type: ignore
dict_params: Mapping = MappingProxyType({}),
**decoder_kwargs,
) -> T:
return cls.from_dict(
decoder(data, **decoder_kwargs),
**dict(DEFAULT_DICT_PARAMS, **dict_params),
)
|
flexible
|
{
"blob_id": "15edb1c051ccbc6f927c0a859288511f94a3d853",
"index": 986,
"step-1": "<mask token>\n\n\nclass Encoder(Protocol):\n <mask token>\n\n\nclass Decoder(Protocol):\n\n def __call__(self, packed: EncodedData, **kwargs) ->Dict[Any, Any]:\n ...\n\n\nclass DataClassYAMLMixin(DataClassDictMixin):\n\n def to_yaml(self: T, encoder: Encoder=yaml.dump, dict_params: Mapping=\n MappingProxyType({}), **encoder_kwargs) ->EncodedData:\n return encoder(self.to_dict(**dict(DEFAULT_DICT_PARAMS, **\n dict_params)), **encoder_kwargs)\n\n @classmethod\n def from_yaml(cls: Type[T], data: EncodedData, decoder: Decoder=yaml.\n safe_load, dict_params: Mapping=MappingProxyType({}), **decoder_kwargs\n ) ->T:\n return cls.from_dict(decoder(data, **decoder_kwargs), **dict(\n DEFAULT_DICT_PARAMS, **dict_params))\n",
"step-2": "<mask token>\n\n\nclass Encoder(Protocol):\n\n def __call__(self, o, **kwargs) ->EncodedData:\n ...\n\n\nclass Decoder(Protocol):\n\n def __call__(self, packed: EncodedData, **kwargs) ->Dict[Any, Any]:\n ...\n\n\nclass DataClassYAMLMixin(DataClassDictMixin):\n\n def to_yaml(self: T, encoder: Encoder=yaml.dump, dict_params: Mapping=\n MappingProxyType({}), **encoder_kwargs) ->EncodedData:\n return encoder(self.to_dict(**dict(DEFAULT_DICT_PARAMS, **\n dict_params)), **encoder_kwargs)\n\n @classmethod\n def from_yaml(cls: Type[T], data: EncodedData, decoder: Decoder=yaml.\n safe_load, dict_params: Mapping=MappingProxyType({}), **decoder_kwargs\n ) ->T:\n return cls.from_dict(decoder(data, **decoder_kwargs), **dict(\n DEFAULT_DICT_PARAMS, **dict_params))\n",
"step-3": "<mask token>\nDEFAULT_DICT_PARAMS = {'use_bytes': False, 'use_enum': False,\n 'use_datetime': False}\nEncodedData = Union[str, bytes]\nT = TypeVar('T', bound='DataClassYAMLMixin')\n\n\nclass Encoder(Protocol):\n\n def __call__(self, o, **kwargs) ->EncodedData:\n ...\n\n\nclass Decoder(Protocol):\n\n def __call__(self, packed: EncodedData, **kwargs) ->Dict[Any, Any]:\n ...\n\n\nclass DataClassYAMLMixin(DataClassDictMixin):\n\n def to_yaml(self: T, encoder: Encoder=yaml.dump, dict_params: Mapping=\n MappingProxyType({}), **encoder_kwargs) ->EncodedData:\n return encoder(self.to_dict(**dict(DEFAULT_DICT_PARAMS, **\n dict_params)), **encoder_kwargs)\n\n @classmethod\n def from_yaml(cls: Type[T], data: EncodedData, decoder: Decoder=yaml.\n safe_load, dict_params: Mapping=MappingProxyType({}), **decoder_kwargs\n ) ->T:\n return cls.from_dict(decoder(data, **decoder_kwargs), **dict(\n DEFAULT_DICT_PARAMS, **dict_params))\n",
"step-4": "from types import MappingProxyType\nfrom typing import Any, Dict, Mapping, Type, TypeVar, Union\nimport yaml\nfrom typing_extensions import Protocol\nfrom mashumaro.serializer.base import DataClassDictMixin\nDEFAULT_DICT_PARAMS = {'use_bytes': False, 'use_enum': False,\n 'use_datetime': False}\nEncodedData = Union[str, bytes]\nT = TypeVar('T', bound='DataClassYAMLMixin')\n\n\nclass Encoder(Protocol):\n\n def __call__(self, o, **kwargs) ->EncodedData:\n ...\n\n\nclass Decoder(Protocol):\n\n def __call__(self, packed: EncodedData, **kwargs) ->Dict[Any, Any]:\n ...\n\n\nclass DataClassYAMLMixin(DataClassDictMixin):\n\n def to_yaml(self: T, encoder: Encoder=yaml.dump, dict_params: Mapping=\n MappingProxyType({}), **encoder_kwargs) ->EncodedData:\n return encoder(self.to_dict(**dict(DEFAULT_DICT_PARAMS, **\n dict_params)), **encoder_kwargs)\n\n @classmethod\n def from_yaml(cls: Type[T], data: EncodedData, decoder: Decoder=yaml.\n safe_load, dict_params: Mapping=MappingProxyType({}), **decoder_kwargs\n ) ->T:\n return cls.from_dict(decoder(data, **decoder_kwargs), **dict(\n DEFAULT_DICT_PARAMS, **dict_params))\n",
"step-5": "from types import MappingProxyType\nfrom typing import Any, Dict, Mapping, Type, TypeVar, Union\n\nimport yaml\nfrom typing_extensions import Protocol\n\nfrom mashumaro.serializer.base import DataClassDictMixin\n\nDEFAULT_DICT_PARAMS = {\n \"use_bytes\": False,\n \"use_enum\": False,\n \"use_datetime\": False,\n}\nEncodedData = Union[str, bytes]\nT = TypeVar(\"T\", bound=\"DataClassYAMLMixin\")\n\n\nclass Encoder(Protocol): # pragma no cover\n def __call__(self, o, **kwargs) -> EncodedData:\n ...\n\n\nclass Decoder(Protocol): # pragma no cover\n def __call__(self, packed: EncodedData, **kwargs) -> Dict[Any, Any]:\n ...\n\n\nclass DataClassYAMLMixin(DataClassDictMixin):\n def to_yaml(\n self: T,\n encoder: Encoder = yaml.dump, # type: ignore\n dict_params: Mapping = MappingProxyType({}),\n **encoder_kwargs,\n ) -> EncodedData:\n\n return encoder(\n self.to_dict(**dict(DEFAULT_DICT_PARAMS, **dict_params)),\n **encoder_kwargs,\n )\n\n @classmethod\n def from_yaml(\n cls: Type[T],\n data: EncodedData,\n decoder: Decoder = yaml.safe_load, # type: ignore\n dict_params: Mapping = MappingProxyType({}),\n **decoder_kwargs,\n ) -> T:\n return cls.from_dict(\n decoder(data, **decoder_kwargs),\n **dict(DEFAULT_DICT_PARAMS, **dict_params),\n )\n",
"step-ids": [
6,
7,
8,
9,
10
]
}
|
[
6,
7,
8,
9,
10
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
l.sort()
print('+'.join(l))
<|reserved_special_token_1|>
l = input().split('+')
l.sort()
print('+'.join(l))
<|reserved_special_token_1|>
l = input().split("+")
l.sort()
print('+'.join(l))
|
flexible
|
{
"blob_id": "30d891c18f3635b7419fa0d0539b2665ad60b22c",
"index": 4748,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nl.sort()\nprint('+'.join(l))\n",
"step-3": "l = input().split('+')\nl.sort()\nprint('+'.join(l))\n",
"step-4": "l = input().split(\"+\")\r\r\nl.sort()\r\r\nprint('+'.join(l))\r\r\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import socket
import threading
#WebSocket Server Address
WS_ADDR = ("127.0.0.1",9876)
def ws_handler(sock,addr):
print 'ws handshaking...'
print 'connected...'
print 'closing...'
def websocket_server():
print 'listening for a WS connection... '
svSock = socket.socket()
svSock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR,1)
svSock.bind(WS_ADDR)
svSock.listen(5)
while (1):
wSock,wAddr = svSock.accept()
print 'accepted!'
threading.Thread(target=ws_handler,args=(wSock,wAddr)).start()
# a new listen thread
def listen_ws():
threading.Thread(target=websocket_server()).start()
|
normal
|
{
"blob_id": "668fe3d561d94be73f2f721fac89e9e25005769b",
"index": 2652,
"step-1": "import socket\nimport threading\n\n#WebSocket Server Address\nWS_ADDR = (\"127.0.0.1\",9876)\n\n\ndef ws_handler(sock,addr):\n print 'ws handshaking...'\n print 'connected...'\n print 'closing...'\n\n\ndef websocket_server():\n print 'listening for a WS connection... '\n svSock = socket.socket()\n svSock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR,1)\n svSock.bind(WS_ADDR)\n svSock.listen(5)\n while (1):\n wSock,wAddr = svSock.accept()\n print 'accepted!'\n threading.Thread(target=ws_handler,args=(wSock,wAddr)).start()\n\n\n# a new listen thread\ndef listen_ws():\n threading.Thread(target=websocket_server()).start()",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
import datetime
import shutil
from pathlib import Path
from jinja2 import Environment, FileSystemLoader
from dataclasses import dataclass
PATH_TO_TEMPLATES = Path('TEMPLATES/')
PATH_TO_RESOURCES = Path('RESOURCES/')
PATH_TO_OUTPUT = Path('../docs/')
URL_ROOT = "https://katys.cz/"
link_to_homepage = "/" # TODO: always / in production
html_file_suffix = ".html"
@dataclass()
class Page(object):
title: str
keywords: str
description: str
content_file: str
url: str
language: str
last_mod: datetime.datetime
phone: str = '+420 603 217 867'
email: str = 'katys@katys.cz'
def keys(self):
"""Get keys that allows conversion of this class to dictionary.
Returns:
List[str]: List of the keys to be passed to template.
"""
return ['title', 'keywords', 'description', 'url', 'content_file',
'language', 'phone', 'email']
def __getitem__(self, key):
"""Allows conversion of this class to dictionary.
"""
return getattr(self, key)
def generate_site(self):
with open(PATH_TO_TEMPLATES.joinpath('page.html')) as tem_han:
template = Environment(
loader=FileSystemLoader(PATH_TO_TEMPLATES)
).from_string(tem_han.read())
html_str = template.render(
**dict(self),
link_to_homepage=link_to_homepage
)
return html_str
@property
def absolute_url(self):
if self.url != 'index':
return URL_ROOT + self.url + html_file_suffix
return URL_ROOT
@property
def last_modified(self):
if self.last_mod is None:
return None
return self.last_mod.strftime('%Y-%m-%d')
unified_description = "Vyrábíme atypický nábytek dle návrhů vytvořených zákazníkem, bytovým designérem nebo námi, dále kuchyně na míru, interiérové dveře, schodiště a další."
unified_keywords = "Katys, Truhlářství, Nábytek, Dřevovýroba, Liberec"
pages = [
Page(title="Domů",
keywords=unified_keywords,
description=unified_description,
url="index",
content_file='page_home.html',
language="cs",
last_mod=datetime.datetime(2020, 12, 17)
),
Page(title="Reference",
keywords=unified_keywords,
description=unified_description,
url="reference",
content_file='page_reference.html',
language="cs",
last_mod=datetime.datetime(2020, 12, 17)
),
*(
Page(title="Okna",
keywords=unified_keywords,
description=unified_description,
url="okna",
content_file='page_okna.html',
language="cs",
last_mod=datetime.datetime(2020, 12, 17)
),
Page(title="Vchodové dveře",
keywords=unified_keywords,
description=unified_description,
url="vchodove-dvere",
content_file='page_vchodove_dvere.html',
language="cs",
last_mod=datetime.datetime(2020, 12, 17)
),
Page(title="Interiérové dveře",
keywords=unified_keywords,
description=unified_description,
url="interierove-dvere",
content_file='page_interierove_dvere.html',
language="cs",
last_mod=datetime.datetime(2020, 12, 17)
),
Page(title="Zimní zahrady",
keywords=unified_keywords,
description=unified_description,
url="zimni-zahrady",
content_file='page_zimni_zahrady.html',
language="cs",
last_mod=datetime.datetime(2020, 12, 17)
),
Page(title="Interiéry",
keywords=unified_keywords,
description=unified_description,
url="interiery",
content_file='page_interiery.html',
language="cs",
last_mod=datetime.datetime(2020, 12, 17)
),
Page(title="Kuchyně",
keywords=unified_keywords,
description=unified_description,
url="kuchyne",
content_file='page_kuchyne.html',
language="cs",
last_mod=datetime.datetime(2020, 12, 17)
),
Page(title="Nábytek",
keywords=unified_keywords,
description=unified_description,
url="nabytek",
content_file='page_nabytek.html',
language="cs",
last_mod=datetime.datetime(2020, 12, 17)
),
Page(title="Stavební truhlářství",
keywords=unified_keywords,
description=unified_description,
url="stavebni-truhlarstvi",
content_file='page_stavebni_truhlarstvi.html',
language="cs",
last_mod=datetime.datetime(2020, 12, 17)
),
Page(title="Stoly a židle",
keywords=unified_keywords,
description=unified_description,
url="stoly-a-zidle",
content_file='page_stoly_a_zidle.html',
language="cs",
last_mod=datetime.datetime(2020, 12, 17)
),
),
Page(title="Zelená úsporám",
keywords=unified_keywords,
description=unified_description,
url="zelena-usporam",
content_file='page_zelena_usporam.html',
language="cs",
last_mod=datetime.datetime(2020, 12, 17)
),
Page(title="Fotogalerie",
keywords=unified_keywords,
description=unified_description,
url="fotogalerie",
content_file='page_fotogalerie.html',
language="cs",
last_mod=datetime.datetime(2020, 12, 17)
),
Page(title="Certifikáty",
keywords=unified_keywords,
description=unified_description,
url="certifikaty",
content_file='page_certifikaty.html',
language="cs",
last_mod=datetime.datetime(2020, 12, 17)
),
Page(title="Kontakt",
keywords=unified_keywords,
description=unified_description,
url="kontakt",
content_file='page_kontakt.html',
language="cs",
last_mod=datetime.datetime(2020, 12, 17)
)
]
# Remove all existing resources
if PATH_TO_OUTPUT.exists():
shutil.rmtree(PATH_TO_OUTPUT)
# Create new dir
PATH_TO_OUTPUT.mkdir()
for page in pages:
content = page.generate_site()
with PATH_TO_OUTPUT.joinpath(page.url + html_file_suffix).open('w') as fp:
fp.write(content)
# Copy resources
shutil.copytree(PATH_TO_RESOURCES, PATH_TO_OUTPUT, dirs_exist_ok=True)
# Generate resource map:
with open(PATH_TO_TEMPLATES.joinpath('site_map.xml')) as tem_han:
template = Environment(
loader=FileSystemLoader(PATH_TO_TEMPLATES)
).from_string(tem_han.read())
html_str = template.render(
sites=pages
)
with PATH_TO_OUTPUT.joinpath('sitemap.xml').open('w') as f_xml:
f_xml.write(html_str)
robots_txt_content = f"""User-agent: *
Allow: /
Sitemap: {URL_ROOT}sitemap.xml"""
with PATH_TO_OUTPUT.joinpath('robots.txt').open('w') as robots_txt_h:
robots_txt_h.write(robots_txt_content)
|
normal
|
{
"blob_id": "5cc18af40befab444df44bf3da1f0175e5d18983",
"index": 8206,
"step-1": "<mask token>\n\n\n@dataclass()\nclass Page(object):\n title: str\n keywords: str\n description: str\n content_file: str\n url: str\n language: str\n last_mod: datetime.datetime\n phone: str = '+420 603 217 867'\n email: str = 'katys@katys.cz'\n <mask token>\n\n def __getitem__(self, key):\n \"\"\"Allows conversion of this class to dictionary.\n \"\"\"\n return getattr(self, key)\n <mask token>\n <mask token>\n\n @property\n def last_modified(self):\n if self.last_mod is None:\n return None\n return self.last_mod.strftime('%Y-%m-%d')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\n@dataclass()\nclass Page(object):\n title: str\n keywords: str\n description: str\n content_file: str\n url: str\n language: str\n last_mod: datetime.datetime\n phone: str = '+420 603 217 867'\n email: str = 'katys@katys.cz'\n <mask token>\n\n def __getitem__(self, key):\n \"\"\"Allows conversion of this class to dictionary.\n \"\"\"\n return getattr(self, key)\n\n def generate_site(self):\n with open(PATH_TO_TEMPLATES.joinpath('page.html')) as tem_han:\n template = Environment(loader=FileSystemLoader(PATH_TO_TEMPLATES)\n ).from_string(tem_han.read())\n html_str = template.render(**dict(self), link_to_homepage=\n link_to_homepage)\n return html_str\n\n @property\n def absolute_url(self):\n if self.url != 'index':\n return URL_ROOT + self.url + html_file_suffix\n return URL_ROOT\n\n @property\n def last_modified(self):\n if self.last_mod is None:\n return None\n return self.last_mod.strftime('%Y-%m-%d')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\n@dataclass()\nclass Page(object):\n title: str\n keywords: str\n description: str\n content_file: str\n url: str\n language: str\n last_mod: datetime.datetime\n phone: str = '+420 603 217 867'\n email: str = 'katys@katys.cz'\n\n def keys(self):\n \"\"\"Get keys that allows conversion of this class to dictionary.\n\n Returns:\n List[str]: List of the keys to be passed to template.\n \"\"\"\n return ['title', 'keywords', 'description', 'url', 'content_file',\n 'language', 'phone', 'email']\n\n def __getitem__(self, key):\n \"\"\"Allows conversion of this class to dictionary.\n \"\"\"\n return getattr(self, key)\n\n def generate_site(self):\n with open(PATH_TO_TEMPLATES.joinpath('page.html')) as tem_han:\n template = Environment(loader=FileSystemLoader(PATH_TO_TEMPLATES)\n ).from_string(tem_han.read())\n html_str = template.render(**dict(self), link_to_homepage=\n link_to_homepage)\n return html_str\n\n @property\n def absolute_url(self):\n if self.url != 'index':\n return URL_ROOT + self.url + html_file_suffix\n return URL_ROOT\n\n @property\n def last_modified(self):\n if self.last_mod is None:\n return None\n return self.last_mod.strftime('%Y-%m-%d')\n\n\n<mask token>\n",
"step-4": "import datetime\nimport shutil\nfrom pathlib import Path\nfrom jinja2 import Environment, FileSystemLoader\nfrom dataclasses import dataclass\nPATH_TO_TEMPLATES = Path('TEMPLATES/')\nPATH_TO_RESOURCES = Path('RESOURCES/')\nPATH_TO_OUTPUT = Path('../docs/')\nURL_ROOT = 'https://katys.cz/'\nlink_to_homepage = '/'\nhtml_file_suffix = '.html'\n\n\n@dataclass()\nclass Page(object):\n title: str\n keywords: str\n description: str\n content_file: str\n url: str\n language: str\n last_mod: datetime.datetime\n phone: str = '+420 603 217 867'\n email: str = 'katys@katys.cz'\n\n def keys(self):\n \"\"\"Get keys that allows conversion of this class to dictionary.\n\n Returns:\n List[str]: List of the keys to be passed to template.\n \"\"\"\n return ['title', 'keywords', 'description', 'url', 'content_file',\n 'language', 'phone', 'email']\n\n def __getitem__(self, key):\n \"\"\"Allows conversion of this class to dictionary.\n \"\"\"\n return getattr(self, key)\n\n def generate_site(self):\n with open(PATH_TO_TEMPLATES.joinpath('page.html')) as tem_han:\n template = Environment(loader=FileSystemLoader(PATH_TO_TEMPLATES)\n ).from_string(tem_han.read())\n html_str = template.render(**dict(self), link_to_homepage=\n link_to_homepage)\n return html_str\n\n @property\n def absolute_url(self):\n if self.url != 'index':\n return URL_ROOT + self.url + html_file_suffix\n return URL_ROOT\n\n @property\n def last_modified(self):\n if self.last_mod is None:\n return None\n return self.last_mod.strftime('%Y-%m-%d')\n\n\nunified_description = (\n 'Vyrábíme atypický nábytek dle návrhů vytvořených zákazníkem, bytovým designérem nebo námi, dále kuchyně na míru, interiérové dveře, schodiště a další.'\n )\nunified_keywords = 'Katys, Truhlářství, Nábytek, Dřevovýroba, Liberec'\npages = [Page(title='Domů', keywords=unified_keywords, description=\n unified_description, url='index', content_file='page_home.html',\n language='cs', last_mod=datetime.datetime(2020, 12, 17)), Page(title=\n 
'Reference', keywords=unified_keywords, description=unified_description,\n url='reference', content_file='page_reference.html', language='cs',\n last_mod=datetime.datetime(2020, 12, 17)), *(Page(title='Okna',\n keywords=unified_keywords, description=unified_description, url='okna',\n content_file='page_okna.html', language='cs', last_mod=datetime.\n datetime(2020, 12, 17)), Page(title='Vchodové dveře', keywords=\n unified_keywords, description=unified_description, url='vchodove-dvere',\n content_file='page_vchodove_dvere.html', language='cs', last_mod=\n datetime.datetime(2020, 12, 17)), Page(title='Interiérové dveře',\n keywords=unified_keywords, description=unified_description, url=\n 'interierove-dvere', content_file='page_interierove_dvere.html',\n language='cs', last_mod=datetime.datetime(2020, 12, 17)), Page(title=\n 'Zimní zahrady', keywords=unified_keywords, description=\n unified_description, url='zimni-zahrady', content_file=\n 'page_zimni_zahrady.html', language='cs', last_mod=datetime.datetime(\n 2020, 12, 17)), Page(title='Interiéry', keywords=unified_keywords,\n description=unified_description, url='interiery', content_file=\n 'page_interiery.html', language='cs', last_mod=datetime.datetime(2020, \n 12, 17)), Page(title='Kuchyně', keywords=unified_keywords, description=\n unified_description, url='kuchyne', content_file='page_kuchyne.html',\n language='cs', last_mod=datetime.datetime(2020, 12, 17)), Page(title=\n 'Nábytek', keywords=unified_keywords, description=unified_description,\n url='nabytek', content_file='page_nabytek.html', language='cs',\n last_mod=datetime.datetime(2020, 12, 17)), Page(title=\n 'Stavební truhlářství', keywords=unified_keywords, description=\n unified_description, url='stavebni-truhlarstvi', content_file=\n 'page_stavebni_truhlarstvi.html', language='cs', last_mod=datetime.\n datetime(2020, 12, 17)), Page(title='Stoly a židle', keywords=\n unified_keywords, description=unified_description, url='stoly-a-zidle',\n 
content_file='page_stoly_a_zidle.html', language='cs', last_mod=\n datetime.datetime(2020, 12, 17))), Page(title='Zelená úsporám',\n keywords=unified_keywords, description=unified_description, url=\n 'zelena-usporam', content_file='page_zelena_usporam.html', language=\n 'cs', last_mod=datetime.datetime(2020, 12, 17)), Page(title=\n 'Fotogalerie', keywords=unified_keywords, description=\n unified_description, url='fotogalerie', content_file=\n 'page_fotogalerie.html', language='cs', last_mod=datetime.datetime(2020,\n 12, 17)), Page(title='Certifikáty', keywords=unified_keywords,\n description=unified_description, url='certifikaty', content_file=\n 'page_certifikaty.html', language='cs', last_mod=datetime.datetime(2020,\n 12, 17)), Page(title='Kontakt', keywords=unified_keywords, description=\n unified_description, url='kontakt', content_file='page_kontakt.html',\n language='cs', last_mod=datetime.datetime(2020, 12, 17))]\nif PATH_TO_OUTPUT.exists():\n shutil.rmtree(PATH_TO_OUTPUT)\nPATH_TO_OUTPUT.mkdir()\nfor page in pages:\n content = page.generate_site()\n with PATH_TO_OUTPUT.joinpath(page.url + html_file_suffix).open('w') as fp:\n fp.write(content)\nshutil.copytree(PATH_TO_RESOURCES, PATH_TO_OUTPUT, dirs_exist_ok=True)\nwith open(PATH_TO_TEMPLATES.joinpath('site_map.xml')) as tem_han:\n template = Environment(loader=FileSystemLoader(PATH_TO_TEMPLATES)\n ).from_string(tem_han.read())\n html_str = template.render(sites=pages)\n with PATH_TO_OUTPUT.joinpath('sitemap.xml').open('w') as f_xml:\n f_xml.write(html_str)\nrobots_txt_content = f\"\"\"User-agent: *\nAllow: /\nSitemap: {URL_ROOT}sitemap.xml\"\"\"\nwith PATH_TO_OUTPUT.joinpath('robots.txt').open('w') as robots_txt_h:\n robots_txt_h.write(robots_txt_content)\n",
"step-5": "import datetime\nimport shutil\nfrom pathlib import Path\nfrom jinja2 import Environment, FileSystemLoader\n\nfrom dataclasses import dataclass\n\nPATH_TO_TEMPLATES = Path('TEMPLATES/')\nPATH_TO_RESOURCES = Path('RESOURCES/')\nPATH_TO_OUTPUT = Path('../docs/')\nURL_ROOT = \"https://katys.cz/\"\n\nlink_to_homepage = \"/\" # TODO: always / in production\nhtml_file_suffix = \".html\"\n\n\n@dataclass()\nclass Page(object):\n title: str\n keywords: str\n description: str\n content_file: str\n url: str\n language: str\n last_mod: datetime.datetime\n phone: str = '+420 603 217 867'\n email: str = 'katys@katys.cz'\n\n def keys(self):\n \"\"\"Get keys that allows conversion of this class to dictionary.\n\n Returns:\n List[str]: List of the keys to be passed to template.\n \"\"\"\n return ['title', 'keywords', 'description', 'url', 'content_file',\n 'language', 'phone', 'email']\n\n def __getitem__(self, key):\n \"\"\"Allows conversion of this class to dictionary.\n \"\"\"\n return getattr(self, key)\n\n def generate_site(self):\n with open(PATH_TO_TEMPLATES.joinpath('page.html')) as tem_han:\n template = Environment(\n loader=FileSystemLoader(PATH_TO_TEMPLATES)\n ).from_string(tem_han.read())\n html_str = template.render(\n **dict(self),\n link_to_homepage=link_to_homepage\n )\n return html_str\n\n @property\n def absolute_url(self):\n if self.url != 'index':\n return URL_ROOT + self.url + html_file_suffix\n return URL_ROOT\n\n @property\n def last_modified(self):\n if self.last_mod is None:\n return None\n return self.last_mod.strftime('%Y-%m-%d')\n\n\nunified_description = \"Vyrábíme atypický nábytek dle návrhů vytvořených zákazníkem, bytovým designérem nebo námi, dále kuchyně na míru, interiérové dveře, schodiště a další.\"\nunified_keywords = \"Katys, Truhlářství, Nábytek, Dřevovýroba, Liberec\"\n\npages = [\n Page(title=\"Domů\",\n keywords=unified_keywords,\n description=unified_description,\n url=\"index\",\n content_file='page_home.html',\n 
language=\"cs\",\n last_mod=datetime.datetime(2020, 12, 17)\n ),\n Page(title=\"Reference\",\n keywords=unified_keywords,\n description=unified_description,\n url=\"reference\",\n content_file='page_reference.html',\n language=\"cs\",\n last_mod=datetime.datetime(2020, 12, 17)\n ),\n *(\n Page(title=\"Okna\",\n keywords=unified_keywords,\n description=unified_description,\n url=\"okna\",\n content_file='page_okna.html',\n language=\"cs\",\n last_mod=datetime.datetime(2020, 12, 17)\n ),\n Page(title=\"Vchodové dveře\",\n keywords=unified_keywords,\n description=unified_description,\n url=\"vchodove-dvere\",\n content_file='page_vchodove_dvere.html',\n language=\"cs\",\n last_mod=datetime.datetime(2020, 12, 17)\n ),\n Page(title=\"Interiérové dveře\",\n keywords=unified_keywords,\n description=unified_description,\n url=\"interierove-dvere\",\n content_file='page_interierove_dvere.html',\n language=\"cs\",\n last_mod=datetime.datetime(2020, 12, 17)\n ),\n Page(title=\"Zimní zahrady\",\n keywords=unified_keywords,\n description=unified_description,\n url=\"zimni-zahrady\",\n content_file='page_zimni_zahrady.html',\n language=\"cs\",\n last_mod=datetime.datetime(2020, 12, 17)\n ),\n Page(title=\"Interiéry\",\n keywords=unified_keywords,\n description=unified_description,\n url=\"interiery\",\n content_file='page_interiery.html',\n language=\"cs\",\n last_mod=datetime.datetime(2020, 12, 17)\n ),\n Page(title=\"Kuchyně\",\n keywords=unified_keywords,\n description=unified_description,\n url=\"kuchyne\",\n content_file='page_kuchyne.html',\n language=\"cs\",\n last_mod=datetime.datetime(2020, 12, 17)\n ),\n Page(title=\"Nábytek\",\n keywords=unified_keywords,\n description=unified_description,\n url=\"nabytek\",\n content_file='page_nabytek.html',\n language=\"cs\",\n last_mod=datetime.datetime(2020, 12, 17)\n ),\n Page(title=\"Stavební truhlářství\",\n keywords=unified_keywords,\n description=unified_description,\n url=\"stavebni-truhlarstvi\",\n 
content_file='page_stavebni_truhlarstvi.html',\n language=\"cs\",\n last_mod=datetime.datetime(2020, 12, 17)\n ),\n Page(title=\"Stoly a židle\",\n keywords=unified_keywords,\n description=unified_description,\n url=\"stoly-a-zidle\",\n content_file='page_stoly_a_zidle.html',\n language=\"cs\",\n last_mod=datetime.datetime(2020, 12, 17)\n ),\n ),\n Page(title=\"Zelená úsporám\",\n keywords=unified_keywords,\n description=unified_description,\n url=\"zelena-usporam\",\n content_file='page_zelena_usporam.html',\n language=\"cs\",\n last_mod=datetime.datetime(2020, 12, 17)\n ),\n Page(title=\"Fotogalerie\",\n keywords=unified_keywords,\n description=unified_description,\n url=\"fotogalerie\",\n content_file='page_fotogalerie.html',\n language=\"cs\",\n last_mod=datetime.datetime(2020, 12, 17)\n ),\n Page(title=\"Certifikáty\",\n keywords=unified_keywords,\n description=unified_description,\n url=\"certifikaty\",\n content_file='page_certifikaty.html',\n language=\"cs\",\n last_mod=datetime.datetime(2020, 12, 17)\n ),\n Page(title=\"Kontakt\",\n keywords=unified_keywords,\n description=unified_description,\n url=\"kontakt\",\n content_file='page_kontakt.html',\n language=\"cs\",\n last_mod=datetime.datetime(2020, 12, 17)\n )\n]\n\n# Remove all existing resources\nif PATH_TO_OUTPUT.exists():\n shutil.rmtree(PATH_TO_OUTPUT)\n\n# Create new dir\nPATH_TO_OUTPUT.mkdir()\n\nfor page in pages:\n content = page.generate_site()\n with PATH_TO_OUTPUT.joinpath(page.url + html_file_suffix).open('w') as fp:\n fp.write(content)\n\n# Copy resources\nshutil.copytree(PATH_TO_RESOURCES, PATH_TO_OUTPUT, dirs_exist_ok=True)\n\n# Generate resource map:\nwith open(PATH_TO_TEMPLATES.joinpath('site_map.xml')) as tem_han:\n template = Environment(\n loader=FileSystemLoader(PATH_TO_TEMPLATES)\n ).from_string(tem_han.read())\n html_str = template.render(\n sites=pages\n )\n with PATH_TO_OUTPUT.joinpath('sitemap.xml').open('w') as f_xml:\n f_xml.write(html_str)\n\nrobots_txt_content = 
f\"\"\"User-agent: *\nAllow: /\nSitemap: {URL_ROOT}sitemap.xml\"\"\"\nwith PATH_TO_OUTPUT.joinpath('robots.txt').open('w') as robots_txt_h:\n robots_txt_h.write(robots_txt_content)\n",
"step-ids": [
3,
5,
6,
9,
10
]
}
|
[
3,
5,
6,
9,
10
] |
<|reserved_special_token_0|>
class LocationDetail(generics.RetrieveUpdateDestroyAPIView):
queryset = Location.objects.all()
serializer_class = LocationSerializer
name = 'location'
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class LocationList(generics.ListCreateAPIView):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class LocationDetail(generics.RetrieveUpdateDestroyAPIView):
queryset = Location.objects.all()
serializer_class = LocationSerializer
name = 'location'
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class LocationList(generics.ListCreateAPIView):
queryset = Location.objects.all()
serializer_class = LocationSerializer
name = 'location-list'
class LocationDetail(generics.RetrieveUpdateDestroyAPIView):
queryset = Location.objects.all()
serializer_class = LocationSerializer
name = 'location'
<|reserved_special_token_1|>
from rest_framework import generics
from animals.models import Location
from animals.serializers import LocationSerializer
class LocationList(generics.ListCreateAPIView):
queryset = Location.objects.all()
serializer_class = LocationSerializer
name = 'location-list'
class LocationDetail(generics.RetrieveUpdateDestroyAPIView):
queryset = Location.objects.all()
serializer_class = LocationSerializer
name = 'location'
|
flexible
|
{
"blob_id": "245e407c9e92b3ac34389a48fcef4fc1b349ea18",
"index": 8252,
"step-1": "<mask token>\n\n\nclass LocationDetail(generics.RetrieveUpdateDestroyAPIView):\n queryset = Location.objects.all()\n serializer_class = LocationSerializer\n name = 'location'\n",
"step-2": "<mask token>\n\n\nclass LocationList(generics.ListCreateAPIView):\n <mask token>\n <mask token>\n <mask token>\n\n\nclass LocationDetail(generics.RetrieveUpdateDestroyAPIView):\n queryset = Location.objects.all()\n serializer_class = LocationSerializer\n name = 'location'\n",
"step-3": "<mask token>\n\n\nclass LocationList(generics.ListCreateAPIView):\n queryset = Location.objects.all()\n serializer_class = LocationSerializer\n name = 'location-list'\n\n\nclass LocationDetail(generics.RetrieveUpdateDestroyAPIView):\n queryset = Location.objects.all()\n serializer_class = LocationSerializer\n name = 'location'\n",
"step-4": "from rest_framework import generics\nfrom animals.models import Location\nfrom animals.serializers import LocationSerializer\n\n\nclass LocationList(generics.ListCreateAPIView):\n queryset = Location.objects.all()\n serializer_class = LocationSerializer\n name = 'location-list'\n\n\nclass LocationDetail(generics.RetrieveUpdateDestroyAPIView):\n queryset = Location.objects.all()\n serializer_class = LocationSerializer\n name = 'location'\n",
"step-5": null,
"step-ids": [
2,
3,
4,
5
]
}
|
[
2,
3,
4,
5
] |
# coding: utf-8
from datetime import datetime
#from __future__ import unicode_literals
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils import timezone
from persol_users.models import PersolUser
from django.db.models import Q, Count
# アンケート
from questions.models import Question
@python_2_unicode_compatible
class Person(models.Model):
name = models.CharField(max_length=50)
pass
def __str__(self):
return self.name
class Event(models.Model):
    """A recruitable event created by a PersolUser.

    Stores the schedule and location (either fixed values or pending
    polls), recruiting capacity/status, and the users who joined,
    liked, or watch the event.
    """
    # Recruiting status codes used by ``event_status``.
    STATUS_CHOICES = (
        ("N","募集中"),
        ("E","募集終了")
    )
    author = models.ForeignKey(PersolUser, verbose_name='作成者', related_name='author')
    event_name = models.CharField('イベントタイトル', max_length=200)
    event_image = models.ImageField('イメージ画像', upload_to='event_image', blank=True, null=True)
    event_datetime = models.DateTimeField('開催日時', null=True)
    event_location = models.CharField('開催場所', max_length=200, blank=True)
    num_of_members = models.IntegerField('募集人数')
    dead_line = models.DateField('募集締切日', blank=True, null=True)
    overview = models.TextField('イベント概要')
    like = models.ManyToManyField(PersolUser, verbose_name='いいね', related_name='like')
    watch = models.ManyToManyField(PersolUser, verbose_name='ウォッチ', related_name='Watch')
    members = models.ManyToManyField(PersolUser)
    search_tag = models.TextField('検索用タグ', blank=True, null=True)
    event_status = models.CharField('イベントステータス', max_length=1, choices=STATUS_CHOICES, blank=False, null=False, default='N')

    # Optional polls used to decide the date / location.
    question_date = models.OneToOneField(Question, related_name='event_date', blank=True, null=True)
    question_location = models.OneToOneField(Question, related_name='event_location', blank=True, null=True)

    def __str__(self):
        return self.event_name

    def nokori(self):
        """Return the number of remaining slots (capacity minus members)."""
        return self.num_of_members - self.members.count()

    def like_list(self):
        """Return the queryset of users who liked this event."""
        return self.like.all()

    def event_date(self):
        """Return the event date as 'YYYY.MM.DD', or '' when unset."""
        try:
            return self.event_datetime.strftime('%Y.%m.%d')
        except AttributeError:
            # event_datetime is nullable (date still undecided).
            return ""

    def event_starttime(self):
        """Return the start time as 'HH:MM~', or '' when unset."""
        try:
            return self.event_datetime.strftime('%H:%M~')
        except AttributeError:
            return ""

    def nobreak_overview(self):
        """Return the overview text with newlines stripped (one-line view)."""
        return self.overview.replace("\n", "")

    def question_delete(self, type):
        """Detach and delete the date ('d') or location ('l') poll.

        Unknown type codes are now ignored; previously they raised
        NameError because ``q`` was never bound.
        """
        q = None  # fix: was unbound when ``type`` matched neither branch
        if type == 'd':
            q = self.question_date
            self.question_date = None
        elif type == 'l':
            q = self.question_location
            self.question_location = None
        if q:
            q.delete()

    def question_date_or_dummy(self):
        """Return the date poll, or a default dummy poll when none exists."""
        return self.question_date or Question.get_default_question('d')

    def question_location_or_dummy(self):
        """Return the location poll, or a default dummy poll when none exists."""
        return self.question_location or Question.get_default_question('l')

    def mailing_list(self):
        """Return the mail addresses of all members and watchers."""
        member_addr = [member.mail_address for member in self.members.all()]
        watcher_addr = [watcher.mail_address for watcher in self.watch.all()]
        return member_addr + watcher_addr

    def status(self):
        """Return a human-readable recruiting status label."""
        # NOTE(review): 'E' displays as 'イベント終了' here while the field
        # choice label is '募集終了' — confirm which wording is intended.
        if self.event_status == "N":
            return "募集中"
        if self.event_status == "E":
            return "イベント終了"
        return ""

    def datetimeForIndex(self):
        """Return the datetime for list views, or a placeholder label."""
        if self.event_datetime:
            return self.event_datetime
        if not self.question_date:
            return "未定"
        return "アンケート中"

    def locationForIndex(self):
        """Return the location for list views, or a placeholder label."""
        if self.event_location:
            return self.event_location
        if not self.question_location:
            return "未定"
        return "アンケート中"

    def oldstatus(self):
        """Return 'old' when the event datetime is in the past, else ''."""
        # Guard: event_datetime is nullable; comparing None raised TypeError.
        # NOTE(review): datetime.now() is naive — if USE_TZ is enabled the
        # stored datetime is aware and this comparison fails; verify settings.
        if self.event_datetime and self.event_datetime < datetime.now():
            return 'old'
        return ''
"""
python manage.py makemigrations
python manage.py migrate
"""
|
normal
|
{
"blob_id": "ca0bca24509df2bf0bd07fb2f31d3e7909957405",
"index": 3483,
"step-1": "<mask token>\n\n\nclass Event(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __str__(self):\n return self.event_name\n <mask token>\n\n def like_list(self):\n return self.like.all()\n\n def event_date(self):\n try:\n return self.event_datetime.strftime('%Y.%m.%d')\n except AttributeError:\n return ''\n <mask token>\n\n def nobreak_overview(self):\n return self.overview.replace('\\n', '')\n <mask token>\n\n def question_date_or_dummy(self):\n qd = self.question_date\n if not qd:\n qd = Question.get_default_question('d')\n return qd\n\n def question_location_or_dummy(self):\n ql = self.question_location\n if not ql:\n ql = Question.get_default_question('l')\n return ql\n\n def mailing_list(self):\n member_addr = [member.mail_address for member in self.members.all()]\n watcher_addr = [watcher.mail_address for watcher in self.watch.all()]\n ml = member_addr + watcher_addr\n return ml\n\n def status(self):\n if self.event_status == 'N':\n return '募集中'\n if self.event_status == 'E':\n return 'イベント終了'\n else:\n return ''\n\n def datetimeForIndex(self):\n if self.event_datetime:\n return self.event_datetime\n if not self.question_date:\n return '未定'\n else:\n return 'アンケート中'\n\n def locationForIndex(self):\n if self.event_location:\n return self.event_location\n if not self.question_location:\n return '未定'\n else:\n return 'アンケート中'\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Event(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __str__(self):\n return self.event_name\n\n def nokori(self):\n now_member = self.members.count()\n return self.num_of_members - now_member\n\n def like_list(self):\n return self.like.all()\n\n def event_date(self):\n try:\n return self.event_datetime.strftime('%Y.%m.%d')\n except AttributeError:\n return ''\n <mask token>\n\n def nobreak_overview(self):\n return self.overview.replace('\\n', '')\n <mask token>\n\n def question_date_or_dummy(self):\n qd = self.question_date\n if not qd:\n qd = Question.get_default_question('d')\n return qd\n\n def question_location_or_dummy(self):\n ql = self.question_location\n if not ql:\n ql = Question.get_default_question('l')\n return ql\n\n def mailing_list(self):\n member_addr = [member.mail_address for member in self.members.all()]\n watcher_addr = [watcher.mail_address for watcher in self.watch.all()]\n ml = member_addr + watcher_addr\n return ml\n\n def status(self):\n if self.event_status == 'N':\n return '募集中'\n if self.event_status == 'E':\n return 'イベント終了'\n else:\n return ''\n\n def datetimeForIndex(self):\n if self.event_datetime:\n return self.event_datetime\n if not self.question_date:\n return '未定'\n else:\n return 'アンケート中'\n\n def locationForIndex(self):\n if self.event_location:\n return self.event_location\n if not self.question_location:\n return '未定'\n else:\n return 'アンケート中'\n\n def oldstatus(self):\n if self.event_datetime < datetime.now():\n return 'old'\n else:\n return ''\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Event(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __str__(self):\n return self.event_name\n\n def nokori(self):\n now_member = self.members.count()\n return self.num_of_members - now_member\n\n def like_list(self):\n return self.like.all()\n\n def event_date(self):\n try:\n return self.event_datetime.strftime('%Y.%m.%d')\n except AttributeError:\n return ''\n\n def event_starttime(self):\n try:\n return self.event_datetime.strftime('%H:%M~')\n except AttributeError:\n return ''\n\n def nobreak_overview(self):\n return self.overview.replace('\\n', '')\n\n def question_delete(self, type):\n if type == 'd':\n q = self.question_date\n self.question_date = None\n elif type == 'l':\n q = self.question_location\n self.question_location = None\n if q:\n q.delete()\n\n def question_date_or_dummy(self):\n qd = self.question_date\n if not qd:\n qd = Question.get_default_question('d')\n return qd\n\n def question_location_or_dummy(self):\n ql = self.question_location\n if not ql:\n ql = Question.get_default_question('l')\n return ql\n\n def mailing_list(self):\n member_addr = [member.mail_address for member in self.members.all()]\n watcher_addr = [watcher.mail_address for watcher in self.watch.all()]\n ml = member_addr + watcher_addr\n return ml\n\n def status(self):\n if self.event_status == 'N':\n return '募集中'\n if self.event_status == 'E':\n return 'イベント終了'\n else:\n return ''\n\n def datetimeForIndex(self):\n if self.event_datetime:\n return self.event_datetime\n if not self.question_date:\n return '未定'\n else:\n return 'アンケート中'\n\n def locationForIndex(self):\n if self.event_location:\n return self.event_location\n if not self.question_location:\n return '未定'\n else:\n return 'アンケート中'\n\n def oldstatus(self):\n if 
self.event_datetime < datetime.now():\n return 'old'\n else:\n return ''\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass Event(models.Model):\n STATUS_CHOICES = ('N', '募集中'), ('E', '募集終了')\n author = models.ForeignKey(PersolUser, verbose_name='作成者', related_name\n ='author')\n event_name = models.CharField('イベントタイトル', max_length=200)\n event_image = models.ImageField('イメージ画像', upload_to='event_image',\n blank=True, null=True)\n event_datetime = models.DateTimeField('開催日時', null=True)\n event_location = models.CharField('開催場所', max_length=200, blank=True)\n num_of_members = models.IntegerField('募集人数')\n dead_line = models.DateField('募集締切日', blank=True, null=True)\n overview = models.TextField('イベント概要')\n like = models.ManyToManyField(PersolUser, verbose_name='いいね',\n related_name='like')\n watch = models.ManyToManyField(PersolUser, verbose_name='ウォッチ',\n related_name='Watch')\n members = models.ManyToManyField(PersolUser)\n search_tag = models.TextField('検索用タグ', blank=True, null=True)\n event_status = models.CharField('イベントステータス', max_length=1, choices=\n STATUS_CHOICES, blank=False, null=False, default='N')\n question_date = models.OneToOneField(Question, related_name=\n 'event_date', blank=True, null=True)\n question_location = models.OneToOneField(Question, related_name=\n 'event_location', blank=True, null=True)\n\n def __str__(self):\n return self.event_name\n\n def nokori(self):\n now_member = self.members.count()\n return self.num_of_members - now_member\n\n def like_list(self):\n return self.like.all()\n\n def event_date(self):\n try:\n return self.event_datetime.strftime('%Y.%m.%d')\n except AttributeError:\n return ''\n\n def event_starttime(self):\n try:\n return self.event_datetime.strftime('%H:%M~')\n except AttributeError:\n return ''\n\n def nobreak_overview(self):\n return self.overview.replace('\\n', '')\n\n def question_delete(self, type):\n if type == 'd':\n q = self.question_date\n self.question_date = None\n elif type == 'l':\n q = self.question_location\n self.question_location = None\n if q:\n q.delete()\n\n def 
question_date_or_dummy(self):\n qd = self.question_date\n if not qd:\n qd = Question.get_default_question('d')\n return qd\n\n def question_location_or_dummy(self):\n ql = self.question_location\n if not ql:\n ql = Question.get_default_question('l')\n return ql\n\n def mailing_list(self):\n member_addr = [member.mail_address for member in self.members.all()]\n watcher_addr = [watcher.mail_address for watcher in self.watch.all()]\n ml = member_addr + watcher_addr\n return ml\n\n def status(self):\n if self.event_status == 'N':\n return '募集中'\n if self.event_status == 'E':\n return 'イベント終了'\n else:\n return ''\n\n def datetimeForIndex(self):\n if self.event_datetime:\n return self.event_datetime\n if not self.question_date:\n return '未定'\n else:\n return 'アンケート中'\n\n def locationForIndex(self):\n if self.event_location:\n return self.event_location\n if not self.question_location:\n return '未定'\n else:\n return 'アンケート中'\n\n def oldstatus(self):\n if self.event_datetime < datetime.now():\n return 'old'\n else:\n return ''\n\n\n<mask token>\n",
"step-5": "# coding: utf-8\nfrom datetime import datetime\n\n#from __future__ import unicode_literals\n\nfrom django.db import models\nfrom django.utils.encoding import python_2_unicode_compatible\nfrom django.utils import timezone\nfrom persol_users.models import PersolUser\nfrom django.db.models import Q, Count\n\n# アンケート\nfrom questions.models import Question\n\n@python_2_unicode_compatible\nclass Person(models.Model):\n name = models.CharField(max_length=50)\n pass\n \n def __str__(self): \n return self.name\n\nclass Event(models.Model):\n STATUS_CHOICES = (\n (\"N\",\"募集中\"),\n (\"E\",\"募集終了\")\n )\n author = models.ForeignKey(PersolUser, verbose_name='作成者', related_name='author')\n event_name = models.CharField('イベントタイトル', max_length=200)\n event_image = models.ImageField('イメージ画像', upload_to='event_image', blank=True, null=True)\n event_datetime = models.DateTimeField('開催日時', null=True)\n event_location = models.CharField('開催場所', max_length=200, blank=True)\n num_of_members = models.IntegerField('募集人数')\n dead_line = models.DateField('募集締切日', blank=True,null=True)\n overview = models.TextField('イベント概要')\n# comment = models.ManyToManyField(Comment)\n like = models.ManyToManyField(PersolUser,verbose_name='いいね', related_name='like')\n watch = models.ManyToManyField(PersolUser,verbose_name='ウォッチ', related_name='Watch')\n members = models.ManyToManyField(PersolUser)\n search_tag = models.TextField('検索用タグ', blank=True, null=True)\n event_status = models.CharField('イベントステータス', max_length=1, choices=STATUS_CHOICES, blank=False, null=False, default='N')\n \n # アンケート\n question_date = models.OneToOneField(Question, related_name='event_date', blank=True, null=True)\n question_location = models.OneToOneField(Question, related_name='event_location', blank=True, null=True)\n \n \n def __str__(self): \n return self.event_name\n \n def nokori(self):\n now_member = self.members.count()\n return self.num_of_members - now_member\n\n def like_list(self):\n return 
self.like.all()\n \n def event_date(self):\n try:\n return self.event_datetime.strftime('%Y.%m.%d')\n except AttributeError:\n return \"\"\n\n def event_starttime(self):\n try:\n return self.event_datetime.strftime('%H:%M~')\n except AttributeError:\n return \"\"\n \n def nobreak_overview(self):\n return self.overview.replace(\"\\n\", \"\")\n \n \n # アンケート削除\n def question_delete(self, type):\n if type == 'd':\n q = self.question_date\n self.question_date = None\n elif type == 'l':\n q = self.question_location\n self.question_location = None\n \n if q:\n q.delete()\n \n # アンケート取得。なければデフォルト値のダミーアンケートを返す\n def question_date_or_dummy(self):\n qd = self.question_date\n if not qd:\n qd = Question.get_default_question('d')\n return qd\n \n def question_location_or_dummy(self):\n ql = self.question_location\n if not ql:\n ql = Question.get_default_question('l')\n return ql\n \n def mailing_list(self):\n member_addr=[member.mail_address for member in self.members.all()]\n watcher_addr=[watcher.mail_address for watcher in self.watch.all()]\n ml=member_addr+watcher_addr\n return ml\n \n def status(self):\n if self.event_status == \"N\": return \"募集中\"\n if self.event_status == \"E\": return \"イベント終了\"\n else:return \"\"\n\n def datetimeForIndex(self):\n if self.event_datetime:\n return self.event_datetime\n \n if not self.question_date:\n return \"未定\"\n else:\n return \"アンケート中\"\n\n def locationForIndex(self):\n if self.event_location:\n return self.event_location\n \n if not self.question_location:\n return \"未定\"\n else:\n return \"アンケート中\"\n \n def oldstatus(self):\n if self.event_datetime < datetime.now():\n return 'old'\n else:\n return ''\n\n\"\"\"\npython manage.py makemigrations\npython manage.py migrate\n\"\"\"",
"step-ids": [
11,
13,
15,
16,
21
]
}
|
[
11,
13,
15,
16,
21
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class UserTests(APITestCase):
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class UserTests(APITestCase):
def test_user_list(self):
response = self.client.get('/api/users/', {}, format='json')
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
user = User.objects.create(username='user', email=
'user@example.com', password='user123', is_staff=True)
self.client.force_authenticate(user=user)
response = self.client.get('/api/users/', {}, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['count'], 1)
actual = response.data['results'][0]
self.assertEqual(actual['username'], user.username)
self.assertEqual(actual['email'], user.email)
<|reserved_special_token_1|>
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APITestCase
from django.contrib.auth.models import User, Group
class UserTests(APITestCase):
def test_user_list(self):
response = self.client.get('/api/users/', {}, format='json')
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
user = User.objects.create(username='user', email=
'user@example.com', password='user123', is_staff=True)
self.client.force_authenticate(user=user)
response = self.client.get('/api/users/', {}, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['count'], 1)
actual = response.data['results'][0]
self.assertEqual(actual['username'], user.username)
self.assertEqual(actual['email'], user.email)
<|reserved_special_token_1|>
from django.urls import reverse
from django.contrib.auth.models import User, Group

from rest_framework import status
from rest_framework.test import APITestCase


class UserTests(APITestCase):
    """API tests for the user list endpoint."""

    def test_user_list(self):
        # Anonymous requests must be rejected with 401.
        resp = self.client.get('/api/users/', {}, format='json')
        self.assertEqual(resp.status_code, status.HTTP_401_UNAUTHORIZED)

        # An authenticated staff user can list users and sees itself.
        staff_user = User.objects.create(
            username='user',
            email='user@example.com',
            password='user123',
            is_staff=True,
        )
        self.client.force_authenticate(user=staff_user)
        resp = self.client.get('/api/users/', {}, format='json')
        self.assertEqual(resp.status_code, status.HTTP_200_OK)
        self.assertEqual(resp.data['count'], 1)
        first = resp.data['results'][0]
        self.assertEqual(first['username'], staff_user.username)
        self.assertEqual(first['email'], staff_user.email)
|
flexible
|
{
"blob_id": "ca7b0553e55e1c5e6cd23139a158101e72456a50",
"index": 8844,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass UserTests(APITestCase):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass UserTests(APITestCase):\n\n def test_user_list(self):\n response = self.client.get('/api/users/', {}, format='json')\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n user = User.objects.create(username='user', email=\n 'user@example.com', password='user123', is_staff=True)\n self.client.force_authenticate(user=user)\n response = self.client.get('/api/users/', {}, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data['count'], 1)\n actual = response.data['results'][0]\n self.assertEqual(actual['username'], user.username)\n self.assertEqual(actual['email'], user.email)\n",
"step-4": "from django.urls import reverse\nfrom rest_framework import status\nfrom rest_framework.test import APITestCase\nfrom django.contrib.auth.models import User, Group\n\n\nclass UserTests(APITestCase):\n\n def test_user_list(self):\n response = self.client.get('/api/users/', {}, format='json')\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n user = User.objects.create(username='user', email=\n 'user@example.com', password='user123', is_staff=True)\n self.client.force_authenticate(user=user)\n response = self.client.get('/api/users/', {}, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data['count'], 1)\n actual = response.data['results'][0]\n self.assertEqual(actual['username'], user.username)\n self.assertEqual(actual['email'], user.email)\n",
"step-5": "from django.urls import reverse\nfrom rest_framework import status\nfrom rest_framework.test import APITestCase\nfrom django.contrib.auth.models import User, Group\n\nclass UserTests(APITestCase): \n \n def test_user_list(self):\n # must be rejected without validation\n response = self.client.get('/api/users/', {}, format='json')\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n\n # must be success\n user = User.objects.create(username='user', email='user@example.com', password='user123', is_staff=True)\n self.client.force_authenticate(user=user)\n response = self.client.get('/api/users/', {}, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data['count'], 1)\n actual = response.data['results'][0]\n self.assertEqual(actual['username'], user.username)\n self.assertEqual(actual['email'], user.email)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from csv import writer

# Write a small movie catalogue to movies.csv.
# Fix: open with newline='' as the csv docs require — without it the
# writer emits an extra blank row between records on Windows.
with open("movies.csv", "w", newline="") as file:
    csv_writer = writer(file)
    csv_writer.writerow(['Name', 'Year'])
    # Batch the data rows in a single call.
    csv_writer.writerows([
        ['Ratchasan', 2018],
        ['Vadachennai', 2018],
        ['Naran', 2007],
    ])
|
normal
|
{
"blob_id": "83e231480c618d290089340c642313bbba4f1070",
"index": 2035,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith open('movies.csv', 'w') as file:\n csv_writer = writer(file)\n csv_writer.writerow(['Name', 'Year'])\n csv_writer.writerow(['Ratchasan', 2018])\n csv_writer.writerow(['Vadachennai', 2018])\n csv_writer.writerow(['Naran', 2007])\n",
"step-3": "from csv import writer\nwith open('movies.csv', 'w') as file:\n csv_writer = writer(file)\n csv_writer.writerow(['Name', 'Year'])\n csv_writer.writerow(['Ratchasan', 2018])\n csv_writer.writerow(['Vadachennai', 2018])\n csv_writer.writerow(['Naran', 2007])\n",
"step-4": "from csv import writer\nwith open(\"movies.csv\",\"w\") as file:\n csv_writer=writer(file)\n csv_writer.writerow(['Name','Year'])\n csv_writer.writerow(['Ratchasan',2018])\n csv_writer.writerow(['Vadachennai',2018])\n csv_writer.writerow(['Naran',2007])\n \n\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# 8-7. Album: Write a function called make_album() that builds a dictionary
# describing a music album. The function should take in an artist name and an
# album title, and it should return a dictionary containing these two pieces
# of information. Use the function to make three dictionaries representing
# different albums. Print each return value to show that the dictionaries are
# storing the album information correctly. Use None to add an optional
# parameter to make_album() that allows you to store the number of songs on an
# album. If the calling line includes a value for the number of songs, add
# that value to the album’s dictionary. Make at least one new function call
# that includes the number of songs on an album.
# PART ONE
def make_album(artist_name, album_title):
    """Return a dict describing an album with title-cased artist/name."""
    return {
        'Artist': artist_name.title(),
        'Album': album_title.title(),
    }
# Demonstrate make_album() on a few artist/title pairs.
print("Here's Part One:")
for artist, title in (
    ('cardi b', 'invasion of privacy'),
    ('jhene aiko', 'souled out'),
    ('lennon stella', 'three. two. one.'),
):
    print(make_album(artist, title))
# PART TWO
def make_album_two(artist_name, album_title, number_of_songs= None):
"""Build a dictionary describing a music album"""
music_album = {'Artist': artist_name.title(),
'Album': album_title.title()}
if number_of_songs:
music_album['Number of Songs'] = number_of_songs
return music_album
# Demonstrate make_album_two(), including one call with a track count.
print("\nHere's Part Two:")
for artist, title, songs in (
    ('cardi b', 'invasion of privacy', None),
    ('jhene aiko', 'souled out', None),
    ('lennon stella', 'three. two. one.', 13),
):
    print(make_album_two(artist, title, songs))
|
normal
|
{
"blob_id": "19888c998e8787533e84413272da1183f16fcdb1",
"index": 2974,
"step-1": "<mask token>\n\n\ndef make_album_two(artist_name, album_title, number_of_songs=None):\n \"\"\"Build a dictionary describing a music album\"\"\"\n music_album = {'Artist': artist_name.title(), 'Album': album_title.title()}\n if number_of_songs:\n music_album['Number of Songs'] = number_of_songs\n return music_album\n\n\n<mask token>\n",
"step-2": "def make_album(artist_name, album_title):\n \"\"\"Build a dictionary describing a music album\"\"\"\n music_album = {'Artist': artist_name.title(), 'Album': album_title.title()}\n return music_album\n\n\n<mask token>\n\n\ndef make_album_two(artist_name, album_title, number_of_songs=None):\n \"\"\"Build a dictionary describing a music album\"\"\"\n music_album = {'Artist': artist_name.title(), 'Album': album_title.title()}\n if number_of_songs:\n music_album['Number of Songs'] = number_of_songs\n return music_album\n\n\n<mask token>\n",
"step-3": "def make_album(artist_name, album_title):\n \"\"\"Build a dictionary describing a music album\"\"\"\n music_album = {'Artist': artist_name.title(), 'Album': album_title.title()}\n return music_album\n\n\nprint(\"Here's Part One:\")\n<mask token>\nprint(cardi)\n<mask token>\nprint(jhene)\n<mask token>\nprint(lennon)\n\n\ndef make_album_two(artist_name, album_title, number_of_songs=None):\n \"\"\"Build a dictionary describing a music album\"\"\"\n music_album = {'Artist': artist_name.title(), 'Album': album_title.title()}\n if number_of_songs:\n music_album['Number of Songs'] = number_of_songs\n return music_album\n\n\nprint(\"\"\"\nHere's Part Two:\"\"\")\n<mask token>\nprint(cardi)\n<mask token>\nprint(jhene)\n<mask token>\nprint(lennon)\n",
"step-4": "def make_album(artist_name, album_title):\n \"\"\"Build a dictionary describing a music album\"\"\"\n music_album = {'Artist': artist_name.title(), 'Album': album_title.title()}\n return music_album\n\n\nprint(\"Here's Part One:\")\ncardi = make_album('cardi b', 'invasion of privacy')\nprint(cardi)\njhene = make_album('jhene aiko', 'souled out')\nprint(jhene)\nlennon = make_album('lennon stella', 'three. two. one.')\nprint(lennon)\n\n\ndef make_album_two(artist_name, album_title, number_of_songs=None):\n \"\"\"Build a dictionary describing a music album\"\"\"\n music_album = {'Artist': artist_name.title(), 'Album': album_title.title()}\n if number_of_songs:\n music_album['Number of Songs'] = number_of_songs\n return music_album\n\n\nprint(\"\"\"\nHere's Part Two:\"\"\")\ncardi = make_album_two('cardi b', 'invasion of privacy')\nprint(cardi)\njhene = make_album_two('jhene aiko', 'souled out')\nprint(jhene)\nlennon = make_album_two('lennon stella', 'three. two. one.', 13)\nprint(lennon)\n",
"step-5": "# 8-7. Album: Write a function called make_album() that builds a dictionary\n# describing a music album. The function should take in an artist name and an\n# album title, and it should return a dictionary containing these two pieces\n# of information. Use the function to make three dictionaries representing\n# different albums. Print each return value to show that the dictionaries are\n# storing the album information correctly. Use None to add an optional\n# parameter to make_album() that allows you to store the number of songs on an\n# album. If the calling line includes a value for the number of songs, add\n# that value to the album’s dictionary. Make at least one new function call\n# that includes the number of songs on an album.\n\n# PART ONE\n\ndef make_album(artist_name, album_title): \n \"\"\"Build a dictionary describing a music album\"\"\" \n music_album = {\n 'Artist': artist_name.title(),\n 'Album': album_title.title()\n }\n return music_album\n\nprint(\"Here's Part One:\")\ncardi = make_album('cardi b', 'invasion of privacy')\nprint(cardi)\n\njhene = make_album('jhene aiko', 'souled out')\nprint(jhene)\n\nlennon = make_album('lennon stella', 'three. two. one.')\nprint(lennon)\n\n# PART TWO\ndef make_album_two(artist_name, album_title, number_of_songs= None): \n \"\"\"Build a dictionary describing a music album\"\"\" \n music_album = {'Artist': artist_name.title(),\n 'Album': album_title.title()}\n if number_of_songs:\n music_album['Number of Songs'] = number_of_songs\n return music_album\n\nprint(\"\\nHere's Part Two:\")\ncardi = make_album_two('cardi b', 'invasion of privacy')\nprint(cardi)\n\njhene = make_album_two('jhene aiko', 'souled out')\nprint(jhene)\n\nlennon = make_album_two('lennon stella', 'three. two. one.', 13)\nprint(lennon)\n\n\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
# Extract every author display name from a WordPress export (WXR) file,
# print them sorted, report adjacent duplicates, and print the total.
# Names appear as <wp:author_display_name><![CDATA[Name]]></...>.
# Fix: use a context manager so the file handle is closed (the original
# left it open for the life of the process).
with open('thegazelle.wordpress.2016-06-22.xml', 'r') as file:
    text = file.read()

authors = []
# Locate the first tag pair by hand and strip the CDATA wrapper.
start = text.find("<wp:author_display_name>")
length = len("<wp:author_display_name>")
end = text.find("</wp:author_display_name")
# NOTE(review): if the tag is absent, find() returns -1 and the slice
# silently yields garbage — confirm the export always contains authors.
authors.append(text[start + length + len("<![CDATA["):end - len("]]>")])
while text.find("<wp:author_display_name>", start + 1) != -1:
    start = text.find("<wp:author_display_name>", start + 1)
    end = text.find("</wp:author_display_name>", end + 1)
    authors.append(text[start + length + len("<![CDATA["):end - len("]]>")])

authors.sort()
for author in authors:
    print(author)

# After sorting, duplicates sit next to each other.
for i in range(len(authors) - 1):
    if authors[i] == authors[i + 1]:
        print(authors[i], "was double counted")

print(len(authors))
|
normal
|
{
"blob_id": "cf5062c999c6c29f103428c247d8d1a4550f9d75",
"index": 8086,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nauthors.append(text[start + length + len('<![CDATA['):end - len(']]>')])\nwhile text.find('<wp:author_display_name>', start + 1) != -1:\n start = text.find('<wp:author_display_name>', start + 1)\n end = text.find('</wp:author_display_name>', end + 1)\n authors.append(text[start + length + len('<![CDATA['):end - len(']]>')])\nauthors.sort()\nfor author in authors:\n print(author)\nfor i in range(len(authors) - 1):\n if authors[i] == authors[i + 1]:\n print(authors[i], 'was double counted')\nprint(len(authors))\n",
"step-3": "file = open('thegazelle.wordpress.2016-06-22.xml', 'r')\ntext = file.read()\nauthors = []\nstart = text.find('<wp:author_display_name>')\nlength = len('<wp:author_display_name>')\nend = text.find('</wp:author_display_name')\nauthors.append(text[start + length + len('<![CDATA['):end - len(']]>')])\nwhile text.find('<wp:author_display_name>', start + 1) != -1:\n start = text.find('<wp:author_display_name>', start + 1)\n end = text.find('</wp:author_display_name>', end + 1)\n authors.append(text[start + length + len('<![CDATA['):end - len(']]>')])\nauthors.sort()\nfor author in authors:\n print(author)\nfor i in range(len(authors) - 1):\n if authors[i] == authors[i + 1]:\n print(authors[i], 'was double counted')\nprint(len(authors))\n",
"step-4": "file = open('thegazelle.wordpress.2016-06-22.xml', 'r')\ntext = file.read()\nauthors = []\nstart = text.find(\"<wp:author_display_name>\")\nlength = len(\"<wp:author_display_name>\")\nend = text.find(\"</wp:author_display_name\")\nauthors.append(text[start+length+len(\"<![CDATA[\"):end-len(\"]]>\")])\nwhile text.find(\"<wp:author_display_name>\", start+1) != -1:\n start = text.find(\"<wp:author_display_name>\", start+1)\n end = text.find(\"</wp:author_display_name>\", end+1)\n authors.append(text[start+length+len(\"<![CDATA[\"):end-len(\"]]>\")])\nauthors.sort()\nfor author in authors:\n print(author)\n\nfor i in range(len(authors)-1):\n if (authors[i] == authors[i+1]):\n print(authors[i], \"was double counted\")\n\nprint(len(authors))",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# Practice 9, Conditionals, Exercise 2:
# Read an even and an odd number and report whether the first is
# exactly double the second.  (Prompts/output remain in Spanish.)
print("Introduce un valor par:")
even_value = int(input())
print("Introduce un valor impar:")
odd_value = int(input())
relation = " es el doble que " if even_value == odd_value * 2 else " no es el doble que "
print(even_value, relation, odd_value, ".")
|
normal
|
{
"blob_id": "8ad5f3e5f73eae191a3fe9bc20f73b4bfcfedc8c",
"index": 4884,
"step-1": "<mask token>\n",
"step-2": "print('Introduce un valor par:')\n<mask token>\nprint('Introduce un valor impar:')\n<mask token>\nif numpar == numimp * 2:\n print(numpar, ' es el doble que ', numimp, '.')\nelse:\n print(numpar, ' no es el doble que ', numimp, '.')\n",
"step-3": "print('Introduce un valor par:')\nnumpar = int(input())\nprint('Introduce un valor impar:')\nnumimp = int(input())\nif numpar == numimp * 2:\n print(numpar, ' es el doble que ', numimp, '.')\nelse:\n print(numpar, ' no es el doble que ', numimp, '.')\n",
"step-4": "#Pràctica 9 Condicionals, Exercici 2:\nprint(\"Introduce un valor par:\")\nnumpar=int(input())\nprint(\"Introduce un valor impar:\")\nnumimp=int(input())\nif numpar==numimp*2:\n print(numpar,\" es el doble que \",numimp,\".\")\nelse:\n print(numpar,\" no es el doble que \",numimp,\".\")",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#!/usr/bin/env python3
import gatt
class AnyDevice(gatt.Device):
    """gatt.Device subclass that logs connection lifecycle events and
    dumps the resolved GATT services/characteristics."""

    def connect_succeeded(self):
        super().connect_succeeded()
        print("[%s] Connected" % self.mac_address)

    def connect_failed(self, error):
        super().connect_failed(error)
        print("[%s] Connection failed: %s" % (self.mac_address, str(error)))

    def disconnect_succeeded(self):
        super().disconnect_succeeded()
        print("[%s] Disconnected" % self.mac_address)

    def services_resolved(self):
        super().services_resolved()
        mac = self.mac_address
        print("[%s] Resolved services" % mac)
        for svc in self.services:
            print("[%s] Service [%s]" % (mac, svc.uuid))
            for char in svc.characteristics:
                print("[%s] Characteristic [%s]" % (mac, char.uuid))
                # Dump the characteristic's attributes for exploration.
                print(dir(char))
                print("*****")
class AnyDeviceManager(gatt.DeviceManager):
    """Device manager that scans until a left-side PowerTap pedal is seen.

    ``mac_list`` is accepted and stored for API compatibility; it is not
    currently used for filtering.
    """

    def __init__(self, adapter_name, mac_list):
        super().__init__(adapter_name)
        self.mac_list = mac_list

    def device_discovered(self, device):
        # Match advertisement names like "powertap ... L".
        if 'powertap' in device.alias() and 'L' in device.alias():
            print(device.mac_address)
            # Fix: stop this manager via ``self`` instead of relying on
            # the module-level ``manager`` global, so the class works
            # regardless of the variable name it is bound to.
            self.stop()
# Scan on the first Bluetooth adapter; mac_list is stored but currently unused.
manager = AnyDeviceManager(adapter_name='hci0',mac_list=[])
manager.start_discovery()
# Blocks running the event loop until the manager's stop() is called
# (triggered in device_discovered when a matching device is found).
manager.run()
#74:5c:4b:0b:4e:f2
#device = AnyDevice(mac_address='66:12:d1:56:6b:3c', manager=manager)
|
normal
|
{
"blob_id": "480e636cfe28f2509d8ecf1e6e89924e994f100d",
"index": 4888,
"step-1": "<mask token>\n\n\nclass AnyDevice(gatt.Device):\n <mask token>\n\n def connect_failed(self, error):\n super().connect_failed(error)\n print('[%s] Connection failed: %s' % (self.mac_address, str(error)))\n <mask token>\n <mask token>\n\n\nclass AnyDeviceManager(gatt.DeviceManager):\n\n def __init__(self, adapter_name, mac_list):\n super().__init__(adapter_name)\n self.mac_list = mac_list\n\n def device_discovered(self, device):\n if 'powertap' in device.alias() and 'L' in device.alias():\n print(device.mac_address)\n manager.stop()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass AnyDevice(gatt.Device):\n\n def connect_succeeded(self):\n super().connect_succeeded()\n print('[%s] Connected' % self.mac_address)\n\n def connect_failed(self, error):\n super().connect_failed(error)\n print('[%s] Connection failed: %s' % (self.mac_address, str(error)))\n\n def disconnect_succeeded(self):\n super().disconnect_succeeded()\n print('[%s] Disconnected' % self.mac_address)\n <mask token>\n\n\nclass AnyDeviceManager(gatt.DeviceManager):\n\n def __init__(self, adapter_name, mac_list):\n super().__init__(adapter_name)\n self.mac_list = mac_list\n\n def device_discovered(self, device):\n if 'powertap' in device.alias() and 'L' in device.alias():\n print(device.mac_address)\n manager.stop()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass AnyDevice(gatt.Device):\n\n def connect_succeeded(self):\n super().connect_succeeded()\n print('[%s] Connected' % self.mac_address)\n\n def connect_failed(self, error):\n super().connect_failed(error)\n print('[%s] Connection failed: %s' % (self.mac_address, str(error)))\n\n def disconnect_succeeded(self):\n super().disconnect_succeeded()\n print('[%s] Disconnected' % self.mac_address)\n\n def services_resolved(self):\n super().services_resolved()\n print('[%s] Resolved services' % self.mac_address)\n for service in self.services:\n print('[%s] Service [%s]' % (self.mac_address, service.uuid))\n for characteristic in service.characteristics:\n print('[%s] Characteristic [%s]' % (self.mac_address,\n characteristic.uuid))\n print(dir(characteristic))\n print('*****')\n\n\nclass AnyDeviceManager(gatt.DeviceManager):\n\n def __init__(self, adapter_name, mac_list):\n super().__init__(adapter_name)\n self.mac_list = mac_list\n\n def device_discovered(self, device):\n if 'powertap' in device.alias() and 'L' in device.alias():\n print(device.mac_address)\n manager.stop()\n\n\n<mask token>\nmanager.start_discovery()\nmanager.run()\n",
"step-4": "import gatt\n\n\nclass AnyDevice(gatt.Device):\n\n def connect_succeeded(self):\n super().connect_succeeded()\n print('[%s] Connected' % self.mac_address)\n\n def connect_failed(self, error):\n super().connect_failed(error)\n print('[%s] Connection failed: %s' % (self.mac_address, str(error)))\n\n def disconnect_succeeded(self):\n super().disconnect_succeeded()\n print('[%s] Disconnected' % self.mac_address)\n\n def services_resolved(self):\n super().services_resolved()\n print('[%s] Resolved services' % self.mac_address)\n for service in self.services:\n print('[%s] Service [%s]' % (self.mac_address, service.uuid))\n for characteristic in service.characteristics:\n print('[%s] Characteristic [%s]' % (self.mac_address,\n characteristic.uuid))\n print(dir(characteristic))\n print('*****')\n\n\nclass AnyDeviceManager(gatt.DeviceManager):\n\n def __init__(self, adapter_name, mac_list):\n super().__init__(adapter_name)\n self.mac_list = mac_list\n\n def device_discovered(self, device):\n if 'powertap' in device.alias() and 'L' in device.alias():\n print(device.mac_address)\n manager.stop()\n\n\nmanager = AnyDeviceManager(adapter_name='hci0', mac_list=[])\nmanager.start_discovery()\nmanager.run()\n",
"step-5": "#!/usr/bin/env python3\nimport gatt\n\nclass AnyDevice(gatt.Device):\n def connect_succeeded(self):\n super().connect_succeeded()\n print(\"[%s] Connected\" % (self.mac_address))\n\n def connect_failed(self, error):\n super().connect_failed(error)\n print(\"[%s] Connection failed: %s\" % (self.mac_address, str(error)))\n\n def disconnect_succeeded(self):\n super().disconnect_succeeded()\n print(\"[%s] Disconnected\" % (self.mac_address))\n\n def services_resolved(self):\n super().services_resolved()\n\n print(\"[%s] Resolved services\" % (self.mac_address))\n for service in self.services:\n print(\"[%s] Service [%s]\" % (self.mac_address, service.uuid))\n for characteristic in service.characteristics:\n print(\"[%s] Characteristic [%s]\" % (self.mac_address, characteristic.uuid))\n print(dir(characteristic))\n print(\"*****\")\n\n\nclass AnyDeviceManager(gatt.DeviceManager):\n def __init__(self, adapter_name, mac_list):\n super().__init__(adapter_name)\n self.mac_list = mac_list\n\n def device_discovered(self, device):\n #print(\"Discovered [%s] %s\" % (device.mac_address, device.alias()))\n if ('powertap' in device.alias() and 'L' in device.alias()):\n print(device.mac_address)\n manager.stop()\n \nmanager = AnyDeviceManager(adapter_name='hci0',mac_list=[])\nmanager.start_discovery()\nmanager.run()\n\n\n#74:5c:4b:0b:4e:f2\n\n\n#device = AnyDevice(mac_address='66:12:d1:56:6b:3c', manager=manager)\n\n",
"step-ids": [
5,
7,
9,
11,
12
]
}
|
[
5,
7,
9,
11,
12
] |
# coding: utf-8
from django.test.client import Client
from django.contrib.contenttypes.models import ContentType
from main.models import Descriptor, ResourceThematic, ThematicArea
from utils.tests import BaseTestCase
from models import *
def minimal_form_data():
    """Return the smallest payload accepted by the media form view.

    Only the required scalar fields are filled in; the three inline formsets
    (descriptor, keyword, resource thematic) get zeroed management fields so
    no inline objects are submitted.
    """
    data = {
        'status': '0',
        'title': 'Foto 1',
        'description': 'Foto 1',
        'media_type': '1',
    }
    for inline in ('descriptor', 'keyword', 'resourcethematic'):
        data['main-%s-content_type-object_id-TOTAL_FORMS' % inline] = '0'
        data['main-%s-content_type-object_id-INITIAL_FORMS' % inline] = '0'
    return data
def complete_form_data():
    """Return a fully valid payload for the media form view.

    Extends minimal_form_data() with the remaining required fields: a link,
    a publication date, one subject-descriptor inline and one thematic-area
    inline (both with their formset counters bumped to 1).
    """
    data = minimal_form_data()
    data.update({
        'link': 'http://www.youtube.com',
        'publication_date': '01/12/2015',
        # one subject descriptor inline
        'main-descriptor-content_type-object_id-TOTAL_FORMS': '1',
        'main-descriptor-content_type-object_id-0-id': '',
        'main-descriptor-content_type-object_id-0-text': 'malaria',
        'main-descriptor-content_type-object_id-0-code': '^d8462',
        'main-descriptor-content_type-object_id-0-status': '0',
        # one thematic-area inline
        'main-resourcethematic-content_type-object_id-TOTAL_FORMS': '1',
        'main-resourcethematic-content_type-object_id-0-thematic_area': '1',
        'main-resourcethematic-content_type-object_id-0-status': '0',
    })
    return data
def create_media_object():
    """Populate the test database with two Media objects.

    The first belongs to cooperative center 'BR1.1' and gets one descriptor
    plus one thematic area attached; the second belongs to 'PY3.1', letting
    tests verify that listings are restricted to the user's own center.
    """
    br_media = Media.objects.create(status=0, title='Midia de teste (BR1.1)',
                                    media_type_id=1, link='http://bvsalud.org',
                                    created_by_id=1,
                                    cooperative_center_code='BR1.1')
    content_type = ContentType.objects.get_for_model(br_media)
    Descriptor.objects.create(object_id=1, content_type=content_type,
                              text='malaria')
    ResourceThematic.objects.create(object_id=1, content_type=content_type,
                                    thematic_area_id=1)
    Media.objects.create(status=0, title='Media de prueba (PY3.1)',
                         media_type_id=1, link='http://bvsalud.org',
                         created_by_id=2, cooperative_center_code='PY3.1')
class MultimediaTest(BaseTestCase):
    """
    Integration tests for the multimedia app: media CRUD, media types and
    media collections, including per-cooperative-center access restrictions
    and role-based permissions (editor / documentalist / admin).
    """
    def setUp(self):
        super(MultimediaTest, self).setUp()
        # create auxiliary models used on tests; form fixtures reference them by id=1
        media_type = MediaType.objects.create(acronym='video', name='Video')
        thematic_area = ThematicArea.objects.create(acronym='LISBR1.1', name='Teste')
    def test_list_media(self):
        """
        Test that the media list shows only the user's cooperative center.
        """
        self.login_editor()
        create_media_object()
        response = self.client.get('/multimedia/')
        self.assertContains(response, "Midia de teste (BR1.1")
        # list only medias from user cooperative center (BR1.1)
        self.assertNotContains(response, "Media de prueba (PY3.1)")
    def test_add_media(self):
        """
        Tests create media: required-field validation, then a valid submission.
        """
        self.login_editor()
        # invalid submission with missing required fields
        form_data = minimal_form_data()
        response = self.client.post('/multimedia/new', form_data )
        self.assertContains(response,'Por favor verifique os campos obrigatórios')
        self.assertContains(response,'Você precisa inserir pelo menos um descritor de assunto')
        self.assertContains(response,'Você precisa selecionar pelo menos uma área temática')
        # complete form_data with required fields and re-submit form
        form_data = complete_form_data()
        # test valid submission
        # after submit a valid content the view will redirect to /multimedia and list the objects
        # follow=True will allow check if the new data is on the list
        response = self.client.post('/multimedia/new', form_data, follow=True)
        self.assertRedirects(response, '/multimedia/')
        self.assertContains(response, "Foto 1")
        # check if is set cooperative center code of user (editor = BR1.1)
        self.assertEquals(Media.objects.all()[0].cooperative_center_code, "BR1.1")
    def test_edit_media(self):
        """
        Tests edit media: form rendering, status-change validation, normal save.
        """
        self.login_editor()
        create_media_object()
        media_test = Media.objects.all()[0]
        url = '/multimedia/edit/{0}'.format(media_test.id)
        response = self.client.get(url)
        # Test if return form with fields
        self.assertContains(response, media_test.title)
        # Test changes values and submit
        form_data = complete_form_data()
        form_data['status'] = '1'
        response = self.client.post(url, form_data)
        # check for validation of descriptor and thematic area for status = Admitted
        self.assertContains(response, "é necessário ter pelo menos um descritor")
        # check for normal edition (status back to draft saves and redirects)
        form_data['status'] = '0'
        response = self.client.post(url, form_data, follow=True)
        self.assertRedirects(response, '/multimedia/')
        self.assertContains(response, "Foto 1")
    def test_delete_media(self):
        """
        Tests delete media: confirmation page, then cascade removal.
        """
        self.login_editor()
        create_media_object()
        # GET renders the delete confirmation page
        response = self.client.get('/multimedia/delete/1')
        self.assertContains(response, "Você tem certeza que deseja apagar?")
        response = self.client.post('/multimedia/delete/1')
        # deleting the media must also remove its attached descriptor and thematic area
        self.assertTrue(Media.objects.filter(id=1).count() == 0)
        self.assertTrue(Descriptor.objects.filter(object_id=1).count() == 0)
        self.assertTrue(ResourceThematic.objects.filter(object_id=1).count() == 0)
        self.assertRedirects(response, '/multimedia/')
    def test_list_media_type(self):
        """
        Tests list media type: forbidden to documentalist, visible to admin.
        """
        # check if documentalist has access to list media-types
        self.login_documentalist()
        response = self.client.get('/multimedia/media-types/' )
        # 403 = unauthorized
        self.assertEqual(response.status_code, 403)
        self.client.logout()
        self.login_admin()
        response = self.client.get('/multimedia/media-types/')
        self.assertContains(response, "Video")
    def test_add_media_type(self):
        """
        Tests create media type: forbidden to documentalist, allowed to admin.
        """
        # check if documentalist has access to create new media-types
        self.login_documentalist()
        response = self.client.get('/multimedia/media-type/new' )
        # 403 = unauthorized
        self.assertEqual(response.status_code, 403)
        self.client.logout()
        self.login_admin()
        form_data = {
            'status': '0',
            'acronym': 'foto',
            'name': 'Foto',
            'language' : 'pt-br',
            'mediatypelocal_set-TOTAL_FORMS': '0',
            'mediatypelocal_set-INITIAL_FORMS': '0',
        }
        response = self.client.post('/multimedia/media-type/new', form_data, follow=True )
        self.assertRedirects(response, '/multimedia/media-types')
        self.assertContains(response, "Foto")
    def test_list_media_collection(self):
        """
        Tests list of media collection, including filtering by creator's center.
        """
        self.login_editor()
        # Create a media collection object and test that is present on list
        MediaCollection.objects.create(name='Coleção 1',
                            description='Coleção de teste 1',
                            created_by_id=1, cooperative_center_code='BR1.1')
        MediaCollection.objects.create(name='Coleção 2',
                            description='Coleção de teste 2',
                            created_by_id=2, cooperative_center_code='BR1.1')
        MediaCollection.objects.create(name='Coleção 3',
                            description='Coleção de teste 3',
                            created_by_id=3, cooperative_center_code='PY3.8')
        response = self.client.get('/multimedia/collections')
        # check if only one collection is returned (restrict by user)
        self.assertContains(response, "Coleção 1")
        self.assertEquals(response.context['object_list'].count(), 3)
        # check if return only colections from cooperative center BR1.1
        response = self.client.get('/multimedia/collections/?filter_created_by_cc=BR1.1')
        self.assertEquals(response.context['object_list'].count(), 2)
    def test_add_media_collection(self):
        """
        Tests add media collection
        """
        self.login_editor()
        form_data = {
            'name': 'Coleção nova',
            'description': 'Coleção de teste',
            'language': 'pt-br',
            'mediacollectionlocal_set-TOTAL_FORMS': '0',
            'mediacollectionlocal_set-INITIAL_FORMS': '0',
        }
        response = self.client.post('/multimedia/collection/new', form_data, follow=True )
        self.assertRedirects(response, '/multimedia/collections')
        self.assertContains(response, "Coleção nova")
|
normal
|
{
"blob_id": "a253ab5ef80a61c3784862625cde81de4c4ef984",
"index": 2094,
"step-1": "<mask token>\n\n\nclass MultimediaTest(BaseTestCase):\n <mask token>\n <mask token>\n <mask token>\n\n def test_add_media(self):\n \"\"\"\n Tests create media\n \"\"\"\n self.login_editor()\n form_data = minimal_form_data()\n response = self.client.post('/multimedia/new', form_data)\n self.assertContains(response,\n 'Por favor verifique os campos obrigatórios')\n self.assertContains(response,\n 'Você precisa inserir pelo menos um descritor de assunto')\n self.assertContains(response,\n 'Você precisa selecionar pelo menos uma área temática')\n form_data = complete_form_data()\n response = self.client.post('/multimedia/new', form_data, follow=True)\n self.assertRedirects(response, '/multimedia/')\n self.assertContains(response, 'Foto 1')\n self.assertEquals(Media.objects.all()[0].cooperative_center_code,\n 'BR1.1')\n\n def test_edit_media(self):\n \"\"\"\n Tests edit media\n \"\"\"\n self.login_editor()\n create_media_object()\n media_test = Media.objects.all()[0]\n url = '/multimedia/edit/{0}'.format(media_test.id)\n response = self.client.get(url)\n self.assertContains(response, media_test.title)\n form_data = complete_form_data()\n form_data['status'] = '1'\n response = self.client.post(url, form_data)\n self.assertContains(response,\n 'é necessário ter pelo menos um descritor')\n form_data['status'] = '0'\n response = self.client.post(url, form_data, follow=True)\n self.assertRedirects(response, '/multimedia/')\n self.assertContains(response, 'Foto 1')\n\n def test_delete_media(self):\n \"\"\"\n Tests delete media\n \"\"\"\n self.login_editor()\n create_media_object()\n response = self.client.get('/multimedia/delete/1')\n self.assertContains(response, 'Você tem certeza que deseja apagar?')\n response = self.client.post('/multimedia/delete/1')\n self.assertTrue(Media.objects.filter(id=1).count() == 0)\n self.assertTrue(Descriptor.objects.filter(object_id=1).count() == 0)\n self.assertTrue(ResourceThematic.objects.filter(object_id=1).count(\n ) == 0)\n 
self.assertRedirects(response, '/multimedia/')\n\n def test_list_media_type(self):\n \"\"\"\n Tests list media type\n \"\"\"\n self.login_documentalist()\n response = self.client.get('/multimedia/media-types/')\n self.assertEqual(response.status_code, 403)\n self.client.logout()\n self.login_admin()\n response = self.client.get('/multimedia/media-types/')\n self.assertContains(response, 'Video')\n\n def test_add_media_type(self):\n \"\"\"\n Tests create media type\n \"\"\"\n self.login_documentalist()\n response = self.client.get('/multimedia/media-type/new')\n self.assertEqual(response.status_code, 403)\n self.client.logout()\n self.login_admin()\n form_data = {'status': '0', 'acronym': 'foto', 'name': 'Foto',\n 'language': 'pt-br', 'mediatypelocal_set-TOTAL_FORMS': '0',\n 'mediatypelocal_set-INITIAL_FORMS': '0'}\n response = self.client.post('/multimedia/media-type/new', form_data,\n follow=True)\n self.assertRedirects(response, '/multimedia/media-types')\n self.assertContains(response, 'Foto')\n\n def test_list_media_collection(self):\n \"\"\"\n Tests list of media collection\n \"\"\"\n self.login_editor()\n MediaCollection.objects.create(name='Coleção 1', description=\n 'Coleção de teste 1', created_by_id=1, cooperative_center_code=\n 'BR1.1')\n MediaCollection.objects.create(name='Coleção 2', description=\n 'Coleção de teste 2', created_by_id=2, cooperative_center_code=\n 'BR1.1')\n MediaCollection.objects.create(name='Coleção 3', description=\n 'Coleção de teste 3', created_by_id=3, cooperative_center_code=\n 'PY3.8')\n response = self.client.get('/multimedia/collections')\n self.assertContains(response, 'Coleção 1')\n self.assertEquals(response.context['object_list'].count(), 3)\n response = self.client.get(\n '/multimedia/collections/?filter_created_by_cc=BR1.1')\n self.assertEquals(response.context['object_list'].count(), 2)\n\n def test_add_media_collection(self):\n \"\"\"\n Tests add media collection\n \"\"\"\n self.login_editor()\n form_data = {'name': 
'Coleção nova', 'description':\n 'Coleção de teste', 'language': 'pt-br',\n 'mediacollectionlocal_set-TOTAL_FORMS': '0',\n 'mediacollectionlocal_set-INITIAL_FORMS': '0'}\n response = self.client.post('/multimedia/collection/new', form_data,\n follow=True)\n self.assertRedirects(response, '/multimedia/collections')\n self.assertContains(response, 'Coleção nova')\n",
"step-2": "<mask token>\n\n\nclass MultimediaTest(BaseTestCase):\n <mask token>\n\n def setUp(self):\n super(MultimediaTest, self).setUp()\n media_type = MediaType.objects.create(acronym='video', name='Video')\n thematic_area = ThematicArea.objects.create(acronym='LISBR1.1',\n name='Teste')\n <mask token>\n\n def test_add_media(self):\n \"\"\"\n Tests create media\n \"\"\"\n self.login_editor()\n form_data = minimal_form_data()\n response = self.client.post('/multimedia/new', form_data)\n self.assertContains(response,\n 'Por favor verifique os campos obrigatórios')\n self.assertContains(response,\n 'Você precisa inserir pelo menos um descritor de assunto')\n self.assertContains(response,\n 'Você precisa selecionar pelo menos uma área temática')\n form_data = complete_form_data()\n response = self.client.post('/multimedia/new', form_data, follow=True)\n self.assertRedirects(response, '/multimedia/')\n self.assertContains(response, 'Foto 1')\n self.assertEquals(Media.objects.all()[0].cooperative_center_code,\n 'BR1.1')\n\n def test_edit_media(self):\n \"\"\"\n Tests edit media\n \"\"\"\n self.login_editor()\n create_media_object()\n media_test = Media.objects.all()[0]\n url = '/multimedia/edit/{0}'.format(media_test.id)\n response = self.client.get(url)\n self.assertContains(response, media_test.title)\n form_data = complete_form_data()\n form_data['status'] = '1'\n response = self.client.post(url, form_data)\n self.assertContains(response,\n 'é necessário ter pelo menos um descritor')\n form_data['status'] = '0'\n response = self.client.post(url, form_data, follow=True)\n self.assertRedirects(response, '/multimedia/')\n self.assertContains(response, 'Foto 1')\n\n def test_delete_media(self):\n \"\"\"\n Tests delete media\n \"\"\"\n self.login_editor()\n create_media_object()\n response = self.client.get('/multimedia/delete/1')\n self.assertContains(response, 'Você tem certeza que deseja apagar?')\n response = self.client.post('/multimedia/delete/1')\n 
self.assertTrue(Media.objects.filter(id=1).count() == 0)\n self.assertTrue(Descriptor.objects.filter(object_id=1).count() == 0)\n self.assertTrue(ResourceThematic.objects.filter(object_id=1).count(\n ) == 0)\n self.assertRedirects(response, '/multimedia/')\n\n def test_list_media_type(self):\n \"\"\"\n Tests list media type\n \"\"\"\n self.login_documentalist()\n response = self.client.get('/multimedia/media-types/')\n self.assertEqual(response.status_code, 403)\n self.client.logout()\n self.login_admin()\n response = self.client.get('/multimedia/media-types/')\n self.assertContains(response, 'Video')\n\n def test_add_media_type(self):\n \"\"\"\n Tests create media type\n \"\"\"\n self.login_documentalist()\n response = self.client.get('/multimedia/media-type/new')\n self.assertEqual(response.status_code, 403)\n self.client.logout()\n self.login_admin()\n form_data = {'status': '0', 'acronym': 'foto', 'name': 'Foto',\n 'language': 'pt-br', 'mediatypelocal_set-TOTAL_FORMS': '0',\n 'mediatypelocal_set-INITIAL_FORMS': '0'}\n response = self.client.post('/multimedia/media-type/new', form_data,\n follow=True)\n self.assertRedirects(response, '/multimedia/media-types')\n self.assertContains(response, 'Foto')\n\n def test_list_media_collection(self):\n \"\"\"\n Tests list of media collection\n \"\"\"\n self.login_editor()\n MediaCollection.objects.create(name='Coleção 1', description=\n 'Coleção de teste 1', created_by_id=1, cooperative_center_code=\n 'BR1.1')\n MediaCollection.objects.create(name='Coleção 2', description=\n 'Coleção de teste 2', created_by_id=2, cooperative_center_code=\n 'BR1.1')\n MediaCollection.objects.create(name='Coleção 3', description=\n 'Coleção de teste 3', created_by_id=3, cooperative_center_code=\n 'PY3.8')\n response = self.client.get('/multimedia/collections')\n self.assertContains(response, 'Coleção 1')\n self.assertEquals(response.context['object_list'].count(), 3)\n response = self.client.get(\n 
'/multimedia/collections/?filter_created_by_cc=BR1.1')\n self.assertEquals(response.context['object_list'].count(), 2)\n\n def test_add_media_collection(self):\n \"\"\"\n Tests add media collection\n \"\"\"\n self.login_editor()\n form_data = {'name': 'Coleção nova', 'description':\n 'Coleção de teste', 'language': 'pt-br',\n 'mediacollectionlocal_set-TOTAL_FORMS': '0',\n 'mediacollectionlocal_set-INITIAL_FORMS': '0'}\n response = self.client.post('/multimedia/collection/new', form_data,\n follow=True)\n self.assertRedirects(response, '/multimedia/collections')\n self.assertContains(response, 'Coleção nova')\n",
"step-3": "<mask token>\n\n\ndef minimal_form_data():\n \"\"\"\n Define a minimal fields for submit a media form\n \"\"\"\n form_data = {'status': '0', 'title': 'Foto 1', 'description': 'Foto 1',\n 'media_type': '1',\n 'main-descriptor-content_type-object_id-TOTAL_FORMS': '0',\n 'main-descriptor-content_type-object_id-INITIAL_FORMS': '0',\n 'main-keyword-content_type-object_id-TOTAL_FORMS': '0',\n 'main-keyword-content_type-object_id-INITIAL_FORMS': '0',\n 'main-resourcethematic-content_type-object_id-TOTAL_FORMS': '0',\n 'main-resourcethematic-content_type-object_id-INITIAL_FORMS': '0'}\n return form_data\n\n\n<mask token>\n\n\nclass MultimediaTest(BaseTestCase):\n \"\"\"\n Tests for multimedia app\n \"\"\"\n\n def setUp(self):\n super(MultimediaTest, self).setUp()\n media_type = MediaType.objects.create(acronym='video', name='Video')\n thematic_area = ThematicArea.objects.create(acronym='LISBR1.1',\n name='Teste')\n\n def test_list_media(self):\n \"\"\"\n Test list media\n \"\"\"\n self.login_editor()\n create_media_object()\n response = self.client.get('/multimedia/')\n self.assertContains(response, 'Midia de teste (BR1.1')\n self.assertNotContains(response, 'Media de prueba (PY3.1)')\n\n def test_add_media(self):\n \"\"\"\n Tests create media\n \"\"\"\n self.login_editor()\n form_data = minimal_form_data()\n response = self.client.post('/multimedia/new', form_data)\n self.assertContains(response,\n 'Por favor verifique os campos obrigatórios')\n self.assertContains(response,\n 'Você precisa inserir pelo menos um descritor de assunto')\n self.assertContains(response,\n 'Você precisa selecionar pelo menos uma área temática')\n form_data = complete_form_data()\n response = self.client.post('/multimedia/new', form_data, follow=True)\n self.assertRedirects(response, '/multimedia/')\n self.assertContains(response, 'Foto 1')\n self.assertEquals(Media.objects.all()[0].cooperative_center_code,\n 'BR1.1')\n\n def test_edit_media(self):\n \"\"\"\n Tests edit media\n 
\"\"\"\n self.login_editor()\n create_media_object()\n media_test = Media.objects.all()[0]\n url = '/multimedia/edit/{0}'.format(media_test.id)\n response = self.client.get(url)\n self.assertContains(response, media_test.title)\n form_data = complete_form_data()\n form_data['status'] = '1'\n response = self.client.post(url, form_data)\n self.assertContains(response,\n 'é necessário ter pelo menos um descritor')\n form_data['status'] = '0'\n response = self.client.post(url, form_data, follow=True)\n self.assertRedirects(response, '/multimedia/')\n self.assertContains(response, 'Foto 1')\n\n def test_delete_media(self):\n \"\"\"\n Tests delete media\n \"\"\"\n self.login_editor()\n create_media_object()\n response = self.client.get('/multimedia/delete/1')\n self.assertContains(response, 'Você tem certeza que deseja apagar?')\n response = self.client.post('/multimedia/delete/1')\n self.assertTrue(Media.objects.filter(id=1).count() == 0)\n self.assertTrue(Descriptor.objects.filter(object_id=1).count() == 0)\n self.assertTrue(ResourceThematic.objects.filter(object_id=1).count(\n ) == 0)\n self.assertRedirects(response, '/multimedia/')\n\n def test_list_media_type(self):\n \"\"\"\n Tests list media type\n \"\"\"\n self.login_documentalist()\n response = self.client.get('/multimedia/media-types/')\n self.assertEqual(response.status_code, 403)\n self.client.logout()\n self.login_admin()\n response = self.client.get('/multimedia/media-types/')\n self.assertContains(response, 'Video')\n\n def test_add_media_type(self):\n \"\"\"\n Tests create media type\n \"\"\"\n self.login_documentalist()\n response = self.client.get('/multimedia/media-type/new')\n self.assertEqual(response.status_code, 403)\n self.client.logout()\n self.login_admin()\n form_data = {'status': '0', 'acronym': 'foto', 'name': 'Foto',\n 'language': 'pt-br', 'mediatypelocal_set-TOTAL_FORMS': '0',\n 'mediatypelocal_set-INITIAL_FORMS': '0'}\n response = self.client.post('/multimedia/media-type/new', form_data,\n 
follow=True)\n self.assertRedirects(response, '/multimedia/media-types')\n self.assertContains(response, 'Foto')\n\n def test_list_media_collection(self):\n \"\"\"\n Tests list of media collection\n \"\"\"\n self.login_editor()\n MediaCollection.objects.create(name='Coleção 1', description=\n 'Coleção de teste 1', created_by_id=1, cooperative_center_code=\n 'BR1.1')\n MediaCollection.objects.create(name='Coleção 2', description=\n 'Coleção de teste 2', created_by_id=2, cooperative_center_code=\n 'BR1.1')\n MediaCollection.objects.create(name='Coleção 3', description=\n 'Coleção de teste 3', created_by_id=3, cooperative_center_code=\n 'PY3.8')\n response = self.client.get('/multimedia/collections')\n self.assertContains(response, 'Coleção 1')\n self.assertEquals(response.context['object_list'].count(), 3)\n response = self.client.get(\n '/multimedia/collections/?filter_created_by_cc=BR1.1')\n self.assertEquals(response.context['object_list'].count(), 2)\n\n def test_add_media_collection(self):\n \"\"\"\n Tests add media collection\n \"\"\"\n self.login_editor()\n form_data = {'name': 'Coleção nova', 'description':\n 'Coleção de teste', 'language': 'pt-br',\n 'mediacollectionlocal_set-TOTAL_FORMS': '0',\n 'mediacollectionlocal_set-INITIAL_FORMS': '0'}\n response = self.client.post('/multimedia/collection/new', form_data,\n follow=True)\n self.assertRedirects(response, '/multimedia/collections')\n self.assertContains(response, 'Coleção nova')\n",
"step-4": "<mask token>\n\n\ndef minimal_form_data():\n \"\"\"\n Define a minimal fields for submit a media form\n \"\"\"\n form_data = {'status': '0', 'title': 'Foto 1', 'description': 'Foto 1',\n 'media_type': '1',\n 'main-descriptor-content_type-object_id-TOTAL_FORMS': '0',\n 'main-descriptor-content_type-object_id-INITIAL_FORMS': '0',\n 'main-keyword-content_type-object_id-TOTAL_FORMS': '0',\n 'main-keyword-content_type-object_id-INITIAL_FORMS': '0',\n 'main-resourcethematic-content_type-object_id-TOTAL_FORMS': '0',\n 'main-resourcethematic-content_type-object_id-INITIAL_FORMS': '0'}\n return form_data\n\n\ndef complete_form_data():\n \"\"\"\n Define missing fields for a valid submission of media object\n \"\"\"\n missing_fields = {'link': 'http://www.youtube.com', 'publication_date':\n '01/12/2015', 'main-descriptor-content_type-object_id-TOTAL_FORMS':\n '1', 'main-descriptor-content_type-object_id-0-id': '',\n 'main-descriptor-content_type-object_id-0-text': 'malaria',\n 'main-descriptor-content_type-object_id-0-code': '^d8462',\n 'main-descriptor-content_type-object_id-0-status': '0',\n 'main-resourcethematic-content_type-object_id-TOTAL_FORMS': '1',\n 'main-resourcethematic-content_type-object_id-0-thematic_area': '1',\n 'main-resourcethematic-content_type-object_id-0-status': '0'}\n complete_form_data = minimal_form_data()\n complete_form_data.update(missing_fields)\n return complete_form_data\n\n\ndef create_media_object():\n \"\"\"\n Create media object for tests\n \"\"\"\n media1 = Media.objects.create(status=0, title='Midia de teste (BR1.1)',\n media_type_id=1, link='http://bvsalud.org', created_by_id=1,\n cooperative_center_code='BR1.1')\n media_ct = ContentType.objects.get_for_model(media1)\n descriptor = Descriptor.objects.create(object_id=1, content_type=\n media_ct, text='malaria')\n thematic = ResourceThematic.objects.create(object_id=1, content_type=\n media_ct, thematic_area_id=1)\n media2 = Media.objects.create(status=0, title='Media de prueba 
(PY3.1)',\n media_type_id=1, link='http://bvsalud.org', created_by_id=2,\n cooperative_center_code='PY3.1')\n\n\nclass MultimediaTest(BaseTestCase):\n \"\"\"\n Tests for multimedia app\n \"\"\"\n\n def setUp(self):\n super(MultimediaTest, self).setUp()\n media_type = MediaType.objects.create(acronym='video', name='Video')\n thematic_area = ThematicArea.objects.create(acronym='LISBR1.1',\n name='Teste')\n\n def test_list_media(self):\n \"\"\"\n Test list media\n \"\"\"\n self.login_editor()\n create_media_object()\n response = self.client.get('/multimedia/')\n self.assertContains(response, 'Midia de teste (BR1.1')\n self.assertNotContains(response, 'Media de prueba (PY3.1)')\n\n def test_add_media(self):\n \"\"\"\n Tests create media\n \"\"\"\n self.login_editor()\n form_data = minimal_form_data()\n response = self.client.post('/multimedia/new', form_data)\n self.assertContains(response,\n 'Por favor verifique os campos obrigatórios')\n self.assertContains(response,\n 'Você precisa inserir pelo menos um descritor de assunto')\n self.assertContains(response,\n 'Você precisa selecionar pelo menos uma área temática')\n form_data = complete_form_data()\n response = self.client.post('/multimedia/new', form_data, follow=True)\n self.assertRedirects(response, '/multimedia/')\n self.assertContains(response, 'Foto 1')\n self.assertEquals(Media.objects.all()[0].cooperative_center_code,\n 'BR1.1')\n\n def test_edit_media(self):\n \"\"\"\n Tests edit media\n \"\"\"\n self.login_editor()\n create_media_object()\n media_test = Media.objects.all()[0]\n url = '/multimedia/edit/{0}'.format(media_test.id)\n response = self.client.get(url)\n self.assertContains(response, media_test.title)\n form_data = complete_form_data()\n form_data['status'] = '1'\n response = self.client.post(url, form_data)\n self.assertContains(response,\n 'é necessário ter pelo menos um descritor')\n form_data['status'] = '0'\n response = self.client.post(url, form_data, follow=True)\n 
self.assertRedirects(response, '/multimedia/')\n self.assertContains(response, 'Foto 1')\n\n def test_delete_media(self):\n \"\"\"\n Tests delete media\n \"\"\"\n self.login_editor()\n create_media_object()\n response = self.client.get('/multimedia/delete/1')\n self.assertContains(response, 'Você tem certeza que deseja apagar?')\n response = self.client.post('/multimedia/delete/1')\n self.assertTrue(Media.objects.filter(id=1).count() == 0)\n self.assertTrue(Descriptor.objects.filter(object_id=1).count() == 0)\n self.assertTrue(ResourceThematic.objects.filter(object_id=1).count(\n ) == 0)\n self.assertRedirects(response, '/multimedia/')\n\n def test_list_media_type(self):\n \"\"\"\n Tests list media type\n \"\"\"\n self.login_documentalist()\n response = self.client.get('/multimedia/media-types/')\n self.assertEqual(response.status_code, 403)\n self.client.logout()\n self.login_admin()\n response = self.client.get('/multimedia/media-types/')\n self.assertContains(response, 'Video')\n\n def test_add_media_type(self):\n \"\"\"\n Tests create media type\n \"\"\"\n self.login_documentalist()\n response = self.client.get('/multimedia/media-type/new')\n self.assertEqual(response.status_code, 403)\n self.client.logout()\n self.login_admin()\n form_data = {'status': '0', 'acronym': 'foto', 'name': 'Foto',\n 'language': 'pt-br', 'mediatypelocal_set-TOTAL_FORMS': '0',\n 'mediatypelocal_set-INITIAL_FORMS': '0'}\n response = self.client.post('/multimedia/media-type/new', form_data,\n follow=True)\n self.assertRedirects(response, '/multimedia/media-types')\n self.assertContains(response, 'Foto')\n\n def test_list_media_collection(self):\n \"\"\"\n Tests list of media collection\n \"\"\"\n self.login_editor()\n MediaCollection.objects.create(name='Coleção 1', description=\n 'Coleção de teste 1', created_by_id=1, cooperative_center_code=\n 'BR1.1')\n MediaCollection.objects.create(name='Coleção 2', description=\n 'Coleção de teste 2', created_by_id=2, cooperative_center_code=\n 
'BR1.1')\n MediaCollection.objects.create(name='Coleção 3', description=\n 'Coleção de teste 3', created_by_id=3, cooperative_center_code=\n 'PY3.8')\n response = self.client.get('/multimedia/collections')\n self.assertContains(response, 'Coleção 1')\n self.assertEquals(response.context['object_list'].count(), 3)\n response = self.client.get(\n '/multimedia/collections/?filter_created_by_cc=BR1.1')\n self.assertEquals(response.context['object_list'].count(), 2)\n\n def test_add_media_collection(self):\n \"\"\"\n Tests add media collection\n \"\"\"\n self.login_editor()\n form_data = {'name': 'Coleção nova', 'description':\n 'Coleção de teste', 'language': 'pt-br',\n 'mediacollectionlocal_set-TOTAL_FORMS': '0',\n 'mediacollectionlocal_set-INITIAL_FORMS': '0'}\n response = self.client.post('/multimedia/collection/new', form_data,\n follow=True)\n self.assertRedirects(response, '/multimedia/collections')\n self.assertContains(response, 'Coleção nova')\n",
"step-5": "# coding: utf-8\n\nfrom django.test.client import Client\nfrom django.contrib.contenttypes.models import ContentType\n\nfrom main.models import Descriptor, ResourceThematic, ThematicArea\n\nfrom utils.tests import BaseTestCase\nfrom models import *\n\ndef minimal_form_data():\n '''\n Define a minimal fields for submit a media form\n '''\n\n form_data = {\n 'status': '0',\n 'title': 'Foto 1',\n 'description': 'Foto 1',\n 'media_type' : '1',\n\n 'main-descriptor-content_type-object_id-TOTAL_FORMS': '0',\n 'main-descriptor-content_type-object_id-INITIAL_FORMS': '0',\n\n 'main-keyword-content_type-object_id-TOTAL_FORMS': '0',\n 'main-keyword-content_type-object_id-INITIAL_FORMS': '0',\n\n 'main-resourcethematic-content_type-object_id-TOTAL_FORMS': '0',\n 'main-resourcethematic-content_type-object_id-INITIAL_FORMS': '0',\n }\n\n return form_data\n\ndef complete_form_data():\n '''\n Define missing fields for a valid submission of media object\n '''\n\n missing_fields = {\n 'link' : 'http://www.youtube.com',\n 'publication_date' : '01/12/2015',\n\n 'main-descriptor-content_type-object_id-TOTAL_FORMS' : '1',\n\n 'main-descriptor-content_type-object_id-0-id' : '',\n 'main-descriptor-content_type-object_id-0-text' : 'malaria',\n 'main-descriptor-content_type-object_id-0-code' : '^d8462',\n 'main-descriptor-content_type-object_id-0-status' : '0',\n\n 'main-resourcethematic-content_type-object_id-TOTAL_FORMS' : '1',\n 'main-resourcethematic-content_type-object_id-0-thematic_area' : '1',\n 'main-resourcethematic-content_type-object_id-0-status' : '0',\n }\n\n complete_form_data = minimal_form_data()\n complete_form_data.update(missing_fields)\n\n return complete_form_data\n\n\ndef create_media_object():\n '''\n Create media object for tests\n '''\n\n # Create a Media object and test that is present on list\n media1 = Media.objects.create(status=0,title='Midia de teste (BR1.1)',\n media_type_id=1, link='http://bvsalud.org', created_by_id=1,\n 
cooperative_center_code='BR1.1')\n\n media_ct = ContentType.objects.get_for_model(media1)\n descriptor = Descriptor.objects.create(object_id=1, content_type=media_ct, text='malaria')\n thematic = ResourceThematic.objects.create(object_id=1, content_type=media_ct, thematic_area_id=1)\n\n media2 = Media.objects.create(status=0,title='Media de prueba (PY3.1)',\n media_type_id=1, link='http://bvsalud.org', created_by_id=2,\n cooperative_center_code='PY3.1')\n\n\nclass MultimediaTest(BaseTestCase):\n \"\"\"\n Tests for multimedia app\n \"\"\"\n\n def setUp(self):\n super(MultimediaTest, self).setUp()\n\n # create auxiliary models used on tests\n media_type = MediaType.objects.create(acronym='video', name='Video')\n thematic_area = ThematicArea.objects.create(acronym='LISBR1.1', name='Teste')\n\n\n def test_list_media(self):\n \"\"\"\n Test list media\n \"\"\"\n self.login_editor()\n create_media_object()\n\n response = self.client.get('/multimedia/')\n self.assertContains(response, \"Midia de teste (BR1.1\")\n\n # list only medias from user cooperative center (BR1.1)\n self.assertNotContains(response, \"Media de prueba (PY3.1)\")\n\n\n def test_add_media(self):\n \"\"\"\n Tests create media\n \"\"\"\n self.login_editor()\n\n # invalid submission with missing required fields\n form_data = minimal_form_data()\n response = self.client.post('/multimedia/new', form_data )\n\n self.assertContains(response,'Por favor verifique os campos obrigatórios')\n self.assertContains(response,'Você precisa inserir pelo menos um descritor de assunto')\n self.assertContains(response,'Você precisa selecionar pelo menos uma área temática')\n\n # complete form_data with required fields and re-submit form\n form_data = complete_form_data()\n\n # test valid submission\n # after submit a valid content the view will redirect to /multimedia and list the objects\n # follow=True will allow check if the new data is on the list\n response = self.client.post('/multimedia/new', form_data, follow=True)\n 
self.assertRedirects(response, '/multimedia/')\n self.assertContains(response, \"Foto 1\")\n\n # check if is set cooperative center code of user (editor = BR1.1)\n self.assertEquals(Media.objects.all()[0].cooperative_center_code, \"BR1.1\")\n\n def test_edit_media(self):\n \"\"\"\n Tests edit media\n \"\"\"\n self.login_editor()\n create_media_object()\n\n media_test = Media.objects.all()[0]\n url = '/multimedia/edit/{0}'.format(media_test.id)\n response = self.client.get(url)\n\n # Test if return form with fields\n self.assertContains(response, media_test.title)\n\n # Test changes values and submit\n form_data = complete_form_data()\n form_data['status'] = '1'\n\n response = self.client.post(url, form_data)\n # check for validation of descriptor and thematic area for status = Admitted\n self.assertContains(response, \"é necessário ter pelo menos um descritor\")\n\n # check for normal edition\n form_data['status'] = '0'\n response = self.client.post(url, form_data, follow=True)\n self.assertRedirects(response, '/multimedia/')\n self.assertContains(response, \"Foto 1\")\n\n\n def test_delete_media(self):\n \"\"\"\n Tests delete media\n \"\"\"\n self.login_editor()\n create_media_object()\n\n response = self.client.get('/multimedia/delete/1')\n self.assertContains(response, \"Você tem certeza que deseja apagar?\")\n\n response = self.client.post('/multimedia/delete/1')\n\n self.assertTrue(Media.objects.filter(id=1).count() == 0)\n self.assertTrue(Descriptor.objects.filter(object_id=1).count() == 0)\n self.assertTrue(ResourceThematic.objects.filter(object_id=1).count() == 0)\n\n self.assertRedirects(response, '/multimedia/')\n\n\n def test_list_media_type(self):\n \"\"\"\n Tests list media type\n \"\"\"\n\n # check if documentalist has access to list media-types\n self.login_documentalist()\n response = self.client.get('/multimedia/media-types/' )\n\n # 403 = unauthorized\n self.assertEqual(response.status_code, 403)\n\n self.client.logout()\n self.login_admin()\n\n 
response = self.client.get('/multimedia/media-types/')\n self.assertContains(response, \"Video\")\n\n\n def test_add_media_type(self):\n \"\"\"\n Tests create media type\n \"\"\"\n\n # check if documentalist has access to create new media-types\n self.login_documentalist()\n response = self.client.get('/multimedia/media-type/new' )\n\n # 403 = unauthorized\n self.assertEqual(response.status_code, 403)\n\n self.client.logout()\n self.login_admin()\n\n form_data = {\n 'status': '0',\n 'acronym': 'foto',\n 'name': 'Foto',\n 'language' : 'pt-br',\n 'mediatypelocal_set-TOTAL_FORMS': '0',\n 'mediatypelocal_set-INITIAL_FORMS': '0',\n }\n\n response = self.client.post('/multimedia/media-type/new', form_data, follow=True )\n\n self.assertRedirects(response, '/multimedia/media-types')\n self.assertContains(response, \"Foto\")\n\n\n def test_list_media_collection(self):\n \"\"\"\n Tests list of media collection\n \"\"\"\n self.login_editor()\n\n # Create a media collection object and test that is present on list\n MediaCollection.objects.create(name='Coleção 1',\n description='Coleção de teste 1',\n created_by_id=1, cooperative_center_code='BR1.1')\n\n MediaCollection.objects.create(name='Coleção 2',\n description='Coleção de teste 2',\n created_by_id=2, cooperative_center_code='BR1.1')\n\n MediaCollection.objects.create(name='Coleção 3',\n description='Coleção de teste 3',\n created_by_id=3, cooperative_center_code='PY3.8')\n\n\n response = self.client.get('/multimedia/collections')\n # check if only one collection is returned (restrict by user)\n self.assertContains(response, \"Coleção 1\")\n self.assertEquals(response.context['object_list'].count(), 3)\n\n # check if return only colections from cooperative center BR1.1\n response = self.client.get('/multimedia/collections/?filter_created_by_cc=BR1.1')\n self.assertEquals(response.context['object_list'].count(), 2)\n\n\n def test_add_media_collection(self):\n \"\"\"\n Tests add media collection\n \"\"\"\n 
self.login_editor()\n\n form_data = {\n 'name': 'Coleção nova',\n 'description': 'Coleção de teste',\n 'language': 'pt-br',\n 'mediacollectionlocal_set-TOTAL_FORMS': '0',\n 'mediacollectionlocal_set-INITIAL_FORMS': '0',\n }\n\n response = self.client.post('/multimedia/collection/new', form_data, follow=True )\n\n self.assertRedirects(response, '/multimedia/collections')\n self.assertContains(response, \"Coleção nova\")\n",
"step-ids": [
8,
9,
12,
14,
16
]
}
|
[
8,
9,
12,
14,
16
] |
# vim: set et ts=4 sw=4 fileencoding=utf-8:
'''
tests.integration.test_pipeline
===============================
'''
import unittest
import yaml
import subprocess
import time
import pickle
from datetime import datetime
from amqp.exceptions import ChannelError
from yalp.config import settings
@unittest.skip('need to make this less brittle')
class TestSerialization(unittest.TestCase):
'''
Test that serialization via celery does not break
'''
def setUp(self):
settings.parsers = [{
'passthrough': {}
}]
try:
import socket
import amqp
self.connection = amqp.Connection()
self.channel = self.connection.channel()
except socket.error:
from nose.plugins.skip import SkipTest
raise SkipTest('Unable to connect to rabbitmq')
self.now = datetime.now()
self.event = {
'host': 'test_host',
'message': 'test message',
'date_time': self.now,
}
with open('/tmp/test_serial.yml', 'w') as config_file:
config = {
'parsers': [{
'passthrough': {}
}],
'parser_workers': 1
}
yaml.dump(config, config_file)
self.parser_process = subprocess.Popen(
'scripts/yalp-parsers -c /tmp/test_serial.yml',
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
def tearDown(self):
self.channel.queue_delete(queue=settings.parser_queue)
self.channel.queue_delete(queue='outputs')
self.channel.close()
self.connection.close()
self.parser_process.kill()
def test_default_serializer(self):
from yalp.pipeline import tasks
tasks.process_message.apply_async(
args=[self.event],
queue=settings.parser_queue,
serializer=settings.celery_serializer,
)
while True:
try:
message = self.channel.basic_get(queue='outputs')
break
except ChannelError:
time.sleep(0.1)
self.assertIsNotNone(message)
event = pickle.loads(message.body)['message']
self.assertEqual('test message', event['message'])
self.assertEqual(self.now, event['date_time'])
|
normal
|
{
"blob_id": "c945dc4df68fe110e8b38713fb77e2dce9efad8d",
"index": 8418,
"step-1": "<mask token>\n\n\n@unittest.skip('need to make this less brittle')\nclass TestSerialization(unittest.TestCase):\n <mask token>\n <mask token>\n <mask token>\n\n def test_default_serializer(self):\n from yalp.pipeline import tasks\n tasks.process_message.apply_async(args=[self.event], queue=settings\n .parser_queue, serializer=settings.celery_serializer)\n while True:\n try:\n message = self.channel.basic_get(queue='outputs')\n break\n except ChannelError:\n time.sleep(0.1)\n self.assertIsNotNone(message)\n event = pickle.loads(message.body)['message']\n self.assertEqual('test message', event['message'])\n self.assertEqual(self.now, event['date_time'])\n",
"step-2": "<mask token>\n\n\n@unittest.skip('need to make this less brittle')\nclass TestSerialization(unittest.TestCase):\n <mask token>\n\n def setUp(self):\n settings.parsers = [{'passthrough': {}}]\n try:\n import socket\n import amqp\n self.connection = amqp.Connection()\n self.channel = self.connection.channel()\n except socket.error:\n from nose.plugins.skip import SkipTest\n raise SkipTest('Unable to connect to rabbitmq')\n self.now = datetime.now()\n self.event = {'host': 'test_host', 'message': 'test message',\n 'date_time': self.now}\n with open('/tmp/test_serial.yml', 'w') as config_file:\n config = {'parsers': [{'passthrough': {}}], 'parser_workers': 1}\n yaml.dump(config, config_file)\n self.parser_process = subprocess.Popen(\n 'scripts/yalp-parsers -c /tmp/test_serial.yml', shell=True,\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\n def tearDown(self):\n self.channel.queue_delete(queue=settings.parser_queue)\n self.channel.queue_delete(queue='outputs')\n self.channel.close()\n self.connection.close()\n self.parser_process.kill()\n\n def test_default_serializer(self):\n from yalp.pipeline import tasks\n tasks.process_message.apply_async(args=[self.event], queue=settings\n .parser_queue, serializer=settings.celery_serializer)\n while True:\n try:\n message = self.channel.basic_get(queue='outputs')\n break\n except ChannelError:\n time.sleep(0.1)\n self.assertIsNotNone(message)\n event = pickle.loads(message.body)['message']\n self.assertEqual('test message', event['message'])\n self.assertEqual(self.now, event['date_time'])\n",
"step-3": "<mask token>\n\n\n@unittest.skip('need to make this less brittle')\nclass TestSerialization(unittest.TestCase):\n \"\"\"\n Test that serialization via celery does not break\n \"\"\"\n\n def setUp(self):\n settings.parsers = [{'passthrough': {}}]\n try:\n import socket\n import amqp\n self.connection = amqp.Connection()\n self.channel = self.connection.channel()\n except socket.error:\n from nose.plugins.skip import SkipTest\n raise SkipTest('Unable to connect to rabbitmq')\n self.now = datetime.now()\n self.event = {'host': 'test_host', 'message': 'test message',\n 'date_time': self.now}\n with open('/tmp/test_serial.yml', 'w') as config_file:\n config = {'parsers': [{'passthrough': {}}], 'parser_workers': 1}\n yaml.dump(config, config_file)\n self.parser_process = subprocess.Popen(\n 'scripts/yalp-parsers -c /tmp/test_serial.yml', shell=True,\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\n def tearDown(self):\n self.channel.queue_delete(queue=settings.parser_queue)\n self.channel.queue_delete(queue='outputs')\n self.channel.close()\n self.connection.close()\n self.parser_process.kill()\n\n def test_default_serializer(self):\n from yalp.pipeline import tasks\n tasks.process_message.apply_async(args=[self.event], queue=settings\n .parser_queue, serializer=settings.celery_serializer)\n while True:\n try:\n message = self.channel.basic_get(queue='outputs')\n break\n except ChannelError:\n time.sleep(0.1)\n self.assertIsNotNone(message)\n event = pickle.loads(message.body)['message']\n self.assertEqual('test message', event['message'])\n self.assertEqual(self.now, event['date_time'])\n",
"step-4": "<mask token>\nimport unittest\nimport yaml\nimport subprocess\nimport time\nimport pickle\nfrom datetime import datetime\nfrom amqp.exceptions import ChannelError\nfrom yalp.config import settings\n\n\n@unittest.skip('need to make this less brittle')\nclass TestSerialization(unittest.TestCase):\n \"\"\"\n Test that serialization via celery does not break\n \"\"\"\n\n def setUp(self):\n settings.parsers = [{'passthrough': {}}]\n try:\n import socket\n import amqp\n self.connection = amqp.Connection()\n self.channel = self.connection.channel()\n except socket.error:\n from nose.plugins.skip import SkipTest\n raise SkipTest('Unable to connect to rabbitmq')\n self.now = datetime.now()\n self.event = {'host': 'test_host', 'message': 'test message',\n 'date_time': self.now}\n with open('/tmp/test_serial.yml', 'w') as config_file:\n config = {'parsers': [{'passthrough': {}}], 'parser_workers': 1}\n yaml.dump(config, config_file)\n self.parser_process = subprocess.Popen(\n 'scripts/yalp-parsers -c /tmp/test_serial.yml', shell=True,\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\n def tearDown(self):\n self.channel.queue_delete(queue=settings.parser_queue)\n self.channel.queue_delete(queue='outputs')\n self.channel.close()\n self.connection.close()\n self.parser_process.kill()\n\n def test_default_serializer(self):\n from yalp.pipeline import tasks\n tasks.process_message.apply_async(args=[self.event], queue=settings\n .parser_queue, serializer=settings.celery_serializer)\n while True:\n try:\n message = self.channel.basic_get(queue='outputs')\n break\n except ChannelError:\n time.sleep(0.1)\n self.assertIsNotNone(message)\n event = pickle.loads(message.body)['message']\n self.assertEqual('test message', event['message'])\n self.assertEqual(self.now, event['date_time'])\n",
"step-5": "# vim: set et ts=4 sw=4 fileencoding=utf-8:\n'''\ntests.integration.test_pipeline\n===============================\n'''\nimport unittest\n\nimport yaml\nimport subprocess\nimport time\nimport pickle\nfrom datetime import datetime\n\nfrom amqp.exceptions import ChannelError\n\nfrom yalp.config import settings\n\n\n@unittest.skip('need to make this less brittle')\nclass TestSerialization(unittest.TestCase):\n '''\n Test that serialization via celery does not break\n '''\n def setUp(self):\n settings.parsers = [{\n 'passthrough': {}\n }]\n try:\n import socket\n import amqp\n self.connection = amqp.Connection()\n self.channel = self.connection.channel()\n except socket.error:\n from nose.plugins.skip import SkipTest\n raise SkipTest('Unable to connect to rabbitmq')\n self.now = datetime.now()\n self.event = {\n 'host': 'test_host',\n 'message': 'test message',\n 'date_time': self.now,\n }\n with open('/tmp/test_serial.yml', 'w') as config_file:\n config = {\n 'parsers': [{\n 'passthrough': {}\n }],\n 'parser_workers': 1\n }\n yaml.dump(config, config_file)\n\n self.parser_process = subprocess.Popen(\n 'scripts/yalp-parsers -c /tmp/test_serial.yml',\n shell=True,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n )\n\n def tearDown(self):\n self.channel.queue_delete(queue=settings.parser_queue)\n self.channel.queue_delete(queue='outputs')\n self.channel.close()\n self.connection.close()\n self.parser_process.kill()\n\n def test_default_serializer(self):\n from yalp.pipeline import tasks\n tasks.process_message.apply_async(\n args=[self.event],\n queue=settings.parser_queue,\n serializer=settings.celery_serializer,\n )\n while True:\n try:\n message = self.channel.basic_get(queue='outputs')\n break\n except ChannelError:\n time.sleep(0.1)\n self.assertIsNotNone(message)\n event = pickle.loads(message.body)['message']\n self.assertEqual('test message', event['message'])\n self.assertEqual(self.now, event['date_time'])\n\n",
"step-ids": [
2,
4,
5,
6,
7
]
}
|
[
2,
4,
5,
6,
7
] |
import os
import numpy as np
import warnings
import soundfile as sf
def load_path():
path = os.path.join(os.path.dirname(__file__))
if path == "":
path = "."
return path
def create_folder(directory):
try:
if not os.path.exists(directory):
os.makedirs(directory)
except OSError:
print ('Issue: Creating directory. ' + directory)
def read_dir_list(dirname, extention=""):
try:
return_list = []
filenames = os.listdir(dirname)
for filename in filenames:
full_filename = os.path.join(dirname, filename)
if os.path.isdir(full_filename):
return_list.extend(read_dir_list(full_filename, extention))
else:
ext = os.path.splitext(full_filename)[-1][1:]
if extention == "" or ext == extention:
return_list.append(full_filename)
return_list.sort()
return return_list
except PermissionError:
pass
def wav_to_float(x):
try:
max_value = np.iinfo(x.dtype).max
min_value = np.iinfo(x.dtype).min
except:
max_value = np.finfo(x.dtype).max
min_value = np.finfo(x.dtype).min
x = x.astype('float64', casting='safe')
x -= min_value
x /= ((max_value - min_value) / 2.)
x -= 1.
return x
def read_wav(filename):
# Reads in a wav audio file, takes the first channel, converts the signal to float64 representation
audio_signal, sample_rate = sf.read(filename)
if audio_signal.ndim > 1:
audio_signal = audio_signal[:, 0]
if audio_signal.dtype != 'float64':
audio_signal = wav_to_float(audio_signal)
return audio_signal, sample_rate
def write_wav(x, filename, sample_rate):
if type(x) != np.ndarray:
x = np.array(x)
with warnings.catch_warnings():
warnings.simplefilter("error")
sf.write(filename, x, sample_rate)
|
normal
|
{
"blob_id": "cab233976653b8135276ff849955f32766833354",
"index": 7555,
"step-1": "<mask token>\n\n\ndef load_path():\n path = os.path.join(os.path.dirname(__file__))\n if path == '':\n path = '.'\n return path\n\n\ndef create_folder(directory):\n try:\n if not os.path.exists(directory):\n os.makedirs(directory)\n except OSError:\n print('Issue: Creating directory. ' + directory)\n\n\n<mask token>\n\n\ndef read_wav(filename):\n audio_signal, sample_rate = sf.read(filename)\n if audio_signal.ndim > 1:\n audio_signal = audio_signal[:, 0]\n if audio_signal.dtype != 'float64':\n audio_signal = wav_to_float(audio_signal)\n return audio_signal, sample_rate\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef load_path():\n path = os.path.join(os.path.dirname(__file__))\n if path == '':\n path = '.'\n return path\n\n\ndef create_folder(directory):\n try:\n if not os.path.exists(directory):\n os.makedirs(directory)\n except OSError:\n print('Issue: Creating directory. ' + directory)\n\n\ndef read_dir_list(dirname, extention=''):\n try:\n return_list = []\n filenames = os.listdir(dirname)\n for filename in filenames:\n full_filename = os.path.join(dirname, filename)\n if os.path.isdir(full_filename):\n return_list.extend(read_dir_list(full_filename, extention))\n else:\n ext = os.path.splitext(full_filename)[-1][1:]\n if extention == '' or ext == extention:\n return_list.append(full_filename)\n return_list.sort()\n return return_list\n except PermissionError:\n pass\n\n\n<mask token>\n\n\ndef read_wav(filename):\n audio_signal, sample_rate = sf.read(filename)\n if audio_signal.ndim > 1:\n audio_signal = audio_signal[:, 0]\n if audio_signal.dtype != 'float64':\n audio_signal = wav_to_float(audio_signal)\n return audio_signal, sample_rate\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef load_path():\n path = os.path.join(os.path.dirname(__file__))\n if path == '':\n path = '.'\n return path\n\n\ndef create_folder(directory):\n try:\n if not os.path.exists(directory):\n os.makedirs(directory)\n except OSError:\n print('Issue: Creating directory. ' + directory)\n\n\ndef read_dir_list(dirname, extention=''):\n try:\n return_list = []\n filenames = os.listdir(dirname)\n for filename in filenames:\n full_filename = os.path.join(dirname, filename)\n if os.path.isdir(full_filename):\n return_list.extend(read_dir_list(full_filename, extention))\n else:\n ext = os.path.splitext(full_filename)[-1][1:]\n if extention == '' or ext == extention:\n return_list.append(full_filename)\n return_list.sort()\n return return_list\n except PermissionError:\n pass\n\n\ndef wav_to_float(x):\n try:\n max_value = np.iinfo(x.dtype).max\n min_value = np.iinfo(x.dtype).min\n except:\n max_value = np.finfo(x.dtype).max\n min_value = np.finfo(x.dtype).min\n x = x.astype('float64', casting='safe')\n x -= min_value\n x /= (max_value - min_value) / 2.0\n x -= 1.0\n return x\n\n\ndef read_wav(filename):\n audio_signal, sample_rate = sf.read(filename)\n if audio_signal.ndim > 1:\n audio_signal = audio_signal[:, 0]\n if audio_signal.dtype != 'float64':\n audio_signal = wav_to_float(audio_signal)\n return audio_signal, sample_rate\n\n\ndef write_wav(x, filename, sample_rate):\n if type(x) != np.ndarray:\n x = np.array(x)\n with warnings.catch_warnings():\n warnings.simplefilter('error')\n sf.write(filename, x, sample_rate)\n",
"step-4": "import os\nimport numpy as np\nimport warnings\nimport soundfile as sf\n\n\ndef load_path():\n path = os.path.join(os.path.dirname(__file__))\n if path == '':\n path = '.'\n return path\n\n\ndef create_folder(directory):\n try:\n if not os.path.exists(directory):\n os.makedirs(directory)\n except OSError:\n print('Issue: Creating directory. ' + directory)\n\n\ndef read_dir_list(dirname, extention=''):\n try:\n return_list = []\n filenames = os.listdir(dirname)\n for filename in filenames:\n full_filename = os.path.join(dirname, filename)\n if os.path.isdir(full_filename):\n return_list.extend(read_dir_list(full_filename, extention))\n else:\n ext = os.path.splitext(full_filename)[-1][1:]\n if extention == '' or ext == extention:\n return_list.append(full_filename)\n return_list.sort()\n return return_list\n except PermissionError:\n pass\n\n\ndef wav_to_float(x):\n try:\n max_value = np.iinfo(x.dtype).max\n min_value = np.iinfo(x.dtype).min\n except:\n max_value = np.finfo(x.dtype).max\n min_value = np.finfo(x.dtype).min\n x = x.astype('float64', casting='safe')\n x -= min_value\n x /= (max_value - min_value) / 2.0\n x -= 1.0\n return x\n\n\ndef read_wav(filename):\n audio_signal, sample_rate = sf.read(filename)\n if audio_signal.ndim > 1:\n audio_signal = audio_signal[:, 0]\n if audio_signal.dtype != 'float64':\n audio_signal = wav_to_float(audio_signal)\n return audio_signal, sample_rate\n\n\ndef write_wav(x, filename, sample_rate):\n if type(x) != np.ndarray:\n x = np.array(x)\n with warnings.catch_warnings():\n warnings.simplefilter('error')\n sf.write(filename, x, sample_rate)\n",
"step-5": "import os\nimport numpy as np\nimport warnings\nimport soundfile as sf\n\n\ndef load_path():\n path = os.path.join(os.path.dirname(__file__))\n if path == \"\":\n path = \".\"\n return path\n\n\ndef create_folder(directory):\n try:\n if not os.path.exists(directory):\n os.makedirs(directory)\n except OSError:\n print ('Issue: Creating directory. ' + directory)\n\ndef read_dir_list(dirname, extention=\"\"):\n try:\n return_list = []\n filenames = os.listdir(dirname)\n for filename in filenames:\n full_filename = os.path.join(dirname, filename)\n if os.path.isdir(full_filename):\n return_list.extend(read_dir_list(full_filename, extention))\n else:\n ext = os.path.splitext(full_filename)[-1][1:]\n if extention == \"\" or ext == extention:\n return_list.append(full_filename)\n return_list.sort()\n return return_list\n except PermissionError:\n pass\n\ndef wav_to_float(x):\n try:\n max_value = np.iinfo(x.dtype).max\n min_value = np.iinfo(x.dtype).min\n except:\n max_value = np.finfo(x.dtype).max\n min_value = np.finfo(x.dtype).min\n x = x.astype('float64', casting='safe')\n x -= min_value\n x /= ((max_value - min_value) / 2.)\n x -= 1.\n return x\n\ndef read_wav(filename):\n # Reads in a wav audio file, takes the first channel, converts the signal to float64 representation\n audio_signal, sample_rate = sf.read(filename)\n\n if audio_signal.ndim > 1:\n audio_signal = audio_signal[:, 0]\n\n if audio_signal.dtype != 'float64':\n audio_signal = wav_to_float(audio_signal)\n\n return audio_signal, sample_rate\n\ndef write_wav(x, filename, sample_rate):\n if type(x) != np.ndarray:\n x = np.array(x)\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"error\")\n sf.write(filename, x, sample_rate)\n",
"step-ids": [
3,
4,
6,
7,
8
]
}
|
[
3,
4,
6,
7,
8
] |
<|reserved_special_token_0|>
class Get_res_DataFrame:
<|reserved_special_token_0|>
def __init__(self, lr, df, df_bin, df_woe, use_lst, woe_dic, type_train
='type_train', y='is_7_p'):
self.df = df
self.df_bin = df_bin
self.df_woe = df_woe
self.use_lst = use_lst
self.woe_dic = woe_dic
self.type_train = type_train
self.model = lr
self.y = y
def main(self):
print('d2_1 = self.get_2_1_imp()', 'd2_2 = self.get_2_2_des()',
'd2_3 = self.get_2_3_corr()',
"d3 = self.get_bin_ins_oot(type_lst=['ins', 'oot', 'oot2'])")
def get_2_1_imp(self, df):
d1 = DataFrame(index=self.use_lst)
cover_dic = dict(df[use_lst].notnull().sum())
d1['auc'] = [round(0.5 + abs(0.5 - roc_auc_score(df[self.y], df[i])
), 3) for i in self.use_lst]
d1['ks'] = [round(float(self.ks_calc_cross(df, name, self.y)[0][
'gap']), 3) for name in self.use_lst]
d1['ins_iv'] = [round(self.cal_iv(df[df[self.type_train] == 'ins'],
name, self.y), 3) for name in self.use_lst]
d1['oot_iv'] = [round(self.cal_iv(df[df[self.type_train] == 'oot'],
name, self.y), 3) for name in self.use_lst]
d1['coef'] = [round(i, 4) for i in self.model.coef_[0]]
d1 = d1.reset_index()
d1['psi'] = [round(self.cal_psi(df, name), 5) for name in self.use_lst]
d1['vif'] = [round(variance_inflation_factor(np.matrix(df[self.
use_lst]), i), 3) for i in range(len(self.use_lst))]
d1.index = range(1, d1.shape[0] + 1)
return d1
def get_2_2_des(self):
df = self.df[self.df[self.type_train].isin(['ins', 'oot'])]
df_data_des = df[self.use_lst].describe().T
cover_dic = dict(df[use_lst].notnull().sum())
df_data_des = df_data_des.reset_index()
df_data_des['cover'] = df_data_des['index'].apply(lambda x: round(
cover_dic[x] / df.shape[0], 4))
df_data_des.index = df_data_des['index']
df_data_des.drop(columns=['index', 'count'], inplace=True)
d2_2 = df_data_des.reset_index()
d2_2.index = range(1, d2_2.shape[0] + 1)
return d2_2
def get_2_3_corr(self):
corr = np.corrcoef(np.array(self.df_woe[self.use_lst]).T)
d2_3 = DataFrame(corr, columns=range(len(self.use_lst)), index=self
.use_lst).reset_index()
d2_3.index = range(1, d2_3.shape[0] + 1)
return d2_3
def get_bin_ins_oot(self, type_lst=['ins', 'oot', 'oot2']):
res = []
for loc, i in enumerate(type_lst):
lst = []
df_tmp = self.df_bin[self.df_bin[self.type_train] == i]
for name in self.use_lst:
dd_tmp = df_tmp.groupby(name).sum()[[self.y, 'count']]
dd_tmp['bad_rate'] = dd_tmp[self.y] / dd_tmp['count']
dd_tmp = dd_tmp.reset_index()
dd_tmp['woe'] = dd_tmp[name].apply(lambda x: self.woe_dic[
name][x])
dd_tmp.sort_values(by='bad_rate', inplace=True)
dd_tmp['sort_key'] = [(float(i.split(',')[0][1:]) if i[0] ==
'(' else float('inf')) for i in dd_tmp[name]]
dd_tmp.sort_values(by='sort_key', inplace=True)
dd_tmp.drop(columns=['sort_key'], inplace=True)
name1 = '-'
d = DataFrame(columns=['slice', 'bad', 'count', 'bad_rio',
'woe'], data=[[str(name1), '-', '-', '-', '-']] +
dd_tmp.values.tolist()[:], index=[[name]] + ['-'] *
dd_tmp.shape[0])
if loc < 1:
split_name = '<-->' + str(i)
else:
split_name = str(type_lst[loc - 1]) + '<-->' + str(i)
d[split_name] = [split_name for i in range(d.shape[0])]
d = d[[split_name, 'slice', 'bad', 'count', 'bad_rio', 'woe']]
lst.append(d)
res.append(lst)
return pd.concat((pd.concat(i for i in res[i]) for i in range(len(
type_lst))), axis=1)
def get_categories_df(self, df, cate='type_new', base_cut='ins', y=
'final_score'):
df_tmp = copy.deepcopy(df[[cate, self.y, y]])
df_tmp.rename(columns={cate: 'category', self.y: 'bad'}, inplace=True)
cut_line = list(np.percentile(list(df_tmp[df_tmp['category'] ==
base_cut][y]), range(1, 101, 10)))
cut_line[0] = -float('inf')
cut_line.append(float('inf'))
df_tmp['bins'] = pd.cut(df_tmp[y], bins=cut_line)
df_tmp['count'] = [(1) for i in range(df_tmp.shape[0])]
ks_lst = []
for i in sorted(Counter(df_tmp['category']).keys()):
lst = list(ks_calc_cross(df_tmp[df_tmp['category'] == i],
'bins', 'bad')[1]['gap'])
while len(lst) < 10:
lst = [0] + lst
ks_lst.extend(lst)
df = df_tmp.groupby(['category', 'bins']).sum()[['bad', 'count']]
df = df.reset_index()
df['bad_rate'] = df['bad'] / df['count']
df['ks'] = ks_lst
for i in ['bad', 'count', 'bad_rate', 'ks']:
df[i] = df[i].astype(float)
df[['bad', 'count', 'bad_rate', 'ks']] = df[['bad', 'count',
'bad_rate', 'ks']].fillna(0)
df.index = range(1, df.shape[0] + 1)
return df
def ks_calc_cross(self, data, pred, y_label):
"""
功能: 计算KS值,输出对应分割点和累计分布函数曲线图
输入值:
data: 二维数组或dataframe,包括模型得分和真实的标签
pred: 一维数组或series,代表模型得分(一般为预测正类的概率)
y_label: 一维数组或series,代表真实的标签({0,1}或{-1,1})
输出值:
'ks': KS值,'crossdens': 好坏客户累积概率分布以及其差值gap
"""
crossfreq = pd.crosstab(data[pred], data[y_label])
crossdens = crossfreq.cumsum(axis=0) / crossfreq.sum()
crossdens['gap'] = abs(crossdens[0] - crossdens[1])
ks = crossdens[crossdens['gap'] == crossdens['gap'].max()]
return ks, crossdens
def cal_iv(self, df1, x, y='is_7_p'):
df = copy.deepcopy(df1)
if 'count' not in df.columns:
df['count'] = [(1) for i in range(df.shape[0])]
df_tmp = df[[x, 'count', y]].groupby(x).sum()
df_tmp['good'] = df_tmp['count'] - df_tmp[y]
df_tmp[y] = df_tmp[y].apply(lambda x: max(x, 1e-05) / sum(df_tmp[y]))
df_tmp['good'] = df_tmp['good'].apply(lambda x: max(x, 1e-05) / sum
(df_tmp['good']))
df_tmp['woe'] = np.log(df_tmp[y] / df_tmp['good'])
df_tmp['iv'] = (df_tmp[y] - df_tmp['good']) * df_tmp['woe']
return df_tmp['iv'].sum()
def cal_psi(self, df_sf_bin, name, lst=['ins', 'oot']):
name1, name2 = lst
df_in = copy.deepcopy(df_sf_bin[df_sf_bin['type_train'] == name1])
sum_1 = df_in.shape[0]
df_in['count1'] = [(1) for i in range(sum_1)]
df_in = df_in.groupby(name).sum()[['count1']]
df_out = copy.deepcopy(df_sf_bin[df_sf_bin['type_train'] == name2])
sum_2 = df_out.shape[0]
df_out['count2'] = [(1) for i in range(sum_2)]
df_out = df_out.groupby(name).sum()[['count2']]
df_psi = pd.concat((df_in, df_out), axis=1)
df_psi['count1'] = df_psi['count1'].apply(lambda x: x / sum_1)
df_psi['count2'] = df_psi['count2'].apply(lambda x: x / sum_2)
df_psi[['count1', 'count2']].replace(0, 0.001, inplace=True)
df_psi['psi_tmp'] = df_psi['count1'] / df_psi['count2']
df_psi['psi_tmp'] = df_psi['psi_tmp'].apply(lambda x: math.log(x))
df_psi['psi'] = (df_psi['count1'] - df_psi['count2']) * df_psi[
'psi_tmp']
return sum(df_psi['psi'])
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Get_res_DataFrame:
    """Build the DataFrames that make up a scorecard model report.

    sheet1: data overview
    sheet2: per-variable magnitude, performance and correlation
    sheet3: binning results and WOE
    sheet4: score-decile table per category (cut on a base sample), with KS

    Feed different DataFrames to get different views:
    ins/oot/oot2 splits; new/old-customer splits; month splits.
    """

    def __init__(self, lr, df, df_bin, df_woe, use_lst, woe_dic,
                 type_train='type_train', y='is_7_p'):
        """Store the fitted model and the raw/binned/WOE frames.

        lr: fitted linear model exposing ``coef_``.
        df / df_bin / df_woe: raw, binned and WOE-encoded frames.
        use_lst: feature column names used by every report method.
        woe_dic: {feature: {bin_label: woe}}.
        type_train: column holding the sample split ('ins'/'oot'/...).
        y: binary label column name.
        """
        self.df = df
        self.df_bin = df_bin
        self.df_woe = df_woe
        self.use_lst = use_lst
        self.woe_dic = woe_dic
        self.type_train = type_train
        self.model = lr
        self.y = y

    def main(self):
        """Print the calls a user is expected to run, in order."""
        print('d2_1 = self.get_2_1_imp()', 'd2_2 = self.get_2_2_des()',
              'd2_3 = self.get_2_3_corr()',
              "d3 = self.get_bin_ins_oot(type_lst=['ins', 'oot', 'oot2'])")

    def get_2_1_imp(self, df):
        """Per-feature summary: AUC, KS, ins/oot IV, coefficient, PSI, VIF."""
        # bugfix: dropped an unused cover_dic that read the undefined
        # global `use_lst` and raised NameError.
        d1 = DataFrame(index=self.use_lst)
        # Fold AUC around 0.5 so reverse-scored features still rank high.
        d1['auc'] = [round(0.5 + abs(0.5 - roc_auc_score(df[self.y], df[i])), 3)
                     for i in self.use_lst]
        # NOTE(review): float() assumes the KS peak row is unique - confirm.
        d1['ks'] = [round(float(self.ks_calc_cross(df, name, self.y)[0]['gap']), 3)
                    for name in self.use_lst]
        d1['ins_iv'] = [round(self.cal_iv(df[df[self.type_train] == 'ins'],
                                          name, self.y), 3)
                        for name in self.use_lst]
        d1['oot_iv'] = [round(self.cal_iv(df[df[self.type_train] == 'oot'],
                                          name, self.y), 3)
                        for name in self.use_lst]
        d1['coef'] = [round(i, 4) for i in self.model.coef_[0]]
        d1 = d1.reset_index()
        d1['psi'] = [round(self.cal_psi(df, name), 5) for name in self.use_lst]
        d1['vif'] = [round(variance_inflation_factor(np.matrix(df[self.use_lst]), i), 3)
                     for i in range(len(self.use_lst))]
        d1.index = range(1, d1.shape[0] + 1)
        return d1

    def get_2_2_des(self):
        """describe() of the features over ins+oot rows, plus coverage rate."""
        df = self.df[self.df[self.type_train].isin(['ins', 'oot'])]
        df_data_des = df[self.use_lst].describe().T
        # bugfix: the original read the undefined global `use_lst` here.
        cover_dic = dict(df[self.use_lst].notnull().sum())
        df_data_des = df_data_des.reset_index()
        df_data_des['cover'] = df_data_des['index'].apply(
            lambda x: round(cover_dic[x] / df.shape[0], 4))
        df_data_des.index = df_data_des['index']
        df_data_des.drop(columns=['index', 'count'], inplace=True)
        d2_2 = df_data_des.reset_index()
        d2_2.index = range(1, d2_2.shape[0] + 1)
        return d2_2

    def get_2_3_corr(self):
        """Pairwise correlation matrix of the WOE-encoded features."""
        corr = np.corrcoef(np.array(self.df_woe[self.use_lst]).T)
        d2_3 = DataFrame(corr, columns=range(len(self.use_lst)),
                         index=self.use_lst).reset_index()
        d2_3.index = range(1, d2_3.shape[0] + 1)
        return d2_3

    def get_bin_ins_oot(self, type_lst=['ins', 'oot', 'oot2']):
        """Bin-level bad/count/bad-rate/WOE tables per split, side by side."""
        res = []
        for loc, i in enumerate(type_lst):
            lst = []
            df_tmp = self.df_bin[self.df_bin[self.type_train] == i]
            for name in self.use_lst:
                dd_tmp = df_tmp.groupby(name).sum()[[self.y, 'count']]
                dd_tmp['bad_rate'] = dd_tmp[self.y] / dd_tmp['count']
                dd_tmp = dd_tmp.reset_index()
                dd_tmp['woe'] = dd_tmp[name].apply(lambda x: self.woe_dic[
                    name][x])
                dd_tmp.sort_values(by='bad_rate', inplace=True)
                # Order interval labels "(a, b]" by their left edge;
                # non-interval labels (e.g. a missing bucket) sort last.
                dd_tmp['sort_key'] = [(float(lab.split(',')[0][1:]) if lab[0]
                    == '(' else float('inf')) for lab in dd_tmp[name]]
                dd_tmp.sort_values(by='sort_key', inplace=True)
                dd_tmp.drop(columns=['sort_key'], inplace=True)
                name1 = '-'
                # Header row carrying the feature name, then the bin rows.
                d = DataFrame(columns=['slice', 'bad', 'count', 'bad_rio',
                    'woe'], data=[[str(name1), '-', '-', '-', '-']] +
                    dd_tmp.values.tolist()[:], index=[[name]] + ['-'] *
                    dd_tmp.shape[0])
                if loc < 1:
                    split_name = '<-->' + str(i)
                else:
                    split_name = str(type_lst[loc - 1]) + '<-->' + str(i)
                d[split_name] = [split_name] * d.shape[0]
                d = d[[split_name, 'slice', 'bad', 'count', 'bad_rio', 'woe']]
                lst.append(d)
            res.append(lst)
        # Stack each split's feature tables vertically, then splits side by side.
        return pd.concat((pd.concat(frames) for frames in res), axis=1)

    def get_categories_df(self, df, cate='type_new', base_cut='ins', y='final_score'):
        """Score-decile table per category; cut points come from `base_cut`.

        df: frame holding the category column, self.y and the score `y`.
        Returns a frame with bad/count/bad_rate/ks per (category, bin).
        """
        df_tmp = copy.deepcopy(df[[cate, self.y, y]])
        df_tmp.rename(columns={cate: 'category', self.y: 'bad'}, inplace=True)
        # Decile edges from the base category, opened to +/- inf.
        cut_line = list(np.percentile(list(df_tmp[df_tmp['category'] ==
            base_cut][y]), range(1, 101, 10)))
        cut_line[0] = -float('inf')
        cut_line.append(float('inf'))
        df_tmp['bins'] = pd.cut(df_tmp[y], bins=cut_line)
        df_tmp['count'] = 1
        ks_lst = []
        for i in sorted(Counter(df_tmp['category']).keys()):
            # bugfix: the original called a module-level ks_calc_cross that
            # does not exist; use the method on self instead.
            lst = list(self.ks_calc_cross(df_tmp[df_tmp['category'] == i],
                'bins', 'bad')[1]['gap'])
            while len(lst) < 10:
                lst = [0] + lst  # left-pad categories with empty leading bins
            ks_lst.extend(lst)
        df = df_tmp.groupby(['category', 'bins']).sum()[['bad', 'count']]
        df = df.reset_index()
        df['bad_rate'] = df['bad'] / df['count']
        df['ks'] = ks_lst
        for i in ['bad', 'count', 'bad_rate', 'ks']:
            df[i] = df[i].astype(float)
        df[['bad', 'count', 'bad_rate', 'ks']] = df[['bad', 'count',
            'bad_rate', 'ks']].fillna(0)
        df.index = range(1, df.shape[0] + 1)
        return df

    def ks_calc_cross(self, data, pred, y_label):
        """Kolmogorov-Smirnov statistic from a score/label cross-tab.

        data: DataFrame holding the model score and the true label.
        pred: score column name (raw scores or bins).
        y_label: binary label column name ({0, 1}).
        Returns (ks, crossdens): the row(s) where the cumulative gap peaks,
        and the full cumulative-distribution table with its 'gap' column.
        """
        crossfreq = pd.crosstab(data[pred], data[y_label])
        crossdens = crossfreq.cumsum(axis=0) / crossfreq.sum()
        crossdens['gap'] = abs(crossdens[0] - crossdens[1])
        ks = crossdens[crossdens['gap'] == crossdens['gap'].max()]
        return ks, crossdens

    def cal_iv(self, df1, x, y='is_7_p'):
        """Information Value of feature `x` against the binary label `y`."""
        df = copy.deepcopy(df1)
        if 'count' not in df.columns:
            df['count'] = 1
        df_tmp = df[[x, 'count', y]].groupby(x).sum()
        df_tmp['good'] = df_tmp['count'] - df_tmp[y]
        # Hoist the totals: the original re-summed the whole column inside
        # every lambda call (quadratic in the number of groups).
        bad_total = sum(df_tmp[y])
        good_total = sum(df_tmp['good'])
        # Floor at 1e-05 so empty buckets do not produce log(0).
        df_tmp[y] = df_tmp[y].apply(lambda v: max(v, 1e-05) / bad_total)
        df_tmp['good'] = df_tmp['good'].apply(lambda v: max(v, 1e-05) / good_total)
        df_tmp['woe'] = np.log(df_tmp[y] / df_tmp['good'])
        df_tmp['iv'] = (df_tmp[y] - df_tmp['good']) * df_tmp['woe']
        return df_tmp['iv'].sum()

    def cal_psi(self, df_sf_bin, name, lst=['ins', 'oot']):
        """Population Stability Index of binned feature `name` between the
        two samples named in `lst`.

        NOTE(review): reads the literal 'type_train' column rather than
        self.type_train - confirm that is intended.
        """
        name1, name2 = lst
        df_in = copy.deepcopy(df_sf_bin[df_sf_bin['type_train'] == name1])
        sum_1 = df_in.shape[0]
        df_in['count1'] = 1
        df_in = df_in.groupby(name).sum()[['count1']]
        df_out = copy.deepcopy(df_sf_bin[df_sf_bin['type_train'] == name2])
        sum_2 = df_out.shape[0]
        df_out['count2'] = 1
        df_out = df_out.groupby(name).sum()[['count2']]
        df_psi = pd.concat((df_in, df_out), axis=1)
        # Turn counts into per-sample proportions.
        df_psi['count1'] = df_psi['count1'] / sum_1
        df_psi['count2'] = df_psi['count2'] / sum_2
        # bugfix: the original .replace(..., inplace=True) ran on a slice
        # copy and never modified df_psi; assign the result back instead.
        df_psi[['count1', 'count2']] = df_psi[['count1', 'count2']].replace(0, 0.001)
        # bugfix: `math` was never imported (NameError); np.log is
        # elementwise over the Series and numerically identical.
        df_psi['psi_tmp'] = np.log(df_psi['count1'] / df_psi['count2'])
        df_psi['psi'] = (df_psi['count1'] - df_psi['count2']) * df_psi['psi_tmp']
        return sum(df_psi['psi'])
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Get_res_DataFrame:
    """Build the DataFrames that make up a scorecard model report.

    sheet1: data overview
    sheet2: per-variable magnitude, performance and correlation
    sheet3: binning results and WOE
    sheet4: score-decile table per category (cut on a base sample), with KS

    Feed different DataFrames to get different views:
    ins/oot/oot2 splits; new/old-customer splits; month splits.
    """

    def __init__(self, lr, df, df_bin, df_woe, use_lst, woe_dic,
                 type_train='type_train', y='is_7_p'):
        """Store the fitted model and the raw/binned/WOE frames.

        lr: fitted linear model exposing ``coef_``.
        df / df_bin / df_woe: raw, binned and WOE-encoded frames.
        use_lst: feature column names used by every report method.
        woe_dic: {feature: {bin_label: woe}}.
        type_train: column holding the sample split ('ins'/'oot'/...).
        y: binary label column name.
        """
        self.df = df
        self.df_bin = df_bin
        self.df_woe = df_woe
        self.use_lst = use_lst
        self.woe_dic = woe_dic
        self.type_train = type_train
        self.model = lr
        self.y = y

    def main(self):
        """Print the calls a user is expected to run, in order."""
        print('d2_1 = self.get_2_1_imp()', 'd2_2 = self.get_2_2_des()',
              'd2_3 = self.get_2_3_corr()',
              "d3 = self.get_bin_ins_oot(type_lst=['ins', 'oot', 'oot2'])")

    def get_2_1_imp(self, df):
        """Per-feature summary: AUC, KS, ins/oot IV, coefficient, PSI, VIF."""
        # bugfix: dropped an unused cover_dic that read the undefined
        # global `use_lst` and raised NameError.
        d1 = DataFrame(index=self.use_lst)
        # Fold AUC around 0.5 so reverse-scored features still rank high.
        d1['auc'] = [round(0.5 + abs(0.5 - roc_auc_score(df[self.y], df[i])), 3)
                     for i in self.use_lst]
        # NOTE(review): float() assumes the KS peak row is unique - confirm.
        d1['ks'] = [round(float(self.ks_calc_cross(df, name, self.y)[0]['gap']), 3)
                    for name in self.use_lst]
        d1['ins_iv'] = [round(self.cal_iv(df[df[self.type_train] == 'ins'],
                                          name, self.y), 3)
                        for name in self.use_lst]
        d1['oot_iv'] = [round(self.cal_iv(df[df[self.type_train] == 'oot'],
                                          name, self.y), 3)
                        for name in self.use_lst]
        d1['coef'] = [round(i, 4) for i in self.model.coef_[0]]
        d1 = d1.reset_index()
        d1['psi'] = [round(self.cal_psi(df, name), 5) for name in self.use_lst]
        d1['vif'] = [round(variance_inflation_factor(np.matrix(df[self.use_lst]), i), 3)
                     for i in range(len(self.use_lst))]
        d1.index = range(1, d1.shape[0] + 1)
        return d1

    def get_2_2_des(self):
        """describe() of the features over ins+oot rows, plus coverage rate."""
        df = self.df[self.df[self.type_train].isin(['ins', 'oot'])]
        df_data_des = df[self.use_lst].describe().T
        # bugfix: the original read the undefined global `use_lst` here.
        cover_dic = dict(df[self.use_lst].notnull().sum())
        df_data_des = df_data_des.reset_index()
        df_data_des['cover'] = df_data_des['index'].apply(
            lambda x: round(cover_dic[x] / df.shape[0], 4))
        df_data_des.index = df_data_des['index']
        df_data_des.drop(columns=['index', 'count'], inplace=True)
        d2_2 = df_data_des.reset_index()
        d2_2.index = range(1, d2_2.shape[0] + 1)
        return d2_2

    def get_2_3_corr(self):
        """Pairwise correlation matrix of the WOE-encoded features."""
        corr = np.corrcoef(np.array(self.df_woe[self.use_lst]).T)
        d2_3 = DataFrame(corr, columns=range(len(self.use_lst)),
                         index=self.use_lst).reset_index()
        d2_3.index = range(1, d2_3.shape[0] + 1)
        return d2_3

    def get_bin_ins_oot(self, type_lst=['ins', 'oot', 'oot2']):
        """Bin-level bad/count/bad-rate/WOE tables per split, side by side."""
        res = []
        for loc, i in enumerate(type_lst):
            lst = []
            df_tmp = self.df_bin[self.df_bin[self.type_train] == i]
            for name in self.use_lst:
                dd_tmp = df_tmp.groupby(name).sum()[[self.y, 'count']]
                dd_tmp['bad_rate'] = dd_tmp[self.y] / dd_tmp['count']
                dd_tmp = dd_tmp.reset_index()
                dd_tmp['woe'] = dd_tmp[name].apply(lambda x: self.woe_dic[
                    name][x])
                dd_tmp.sort_values(by='bad_rate', inplace=True)
                # Order interval labels "(a, b]" by their left edge;
                # non-interval labels (e.g. a missing bucket) sort last.
                dd_tmp['sort_key'] = [(float(lab.split(',')[0][1:]) if lab[0]
                    == '(' else float('inf')) for lab in dd_tmp[name]]
                dd_tmp.sort_values(by='sort_key', inplace=True)
                dd_tmp.drop(columns=['sort_key'], inplace=True)
                name1 = '-'
                # Header row carrying the feature name, then the bin rows.
                d = DataFrame(columns=['slice', 'bad', 'count', 'bad_rio',
                    'woe'], data=[[str(name1), '-', '-', '-', '-']] +
                    dd_tmp.values.tolist()[:], index=[[name]] + ['-'] *
                    dd_tmp.shape[0])
                if loc < 1:
                    split_name = '<-->' + str(i)
                else:
                    split_name = str(type_lst[loc - 1]) + '<-->' + str(i)
                d[split_name] = [split_name] * d.shape[0]
                d = d[[split_name, 'slice', 'bad', 'count', 'bad_rio', 'woe']]
                lst.append(d)
            res.append(lst)
        # Stack each split's feature tables vertically, then splits side by side.
        return pd.concat((pd.concat(frames) for frames in res), axis=1)

    def get_categories_df(self, df, cate='type_new', base_cut='ins', y='final_score'):
        """Score-decile table per category; cut points come from `base_cut`.

        df: frame holding the category column, self.y and the score `y`.
        Returns a frame with bad/count/bad_rate/ks per (category, bin).
        """
        df_tmp = copy.deepcopy(df[[cate, self.y, y]])
        df_tmp.rename(columns={cate: 'category', self.y: 'bad'}, inplace=True)
        # Decile edges from the base category, opened to +/- inf.
        cut_line = list(np.percentile(list(df_tmp[df_tmp['category'] ==
            base_cut][y]), range(1, 101, 10)))
        cut_line[0] = -float('inf')
        cut_line.append(float('inf'))
        df_tmp['bins'] = pd.cut(df_tmp[y], bins=cut_line)
        df_tmp['count'] = 1
        ks_lst = []
        for i in sorted(Counter(df_tmp['category']).keys()):
            # bugfix: the original called a module-level ks_calc_cross that
            # does not exist; use the method on self instead.
            lst = list(self.ks_calc_cross(df_tmp[df_tmp['category'] == i],
                'bins', 'bad')[1]['gap'])
            while len(lst) < 10:
                lst = [0] + lst  # left-pad categories with empty leading bins
            ks_lst.extend(lst)
        df = df_tmp.groupby(['category', 'bins']).sum()[['bad', 'count']]
        df = df.reset_index()
        df['bad_rate'] = df['bad'] / df['count']
        df['ks'] = ks_lst
        for i in ['bad', 'count', 'bad_rate', 'ks']:
            df[i] = df[i].astype(float)
        df[['bad', 'count', 'bad_rate', 'ks']] = df[['bad', 'count',
            'bad_rate', 'ks']].fillna(0)
        df.index = range(1, df.shape[0] + 1)
        return df

    def ks_calc_cross(self, data, pred, y_label):
        """Kolmogorov-Smirnov statistic from a score/label cross-tab.

        data: DataFrame holding the model score and the true label.
        pred: score column name (raw scores or bins).
        y_label: binary label column name ({0, 1}).
        Returns (ks, crossdens): the row(s) where the cumulative gap peaks,
        and the full cumulative-distribution table with its 'gap' column.
        """
        crossfreq = pd.crosstab(data[pred], data[y_label])
        crossdens = crossfreq.cumsum(axis=0) / crossfreq.sum()
        crossdens['gap'] = abs(crossdens[0] - crossdens[1])
        ks = crossdens[crossdens['gap'] == crossdens['gap'].max()]
        return ks, crossdens

    def cal_iv(self, df1, x, y='is_7_p'):
        """Information Value of feature `x` against the binary label `y`."""
        df = copy.deepcopy(df1)
        if 'count' not in df.columns:
            df['count'] = 1
        df_tmp = df[[x, 'count', y]].groupby(x).sum()
        df_tmp['good'] = df_tmp['count'] - df_tmp[y]
        # Hoist the totals: the original re-summed the whole column inside
        # every lambda call (quadratic in the number of groups).
        bad_total = sum(df_tmp[y])
        good_total = sum(df_tmp['good'])
        # Floor at 1e-05 so empty buckets do not produce log(0).
        df_tmp[y] = df_tmp[y].apply(lambda v: max(v, 1e-05) / bad_total)
        df_tmp['good'] = df_tmp['good'].apply(lambda v: max(v, 1e-05) / good_total)
        df_tmp['woe'] = np.log(df_tmp[y] / df_tmp['good'])
        df_tmp['iv'] = (df_tmp[y] - df_tmp['good']) * df_tmp['woe']
        return df_tmp['iv'].sum()

    def cal_psi(self, df_sf_bin, name, lst=['ins', 'oot']):
        """Population Stability Index of binned feature `name` between the
        two samples named in `lst`.

        NOTE(review): reads the literal 'type_train' column rather than
        self.type_train - confirm that is intended.
        """
        name1, name2 = lst
        df_in = copy.deepcopy(df_sf_bin[df_sf_bin['type_train'] == name1])
        sum_1 = df_in.shape[0]
        df_in['count1'] = 1
        df_in = df_in.groupby(name).sum()[['count1']]
        df_out = copy.deepcopy(df_sf_bin[df_sf_bin['type_train'] == name2])
        sum_2 = df_out.shape[0]
        df_out['count2'] = 1
        df_out = df_out.groupby(name).sum()[['count2']]
        df_psi = pd.concat((df_in, df_out), axis=1)
        # Turn counts into per-sample proportions.
        df_psi['count1'] = df_psi['count1'] / sum_1
        df_psi['count2'] = df_psi['count2'] / sum_2
        # bugfix: the original .replace(..., inplace=True) ran on a slice
        # copy and never modified df_psi; assign the result back instead.
        df_psi[['count1', 'count2']] = df_psi[['count1', 'count2']].replace(0, 0.001)
        # bugfix: `math` was never imported (NameError); np.log is
        # elementwise over the Series and numerically identical.
        df_psi['psi_tmp'] = np.log(df_psi['count1'] / df_psi['count2'])
        df_psi['psi'] = (df_psi['count1'] - df_psi['count2']) * df_psi['psi_tmp']
        return sum(df_psi['psi'])
if __name__ == '__main__':
    # Reference-only usage example, kept as a string so nothing executes on
    # import (the names lr, a, df_pb_woe, df_pb_all are defined elsewhere).
    s = """
c=Get_res_DataFrame(lr, a.df, a.df_bin, df_pb_woe, use_lst,a.woe_dic, type_train='type_train', y='is_7_p')
d2_1 = c.get_2_1_imp(df_pb_woe[df_pb_woe['customer_type_old']=='old_customer'])
d2_2 = c.get_2_2_des()
d2_3 = c.get_2_3_corr()
d3 = c.get_bin_ins_oot(type_lst=['ins', 'oot'])
d4 = c.get_categories_df(df_pb_all,cate='type_train',base_cut='ins', y='final_score')
#
df_new = df_pb_all[df_pb_all['customer_type_old']=='new_customer']
df_old = df_pb_all[df_pb_all['customer_type_old']=='old_customer']
#
d5_1 = c.get_categories_df(df_new,cate='type_train',base_cut='ins', y='final_score')
d5_2 = c.get_categories_df(df_old,cate='type_train',base_cut='ins', y='final_score')
d6_1 = c.get_categories_df(df_new,cate='month',base_cut='0', y='final_score')
d6_2 = c.get_categories_df(df_old,cate='month',base_cut='0', y='final_score')
"""
<|reserved_special_token_1|>
import copy
import pandas as pd
import numpy as np
from pandas import DataFrame
from collections import Counter
from sklearn.metrics import roc_auc_score, roc_curve
from statsmodels.stats.outliers_influence import variance_inflation_factor
class Get_res_DataFrame:
    """Build the DataFrames that make up a scorecard model report.

    sheet1: data overview
    sheet2: per-variable magnitude, performance and correlation
    sheet3: binning results and WOE
    sheet4: score-decile table per category (cut on a base sample), with KS

    Feed different DataFrames to get different views:
    ins/oot/oot2 splits; new/old-customer splits; month splits.
    """

    def __init__(self, lr, df, df_bin, df_woe, use_lst, woe_dic,
                 type_train='type_train', y='is_7_p'):
        """Store the fitted model and the raw/binned/WOE frames.

        lr: fitted linear model exposing ``coef_``.
        df / df_bin / df_woe: raw, binned and WOE-encoded frames.
        use_lst: feature column names used by every report method.
        woe_dic: {feature: {bin_label: woe}}.
        type_train: column holding the sample split ('ins'/'oot'/...).
        y: binary label column name.
        """
        self.df = df
        self.df_bin = df_bin
        self.df_woe = df_woe
        self.use_lst = use_lst
        self.woe_dic = woe_dic
        self.type_train = type_train
        self.model = lr
        self.y = y

    def main(self):
        """Print the calls a user is expected to run, in order."""
        print('d2_1 = self.get_2_1_imp()', 'd2_2 = self.get_2_2_des()',
              'd2_3 = self.get_2_3_corr()',
              "d3 = self.get_bin_ins_oot(type_lst=['ins', 'oot', 'oot2'])")

    def get_2_1_imp(self, df):
        """Per-feature summary: AUC, KS, ins/oot IV, coefficient, PSI, VIF."""
        # bugfix: dropped an unused cover_dic that read the undefined
        # global `use_lst` and raised NameError.
        d1 = DataFrame(index=self.use_lst)
        # Fold AUC around 0.5 so reverse-scored features still rank high.
        d1['auc'] = [round(0.5 + abs(0.5 - roc_auc_score(df[self.y], df[i])), 3)
                     for i in self.use_lst]
        # NOTE(review): float() assumes the KS peak row is unique - confirm.
        d1['ks'] = [round(float(self.ks_calc_cross(df, name, self.y)[0]['gap']), 3)
                    for name in self.use_lst]
        d1['ins_iv'] = [round(self.cal_iv(df[df[self.type_train] == 'ins'],
                                          name, self.y), 3)
                        for name in self.use_lst]
        d1['oot_iv'] = [round(self.cal_iv(df[df[self.type_train] == 'oot'],
                                          name, self.y), 3)
                        for name in self.use_lst]
        d1['coef'] = [round(i, 4) for i in self.model.coef_[0]]
        d1 = d1.reset_index()
        d1['psi'] = [round(self.cal_psi(df, name), 5) for name in self.use_lst]
        d1['vif'] = [round(variance_inflation_factor(np.matrix(df[self.use_lst]), i), 3)
                     for i in range(len(self.use_lst))]
        d1.index = range(1, d1.shape[0] + 1)
        return d1

    def get_2_2_des(self):
        """describe() of the features over ins+oot rows, plus coverage rate."""
        df = self.df[self.df[self.type_train].isin(['ins', 'oot'])]
        df_data_des = df[self.use_lst].describe().T
        # bugfix: the original read the undefined global `use_lst` here.
        cover_dic = dict(df[self.use_lst].notnull().sum())
        df_data_des = df_data_des.reset_index()
        df_data_des['cover'] = df_data_des['index'].apply(
            lambda x: round(cover_dic[x] / df.shape[0], 4))
        df_data_des.index = df_data_des['index']
        df_data_des.drop(columns=['index', 'count'], inplace=True)
        d2_2 = df_data_des.reset_index()
        d2_2.index = range(1, d2_2.shape[0] + 1)
        return d2_2

    def get_2_3_corr(self):
        """Pairwise correlation matrix of the WOE-encoded features."""
        corr = np.corrcoef(np.array(self.df_woe[self.use_lst]).T)
        d2_3 = DataFrame(corr, columns=range(len(self.use_lst)),
                         index=self.use_lst).reset_index()
        d2_3.index = range(1, d2_3.shape[0] + 1)
        return d2_3

    def get_bin_ins_oot(self, type_lst=['ins', 'oot', 'oot2']):
        """Bin-level bad/count/bad-rate/WOE tables per split, side by side."""
        res = []
        for loc, i in enumerate(type_lst):
            lst = []
            df_tmp = self.df_bin[self.df_bin[self.type_train] == i]
            for name in self.use_lst:
                dd_tmp = df_tmp.groupby(name).sum()[[self.y, 'count']]
                dd_tmp['bad_rate'] = dd_tmp[self.y] / dd_tmp['count']
                dd_tmp = dd_tmp.reset_index()
                dd_tmp['woe'] = dd_tmp[name].apply(lambda x: self.woe_dic[
                    name][x])
                dd_tmp.sort_values(by='bad_rate', inplace=True)
                # Order interval labels "(a, b]" by their left edge;
                # non-interval labels (e.g. a missing bucket) sort last.
                dd_tmp['sort_key'] = [(float(lab.split(',')[0][1:]) if lab[0]
                    == '(' else float('inf')) for lab in dd_tmp[name]]
                dd_tmp.sort_values(by='sort_key', inplace=True)
                dd_tmp.drop(columns=['sort_key'], inplace=True)
                name1 = '-'
                # Header row carrying the feature name, then the bin rows.
                d = DataFrame(columns=['slice', 'bad', 'count', 'bad_rio',
                    'woe'], data=[[str(name1), '-', '-', '-', '-']] +
                    dd_tmp.values.tolist()[:], index=[[name]] + ['-'] *
                    dd_tmp.shape[0])
                if loc < 1:
                    split_name = '<-->' + str(i)
                else:
                    split_name = str(type_lst[loc - 1]) + '<-->' + str(i)
                d[split_name] = [split_name] * d.shape[0]
                d = d[[split_name, 'slice', 'bad', 'count', 'bad_rio', 'woe']]
                lst.append(d)
            res.append(lst)
        # Stack each split's feature tables vertically, then splits side by side.
        return pd.concat((pd.concat(frames) for frames in res), axis=1)

    def get_categories_df(self, df, cate='type_new', base_cut='ins', y='final_score'):
        """Score-decile table per category; cut points come from `base_cut`.

        df: frame holding the category column, self.y and the score `y`.
        Returns a frame with bad/count/bad_rate/ks per (category, bin).
        """
        df_tmp = copy.deepcopy(df[[cate, self.y, y]])
        df_tmp.rename(columns={cate: 'category', self.y: 'bad'}, inplace=True)
        # Decile edges from the base category, opened to +/- inf.
        cut_line = list(np.percentile(list(df_tmp[df_tmp['category'] ==
            base_cut][y]), range(1, 101, 10)))
        cut_line[0] = -float('inf')
        cut_line.append(float('inf'))
        df_tmp['bins'] = pd.cut(df_tmp[y], bins=cut_line)
        df_tmp['count'] = 1
        ks_lst = []
        for i in sorted(Counter(df_tmp['category']).keys()):
            # bugfix: the original called a module-level ks_calc_cross that
            # does not exist; use the method on self instead.
            lst = list(self.ks_calc_cross(df_tmp[df_tmp['category'] == i],
                'bins', 'bad')[1]['gap'])
            while len(lst) < 10:
                lst = [0] + lst  # left-pad categories with empty leading bins
            ks_lst.extend(lst)
        df = df_tmp.groupby(['category', 'bins']).sum()[['bad', 'count']]
        df = df.reset_index()
        df['bad_rate'] = df['bad'] / df['count']
        df['ks'] = ks_lst
        for i in ['bad', 'count', 'bad_rate', 'ks']:
            df[i] = df[i].astype(float)
        df[['bad', 'count', 'bad_rate', 'ks']] = df[['bad', 'count',
            'bad_rate', 'ks']].fillna(0)
        df.index = range(1, df.shape[0] + 1)
        return df

    def ks_calc_cross(self, data, pred, y_label):
        """Kolmogorov-Smirnov statistic from a score/label cross-tab.

        data: DataFrame holding the model score and the true label.
        pred: score column name (raw scores or bins).
        y_label: binary label column name ({0, 1}).
        Returns (ks, crossdens): the row(s) where the cumulative gap peaks,
        and the full cumulative-distribution table with its 'gap' column.
        """
        crossfreq = pd.crosstab(data[pred], data[y_label])
        crossdens = crossfreq.cumsum(axis=0) / crossfreq.sum()
        crossdens['gap'] = abs(crossdens[0] - crossdens[1])
        ks = crossdens[crossdens['gap'] == crossdens['gap'].max()]
        return ks, crossdens

    def cal_iv(self, df1, x, y='is_7_p'):
        """Information Value of feature `x` against the binary label `y`."""
        df = copy.deepcopy(df1)
        if 'count' not in df.columns:
            df['count'] = 1
        df_tmp = df[[x, 'count', y]].groupby(x).sum()
        df_tmp['good'] = df_tmp['count'] - df_tmp[y]
        # Hoist the totals: the original re-summed the whole column inside
        # every lambda call (quadratic in the number of groups).
        bad_total = sum(df_tmp[y])
        good_total = sum(df_tmp['good'])
        # Floor at 1e-05 so empty buckets do not produce log(0).
        df_tmp[y] = df_tmp[y].apply(lambda v: max(v, 1e-05) / bad_total)
        df_tmp['good'] = df_tmp['good'].apply(lambda v: max(v, 1e-05) / good_total)
        df_tmp['woe'] = np.log(df_tmp[y] / df_tmp['good'])
        df_tmp['iv'] = (df_tmp[y] - df_tmp['good']) * df_tmp['woe']
        return df_tmp['iv'].sum()

    def cal_psi(self, df_sf_bin, name, lst=['ins', 'oot']):
        """Population Stability Index of binned feature `name` between the
        two samples named in `lst`.

        NOTE(review): reads the literal 'type_train' column rather than
        self.type_train - confirm that is intended.
        """
        name1, name2 = lst
        df_in = copy.deepcopy(df_sf_bin[df_sf_bin['type_train'] == name1])
        sum_1 = df_in.shape[0]
        df_in['count1'] = 1
        df_in = df_in.groupby(name).sum()[['count1']]
        df_out = copy.deepcopy(df_sf_bin[df_sf_bin['type_train'] == name2])
        sum_2 = df_out.shape[0]
        df_out['count2'] = 1
        df_out = df_out.groupby(name).sum()[['count2']]
        df_psi = pd.concat((df_in, df_out), axis=1)
        # Turn counts into per-sample proportions.
        df_psi['count1'] = df_psi['count1'] / sum_1
        df_psi['count2'] = df_psi['count2'] / sum_2
        # bugfix: the original .replace(..., inplace=True) ran on a slice
        # copy and never modified df_psi; assign the result back instead.
        df_psi[['count1', 'count2']] = df_psi[['count1', 'count2']].replace(0, 0.001)
        # bugfix: `math` was never imported (NameError); np.log is
        # elementwise over the Series and numerically identical.
        df_psi['psi_tmp'] = np.log(df_psi['count1'] / df_psi['count2'])
        df_psi['psi'] = (df_psi['count1'] - df_psi['count2']) * df_psi['psi_tmp']
        return sum(df_psi['psi'])
if __name__ == '__main__':
    # Reference-only usage example, kept as a string so nothing executes on
    # import (the names lr, a, df_pb_woe, df_pb_all are defined elsewhere).
    s = """
c=Get_res_DataFrame(lr, a.df, a.df_bin, df_pb_woe, use_lst,a.woe_dic, type_train='type_train', y='is_7_p')
d2_1 = c.get_2_1_imp(df_pb_woe[df_pb_woe['customer_type_old']=='old_customer'])
d2_2 = c.get_2_2_des()
d2_3 = c.get_2_3_corr()
d3 = c.get_bin_ins_oot(type_lst=['ins', 'oot'])
d4 = c.get_categories_df(df_pb_all,cate='type_train',base_cut='ins', y='final_score')
#
df_new = df_pb_all[df_pb_all['customer_type_old']=='new_customer']
df_old = df_pb_all[df_pb_all['customer_type_old']=='old_customer']
#
d5_1 = c.get_categories_df(df_new,cate='type_train',base_cut='ins', y='final_score')
d5_2 = c.get_categories_df(df_old,cate='type_train',base_cut='ins', y='final_score')
d6_1 = c.get_categories_df(df_new,cate='month',base_cut='0', y='final_score')
d6_2 = c.get_categories_df(df_old,cate='month',base_cut='0', y='final_score')
"""
<|reserved_special_token_1|>
import copy
import pandas as pd
import numpy as np
from pandas import DataFrame
from collections import Counter
from sklearn.metrics import roc_auc_score, roc_curve
from statsmodels.stats.outliers_influence import variance_inflation_factor
class Get_res_DataFrame:
    """Build the DataFrames that make up a scorecard model report.

    sheet1: data overview
    sheet2: per-variable magnitude, performance and correlation
    sheet3: binning results and WOE
    sheet4: score-decile table per category (cut on a base sample), with KS

    Feed different DataFrames to get different views:
    ins/oot/oot2 splits; new/old-customer splits; month splits.
    """

    def __init__(self, lr, df, df_bin, df_woe, use_lst, woe_dic,
                 type_train='type_train', y='is_7_p'):
        """Store the fitted model and the raw/binned/WOE frames.

        lr: fitted linear model exposing ``coef_``.
        df / df_bin / df_woe: raw, binned and WOE-encoded frames.
        use_lst: feature column names used by every report method.
        woe_dic: {feature: {bin_label: woe}}.
        type_train: column holding the sample split ('ins'/'oot'/...).
        y: binary label column name.
        """
        self.df = df
        self.df_bin = df_bin
        self.df_woe = df_woe
        self.use_lst = use_lst
        self.woe_dic = woe_dic
        self.type_train = type_train
        self.model = lr
        self.y = y

    def main(self):
        """Print the calls a user is expected to run, in order."""
        print('d2_1 = self.get_2_1_imp()', 'd2_2 = self.get_2_2_des()',
              'd2_3 = self.get_2_3_corr()',
              "d3 = self.get_bin_ins_oot(type_lst=['ins', 'oot', 'oot2'])")

    def get_2_1_imp(self, df):
        """Per-feature summary: AUC, KS, ins/oot IV, coefficient, PSI, VIF."""
        # bugfix: dropped an unused cover_dic that read the undefined
        # global `use_lst` and raised NameError.
        d1 = DataFrame(index=self.use_lst)
        # Fold AUC around 0.5 so reverse-scored features still rank high.
        d1['auc'] = [round(0.5 + abs(0.5 - roc_auc_score(df[self.y], df[i])), 3)
                     for i in self.use_lst]
        # NOTE(review): float() assumes the KS peak row is unique - confirm.
        d1['ks'] = [round(float(self.ks_calc_cross(df, name, self.y)[0]['gap']), 3)
                    for name in self.use_lst]
        d1['ins_iv'] = [round(self.cal_iv(df[df[self.type_train] == 'ins'],
                                          name, self.y), 3)
                        for name in self.use_lst]
        d1['oot_iv'] = [round(self.cal_iv(df[df[self.type_train] == 'oot'],
                                          name, self.y), 3)
                        for name in self.use_lst]
        d1['coef'] = [round(i, 4) for i in self.model.coef_[0]]
        d1 = d1.reset_index()
        d1['psi'] = [round(self.cal_psi(df, name), 5) for name in self.use_lst]
        d1['vif'] = [round(variance_inflation_factor(np.matrix(df[self.use_lst]), i), 3)
                     for i in range(len(self.use_lst))]
        d1.index = range(1, d1.shape[0] + 1)
        return d1

    def get_2_2_des(self):
        """describe() of the features over ins+oot rows, plus coverage rate."""
        df = self.df[self.df[self.type_train].isin(['ins', 'oot'])]
        df_data_des = df[self.use_lst].describe().T
        # bugfix: the original read the undefined global `use_lst` here.
        cover_dic = dict(df[self.use_lst].notnull().sum())
        df_data_des = df_data_des.reset_index()
        df_data_des['cover'] = df_data_des['index'].apply(
            lambda x: round(cover_dic[x] / df.shape[0], 4))
        df_data_des.index = df_data_des['index']
        df_data_des.drop(columns=['index', 'count'], inplace=True)
        d2_2 = df_data_des.reset_index()
        d2_2.index = range(1, d2_2.shape[0] + 1)
        return d2_2

    def get_2_3_corr(self):
        """Pairwise correlation matrix of the WOE-encoded features."""
        corr = np.corrcoef(np.array(self.df_woe[self.use_lst]).T)
        d2_3 = DataFrame(corr, columns=range(len(self.use_lst)),
                         index=self.use_lst).reset_index()
        d2_3.index = range(1, d2_3.shape[0] + 1)
        return d2_3

    def get_bin_ins_oot(self, type_lst=['ins', 'oot', 'oot2']):
        """Bin-level bad/count/bad-rate/WOE tables per split, side by side."""
        res = []
        for loc, i in enumerate(type_lst):
            lst = []
            df_tmp = self.df_bin[self.df_bin[self.type_train] == i]
            for name in self.use_lst:
                dd_tmp = df_tmp.groupby(name).sum()[[self.y, 'count']]
                dd_tmp['bad_rate'] = dd_tmp[self.y] / dd_tmp['count']
                dd_tmp = dd_tmp.reset_index()
                dd_tmp['woe'] = dd_tmp[name].apply(lambda x: self.woe_dic[
                    name][x])
                dd_tmp.sort_values(by='bad_rate', inplace=True)
                # Order interval labels "(a, b]" by their left edge;
                # non-interval labels (e.g. a missing bucket) sort last.
                dd_tmp['sort_key'] = [(float(lab.split(',')[0][1:]) if lab[0]
                    == '(' else float('inf')) for lab in dd_tmp[name]]
                dd_tmp.sort_values(by='sort_key', inplace=True)
                dd_tmp.drop(columns=['sort_key'], inplace=True)
                name1 = '-'
                # Header row carrying the feature name, then the bin rows.
                d = DataFrame(columns=['slice', 'bad', 'count', 'bad_rio',
                    'woe'], data=[[str(name1), '-', '-', '-', '-']] +
                    dd_tmp.values.tolist()[:], index=[[name]] + ['-'] *
                    dd_tmp.shape[0])
                if loc < 1:
                    split_name = '<-->' + str(i)
                else:
                    split_name = str(type_lst[loc - 1]) + '<-->' + str(i)
                d[split_name] = [split_name] * d.shape[0]
                d = d[[split_name, 'slice', 'bad', 'count', 'bad_rio', 'woe']]
                lst.append(d)
            res.append(lst)
        # Stack each split's feature tables vertically, then splits side by side.
        return pd.concat((pd.concat(frames) for frames in res), axis=1)

    def get_categories_df(self, df, cate='type_new', base_cut='ins', y='final_score'):
        """Score-decile table per category; cut points come from `base_cut`.

        df: frame holding the category column, self.y and the score `y`.
        Returns a frame with bad/count/bad_rate/ks per (category, bin).
        """
        df_tmp = copy.deepcopy(df[[cate, self.y, y]])
        df_tmp.rename(columns={cate: 'category', self.y: 'bad'}, inplace=True)
        # Decile edges from the base category, opened to +/- inf.
        cut_line = list(np.percentile(list(df_tmp[df_tmp['category'] ==
            base_cut][y]), range(1, 101, 10)))
        cut_line[0] = -float('inf')
        cut_line.append(float('inf'))
        df_tmp['bins'] = pd.cut(df_tmp[y], bins=cut_line)
        df_tmp['count'] = 1
        ks_lst = []
        for i in sorted(Counter(df_tmp['category']).keys()):
            # bugfix: the original called a module-level ks_calc_cross that
            # does not exist; use the method on self instead.
            lst = list(self.ks_calc_cross(df_tmp[df_tmp['category'] == i],
                'bins', 'bad')[1]['gap'])
            while len(lst) < 10:
                lst = [0] + lst  # left-pad categories with empty leading bins
            ks_lst.extend(lst)
        df = df_tmp.groupby(['category', 'bins']).sum()[['bad', 'count']]
        df = df.reset_index()
        df['bad_rate'] = df['bad'] / df['count']
        df['ks'] = ks_lst
        for i in ['bad', 'count', 'bad_rate', 'ks']:
            df[i] = df[i].astype(float)
        df[['bad', 'count', 'bad_rate', 'ks']] = df[['bad', 'count',
            'bad_rate', 'ks']].fillna(0)
        df.index = range(1, df.shape[0] + 1)
        return df

    def ks_calc_cross(self, data, pred, y_label):
        """Kolmogorov-Smirnov statistic from a score/label cross-tab.

        data: DataFrame holding the model score and the true label.
        pred: score column name (raw scores or bins).
        y_label: binary label column name ({0, 1}).
        Returns (ks, crossdens): the row(s) where the cumulative gap peaks,
        and the full cumulative-distribution table with its 'gap' column.
        """
        crossfreq = pd.crosstab(data[pred], data[y_label])
        crossdens = crossfreq.cumsum(axis=0) / crossfreq.sum()
        crossdens['gap'] = abs(crossdens[0] - crossdens[1])
        ks = crossdens[crossdens['gap'] == crossdens['gap'].max()]
        return ks, crossdens

    def cal_iv(self, df1, x, y='is_7_p'):
        """Information Value of feature `x` against the binary label `y`."""
        df = copy.deepcopy(df1)
        if 'count' not in df.columns:
            df['count'] = 1
        df_tmp = df[[x, 'count', y]].groupby(x).sum()
        df_tmp['good'] = df_tmp['count'] - df_tmp[y]
        # Hoist the totals: the original re-summed the whole column inside
        # every lambda call (quadratic in the number of groups).
        bad_total = sum(df_tmp[y])
        good_total = sum(df_tmp['good'])
        # Floor at 1e-05 so empty buckets do not produce log(0).
        df_tmp[y] = df_tmp[y].apply(lambda v: max(v, 1e-05) / bad_total)
        df_tmp['good'] = df_tmp['good'].apply(lambda v: max(v, 1e-05) / good_total)
        df_tmp['woe'] = np.log(df_tmp[y] / df_tmp['good'])
        df_tmp['iv'] = (df_tmp[y] - df_tmp['good']) * df_tmp['woe']
        return df_tmp['iv'].sum()

    def cal_psi(self, df_sf_bin, name, lst=['ins', 'oot']):
        """Population Stability Index of binned feature `name` between the
        two samples named in `lst`.

        NOTE(review): reads the literal 'type_train' column rather than
        self.type_train - confirm that is intended.
        """
        name1, name2 = lst
        df_in = copy.deepcopy(df_sf_bin[df_sf_bin['type_train'] == name1])
        sum_1 = df_in.shape[0]
        df_in['count1'] = 1
        df_in = df_in.groupby(name).sum()[['count1']]
        df_out = copy.deepcopy(df_sf_bin[df_sf_bin['type_train'] == name2])
        sum_2 = df_out.shape[0]
        df_out['count2'] = 1
        df_out = df_out.groupby(name).sum()[['count2']]
        df_psi = pd.concat((df_in, df_out), axis=1)
        # Turn counts into per-sample proportions.
        df_psi['count1'] = df_psi['count1'] / sum_1
        df_psi['count2'] = df_psi['count2'] / sum_2
        # bugfix: the original .replace(..., inplace=True) ran on a slice
        # copy and never modified df_psi; assign the result back instead.
        df_psi[['count1', 'count2']] = df_psi[['count1', 'count2']].replace(0, 0.001)
        # bugfix: `math` was never imported (NameError); np.log is
        # elementwise over the Series and numerically identical.
        df_psi['psi_tmp'] = np.log(df_psi['count1'] / df_psi['count2'])
        df_psi['psi'] = (df_psi['count1'] - df_psi['count2']) * df_psi['psi_tmp']
        return sum(df_psi['psi'])
if __name__ == '__main__':
    # Reference-only usage example, kept as a string so nothing executes on
    # import (the names lr, a, df_pb_woe, df_pb_all are defined elsewhere).
    s = '''
c=Get_res_DataFrame(lr, a.df, a.df_bin, df_pb_woe, use_lst,a.woe_dic, type_train='type_train', y='is_7_p')
d2_1 = c.get_2_1_imp(df_pb_woe[df_pb_woe['customer_type_old']=='old_customer'])
d2_2 = c.get_2_2_des()
d2_3 = c.get_2_3_corr()
d3 = c.get_bin_ins_oot(type_lst=['ins', 'oot'])
d4 = c.get_categories_df(df_pb_all,cate='type_train',base_cut='ins', y='final_score')
#
df_new = df_pb_all[df_pb_all['customer_type_old']=='new_customer']
df_old = df_pb_all[df_pb_all['customer_type_old']=='old_customer']
#
d5_1 = c.get_categories_df(df_new,cate='type_train',base_cut='ins', y='final_score')
d5_2 = c.get_categories_df(df_old,cate='type_train',base_cut='ins', y='final_score')
d6_1 = c.get_categories_df(df_new,cate='month',base_cut='0', y='final_score')
d6_2 = c.get_categories_df(df_old,cate='month',base_cut='0', y='final_score')
'''
|
flexible
|
{
"blob_id": "6336b31e51f0565c6b34ab5148645748fe899541",
"index": 3829,
"step-1": "<mask token>\n\n\nclass Get_res_DataFrame:\n <mask token>\n\n def __init__(self, lr, df, df_bin, df_woe, use_lst, woe_dic, type_train\n ='type_train', y='is_7_p'):\n self.df = df\n self.df_bin = df_bin\n self.df_woe = df_woe\n self.use_lst = use_lst\n self.woe_dic = woe_dic\n self.type_train = type_train\n self.model = lr\n self.y = y\n\n def main(self):\n print('d2_1 = self.get_2_1_imp()', 'd2_2 = self.get_2_2_des()',\n 'd2_3 = self.get_2_3_corr()',\n \"d3 = self.get_bin_ins_oot(type_lst=['ins', 'oot', 'oot2'])\")\n\n def get_2_1_imp(self, df):\n d1 = DataFrame(index=self.use_lst)\n cover_dic = dict(df[use_lst].notnull().sum())\n d1['auc'] = [round(0.5 + abs(0.5 - roc_auc_score(df[self.y], df[i])\n ), 3) for i in self.use_lst]\n d1['ks'] = [round(float(self.ks_calc_cross(df, name, self.y)[0][\n 'gap']), 3) for name in self.use_lst]\n d1['ins_iv'] = [round(self.cal_iv(df[df[self.type_train] == 'ins'],\n name, self.y), 3) for name in self.use_lst]\n d1['oot_iv'] = [round(self.cal_iv(df[df[self.type_train] == 'oot'],\n name, self.y), 3) for name in self.use_lst]\n d1['coef'] = [round(i, 4) for i in self.model.coef_[0]]\n d1 = d1.reset_index()\n d1['psi'] = [round(self.cal_psi(df, name), 5) for name in self.use_lst]\n d1['vif'] = [round(variance_inflation_factor(np.matrix(df[self.\n use_lst]), i), 3) for i in range(len(self.use_lst))]\n d1.index = range(1, d1.shape[0] + 1)\n return d1\n\n def get_2_2_des(self):\n df = self.df[self.df[self.type_train].isin(['ins', 'oot'])]\n df_data_des = df[self.use_lst].describe().T\n cover_dic = dict(df[use_lst].notnull().sum())\n df_data_des = df_data_des.reset_index()\n df_data_des['cover'] = df_data_des['index'].apply(lambda x: round(\n cover_dic[x] / df.shape[0], 4))\n df_data_des.index = df_data_des['index']\n df_data_des.drop(columns=['index', 'count'], inplace=True)\n d2_2 = df_data_des.reset_index()\n d2_2.index = range(1, d2_2.shape[0] + 1)\n return d2_2\n\n def get_2_3_corr(self):\n corr = 
np.corrcoef(np.array(self.df_woe[self.use_lst]).T)\n d2_3 = DataFrame(corr, columns=range(len(self.use_lst)), index=self\n .use_lst).reset_index()\n d2_3.index = range(1, d2_3.shape[0] + 1)\n return d2_3\n\n def get_bin_ins_oot(self, type_lst=['ins', 'oot', 'oot2']):\n res = []\n for loc, i in enumerate(type_lst):\n lst = []\n df_tmp = self.df_bin[self.df_bin[self.type_train] == i]\n for name in self.use_lst:\n dd_tmp = df_tmp.groupby(name).sum()[[self.y, 'count']]\n dd_tmp['bad_rate'] = dd_tmp[self.y] / dd_tmp['count']\n dd_tmp = dd_tmp.reset_index()\n dd_tmp['woe'] = dd_tmp[name].apply(lambda x: self.woe_dic[\n name][x])\n dd_tmp.sort_values(by='bad_rate', inplace=True)\n dd_tmp['sort_key'] = [(float(i.split(',')[0][1:]) if i[0] ==\n '(' else float('inf')) for i in dd_tmp[name]]\n dd_tmp.sort_values(by='sort_key', inplace=True)\n dd_tmp.drop(columns=['sort_key'], inplace=True)\n name1 = '-'\n d = DataFrame(columns=['slice', 'bad', 'count', 'bad_rio',\n 'woe'], data=[[str(name1), '-', '-', '-', '-']] +\n dd_tmp.values.tolist()[:], index=[[name]] + ['-'] *\n dd_tmp.shape[0])\n if loc < 1:\n split_name = '<-->' + str(i)\n else:\n split_name = str(type_lst[loc - 1]) + '<-->' + str(i)\n d[split_name] = [split_name for i in range(d.shape[0])]\n d = d[[split_name, 'slice', 'bad', 'count', 'bad_rio', 'woe']]\n lst.append(d)\n res.append(lst)\n return pd.concat((pd.concat(i for i in res[i]) for i in range(len(\n type_lst))), axis=1)\n\n def get_categories_df(self, df, cate='type_new', base_cut='ins', y=\n 'final_score'):\n df_tmp = copy.deepcopy(df[[cate, self.y, y]])\n df_tmp.rename(columns={cate: 'category', self.y: 'bad'}, inplace=True)\n cut_line = list(np.percentile(list(df_tmp[df_tmp['category'] ==\n base_cut][y]), range(1, 101, 10)))\n cut_line[0] = -float('inf')\n cut_line.append(float('inf'))\n df_tmp['bins'] = pd.cut(df_tmp[y], bins=cut_line)\n df_tmp['count'] = [(1) for i in range(df_tmp.shape[0])]\n ks_lst = []\n for i in 
sorted(Counter(df_tmp['category']).keys()):\n lst = list(ks_calc_cross(df_tmp[df_tmp['category'] == i],\n 'bins', 'bad')[1]['gap'])\n while len(lst) < 10:\n lst = [0] + lst\n ks_lst.extend(lst)\n df = df_tmp.groupby(['category', 'bins']).sum()[['bad', 'count']]\n df = df.reset_index()\n df['bad_rate'] = df['bad'] / df['count']\n df['ks'] = ks_lst\n for i in ['bad', 'count', 'bad_rate', 'ks']:\n df[i] = df[i].astype(float)\n df[['bad', 'count', 'bad_rate', 'ks']] = df[['bad', 'count',\n 'bad_rate', 'ks']].fillna(0)\n df.index = range(1, df.shape[0] + 1)\n return df\n\n def ks_calc_cross(self, data, pred, y_label):\n \"\"\"\n 功能: 计算KS值,输出对应分割点和累计分布函数曲线图\n 输入值:\n data: 二维数组或dataframe,包括模型得分和真实的标签\n pred: 一维数组或series,代表模型得分(一般为预测正类的概率)\n y_label: 一维数组或series,代表真实的标签({0,1}或{-1,1})\n 输出值:\n 'ks': KS值,'crossdens': 好坏客户累积概率分布以及其差值gap\n \"\"\"\n crossfreq = pd.crosstab(data[pred], data[y_label])\n crossdens = crossfreq.cumsum(axis=0) / crossfreq.sum()\n crossdens['gap'] = abs(crossdens[0] - crossdens[1])\n ks = crossdens[crossdens['gap'] == crossdens['gap'].max()]\n return ks, crossdens\n\n def cal_iv(self, df1, x, y='is_7_p'):\n df = copy.deepcopy(df1)\n if 'count' not in df.columns:\n df['count'] = [(1) for i in range(df.shape[0])]\n df_tmp = df[[x, 'count', y]].groupby(x).sum()\n df_tmp['good'] = df_tmp['count'] - df_tmp[y]\n df_tmp[y] = df_tmp[y].apply(lambda x: max(x, 1e-05) / sum(df_tmp[y]))\n df_tmp['good'] = df_tmp['good'].apply(lambda x: max(x, 1e-05) / sum\n (df_tmp['good']))\n df_tmp['woe'] = np.log(df_tmp[y] / df_tmp['good'])\n df_tmp['iv'] = (df_tmp[y] - df_tmp['good']) * df_tmp['woe']\n return df_tmp['iv'].sum()\n\n def cal_psi(self, df_sf_bin, name, lst=['ins', 'oot']):\n name1, name2 = lst\n df_in = copy.deepcopy(df_sf_bin[df_sf_bin['type_train'] == name1])\n sum_1 = df_in.shape[0]\n df_in['count1'] = [(1) for i in range(sum_1)]\n df_in = df_in.groupby(name).sum()[['count1']]\n df_out = copy.deepcopy(df_sf_bin[df_sf_bin['type_train'] == name2])\n sum_2 = 
df_out.shape[0]\n df_out['count2'] = [(1) for i in range(sum_2)]\n df_out = df_out.groupby(name).sum()[['count2']]\n df_psi = pd.concat((df_in, df_out), axis=1)\n df_psi['count1'] = df_psi['count1'].apply(lambda x: x / sum_1)\n df_psi['count2'] = df_psi['count2'].apply(lambda x: x / sum_2)\n df_psi[['count1', 'count2']].replace(0, 0.001, inplace=True)\n df_psi['psi_tmp'] = df_psi['count1'] / df_psi['count2']\n df_psi['psi_tmp'] = df_psi['psi_tmp'].apply(lambda x: math.log(x))\n df_psi['psi'] = (df_psi['count1'] - df_psi['count2']) * df_psi[\n 'psi_tmp']\n return sum(df_psi['psi'])\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Get_res_DataFrame:\n \"\"\"\n sheet1:数据概况\n sheet2:变量的大小,效果,相关性 ok\n sheet3:分箱结果及woe ok\n sheet4:按单一类别分 输入 df[['类别', 'final_score']] cut_line依据 输出 并计算ks\n \n \n 通过输入不同的df来返回不同的df分析\n ins,oot,oot2 第一个函数\n 新老客区分 第一个函数 输入df_new, df_old, type_train\n 月份区分 第一个函数 输入df_new , df_old, month\n \"\"\"\n\n def __init__(self, lr, df, df_bin, df_woe, use_lst, woe_dic, type_train\n ='type_train', y='is_7_p'):\n self.df = df\n self.df_bin = df_bin\n self.df_woe = df_woe\n self.use_lst = use_lst\n self.woe_dic = woe_dic\n self.type_train = type_train\n self.model = lr\n self.y = y\n\n def main(self):\n print('d2_1 = self.get_2_1_imp()', 'd2_2 = self.get_2_2_des()',\n 'd2_3 = self.get_2_3_corr()',\n \"d3 = self.get_bin_ins_oot(type_lst=['ins', 'oot', 'oot2'])\")\n\n def get_2_1_imp(self, df):\n d1 = DataFrame(index=self.use_lst)\n cover_dic = dict(df[use_lst].notnull().sum())\n d1['auc'] = [round(0.5 + abs(0.5 - roc_auc_score(df[self.y], df[i])\n ), 3) for i in self.use_lst]\n d1['ks'] = [round(float(self.ks_calc_cross(df, name, self.y)[0][\n 'gap']), 3) for name in self.use_lst]\n d1['ins_iv'] = [round(self.cal_iv(df[df[self.type_train] == 'ins'],\n name, self.y), 3) for name in self.use_lst]\n d1['oot_iv'] = [round(self.cal_iv(df[df[self.type_train] == 'oot'],\n name, self.y), 3) for name in self.use_lst]\n d1['coef'] = [round(i, 4) for i in self.model.coef_[0]]\n d1 = d1.reset_index()\n d1['psi'] = [round(self.cal_psi(df, name), 5) for name in self.use_lst]\n d1['vif'] = [round(variance_inflation_factor(np.matrix(df[self.\n use_lst]), i), 3) for i in range(len(self.use_lst))]\n d1.index = range(1, d1.shape[0] + 1)\n return d1\n\n def get_2_2_des(self):\n df = self.df[self.df[self.type_train].isin(['ins', 'oot'])]\n df_data_des = df[self.use_lst].describe().T\n cover_dic = dict(df[use_lst].notnull().sum())\n df_data_des = df_data_des.reset_index()\n df_data_des['cover'] = df_data_des['index'].apply(lambda x: round(\n cover_dic[x] / df.shape[0], 
4))\n df_data_des.index = df_data_des['index']\n df_data_des.drop(columns=['index', 'count'], inplace=True)\n d2_2 = df_data_des.reset_index()\n d2_2.index = range(1, d2_2.shape[0] + 1)\n return d2_2\n\n def get_2_3_corr(self):\n corr = np.corrcoef(np.array(self.df_woe[self.use_lst]).T)\n d2_3 = DataFrame(corr, columns=range(len(self.use_lst)), index=self\n .use_lst).reset_index()\n d2_3.index = range(1, d2_3.shape[0] + 1)\n return d2_3\n\n def get_bin_ins_oot(self, type_lst=['ins', 'oot', 'oot2']):\n res = []\n for loc, i in enumerate(type_lst):\n lst = []\n df_tmp = self.df_bin[self.df_bin[self.type_train] == i]\n for name in self.use_lst:\n dd_tmp = df_tmp.groupby(name).sum()[[self.y, 'count']]\n dd_tmp['bad_rate'] = dd_tmp[self.y] / dd_tmp['count']\n dd_tmp = dd_tmp.reset_index()\n dd_tmp['woe'] = dd_tmp[name].apply(lambda x: self.woe_dic[\n name][x])\n dd_tmp.sort_values(by='bad_rate', inplace=True)\n dd_tmp['sort_key'] = [(float(i.split(',')[0][1:]) if i[0] ==\n '(' else float('inf')) for i in dd_tmp[name]]\n dd_tmp.sort_values(by='sort_key', inplace=True)\n dd_tmp.drop(columns=['sort_key'], inplace=True)\n name1 = '-'\n d = DataFrame(columns=['slice', 'bad', 'count', 'bad_rio',\n 'woe'], data=[[str(name1), '-', '-', '-', '-']] +\n dd_tmp.values.tolist()[:], index=[[name]] + ['-'] *\n dd_tmp.shape[0])\n if loc < 1:\n split_name = '<-->' + str(i)\n else:\n split_name = str(type_lst[loc - 1]) + '<-->' + str(i)\n d[split_name] = [split_name for i in range(d.shape[0])]\n d = d[[split_name, 'slice', 'bad', 'count', 'bad_rio', 'woe']]\n lst.append(d)\n res.append(lst)\n return pd.concat((pd.concat(i for i in res[i]) for i in range(len(\n type_lst))), axis=1)\n\n def get_categories_df(self, df, cate='type_new', base_cut='ins', y=\n 'final_score'):\n df_tmp = copy.deepcopy(df[[cate, self.y, y]])\n df_tmp.rename(columns={cate: 'category', self.y: 'bad'}, inplace=True)\n cut_line = list(np.percentile(list(df_tmp[df_tmp['category'] ==\n base_cut][y]), range(1, 101, 
10)))\n cut_line[0] = -float('inf')\n cut_line.append(float('inf'))\n df_tmp['bins'] = pd.cut(df_tmp[y], bins=cut_line)\n df_tmp['count'] = [(1) for i in range(df_tmp.shape[0])]\n ks_lst = []\n for i in sorted(Counter(df_tmp['category']).keys()):\n lst = list(ks_calc_cross(df_tmp[df_tmp['category'] == i],\n 'bins', 'bad')[1]['gap'])\n while len(lst) < 10:\n lst = [0] + lst\n ks_lst.extend(lst)\n df = df_tmp.groupby(['category', 'bins']).sum()[['bad', 'count']]\n df = df.reset_index()\n df['bad_rate'] = df['bad'] / df['count']\n df['ks'] = ks_lst\n for i in ['bad', 'count', 'bad_rate', 'ks']:\n df[i] = df[i].astype(float)\n df[['bad', 'count', 'bad_rate', 'ks']] = df[['bad', 'count',\n 'bad_rate', 'ks']].fillna(0)\n df.index = range(1, df.shape[0] + 1)\n return df\n\n def ks_calc_cross(self, data, pred, y_label):\n \"\"\"\n 功能: 计算KS值,输出对应分割点和累计分布函数曲线图\n 输入值:\n data: 二维数组或dataframe,包括模型得分和真实的标签\n pred: 一维数组或series,代表模型得分(一般为预测正类的概率)\n y_label: 一维数组或series,代表真实的标签({0,1}或{-1,1})\n 输出值:\n 'ks': KS值,'crossdens': 好坏客户累积概率分布以及其差值gap\n \"\"\"\n crossfreq = pd.crosstab(data[pred], data[y_label])\n crossdens = crossfreq.cumsum(axis=0) / crossfreq.sum()\n crossdens['gap'] = abs(crossdens[0] - crossdens[1])\n ks = crossdens[crossdens['gap'] == crossdens['gap'].max()]\n return ks, crossdens\n\n def cal_iv(self, df1, x, y='is_7_p'):\n df = copy.deepcopy(df1)\n if 'count' not in df.columns:\n df['count'] = [(1) for i in range(df.shape[0])]\n df_tmp = df[[x, 'count', y]].groupby(x).sum()\n df_tmp['good'] = df_tmp['count'] - df_tmp[y]\n df_tmp[y] = df_tmp[y].apply(lambda x: max(x, 1e-05) / sum(df_tmp[y]))\n df_tmp['good'] = df_tmp['good'].apply(lambda x: max(x, 1e-05) / sum\n (df_tmp['good']))\n df_tmp['woe'] = np.log(df_tmp[y] / df_tmp['good'])\n df_tmp['iv'] = (df_tmp[y] - df_tmp['good']) * df_tmp['woe']\n return df_tmp['iv'].sum()\n\n def cal_psi(self, df_sf_bin, name, lst=['ins', 'oot']):\n name1, name2 = lst\n df_in = copy.deepcopy(df_sf_bin[df_sf_bin['type_train'] == name1])\n 
sum_1 = df_in.shape[0]\n df_in['count1'] = [(1) for i in range(sum_1)]\n df_in = df_in.groupby(name).sum()[['count1']]\n df_out = copy.deepcopy(df_sf_bin[df_sf_bin['type_train'] == name2])\n sum_2 = df_out.shape[0]\n df_out['count2'] = [(1) for i in range(sum_2)]\n df_out = df_out.groupby(name).sum()[['count2']]\n df_psi = pd.concat((df_in, df_out), axis=1)\n df_psi['count1'] = df_psi['count1'].apply(lambda x: x / sum_1)\n df_psi['count2'] = df_psi['count2'].apply(lambda x: x / sum_2)\n df_psi[['count1', 'count2']].replace(0, 0.001, inplace=True)\n df_psi['psi_tmp'] = df_psi['count1'] / df_psi['count2']\n df_psi['psi_tmp'] = df_psi['psi_tmp'].apply(lambda x: math.log(x))\n df_psi['psi'] = (df_psi['count1'] - df_psi['count2']) * df_psi[\n 'psi_tmp']\n return sum(df_psi['psi'])\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Get_res_DataFrame:\n \"\"\"\n sheet1:数据概况\n sheet2:变量的大小,效果,相关性 ok\n sheet3:分箱结果及woe ok\n sheet4:按单一类别分 输入 df[['类别', 'final_score']] cut_line依据 输出 并计算ks\n \n \n 通过输入不同的df来返回不同的df分析\n ins,oot,oot2 第一个函数\n 新老客区分 第一个函数 输入df_new, df_old, type_train\n 月份区分 第一个函数 输入df_new , df_old, month\n \"\"\"\n\n def __init__(self, lr, df, df_bin, df_woe, use_lst, woe_dic, type_train\n ='type_train', y='is_7_p'):\n self.df = df\n self.df_bin = df_bin\n self.df_woe = df_woe\n self.use_lst = use_lst\n self.woe_dic = woe_dic\n self.type_train = type_train\n self.model = lr\n self.y = y\n\n def main(self):\n print('d2_1 = self.get_2_1_imp()', 'd2_2 = self.get_2_2_des()',\n 'd2_3 = self.get_2_3_corr()',\n \"d3 = self.get_bin_ins_oot(type_lst=['ins', 'oot', 'oot2'])\")\n\n def get_2_1_imp(self, df):\n d1 = DataFrame(index=self.use_lst)\n cover_dic = dict(df[use_lst].notnull().sum())\n d1['auc'] = [round(0.5 + abs(0.5 - roc_auc_score(df[self.y], df[i])\n ), 3) for i in self.use_lst]\n d1['ks'] = [round(float(self.ks_calc_cross(df, name, self.y)[0][\n 'gap']), 3) for name in self.use_lst]\n d1['ins_iv'] = [round(self.cal_iv(df[df[self.type_train] == 'ins'],\n name, self.y), 3) for name in self.use_lst]\n d1['oot_iv'] = [round(self.cal_iv(df[df[self.type_train] == 'oot'],\n name, self.y), 3) for name in self.use_lst]\n d1['coef'] = [round(i, 4) for i in self.model.coef_[0]]\n d1 = d1.reset_index()\n d1['psi'] = [round(self.cal_psi(df, name), 5) for name in self.use_lst]\n d1['vif'] = [round(variance_inflation_factor(np.matrix(df[self.\n use_lst]), i), 3) for i in range(len(self.use_lst))]\n d1.index = range(1, d1.shape[0] + 1)\n return d1\n\n def get_2_2_des(self):\n df = self.df[self.df[self.type_train].isin(['ins', 'oot'])]\n df_data_des = df[self.use_lst].describe().T\n cover_dic = dict(df[use_lst].notnull().sum())\n df_data_des = df_data_des.reset_index()\n df_data_des['cover'] = df_data_des['index'].apply(lambda x: round(\n cover_dic[x] / df.shape[0], 
4))\n df_data_des.index = df_data_des['index']\n df_data_des.drop(columns=['index', 'count'], inplace=True)\n d2_2 = df_data_des.reset_index()\n d2_2.index = range(1, d2_2.shape[0] + 1)\n return d2_2\n\n def get_2_3_corr(self):\n corr = np.corrcoef(np.array(self.df_woe[self.use_lst]).T)\n d2_3 = DataFrame(corr, columns=range(len(self.use_lst)), index=self\n .use_lst).reset_index()\n d2_3.index = range(1, d2_3.shape[0] + 1)\n return d2_3\n\n def get_bin_ins_oot(self, type_lst=['ins', 'oot', 'oot2']):\n res = []\n for loc, i in enumerate(type_lst):\n lst = []\n df_tmp = self.df_bin[self.df_bin[self.type_train] == i]\n for name in self.use_lst:\n dd_tmp = df_tmp.groupby(name).sum()[[self.y, 'count']]\n dd_tmp['bad_rate'] = dd_tmp[self.y] / dd_tmp['count']\n dd_tmp = dd_tmp.reset_index()\n dd_tmp['woe'] = dd_tmp[name].apply(lambda x: self.woe_dic[\n name][x])\n dd_tmp.sort_values(by='bad_rate', inplace=True)\n dd_tmp['sort_key'] = [(float(i.split(',')[0][1:]) if i[0] ==\n '(' else float('inf')) for i in dd_tmp[name]]\n dd_tmp.sort_values(by='sort_key', inplace=True)\n dd_tmp.drop(columns=['sort_key'], inplace=True)\n name1 = '-'\n d = DataFrame(columns=['slice', 'bad', 'count', 'bad_rio',\n 'woe'], data=[[str(name1), '-', '-', '-', '-']] +\n dd_tmp.values.tolist()[:], index=[[name]] + ['-'] *\n dd_tmp.shape[0])\n if loc < 1:\n split_name = '<-->' + str(i)\n else:\n split_name = str(type_lst[loc - 1]) + '<-->' + str(i)\n d[split_name] = [split_name for i in range(d.shape[0])]\n d = d[[split_name, 'slice', 'bad', 'count', 'bad_rio', 'woe']]\n lst.append(d)\n res.append(lst)\n return pd.concat((pd.concat(i for i in res[i]) for i in range(len(\n type_lst))), axis=1)\n\n def get_categories_df(self, df, cate='type_new', base_cut='ins', y=\n 'final_score'):\n df_tmp = copy.deepcopy(df[[cate, self.y, y]])\n df_tmp.rename(columns={cate: 'category', self.y: 'bad'}, inplace=True)\n cut_line = list(np.percentile(list(df_tmp[df_tmp['category'] ==\n base_cut][y]), range(1, 101, 
10)))\n cut_line[0] = -float('inf')\n cut_line.append(float('inf'))\n df_tmp['bins'] = pd.cut(df_tmp[y], bins=cut_line)\n df_tmp['count'] = [(1) for i in range(df_tmp.shape[0])]\n ks_lst = []\n for i in sorted(Counter(df_tmp['category']).keys()):\n lst = list(ks_calc_cross(df_tmp[df_tmp['category'] == i],\n 'bins', 'bad')[1]['gap'])\n while len(lst) < 10:\n lst = [0] + lst\n ks_lst.extend(lst)\n df = df_tmp.groupby(['category', 'bins']).sum()[['bad', 'count']]\n df = df.reset_index()\n df['bad_rate'] = df['bad'] / df['count']\n df['ks'] = ks_lst\n for i in ['bad', 'count', 'bad_rate', 'ks']:\n df[i] = df[i].astype(float)\n df[['bad', 'count', 'bad_rate', 'ks']] = df[['bad', 'count',\n 'bad_rate', 'ks']].fillna(0)\n df.index = range(1, df.shape[0] + 1)\n return df\n\n def ks_calc_cross(self, data, pred, y_label):\n \"\"\"\n 功能: 计算KS值,输出对应分割点和累计分布函数曲线图\n 输入值:\n data: 二维数组或dataframe,包括模型得分和真实的标签\n pred: 一维数组或series,代表模型得分(一般为预测正类的概率)\n y_label: 一维数组或series,代表真实的标签({0,1}或{-1,1})\n 输出值:\n 'ks': KS值,'crossdens': 好坏客户累积概率分布以及其差值gap\n \"\"\"\n crossfreq = pd.crosstab(data[pred], data[y_label])\n crossdens = crossfreq.cumsum(axis=0) / crossfreq.sum()\n crossdens['gap'] = abs(crossdens[0] - crossdens[1])\n ks = crossdens[crossdens['gap'] == crossdens['gap'].max()]\n return ks, crossdens\n\n def cal_iv(self, df1, x, y='is_7_p'):\n df = copy.deepcopy(df1)\n if 'count' not in df.columns:\n df['count'] = [(1) for i in range(df.shape[0])]\n df_tmp = df[[x, 'count', y]].groupby(x).sum()\n df_tmp['good'] = df_tmp['count'] - df_tmp[y]\n df_tmp[y] = df_tmp[y].apply(lambda x: max(x, 1e-05) / sum(df_tmp[y]))\n df_tmp['good'] = df_tmp['good'].apply(lambda x: max(x, 1e-05) / sum\n (df_tmp['good']))\n df_tmp['woe'] = np.log(df_tmp[y] / df_tmp['good'])\n df_tmp['iv'] = (df_tmp[y] - df_tmp['good']) * df_tmp['woe']\n return df_tmp['iv'].sum()\n\n def cal_psi(self, df_sf_bin, name, lst=['ins', 'oot']):\n name1, name2 = lst\n df_in = copy.deepcopy(df_sf_bin[df_sf_bin['type_train'] == name1])\n 
sum_1 = df_in.shape[0]\n df_in['count1'] = [(1) for i in range(sum_1)]\n df_in = df_in.groupby(name).sum()[['count1']]\n df_out = copy.deepcopy(df_sf_bin[df_sf_bin['type_train'] == name2])\n sum_2 = df_out.shape[0]\n df_out['count2'] = [(1) for i in range(sum_2)]\n df_out = df_out.groupby(name).sum()[['count2']]\n df_psi = pd.concat((df_in, df_out), axis=1)\n df_psi['count1'] = df_psi['count1'].apply(lambda x: x / sum_1)\n df_psi['count2'] = df_psi['count2'].apply(lambda x: x / sum_2)\n df_psi[['count1', 'count2']].replace(0, 0.001, inplace=True)\n df_psi['psi_tmp'] = df_psi['count1'] / df_psi['count2']\n df_psi['psi_tmp'] = df_psi['psi_tmp'].apply(lambda x: math.log(x))\n df_psi['psi'] = (df_psi['count1'] - df_psi['count2']) * df_psi[\n 'psi_tmp']\n return sum(df_psi['psi'])\n\n\nif __name__ == '__main__':\n s = \"\"\"\n c=Get_res_DataFrame(lr, a.df, a.df_bin, df_pb_woe, use_lst,a.woe_dic, type_train='type_train', y='is_7_p')\n d2_1 = c.get_2_1_imp(df_pb_woe[df_pb_woe['customer_type_old']=='old_customer'])\n d2_2 = c.get_2_2_des()\n d2_3 = c.get_2_3_corr()\n \n d3 = c.get_bin_ins_oot(type_lst=['ins', 'oot'])\n d4 = c.get_categories_df(df_pb_all,cate='type_train',base_cut='ins', y='final_score')\n #\n df_new = df_pb_all[df_pb_all['customer_type_old']=='new_customer']\n df_old = df_pb_all[df_pb_all['customer_type_old']=='old_customer']\n #\n d5_1 = c.get_categories_df(df_new,cate='type_train',base_cut='ins', y='final_score')\n d5_2 = c.get_categories_df(df_old,cate='type_train',base_cut='ins', y='final_score')\n \n d6_1 = c.get_categories_df(df_new,cate='month',base_cut='0', y='final_score')\n d6_2 = c.get_categories_df(df_old,cate='month',base_cut='0', y='final_score')\n \"\"\"\n",
"step-4": "import copy\nimport pandas as pd\nimport numpy as np\nfrom pandas import DataFrame\nfrom collections import Counter\nfrom sklearn.metrics import roc_auc_score, roc_curve\nfrom statsmodels.stats.outliers_influence import variance_inflation_factor\n\n\nclass Get_res_DataFrame:\n \"\"\"\n sheet1:数据概况\n sheet2:变量的大小,效果,相关性 ok\n sheet3:分箱结果及woe ok\n sheet4:按单一类别分 输入 df[['类别', 'final_score']] cut_line依据 输出 并计算ks\n \n \n 通过输入不同的df来返回不同的df分析\n ins,oot,oot2 第一个函数\n 新老客区分 第一个函数 输入df_new, df_old, type_train\n 月份区分 第一个函数 输入df_new , df_old, month\n \"\"\"\n\n def __init__(self, lr, df, df_bin, df_woe, use_lst, woe_dic, type_train\n ='type_train', y='is_7_p'):\n self.df = df\n self.df_bin = df_bin\n self.df_woe = df_woe\n self.use_lst = use_lst\n self.woe_dic = woe_dic\n self.type_train = type_train\n self.model = lr\n self.y = y\n\n def main(self):\n print('d2_1 = self.get_2_1_imp()', 'd2_2 = self.get_2_2_des()',\n 'd2_3 = self.get_2_3_corr()',\n \"d3 = self.get_bin_ins_oot(type_lst=['ins', 'oot', 'oot2'])\")\n\n def get_2_1_imp(self, df):\n d1 = DataFrame(index=self.use_lst)\n cover_dic = dict(df[use_lst].notnull().sum())\n d1['auc'] = [round(0.5 + abs(0.5 - roc_auc_score(df[self.y], df[i])\n ), 3) for i in self.use_lst]\n d1['ks'] = [round(float(self.ks_calc_cross(df, name, self.y)[0][\n 'gap']), 3) for name in self.use_lst]\n d1['ins_iv'] = [round(self.cal_iv(df[df[self.type_train] == 'ins'],\n name, self.y), 3) for name in self.use_lst]\n d1['oot_iv'] = [round(self.cal_iv(df[df[self.type_train] == 'oot'],\n name, self.y), 3) for name in self.use_lst]\n d1['coef'] = [round(i, 4) for i in self.model.coef_[0]]\n d1 = d1.reset_index()\n d1['psi'] = [round(self.cal_psi(df, name), 5) for name in self.use_lst]\n d1['vif'] = [round(variance_inflation_factor(np.matrix(df[self.\n use_lst]), i), 3) for i in range(len(self.use_lst))]\n d1.index = range(1, d1.shape[0] + 1)\n return d1\n\n def get_2_2_des(self):\n df = self.df[self.df[self.type_train].isin(['ins', 'oot'])]\n 
df_data_des = df[self.use_lst].describe().T\n cover_dic = dict(df[use_lst].notnull().sum())\n df_data_des = df_data_des.reset_index()\n df_data_des['cover'] = df_data_des['index'].apply(lambda x: round(\n cover_dic[x] / df.shape[0], 4))\n df_data_des.index = df_data_des['index']\n df_data_des.drop(columns=['index', 'count'], inplace=True)\n d2_2 = df_data_des.reset_index()\n d2_2.index = range(1, d2_2.shape[0] + 1)\n return d2_2\n\n def get_2_3_corr(self):\n corr = np.corrcoef(np.array(self.df_woe[self.use_lst]).T)\n d2_3 = DataFrame(corr, columns=range(len(self.use_lst)), index=self\n .use_lst).reset_index()\n d2_3.index = range(1, d2_3.shape[0] + 1)\n return d2_3\n\n def get_bin_ins_oot(self, type_lst=['ins', 'oot', 'oot2']):\n res = []\n for loc, i in enumerate(type_lst):\n lst = []\n df_tmp = self.df_bin[self.df_bin[self.type_train] == i]\n for name in self.use_lst:\n dd_tmp = df_tmp.groupby(name).sum()[[self.y, 'count']]\n dd_tmp['bad_rate'] = dd_tmp[self.y] / dd_tmp['count']\n dd_tmp = dd_tmp.reset_index()\n dd_tmp['woe'] = dd_tmp[name].apply(lambda x: self.woe_dic[\n name][x])\n dd_tmp.sort_values(by='bad_rate', inplace=True)\n dd_tmp['sort_key'] = [(float(i.split(',')[0][1:]) if i[0] ==\n '(' else float('inf')) for i in dd_tmp[name]]\n dd_tmp.sort_values(by='sort_key', inplace=True)\n dd_tmp.drop(columns=['sort_key'], inplace=True)\n name1 = '-'\n d = DataFrame(columns=['slice', 'bad', 'count', 'bad_rio',\n 'woe'], data=[[str(name1), '-', '-', '-', '-']] +\n dd_tmp.values.tolist()[:], index=[[name]] + ['-'] *\n dd_tmp.shape[0])\n if loc < 1:\n split_name = '<-->' + str(i)\n else:\n split_name = str(type_lst[loc - 1]) + '<-->' + str(i)\n d[split_name] = [split_name for i in range(d.shape[0])]\n d = d[[split_name, 'slice', 'bad', 'count', 'bad_rio', 'woe']]\n lst.append(d)\n res.append(lst)\n return pd.concat((pd.concat(i for i in res[i]) for i in range(len(\n type_lst))), axis=1)\n\n def get_categories_df(self, df, cate='type_new', base_cut='ins', y=\n 
'final_score'):\n df_tmp = copy.deepcopy(df[[cate, self.y, y]])\n df_tmp.rename(columns={cate: 'category', self.y: 'bad'}, inplace=True)\n cut_line = list(np.percentile(list(df_tmp[df_tmp['category'] ==\n base_cut][y]), range(1, 101, 10)))\n cut_line[0] = -float('inf')\n cut_line.append(float('inf'))\n df_tmp['bins'] = pd.cut(df_tmp[y], bins=cut_line)\n df_tmp['count'] = [(1) for i in range(df_tmp.shape[0])]\n ks_lst = []\n for i in sorted(Counter(df_tmp['category']).keys()):\n lst = list(ks_calc_cross(df_tmp[df_tmp['category'] == i],\n 'bins', 'bad')[1]['gap'])\n while len(lst) < 10:\n lst = [0] + lst\n ks_lst.extend(lst)\n df = df_tmp.groupby(['category', 'bins']).sum()[['bad', 'count']]\n df = df.reset_index()\n df['bad_rate'] = df['bad'] / df['count']\n df['ks'] = ks_lst\n for i in ['bad', 'count', 'bad_rate', 'ks']:\n df[i] = df[i].astype(float)\n df[['bad', 'count', 'bad_rate', 'ks']] = df[['bad', 'count',\n 'bad_rate', 'ks']].fillna(0)\n df.index = range(1, df.shape[0] + 1)\n return df\n\n def ks_calc_cross(self, data, pred, y_label):\n \"\"\"\n 功能: 计算KS值,输出对应分割点和累计分布函数曲线图\n 输入值:\n data: 二维数组或dataframe,包括模型得分和真实的标签\n pred: 一维数组或series,代表模型得分(一般为预测正类的概率)\n y_label: 一维数组或series,代表真实的标签({0,1}或{-1,1})\n 输出值:\n 'ks': KS值,'crossdens': 好坏客户累积概率分布以及其差值gap\n \"\"\"\n crossfreq = pd.crosstab(data[pred], data[y_label])\n crossdens = crossfreq.cumsum(axis=0) / crossfreq.sum()\n crossdens['gap'] = abs(crossdens[0] - crossdens[1])\n ks = crossdens[crossdens['gap'] == crossdens['gap'].max()]\n return ks, crossdens\n\n def cal_iv(self, df1, x, y='is_7_p'):\n df = copy.deepcopy(df1)\n if 'count' not in df.columns:\n df['count'] = [(1) for i in range(df.shape[0])]\n df_tmp = df[[x, 'count', y]].groupby(x).sum()\n df_tmp['good'] = df_tmp['count'] - df_tmp[y]\n df_tmp[y] = df_tmp[y].apply(lambda x: max(x, 1e-05) / sum(df_tmp[y]))\n df_tmp['good'] = df_tmp['good'].apply(lambda x: max(x, 1e-05) / sum\n (df_tmp['good']))\n df_tmp['woe'] = np.log(df_tmp[y] / df_tmp['good'])\n 
df_tmp['iv'] = (df_tmp[y] - df_tmp['good']) * df_tmp['woe']\n return df_tmp['iv'].sum()\n\n def cal_psi(self, df_sf_bin, name, lst=['ins', 'oot']):\n name1, name2 = lst\n df_in = copy.deepcopy(df_sf_bin[df_sf_bin['type_train'] == name1])\n sum_1 = df_in.shape[0]\n df_in['count1'] = [(1) for i in range(sum_1)]\n df_in = df_in.groupby(name).sum()[['count1']]\n df_out = copy.deepcopy(df_sf_bin[df_sf_bin['type_train'] == name2])\n sum_2 = df_out.shape[0]\n df_out['count2'] = [(1) for i in range(sum_2)]\n df_out = df_out.groupby(name).sum()[['count2']]\n df_psi = pd.concat((df_in, df_out), axis=1)\n df_psi['count1'] = df_psi['count1'].apply(lambda x: x / sum_1)\n df_psi['count2'] = df_psi['count2'].apply(lambda x: x / sum_2)\n df_psi[['count1', 'count2']].replace(0, 0.001, inplace=True)\n df_psi['psi_tmp'] = df_psi['count1'] / df_psi['count2']\n df_psi['psi_tmp'] = df_psi['psi_tmp'].apply(lambda x: math.log(x))\n df_psi['psi'] = (df_psi['count1'] - df_psi['count2']) * df_psi[\n 'psi_tmp']\n return sum(df_psi['psi'])\n\n\nif __name__ == '__main__':\n s = \"\"\"\n c=Get_res_DataFrame(lr, a.df, a.df_bin, df_pb_woe, use_lst,a.woe_dic, type_train='type_train', y='is_7_p')\n d2_1 = c.get_2_1_imp(df_pb_woe[df_pb_woe['customer_type_old']=='old_customer'])\n d2_2 = c.get_2_2_des()\n d2_3 = c.get_2_3_corr()\n \n d3 = c.get_bin_ins_oot(type_lst=['ins', 'oot'])\n d4 = c.get_categories_df(df_pb_all,cate='type_train',base_cut='ins', y='final_score')\n #\n df_new = df_pb_all[df_pb_all['customer_type_old']=='new_customer']\n df_old = df_pb_all[df_pb_all['customer_type_old']=='old_customer']\n #\n d5_1 = c.get_categories_df(df_new,cate='type_train',base_cut='ins', y='final_score')\n d5_2 = c.get_categories_df(df_old,cate='type_train',base_cut='ins', y='final_score')\n \n d6_1 = c.get_categories_df(df_new,cate='month',base_cut='0', y='final_score')\n d6_2 = c.get_categories_df(df_old,cate='month',base_cut='0', y='final_score')\n \"\"\"\n",
"step-5": "import copy\nimport pandas as pd\nimport numpy as np\nfrom pandas import DataFrame\nfrom collections import Counter\nfrom sklearn.metrics import roc_auc_score, roc_curve\nfrom statsmodels.stats.outliers_influence import variance_inflation_factor\n\nclass Get_res_DataFrame:\n '''\n sheet1:数据概况\n sheet2:变量的大小,效果,相关性 ok\n sheet3:分箱结果及woe ok\n sheet4:按单一类别分 输入 df[['类别', 'final_score']] cut_line依据 输出 并计算ks\n \n \n 通过输入不同的df来返回不同的df分析\n ins,oot,oot2 第一个函数\n 新老客区分 第一个函数 输入df_new, df_old, type_train\n 月份区分 第一个函数 输入df_new , df_old, month\n '''\n \n def __init__(self, lr, df, df_bin, df_woe, use_lst, woe_dic, type_train='type_train', y='is_7_p'):\n self.df = df\n self.df_bin = df_bin\n self.df_woe = df_woe\n self.use_lst = use_lst\n self.woe_dic = woe_dic\n self.type_train = type_train\n self.model = lr\n self.y = y\n \n def main(self):\n \n print('d2_1 = self.get_2_1_imp()',#依次放好,\n 'd2_2 = self.get_2_2_des()',\n 'd2_3 = self.get_2_3_corr()',\n '''d3 = self.get_bin_ins_oot(type_lst=['ins', 'oot', 'oot2'])''' ) #一整个\n \n #return d2_1, d2_2, d2_3, d3\n \n #df, df_woe, use_lst, cal_iv, type_train,cal_psi ,lr\n def get_2_1_imp(self, df):\n d1 = DataFrame(index=self.use_lst)\n cover_dic = dict(df[use_lst].notnull().sum())\n d1['auc'] = [round(0.5+abs(0.5-roc_auc_score(df[self.y], df[i])), 3) for i in self.use_lst]\n #d1['ks'] = [round(max(abs(roc_curve(df[self.y],df[name])[0]- roc_curve(df[self.y],df[name])[1])), 3) for name in self.use_lst]\n d1['ks'] = [round(float(self.ks_calc_cross(df, name, self.y)[0]['gap']), 3) for name in self.use_lst]\n d1['ins_iv'] = [round(self.cal_iv(df[df[self.type_train]=='ins'], name, self.y), 3) for name in self.use_lst]\n d1['oot_iv'] = [round(self.cal_iv(df[df[self.type_train]=='oot'], name, self.y), 3) for name in self.use_lst]\n \n d1['coef'] = [round(i, 4) for i in self.model.coef_[0]]\n #d1['importance'] = self.model.feature_importances_\n d1 = d1.reset_index()\n d1['psi'] = [round(self.cal_psi(df, name), 5) for name in 
self.use_lst]\n d1['vif'] = [round(variance_inflation_factor(np.matrix(df[self.use_lst]), i),3) for i in range(len(self.use_lst))]\n #d1['fill_missing_data'] = [fill_na_dic[name] for name in self.use_lst]\n #d2_1 = d1\n d1.index = range(1, d1.shape[0]+1)\n return d1\n \n #df, use_lst, type_train\n def get_2_2_des(self):\n df = self.df[self.df[self.type_train].isin(['ins', 'oot'])]\n df_data_des = df[self.use_lst].describe().T \n \n \n cover_dic = dict(df[use_lst].notnull().sum())\n \n df_data_des = df_data_des.reset_index()\n df_data_des['cover'] = df_data_des['index'].apply(lambda x: round(cover_dic[x]/df.shape[0], 4))\n df_data_des.index = df_data_des['index']\n df_data_des.drop(columns=['index', 'count'], inplace=True)\n d2_2 = df_data_des.reset_index()\n d2_2.index = range(1, d2_2.shape[0]+1)\n return d2_2\n \n #df_woe, use_lst\n def get_2_3_corr(self):\n corr = np.corrcoef(np.array(self.df_woe[self.use_lst]).T)\n d2_3 = DataFrame(corr, columns=range(len(self.use_lst)), index=self.use_lst).reset_index()\n d2_3.index = range(1, d2_3.shape[0]+1)\n return d2_3\n \n #df_bin, use_lst, #type_lst#, type_train, woe_dic\n def get_bin_ins_oot(self, type_lst=['ins', 'oot', 'oot2']):\n res = []\n for loc, i in enumerate(type_lst):\n lst = []\n df_tmp = self.df_bin[(self.df_bin[self.type_train]==i)]\n\n for name in self.use_lst:\n #ks_lst = list(self.ks_calc_cross(df_tmp, name, self.y)[1]['gap'])\n #while len(ks_lst) > df_tmp.shape[0]:\n # ks_lst.pop()\n #while len(ks_lst) < df_tmp.shape[0]:\n # ks_lst.append(0)\n #print(ks_lst)\n dd_tmp = df_tmp.groupby(name).sum()[[self.y, 'count']]\n dd_tmp['bad_rate'] = dd_tmp[self.y]/dd_tmp['count']\n dd_tmp = dd_tmp.reset_index()\n dd_tmp['woe'] = dd_tmp[name].apply(lambda x: self.woe_dic[name][x])\n dd_tmp.sort_values(by='bad_rate', inplace=True) \n dd_tmp['sort_key'] = [float(i.split(',')[0][1:]) if i[0]=='(' else float('inf') for i in dd_tmp[name]]\n #print(dd_tmp)\n dd_tmp.sort_values(by='sort_key', inplace=True)\n 
dd_tmp.drop(columns=['sort_key'], inplace=True)\n name1 = '-'\n d = DataFrame(columns=['slice', 'bad', 'count', 'bad_rio', 'woe'],\n data=[[str(name1), '-', '-', '-','-']]+dd_tmp.values.tolist()[:], \n index=[[name]]+['-']*dd_tmp.shape[0])\n if loc < 1:\n split_name = '<-->'+str(i)\n else:\n split_name = str(type_lst[loc-1])+'<-->'+str(i)\n d[split_name] = [split_name for i in range(d.shape[0])]\n d = d[[split_name, 'slice', 'bad', 'count', 'bad_rio', 'woe' ]] \n lst.append(d)\n res.append(lst) \n return pd.concat((pd.concat(i for i in res[i]) for i in range(len(type_lst))),axis=1)\n \n #按照类别做DataFrame\n def get_categories_df(self, df, cate='type_new', base_cut='ins', y='final_score'):\n \n df_tmp = copy.deepcopy(df[[cate, self.y, y]])\n df_tmp.rename(columns={cate:'category', self.y:'bad'}, inplace=True)\n cut_line = list(np.percentile(list(df_tmp[df_tmp['category']==base_cut][y]), range(1, 101,10)))\n #np.percentile出来的是np.array格式\n cut_line[0] = -float('inf')\n cut_line.append(float('inf'))\n df_tmp['bins'] = pd.cut(df_tmp[y], bins=cut_line)\n df_tmp['count'] = [1 for i in range(df_tmp.shape[0])]\n #print(df_tmp)\n \n ks_lst = []\n for i in sorted(Counter(df_tmp['category']).keys()):\n #print(df_tmp[df_tmp['category']==i].shape)\n lst = list(ks_calc_cross(df_tmp[df_tmp['category']==i], 'bins', 'bad')[1]['gap'])\n #print(lst)\n while len(lst) < 10:\n lst = [0]+lst\n ks_lst.extend(lst)\n \n \n df = df_tmp.groupby(['category', 'bins']).sum()[['bad', 'count']]\n df = df.reset_index()\n df['bad_rate'] = df['bad']/df['count']\n df['ks'] = ks_lst\n #print(df)\n for i in ['bad', 'count', 'bad_rate', 'ks']:\n df[i] = df[i].astype(float)\n #df[['bad', 'count', 'bad_rate', 'ks']] = df[['bad', 'count', 'bad_rate', 'ks']].astype(float)\n #df = df.astype(str)\n df[['bad', 'count', 'bad_rate', 'ks'] ]= df[['bad', 'count', 'bad_rate', 'ks']].fillna(0)\n #添加几行用来画画\n #\n #n = len(Counter(df_tmp[cate]))\n #length = df.shape[0]//n\n #for i in range(n):\n # \n #df[:length]\n 
#print(df)\n #\n df.index = range(1, df.shape[0]+1)\n return df\n def ks_calc_cross(self,data,pred,y_label):\n '''\n 功能: 计算KS值,输出对应分割点和累计分布函数曲线图\n 输入值:\n data: 二维数组或dataframe,包括模型得分和真实的标签\n pred: 一维数组或series,代表模型得分(一般为预测正类的概率)\n y_label: 一维数组或series,代表真实的标签({0,1}或{-1,1})\n 输出值:\n 'ks': KS值,'crossdens': 好坏客户累积概率分布以及其差值gap\n '''\n crossfreq = pd.crosstab(data[pred],data[y_label])\n crossdens = crossfreq.cumsum(axis=0) / crossfreq.sum()\n crossdens['gap'] = abs(crossdens[0] - crossdens[1])\n ks = crossdens[crossdens['gap'] == crossdens['gap'].max()]\n return ks,crossdens\n \n def cal_iv(self,df1, x, y='is_7_p'):\n df = copy.deepcopy(df1)\n if 'count' not in df.columns:\n df['count'] = [1 for i in range(df.shape[0])]\n df_tmp = df[[x,'count', y]].groupby(x).sum()\n df_tmp['good'] = df_tmp['count'] - df_tmp[y]\n df_tmp[y] = df_tmp[y].apply(lambda x: max(x, 0.00001)/sum(df_tmp[y]))\n df_tmp['good'] = df_tmp['good'].apply(lambda x: max(x, 0.00001)/sum(df_tmp['good']))\n #计算woe\n df_tmp['woe'] = np.log(df_tmp[y]/df_tmp['good'])\n #计算iv\n df_tmp['iv'] = (df_tmp[y]-df_tmp['good']) * df_tmp['woe']\n return df_tmp['iv'].sum()\n \n \n #计算psi\n def cal_psi(self, df_sf_bin, name, lst=['ins', 'oot']):\n name1, name2 = lst\n \n df_in = copy.deepcopy(df_sf_bin[df_sf_bin['type_train']==name1])\n sum_1 = df_in.shape[0]\n df_in['count1'] = [1 for i in range(sum_1)]\n df_in = df_in.groupby(name).sum()[['count1']]\n \n df_out = copy.deepcopy(df_sf_bin[df_sf_bin['type_train']==name2])\n sum_2 = df_out.shape[0]\n df_out['count2'] = [1 for i in range(sum_2)]\n df_out = df_out.groupby(name).sum()[['count2']]\n df_psi = pd.concat((df_in, df_out), axis=1)\n #计算psi\n df_psi['count1'] = df_psi['count1'].apply(lambda x: x/sum_1)\n df_psi['count2'] = df_psi['count2'].apply(lambda x: x/sum_2)\n #处理出现0的空箱\n df_psi[['count1', 'count2']].replace(0, 0.001, inplace=True)\n #\n df_psi['psi_tmp'] = df_psi['count1']/df_psi['count2']\n df_psi['psi_tmp'] = df_psi['psi_tmp'].apply(lambda x: math.log(x))\n # 
print(df_psi)\n df_psi['psi'] = (df_psi['count1'] - df_psi['count2'])*df_psi['psi_tmp']\n #df_psi\n return sum(df_psi['psi'])\n \nif __name__ == '__main__':\n \n s = '''\n c=Get_res_DataFrame(lr, a.df, a.df_bin, df_pb_woe, use_lst,a.woe_dic, type_train='type_train', y='is_7_p')\n d2_1 = c.get_2_1_imp(df_pb_woe[df_pb_woe['customer_type_old']=='old_customer'])\n d2_2 = c.get_2_2_des()\n d2_3 = c.get_2_3_corr()\n \n d3 = c.get_bin_ins_oot(type_lst=['ins', 'oot'])\n d4 = c.get_categories_df(df_pb_all,cate='type_train',base_cut='ins', y='final_score')\n #\n df_new = df_pb_all[df_pb_all['customer_type_old']=='new_customer']\n df_old = df_pb_all[df_pb_all['customer_type_old']=='old_customer']\n #\n d5_1 = c.get_categories_df(df_new,cate='type_train',base_cut='ins', y='final_score')\n d5_2 = c.get_categories_df(df_old,cate='type_train',base_cut='ins', y='final_score')\n \n d6_1 = c.get_categories_df(df_new,cate='month',base_cut='0', y='final_score')\n d6_2 = c.get_categories_df(df_old,cate='month',base_cut='0', y='final_score')\n '''\n \n",
"step-ids": [
11,
12,
13,
14,
15
]
}
|
[
11,
12,
13,
14,
15
] |
<|reserved_special_token_0|>
class DeviceShadowHandler:
def status_post(self, status, state=None):
"""Post status message and device state to AWSIoT and LOGGER
:param status: status string
:param state: optional dictionary to add to shadow reported state
:return:
"""
new_payload = {'state': {'reported': {'status': str(status)},
'desired': None}}
if state:
new_payload.update({'state': {'reported': state}})
self.shadow_handler.shadowUpdate(json.dumps(new_payload), None, 20)
LOGGER.info(status)
LOGGER.debug(json.dumps(new_payload))
def __init__(self, thingname: str, host: str, root_ca_path: str,
private_key_path: str, certificate_path: str):
"""Initiate AWS IoT connection
:param thingname: AWSIoT thing name
:param host: AWSIoT endpoint FQDN
:param root_ca_path: local file path to Amazon root certificate
:param private_key_path: local file path to device private key
:param certificate_path: local file path to device certificate
"""
self.shadow_client = AWSIoTMQTTShadowClient(thingname)
self.shadow_client.configureEndpoint(host, 8883)
self.shadow_client.configureCredentials(root_ca_path,
private_key_path, certificate_path)
self.shadow_client.configureAutoReconnectBackoffTime(1, 32, 20)
self.shadow_client.configureConnectDisconnectTimeout(20)
self.shadow_client.configureMQTTOperationTimeout(20)
mqtt_client = self.shadow_client.getMQTTConnection()
mqtt_client.configureOfflinePublishQueueing(-1)
self.shadow_client.connect(300)
self.shadow_handler = self.shadow_client.createShadowHandlerWithName(
thingname, True)
self.shadow_handler.shadowRegisterDeltaCallback(self.
custom_shadow_callback_delta)
self.status_post('STARTING')
self._callbackresponses = {}
self.event_queue = queue.SimpleQueue()
self.settings = {}
def custom_shadow_callback_delta(self, payload: str, response_status, token
):
"""
:param payload: JSON string ready to be parsed using json.loads(...)
:param response_status: ignored
:param token: ignored
"""
LOGGER.debug(payload)
payload_dict = json.loads(payload)
new_payload = {}
if payload_dict.get('state').get('command'):
self.event_queue.put_nowait({'command': payload_dict.get(
'state').get('command')})
new_payload.update({'state': {'desired': {'command': None}}})
if payload_dict.get('state').get('settings'):
self.event_queue.put_nowait({'settings': payload_dict.get(
'state').get('settings')})
new_payload.update({'state': {'desired': {'settings':
payload_dict.get('state').get('settings')}}})
LOGGER.info('Shadow update: ' + json.dumps(new_payload))
self.shadow_handler.shadowUpdate(json.dumps(new_payload), None, 5)
def custom_shadow_callback_get(self, payload, response_status, token):
"""Callback function records response from get shadow operation
:param payload:
:param response_status:
:param token:
:return:
"""
self._callbackresponses.update({token: {'payload': json.loads(
payload), 'responseStatus': response_status}})
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def post_state(self, state):
new_payload = {'state': {'reported': {'status': state}, 'desired':
None}}
self.shadow_handler.shadowUpdate(json.dumps(new_payload), None, 20)
LOGGER.info('New state' + json.dumps(state))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class DeviceShadowHandler:
def status_post(self, status, state=None):
"""Post status message and device state to AWSIoT and LOGGER
:param status: status string
:param state: optional dictionary to add to shadow reported state
:return:
"""
new_payload = {'state': {'reported': {'status': str(status)},
'desired': None}}
if state:
new_payload.update({'state': {'reported': state}})
self.shadow_handler.shadowUpdate(json.dumps(new_payload), None, 20)
LOGGER.info(status)
LOGGER.debug(json.dumps(new_payload))
def __init__(self, thingname: str, host: str, root_ca_path: str,
private_key_path: str, certificate_path: str):
"""Initiate AWS IoT connection
:param thingname: AWSIoT thing name
:param host: AWSIoT endpoint FQDN
:param root_ca_path: local file path to Amazon root certificate
:param private_key_path: local file path to device private key
:param certificate_path: local file path to device certificate
"""
self.shadow_client = AWSIoTMQTTShadowClient(thingname)
self.shadow_client.configureEndpoint(host, 8883)
self.shadow_client.configureCredentials(root_ca_path,
private_key_path, certificate_path)
self.shadow_client.configureAutoReconnectBackoffTime(1, 32, 20)
self.shadow_client.configureConnectDisconnectTimeout(20)
self.shadow_client.configureMQTTOperationTimeout(20)
mqtt_client = self.shadow_client.getMQTTConnection()
mqtt_client.configureOfflinePublishQueueing(-1)
self.shadow_client.connect(300)
self.shadow_handler = self.shadow_client.createShadowHandlerWithName(
thingname, True)
self.shadow_handler.shadowRegisterDeltaCallback(self.
custom_shadow_callback_delta)
self.status_post('STARTING')
self._callbackresponses = {}
self.event_queue = queue.SimpleQueue()
self.settings = {}
def custom_shadow_callback_delta(self, payload: str, response_status, token
):
"""
:param payload: JSON string ready to be parsed using json.loads(...)
:param response_status: ignored
:param token: ignored
"""
LOGGER.debug(payload)
payload_dict = json.loads(payload)
new_payload = {}
if payload_dict.get('state').get('command'):
self.event_queue.put_nowait({'command': payload_dict.get(
'state').get('command')})
new_payload.update({'state': {'desired': {'command': None}}})
if payload_dict.get('state').get('settings'):
self.event_queue.put_nowait({'settings': payload_dict.get(
'state').get('settings')})
new_payload.update({'state': {'desired': {'settings':
payload_dict.get('state').get('settings')}}})
LOGGER.info('Shadow update: ' + json.dumps(new_payload))
self.shadow_handler.shadowUpdate(json.dumps(new_payload), None, 5)
def custom_shadow_callback_get(self, payload, response_status, token):
"""Callback function records response from get shadow operation
:param payload:
:param response_status:
:param token:
:return:
"""
self._callbackresponses.update({token: {'payload': json.loads(
payload), 'responseStatus': response_status}})
<|reserved_special_token_0|>
def post_param(self):
new_payload = {'state': {'reported': {'settings': self.settings},
'desired': None}}
self.shadow_handler.shadowUpdate(json.dumps(new_payload), None, 5)
def post_state(self, state):
new_payload = {'state': {'reported': {'status': state}, 'desired':
None}}
self.shadow_handler.shadowUpdate(json.dumps(new_payload), None, 20)
LOGGER.info('New state' + json.dumps(state))
def post_temperature(self, temp):
new_payload = {'state': {'reported': {'cputemp': temp}}}
self.shadow_handler.shadowUpdate(json.dumps(new_payload), None, 20)
LOGGER.debug('New temp payload ' + json.dumps(new_payload))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class DeviceShadowHandler:
def status_post(self, status, state=None):
"""Post status message and device state to AWSIoT and LOGGER
:param status: status string
:param state: optional dictionary to add to shadow reported state
:return:
"""
new_payload = {'state': {'reported': {'status': str(status)},
'desired': None}}
if state:
new_payload.update({'state': {'reported': state}})
self.shadow_handler.shadowUpdate(json.dumps(new_payload), None, 20)
LOGGER.info(status)
LOGGER.debug(json.dumps(new_payload))
def __init__(self, thingname: str, host: str, root_ca_path: str,
private_key_path: str, certificate_path: str):
"""Initiate AWS IoT connection
:param thingname: AWSIoT thing name
:param host: AWSIoT endpoint FQDN
:param root_ca_path: local file path to Amazon root certificate
:param private_key_path: local file path to device private key
:param certificate_path: local file path to device certificate
"""
self.shadow_client = AWSIoTMQTTShadowClient(thingname)
self.shadow_client.configureEndpoint(host, 8883)
self.shadow_client.configureCredentials(root_ca_path,
private_key_path, certificate_path)
self.shadow_client.configureAutoReconnectBackoffTime(1, 32, 20)
self.shadow_client.configureConnectDisconnectTimeout(20)
self.shadow_client.configureMQTTOperationTimeout(20)
mqtt_client = self.shadow_client.getMQTTConnection()
mqtt_client.configureOfflinePublishQueueing(-1)
self.shadow_client.connect(300)
self.shadow_handler = self.shadow_client.createShadowHandlerWithName(
thingname, True)
self.shadow_handler.shadowRegisterDeltaCallback(self.
custom_shadow_callback_delta)
self.status_post('STARTING')
self._callbackresponses = {}
self.event_queue = queue.SimpleQueue()
self.settings = {}
def custom_shadow_callback_delta(self, payload: str, response_status, token
):
"""
:param payload: JSON string ready to be parsed using json.loads(...)
:param response_status: ignored
:param token: ignored
"""
LOGGER.debug(payload)
payload_dict = json.loads(payload)
new_payload = {}
if payload_dict.get('state').get('command'):
self.event_queue.put_nowait({'command': payload_dict.get(
'state').get('command')})
new_payload.update({'state': {'desired': {'command': None}}})
if payload_dict.get('state').get('settings'):
self.event_queue.put_nowait({'settings': payload_dict.get(
'state').get('settings')})
new_payload.update({'state': {'desired': {'settings':
payload_dict.get('state').get('settings')}}})
LOGGER.info('Shadow update: ' + json.dumps(new_payload))
self.shadow_handler.shadowUpdate(json.dumps(new_payload), None, 5)
def custom_shadow_callback_get(self, payload, response_status, token):
"""Callback function records response from get shadow operation
:param payload:
:param response_status:
:param token:
:return:
"""
self._callbackresponses.update({token: {'payload': json.loads(
payload), 'responseStatus': response_status}})
def get_response(self, token):
"""Return prior get shadow operation response
note each response is deleted when returned, i.e. can only be returned once
:param token:
:return:
"""
return self._callbackresponses.pop(token)
def post_param(self):
new_payload = {'state': {'reported': {'settings': self.settings},
'desired': None}}
self.shadow_handler.shadowUpdate(json.dumps(new_payload), None, 5)
def post_state(self, state):
new_payload = {'state': {'reported': {'status': state}, 'desired':
None}}
self.shadow_handler.shadowUpdate(json.dumps(new_payload), None, 20)
LOGGER.info('New state' + json.dumps(state))
def post_temperature(self, temp):
new_payload = {'state': {'reported': {'cputemp': temp}}}
self.shadow_handler.shadowUpdate(json.dumps(new_payload), None, 20)
LOGGER.debug('New temp payload ' + json.dumps(new_payload))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
LOGGER = logging.getLogger(__name__)
class DeviceShadowHandler:
def status_post(self, status, state=None):
"""Post status message and device state to AWSIoT and LOGGER
:param status: status string
:param state: optional dictionary to add to shadow reported state
:return:
"""
new_payload = {'state': {'reported': {'status': str(status)},
'desired': None}}
if state:
new_payload.update({'state': {'reported': state}})
self.shadow_handler.shadowUpdate(json.dumps(new_payload), None, 20)
LOGGER.info(status)
LOGGER.debug(json.dumps(new_payload))
def __init__(self, thingname: str, host: str, root_ca_path: str,
private_key_path: str, certificate_path: str):
"""Initiate AWS IoT connection
:param thingname: AWSIoT thing name
:param host: AWSIoT endpoint FQDN
:param root_ca_path: local file path to Amazon root certificate
:param private_key_path: local file path to device private key
:param certificate_path: local file path to device certificate
"""
self.shadow_client = AWSIoTMQTTShadowClient(thingname)
self.shadow_client.configureEndpoint(host, 8883)
self.shadow_client.configureCredentials(root_ca_path,
private_key_path, certificate_path)
self.shadow_client.configureAutoReconnectBackoffTime(1, 32, 20)
self.shadow_client.configureConnectDisconnectTimeout(20)
self.shadow_client.configureMQTTOperationTimeout(20)
mqtt_client = self.shadow_client.getMQTTConnection()
mqtt_client.configureOfflinePublishQueueing(-1)
self.shadow_client.connect(300)
self.shadow_handler = self.shadow_client.createShadowHandlerWithName(
thingname, True)
self.shadow_handler.shadowRegisterDeltaCallback(self.
custom_shadow_callback_delta)
self.status_post('STARTING')
self._callbackresponses = {}
self.event_queue = queue.SimpleQueue()
self.settings = {}
def custom_shadow_callback_delta(self, payload: str, response_status, token
):
"""
:param payload: JSON string ready to be parsed using json.loads(...)
:param response_status: ignored
:param token: ignored
"""
LOGGER.debug(payload)
payload_dict = json.loads(payload)
new_payload = {}
if payload_dict.get('state').get('command'):
self.event_queue.put_nowait({'command': payload_dict.get(
'state').get('command')})
new_payload.update({'state': {'desired': {'command': None}}})
if payload_dict.get('state').get('settings'):
self.event_queue.put_nowait({'settings': payload_dict.get(
'state').get('settings')})
new_payload.update({'state': {'desired': {'settings':
payload_dict.get('state').get('settings')}}})
LOGGER.info('Shadow update: ' + json.dumps(new_payload))
self.shadow_handler.shadowUpdate(json.dumps(new_payload), None, 5)
def custom_shadow_callback_get(self, payload, response_status, token):
"""Callback function records response from get shadow operation
:param payload:
:param response_status:
:param token:
:return:
"""
self._callbackresponses.update({token: {'payload': json.loads(
payload), 'responseStatus': response_status}})
def get_response(self, token):
"""Return prior get shadow operation response
note each response is deleted when returned, i.e. can only be returned once
:param token:
:return:
"""
return self._callbackresponses.pop(token)
def post_param(self):
new_payload = {'state': {'reported': {'settings': self.settings},
'desired': None}}
self.shadow_handler.shadowUpdate(json.dumps(new_payload), None, 5)
def post_state(self, state):
new_payload = {'state': {'reported': {'status': state}, 'desired':
None}}
self.shadow_handler.shadowUpdate(json.dumps(new_payload), None, 20)
LOGGER.info('New state' + json.dumps(state))
def post_temperature(self, temp):
new_payload = {'state': {'reported': {'cputemp': temp}}}
self.shadow_handler.shadowUpdate(json.dumps(new_payload), None, 20)
LOGGER.debug('New temp payload ' + json.dumps(new_payload))
<|reserved_special_token_1|>
#!/usr/bin/env python3
"""Initiates connection to AWSIoT and provides helper functions
deviceshadowhandler.py
by Darren Dunford
"""
import json
import logging
import queue
from AWSIoTPythonSDK.MQTTLib import AWSIoTMQTTShadowClient
# Module-level logger; handlers and level are configured by the application entry point.
LOGGER = logging.getLogger(__name__)
class DeviceShadowHandler:
    """Wraps an AWS IoT device-shadow connection for a single thing.

    Owns the MQTT shadow client, publishes reported-state updates, and
    forwards desired-state deltas (commands / settings) on to
    ``self.event_queue`` for the application loop to consume.
    """

    def status_post(self, status, state=None):
        """Post status message and device state to AWSIoT and LOGGER

        :param status: status string
        :param state: optional dictionary merged into the shadow reported state
        :return:
        """
        # create new JSON payload to update device shadow
        new_payload = {"state": {"reported": {"status": str(status)}, "desired": None}}
        if state:
            # Merge the extra reported keys in place.  A top-level
            # new_payload.update({"state": ...}) would replace the whole
            # "state" value, silently dropping both the status just set and
            # the "desired": None reset.
            new_payload["state"]["reported"].update(state)

        # update shadow
        self.shadow_handler.shadowUpdate(json.dumps(new_payload), None, 20)

        # log to syslog
        LOGGER.info(status)
        LOGGER.debug(json.dumps(new_payload))

    # constructor
    def __init__(self, thingname: str, host: str, root_ca_path: str, private_key_path: str, certificate_path: str):
        """Initiate AWS IoT connection

        :param thingname: AWSIoT thing name
        :param host: AWSIoT endpoint FQDN
        :param root_ca_path: local file path to Amazon root certificate
        :param private_key_path: local file path to device private key
        :param certificate_path: local file path to device certificate
        """

        # Init Shadow Client MQTT connection
        self.shadow_client = AWSIoTMQTTShadowClient(thingname)
        self.shadow_client.configureEndpoint(host, 8883)
        self.shadow_client.configureCredentials(root_ca_path, private_key_path, certificate_path)

        # AWSIoTMQTTShadowClient configuration
        self.shadow_client.configureAutoReconnectBackoffTime(1, 32, 20)
        self.shadow_client.configureConnectDisconnectTimeout(20)  # 20 sec
        self.shadow_client.configureMQTTOperationTimeout(20)  # 20 sec

        # force shadow client to use offline publish queueing
        # overriding the default behaviour for shadow clients in the SDK
        mqtt_client = self.shadow_client.getMQTTConnection()
        mqtt_client.configureOfflinePublishQueueing(-1)

        # Connect to AWS IoT with a 300 second keepalive
        self.shadow_client.connect(300)

        # Create a deviceShadow with persistent subscription and register delta handler
        self.shadow_handler = self.shadow_client.createShadowHandlerWithName(thingname, True)
        self.shadow_handler.shadowRegisterDeltaCallback(self.custom_shadow_callback_delta)

        # initial status post
        self.status_post('STARTING')

        # dictionary to hold callback responses
        self._callbackresponses = {}

        # callbacks in this class post events on to this queue
        self.event_queue = queue.SimpleQueue()

        # settings dictionary, reported to the shadow via post_param()
        self.settings = {}

    # Custom shadow callback for delta -> remote triggering
    def custom_shadow_callback_delta(self, payload: str, response_status, token):
        """Handle a desired-state delta pushed by AWS IoT.

        Any ``command`` or ``settings`` found in the delta is pushed on to
        ``self.event_queue``; the shadow desired state is then updated to
        acknowledge the delta.

        :param payload: JSON string ready to be parsed using json.loads(...)
        :param response_status: ignored
        :param token: ignored
        """
        # DEBUG dump payload in to syslog
        LOGGER.debug(payload)

        # create JSON dictionary from payload
        payload_dict = json.loads(payload)
        state = payload_dict.get('state', {})

        # Build the desired sub-document incrementally.  The previous
        # top-level update({"state": ...}) per branch meant that when a
        # delta carried both a command and settings, the settings branch
        # overwrote the command acknowledgement entirely.
        desired = {}

        # check for command, if received push event on to queue
        if state.get('command'):
            self.event_queue.put_nowait({"command": state.get('command')})
            # clear the desired command so it only triggers once
            desired["command"] = None

        # check for settings, if received push event on to queue
        if state.get('settings'):
            self.event_queue.put_nowait({"settings": state.get('settings')})
            # NOTE(review): desired settings are echoed back unchanged here,
            # leaving the delta pending; they appear to be cleared later via
            # post_param() once applied — confirm against the consumer of
            # event_queue.
            desired["settings"] = state.get('settings')

        new_payload = {"state": {"desired": desired}} if desired else {}

        LOGGER.info("Shadow update: " + json.dumps(new_payload))

        # update shadow instance status
        self.shadow_handler.shadowUpdate(json.dumps(new_payload), None, 5)

    def custom_shadow_callback_get(self, payload, response_status, token):
        """Callback function records response from get shadow operation

        :param payload: JSON string of the shadow document
        :param response_status: status string supplied by the SDK
        :param token: operation token, later used to retrieve the response
        :return:
        """
        self._callbackresponses.update({token: {"payload": json.loads(payload), "responseStatus": response_status}})

    def get_response(self, token):
        """Return prior get shadow operation response

        note each response is deleted when returned, i.e. can only be returned once

        :param token: operation token previously passed to the get callback
        :return: dict with "payload" and "responseStatus" keys
        :raises KeyError: if no response is held for *token*
        """
        return self._callbackresponses.pop(token)

    # post all parameters as a shadow update
    def post_param(self):
        """Report the current settings dictionary and clear desired state."""
        new_payload = {"state": {"reported": {"settings": self.settings}, "desired": None}}
        self.shadow_handler.shadowUpdate(json.dumps(new_payload), None, 5)

    # post state update to device shadow and, if enabled, syslog
    def post_state(self, state):
        """Report a new device status to the shadow and log it.

        :param state: status value placed in the shadow reported "status" key
        """
        # create new JSON payload to update device shadow
        new_payload = {"state": {"reported": {"status": state}, "desired": None}}
        self.shadow_handler.shadowUpdate(json.dumps(new_payload), None, 20)

        # log to syslog
        LOGGER.info("New state" + json.dumps(state))

    def post_temperature(self, temp):
        """Report the device CPU temperature to the shadow.

        :param temp: temperature value placed in the shadow reported "cputemp" key
        """
        # create new JSON payload to send device temperature to shadow
        new_payload = {"state": {"reported": {"cputemp": temp}}}
        self.shadow_handler.shadowUpdate(json.dumps(new_payload), None, 20)

        # log to syslog on debug only
        LOGGER.debug("New temp payload " + json.dumps(new_payload))
|
flexible
|
{
"blob_id": "a6d409b806dbd1e174cac65a26c5e8106a8b93ea",
"index": 3760,
"step-1": "<mask token>\n\n\nclass DeviceShadowHandler:\n\n def status_post(self, status, state=None):\n \"\"\"Post status message and device state to AWSIoT and LOGGER\n\n :param status: status string\n :param state: optional dictionary to add to shadow reported state\n :return:\n \"\"\"\n new_payload = {'state': {'reported': {'status': str(status)},\n 'desired': None}}\n if state:\n new_payload.update({'state': {'reported': state}})\n self.shadow_handler.shadowUpdate(json.dumps(new_payload), None, 20)\n LOGGER.info(status)\n LOGGER.debug(json.dumps(new_payload))\n\n def __init__(self, thingname: str, host: str, root_ca_path: str,\n private_key_path: str, certificate_path: str):\n \"\"\"Initiate AWS IoT connection\n\n :param thingname: AWSIoT thing name\n :param host: AWSIoT endpoint FQDN\n :param root_ca_path: local file path to Amazon root certificate\n :param private_key_path: local file path to device private key\n :param certificate_path: local file path to device certificate\n \"\"\"\n self.shadow_client = AWSIoTMQTTShadowClient(thingname)\n self.shadow_client.configureEndpoint(host, 8883)\n self.shadow_client.configureCredentials(root_ca_path,\n private_key_path, certificate_path)\n self.shadow_client.configureAutoReconnectBackoffTime(1, 32, 20)\n self.shadow_client.configureConnectDisconnectTimeout(20)\n self.shadow_client.configureMQTTOperationTimeout(20)\n mqtt_client = self.shadow_client.getMQTTConnection()\n mqtt_client.configureOfflinePublishQueueing(-1)\n self.shadow_client.connect(300)\n self.shadow_handler = self.shadow_client.createShadowHandlerWithName(\n thingname, True)\n self.shadow_handler.shadowRegisterDeltaCallback(self.\n custom_shadow_callback_delta)\n self.status_post('STARTING')\n self._callbackresponses = {}\n self.event_queue = queue.SimpleQueue()\n self.settings = {}\n\n def custom_shadow_callback_delta(self, payload: str, response_status, token\n ):\n \"\"\"\n\n :param payload: JSON string ready to be parsed using json.loads(...)\n 
:param response_status: ignored\n :param token: ignored\n \"\"\"\n LOGGER.debug(payload)\n payload_dict = json.loads(payload)\n new_payload = {}\n if payload_dict.get('state').get('command'):\n self.event_queue.put_nowait({'command': payload_dict.get(\n 'state').get('command')})\n new_payload.update({'state': {'desired': {'command': None}}})\n if payload_dict.get('state').get('settings'):\n self.event_queue.put_nowait({'settings': payload_dict.get(\n 'state').get('settings')})\n new_payload.update({'state': {'desired': {'settings':\n payload_dict.get('state').get('settings')}}})\n LOGGER.info('Shadow update: ' + json.dumps(new_payload))\n self.shadow_handler.shadowUpdate(json.dumps(new_payload), None, 5)\n\n def custom_shadow_callback_get(self, payload, response_status, token):\n \"\"\"Callback function records response from get shadow operation\n\n :param payload:\n :param response_status:\n :param token:\n :return:\n \"\"\"\n self._callbackresponses.update({token: {'payload': json.loads(\n payload), 'responseStatus': response_status}})\n <mask token>\n <mask token>\n\n def post_state(self, state):\n new_payload = {'state': {'reported': {'status': state}, 'desired':\n None}}\n self.shadow_handler.shadowUpdate(json.dumps(new_payload), None, 20)\n LOGGER.info('New state' + json.dumps(state))\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass DeviceShadowHandler:\n\n def status_post(self, status, state=None):\n \"\"\"Post status message and device state to AWSIoT and LOGGER\n\n :param status: status string\n :param state: optional dictionary to add to shadow reported state\n :return:\n \"\"\"\n new_payload = {'state': {'reported': {'status': str(status)},\n 'desired': None}}\n if state:\n new_payload.update({'state': {'reported': state}})\n self.shadow_handler.shadowUpdate(json.dumps(new_payload), None, 20)\n LOGGER.info(status)\n LOGGER.debug(json.dumps(new_payload))\n\n def __init__(self, thingname: str, host: str, root_ca_path: str,\n private_key_path: str, certificate_path: str):\n \"\"\"Initiate AWS IoT connection\n\n :param thingname: AWSIoT thing name\n :param host: AWSIoT endpoint FQDN\n :param root_ca_path: local file path to Amazon root certificate\n :param private_key_path: local file path to device private key\n :param certificate_path: local file path to device certificate\n \"\"\"\n self.shadow_client = AWSIoTMQTTShadowClient(thingname)\n self.shadow_client.configureEndpoint(host, 8883)\n self.shadow_client.configureCredentials(root_ca_path,\n private_key_path, certificate_path)\n self.shadow_client.configureAutoReconnectBackoffTime(1, 32, 20)\n self.shadow_client.configureConnectDisconnectTimeout(20)\n self.shadow_client.configureMQTTOperationTimeout(20)\n mqtt_client = self.shadow_client.getMQTTConnection()\n mqtt_client.configureOfflinePublishQueueing(-1)\n self.shadow_client.connect(300)\n self.shadow_handler = self.shadow_client.createShadowHandlerWithName(\n thingname, True)\n self.shadow_handler.shadowRegisterDeltaCallback(self.\n custom_shadow_callback_delta)\n self.status_post('STARTING')\n self._callbackresponses = {}\n self.event_queue = queue.SimpleQueue()\n self.settings = {}\n\n def custom_shadow_callback_delta(self, payload: str, response_status, token\n ):\n \"\"\"\n\n :param payload: JSON string ready to be parsed using json.loads(...)\n 
:param response_status: ignored\n :param token: ignored\n \"\"\"\n LOGGER.debug(payload)\n payload_dict = json.loads(payload)\n new_payload = {}\n if payload_dict.get('state').get('command'):\n self.event_queue.put_nowait({'command': payload_dict.get(\n 'state').get('command')})\n new_payload.update({'state': {'desired': {'command': None}}})\n if payload_dict.get('state').get('settings'):\n self.event_queue.put_nowait({'settings': payload_dict.get(\n 'state').get('settings')})\n new_payload.update({'state': {'desired': {'settings':\n payload_dict.get('state').get('settings')}}})\n LOGGER.info('Shadow update: ' + json.dumps(new_payload))\n self.shadow_handler.shadowUpdate(json.dumps(new_payload), None, 5)\n\n def custom_shadow_callback_get(self, payload, response_status, token):\n \"\"\"Callback function records response from get shadow operation\n\n :param payload:\n :param response_status:\n :param token:\n :return:\n \"\"\"\n self._callbackresponses.update({token: {'payload': json.loads(\n payload), 'responseStatus': response_status}})\n <mask token>\n\n def post_param(self):\n new_payload = {'state': {'reported': {'settings': self.settings},\n 'desired': None}}\n self.shadow_handler.shadowUpdate(json.dumps(new_payload), None, 5)\n\n def post_state(self, state):\n new_payload = {'state': {'reported': {'status': state}, 'desired':\n None}}\n self.shadow_handler.shadowUpdate(json.dumps(new_payload), None, 20)\n LOGGER.info('New state' + json.dumps(state))\n\n def post_temperature(self, temp):\n new_payload = {'state': {'reported': {'cputemp': temp}}}\n self.shadow_handler.shadowUpdate(json.dumps(new_payload), None, 20)\n LOGGER.debug('New temp payload ' + json.dumps(new_payload))\n",
"step-3": "<mask token>\n\n\nclass DeviceShadowHandler:\n\n def status_post(self, status, state=None):\n \"\"\"Post status message and device state to AWSIoT and LOGGER\n\n :param status: status string\n :param state: optional dictionary to add to shadow reported state\n :return:\n \"\"\"\n new_payload = {'state': {'reported': {'status': str(status)},\n 'desired': None}}\n if state:\n new_payload.update({'state': {'reported': state}})\n self.shadow_handler.shadowUpdate(json.dumps(new_payload), None, 20)\n LOGGER.info(status)\n LOGGER.debug(json.dumps(new_payload))\n\n def __init__(self, thingname: str, host: str, root_ca_path: str,\n private_key_path: str, certificate_path: str):\n \"\"\"Initiate AWS IoT connection\n\n :param thingname: AWSIoT thing name\n :param host: AWSIoT endpoint FQDN\n :param root_ca_path: local file path to Amazon root certificate\n :param private_key_path: local file path to device private key\n :param certificate_path: local file path to device certificate\n \"\"\"\n self.shadow_client = AWSIoTMQTTShadowClient(thingname)\n self.shadow_client.configureEndpoint(host, 8883)\n self.shadow_client.configureCredentials(root_ca_path,\n private_key_path, certificate_path)\n self.shadow_client.configureAutoReconnectBackoffTime(1, 32, 20)\n self.shadow_client.configureConnectDisconnectTimeout(20)\n self.shadow_client.configureMQTTOperationTimeout(20)\n mqtt_client = self.shadow_client.getMQTTConnection()\n mqtt_client.configureOfflinePublishQueueing(-1)\n self.shadow_client.connect(300)\n self.shadow_handler = self.shadow_client.createShadowHandlerWithName(\n thingname, True)\n self.shadow_handler.shadowRegisterDeltaCallback(self.\n custom_shadow_callback_delta)\n self.status_post('STARTING')\n self._callbackresponses = {}\n self.event_queue = queue.SimpleQueue()\n self.settings = {}\n\n def custom_shadow_callback_delta(self, payload: str, response_status, token\n ):\n \"\"\"\n\n :param payload: JSON string ready to be parsed using json.loads(...)\n 
:param response_status: ignored\n :param token: ignored\n \"\"\"\n LOGGER.debug(payload)\n payload_dict = json.loads(payload)\n new_payload = {}\n if payload_dict.get('state').get('command'):\n self.event_queue.put_nowait({'command': payload_dict.get(\n 'state').get('command')})\n new_payload.update({'state': {'desired': {'command': None}}})\n if payload_dict.get('state').get('settings'):\n self.event_queue.put_nowait({'settings': payload_dict.get(\n 'state').get('settings')})\n new_payload.update({'state': {'desired': {'settings':\n payload_dict.get('state').get('settings')}}})\n LOGGER.info('Shadow update: ' + json.dumps(new_payload))\n self.shadow_handler.shadowUpdate(json.dumps(new_payload), None, 5)\n\n def custom_shadow_callback_get(self, payload, response_status, token):\n \"\"\"Callback function records response from get shadow operation\n\n :param payload:\n :param response_status:\n :param token:\n :return:\n \"\"\"\n self._callbackresponses.update({token: {'payload': json.loads(\n payload), 'responseStatus': response_status}})\n\n def get_response(self, token):\n \"\"\"Return prior get shadow operation response\n\n note each response is deleted when returned, i.e. can only be returned once\n\n :param token:\n :return:\n \"\"\"\n return self._callbackresponses.pop(token)\n\n def post_param(self):\n new_payload = {'state': {'reported': {'settings': self.settings},\n 'desired': None}}\n self.shadow_handler.shadowUpdate(json.dumps(new_payload), None, 5)\n\n def post_state(self, state):\n new_payload = {'state': {'reported': {'status': state}, 'desired':\n None}}\n self.shadow_handler.shadowUpdate(json.dumps(new_payload), None, 20)\n LOGGER.info('New state' + json.dumps(state))\n\n def post_temperature(self, temp):\n new_payload = {'state': {'reported': {'cputemp': temp}}}\n self.shadow_handler.shadowUpdate(json.dumps(new_payload), None, 20)\n LOGGER.debug('New temp payload ' + json.dumps(new_payload))\n",
"step-4": "<mask token>\nLOGGER = logging.getLogger(__name__)\n\n\nclass DeviceShadowHandler:\n\n def status_post(self, status, state=None):\n \"\"\"Post status message and device state to AWSIoT and LOGGER\n\n :param status: status string\n :param state: optional dictionary to add to shadow reported state\n :return:\n \"\"\"\n new_payload = {'state': {'reported': {'status': str(status)},\n 'desired': None}}\n if state:\n new_payload.update({'state': {'reported': state}})\n self.shadow_handler.shadowUpdate(json.dumps(new_payload), None, 20)\n LOGGER.info(status)\n LOGGER.debug(json.dumps(new_payload))\n\n def __init__(self, thingname: str, host: str, root_ca_path: str,\n private_key_path: str, certificate_path: str):\n \"\"\"Initiate AWS IoT connection\n\n :param thingname: AWSIoT thing name\n :param host: AWSIoT endpoint FQDN\n :param root_ca_path: local file path to Amazon root certificate\n :param private_key_path: local file path to device private key\n :param certificate_path: local file path to device certificate\n \"\"\"\n self.shadow_client = AWSIoTMQTTShadowClient(thingname)\n self.shadow_client.configureEndpoint(host, 8883)\n self.shadow_client.configureCredentials(root_ca_path,\n private_key_path, certificate_path)\n self.shadow_client.configureAutoReconnectBackoffTime(1, 32, 20)\n self.shadow_client.configureConnectDisconnectTimeout(20)\n self.shadow_client.configureMQTTOperationTimeout(20)\n mqtt_client = self.shadow_client.getMQTTConnection()\n mqtt_client.configureOfflinePublishQueueing(-1)\n self.shadow_client.connect(300)\n self.shadow_handler = self.shadow_client.createShadowHandlerWithName(\n thingname, True)\n self.shadow_handler.shadowRegisterDeltaCallback(self.\n custom_shadow_callback_delta)\n self.status_post('STARTING')\n self._callbackresponses = {}\n self.event_queue = queue.SimpleQueue()\n self.settings = {}\n\n def custom_shadow_callback_delta(self, payload: str, response_status, token\n ):\n \"\"\"\n\n :param payload: JSON string ready 
to be parsed using json.loads(...)\n :param response_status: ignored\n :param token: ignored\n \"\"\"\n LOGGER.debug(payload)\n payload_dict = json.loads(payload)\n new_payload = {}\n if payload_dict.get('state').get('command'):\n self.event_queue.put_nowait({'command': payload_dict.get(\n 'state').get('command')})\n new_payload.update({'state': {'desired': {'command': None}}})\n if payload_dict.get('state').get('settings'):\n self.event_queue.put_nowait({'settings': payload_dict.get(\n 'state').get('settings')})\n new_payload.update({'state': {'desired': {'settings':\n payload_dict.get('state').get('settings')}}})\n LOGGER.info('Shadow update: ' + json.dumps(new_payload))\n self.shadow_handler.shadowUpdate(json.dumps(new_payload), None, 5)\n\n def custom_shadow_callback_get(self, payload, response_status, token):\n \"\"\"Callback function records response from get shadow operation\n\n :param payload:\n :param response_status:\n :param token:\n :return:\n \"\"\"\n self._callbackresponses.update({token: {'payload': json.loads(\n payload), 'responseStatus': response_status}})\n\n def get_response(self, token):\n \"\"\"Return prior get shadow operation response\n\n note each response is deleted when returned, i.e. can only be returned once\n\n :param token:\n :return:\n \"\"\"\n return self._callbackresponses.pop(token)\n\n def post_param(self):\n new_payload = {'state': {'reported': {'settings': self.settings},\n 'desired': None}}\n self.shadow_handler.shadowUpdate(json.dumps(new_payload), None, 5)\n\n def post_state(self, state):\n new_payload = {'state': {'reported': {'status': state}, 'desired':\n None}}\n self.shadow_handler.shadowUpdate(json.dumps(new_payload), None, 20)\n LOGGER.info('New state' + json.dumps(state))\n\n def post_temperature(self, temp):\n new_payload = {'state': {'reported': {'cputemp': temp}}}\n self.shadow_handler.shadowUpdate(json.dumps(new_payload), None, 20)\n LOGGER.debug('New temp payload ' + json.dumps(new_payload))\n",
"step-5": "#!/usr/bin/env python3\n\"\"\"Initiates connection to AWSIoT and provides helper functions\n\ndeviceshadowhandler.py\n\nby Darren Dunford\n\"\"\"\n\nimport json\nimport logging\nimport queue\nfrom AWSIoTPythonSDK.MQTTLib import AWSIoTMQTTShadowClient\n\nLOGGER = logging.getLogger(__name__)\n\n\nclass DeviceShadowHandler:\n\n def status_post(self, status, state=None):\n \"\"\"Post status message and device state to AWSIoT and LOGGER\n\n :param status: status string\n :param state: optional dictionary to add to shadow reported state\n :return:\n \"\"\"\n\n # create new JSON payload to update device shadow\n new_payload = {\"state\": {\"reported\": {\"status\": str(status)}, \"desired\": None}}\n if state:\n new_payload.update({\"state\": {\"reported\": state}})\n\n # update shadow\n self.shadow_handler.shadowUpdate(json.dumps(new_payload), None, 20)\n\n # log to syslog\n LOGGER.info(status)\n LOGGER.debug(json.dumps(new_payload))\n\n # constructor\n def __init__(self, thingname: str, host: str, root_ca_path: str, private_key_path: str, certificate_path: str):\n \"\"\"Initiate AWS IoT connection\n\n :param thingname: AWSIoT thing name\n :param host: AWSIoT endpoint FQDN\n :param root_ca_path: local file path to Amazon root certificate\n :param private_key_path: local file path to device private key\n :param certificate_path: local file path to device certificate\n \"\"\"\n\n # Init Shadow Client MQTT connection\n self.shadow_client = AWSIoTMQTTShadowClient(thingname)\n self.shadow_client.configureEndpoint(host, 8883)\n self.shadow_client.configureCredentials(root_ca_path, private_key_path, certificate_path)\n\n # AWSIoTMQTTShadowClient configuration\n self.shadow_client.configureAutoReconnectBackoffTime(1, 32, 20)\n self.shadow_client.configureConnectDisconnectTimeout(20) # 20 sec\n self.shadow_client.configureMQTTOperationTimeout(20) # 20 sec\n\n # force shadow client to use offline publish queueing\n # overriding the default behaviour for shadow clients 
in the SDK\n mqtt_client = self.shadow_client.getMQTTConnection()\n mqtt_client.configureOfflinePublishQueueing(-1)\n\n # Connect to AWS IoT with a 300 second keepalive\n self.shadow_client.connect(300)\n\n # Create a deviceShadow with persistent subscription and register delta handler\n self.shadow_handler = self.shadow_client.createShadowHandlerWithName(thingname, True)\n self.shadow_handler.shadowRegisterDeltaCallback(self.custom_shadow_callback_delta)\n\n # initial status post\n self.status_post('STARTING')\n\n # dictionary to hold callback responses\n self._callbackresponses = {}\n\n # callbacks in this class post events on to this queue\n self.event_queue = queue.SimpleQueue()\n\n self.settings = {}\n\n # Custom shadow callback for delta -> remote triggering\n def custom_shadow_callback_delta(self, payload: str, response_status, token):\n \"\"\"\n\n :param payload: JSON string ready to be parsed using json.loads(...)\n :param response_status: ignored\n :param token: ignored\n \"\"\"\n\n # DEBUG dump payload in to syslog\n LOGGER.debug(payload)\n\n # create JSON dictionary from payload\n payload_dict = json.loads(payload)\n new_payload = {}\n\n # check for command, if received push event on to queue\n if payload_dict.get('state').get('command'):\n self.event_queue.put_nowait({\"command\":payload_dict.get('state').get('command')})\n new_payload.update({\"state\": {\"desired\": {\"command\": None}}})\n\n # check for settings, if received push event on to queue\n if payload_dict.get('state').get('settings'):\n self.event_queue.put_nowait({\"settings\":payload_dict.get('state').get('settings')})\n new_payload.update({\"state\": {\"desired\": {\"settings\": payload_dict.get('state').get('settings')}}})\n\n LOGGER.info(\"Shadow update: \" + json.dumps(new_payload))\n\n # update shadow instance status\n self.shadow_handler.shadowUpdate(json.dumps(new_payload), None, 5)\n\n def custom_shadow_callback_get(self, payload, response_status, token):\n \"\"\"Callback 
function records response from get shadow operation\n\n :param payload:\n :param response_status:\n :param token:\n :return:\n \"\"\"\n self._callbackresponses.update({token: {\"payload\": json.loads(payload), \"responseStatus\": response_status}})\n\n def get_response(self, token):\n \"\"\"Return prior get shadow operation response\n\n note each response is deleted when returned, i.e. can only be returned once\n\n :param token:\n :return:\n \"\"\"\n return self._callbackresponses.pop(token)\n\n # post all parameters as a shadow update\n def post_param(self):\n new_payload = {\"state\": {\"reported\": {\"settings\": self.settings}, \"desired\": None}}\n self.shadow_handler.shadowUpdate(json.dumps(new_payload), None, 5)\n\n # post state update to device shadow and, if enabled, syslog\n def post_state(self, state):\n\n # create new JSON payload to update device shadow\n new_payload = {\"state\": {\"reported\": {\"status\": state}, \"desired\": None}}\n self.shadow_handler.shadowUpdate(json.dumps(new_payload), None, 20)\n\n # log to syslog\n LOGGER.info(\"New state\" + json.dumps(state))\n\n def post_temperature(self, temp):\n\n # create new JSON payload to send device temperature to shadow\n new_payload = {\"state\": {\"reported\": {\"cputemp\": temp}}}\n self.shadow_handler.shadowUpdate(json.dumps(new_payload), None, 20)\n\n # log to syslog on debug only\n LOGGER.debug(\"New temp payload \" + json.dumps(new_payload))\n",
"step-ids": [
6,
8,
9,
10,
12
]
}
|
[
6,
8,
9,
10,
12
] |
import xdrlib,sys
import xlrd
def open_excel(file='D:\基金公司\数据库-制表符\资产组合-基金公司维度.xlsx'):
    """Open an Excel workbook with xlrd.

    :param file: path to the workbook (defaults to the fund-portfolio sheet).
    :return: an ``xlrd.Book`` object, or ``None`` when opening fails.
    """
    try:
        # Bug fix: the original ignored the ``file`` argument and always
        # re-opened the hard-coded default path.
        return xlrd.open_workbook(file)
    except Exception as e:
        # Best-effort: report the failure and implicitly return None.
        print(str(e))
def excel_table_byindex(file='D:\基金公司\数据库-制表符\资产组合-基金公司维度.xlsx', colnameindex=0, by_index=0):
    """Read one worksheet into a list of dicts keyed by a header row.

    :param file: path to the workbook to read.
    :param colnameindex: row index holding the column names (default 0).
    :param by_index: worksheet index within the workbook (default 0).
    :return: list of {column_name: cell_value} dicts, one per data row;
             empty list when the workbook cannot be opened.
    """
    # Bug fix: pass the caller's path through instead of the hard-coded one.
    data = open_excel(file)
    if data is None:
        # open_excel already printed the error; nothing to read.
        return []
    table = data.sheets()[by_index]
    nrows = table.nrows
    colnames = table.row_values(colnameindex)
    rows = []
    # Data rows start at 1 because row ``colnameindex`` (0) is the header.
    for rownum in range(1, nrows):
        row = table.row_values(rownum)
        if row:
            # Bug fix: the original called ``list.apend`` (a typo that raises
            # AttributeError on the first data row) and shadowed ``list``.
            rows.append(dict(zip(colnames, row)))
    return rows
|
normal
|
{
"blob_id": "d211594a034489d36a5648bf0b926fbd734fd0df",
"index": 6928,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef excel_table_byindex(file='D:\\\\基金公司\\\\数据库-制表符\\\\资产组合-基金公司维度.xlsx',\n colnameindex=0, by_index=0):\n data = open_excel(file='D:\\\\基金公司\\\\数据库-制表符\\\\资产组合-基金公司维度.xlsx')\n table = data.sheets()[by_index]\n nrows = table.nrows\n ncols = table.ncols\n colnames = table.row_values(colnameindex)\n list = []\n for rownum in range(1, nrows):\n row = table.row_values(rownum)\n if row:\n app = {}\n for i in range(len(colnames)):\n app[colnames[i]] = row[i]\n list.apend(app)\n return list\n",
"step-3": "<mask token>\n\n\ndef open_excel(file='D:\\\\基金公司\\\\数据库-制表符\\\\资产组合-基金公司维度.xlsx'):\n try:\n data = xlrd.open_workbook('D:\\\\基金公司\\\\数据库-制表符\\\\资产组合-基金公司维度.xlsx')\n return data\n except Exception as e:\n print(str(e))\n\n\ndef excel_table_byindex(file='D:\\\\基金公司\\\\数据库-制表符\\\\资产组合-基金公司维度.xlsx',\n colnameindex=0, by_index=0):\n data = open_excel(file='D:\\\\基金公司\\\\数据库-制表符\\\\资产组合-基金公司维度.xlsx')\n table = data.sheets()[by_index]\n nrows = table.nrows\n ncols = table.ncols\n colnames = table.row_values(colnameindex)\n list = []\n for rownum in range(1, nrows):\n row = table.row_values(rownum)\n if row:\n app = {}\n for i in range(len(colnames)):\n app[colnames[i]] = row[i]\n list.apend(app)\n return list\n",
"step-4": "import xdrlib, sys\nimport xlrd\n\n\ndef open_excel(file='D:\\\\基金公司\\\\数据库-制表符\\\\资产组合-基金公司维度.xlsx'):\n try:\n data = xlrd.open_workbook('D:\\\\基金公司\\\\数据库-制表符\\\\资产组合-基金公司维度.xlsx')\n return data\n except Exception as e:\n print(str(e))\n\n\ndef excel_table_byindex(file='D:\\\\基金公司\\\\数据库-制表符\\\\资产组合-基金公司维度.xlsx',\n colnameindex=0, by_index=0):\n data = open_excel(file='D:\\\\基金公司\\\\数据库-制表符\\\\资产组合-基金公司维度.xlsx')\n table = data.sheets()[by_index]\n nrows = table.nrows\n ncols = table.ncols\n colnames = table.row_values(colnameindex)\n list = []\n for rownum in range(1, nrows):\n row = table.row_values(rownum)\n if row:\n app = {}\n for i in range(len(colnames)):\n app[colnames[i]] = row[i]\n list.apend(app)\n return list\n",
"step-5": "import xdrlib,sys\nimport xlrd\ndef open_excel(file='D:\\基金公司\\数据库-制表符\\资产组合-基金公司维度.xlsx'):\n try:\n data=xlrd.open_workbook('D:\\基金公司\\数据库-制表符\\资产组合-基金公司维度.xlsx')\n return data\n except Exception as e:\n print (str(e))\ndef excel_table_byindex(file='D:\\基金公司\\数据库-制表符\\资产组合-基金公司维度.xlsx',colnameindex=0,by_index=0):\n data=open_excel(file='D:\\基金公司\\数据库-制表符\\资产组合-基金公司维度.xlsx')\n table=data.sheets()[by_index]\n nrows=table.nrows\n ncols=table.ncols\n colnames=table.row_values(colnameindex)\n list=[]\n for rownum in range(1,nrows):\n row=table.row_values(rownum)\n if row:\n app={}\n for i in range(len(colnames)):\n app[colnames[i]]=row[i]\n list.apend(app)\n return list\n\n\n\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from practice.demo4 import paixu

# Script entry point: prompt for the upper numeric bound, then run the
# sorting demo from practice.demo4 on it.
if __name__ == '__main__':
    upper_bound = int(input("请输入最大的数字范围:"))
    paixu(upper_bound)
|
normal
|
{
"blob_id": "a777c6d76ef2ae15544a91bcfba0dbeabce0470a",
"index": 5377,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n n = int(input('请输入最大的数字范围:'))\n paixu(n)\n",
"step-3": "from practice.demo4 import paixu\nif __name__ == '__main__':\n n = int(input('请输入最大的数字范围:'))\n paixu(n)\n",
"step-4": "from practice.demo4 import paixu\nif __name__ == '__main__':\n n=int(input(\"请输入最大的数字范围:\"))\n paixu(n)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# Toy start/stop prompt: reads one command and updates the car state.
car_state = False  # tracks whether the car is currently started

u_input = input('>')

# NOTE(review): this check runs before any command is processed, so with the
# initial state it can never fire; kept to preserve the original flow.
if car_state == True:
    print('Car is stopped!')

if u_input == 'start':
    car_state = True
    print('Car has started!')
elif u_input == 'stop':
    # Bug fix: the original used '==' (a no-op comparison), so the state was
    # never actually reset.
    car_state = False
    print('Car has stopped!')
else:
    # Bug fix: the original triple-quoted literal rendered a doubled
    # apostrophe ("I don''t understand that...").
    print("I don't understand that...")
|
normal
|
{
"blob_id": "2766339632200c26a8c6cd3abff28b1495870b9a",
"index": 9207,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif car_state == True:\n print('Car is stopped!')\nif u_input == 'start':\n car_state = True\n print('Car has started!')\nelif u_input == 'stop':\n car_state == False\n print('Car has stopped!')\nelse:\n print(\"I don''t understand that...\")\n",
"step-3": "car_state = False\nu_input = input(f'>')\nif car_state == True:\n print('Car is stopped!')\nif u_input == 'start':\n car_state = True\n print('Car has started!')\nelif u_input == 'stop':\n car_state == False\n print('Car has stopped!')\nelse:\n print(\"I don''t understand that...\")\n",
"step-4": "car_state = False\r\nu_input = input(f'>')\r\n\r\nif car_state == True:\r\n print('Car is stopped!')\r\n\r\nif u_input == 'start':\r\n car_state = True\r\n print('Car has started!')\r\nelif u_input == 'stop':\r\n car_state == False\r\n print('Car has stopped!')\r\nelse:\r\n print('''I don''t understand that...''')",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
def contador_notas(multiplo, numero):
    """Return how many notes of value ``multiplo`` make up ``numero``.

    Uses true division, so an exact multiple yields a float count;
    returns -1 when ``numero`` is not an exact multiple of ``multiplo``.
    """
    if numero % multiplo != 0:
        # Amount cannot be paid exactly with this denomination.
        return -1
    return numero / multiplo
# Read the requested amount from stdin and report how many R$100 notes fit.
entrada = int(input())
resultado = contador_notas(100, entrada)
# -1 signals the amount is not an exact multiple of 100; print nothing then.
if (resultado != -1):
    print("{} nota(s) de R$ {}".format(resultado, 100))
|
normal
|
{
"blob_id": "a5c19ad60ac6312631273858cebaae944a2008ec",
"index": 8876,
"step-1": "<mask token>\n",
"step-2": "def contador_notas(multiplo, numero):\n if numero % multiplo == 0:\n notas = numero / multiplo\n return notas\n else:\n return -1\n\n\n<mask token>\n",
"step-3": "def contador_notas(multiplo, numero):\n if numero % multiplo == 0:\n notas = numero / multiplo\n return notas\n else:\n return -1\n\n\n<mask token>\nif resultado != -1:\n print('{} nota(s) de R$ {}'.format(resultado, 100))\n",
"step-4": "def contador_notas(multiplo, numero):\n if numero % multiplo == 0:\n notas = numero / multiplo\n return notas\n else:\n return -1\n\n\nentrada = int(input())\nresultado = contador_notas(100, entrada)\nif resultado != -1:\n print('{} nota(s) de R$ {}'.format(resultado, 100))\n",
"step-5": "def contador_notas(multiplo, numero):\n if(numero % multiplo == 0):\n notas = numero / multiplo\n return notas\n else:\n return -1\n\n\nentrada = int(input())\nresultado = contador_notas(100, entrada)\nif (resultado != -1):\n print(\"{} nota(s) de R$ {}\".format(resultado, 100))",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class User(UserMixin, db.Model):
    """Application user; Flask-Login's ``UserMixin`` supplies session helpers."""
    __tablename__ = 'users'
    id = db.Column(db.Integer, primary_key=True)
    # Display name of the user.
    name = db.Column(db.String(64))
    email = db.Column(db.String(64), unique=True, index=True)
    username = db.Column(db.String(64), unique=True, index=True)
    # Only the salted hash is stored; see the write-only ``password`` property.
    password_hash = db.Column(db.String(128))
    # Many-to-many: games this user participates in (via game_players).
    games = db.relationship('Game', secondary='game_players')
    @property
    def password(self):
        # Write-only attribute: reading the plaintext is never allowed.
        raise AttributeError('password is not a readable attribute')
    @password.setter
    def password(self, password):
        # Persist only a salted hash of the supplied plaintext.
        self.password_hash = generate_password_hash(password)
    def verify_password(self, password):
        """Return True when ``password`` matches the stored hash."""
        return check_password_hash(self.password_hash, password)
@login_manager.user_loader
def load_user(user_id):
    """Flask-Login callback: reload a user from the session's stored id."""
    return User.query.get(int(user_id))
class Game(db.Model):
    """A scheduled game; its creator is the admin and first enrolled player."""
    __tablename__ = 'games'
    id = db.Column(db.Integer, primary_key=True)
    # The user who created (and manages) this game.
    admin_id = db.Column(db.Integer, db.ForeignKey('users.id'))
    admin = db.relationship('User', backref='admin', lazy='joined')
    name = db.Column(db.String(64))
    description = db.Column(db.String(100))
    date = db.Column(db.Date())
    time = db.Column(db.Time())
    field = db.Column(db.String(100), nullable=True)
    # Many-to-many: users enrolled in this game (via game_players).
    players = db.relationship('User', secondary='game_players')
    def __init__(self, **kwargs):
        """Create the game and auto-enroll the admin as the first player."""
        super(Game, self).__init__(**kwargs)
        self.players.append(self.admin)
class GamePlayer(db.Model):
    """Association row linking one user to one game (many-to-many join table)."""
    __tablename__ = 'game_players'
    id = db.Column(db.Integer, primary_key=True)
    user_id = db.Column(db.Integer, db.ForeignKey('users.id'))
    game_id = db.Column(db.Integer, db.ForeignKey('games.id'))
    user = db.relationship('User', backref='user', lazy='joined')
    game = db.relationship('Game', backref='game', lazy='joined')
class RegistrationForm(FlaskForm):
    """Sign-up form with uniqueness checks for email and username."""
    email = StringField('Email', validators=[DataRequired(), Length(1, 64),
        Email()])
    username = StringField('Username', validators=[DataRequired(), Length(1,
        64), Regexp('^[A-Za-z][A-Za-z0-9_.]*$', 0,
        'Usernames must have only letters, numbers, dots or underscores')])
    name = StringField('Name', validators=[DataRequired(), Length(1, 64)])
    password = PasswordField('Password', validators=[DataRequired()])
    password2 = PasswordField('Confirm password', validators=[DataRequired(
        ), EqualTo('password', message='Passwords must match.')])
    submit = SubmitField('Register')
    def validate_email(self, field):
        # WTForms convention: validate_<field> runs as an inline validator.
        if User.query.filter_by(email=field.data.lower()).first():
            # Bug fix: corrected the typo "alreadu" in the user-facing message.
            raise ValidationError('Email already registered')
    def validate_username(self, field):
        if User.query.filter_by(username=field.data).first():
            raise ValidationError('Username already taken')
class LoginForm(FlaskForm):
    """Email/password login form with an optional remember-me flag."""
    email = StringField('Email', validators=[DataRequired(), Length(1, 64),
        Email()])
    password = PasswordField('Password', validators=[DataRequired()])
    remember_me = BooleanField('Keep me logged in')
    submit = SubmitField('Log In')
class NewGameForm(FlaskForm):
    """Form for creating a game; date uses ISO format, time uses HH:MM."""
    name = StringField('Name', validators=[DataRequired()])
    description = TextAreaField('Description', validators=[DataRequired()])
    date = DateField('Date', validators=[DataRequired()], format='%Y-%m-%d')
    time = TimeField('Time', format='%H:%M')
    field = StringField('Field', validators=[DataRequired()])
    submit = SubmitField('Create New Game')
<|reserved_special_token_0|>
@app.route('/')
@app.route('/games')
@login_required
def index():
    """List the games the current user is enrolled in."""
    game_player = GamePlayer.query.filter_by(user=current_user).all()
    # render_player_game is defined elsewhere in this file; presumably it maps
    # each association row to its display data — TODO confirm.
    games = list(map(render_player_game, game_player))
    return render_template('index.html', games=games)
<|reserved_special_token_0|>
@app.route('/games/<int:id>/quit')
@login_required
def game_quit(id):
    """Remove the current user from game ``id``, then redirect to its page."""
    game = Game.query.get(id)
    if game:
        if current_user in game.players:
            game.players.remove(current_user)
            db.session.commit()
            # NOTE(review): 'succesfully'/'sucess' look misspelled, but the
            # category string may be matched by templates — confirm before fixing.
            flash('You were removed succesfully', 'sucess')
        else:
            flash('You were not in this game', 'warning')
    else:
        flash("The game you're trying to quit does not exist", 'warning')
    return redirect(url_for('game_details', id=id))
<|reserved_special_token_0|>
@app.route('/login', methods=['GET', 'POST'])
def user_login():
    """Authenticate a user and redirect to the originally requested page."""
    form = LoginForm()
    if form.validate_on_submit():
        user = User.query.filter_by(email=form.email.data.lower()).first()
        if user is not None and user.verify_password(form.password.data):
            login_user(user, form.remember_me.data)
            next = request.args.get('next')
            # Only follow relative redirect targets (open-redirect guard).
            if next is None or not next.startswith('/'):
                next = url_for('index')
            return redirect(next)
        flash('Invalid email or password')
    return render_template('login.html', form=form)
@app.route('/logout')
@login_required
def user_logout():
    """End the current session and return to the index page."""
    logout_user()
    flash('You have been logged out.')
    return redirect(url_for('index'))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class User(UserMixin, db.Model):
__tablename__ = 'users'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(64))
email = db.Column(db.String(64), unique=True, index=True)
username = db.Column(db.String(64), unique=True, index=True)
password_hash = db.Column(db.String(128))
games = db.relationship('Game', secondary='game_players')
@property
def password(self):
raise AttributeError('password is not a readable attribute')
@password.setter
def password(self, password):
self.password_hash = generate_password_hash(password)
def verify_password(self, password):
return check_password_hash(self.password_hash, password)
@login_manager.user_loader
def load_user(user_id):
return User.query.get(int(user_id))
class Game(db.Model):
__tablename__ = 'games'
id = db.Column(db.Integer, primary_key=True)
admin_id = db.Column(db.Integer, db.ForeignKey('users.id'))
admin = db.relationship('User', backref='admin', lazy='joined')
name = db.Column(db.String(64))
description = db.Column(db.String(100))
date = db.Column(db.Date())
time = db.Column(db.Time())
field = db.Column(db.String(100), nullable=True)
players = db.relationship('User', secondary='game_players')
def __init__(self, **kwargs):
super(Game, self).__init__(**kwargs)
self.players.append(self.admin)
class GamePlayer(db.Model):
__tablename__ = 'game_players'
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.Integer, db.ForeignKey('users.id'))
game_id = db.Column(db.Integer, db.ForeignKey('games.id'))
user = db.relationship('User', backref='user', lazy='joined')
game = db.relationship('Game', backref='game', lazy='joined')
class RegistrationForm(FlaskForm):
email = StringField('Email', validators=[DataRequired(), Length(1, 64),
Email()])
username = StringField('Username', validators=[DataRequired(), Length(1,
64), Regexp('^[A-Za-z][A-Za-z0-9_.]*$', 0,
'Usernames must have only letters, numbers, dots or underscores')])
name = StringField('Name', validators=[DataRequired(), Length(1, 64)])
password = PasswordField('Password', validators=[DataRequired()])
password2 = PasswordField('Confirm password', validators=[DataRequired(
), EqualTo('password', message='Passwords must match.')])
submit = SubmitField('Register')
def validate_email(self, field):
if User.query.filter_by(email=field.data.lower()).first():
raise ValidationError('Email alreadu registered')
def validate_username(self, field):
if User.query.filter_by(username=field.data).first():
raise ValidationError('Username already taken')
class LoginForm(FlaskForm):
email = StringField('Email', validators=[DataRequired(), Length(1, 64),
Email()])
password = PasswordField('Password', validators=[DataRequired()])
remember_me = BooleanField('Keep me logged in')
submit = SubmitField('Log In')
class NewGameForm(FlaskForm):
name = StringField('Name', validators=[DataRequired()])
description = TextAreaField('Description', validators=[DataRequired()])
date = DateField('Date', validators=[DataRequired()], format='%Y-%m-%d')
time = TimeField('Time', format='%H:%M')
field = StringField('Field', validators=[DataRequired()])
submit = SubmitField('Create New Game')
<|reserved_special_token_0|>
@app.route('/')
@app.route('/games')
@login_required
def index():
game_player = GamePlayer.query.filter_by(user=current_user).all()
games = list(map(render_player_game, game_player))
return render_template('index.html', games=games)
<|reserved_special_token_0|>
@app.route('/games/<int:id>/join')
@login_required
def game_join(id):
    """Enroll the current user in game ``id``, then redirect to its page."""
    game = Game.query.get(id)
    if game:
        if current_user not in game.players:
            game.players.append(current_user)
            db.session.commit()
            # NOTE(review): 'succesfully'/'sucess' look misspelled, but the
            # category string may be matched by templates — confirm before fixing.
            flash('You were added succesfully', 'sucess')
        else:
            flash('You already were in this game', 'warning')
    else:
        flash("The game you're trying to enter does not exist", 'warning')
    return redirect(url_for('game_details', id=id))
@app.route('/games/<int:id>/quit')
@login_required
def game_quit(id):
game = Game.query.get(id)
if game:
if current_user in game.players:
game.players.remove(current_user)
db.session.commit()
flash('You were removed succesfully', 'sucess')
else:
flash('You were not in this game', 'warning')
else:
flash("The game you're trying to quit does not exist", 'warning')
return redirect(url_for('game_details', id=id))
<|reserved_special_token_0|>
@app.route('/login', methods=['GET', 'POST'])
def user_login():
form = LoginForm()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data.lower()).first()
if user is not None and user.verify_password(form.password.data):
login_user(user, form.remember_me.data)
next = request.args.get('next')
if next is None or not next.startswith('/'):
next = url_for('index')
return redirect(next)
flash('Invalid email or password')
return render_template('login.html', form=form)
@app.route('/logout')
@login_required
def user_logout():
logout_user()
flash('You have been logged out.')
return redirect(url_for('index'))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class User(UserMixin, db.Model):
__tablename__ = 'users'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(64))
email = db.Column(db.String(64), unique=True, index=True)
username = db.Column(db.String(64), unique=True, index=True)
password_hash = db.Column(db.String(128))
games = db.relationship('Game', secondary='game_players')
@property
def password(self):
raise AttributeError('password is not a readable attribute')
@password.setter
def password(self, password):
self.password_hash = generate_password_hash(password)
def verify_password(self, password):
return check_password_hash(self.password_hash, password)
@login_manager.user_loader
def load_user(user_id):
return User.query.get(int(user_id))
class Game(db.Model):
__tablename__ = 'games'
id = db.Column(db.Integer, primary_key=True)
admin_id = db.Column(db.Integer, db.ForeignKey('users.id'))
admin = db.relationship('User', backref='admin', lazy='joined')
name = db.Column(db.String(64))
description = db.Column(db.String(100))
date = db.Column(db.Date())
time = db.Column(db.Time())
field = db.Column(db.String(100), nullable=True)
players = db.relationship('User', secondary='game_players')
def __init__(self, **kwargs):
super(Game, self).__init__(**kwargs)
self.players.append(self.admin)
class GamePlayer(db.Model):
__tablename__ = 'game_players'
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.Integer, db.ForeignKey('users.id'))
game_id = db.Column(db.Integer, db.ForeignKey('games.id'))
user = db.relationship('User', backref='user', lazy='joined')
game = db.relationship('Game', backref='game', lazy='joined')
class RegistrationForm(FlaskForm):
email = StringField('Email', validators=[DataRequired(), Length(1, 64),
Email()])
username = StringField('Username', validators=[DataRequired(), Length(1,
64), Regexp('^[A-Za-z][A-Za-z0-9_.]*$', 0,
'Usernames must have only letters, numbers, dots or underscores')])
name = StringField('Name', validators=[DataRequired(), Length(1, 64)])
password = PasswordField('Password', validators=[DataRequired()])
password2 = PasswordField('Confirm password', validators=[DataRequired(
), EqualTo('password', message='Passwords must match.')])
submit = SubmitField('Register')
def validate_email(self, field):
if User.query.filter_by(email=field.data.lower()).first():
raise ValidationError('Email alreadu registered')
def validate_username(self, field):
if User.query.filter_by(username=field.data).first():
raise ValidationError('Username already taken')
class LoginForm(FlaskForm):
email = StringField('Email', validators=[DataRequired(), Length(1, 64),
Email()])
password = PasswordField('Password', validators=[DataRequired()])
remember_me = BooleanField('Keep me logged in')
submit = SubmitField('Log In')
class NewGameForm(FlaskForm):
name = StringField('Name', validators=[DataRequired()])
description = TextAreaField('Description', validators=[DataRequired()])
date = DateField('Date', validators=[DataRequired()], format='%Y-%m-%d')
time = TimeField('Time', format='%H:%M')
field = StringField('Field', validators=[DataRequired()])
submit = SubmitField('Create New Game')
<|reserved_special_token_0|>
@app.route('/')
@app.route('/games')
@login_required
def index():
game_player = GamePlayer.query.filter_by(user=current_user).all()
games = list(map(render_player_game, game_player))
return render_template('index.html', games=games)
@app.route('/games/join')
@app.route('/games/<int:id>')
def game_details(id=None):
    """Show the detail page for one game; a missing id yields ``game=None``."""
    game = Game.query.get(id)
    return render_template('game_details.html', game=game)
@app.route('/games/<int:id>/join')
@login_required
def game_join(id):
game = Game.query.get(id)
if game:
if current_user not in game.players:
game.players.append(current_user)
db.session.commit()
flash('You were added succesfully', 'sucess')
else:
flash('You already were in this game', 'warning')
else:
flash("The game you're trying to enter does not exist", 'warning')
return redirect(url_for('game_details', id=id))
@app.route('/games/<int:id>/quit')
@login_required
def game_quit(id):
game = Game.query.get(id)
if game:
if current_user in game.players:
game.players.remove(current_user)
db.session.commit()
flash('You were removed succesfully', 'sucess')
else:
flash('You were not in this game', 'warning')
else:
flash("The game you're trying to quit does not exist", 'warning')
return redirect(url_for('game_details', id=id))
<|reserved_special_token_0|>
@app.route('/login', methods=['GET', 'POST'])
def user_login():
form = LoginForm()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data.lower()).first()
if user is not None and user.verify_password(form.password.data):
login_user(user, form.remember_me.data)
next = request.args.get('next')
if next is None or not next.startswith('/'):
next = url_for('index')
return redirect(next)
flash('Invalid email or password')
return render_template('login.html', form=form)
@app.route('/logout')
@login_required
def user_logout():
logout_user()
flash('You have been logged out.')
return redirect(url_for('index'))
@app.route('/test')
def test():
    """Smoke-test route that renders the bare layout template."""
    return render_template('layout.html', title='Test')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class User(UserMixin, db.Model):
__tablename__ = 'users'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(64))
email = db.Column(db.String(64), unique=True, index=True)
username = db.Column(db.String(64), unique=True, index=True)
password_hash = db.Column(db.String(128))
games = db.relationship('Game', secondary='game_players')
@property
def password(self):
raise AttributeError('password is not a readable attribute')
@password.setter
def password(self, password):
self.password_hash = generate_password_hash(password)
def verify_password(self, password):
return check_password_hash(self.password_hash, password)
@login_manager.user_loader
def load_user(user_id):
return User.query.get(int(user_id))
class Game(db.Model):
    """A scheduled match; the creating admin is auto-added as a player."""
    __tablename__ = 'games'
    id = db.Column(db.Integer, primary_key=True)
    admin_id = db.Column(db.Integer, db.ForeignKey('users.id'))
    admin = db.relationship('User', backref='admin', lazy='joined')
    name = db.Column(db.String(64))
    description = db.Column(db.String(100))
    date = db.Column(db.Date())
    time = db.Column(db.Time())
    field = db.Column(db.String(100), nullable=True)  # venue; optional
    players = db.relationship('User', secondary='game_players')
    def __init__(self, **kwargs):
        super(Game, self).__init__(**kwargs)
        # The admin always participates in their own game.
        self.players.append(self.admin)
class GamePlayer(db.Model):
    """Association table linking users to the games they joined."""
    __tablename__ = 'game_players'
    id = db.Column(db.Integer, primary_key=True)
    user_id = db.Column(db.Integer, db.ForeignKey('users.id'))
    game_id = db.Column(db.Integer, db.ForeignKey('games.id'))
    # NOTE(review): backref names 'user'/'game' are unusually generic —
    # verify they do not clash with other relationships on those models.
    user = db.relationship('User', backref='user', lazy='joined')
    game = db.relationship('Game', backref='game', lazy='joined')
class RegistrationForm(FlaskForm):
    """Sign-up form with uniqueness checks for email and username."""
    email = StringField('Email', validators=[
        DataRequired(), Length(1, 64), Email()])
    username = StringField('Username', validators=[
        DataRequired(), Length(1, 64),
        Regexp('^[A-Za-z][A-Za-z0-9_.]*$', 0,
               'Usernames must have only letters, numbers, dots or underscores')])
    name = StringField('Name', validators=[DataRequired(), Length(1, 64)])
    password = PasswordField('Password', validators=[DataRequired()])
    password2 = PasswordField('Confirm password', validators=[
        DataRequired(), EqualTo('password', message='Passwords must match.')])
    submit = SubmitField('Register')

    def validate_email(self, field):
        """Reject emails already belonging to an account (case-insensitive)."""
        if User.query.filter_by(email=field.data.lower()).first():
            # Fixed typo: was 'Email alreadu registered'.
            raise ValidationError('Email already registered')

    def validate_username(self, field):
        """Reject usernames that are already taken."""
        if User.query.filter_by(username=field.data).first():
            raise ValidationError('Username already taken')
class LoginForm(FlaskForm):
    """Email/password sign-in form with a remember-me option."""
    email = StringField('Email', validators=[DataRequired(), Length(1, 64),
        Email()])
    password = PasswordField('Password', validators=[DataRequired()])
    remember_me = BooleanField('Keep me logged in')
    submit = SubmitField('Log In')
class NewGameForm(FlaskForm):
    """Form for creating a new game (name, description, schedule, venue)."""
    name = StringField('Name', validators=[DataRequired()])
    description = TextAreaField('Description', validators=[DataRequired()])
    date = DateField('Date', validators=[DataRequired()], format='%Y-%m-%d')
    # NOTE(review): no DataRequired on time — presumably optional; confirm.
    time = TimeField('Time', format='%H:%M')
    field = StringField('Field', validators=[DataRequired()])
    submit = SubmitField('Create New Game')
def render_player_game(game_player):
    """Flatten a GamePlayer association row into a plain dict for templates."""
    game = game_player.game
    return {
        key: getattr(game, key)
        for key in ('date', 'time', 'name', 'field', 'id')
    }
@app.route('/')
@app.route('/games')
@login_required
def index():
    """List every game the current user participates in."""
    memberships = GamePlayer.query.filter_by(user=current_user).all()
    games = [render_player_game(membership) for membership in memberships]
    return render_template('index.html', games=games)
@app.route('/games/join')
@app.route('/games/<int:id>')
def game_details(id=None):
    """Show one game's details; renders with game=None when id is missing/unknown."""
    game = Game.query.get(id)
    return render_template('game_details.html', game=game)
@app.route('/games/<int:id>/join')
@login_required
def game_join(id):
    """Add the current user to a game's player list, then show the game.

    Flashes a warning when the game does not exist or the user already joined.
    """
    game = Game.query.get(id)
    if game:
        if current_user not in game.players:
            game.players.append(current_user)
            db.session.commit()
            # Fixed typos: message text and the 'success' flash category.
            flash('You were added successfully', 'success')
        else:
            flash('You already were in this game', 'warning')
    else:
        flash("The game you're trying to enter does not exist", 'warning')
    return redirect(url_for('game_details', id=id))
@app.route('/games/<int:id>/quit')
@login_required
def game_quit(id):
    """Remove the current user from a game's player list, then show the game.

    Flashes a warning when the game does not exist or the user was not in it.
    """
    game = Game.query.get(id)
    if game:
        if current_user in game.players:
            game.players.remove(current_user)
            db.session.commit()
            # Fixed typos: message text and the 'success' flash category.
            flash('You were removed successfully', 'success')
        else:
            flash('You were not in this game', 'warning')
    else:
        flash("The game you're trying to quit does not exist", 'warning')
    return redirect(url_for('game_details', id=id))
<|reserved_special_token_0|>
@app.route('/login', methods=['GET', 'POST'])
def user_login():
    """Authenticate a user and redirect to the originally requested page.

    Fix: renamed local ``next`` to ``next_url`` — it shadowed the builtin.
    """
    form = LoginForm()
    if form.validate_on_submit():
        user = User.query.filter_by(email=form.email.data.lower()).first()
        if user is not None and user.verify_password(form.password.data):
            login_user(user, form.remember_me.data)
            # Only honor same-site redirect targets to avoid open redirects.
            next_url = request.args.get('next')
            if next_url is None or not next_url.startswith('/'):
                next_url = url_for('index')
            return redirect(next_url)
        flash('Invalid email or password')
    return render_template('login.html', form=form)
@app.route('/logout')
@login_required
def user_logout():
    """End the current session and send the user back to the home page."""
    logout_user()
    flash('You have been logged out.')
    return redirect(url_for('index'))
@app.route('/test')
def test():
    """Smoke-test route: render the bare layout template."""
    return render_template('layout.html', title='Test')
<|reserved_special_token_1|>
# Imports
import os
from flask import Flask, redirect, render_template, url_for, request, flash
from flask_login import LoginManager, login_user, logout_user, login_required, current_user
# Import - Database
from flask_sqlalchemy import SQLAlchemy
# Import - Models
from werkzeug.security import generate_password_hash, check_password_hash
from flask_login import UserMixin
# Import - Forms
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, BooleanField, SubmitField, ValidationError, DateField, TimeField, TextAreaField, IntegerField
from wtforms.validators import DataRequired, Length, Email, Regexp, EqualTo
# Config
basedir = os.path.abspath(os.path.dirname(__file__))
app = Flask(__name__)
# SECRET_KEY must be provided via the environment; Flask uses it to sign
# the session cookie (and Flask-WTF for CSRF tokens).
app.config['SECRET_KEY'] = os.environ.get('SECRET_KEY')
# SQLite database file lives next to this module.
app.config['SQLALCHEMY_DATABASE_URI'] =\
    'sqlite:///' + os.path.join(basedir, 'data.sqlite')
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
# Setup
db = SQLAlchemy(app)
login_manager = LoginManager(app)
# Unauthenticated users hitting @login_required are redirected here.
login_manager.login_view = 'user_login'
# Models
class User(UserMixin, db.Model):
    """Registered account; may participate in games via game_players."""
    __tablename__ = 'users'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(64))
    email = db.Column(db.String(64), unique=True, index=True)  # login identifier
    username = db.Column(db.String(64), unique=True, index=True)
    password_hash = db.Column(db.String(128))  # only the hash is stored, never the password
    games = db.relationship('Game', secondary="game_players")
    @property
    def password(self):
        """Write-only attribute: reading raises, assigning stores a hash."""
        raise AttributeError('password is not a readable attribute')
    @password.setter
    def password(self, password):
        self.password_hash = generate_password_hash(password)
    def verify_password(self, password):
        """Return True if *password* matches the stored hash."""
        return check_password_hash(self.password_hash, password)
@login_manager.user_loader
def load_user(user_id):
    """Flask-Login callback: reload a user from the session-stored id."""
    return User.query.get(int(user_id))
class Game(db.Model):
    """A scheduled match; the creating admin is auto-added as a player."""
    __tablename__ = 'games'
    id = db.Column(db.Integer, primary_key=True)
    admin_id = db.Column(db.Integer, db.ForeignKey('users.id'))
    admin = db.relationship('User', backref='admin', lazy='joined')
    name = db.Column(db.String(64))
    description = db.Column(db.String(100))
    date = db.Column(db.Date())
    time = db.Column(db.Time())
    field = db.Column(db.String(100), nullable=True)  # venue; optional
    players = db.relationship('User', secondary="game_players")
    def __init__(self, **kwargs):
        super(Game, self).__init__(**kwargs)
        # The admin always participates in their own game.
        self.players.append(self.admin)
class GamePlayer(db.Model):
    """Association table linking users to the games they joined."""
    __tablename__ = 'game_players'
    id = db.Column(db.Integer, primary_key=True)
    user_id = db.Column(db.Integer, db.ForeignKey('users.id'))
    game_id = db.Column(db.Integer, db.ForeignKey('games.id'))
    # NOTE(review): backref names 'user'/'game' are unusually generic —
    # verify they do not clash with other relationships on those models.
    user = db.relationship('User', backref='user', lazy='joined')
    game = db.relationship('Game', backref='game', lazy='joined')
# Forms
# Forms - User
class RegistrationForm(FlaskForm):
    """Sign-up form with uniqueness checks for email and username."""
    email = StringField('Email', validators=[
        DataRequired(), Length(1, 64), Email()])
    username = StringField('Username', validators=[
        DataRequired(), Length(1, 64),
        Regexp('^[A-Za-z][A-Za-z0-9_.]*$', 0,
               'Usernames must have only letters, numbers, dots or '
               'underscores')
    ])
    name = StringField('Name', validators=[DataRequired(), Length(1, 64)])
    password = PasswordField('Password', validators=[DataRequired()])
    password2 = PasswordField('Confirm password', validators=[
        DataRequired(), EqualTo('password', message='Passwords must match.')])
    submit = SubmitField('Register')

    def validate_email(self, field):
        """Reject emails already belonging to an account (case-insensitive)."""
        if User.query.filter_by(email=field.data.lower()).first():
            # Fixed typo: was 'Email alreadu registered'.
            raise ValidationError('Email already registered')

    def validate_username(self, field):
        """Reject usernames that are already taken."""
        if User.query.filter_by(username=field.data).first():
            raise ValidationError('Username already taken')
class LoginForm(FlaskForm):
    """Email/password sign-in form with a remember-me option."""
    email = StringField('Email', validators=[DataRequired(), Length(1, 64),
                                             Email()])
    password = PasswordField('Password', validators=[DataRequired()])
    remember_me = BooleanField('Keep me logged in')
    submit = SubmitField('Log In')
# Forms - Games
class NewGameForm(FlaskForm):
    """Form for creating a new game (name, description, schedule, venue)."""
    name = StringField('Name', validators=[DataRequired()])
    description = TextAreaField('Description', validators=[DataRequired()])
    date = DateField('Date', validators=[
        DataRequired()], format='%Y-%m-%d')
    # NOTE(review): no DataRequired on time — presumably optional; confirm.
    time = TimeField('Time', format='%H:%M')
    field = StringField('Field', validators=[DataRequired()])
    submit = SubmitField('Create New Game')
# Utils functions
def render_player_game(game_player):
    """Flatten a GamePlayer association row into a plain dict for templates."""
    game = game_player.game
    return {
        key: getattr(game, key)
        for key in ('date', 'time', 'name', 'field', 'id')
    }
# Routes
# Routes - Games
@app.route('/')
@app.route('/games')
@login_required
def index():
    """List every game the current user participates in."""
    memberships = GamePlayer.query.filter_by(user=current_user).all()
    games = [render_player_game(membership) for membership in memberships]
    return render_template('index.html', games=games)
@app.route('/games/join')
@app.route('/games/<int:id>')
def game_details(id=None):
    """Show one game's details; renders with game=None when id is missing/unknown."""
    # Show the game details and players
    # The admin has the same page, but with extra commands to remove, add...
    game = Game.query.get(id)
    return render_template('game_details.html', game=game)
@app.route('/games/<int:id>/join')
@login_required
def game_join(id):
    """Add the current user to a game's player list, then show the game.

    Flashes a warning when the game does not exist or the user already joined.
    """
    game = Game.query.get(id)
    if game:
        if current_user not in game.players:
            game.players.append(current_user)
            db.session.commit()
            # Fixed typos: message text and the 'success' flash category.
            flash('You were added successfully', 'success')
        else:
            flash('You already were in this game', 'warning')
    else:
        flash("The game you're trying to enter does not exist", 'warning')
    return redirect(url_for('game_details', id=id))
@app.route('/games/<int:id>/quit')
@login_required
def game_quit(id):
    """Remove the current user from a game's player list, then show the game.

    Flashes a warning when the game does not exist or the user was not in it.
    """
    game = Game.query.get(id)
    if game:
        if current_user in game.players:
            game.players.remove(current_user)
            db.session.commit()
            # Fixed typos: message text and the 'success' flash category.
            flash('You were removed successfully', 'success')
        else:
            flash('You were not in this game', 'warning')
    else:
        flash("The game you're trying to quit does not exist", 'warning')
    return redirect(url_for('game_details', id=id))
@app.route('/games/new', methods=['GET', 'POST'])
@login_required
def new_game():
    """Create a game owned by the current user, then redirect to its page.

    Fixes: 'succesfully' typo in the flash message; local variable renamed
    from ``new_game`` (it shadowed this view function).
    """
    form = NewGameForm()
    if form.validate_on_submit():
        game = Game(
            admin=current_user,
            name=form.name.data,
            description=form.description.data,
            date=form.date.data,
            time=form.time.data,
            field=form.field.data,
        )
        db.session.add(game)
        db.session.commit()
        flash('Your game was created successfully', 'success')
        return redirect(url_for('game_details', id=game.id))
    return render_template('new_game.html', form=form)
# Routes - User
@app.route('/register', methods=['GET', 'POST'])
def user_register():
    """Create a new account and immediately sign the user in."""
    form = RegistrationForm()
    if not form.validate_on_submit():
        # Initial GET or failed validation: (re)display the form.
        return render_template('register.html', form=form)
    new_user = User(
        email=form.email.data.lower(),
        username=form.username.data,
        password=form.password.data,
        name=form.name.data,
    )
    db.session.add(new_user)
    db.session.commit()
    login_user(new_user)
    return redirect(url_for('index'))
@app.route('/login', methods=['GET', 'POST'])
def user_login():
    """Authenticate a user and redirect to the originally requested page.

    Fix: renamed local ``next`` to ``next_url`` — it shadowed the builtin.
    """
    form = LoginForm()
    if form.validate_on_submit():
        user = User.query.filter_by(email=form.email.data.lower()).first()
        if user is not None and user.verify_password(form.password.data):
            login_user(user, form.remember_me.data)
            # Only honor same-site redirect targets to avoid open redirects.
            next_url = request.args.get('next')
            if next_url is None or not next_url.startswith('/'):
                next_url = url_for('index')
            return redirect(next_url)
        flash('Invalid email or password')
    return render_template('login.html', form=form)
@app.route('/logout')
@login_required
def user_logout():
    """End the current session and send the user back to the home page."""
    logout_user()
    flash('You have been logged out.')
    return redirect(url_for('index'))
@app.route('/test')
def test():
    """Smoke-test route: render the bare layout template."""
    return render_template('layout.html', title='Test')
|
flexible
|
{
"blob_id": "6ff4aff5811d2bd7ad150d7e8f925308d120ef74",
"index": 2566,
"step-1": "<mask token>\n\n\nclass User(UserMixin, db.Model):\n __tablename__ = 'users'\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(64))\n email = db.Column(db.String(64), unique=True, index=True)\n username = db.Column(db.String(64), unique=True, index=True)\n password_hash = db.Column(db.String(128))\n games = db.relationship('Game', secondary='game_players')\n\n @property\n def password(self):\n raise AttributeError('password is not a readable attribute')\n\n @password.setter\n def password(self, password):\n self.password_hash = generate_password_hash(password)\n\n def verify_password(self, password):\n return check_password_hash(self.password_hash, password)\n\n\n@login_manager.user_loader\ndef load_user(user_id):\n return User.query.get(int(user_id))\n\n\nclass Game(db.Model):\n __tablename__ = 'games'\n id = db.Column(db.Integer, primary_key=True)\n admin_id = db.Column(db.Integer, db.ForeignKey('users.id'))\n admin = db.relationship('User', backref='admin', lazy='joined')\n name = db.Column(db.String(64))\n description = db.Column(db.String(100))\n date = db.Column(db.Date())\n time = db.Column(db.Time())\n field = db.Column(db.String(100), nullable=True)\n players = db.relationship('User', secondary='game_players')\n\n def __init__(self, **kwargs):\n super(Game, self).__init__(**kwargs)\n self.players.append(self.admin)\n\n\nclass GamePlayer(db.Model):\n __tablename__ = 'game_players'\n id = db.Column(db.Integer, primary_key=True)\n user_id = db.Column(db.Integer, db.ForeignKey('users.id'))\n game_id = db.Column(db.Integer, db.ForeignKey('games.id'))\n user = db.relationship('User', backref='user', lazy='joined')\n game = db.relationship('Game', backref='game', lazy='joined')\n\n\nclass RegistrationForm(FlaskForm):\n email = StringField('Email', validators=[DataRequired(), Length(1, 64),\n Email()])\n username = StringField('Username', validators=[DataRequired(), Length(1,\n 64), Regexp('^[A-Za-z][A-Za-z0-9_.]*$', 0,\n 
'Usernames must have only letters, numbers, dots or underscores')])\n name = StringField('Name', validators=[DataRequired(), Length(1, 64)])\n password = PasswordField('Password', validators=[DataRequired()])\n password2 = PasswordField('Confirm password', validators=[DataRequired(\n ), EqualTo('password', message='Passwords must match.')])\n submit = SubmitField('Register')\n\n def validate_email(self, field):\n if User.query.filter_by(email=field.data.lower()).first():\n raise ValidationError('Email alreadu registered')\n\n def validate_username(self, field):\n if User.query.filter_by(username=field.data).first():\n raise ValidationError('Username already taken')\n\n\nclass LoginForm(FlaskForm):\n email = StringField('Email', validators=[DataRequired(), Length(1, 64),\n Email()])\n password = PasswordField('Password', validators=[DataRequired()])\n remember_me = BooleanField('Keep me logged in')\n submit = SubmitField('Log In')\n\n\nclass NewGameForm(FlaskForm):\n name = StringField('Name', validators=[DataRequired()])\n description = TextAreaField('Description', validators=[DataRequired()])\n date = DateField('Date', validators=[DataRequired()], format='%Y-%m-%d')\n time = TimeField('Time', format='%H:%M')\n field = StringField('Field', validators=[DataRequired()])\n submit = SubmitField('Create New Game')\n\n\n<mask token>\n\n\n@app.route('/')\n@app.route('/games')\n@login_required\ndef index():\n game_player = GamePlayer.query.filter_by(user=current_user).all()\n games = list(map(render_player_game, game_player))\n return render_template('index.html', games=games)\n\n\n<mask token>\n\n\n@app.route('/games/<int:id>/quit')\n@login_required\ndef game_quit(id):\n game = Game.query.get(id)\n if game:\n if current_user in game.players:\n game.players.remove(current_user)\n db.session.commit()\n flash('You were removed succesfully', 'sucess')\n else:\n flash('You were not in this game', 'warning')\n else:\n flash(\"The game you're trying to quit does not exist\", 
'warning')\n return redirect(url_for('game_details', id=id))\n\n\n<mask token>\n\n\n@app.route('/login', methods=['GET', 'POST'])\ndef user_login():\n form = LoginForm()\n if form.validate_on_submit():\n user = User.query.filter_by(email=form.email.data.lower()).first()\n if user is not None and user.verify_password(form.password.data):\n login_user(user, form.remember_me.data)\n next = request.args.get('next')\n if next is None or not next.startswith('/'):\n next = url_for('index')\n return redirect(next)\n flash('Invalid email or password')\n return render_template('login.html', form=form)\n\n\n@app.route('/logout')\n@login_required\ndef user_logout():\n logout_user()\n flash('You have been logged out.')\n return redirect(url_for('index'))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass User(UserMixin, db.Model):\n __tablename__ = 'users'\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(64))\n email = db.Column(db.String(64), unique=True, index=True)\n username = db.Column(db.String(64), unique=True, index=True)\n password_hash = db.Column(db.String(128))\n games = db.relationship('Game', secondary='game_players')\n\n @property\n def password(self):\n raise AttributeError('password is not a readable attribute')\n\n @password.setter\n def password(self, password):\n self.password_hash = generate_password_hash(password)\n\n def verify_password(self, password):\n return check_password_hash(self.password_hash, password)\n\n\n@login_manager.user_loader\ndef load_user(user_id):\n return User.query.get(int(user_id))\n\n\nclass Game(db.Model):\n __tablename__ = 'games'\n id = db.Column(db.Integer, primary_key=True)\n admin_id = db.Column(db.Integer, db.ForeignKey('users.id'))\n admin = db.relationship('User', backref='admin', lazy='joined')\n name = db.Column(db.String(64))\n description = db.Column(db.String(100))\n date = db.Column(db.Date())\n time = db.Column(db.Time())\n field = db.Column(db.String(100), nullable=True)\n players = db.relationship('User', secondary='game_players')\n\n def __init__(self, **kwargs):\n super(Game, self).__init__(**kwargs)\n self.players.append(self.admin)\n\n\nclass GamePlayer(db.Model):\n __tablename__ = 'game_players'\n id = db.Column(db.Integer, primary_key=True)\n user_id = db.Column(db.Integer, db.ForeignKey('users.id'))\n game_id = db.Column(db.Integer, db.ForeignKey('games.id'))\n user = db.relationship('User', backref='user', lazy='joined')\n game = db.relationship('Game', backref='game', lazy='joined')\n\n\nclass RegistrationForm(FlaskForm):\n email = StringField('Email', validators=[DataRequired(), Length(1, 64),\n Email()])\n username = StringField('Username', validators=[DataRequired(), Length(1,\n 64), Regexp('^[A-Za-z][A-Za-z0-9_.]*$', 0,\n 
'Usernames must have only letters, numbers, dots or underscores')])\n name = StringField('Name', validators=[DataRequired(), Length(1, 64)])\n password = PasswordField('Password', validators=[DataRequired()])\n password2 = PasswordField('Confirm password', validators=[DataRequired(\n ), EqualTo('password', message='Passwords must match.')])\n submit = SubmitField('Register')\n\n def validate_email(self, field):\n if User.query.filter_by(email=field.data.lower()).first():\n raise ValidationError('Email alreadu registered')\n\n def validate_username(self, field):\n if User.query.filter_by(username=field.data).first():\n raise ValidationError('Username already taken')\n\n\nclass LoginForm(FlaskForm):\n email = StringField('Email', validators=[DataRequired(), Length(1, 64),\n Email()])\n password = PasswordField('Password', validators=[DataRequired()])\n remember_me = BooleanField('Keep me logged in')\n submit = SubmitField('Log In')\n\n\nclass NewGameForm(FlaskForm):\n name = StringField('Name', validators=[DataRequired()])\n description = TextAreaField('Description', validators=[DataRequired()])\n date = DateField('Date', validators=[DataRequired()], format='%Y-%m-%d')\n time = TimeField('Time', format='%H:%M')\n field = StringField('Field', validators=[DataRequired()])\n submit = SubmitField('Create New Game')\n\n\n<mask token>\n\n\n@app.route('/')\n@app.route('/games')\n@login_required\ndef index():\n game_player = GamePlayer.query.filter_by(user=current_user).all()\n games = list(map(render_player_game, game_player))\n return render_template('index.html', games=games)\n\n\n<mask token>\n\n\n@app.route('/games/<int:id>/join')\n@login_required\ndef game_join(id):\n game = Game.query.get(id)\n if game:\n if current_user not in game.players:\n game.players.append(current_user)\n db.session.commit()\n flash('You were added succesfully', 'sucess')\n else:\n flash('You already were in this game', 'warning')\n else:\n flash(\"The game you're trying to enter does not 
exist\", 'warning')\n return redirect(url_for('game_details', id=id))\n\n\n@app.route('/games/<int:id>/quit')\n@login_required\ndef game_quit(id):\n game = Game.query.get(id)\n if game:\n if current_user in game.players:\n game.players.remove(current_user)\n db.session.commit()\n flash('You were removed succesfully', 'sucess')\n else:\n flash('You were not in this game', 'warning')\n else:\n flash(\"The game you're trying to quit does not exist\", 'warning')\n return redirect(url_for('game_details', id=id))\n\n\n<mask token>\n\n\n@app.route('/login', methods=['GET', 'POST'])\ndef user_login():\n form = LoginForm()\n if form.validate_on_submit():\n user = User.query.filter_by(email=form.email.data.lower()).first()\n if user is not None and user.verify_password(form.password.data):\n login_user(user, form.remember_me.data)\n next = request.args.get('next')\n if next is None or not next.startswith('/'):\n next = url_for('index')\n return redirect(next)\n flash('Invalid email or password')\n return render_template('login.html', form=form)\n\n\n@app.route('/logout')\n@login_required\ndef user_logout():\n logout_user()\n flash('You have been logged out.')\n return redirect(url_for('index'))\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass User(UserMixin, db.Model):\n __tablename__ = 'users'\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(64))\n email = db.Column(db.String(64), unique=True, index=True)\n username = db.Column(db.String(64), unique=True, index=True)\n password_hash = db.Column(db.String(128))\n games = db.relationship('Game', secondary='game_players')\n\n @property\n def password(self):\n raise AttributeError('password is not a readable attribute')\n\n @password.setter\n def password(self, password):\n self.password_hash = generate_password_hash(password)\n\n def verify_password(self, password):\n return check_password_hash(self.password_hash, password)\n\n\n@login_manager.user_loader\ndef load_user(user_id):\n return User.query.get(int(user_id))\n\n\nclass Game(db.Model):\n __tablename__ = 'games'\n id = db.Column(db.Integer, primary_key=True)\n admin_id = db.Column(db.Integer, db.ForeignKey('users.id'))\n admin = db.relationship('User', backref='admin', lazy='joined')\n name = db.Column(db.String(64))\n description = db.Column(db.String(100))\n date = db.Column(db.Date())\n time = db.Column(db.Time())\n field = db.Column(db.String(100), nullable=True)\n players = db.relationship('User', secondary='game_players')\n\n def __init__(self, **kwargs):\n super(Game, self).__init__(**kwargs)\n self.players.append(self.admin)\n\n\nclass GamePlayer(db.Model):\n __tablename__ = 'game_players'\n id = db.Column(db.Integer, primary_key=True)\n user_id = db.Column(db.Integer, db.ForeignKey('users.id'))\n game_id = db.Column(db.Integer, db.ForeignKey('games.id'))\n user = db.relationship('User', backref='user', lazy='joined')\n game = db.relationship('Game', backref='game', lazy='joined')\n\n\nclass RegistrationForm(FlaskForm):\n email = StringField('Email', validators=[DataRequired(), Length(1, 64),\n Email()])\n username = StringField('Username', validators=[DataRequired(), Length(1,\n 64), Regexp('^[A-Za-z][A-Za-z0-9_.]*$', 0,\n 
'Usernames must have only letters, numbers, dots or underscores')])\n name = StringField('Name', validators=[DataRequired(), Length(1, 64)])\n password = PasswordField('Password', validators=[DataRequired()])\n password2 = PasswordField('Confirm password', validators=[DataRequired(\n ), EqualTo('password', message='Passwords must match.')])\n submit = SubmitField('Register')\n\n def validate_email(self, field):\n if User.query.filter_by(email=field.data.lower()).first():\n raise ValidationError('Email alreadu registered')\n\n def validate_username(self, field):\n if User.query.filter_by(username=field.data).first():\n raise ValidationError('Username already taken')\n\n\nclass LoginForm(FlaskForm):\n email = StringField('Email', validators=[DataRequired(), Length(1, 64),\n Email()])\n password = PasswordField('Password', validators=[DataRequired()])\n remember_me = BooleanField('Keep me logged in')\n submit = SubmitField('Log In')\n\n\nclass NewGameForm(FlaskForm):\n name = StringField('Name', validators=[DataRequired()])\n description = TextAreaField('Description', validators=[DataRequired()])\n date = DateField('Date', validators=[DataRequired()], format='%Y-%m-%d')\n time = TimeField('Time', format='%H:%M')\n field = StringField('Field', validators=[DataRequired()])\n submit = SubmitField('Create New Game')\n\n\n<mask token>\n\n\n@app.route('/')\n@app.route('/games')\n@login_required\ndef index():\n game_player = GamePlayer.query.filter_by(user=current_user).all()\n games = list(map(render_player_game, game_player))\n return render_template('index.html', games=games)\n\n\n@app.route('/games/join')\n@app.route('/games/<int:id>')\ndef game_details(id=None):\n game = Game.query.get(id)\n return render_template('game_details.html', game=game)\n\n\n@app.route('/games/<int:id>/join')\n@login_required\ndef game_join(id):\n game = Game.query.get(id)\n if game:\n if current_user not in game.players:\n game.players.append(current_user)\n db.session.commit()\n flash('You 
were added succesfully', 'sucess')\n else:\n flash('You already were in this game', 'warning')\n else:\n flash(\"The game you're trying to enter does not exist\", 'warning')\n return redirect(url_for('game_details', id=id))\n\n\n@app.route('/games/<int:id>/quit')\n@login_required\ndef game_quit(id):\n game = Game.query.get(id)\n if game:\n if current_user in game.players:\n game.players.remove(current_user)\n db.session.commit()\n flash('You were removed succesfully', 'sucess')\n else:\n flash('You were not in this game', 'warning')\n else:\n flash(\"The game you're trying to quit does not exist\", 'warning')\n return redirect(url_for('game_details', id=id))\n\n\n<mask token>\n\n\n@app.route('/login', methods=['GET', 'POST'])\ndef user_login():\n form = LoginForm()\n if form.validate_on_submit():\n user = User.query.filter_by(email=form.email.data.lower()).first()\n if user is not None and user.verify_password(form.password.data):\n login_user(user, form.remember_me.data)\n next = request.args.get('next')\n if next is None or not next.startswith('/'):\n next = url_for('index')\n return redirect(next)\n flash('Invalid email or password')\n return render_template('login.html', form=form)\n\n\n@app.route('/logout')\n@login_required\ndef user_logout():\n logout_user()\n flash('You have been logged out.')\n return redirect(url_for('index'))\n\n\n@app.route('/test')\ndef test():\n return render_template('layout.html', title='Test')\n",
"step-4": "<mask token>\n\n\nclass User(UserMixin, db.Model):\n __tablename__ = 'users'\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(64))\n email = db.Column(db.String(64), unique=True, index=True)\n username = db.Column(db.String(64), unique=True, index=True)\n password_hash = db.Column(db.String(128))\n games = db.relationship('Game', secondary='game_players')\n\n @property\n def password(self):\n raise AttributeError('password is not a readable attribute')\n\n @password.setter\n def password(self, password):\n self.password_hash = generate_password_hash(password)\n\n def verify_password(self, password):\n return check_password_hash(self.password_hash, password)\n\n\n@login_manager.user_loader\ndef load_user(user_id):\n return User.query.get(int(user_id))\n\n\nclass Game(db.Model):\n __tablename__ = 'games'\n id = db.Column(db.Integer, primary_key=True)\n admin_id = db.Column(db.Integer, db.ForeignKey('users.id'))\n admin = db.relationship('User', backref='admin', lazy='joined')\n name = db.Column(db.String(64))\n description = db.Column(db.String(100))\n date = db.Column(db.Date())\n time = db.Column(db.Time())\n field = db.Column(db.String(100), nullable=True)\n players = db.relationship('User', secondary='game_players')\n\n def __init__(self, **kwargs):\n super(Game, self).__init__(**kwargs)\n self.players.append(self.admin)\n\n\nclass GamePlayer(db.Model):\n __tablename__ = 'game_players'\n id = db.Column(db.Integer, primary_key=True)\n user_id = db.Column(db.Integer, db.ForeignKey('users.id'))\n game_id = db.Column(db.Integer, db.ForeignKey('games.id'))\n user = db.relationship('User', backref='user', lazy='joined')\n game = db.relationship('Game', backref='game', lazy='joined')\n\n\nclass RegistrationForm(FlaskForm):\n email = StringField('Email', validators=[DataRequired(), Length(1, 64),\n Email()])\n username = StringField('Username', validators=[DataRequired(), Length(1,\n 64), Regexp('^[A-Za-z][A-Za-z0-9_.]*$', 0,\n 
'Usernames must have only letters, numbers, dots or underscores')])\n name = StringField('Name', validators=[DataRequired(), Length(1, 64)])\n password = PasswordField('Password', validators=[DataRequired()])\n password2 = PasswordField('Confirm password', validators=[DataRequired(\n ), EqualTo('password', message='Passwords must match.')])\n submit = SubmitField('Register')\n\n def validate_email(self, field):\n if User.query.filter_by(email=field.data.lower()).first():\n raise ValidationError('Email alreadu registered')\n\n def validate_username(self, field):\n if User.query.filter_by(username=field.data).first():\n raise ValidationError('Username already taken')\n\n\nclass LoginForm(FlaskForm):\n email = StringField('Email', validators=[DataRequired(), Length(1, 64),\n Email()])\n password = PasswordField('Password', validators=[DataRequired()])\n remember_me = BooleanField('Keep me logged in')\n submit = SubmitField('Log In')\n\n\nclass NewGameForm(FlaskForm):\n name = StringField('Name', validators=[DataRequired()])\n description = TextAreaField('Description', validators=[DataRequired()])\n date = DateField('Date', validators=[DataRequired()], format='%Y-%m-%d')\n time = TimeField('Time', format='%H:%M')\n field = StringField('Field', validators=[DataRequired()])\n submit = SubmitField('Create New Game')\n\n\ndef render_player_game(game_player):\n game = game_player.game\n game_rendered = {'date': game.date, 'time': game.time, 'name': game.\n name, 'field': game.field, 'id': game.id}\n return game_rendered\n\n\n@app.route('/')\n@app.route('/games')\n@login_required\ndef index():\n game_player = GamePlayer.query.filter_by(user=current_user).all()\n games = list(map(render_player_game, game_player))\n return render_template('index.html', games=games)\n\n\n@app.route('/games/join')\n@app.route('/games/<int:id>')\ndef game_details(id=None):\n game = Game.query.get(id)\n return render_template('game_details.html', 
game=game)\n\n\n@app.route('/games/<int:id>/join')\n@login_required\ndef game_join(id):\n game = Game.query.get(id)\n if game:\n if current_user not in game.players:\n game.players.append(current_user)\n db.session.commit()\n flash('You were added succesfully', 'sucess')\n else:\n flash('You already were in this game', 'warning')\n else:\n flash(\"The game you're trying to enter does not exist\", 'warning')\n return redirect(url_for('game_details', id=id))\n\n\n@app.route('/games/<int:id>/quit')\n@login_required\ndef game_quit(id):\n game = Game.query.get(id)\n if game:\n if current_user in game.players:\n game.players.remove(current_user)\n db.session.commit()\n flash('You were removed succesfully', 'sucess')\n else:\n flash('You were not in this game', 'warning')\n else:\n flash(\"The game you're trying to quit does not exist\", 'warning')\n return redirect(url_for('game_details', id=id))\n\n\n<mask token>\n\n\n@app.route('/login', methods=['GET', 'POST'])\ndef user_login():\n form = LoginForm()\n if form.validate_on_submit():\n user = User.query.filter_by(email=form.email.data.lower()).first()\n if user is not None and user.verify_password(form.password.data):\n login_user(user, form.remember_me.data)\n next = request.args.get('next')\n if next is None or not next.startswith('/'):\n next = url_for('index')\n return redirect(next)\n flash('Invalid email or password')\n return render_template('login.html', form=form)\n\n\n@app.route('/logout')\n@login_required\ndef user_logout():\n logout_user()\n flash('You have been logged out.')\n return redirect(url_for('index'))\n\n\n@app.route('/test')\ndef test():\n return render_template('layout.html', title='Test')\n",
"step-5": "# Imports\nimport os\nfrom flask import Flask, redirect, render_template, url_for, request, flash\nfrom flask_login import LoginManager, login_user, logout_user, login_required, current_user\n\n# Import - Database\nfrom flask_sqlalchemy import SQLAlchemy\n\n# Import - Models\nfrom werkzeug.security import generate_password_hash, check_password_hash\nfrom flask_login import UserMixin\n\n# Import - Forms\nfrom flask_wtf import FlaskForm\nfrom wtforms import StringField, PasswordField, BooleanField, SubmitField, ValidationError, DateField, TimeField, TextAreaField, IntegerField\nfrom wtforms.validators import DataRequired, Length, Email, Regexp, EqualTo\n\n# Config\nbasedir = os.path.abspath(os.path.dirname(__file__))\n\napp = Flask(__name__)\napp.config['SECRET_KEY'] = os.environ.get('SECRET_KEY')\napp.config['SQLALCHEMY_DATABASE_URI'] =\\\n 'sqlite:///' + os.path.join(basedir, 'data.sqlite')\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\n\n# Setup\ndb = SQLAlchemy(app)\nlogin_manager = LoginManager(app)\nlogin_manager.login_view = 'user_login'\n\n# Models\n\n\nclass User(UserMixin, db.Model):\n __tablename__ = 'users'\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(64))\n email = db.Column(db.String(64), unique=True, index=True)\n username = db.Column(db.String(64), unique=True, index=True)\n password_hash = db.Column(db.String(128))\n games = db.relationship('Game', secondary=\"game_players\")\n\n @property\n def password(self):\n raise AttributeError('password is not a readable attribute')\n\n @password.setter\n def password(self, password):\n self.password_hash = generate_password_hash(password)\n\n def verify_password(self, password):\n return check_password_hash(self.password_hash, password)\n\n\n@login_manager.user_loader\ndef load_user(user_id):\n return User.query.get(int(user_id))\n\n\nclass Game(db.Model):\n __tablename__ = 'games'\n id = db.Column(db.Integer, primary_key=True)\n admin_id = 
db.Column(db.Integer, db.ForeignKey('users.id'))\n admin = db.relationship('User', backref='admin', lazy='joined')\n name = db.Column(db.String(64))\n description = db.Column(db.String(100))\n date = db.Column(db.Date())\n time = db.Column(db.Time())\n field = db.Column(db.String(100), nullable=True)\n players = db.relationship('User', secondary=\"game_players\")\n\n def __init__(self, **kwargs):\n super(Game, self).__init__(**kwargs)\n self.players.append(self.admin)\n\n\nclass GamePlayer(db.Model):\n __tablename__ = 'game_players'\n id = db.Column(db.Integer, primary_key=True)\n user_id = db.Column(db.Integer, db.ForeignKey('users.id'))\n game_id = db.Column(db.Integer, db.ForeignKey('games.id'))\n\n user = db.relationship('User', backref='user', lazy='joined')\n game = db.relationship('Game', backref='game', lazy='joined')\n\n\n# Forms\n# Forms - User\nclass RegistrationForm(FlaskForm):\n email = StringField('Email', validators=[\n DataRequired(), Length(1, 64), Email()])\n username = StringField('Username', validators=[\n DataRequired(), Length(1, 64),\n Regexp('^[A-Za-z][A-Za-z0-9_.]*$', 0,\n 'Usernames must have only letters, numbers, dots or '\n 'underscores')\n ])\n name = StringField('Name', validators=[DataRequired(), Length(1, 64)])\n password = PasswordField('Password', validators=[DataRequired()])\n password2 = PasswordField('Confirm password', validators=[\n DataRequired(), EqualTo('password', message='Passwords must match.')])\n submit = SubmitField('Register')\n\n def validate_email(self, field):\n if User.query.filter_by(email=field.data.lower()).first():\n raise ValidationError('Email alreadu registered')\n\n def validate_username(self, field):\n if User.query.filter_by(username=field.data).first():\n raise ValidationError('Username already taken')\n\n\nclass LoginForm(FlaskForm):\n email = StringField('Email', validators=[DataRequired(), Length(1, 64),\n Email()])\n password = PasswordField('Password', validators=[DataRequired()])\n remember_me = 
BooleanField('Keep me logged in')\n submit = SubmitField('Log In')\n\n# Forms - Games\n\n\nclass NewGameForm(FlaskForm):\n name = StringField('Name', validators=[DataRequired()])\n description = TextAreaField('Description', validators=[DataRequired()])\n date = DateField('Date', validators=[\n DataRequired()], format='%Y-%m-%d')\n time = TimeField('Time', format='%H:%M')\n field = StringField('Field', validators=[DataRequired()])\n submit = SubmitField('Create New Game')\n\n\n# Utils functions\ndef render_player_game(game_player):\n game = game_player.game\n\n game_rendered = {\n 'date': game.date,\n 'time': game.time,\n 'name': game.name,\n 'field': game.field,\n 'id': game.id\n }\n return game_rendered\n\n\n# Routes\n# Routes - Games\n@app.route('/')\n@app.route('/games')\n@login_required\ndef index():\n # Show all the games the user is in and a button to join game and another to create one\n game_player = GamePlayer.query.filter_by(user=current_user).all()\n\n games = list(map(render_player_game, game_player))\n\n return render_template('index.html', games=games)\n\n\n@app.route('/games/join')\n@app.route('/games/<int:id>')\ndef game_details(id=None):\n # Show the game details and players\n # The admin has the same page, but with extra commands to remove, add...\n game = Game.query.get(id)\n\n return render_template('game_details.html', game=game)\n\n\n@app.route('/games/<int:id>/join')\n@login_required\ndef game_join(id):\n game = Game.query.get(id)\n\n if game:\n if current_user not in game.players:\n game.players.append(current_user)\n db.session.commit()\n flash('You were added succesfully', 'sucess')\n else:\n flash('You already were in this game', 'warning')\n else:\n flash(\"The game you're trying to enter does not exist\", 'warning')\n\n return redirect(url_for('game_details', id=id))\n\n@app.route('/games/<int:id>/quit')\n@login_required\ndef game_quit(id):\n game = Game.query.get(id)\n\n if game:\n if current_user in game.players:\n 
game.players.remove(current_user)\n db.session.commit()\n flash('You were removed succesfully', 'sucess')\n else:\n flash('You were not in this game', 'warning')\n else:\n flash(\"The game you're trying to quit does not exist\", 'warning')\n\n return redirect(url_for('game_details', id=id))\n\n\n@app.route('/games/new', methods=['GET', 'POST'])\n@login_required\ndef new_game():\n # A form to create a new game\n form = NewGameForm()\n if form.validate_on_submit():\n new_game = Game(\n admin=current_user,\n name=form.name.data,\n description=form.description.data,\n date=form.date.data,\n time=form.time.data,\n field=form.field.data,\n )\n\n db.session.add(new_game)\n db.session.commit()\n\n flash('Your game was created succesfully', 'success')\n return redirect(url_for('game_details', id=new_game.id))\n return render_template('new_game.html', form=form)\n\n\n# Routes - User\n@app.route('/register', methods=['GET', 'POST'])\ndef user_register():\n form = RegistrationForm()\n\n if form.validate_on_submit():\n user = User(\n email=form.email.data.lower(),\n username=form.username.data,\n password=form.password.data,\n name=form.name.data\n )\n\n db.session.add(user)\n db.session.commit()\n\n login_user(user)\n\n return redirect(url_for('index'))\n return render_template('register.html', form=form)\n\n\n@app.route('/login', methods=['GET', 'POST'])\ndef user_login():\n form = LoginForm()\n\n if form.validate_on_submit():\n user = User.query.filter_by(email=form.email.data.lower()).first()\n\n if user is not None and user.verify_password(form.password.data):\n login_user(user, form.remember_me.data)\n next = request.args.get('next')\n if next is None or not next.startswith('/'):\n next = url_for('index')\n return redirect(next)\n flash('Invalid email or password')\n return render_template('login.html', form=form)\n\n\n@app.route('/logout')\n@login_required\ndef user_logout():\n logout_user()\n flash('You have been logged out.')\n return 
redirect(url_for('index'))\n\n\n@app.route('/test')\ndef test():\n return render_template('layout.html', title='Test')\n",
"step-ids": [
23,
24,
26,
27,
32
]
}
|
[
23,
24,
26,
27,
32
] |
from BeautifulSoup import BeautifulSoup, NavigableString
from urllib2 import urlopen
from time import ctime
import sys
import os
import re
# Menu pages of RIT Dining Services, in the same order as the numbered
# choices printed by pretty_header below (user input is 1-based).
restaurants = ["http://finweb.rit.edu/diningservices/brickcity",
"http://finweb.rit.edu/diningservices/commons",
"http://finweb.rit.edu/diningservices/crossroads",
"http://finweb.rit.edu/diningservices/gvcantinagrille",
"http://finweb.rit.edu/diningservices/gracies",
"http://finweb.rit.edu/diningservices/ritzsportszone"]
# Banner plus restaurant picker shown on every loop iteration;
# {curtime} is filled in with time.ctime() when it is printed.
pretty_header = """
---------------------------------------------------
Parser Of On-campus Preferred Specials
a.k.a.
______ ______ ______ ______ _____
| _ | __ | __ | _ |/ ____|
| |_) | | | | | | | |_) | (___
| ___| | | | | | | ___|\___ \\
| | | |__| | |__| | | ____) |
| | | | | | | |
|__| |______|______|__| |_____/
It is currently {curtime}
---------------------------------------------------
[1] Brick City Cafe
[2] Commons
[3] Crossroads
[4] Global Village Cantina and Grille
[5] Gracies
[6] Ritz Sports Zone
[q] Quit
==================================================="""
def menu():
	"""Main interactive loop (Python 2 only).

	Repeatedly shows the restaurant picker, scrapes the chosen
	restaurant's dining-services page and prints every listed meal,
	until the user enters 'q' (which exits the process).
	"""
	while True:
		# Loop till user quits.
		sel = 0
		while ( sel < 1 or sel > len(restaurants)):
			# Input validation
			print pretty_header.format(curtime=ctime())
			sel = raw_input("Enter your menu choice [1-6 or q]: ")
			if sel.lower() == "q":
				sys.exit(0)
			try:
				sel = int(sel)
			# NOTE(review): bare except — any non-numeric entry simply
			# re-prompts, but this also swallows KeyboardInterrupt here.
			except:
				sel = 0
			os.system("clear")
		# Load meals from desired restaurant.
		# `sel` is 1-based (matches the printed menu); the list is 0-based.
		html = urlopen(restaurants[sel-1])
		soup = BeautifulSoup(html, convertEntities = BeautifulSoup.HTML_ENTITIES)
		# The dining pages mark each meal section with id="meal_N" and a
		# matching heading element with id="tab_N".
		meals = soup.findAll(id=re.compile("meal_\d"))
		tabs = soup.findAll(id=re.compile("tab_\d"))
		# get the name of the restaurant, minus the "RIT Dining Services" bs.
		print ("\nOn the menu at " + re.sub("^[\w\W]*\s?:\s?", "",
			str(soup.title.string)) + " today is:")
		meal_num = 0
		for meal in meals:
			if meal:
				# print all meals served + meal name / subrestaurant name
				print ("=====================")
				print tabs[meal_num].contents[0].string
				print ("=====================\n")
				meal_num += 1
				for item in meal.findAll("li"):
					# Skip empty / whitespace-only menu entries.
					if item.string and str(item.string) != "":
						print item.string
				print ("\n")
		raw_input("Press any key to continue...")
		os.system("clear")
if sys.version[0] != "2":
print "This script uses BeautifulSoup for html parsing."
print "BeautifulSoup only supports Python 2.x"
menu()
|
normal
|
{
"blob_id": "02e40e051c19116c9cb3a903e738232dc8f5d026",
"index": 9522,
"step-1": "\nfrom BeautifulSoup import BeautifulSoup, NavigableString\nfrom urllib2 import urlopen\nfrom time import ctime\nimport sys\nimport os\nimport re\nrestaurants = [\"http://finweb.rit.edu/diningservices/brickcity\",\n\"http://finweb.rit.edu/diningservices/commons\",\n\"http://finweb.rit.edu/diningservices/crossroads\",\n\"http://finweb.rit.edu/diningservices/gvcantinagrille\",\n\"http://finweb.rit.edu/diningservices/gracies\",\n\"http://finweb.rit.edu/diningservices/ritzsportszone\"]\n\npretty_header = \"\"\"\n---------------------------------------------------\n Parser Of On-campus Preferred Specials\n a.k.a.\n ______ ______ ______ ______ _____\n | _ | __ | __ | _ |/ ____|\n | |_) | | | | | | | |_) | (___\n | ___| | | | | | | ___|\\___ \\\\\n | | | |__| | |__| | | ____) |\n | | | | | | | |\n |__| |______|______|__| |_____/\n\n\n It is currently {curtime}\n---------------------------------------------------\n[1] Brick City Cafe\n[2] Commons\n[3] Crossroads\n[4] Global Village Cantina and Grille\n[5] Gracies\n[6] Ritz Sports Zone\n[q] Quit\n===================================================\"\"\"\n\ndef menu():\n\t\"\"\" Do all the heavy lifting.\"\"\"\n\twhile True:\n\t\t# Loop till user quits.\n\t\tsel = 0\n\t\twhile ( sel < 1 or sel > len(restaurants)):\n\t\t\t# Input validation\n\t\t\tprint pretty_header.format(curtime=ctime())\n\t\t\tsel = raw_input(\"Enter your menu choice [1-6 or q]: \")\n\t\t\tif sel.lower() == \"q\":\n\t\t\t\tsys.exit(0)\n\t\t\ttry:\n\t\t\t\tsel = int(sel)\n\t\t\texcept:\n\t\t\t\tsel = 0\n\t\t\tos.system(\"clear\")\n\n\t\t# Load meals from desired restaurant.\n\t\thtml = urlopen(restaurants[sel-1])\n\t\tsoup = BeautifulSoup(html, convertEntities = BeautifulSoup.HTML_ENTITIES)\n\t\tmeals = soup.findAll(id=re.compile(\"meal_\\d\"))\n\t\ttabs = soup.findAll(id=re.compile(\"tab_\\d\"))\n\n\t\t# get the name of the restaurant, minus the \"RIT Dining Services\" bs.\n\t\tprint (\"\\nOn the menu at \" + re.sub(\"^[\\w\\W]*\\s?:\\s?\", 
\"\",\n\t\t\tstr(soup.title.string)) + \" today is:\")\n\t\tmeal_num = 0\n\t\tfor meal in meals:\n\t\t\tif meal:\n\t\t\t\t# print all meals served + meal name / subrestaurant name\n\t\t\t\tprint (\"=====================\")\n\t\t\t\tprint tabs[meal_num].contents[0].string\n\t\t\t\tprint (\"=====================\\n\")\n\t\t\t\tmeal_num += 1\n\t\t\t\tfor item in meal.findAll(\"li\"):\n\t\t\t\t\tif item.string and str(item.string) != \"\":\n\t\t\t\t\t\tprint item.string\n\t\t\t\tprint (\"\\n\")\n\t\traw_input(\"Press any key to continue...\")\n\t\tos.system(\"clear\")\n\nif sys.version[0] != \"2\":\n\tprint \"This script uses BeautifulSoup for html parsing.\"\n\tprint \"BeautifulSoup only supports Python 2.x\"\nmenu()\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
"""
The epitome package is a set of command-line tools for analyzing MRI data, and a
set of scriptuit modules for stitching them (and others) together.
"""
from . import utilities
from . import stats
from . import signal
from . import plot
from . import docopt
|
normal
|
{
"blob_id": "4d58926e812789768fdf5be59bd54f9b66850e57",
"index": 2554,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfrom . import utilities\nfrom . import stats\nfrom . import signal\nfrom . import plot\nfrom . import docopt\n",
"step-3": "\"\"\"\nThe epitome package is a set of command-line tools for analyzing MRI data, and a \nset of scriptuit modules for stitching them (and others) together.\n\"\"\"\n\nfrom . import utilities\nfrom . import stats\nfrom . import signal\nfrom . import plot\nfrom . import docopt\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
def gradient_descent(f: Callable[[np.ndarray], float], f_grad: Callable[[np
.ndarray], np.ndarray], start: np.ndarray, step_strategy: st.
StepStrategy, stop_criteria: sc.StopCriteria, eps_strategy: float=
DEFAULT_EPSILON, eps_stop_criteria: float=DEFAULT_EPSILON,
max_iterations_strategy=DEFAULT_MAX_ITERATIONS, max_iterations_criteria
=DEFAULT_MAX_ITERATIONS, trajectory: Optional[List]=None):
strategy = st.get_step_strategy(step_strategy, f, f_grad, eps_strategy,
max_iterations_strategy)
criteria = sc.get_stop_criteria(stop_criteria, f, f_grad,
eps_stop_criteria, max_iterations_criteria)
cur_x = start
iters = 0
if trajectory is not None:
trajectory.append(cur_x)
while True:
iters += 1
cur_grad = f_grad(cur_x)
step = strategy.next_step(cur_x)
next_x = cur_x - step * cur_grad
if criteria.should_stop(cur_x, next_x):
return cur_x, iters
cur_x = next_x
if trajectory is not None:
trajectory.append(cur_x)
if iters == max_iterations_criteria:
return cur_x, iters
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def gradient_descent(f: Callable[[np.ndarray], float], f_grad: Callable[[np
.ndarray], np.ndarray], start: np.ndarray, step_strategy: st.
StepStrategy, stop_criteria: sc.StopCriteria, eps_strategy: float=
DEFAULT_EPSILON, eps_stop_criteria: float=DEFAULT_EPSILON,
max_iterations_strategy=DEFAULT_MAX_ITERATIONS, max_iterations_criteria
=DEFAULT_MAX_ITERATIONS, trajectory: Optional[List]=None):
strategy = st.get_step_strategy(step_strategy, f, f_grad, eps_strategy,
max_iterations_strategy)
criteria = sc.get_stop_criteria(stop_criteria, f, f_grad,
eps_stop_criteria, max_iterations_criteria)
cur_x = start
iters = 0
if trajectory is not None:
trajectory.append(cur_x)
while True:
iters += 1
cur_grad = f_grad(cur_x)
step = strategy.next_step(cur_x)
next_x = cur_x - step * cur_grad
if criteria.should_stop(cur_x, next_x):
return cur_x, iters
cur_x = next_x
if trajectory is not None:
trajectory.append(cur_x)
if iters == max_iterations_criteria:
return cur_x, iters
if __name__ == '__main__':
def foo(p):
return p[0] ** 2 + p[1] ** 2
def foo_grad(p):
x, y = p[0], p[1]
return np.array([2 * x, 2 * y])
res, _ = gradient_descent(foo, foo_grad, start=np.array([3, 4]),
step_strategy=st.StepStrategy.DIVIDE_STEP, stop_criteria=sc.
StopCriteria.BY_GRAD)
print(res)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
DEFAULT_EPSILON = 1e-09
DEFAULT_MAX_ITERATIONS = 100000.0
def gradient_descent(f: Callable[[np.ndarray], float], f_grad: Callable[[np
.ndarray], np.ndarray], start: np.ndarray, step_strategy: st.
StepStrategy, stop_criteria: sc.StopCriteria, eps_strategy: float=
DEFAULT_EPSILON, eps_stop_criteria: float=DEFAULT_EPSILON,
max_iterations_strategy=DEFAULT_MAX_ITERATIONS, max_iterations_criteria
=DEFAULT_MAX_ITERATIONS, trajectory: Optional[List]=None):
strategy = st.get_step_strategy(step_strategy, f, f_grad, eps_strategy,
max_iterations_strategy)
criteria = sc.get_stop_criteria(stop_criteria, f, f_grad,
eps_stop_criteria, max_iterations_criteria)
cur_x = start
iters = 0
if trajectory is not None:
trajectory.append(cur_x)
while True:
iters += 1
cur_grad = f_grad(cur_x)
step = strategy.next_step(cur_x)
next_x = cur_x - step * cur_grad
if criteria.should_stop(cur_x, next_x):
return cur_x, iters
cur_x = next_x
if trajectory is not None:
trajectory.append(cur_x)
if iters == max_iterations_criteria:
return cur_x, iters
if __name__ == '__main__':
def foo(p):
return p[0] ** 2 + p[1] ** 2
def foo_grad(p):
x, y = p[0], p[1]
return np.array([2 * x, 2 * y])
res, _ = gradient_descent(foo, foo_grad, start=np.array([3, 4]),
step_strategy=st.StepStrategy.DIVIDE_STEP, stop_criteria=sc.
StopCriteria.BY_GRAD)
print(res)
<|reserved_special_token_1|>
from typing import Callable, List, Optional
import numpy as np
import lab1.src.grad.grad_step_strategy as st
import lab1.src.grad.stop_criteria as sc
DEFAULT_EPSILON = 1e-09
DEFAULT_MAX_ITERATIONS = 100000.0
def gradient_descent(f: Callable[[np.ndarray], float], f_grad: Callable[[np
.ndarray], np.ndarray], start: np.ndarray, step_strategy: st.
StepStrategy, stop_criteria: sc.StopCriteria, eps_strategy: float=
DEFAULT_EPSILON, eps_stop_criteria: float=DEFAULT_EPSILON,
max_iterations_strategy=DEFAULT_MAX_ITERATIONS, max_iterations_criteria
=DEFAULT_MAX_ITERATIONS, trajectory: Optional[List]=None):
strategy = st.get_step_strategy(step_strategy, f, f_grad, eps_strategy,
max_iterations_strategy)
criteria = sc.get_stop_criteria(stop_criteria, f, f_grad,
eps_stop_criteria, max_iterations_criteria)
cur_x = start
iters = 0
if trajectory is not None:
trajectory.append(cur_x)
while True:
iters += 1
cur_grad = f_grad(cur_x)
step = strategy.next_step(cur_x)
next_x = cur_x - step * cur_grad
if criteria.should_stop(cur_x, next_x):
return cur_x, iters
cur_x = next_x
if trajectory is not None:
trajectory.append(cur_x)
if iters == max_iterations_criteria:
return cur_x, iters
if __name__ == '__main__':
def foo(p):
return p[0] ** 2 + p[1] ** 2
def foo_grad(p):
x, y = p[0], p[1]
return np.array([2 * x, 2 * y])
res, _ = gradient_descent(foo, foo_grad, start=np.array([3, 4]),
step_strategy=st.StepStrategy.DIVIDE_STEP, stop_criteria=sc.
StopCriteria.BY_GRAD)
print(res)
<|reserved_special_token_1|>
from typing import Callable, List, Optional
import numpy as np
import lab1.src.grad.grad_step_strategy as st
import lab1.src.grad.stop_criteria as sc
DEFAULT_EPSILON = 1e-9
DEFAULT_MAX_ITERATIONS = 1e5
def gradient_descent(f: Callable[[np.ndarray], float],
f_grad: Callable[[np.ndarray], np.ndarray],
start: np.ndarray,
step_strategy: st.StepStrategy,
stop_criteria: sc.StopCriteria,
eps_strategy: float = DEFAULT_EPSILON,
eps_stop_criteria: float = DEFAULT_EPSILON,
max_iterations_strategy=DEFAULT_MAX_ITERATIONS,
max_iterations_criteria=DEFAULT_MAX_ITERATIONS,
trajectory: Optional[List] = None):
strategy = st.get_step_strategy(step_strategy, f, f_grad, eps_strategy, max_iterations_strategy)
criteria = sc.get_stop_criteria(stop_criteria, f, f_grad, eps_stop_criteria, max_iterations_criteria)
cur_x = start
iters = 0
if trajectory is not None:
trajectory.append(cur_x)
while True:
iters += 1
cur_grad = f_grad(cur_x)
step = strategy.next_step(cur_x)
next_x = cur_x - step * cur_grad
if criteria.should_stop(cur_x, next_x):
return cur_x, iters
cur_x = next_x
if trajectory is not None:
trajectory.append(cur_x)
if iters == max_iterations_criteria:
return cur_x, iters
if __name__ == '__main__':
def foo(p):
return p[0] ** 2 + p[1] ** 2
def foo_grad(p):
x, y = p[0], p[1]
return np.array([2 * x, 2 * y])
res, _ = gradient_descent(foo,
foo_grad,
start=np.array([3, 4]),
step_strategy=st.StepStrategy.DIVIDE_STEP,
stop_criteria=sc.StopCriteria.BY_GRAD)
print(res)
|
flexible
|
{
"blob_id": "919e1f8a4b021d75496f3bcff369261a09362a65",
"index": 3645,
"step-1": "<mask token>\n\n\ndef gradient_descent(f: Callable[[np.ndarray], float], f_grad: Callable[[np\n .ndarray], np.ndarray], start: np.ndarray, step_strategy: st.\n StepStrategy, stop_criteria: sc.StopCriteria, eps_strategy: float=\n DEFAULT_EPSILON, eps_stop_criteria: float=DEFAULT_EPSILON,\n max_iterations_strategy=DEFAULT_MAX_ITERATIONS, max_iterations_criteria\n =DEFAULT_MAX_ITERATIONS, trajectory: Optional[List]=None):\n strategy = st.get_step_strategy(step_strategy, f, f_grad, eps_strategy,\n max_iterations_strategy)\n criteria = sc.get_stop_criteria(stop_criteria, f, f_grad,\n eps_stop_criteria, max_iterations_criteria)\n cur_x = start\n iters = 0\n if trajectory is not None:\n trajectory.append(cur_x)\n while True:\n iters += 1\n cur_grad = f_grad(cur_x)\n step = strategy.next_step(cur_x)\n next_x = cur_x - step * cur_grad\n if criteria.should_stop(cur_x, next_x):\n return cur_x, iters\n cur_x = next_x\n if trajectory is not None:\n trajectory.append(cur_x)\n if iters == max_iterations_criteria:\n return cur_x, iters\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef gradient_descent(f: Callable[[np.ndarray], float], f_grad: Callable[[np\n .ndarray], np.ndarray], start: np.ndarray, step_strategy: st.\n StepStrategy, stop_criteria: sc.StopCriteria, eps_strategy: float=\n DEFAULT_EPSILON, eps_stop_criteria: float=DEFAULT_EPSILON,\n max_iterations_strategy=DEFAULT_MAX_ITERATIONS, max_iterations_criteria\n =DEFAULT_MAX_ITERATIONS, trajectory: Optional[List]=None):\n strategy = st.get_step_strategy(step_strategy, f, f_grad, eps_strategy,\n max_iterations_strategy)\n criteria = sc.get_stop_criteria(stop_criteria, f, f_grad,\n eps_stop_criteria, max_iterations_criteria)\n cur_x = start\n iters = 0\n if trajectory is not None:\n trajectory.append(cur_x)\n while True:\n iters += 1\n cur_grad = f_grad(cur_x)\n step = strategy.next_step(cur_x)\n next_x = cur_x - step * cur_grad\n if criteria.should_stop(cur_x, next_x):\n return cur_x, iters\n cur_x = next_x\n if trajectory is not None:\n trajectory.append(cur_x)\n if iters == max_iterations_criteria:\n return cur_x, iters\n\n\nif __name__ == '__main__':\n\n def foo(p):\n return p[0] ** 2 + p[1] ** 2\n\n def foo_grad(p):\n x, y = p[0], p[1]\n return np.array([2 * x, 2 * y])\n res, _ = gradient_descent(foo, foo_grad, start=np.array([3, 4]),\n step_strategy=st.StepStrategy.DIVIDE_STEP, stop_criteria=sc.\n StopCriteria.BY_GRAD)\n print(res)\n",
"step-3": "<mask token>\nDEFAULT_EPSILON = 1e-09\nDEFAULT_MAX_ITERATIONS = 100000.0\n\n\ndef gradient_descent(f: Callable[[np.ndarray], float], f_grad: Callable[[np\n .ndarray], np.ndarray], start: np.ndarray, step_strategy: st.\n StepStrategy, stop_criteria: sc.StopCriteria, eps_strategy: float=\n DEFAULT_EPSILON, eps_stop_criteria: float=DEFAULT_EPSILON,\n max_iterations_strategy=DEFAULT_MAX_ITERATIONS, max_iterations_criteria\n =DEFAULT_MAX_ITERATIONS, trajectory: Optional[List]=None):\n strategy = st.get_step_strategy(step_strategy, f, f_grad, eps_strategy,\n max_iterations_strategy)\n criteria = sc.get_stop_criteria(stop_criteria, f, f_grad,\n eps_stop_criteria, max_iterations_criteria)\n cur_x = start\n iters = 0\n if trajectory is not None:\n trajectory.append(cur_x)\n while True:\n iters += 1\n cur_grad = f_grad(cur_x)\n step = strategy.next_step(cur_x)\n next_x = cur_x - step * cur_grad\n if criteria.should_stop(cur_x, next_x):\n return cur_x, iters\n cur_x = next_x\n if trajectory is not None:\n trajectory.append(cur_x)\n if iters == max_iterations_criteria:\n return cur_x, iters\n\n\nif __name__ == '__main__':\n\n def foo(p):\n return p[0] ** 2 + p[1] ** 2\n\n def foo_grad(p):\n x, y = p[0], p[1]\n return np.array([2 * x, 2 * y])\n res, _ = gradient_descent(foo, foo_grad, start=np.array([3, 4]),\n step_strategy=st.StepStrategy.DIVIDE_STEP, stop_criteria=sc.\n StopCriteria.BY_GRAD)\n print(res)\n",
"step-4": "from typing import Callable, List, Optional\nimport numpy as np\nimport lab1.src.grad.grad_step_strategy as st\nimport lab1.src.grad.stop_criteria as sc\nDEFAULT_EPSILON = 1e-09\nDEFAULT_MAX_ITERATIONS = 100000.0\n\n\ndef gradient_descent(f: Callable[[np.ndarray], float], f_grad: Callable[[np\n .ndarray], np.ndarray], start: np.ndarray, step_strategy: st.\n StepStrategy, stop_criteria: sc.StopCriteria, eps_strategy: float=\n DEFAULT_EPSILON, eps_stop_criteria: float=DEFAULT_EPSILON,\n max_iterations_strategy=DEFAULT_MAX_ITERATIONS, max_iterations_criteria\n =DEFAULT_MAX_ITERATIONS, trajectory: Optional[List]=None):\n strategy = st.get_step_strategy(step_strategy, f, f_grad, eps_strategy,\n max_iterations_strategy)\n criteria = sc.get_stop_criteria(stop_criteria, f, f_grad,\n eps_stop_criteria, max_iterations_criteria)\n cur_x = start\n iters = 0\n if trajectory is not None:\n trajectory.append(cur_x)\n while True:\n iters += 1\n cur_grad = f_grad(cur_x)\n step = strategy.next_step(cur_x)\n next_x = cur_x - step * cur_grad\n if criteria.should_stop(cur_x, next_x):\n return cur_x, iters\n cur_x = next_x\n if trajectory is not None:\n trajectory.append(cur_x)\n if iters == max_iterations_criteria:\n return cur_x, iters\n\n\nif __name__ == '__main__':\n\n def foo(p):\n return p[0] ** 2 + p[1] ** 2\n\n def foo_grad(p):\n x, y = p[0], p[1]\n return np.array([2 * x, 2 * y])\n res, _ = gradient_descent(foo, foo_grad, start=np.array([3, 4]),\n step_strategy=st.StepStrategy.DIVIDE_STEP, stop_criteria=sc.\n StopCriteria.BY_GRAD)\n print(res)\n",
"step-5": "from typing import Callable, List, Optional\nimport numpy as np\n\nimport lab1.src.grad.grad_step_strategy as st\nimport lab1.src.grad.stop_criteria as sc\n\n\nDEFAULT_EPSILON = 1e-9\nDEFAULT_MAX_ITERATIONS = 1e5\n\n\ndef gradient_descent(f: Callable[[np.ndarray], float],\n f_grad: Callable[[np.ndarray], np.ndarray],\n start: np.ndarray,\n step_strategy: st.StepStrategy,\n stop_criteria: sc.StopCriteria,\n eps_strategy: float = DEFAULT_EPSILON,\n eps_stop_criteria: float = DEFAULT_EPSILON,\n max_iterations_strategy=DEFAULT_MAX_ITERATIONS,\n max_iterations_criteria=DEFAULT_MAX_ITERATIONS,\n trajectory: Optional[List] = None):\n strategy = st.get_step_strategy(step_strategy, f, f_grad, eps_strategy, max_iterations_strategy)\n criteria = sc.get_stop_criteria(stop_criteria, f, f_grad, eps_stop_criteria, max_iterations_criteria)\n cur_x = start\n iters = 0\n\n if trajectory is not None:\n trajectory.append(cur_x)\n\n while True:\n iters += 1\n cur_grad = f_grad(cur_x)\n step = strategy.next_step(cur_x)\n next_x = cur_x - step * cur_grad\n\n if criteria.should_stop(cur_x, next_x):\n return cur_x, iters\n\n cur_x = next_x\n if trajectory is not None:\n trajectory.append(cur_x)\n\n if iters == max_iterations_criteria:\n return cur_x, iters\n\n\nif __name__ == '__main__':\n def foo(p):\n return p[0] ** 2 + p[1] ** 2\n\n def foo_grad(p):\n x, y = p[0], p[1]\n return np.array([2 * x, 2 * y])\n\n\n res, _ = gradient_descent(foo,\n foo_grad,\n start=np.array([3, 4]),\n step_strategy=st.StepStrategy.DIVIDE_STEP,\n stop_criteria=sc.StopCriteria.BY_GRAD)\n print(res)\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def block(request, limit=None):
try:
links = cache.get_cache('sape', expire=3600).get(key='links',
createfunc=load_links)
except:
links = cache.get_cache('sape', expire=300).get(key='links',
createfunc=lambda : {})
if request.path in links:
if not hasattr(request, 'sape_links_shown'):
request.sape_links_shown = 0
slc = links[request.path][request.sape_links_shown:request.
sape_links_shown + limit if limit is not None else None]
request.sape_links_shown += len(slc)
if slc:
return {'class': 'sape', 'links': links['__sape_delimiter__'].
join(slc)}
return None
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def block(request, limit=None):
try:
links = cache.get_cache('sape', expire=3600).get(key='links',
createfunc=load_links)
except:
links = cache.get_cache('sape', expire=300).get(key='links',
createfunc=lambda : {})
if request.path in links:
if not hasattr(request, 'sape_links_shown'):
request.sape_links_shown = 0
slc = links[request.path][request.sape_links_shown:request.
sape_links_shown + limit if limit is not None else None]
request.sape_links_shown += len(slc)
if slc:
return {'class': 'sape', 'links': links['__sape_delimiter__'].
join(slc)}
return None
def load_links():
return dict(map(lambda path_links: (path_links[0], [link.decode(
'windows-1251') for link in path_links[1].values()] if isinstance(
path_links[1], dict) else path_links[1]), phpserialize.loads(
urllib2.urlopen(urllib2.Request(
'http://dispenser-01.sape.ru/code.php?user={0}&host={1}'.format(
config.sape_user_id, config.sape_host))).read()).items()))
<|reserved_special_token_1|>
import phpserialize
import urllib2
from cache import cache
from config import config
def block(request, limit=None):
try:
links = cache.get_cache('sape', expire=3600).get(key='links',
createfunc=load_links)
except:
links = cache.get_cache('sape', expire=300).get(key='links',
createfunc=lambda : {})
if request.path in links:
if not hasattr(request, 'sape_links_shown'):
request.sape_links_shown = 0
slc = links[request.path][request.sape_links_shown:request.
sape_links_shown + limit if limit is not None else None]
request.sape_links_shown += len(slc)
if slc:
return {'class': 'sape', 'links': links['__sape_delimiter__'].
join(slc)}
return None
def load_links():
return dict(map(lambda path_links: (path_links[0], [link.decode(
'windows-1251') for link in path_links[1].values()] if isinstance(
path_links[1], dict) else path_links[1]), phpserialize.loads(
urllib2.urlopen(urllib2.Request(
'http://dispenser-01.sape.ru/code.php?user={0}&host={1}'.format(
config.sape_user_id, config.sape_host))).read()).items()))
<|reserved_special_token_1|>
#!/usr/bin/python
# -*- coding: utf-8 -*-
import phpserialize
import urllib2
from cache import cache
from config import config
def block(request, limit=None):
try:
links = cache.get_cache("sape", expire=3600).get(key="links", createfunc=load_links)
except:
links = cache.get_cache("sape", expire=300).get(key="links", createfunc=lambda: {})
if request.path in links:
if not hasattr(request, "sape_links_shown"):
request.sape_links_shown = 0
slc = links[request.path][request.sape_links_shown : request.sape_links_shown + limit if limit is not None else None]
request.sape_links_shown += len(slc)
if slc:
return {
"class" : "sape",
"links" : links["__sape_delimiter__"].join(slc),
}
return None
def load_links():
return dict(
map(
lambda path_links: (path_links[0], [link.decode("windows-1251") for link in path_links[1].values()] if isinstance(path_links[1], dict) else path_links[1]),
phpserialize.loads(
urllib2.urlopen(urllib2.Request(
"http://dispenser-01.sape.ru/code.php?user={0}&host={1}".format(config.sape_user_id, config.sape_host)
)).read()
).items()
)
)
|
flexible
|
{
"blob_id": "6d5acaa4a60b646432feb59f4d8eb9c9d0dceb0f",
"index": 1151,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef block(request, limit=None):\n try:\n links = cache.get_cache('sape', expire=3600).get(key='links',\n createfunc=load_links)\n except:\n links = cache.get_cache('sape', expire=300).get(key='links',\n createfunc=lambda : {})\n if request.path in links:\n if not hasattr(request, 'sape_links_shown'):\n request.sape_links_shown = 0\n slc = links[request.path][request.sape_links_shown:request.\n sape_links_shown + limit if limit is not None else None]\n request.sape_links_shown += len(slc)\n if slc:\n return {'class': 'sape', 'links': links['__sape_delimiter__'].\n join(slc)}\n return None\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef block(request, limit=None):\n try:\n links = cache.get_cache('sape', expire=3600).get(key='links',\n createfunc=load_links)\n except:\n links = cache.get_cache('sape', expire=300).get(key='links',\n createfunc=lambda : {})\n if request.path in links:\n if not hasattr(request, 'sape_links_shown'):\n request.sape_links_shown = 0\n slc = links[request.path][request.sape_links_shown:request.\n sape_links_shown + limit if limit is not None else None]\n request.sape_links_shown += len(slc)\n if slc:\n return {'class': 'sape', 'links': links['__sape_delimiter__'].\n join(slc)}\n return None\n\n\ndef load_links():\n return dict(map(lambda path_links: (path_links[0], [link.decode(\n 'windows-1251') for link in path_links[1].values()] if isinstance(\n path_links[1], dict) else path_links[1]), phpserialize.loads(\n urllib2.urlopen(urllib2.Request(\n 'http://dispenser-01.sape.ru/code.php?user={0}&host={1}'.format(\n config.sape_user_id, config.sape_host))).read()).items()))\n",
"step-4": "import phpserialize\nimport urllib2\nfrom cache import cache\nfrom config import config\n\n\ndef block(request, limit=None):\n try:\n links = cache.get_cache('sape', expire=3600).get(key='links',\n createfunc=load_links)\n except:\n links = cache.get_cache('sape', expire=300).get(key='links',\n createfunc=lambda : {})\n if request.path in links:\n if not hasattr(request, 'sape_links_shown'):\n request.sape_links_shown = 0\n slc = links[request.path][request.sape_links_shown:request.\n sape_links_shown + limit if limit is not None else None]\n request.sape_links_shown += len(slc)\n if slc:\n return {'class': 'sape', 'links': links['__sape_delimiter__'].\n join(slc)}\n return None\n\n\ndef load_links():\n return dict(map(lambda path_links: (path_links[0], [link.decode(\n 'windows-1251') for link in path_links[1].values()] if isinstance(\n path_links[1], dict) else path_links[1]), phpserialize.loads(\n urllib2.urlopen(urllib2.Request(\n 'http://dispenser-01.sape.ru/code.php?user={0}&host={1}'.format(\n config.sape_user_id, config.sape_host))).read()).items()))\n",
"step-5": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport phpserialize\nimport urllib2\n\nfrom cache import cache\nfrom config import config\n\ndef block(request, limit=None):\n try:\n links = cache.get_cache(\"sape\", expire=3600).get(key=\"links\", createfunc=load_links)\n except:\n links = cache.get_cache(\"sape\", expire=300).get(key=\"links\", createfunc=lambda: {})\n\n if request.path in links:\n if not hasattr(request, \"sape_links_shown\"):\n request.sape_links_shown = 0\n\n slc = links[request.path][request.sape_links_shown : request.sape_links_shown + limit if limit is not None else None]\n request.sape_links_shown += len(slc)\n\n if slc:\n return {\n \"class\" : \"sape\",\n \"links\" : links[\"__sape_delimiter__\"].join(slc),\n }\n\n return None\n\ndef load_links():\n return dict(\n map(\n lambda path_links: (path_links[0], [link.decode(\"windows-1251\") for link in path_links[1].values()] if isinstance(path_links[1], dict) else path_links[1]),\n phpserialize.loads(\n urllib2.urlopen(urllib2.Request(\n \"http://dispenser-01.sape.ru/code.php?user={0}&host={1}\".format(config.sape_user_id, config.sape_host)\n )).read()\n ).items()\n )\n )\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
"""
Created on 02.09.2013
@author: Paul Schweizer
@email: paulschweizer@gmx.net
@brief: Holds all the namingconventions for pandora's box
"""
import os
import json
class NamingConvention():
"""Imports naming conventions from the respective .json file and puts them
into class variables.
"""
def __init__(self):
namingconventions = os.path.join(os.path.dirname(os.path.dirname(__file__)),
'data', 'strings', 'namingconvention.json')
namingconventions = json.load(open(namingconventions))
for key, value in namingconventions.items():
setattr(NamingConvention, key, value)
# end for constant in constants
# end def __init__
# end class NamingConvention
|
normal
|
{
"blob_id": "d2a153fffccd4b681eebce823e641e195197cde7",
"index": 54,
"step-1": "<mask token>\n\n\nclass NamingConvention:\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass NamingConvention:\n <mask token>\n\n def __init__(self):\n namingconventions = os.path.join(os.path.dirname(os.path.dirname(\n __file__)), 'data', 'strings', 'namingconvention.json')\n namingconventions = json.load(open(namingconventions))\n for key, value in namingconventions.items():\n setattr(NamingConvention, key, value)\n",
"step-3": "<mask token>\n\n\nclass NamingConvention:\n \"\"\"Imports naming conventions from the respective .json file and puts them\n into class variables.\n \"\"\"\n\n def __init__(self):\n namingconventions = os.path.join(os.path.dirname(os.path.dirname(\n __file__)), 'data', 'strings', 'namingconvention.json')\n namingconventions = json.load(open(namingconventions))\n for key, value in namingconventions.items():\n setattr(NamingConvention, key, value)\n",
"step-4": "<mask token>\nimport os\nimport json\n\n\nclass NamingConvention:\n \"\"\"Imports naming conventions from the respective .json file and puts them\n into class variables.\n \"\"\"\n\n def __init__(self):\n namingconventions = os.path.join(os.path.dirname(os.path.dirname(\n __file__)), 'data', 'strings', 'namingconvention.json')\n namingconventions = json.load(open(namingconventions))\n for key, value in namingconventions.items():\n setattr(NamingConvention, key, value)\n",
"step-5": "\"\"\"\nCreated on 02.09.2013\n@author: Paul Schweizer\n@email: paulschweizer@gmx.net\n@brief: Holds all the namingconventions for pandora's box\n\"\"\"\n\nimport os\nimport json\n\n\nclass NamingConvention():\n \"\"\"Imports naming conventions from the respective .json file and puts them\n into class variables.\n \"\"\"\n def __init__(self):\n namingconventions = os.path.join(os.path.dirname(os.path.dirname(__file__)),\n 'data', 'strings', 'namingconvention.json')\n namingconventions = json.load(open(namingconventions))\n for key, value in namingconventions.items():\n setattr(NamingConvention, key, value)\n # end for constant in constants\n # end def __init__\n# end class NamingConvention\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
"""
Flask app for testing the OpenID Connect extension.
"""
import json
from unittest.mock import MagicMock, Mock
from flask import Flask, g
import flask_oidc
from tests.json_snippets import *
oidc = None
def index():
return "too many secrets", 200, {
'Content-Type': 'text/plain; charset=utf-8'
}
def get_at():
return oidc.get_access_token(), 200, {
'Content-Type': 'text/plain; charset=utf-8'
}
def get_rt():
return oidc.get_refresh_token(), 200, {
'Content-Type': 'text/plain; charset=utf-8'
}
def get_test1():
return "successful call to test1", 200, {
'Content-Type': 'text/plain; charset=utf-8'
}
def get_test2():
return "successful call to test2", 200, {
'Content-Type': 'text/plain; charset=utf-8'
}
def get_test3():
return "successful call to test3", 200, {
'Content-Type': 'text/plain; charset=utf-8'
}
def get_unprotected():
return "successful call to unprotected", 200, {
'Content-Type': 'text/plain; charset=utf-8'
}
def raw_api():
return {'token': g.oidc_token_info}
def api():
return json.dumps(raw_api())
def get_test4():
return "successful call to test4", 200, {
'Content-Type': 'text/plain; charset=utf-8'
}
callback_method = Mock()
def create_app(config, oidc_overrides=None):
global oidc
app = Flask(__name__)
app.config.update(config)
if oidc_overrides is None:
oidc_overrides = {}
app.oidc = flask_oidc.OpenIDConnect(app, **oidc_overrides)
oidc = app.oidc
app.route('/')(app.oidc.check(index))
app.route('/at')(app.oidc.check(get_at))
app.route('/rt')(app.oidc.check(get_rt))
# Check standalone usage
rendered = app.oidc.accept_token(True, ['openid'], auth_header_key='Authorization')(api)
app.route('/api', methods=['GET', 'POST'])(rendered)
configure_keycloak_test_uris(app)
# Check combination with an external API renderer like Flask-RESTful
unrendered = app.oidc.accept_token(True, ['openid'], render_errors=False, auth_header_key='Authorization')(raw_api)
def externally_rendered_api(*args, **kwds):
inner_response = unrendered(*args, **kwds)
if isinstance(inner_response, tuple):
raw_response, response_code, headers = inner_response
rendered_response = json.dumps(raw_response), response_code, headers
else:
rendered_response = json.dumps(inner_response)
return rendered_response
app.route('/external_api', methods=['GET', 'POST'])(externally_rendered_api)
return app
def configure_keycloak_test_uris(app):
test1 = app.oidc.check_authorization(True)(get_test1)
app.route('/test1', methods=['GET', 'POST'])(test1)
test2 = app.oidc.check_authorization(True)(get_test2)
app.route('/test2', methods=['GET', 'POST'])(test2)
test3 = app.oidc.check_authorization(True)(get_test3)
app.route('/test3', methods=['GET', 'POST'])(test3)
callback_method.return_value = True
test4 = app.oidc.check_authorization(True, validation_func=callback_method)(get_test4)
app.route('/test4', methods=['GET', 'POST'])(test4)
unprotected = app.oidc.check_authorization(False)(get_unprotected)
app.route('/unprotected', methods=['GET'])(unprotected)
def _configure_mock_object(test_app):
test_app.oidc.validate_token = Mock()
test_app.oidc.validate_token.return_value = True
test_app.oidc.keycloakApi = MagicMock(autospec=flask_oidc.KeycloakAPI)
test_app.oidc.keycloakApi.authorize = Mock()
test_app.oidc.keycloakApi.authorize.return_value = valid_rpt
test_app.oidc.keycloakApi.get_access_token = Mock()
test_app.oidc.keycloakApi.get_access_token.return_value = access_token
test_app.oidc.keycloakApi._get_realm_pub_key = Mock()
test_app.oidc.keycloakApi._get_realm_pub_key.return_value = "abc"
def configure_mock_object_version1(test_app):
_configure_mock_object(test_app)
test_app.oidc.keycloakApi.jwt_decode = Mock()
test_app.oidc.keycloakApi.jwt_decode.return_value = decoded_jwt_with_permission_test1_and_test2
test_app.oidc.keycloakApi.get_resource_info = Mock()
test_app.oidc.keycloakApi.get_resource_info.side_effect = [resource_test1, resource_test2]
def configure_mock_version2(test_app):
_configure_mock_object(test_app)
test_app.oidc.keycloakApi.jwt_decode.return_value = decoded_jwt_with_permission_test3
test_app.oidc.keycloakApi.get_resource_info = Mock()
test_app.oidc.keycloakApi.get_resource_info.side_effect = [resource_test3]
def configure_mock_version3(test_app):
_configure_mock_object(test_app)
test_app.oidc.keycloakApi.jwt_decode.return_value = None
test_app.oidc.keycloakApi.get_resource_info = Mock()
test_app.oidc.keycloakApi.get_resource_info.side_effect = [resource_test3]
|
normal
|
{
"blob_id": "ef3fa538828315845de5e2f7d4949f690e44276e",
"index": 6009,
"step-1": "<mask token>\n\n\ndef index():\n return 'too many secrets', 200, {'Content-Type':\n 'text/plain; charset=utf-8'}\n\n\ndef get_at():\n return oidc.get_access_token(), 200, {'Content-Type':\n 'text/plain; charset=utf-8'}\n\n\n<mask token>\n\n\ndef get_test1():\n return 'successful call to test1', 200, {'Content-Type':\n 'text/plain; charset=utf-8'}\n\n\ndef get_test2():\n return 'successful call to test2', 200, {'Content-Type':\n 'text/plain; charset=utf-8'}\n\n\ndef get_test3():\n return 'successful call to test3', 200, {'Content-Type':\n 'text/plain; charset=utf-8'}\n\n\ndef get_unprotected():\n return 'successful call to unprotected', 200, {'Content-Type':\n 'text/plain; charset=utf-8'}\n\n\ndef raw_api():\n return {'token': g.oidc_token_info}\n\n\n<mask token>\n\n\ndef create_app(config, oidc_overrides=None):\n global oidc\n app = Flask(__name__)\n app.config.update(config)\n if oidc_overrides is None:\n oidc_overrides = {}\n app.oidc = flask_oidc.OpenIDConnect(app, **oidc_overrides)\n oidc = app.oidc\n app.route('/')(app.oidc.check(index))\n app.route('/at')(app.oidc.check(get_at))\n app.route('/rt')(app.oidc.check(get_rt))\n rendered = app.oidc.accept_token(True, ['openid'], auth_header_key=\n 'Authorization')(api)\n app.route('/api', methods=['GET', 'POST'])(rendered)\n configure_keycloak_test_uris(app)\n unrendered = app.oidc.accept_token(True, ['openid'], render_errors=\n False, auth_header_key='Authorization')(raw_api)\n\n def externally_rendered_api(*args, **kwds):\n inner_response = unrendered(*args, **kwds)\n if isinstance(inner_response, tuple):\n raw_response, response_code, headers = inner_response\n rendered_response = json.dumps(raw_response\n ), response_code, headers\n else:\n rendered_response = json.dumps(inner_response)\n return rendered_response\n app.route('/external_api', methods=['GET', 'POST'])(externally_rendered_api\n )\n return app\n\n\n<mask token>\n\n\ndef configure_mock_object_version1(test_app):\n 
_configure_mock_object(test_app)\n test_app.oidc.keycloakApi.jwt_decode = Mock()\n test_app.oidc.keycloakApi.jwt_decode.return_value = (\n decoded_jwt_with_permission_test1_and_test2)\n test_app.oidc.keycloakApi.get_resource_info = Mock()\n test_app.oidc.keycloakApi.get_resource_info.side_effect = [resource_test1,\n resource_test2]\n\n\ndef configure_mock_version2(test_app):\n _configure_mock_object(test_app)\n test_app.oidc.keycloakApi.jwt_decode.return_value = (\n decoded_jwt_with_permission_test3)\n test_app.oidc.keycloakApi.get_resource_info = Mock()\n test_app.oidc.keycloakApi.get_resource_info.side_effect = [resource_test3]\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef index():\n return 'too many secrets', 200, {'Content-Type':\n 'text/plain; charset=utf-8'}\n\n\ndef get_at():\n return oidc.get_access_token(), 200, {'Content-Type':\n 'text/plain; charset=utf-8'}\n\n\n<mask token>\n\n\ndef get_test1():\n return 'successful call to test1', 200, {'Content-Type':\n 'text/plain; charset=utf-8'}\n\n\ndef get_test2():\n return 'successful call to test2', 200, {'Content-Type':\n 'text/plain; charset=utf-8'}\n\n\ndef get_test3():\n return 'successful call to test3', 200, {'Content-Type':\n 'text/plain; charset=utf-8'}\n\n\ndef get_unprotected():\n return 'successful call to unprotected', 200, {'Content-Type':\n 'text/plain; charset=utf-8'}\n\n\ndef raw_api():\n return {'token': g.oidc_token_info}\n\n\n<mask token>\n\n\ndef create_app(config, oidc_overrides=None):\n global oidc\n app = Flask(__name__)\n app.config.update(config)\n if oidc_overrides is None:\n oidc_overrides = {}\n app.oidc = flask_oidc.OpenIDConnect(app, **oidc_overrides)\n oidc = app.oidc\n app.route('/')(app.oidc.check(index))\n app.route('/at')(app.oidc.check(get_at))\n app.route('/rt')(app.oidc.check(get_rt))\n rendered = app.oidc.accept_token(True, ['openid'], auth_header_key=\n 'Authorization')(api)\n app.route('/api', methods=['GET', 'POST'])(rendered)\n configure_keycloak_test_uris(app)\n unrendered = app.oidc.accept_token(True, ['openid'], render_errors=\n False, auth_header_key='Authorization')(raw_api)\n\n def externally_rendered_api(*args, **kwds):\n inner_response = unrendered(*args, **kwds)\n if isinstance(inner_response, tuple):\n raw_response, response_code, headers = inner_response\n rendered_response = json.dumps(raw_response\n ), response_code, headers\n else:\n rendered_response = json.dumps(inner_response)\n return rendered_response\n app.route('/external_api', methods=['GET', 'POST'])(externally_rendered_api\n )\n return app\n\n\n<mask token>\n\n\ndef _configure_mock_object(test_app):\n test_app.oidc.validate_token = 
Mock()\n test_app.oidc.validate_token.return_value = True\n test_app.oidc.keycloakApi = MagicMock(autospec=flask_oidc.KeycloakAPI)\n test_app.oidc.keycloakApi.authorize = Mock()\n test_app.oidc.keycloakApi.authorize.return_value = valid_rpt\n test_app.oidc.keycloakApi.get_access_token = Mock()\n test_app.oidc.keycloakApi.get_access_token.return_value = access_token\n test_app.oidc.keycloakApi._get_realm_pub_key = Mock()\n test_app.oidc.keycloakApi._get_realm_pub_key.return_value = 'abc'\n\n\ndef configure_mock_object_version1(test_app):\n _configure_mock_object(test_app)\n test_app.oidc.keycloakApi.jwt_decode = Mock()\n test_app.oidc.keycloakApi.jwt_decode.return_value = (\n decoded_jwt_with_permission_test1_and_test2)\n test_app.oidc.keycloakApi.get_resource_info = Mock()\n test_app.oidc.keycloakApi.get_resource_info.side_effect = [resource_test1,\n resource_test2]\n\n\ndef configure_mock_version2(test_app):\n _configure_mock_object(test_app)\n test_app.oidc.keycloakApi.jwt_decode.return_value = (\n decoded_jwt_with_permission_test3)\n test_app.oidc.keycloakApi.get_resource_info = Mock()\n test_app.oidc.keycloakApi.get_resource_info.side_effect = [resource_test3]\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef index():\n return 'too many secrets', 200, {'Content-Type':\n 'text/plain; charset=utf-8'}\n\n\ndef get_at():\n return oidc.get_access_token(), 200, {'Content-Type':\n 'text/plain; charset=utf-8'}\n\n\n<mask token>\n\n\ndef get_test1():\n return 'successful call to test1', 200, {'Content-Type':\n 'text/plain; charset=utf-8'}\n\n\ndef get_test2():\n return 'successful call to test2', 200, {'Content-Type':\n 'text/plain; charset=utf-8'}\n\n\ndef get_test3():\n return 'successful call to test3', 200, {'Content-Type':\n 'text/plain; charset=utf-8'}\n\n\ndef get_unprotected():\n return 'successful call to unprotected', 200, {'Content-Type':\n 'text/plain; charset=utf-8'}\n\n\ndef raw_api():\n return {'token': g.oidc_token_info}\n\n\n<mask token>\n\n\ndef get_test4():\n return 'successful call to test4', 200, {'Content-Type':\n 'text/plain; charset=utf-8'}\n\n\n<mask token>\n\n\ndef create_app(config, oidc_overrides=None):\n global oidc\n app = Flask(__name__)\n app.config.update(config)\n if oidc_overrides is None:\n oidc_overrides = {}\n app.oidc = flask_oidc.OpenIDConnect(app, **oidc_overrides)\n oidc = app.oidc\n app.route('/')(app.oidc.check(index))\n app.route('/at')(app.oidc.check(get_at))\n app.route('/rt')(app.oidc.check(get_rt))\n rendered = app.oidc.accept_token(True, ['openid'], auth_header_key=\n 'Authorization')(api)\n app.route('/api', methods=['GET', 'POST'])(rendered)\n configure_keycloak_test_uris(app)\n unrendered = app.oidc.accept_token(True, ['openid'], render_errors=\n False, auth_header_key='Authorization')(raw_api)\n\n def externally_rendered_api(*args, **kwds):\n inner_response = unrendered(*args, **kwds)\n if isinstance(inner_response, tuple):\n raw_response, response_code, headers = inner_response\n rendered_response = json.dumps(raw_response\n ), response_code, headers\n else:\n rendered_response = json.dumps(inner_response)\n return rendered_response\n app.route('/external_api', methods=['GET', 
'POST'])(externally_rendered_api\n )\n return app\n\n\ndef configure_keycloak_test_uris(app):\n test1 = app.oidc.check_authorization(True)(get_test1)\n app.route('/test1', methods=['GET', 'POST'])(test1)\n test2 = app.oidc.check_authorization(True)(get_test2)\n app.route('/test2', methods=['GET', 'POST'])(test2)\n test3 = app.oidc.check_authorization(True)(get_test3)\n app.route('/test3', methods=['GET', 'POST'])(test3)\n callback_method.return_value = True\n test4 = app.oidc.check_authorization(True, validation_func=callback_method\n )(get_test4)\n app.route('/test4', methods=['GET', 'POST'])(test4)\n unprotected = app.oidc.check_authorization(False)(get_unprotected)\n app.route('/unprotected', methods=['GET'])(unprotected)\n\n\ndef _configure_mock_object(test_app):\n test_app.oidc.validate_token = Mock()\n test_app.oidc.validate_token.return_value = True\n test_app.oidc.keycloakApi = MagicMock(autospec=flask_oidc.KeycloakAPI)\n test_app.oidc.keycloakApi.authorize = Mock()\n test_app.oidc.keycloakApi.authorize.return_value = valid_rpt\n test_app.oidc.keycloakApi.get_access_token = Mock()\n test_app.oidc.keycloakApi.get_access_token.return_value = access_token\n test_app.oidc.keycloakApi._get_realm_pub_key = Mock()\n test_app.oidc.keycloakApi._get_realm_pub_key.return_value = 'abc'\n\n\ndef configure_mock_object_version1(test_app):\n _configure_mock_object(test_app)\n test_app.oidc.keycloakApi.jwt_decode = Mock()\n test_app.oidc.keycloakApi.jwt_decode.return_value = (\n decoded_jwt_with_permission_test1_and_test2)\n test_app.oidc.keycloakApi.get_resource_info = Mock()\n test_app.oidc.keycloakApi.get_resource_info.side_effect = [resource_test1,\n resource_test2]\n\n\ndef configure_mock_version2(test_app):\n _configure_mock_object(test_app)\n test_app.oidc.keycloakApi.jwt_decode.return_value = (\n decoded_jwt_with_permission_test3)\n test_app.oidc.keycloakApi.get_resource_info = Mock()\n test_app.oidc.keycloakApi.get_resource_info.side_effect = 
[resource_test3]\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef index():\n return 'too many secrets', 200, {'Content-Type':\n 'text/plain; charset=utf-8'}\n\n\ndef get_at():\n return oidc.get_access_token(), 200, {'Content-Type':\n 'text/plain; charset=utf-8'}\n\n\n<mask token>\n\n\ndef get_test1():\n return 'successful call to test1', 200, {'Content-Type':\n 'text/plain; charset=utf-8'}\n\n\ndef get_test2():\n return 'successful call to test2', 200, {'Content-Type':\n 'text/plain; charset=utf-8'}\n\n\ndef get_test3():\n return 'successful call to test3', 200, {'Content-Type':\n 'text/plain; charset=utf-8'}\n\n\ndef get_unprotected():\n return 'successful call to unprotected', 200, {'Content-Type':\n 'text/plain; charset=utf-8'}\n\n\ndef raw_api():\n return {'token': g.oidc_token_info}\n\n\ndef api():\n return json.dumps(raw_api())\n\n\ndef get_test4():\n return 'successful call to test4', 200, {'Content-Type':\n 'text/plain; charset=utf-8'}\n\n\n<mask token>\n\n\ndef create_app(config, oidc_overrides=None):\n global oidc\n app = Flask(__name__)\n app.config.update(config)\n if oidc_overrides is None:\n oidc_overrides = {}\n app.oidc = flask_oidc.OpenIDConnect(app, **oidc_overrides)\n oidc = app.oidc\n app.route('/')(app.oidc.check(index))\n app.route('/at')(app.oidc.check(get_at))\n app.route('/rt')(app.oidc.check(get_rt))\n rendered = app.oidc.accept_token(True, ['openid'], auth_header_key=\n 'Authorization')(api)\n app.route('/api', methods=['GET', 'POST'])(rendered)\n configure_keycloak_test_uris(app)\n unrendered = app.oidc.accept_token(True, ['openid'], render_errors=\n False, auth_header_key='Authorization')(raw_api)\n\n def externally_rendered_api(*args, **kwds):\n inner_response = unrendered(*args, **kwds)\n if isinstance(inner_response, tuple):\n raw_response, response_code, headers = inner_response\n rendered_response = json.dumps(raw_response\n ), response_code, headers\n else:\n rendered_response = json.dumps(inner_response)\n return rendered_response\n app.route('/external_api', 
methods=['GET', 'POST'])(externally_rendered_api\n )\n return app\n\n\ndef configure_keycloak_test_uris(app):\n test1 = app.oidc.check_authorization(True)(get_test1)\n app.route('/test1', methods=['GET', 'POST'])(test1)\n test2 = app.oidc.check_authorization(True)(get_test2)\n app.route('/test2', methods=['GET', 'POST'])(test2)\n test3 = app.oidc.check_authorization(True)(get_test3)\n app.route('/test3', methods=['GET', 'POST'])(test3)\n callback_method.return_value = True\n test4 = app.oidc.check_authorization(True, validation_func=callback_method\n )(get_test4)\n app.route('/test4', methods=['GET', 'POST'])(test4)\n unprotected = app.oidc.check_authorization(False)(get_unprotected)\n app.route('/unprotected', methods=['GET'])(unprotected)\n\n\ndef _configure_mock_object(test_app):\n test_app.oidc.validate_token = Mock()\n test_app.oidc.validate_token.return_value = True\n test_app.oidc.keycloakApi = MagicMock(autospec=flask_oidc.KeycloakAPI)\n test_app.oidc.keycloakApi.authorize = Mock()\n test_app.oidc.keycloakApi.authorize.return_value = valid_rpt\n test_app.oidc.keycloakApi.get_access_token = Mock()\n test_app.oidc.keycloakApi.get_access_token.return_value = access_token\n test_app.oidc.keycloakApi._get_realm_pub_key = Mock()\n test_app.oidc.keycloakApi._get_realm_pub_key.return_value = 'abc'\n\n\ndef configure_mock_object_version1(test_app):\n _configure_mock_object(test_app)\n test_app.oidc.keycloakApi.jwt_decode = Mock()\n test_app.oidc.keycloakApi.jwt_decode.return_value = (\n decoded_jwt_with_permission_test1_and_test2)\n test_app.oidc.keycloakApi.get_resource_info = Mock()\n test_app.oidc.keycloakApi.get_resource_info.side_effect = [resource_test1,\n resource_test2]\n\n\ndef configure_mock_version2(test_app):\n _configure_mock_object(test_app)\n test_app.oidc.keycloakApi.jwt_decode.return_value = (\n decoded_jwt_with_permission_test3)\n test_app.oidc.keycloakApi.get_resource_info = Mock()\n test_app.oidc.keycloakApi.get_resource_info.side_effect = 
[resource_test3]\n\n\ndef configure_mock_version3(test_app):\n _configure_mock_object(test_app)\n test_app.oidc.keycloakApi.jwt_decode.return_value = None\n test_app.oidc.keycloakApi.get_resource_info = Mock()\n test_app.oidc.keycloakApi.get_resource_info.side_effect = [resource_test3]\n",
"step-5": "\"\"\"\nFlask app for testing the OpenID Connect extension.\n\"\"\"\n\nimport json\nfrom unittest.mock import MagicMock, Mock\n\nfrom flask import Flask, g\nimport flask_oidc\nfrom tests.json_snippets import *\n\noidc = None\n\n\ndef index():\n return \"too many secrets\", 200, {\n 'Content-Type': 'text/plain; charset=utf-8'\n }\n\n\ndef get_at():\n return oidc.get_access_token(), 200, {\n 'Content-Type': 'text/plain; charset=utf-8'\n }\n\n\ndef get_rt():\n return oidc.get_refresh_token(), 200, {\n 'Content-Type': 'text/plain; charset=utf-8'\n }\n\n\ndef get_test1():\n return \"successful call to test1\", 200, {\n 'Content-Type': 'text/plain; charset=utf-8'\n }\n\n\ndef get_test2():\n return \"successful call to test2\", 200, {\n 'Content-Type': 'text/plain; charset=utf-8'\n }\n\n\ndef get_test3():\n return \"successful call to test3\", 200, {\n 'Content-Type': 'text/plain; charset=utf-8'\n }\n\n\ndef get_unprotected():\n return \"successful call to unprotected\", 200, {\n 'Content-Type': 'text/plain; charset=utf-8'\n }\n\n\ndef raw_api():\n return {'token': g.oidc_token_info}\n\n\ndef api():\n return json.dumps(raw_api())\n\n\ndef get_test4():\n return \"successful call to test4\", 200, {\n 'Content-Type': 'text/plain; charset=utf-8'\n }\n\n\ncallback_method = Mock()\n\n\ndef create_app(config, oidc_overrides=None):\n global oidc\n\n app = Flask(__name__)\n app.config.update(config)\n if oidc_overrides is None:\n oidc_overrides = {}\n app.oidc = flask_oidc.OpenIDConnect(app, **oidc_overrides)\n oidc = app.oidc\n\n app.route('/')(app.oidc.check(index))\n app.route('/at')(app.oidc.check(get_at))\n app.route('/rt')(app.oidc.check(get_rt))\n # Check standalone usage\n rendered = app.oidc.accept_token(True, ['openid'], auth_header_key='Authorization')(api)\n app.route('/api', methods=['GET', 'POST'])(rendered)\n\n configure_keycloak_test_uris(app)\n\n # Check combination with an external API renderer like Flask-RESTful\n unrendered = 
app.oidc.accept_token(True, ['openid'], render_errors=False, auth_header_key='Authorization')(raw_api)\n\n def externally_rendered_api(*args, **kwds):\n inner_response = unrendered(*args, **kwds)\n if isinstance(inner_response, tuple):\n raw_response, response_code, headers = inner_response\n rendered_response = json.dumps(raw_response), response_code, headers\n else:\n rendered_response = json.dumps(inner_response)\n return rendered_response\n\n app.route('/external_api', methods=['GET', 'POST'])(externally_rendered_api)\n return app\n\n\ndef configure_keycloak_test_uris(app):\n test1 = app.oidc.check_authorization(True)(get_test1)\n app.route('/test1', methods=['GET', 'POST'])(test1)\n test2 = app.oidc.check_authorization(True)(get_test2)\n app.route('/test2', methods=['GET', 'POST'])(test2)\n test3 = app.oidc.check_authorization(True)(get_test3)\n app.route('/test3', methods=['GET', 'POST'])(test3)\n\n callback_method.return_value = True\n\n test4 = app.oidc.check_authorization(True, validation_func=callback_method)(get_test4)\n app.route('/test4', methods=['GET', 'POST'])(test4)\n\n unprotected = app.oidc.check_authorization(False)(get_unprotected)\n app.route('/unprotected', methods=['GET'])(unprotected)\n\n\ndef _configure_mock_object(test_app):\n test_app.oidc.validate_token = Mock()\n test_app.oidc.validate_token.return_value = True\n test_app.oidc.keycloakApi = MagicMock(autospec=flask_oidc.KeycloakAPI)\n test_app.oidc.keycloakApi.authorize = Mock()\n test_app.oidc.keycloakApi.authorize.return_value = valid_rpt\n test_app.oidc.keycloakApi.get_access_token = Mock()\n test_app.oidc.keycloakApi.get_access_token.return_value = access_token\n test_app.oidc.keycloakApi._get_realm_pub_key = Mock()\n test_app.oidc.keycloakApi._get_realm_pub_key.return_value = \"abc\"\n\n\ndef configure_mock_object_version1(test_app):\n _configure_mock_object(test_app)\n\n test_app.oidc.keycloakApi.jwt_decode = Mock()\n test_app.oidc.keycloakApi.jwt_decode.return_value = 
decoded_jwt_with_permission_test1_and_test2\n test_app.oidc.keycloakApi.get_resource_info = Mock()\n test_app.oidc.keycloakApi.get_resource_info.side_effect = [resource_test1, resource_test2]\n\n\ndef configure_mock_version2(test_app):\n _configure_mock_object(test_app)\n test_app.oidc.keycloakApi.jwt_decode.return_value = decoded_jwt_with_permission_test3\n test_app.oidc.keycloakApi.get_resource_info = Mock()\n test_app.oidc.keycloakApi.get_resource_info.side_effect = [resource_test3]\n\n\ndef configure_mock_version3(test_app):\n _configure_mock_object(test_app)\n test_app.oidc.keycloakApi.jwt_decode.return_value = None\n test_app.oidc.keycloakApi.get_resource_info = Mock()\n test_app.oidc.keycloakApi.get_resource_info.side_effect = [resource_test3]\n",
"step-ids": [
10,
11,
13,
15,
19
]
}
|
[
10,
11,
13,
15,
19
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@dataclass
class Music(object):
url: str
title: Optional[str] = None
<|reserved_special_token_1|>
from dataclasses import dataclass
from typing import Optional
@dataclass
class Music(object):
url: str
title: Optional[str] = None
|
flexible
|
{
"blob_id": "2506c5b042f04d1490ba2199a71e38829d4a0adc",
"index": 5738,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\n@dataclass\nclass Music(object):\n url: str\n title: Optional[str] = None\n",
"step-3": "from dataclasses import dataclass\nfrom typing import Optional\n\n\n@dataclass\nclass Music(object):\n url: str\n title: Optional[str] = None\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
# Generated by Django 3.1.4 on 2021-01-11 16:06
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('tutorials', '0003_auto_20210111_1705'),
]
operations = [
migrations.AlterField(
model_name='tutorial',
name='upload',
field=models.ImageField(upload_to='images'),
),
]
|
normal
|
{
"blob_id": "ac664cd7d62f89399e37f74e0234b3ad244fe460",
"index": 6158,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('tutorials', '0003_auto_20210111_1705')]\n operations = [migrations.AlterField(model_name='tutorial', name=\n 'upload', field=models.ImageField(upload_to='images'))]\n",
"step-4": "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('tutorials', '0003_auto_20210111_1705')]\n operations = [migrations.AlterField(model_name='tutorial', name=\n 'upload', field=models.ImageField(upload_to='images'))]\n",
"step-5": "# Generated by Django 3.1.4 on 2021-01-11 16:06\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('tutorials', '0003_auto_20210111_1705'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='tutorial',\n name='upload',\n field=models.ImageField(upload_to='images'),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# Generated by Django 3.0.5 on 2020-04-25 12:29
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api', '0006_order_date'),
]
operations = [
migrations.RemoveField(
model_name='order',
name='product',
),
migrations.AddField(
model_name='order',
name='product',
field=models.ManyToManyField(to='api.Product'),
),
migrations.AlterField(
model_name='order',
name='status',
field=models.TextField(default='неплачено', max_length=50),
),
]
|
normal
|
{
"blob_id": "4cc138016cb1f82e12c76c185be19188d3e38bf9",
"index": 2186,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('api', '0006_order_date')]\n operations = [migrations.RemoveField(model_name='order', name='product'\n ), migrations.AddField(model_name='order', name='product', field=\n models.ManyToManyField(to='api.Product')), migrations.AlterField(\n model_name='order', name='status', field=models.TextField(default=\n 'неплачено', max_length=50))]\n",
"step-4": "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('api', '0006_order_date')]\n operations = [migrations.RemoveField(model_name='order', name='product'\n ), migrations.AddField(model_name='order', name='product', field=\n models.ManyToManyField(to='api.Product')), migrations.AlterField(\n model_name='order', name='status', field=models.TextField(default=\n 'неплачено', max_length=50))]\n",
"step-5": "# Generated by Django 3.0.5 on 2020-04-25 12:29\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('api', '0006_order_date'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='order',\n name='product',\n ),\n migrations.AddField(\n model_name='order',\n name='product',\n field=models.ManyToManyField(to='api.Product'),\n ),\n migrations.AlterField(\n model_name='order',\n name='status',\n field=models.TextField(default='неплачено', max_length=50),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from django.urls import path
from . import views
app_name = 'orders'
urlpatterns = [
path('checkout' , views.order_checkout_view , name='orders-checkout') ,
]
|
normal
|
{
"blob_id": "031f668fbf75b54ec874a59f53c60ceca53779cf",
"index": 8942,
"step-1": "<mask token>\n",
"step-2": "<mask token>\napp_name = 'orders'\nurlpatterns = [path('checkout', views.order_checkout_view, name=\n 'orders-checkout')]\n",
"step-3": "from django.urls import path\nfrom . import views\napp_name = 'orders'\nurlpatterns = [path('checkout', views.order_checkout_view, name=\n 'orders-checkout')]\n",
"step-4": "from django.urls import path\n\nfrom . import views\n\napp_name = 'orders'\nurlpatterns = [\n path('checkout' , views.order_checkout_view , name='orders-checkout') ,\n]\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# -*- coding:utf-8 -*-
import datetime
import json
import os
import urllib
import requests
from selenium.webdriver.common.by import By
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import properties
from time import sleep
from appium import webdriver
def logPrint(logstr):
pyfileName = str(__file__).split(".py")[0].split("/")[-1]
filepath = ".\\log\\" + pyfileName + '-runlog.log'
now = str(datetime.datetime.now())
logstr = now + ' ' + logstr
with open(filepath, 'a', encoding='utf-8') as f:
print(logstr)
f.write(logstr + '\t\n')
def isElementExist(driver, xpath):
try:
driver.find_element_by_xpath(xpath)
return True
except:
return False
def find_toast(driver, contains_message):
'''判断toast信息'''
locat = ("xpath", '//*[contains(@text,"' + contains_message + '")]')
try:
element = WebDriverWait(driver, 2).until(EC.presence_of_element_located(locat))
return True
except:
return False
def restart_app(driver):
optsRestartAPP = {'command': 'am broadcast -a',
'args': ['com.inhand.intent.INBOXCORE_RESTART_APP']}
driver.execute_script("mobile: shell", optsRestartAPP)
def wifi_disable(driver):
opts = {'command': 'su 0',
'args': ['svc wifi disable']}
driver.execute_script("mobile: shell", opts)
def wifi_enable(driver):
opts = {'command': 'su 0',
'args': ['svc wifi enable']}
driver.execute_script("mobile: shell", opts)
if __name__ == '__main__':
try:
logpath = os.getcwd() + "\\log"
# print(logpath)
os.mkdir(logpath)
except:
pass
pyfileName = str(__file__).split(".py")[0].split("/")[-1]
logfilepath = ".\\log\\" + pyfileName + '-runlog.log'
try:
os.remove(logfilepath)
except:
pass
host = 'http://182.150.21.232:10081'
requesturl = "/oauth2/access_token"
headers = {
"Content-Type": "application/x-www-form-urlencoded",
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.87 Safari/537.36'
}
get_token_value = {
"client_id": "000017953450251798098136",
"client_secret": "08E9EC6793345759456CB8BAE52615F3",
"grant_type": "password",
"username": "chenzhiz@inhand.com.cn",
"password": "czz123456",
"password_type": "1",
"language": "2"
}
data = urllib.parse.urlencode(get_token_value).encode('utf-8')
url = host + requesturl
request = urllib.request.Request(url, data, headers)
token_response = urllib.request.urlopen(request).read().decode('utf-8')
logPrint(token_response)
access_token = json.loads(token_response)['access_token']
requesturl = "/api/goods/list?cursor=0&limit=30&name=&access_token=" + access_token
url = host + requesturl
response = requests.get(url=url, headers={'Content-Type': 'application/json'})
goods_count = json.loads(response.text)['total']
print(goods_count)
driver = webdriver.Remote('http://127.0.0.1:4723/wd/hub', properties.desired_caps)
sleep(0.5)
wifi_enable(driver)
sleep(0.5)
opts1 = {'command': 'rm -rf',
'args': ['/sdcard/inbox/data/picture']}
redata = driver.execute_script("mobile: shell", opts1)
driver.find_element_by_xpath("//android.widget.TextView[@text='货道配置']").click()
driver.find_element_by_xpath("//android.widget.TextView[@text='同步商品(从平台)']").click()
driver.find_element_by_xpath("//android.widget.Button[@text='确定']").click()
try:
xpath = "//android.widget.TextView[contains(@text,'总商品数 " + str(goods_count) + "')]"
logPrint(xpath)
WebDriverWait(driver, 2, 0.5).until(lambda x: x.find_element_by_xpath(xpath))
progressFlag = True
except Exception as e:
print(e)
progressFlag = False
if progressFlag:
logPrint("同步过程:PASS")
else:
logPrint("同步过程:FAIL!!")
loadmasklocator = ("xpath", "//android.widget.ProgressBar")
try:
WebDriverWait(driver, 180).until_not(EC.presence_of_element_located(loadmasklocator))
completeFlag = True
except Exception as e:
completeFlag = False
if completeFlag:
logPrint("同步结果出现:PASS")
else:
logPrint("同步结果出现:FAIL!!")
if isElementExist(driver, "//android.widget.TextView[contains(@text,'操作成功')]"):
logPrint("同步成功:PASS")
else:
logPrint("同步成功:FAIL!!")
driver.find_element_by_xpath("//android.widget.Button[@text='确定']").click()
sleep(20)
driver = webdriver.Remote('http://127.0.0.1:4723/wd/hub', properties.desired_caps)
driver.find_element_by_xpath("//android.widget.TextView[@text='货道配置']").click()
driver.find_element_by_xpath("//android.widget.TextView[@text='同步商品(从平台)']").click()
driver.find_element_by_xpath("//android.widget.Button[@text='确定']").click()
try:
WebDriverWait(driver, 180).until_not(EC.presence_of_element_located(loadmasklocator))
completeFlag = True
except Exception as e:
completeFlag = False
if completeFlag:
logPrint("同步结果出现:PASS")
else:
logPrint("同步结果出现:FAIL!!")
if isElementExist(driver, "//android.widget.TextView[contains(@text,'已经是最新配置')]"):
logPrint("已经是最新配置:PASS")
else:
logPrint("已经是最新配置:FAIL!!")
driver.find_element_by_xpath("//android.widget.Button[@text='确定']").click()
wifi_disable(driver)
driver.find_element_by_xpath("//android.widget.TextView[@text='同步商品(从平台)']").click()
driver.find_element_by_xpath("//android.widget.Button[@text='确定']").click()
okdialoglocator = ("xpath", "//android.widget.TextView[contains(@text,'操作失败')]")
try:
WebDriverWait(driver, 3).until(EC.presence_of_element_located(okdialoglocator))
failFlag = True
except Exception as e:
failFlag = False
if failFlag:
logPrint("断网同步,操作失败:PASS")
else:
logPrint("断网同步,操作失败:FAIL!!")
wifi_enable(driver)
driver.find_element_by_xpath("//android.widget.Button[@text='确定']").click()
opts1 = {'command': 'rm -rf',
'args': ['/sdcard/inbox/data/picture']}
redata = driver.execute_script("mobile: shell", opts1)
sleep(10)
driver.find_element_by_xpath("//android.widget.TextView[@text='同步商品(从平台)']").click()
driver.find_element_by_xpath("//android.widget.Button[@text='确定']").click()
sleep(5)
wifi_disable(driver)
loadmasklocator = ("xpath", "//android.widget.ProgressBar")
try:
WebDriverWait(driver, 180).until_not(EC.presence_of_element_located(loadmasklocator))
completeFlag = True
except Exception as e:
completeFlag = False
if completeFlag:
logPrint("同步结果出现:PASS")
else:
logPrint("同步结果出现:FAIL!!")
if isElementExist(driver, "//android.widget.TextView[contains(@text,'操作成功')]"):
logPrint("断网结束同步:PASS")
else:
logPrint("断网结束同步:FAIL!!")
driver.find_element_by_xpath("//android.widget.Button[@text='确定']").click()
sleep(12)
driver = webdriver.Remote('http://127.0.0.1:4723/wd/hub', properties.desired_caps)
driver.find_element_by_xpath("//android.widget.TextView[@text='货道配置']").click()
driver.find_element_by_xpath("//android.widget.TextView[@text='同步商品(从平台)']").click()
noNetFlag = find_toast(driver, "平台")
if noNetFlag:
logPrint("未与平台建立连接:PASS")
else:
logPrint("未与平台建立连接:FAIL!!")
wifi_enable(driver)
|
normal
|
{
"blob_id": "2465a73d958d88dcd27cfac75a4e7b1fcd6a884e",
"index": 3389,
"step-1": "<mask token>\n\n\ndef logPrint(logstr):\n pyfileName = str(__file__).split('.py')[0].split('/')[-1]\n filepath = '.\\\\log\\\\' + pyfileName + '-runlog.log'\n now = str(datetime.datetime.now())\n logstr = now + ' ' + logstr\n with open(filepath, 'a', encoding='utf-8') as f:\n print(logstr)\n f.write(logstr + '\\t\\n')\n\n\n<mask token>\n\n\ndef find_toast(driver, contains_message):\n \"\"\"判断toast信息\"\"\"\n locat = 'xpath', '//*[contains(@text,\"' + contains_message + '\")]'\n try:\n element = WebDriverWait(driver, 2).until(EC.\n presence_of_element_located(locat))\n return True\n except:\n return False\n\n\n<mask token>\n\n\ndef wifi_disable(driver):\n opts = {'command': 'su 0', 'args': ['svc wifi disable']}\n driver.execute_script('mobile: shell', opts)\n\n\ndef wifi_enable(driver):\n opts = {'command': 'su 0', 'args': ['svc wifi enable']}\n driver.execute_script('mobile: shell', opts)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef logPrint(logstr):\n pyfileName = str(__file__).split('.py')[0].split('/')[-1]\n filepath = '.\\\\log\\\\' + pyfileName + '-runlog.log'\n now = str(datetime.datetime.now())\n logstr = now + ' ' + logstr\n with open(filepath, 'a', encoding='utf-8') as f:\n print(logstr)\n f.write(logstr + '\\t\\n')\n\n\n<mask token>\n\n\ndef find_toast(driver, contains_message):\n \"\"\"判断toast信息\"\"\"\n locat = 'xpath', '//*[contains(@text,\"' + contains_message + '\")]'\n try:\n element = WebDriverWait(driver, 2).until(EC.\n presence_of_element_located(locat))\n return True\n except:\n return False\n\n\ndef restart_app(driver):\n optsRestartAPP = {'command': 'am broadcast -a', 'args': [\n 'com.inhand.intent.INBOXCORE_RESTART_APP']}\n driver.execute_script('mobile: shell', optsRestartAPP)\n\n\ndef wifi_disable(driver):\n opts = {'command': 'su 0', 'args': ['svc wifi disable']}\n driver.execute_script('mobile: shell', opts)\n\n\ndef wifi_enable(driver):\n opts = {'command': 'su 0', 'args': ['svc wifi enable']}\n driver.execute_script('mobile: shell', opts)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef logPrint(logstr):\n pyfileName = str(__file__).split('.py')[0].split('/')[-1]\n filepath = '.\\\\log\\\\' + pyfileName + '-runlog.log'\n now = str(datetime.datetime.now())\n logstr = now + ' ' + logstr\n with open(filepath, 'a', encoding='utf-8') as f:\n print(logstr)\n f.write(logstr + '\\t\\n')\n\n\ndef isElementExist(driver, xpath):\n try:\n driver.find_element_by_xpath(xpath)\n return True\n except:\n return False\n\n\ndef find_toast(driver, contains_message):\n \"\"\"判断toast信息\"\"\"\n locat = 'xpath', '//*[contains(@text,\"' + contains_message + '\")]'\n try:\n element = WebDriverWait(driver, 2).until(EC.\n presence_of_element_located(locat))\n return True\n except:\n return False\n\n\ndef restart_app(driver):\n optsRestartAPP = {'command': 'am broadcast -a', 'args': [\n 'com.inhand.intent.INBOXCORE_RESTART_APP']}\n driver.execute_script('mobile: shell', optsRestartAPP)\n\n\ndef wifi_disable(driver):\n opts = {'command': 'su 0', 'args': ['svc wifi disable']}\n driver.execute_script('mobile: shell', opts)\n\n\ndef wifi_enable(driver):\n opts = {'command': 'su 0', 'args': ['svc wifi enable']}\n driver.execute_script('mobile: shell', opts)\n\n\nif __name__ == '__main__':\n try:\n logpath = os.getcwd() + '\\\\log'\n os.mkdir(logpath)\n except:\n pass\n pyfileName = str(__file__).split('.py')[0].split('/')[-1]\n logfilepath = '.\\\\log\\\\' + pyfileName + '-runlog.log'\n try:\n os.remove(logfilepath)\n except:\n pass\n host = 'http://182.150.21.232:10081'\n requesturl = '/oauth2/access_token'\n headers = {'Content-Type': 'application/x-www-form-urlencoded',\n 'User-Agent':\n 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.87 Safari/537.36'\n }\n get_token_value = {'client_id': '000017953450251798098136',\n 'client_secret': '08E9EC6793345759456CB8BAE52615F3',\n 'grant_type': 'password', 'username': 'chenzhiz@inhand.com.cn',\n 'password': 'czz123456', 'password_type': '1', 'language': 
'2'}\n data = urllib.parse.urlencode(get_token_value).encode('utf-8')\n url = host + requesturl\n request = urllib.request.Request(url, data, headers)\n token_response = urllib.request.urlopen(request).read().decode('utf-8')\n logPrint(token_response)\n access_token = json.loads(token_response)['access_token']\n requesturl = ('/api/goods/list?cursor=0&limit=30&name=&access_token=' +\n access_token)\n url = host + requesturl\n response = requests.get(url=url, headers={'Content-Type':\n 'application/json'})\n goods_count = json.loads(response.text)['total']\n print(goods_count)\n driver = webdriver.Remote('http://127.0.0.1:4723/wd/hub', properties.\n desired_caps)\n sleep(0.5)\n wifi_enable(driver)\n sleep(0.5)\n opts1 = {'command': 'rm -rf', 'args': ['/sdcard/inbox/data/picture']}\n redata = driver.execute_script('mobile: shell', opts1)\n driver.find_element_by_xpath(\"//android.widget.TextView[@text='货道配置']\"\n ).click()\n driver.find_element_by_xpath(\"//android.widget.TextView[@text='同步商品(从平台)']\"\n ).click()\n driver.find_element_by_xpath(\"//android.widget.Button[@text='确定']\").click()\n try:\n xpath = \"//android.widget.TextView[contains(@text,'总商品数 \" + str(\n goods_count) + \"')]\"\n logPrint(xpath)\n WebDriverWait(driver, 2, 0.5).until(lambda x: x.\n find_element_by_xpath(xpath))\n progressFlag = True\n except Exception as e:\n print(e)\n progressFlag = False\n if progressFlag:\n logPrint('同步过程:PASS')\n else:\n logPrint('同步过程:FAIL!!')\n loadmasklocator = 'xpath', '//android.widget.ProgressBar'\n try:\n WebDriverWait(driver, 180).until_not(EC.presence_of_element_located\n (loadmasklocator))\n completeFlag = True\n except Exception as e:\n completeFlag = False\n if completeFlag:\n logPrint('同步结果出现:PASS')\n else:\n logPrint('同步结果出现:FAIL!!')\n if isElementExist(driver,\n \"//android.widget.TextView[contains(@text,'操作成功')]\"):\n logPrint('同步成功:PASS')\n else:\n logPrint('同步成功:FAIL!!')\n 
driver.find_element_by_xpath(\"//android.widget.Button[@text='确定']\").click()\n sleep(20)\n driver = webdriver.Remote('http://127.0.0.1:4723/wd/hub', properties.\n desired_caps)\n driver.find_element_by_xpath(\"//android.widget.TextView[@text='货道配置']\"\n ).click()\n driver.find_element_by_xpath(\"//android.widget.TextView[@text='同步商品(从平台)']\"\n ).click()\n driver.find_element_by_xpath(\"//android.widget.Button[@text='确定']\").click()\n try:\n WebDriverWait(driver, 180).until_not(EC.presence_of_element_located\n (loadmasklocator))\n completeFlag = True\n except Exception as e:\n completeFlag = False\n if completeFlag:\n logPrint('同步结果出现:PASS')\n else:\n logPrint('同步结果出现:FAIL!!')\n if isElementExist(driver,\n \"//android.widget.TextView[contains(@text,'已经是最新配置')]\"):\n logPrint('已经是最新配置:PASS')\n else:\n logPrint('已经是最新配置:FAIL!!')\n driver.find_element_by_xpath(\"//android.widget.Button[@text='确定']\").click()\n wifi_disable(driver)\n driver.find_element_by_xpath(\"//android.widget.TextView[@text='同步商品(从平台)']\"\n ).click()\n driver.find_element_by_xpath(\"//android.widget.Button[@text='确定']\").click()\n okdialoglocator = ('xpath',\n \"//android.widget.TextView[contains(@text,'操作失败')]\")\n try:\n WebDriverWait(driver, 3).until(EC.presence_of_element_located(\n okdialoglocator))\n failFlag = True\n except Exception as e:\n failFlag = False\n if failFlag:\n logPrint('断网同步,操作失败:PASS')\n else:\n logPrint('断网同步,操作失败:FAIL!!')\n wifi_enable(driver)\n driver.find_element_by_xpath(\"//android.widget.Button[@text='确定']\").click()\n opts1 = {'command': 'rm -rf', 'args': ['/sdcard/inbox/data/picture']}\n redata = driver.execute_script('mobile: shell', opts1)\n sleep(10)\n driver.find_element_by_xpath(\"//android.widget.TextView[@text='同步商品(从平台)']\"\n ).click()\n driver.find_element_by_xpath(\"//android.widget.Button[@text='确定']\").click()\n sleep(5)\n wifi_disable(driver)\n loadmasklocator = 'xpath', '//android.widget.ProgressBar'\n try:\n WebDriverWait(driver, 
180).until_not(EC.presence_of_element_located\n (loadmasklocator))\n completeFlag = True\n except Exception as e:\n completeFlag = False\n if completeFlag:\n logPrint('同步结果出现:PASS')\n else:\n logPrint('同步结果出现:FAIL!!')\n if isElementExist(driver,\n \"//android.widget.TextView[contains(@text,'操作成功')]\"):\n logPrint('断网结束同步:PASS')\n else:\n logPrint('断网结束同步:FAIL!!')\n driver.find_element_by_xpath(\"//android.widget.Button[@text='确定']\").click()\n sleep(12)\n driver = webdriver.Remote('http://127.0.0.1:4723/wd/hub', properties.\n desired_caps)\n driver.find_element_by_xpath(\"//android.widget.TextView[@text='货道配置']\"\n ).click()\n driver.find_element_by_xpath(\"//android.widget.TextView[@text='同步商品(从平台)']\"\n ).click()\n noNetFlag = find_toast(driver, '平台')\n if noNetFlag:\n logPrint('未与平台建立连接:PASS')\n else:\n logPrint('未与平台建立连接:FAIL!!')\n wifi_enable(driver)\n",
"step-4": "import datetime\nimport json\nimport os\nimport urllib\nimport requests\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nimport properties\nfrom time import sleep\nfrom appium import webdriver\n\n\ndef logPrint(logstr):\n pyfileName = str(__file__).split('.py')[0].split('/')[-1]\n filepath = '.\\\\log\\\\' + pyfileName + '-runlog.log'\n now = str(datetime.datetime.now())\n logstr = now + ' ' + logstr\n with open(filepath, 'a', encoding='utf-8') as f:\n print(logstr)\n f.write(logstr + '\\t\\n')\n\n\ndef isElementExist(driver, xpath):\n try:\n driver.find_element_by_xpath(xpath)\n return True\n except:\n return False\n\n\ndef find_toast(driver, contains_message):\n \"\"\"判断toast信息\"\"\"\n locat = 'xpath', '//*[contains(@text,\"' + contains_message + '\")]'\n try:\n element = WebDriverWait(driver, 2).until(EC.\n presence_of_element_located(locat))\n return True\n except:\n return False\n\n\ndef restart_app(driver):\n optsRestartAPP = {'command': 'am broadcast -a', 'args': [\n 'com.inhand.intent.INBOXCORE_RESTART_APP']}\n driver.execute_script('mobile: shell', optsRestartAPP)\n\n\ndef wifi_disable(driver):\n opts = {'command': 'su 0', 'args': ['svc wifi disable']}\n driver.execute_script('mobile: shell', opts)\n\n\ndef wifi_enable(driver):\n opts = {'command': 'su 0', 'args': ['svc wifi enable']}\n driver.execute_script('mobile: shell', opts)\n\n\nif __name__ == '__main__':\n try:\n logpath = os.getcwd() + '\\\\log'\n os.mkdir(logpath)\n except:\n pass\n pyfileName = str(__file__).split('.py')[0].split('/')[-1]\n logfilepath = '.\\\\log\\\\' + pyfileName + '-runlog.log'\n try:\n os.remove(logfilepath)\n except:\n pass\n host = 'http://182.150.21.232:10081'\n requesturl = '/oauth2/access_token'\n headers = {'Content-Type': 'application/x-www-form-urlencoded',\n 'User-Agent':\n 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 
(KHTML, like Gecko) Chrome/67.0.3396.87 Safari/537.36'\n }\n get_token_value = {'client_id': '000017953450251798098136',\n 'client_secret': '08E9EC6793345759456CB8BAE52615F3',\n 'grant_type': 'password', 'username': 'chenzhiz@inhand.com.cn',\n 'password': 'czz123456', 'password_type': '1', 'language': '2'}\n data = urllib.parse.urlencode(get_token_value).encode('utf-8')\n url = host + requesturl\n request = urllib.request.Request(url, data, headers)\n token_response = urllib.request.urlopen(request).read().decode('utf-8')\n logPrint(token_response)\n access_token = json.loads(token_response)['access_token']\n requesturl = ('/api/goods/list?cursor=0&limit=30&name=&access_token=' +\n access_token)\n url = host + requesturl\n response = requests.get(url=url, headers={'Content-Type':\n 'application/json'})\n goods_count = json.loads(response.text)['total']\n print(goods_count)\n driver = webdriver.Remote('http://127.0.0.1:4723/wd/hub', properties.\n desired_caps)\n sleep(0.5)\n wifi_enable(driver)\n sleep(0.5)\n opts1 = {'command': 'rm -rf', 'args': ['/sdcard/inbox/data/picture']}\n redata = driver.execute_script('mobile: shell', opts1)\n driver.find_element_by_xpath(\"//android.widget.TextView[@text='货道配置']\"\n ).click()\n driver.find_element_by_xpath(\"//android.widget.TextView[@text='同步商品(从平台)']\"\n ).click()\n driver.find_element_by_xpath(\"//android.widget.Button[@text='确定']\").click()\n try:\n xpath = \"//android.widget.TextView[contains(@text,'总商品数 \" + str(\n goods_count) + \"')]\"\n logPrint(xpath)\n WebDriverWait(driver, 2, 0.5).until(lambda x: x.\n find_element_by_xpath(xpath))\n progressFlag = True\n except Exception as e:\n print(e)\n progressFlag = False\n if progressFlag:\n logPrint('同步过程:PASS')\n else:\n logPrint('同步过程:FAIL!!')\n loadmasklocator = 'xpath', '//android.widget.ProgressBar'\n try:\n WebDriverWait(driver, 180).until_not(EC.presence_of_element_located\n (loadmasklocator))\n completeFlag = True\n except Exception as e:\n completeFlag = False\n 
if completeFlag:\n logPrint('同步结果出现:PASS')\n else:\n logPrint('同步结果出现:FAIL!!')\n if isElementExist(driver,\n \"//android.widget.TextView[contains(@text,'操作成功')]\"):\n logPrint('同步成功:PASS')\n else:\n logPrint('同步成功:FAIL!!')\n driver.find_element_by_xpath(\"//android.widget.Button[@text='确定']\").click()\n sleep(20)\n driver = webdriver.Remote('http://127.0.0.1:4723/wd/hub', properties.\n desired_caps)\n driver.find_element_by_xpath(\"//android.widget.TextView[@text='货道配置']\"\n ).click()\n driver.find_element_by_xpath(\"//android.widget.TextView[@text='同步商品(从平台)']\"\n ).click()\n driver.find_element_by_xpath(\"//android.widget.Button[@text='确定']\").click()\n try:\n WebDriverWait(driver, 180).until_not(EC.presence_of_element_located\n (loadmasklocator))\n completeFlag = True\n except Exception as e:\n completeFlag = False\n if completeFlag:\n logPrint('同步结果出现:PASS')\n else:\n logPrint('同步结果出现:FAIL!!')\n if isElementExist(driver,\n \"//android.widget.TextView[contains(@text,'已经是最新配置')]\"):\n logPrint('已经是最新配置:PASS')\n else:\n logPrint('已经是最新配置:FAIL!!')\n driver.find_element_by_xpath(\"//android.widget.Button[@text='确定']\").click()\n wifi_disable(driver)\n driver.find_element_by_xpath(\"//android.widget.TextView[@text='同步商品(从平台)']\"\n ).click()\n driver.find_element_by_xpath(\"//android.widget.Button[@text='确定']\").click()\n okdialoglocator = ('xpath',\n \"//android.widget.TextView[contains(@text,'操作失败')]\")\n try:\n WebDriverWait(driver, 3).until(EC.presence_of_element_located(\n okdialoglocator))\n failFlag = True\n except Exception as e:\n failFlag = False\n if failFlag:\n logPrint('断网同步,操作失败:PASS')\n else:\n logPrint('断网同步,操作失败:FAIL!!')\n wifi_enable(driver)\n driver.find_element_by_xpath(\"//android.widget.Button[@text='确定']\").click()\n opts1 = {'command': 'rm -rf', 'args': ['/sdcard/inbox/data/picture']}\n redata = driver.execute_script('mobile: shell', opts1)\n sleep(10)\n driver.find_element_by_xpath(\"//android.widget.TextView[@text='同步商品(从平台)']\"\n ).click()\n 
driver.find_element_by_xpath(\"//android.widget.Button[@text='确定']\").click()\n sleep(5)\n wifi_disable(driver)\n loadmasklocator = 'xpath', '//android.widget.ProgressBar'\n try:\n WebDriverWait(driver, 180).until_not(EC.presence_of_element_located\n (loadmasklocator))\n completeFlag = True\n except Exception as e:\n completeFlag = False\n if completeFlag:\n logPrint('同步结果出现:PASS')\n else:\n logPrint('同步结果出现:FAIL!!')\n if isElementExist(driver,\n \"//android.widget.TextView[contains(@text,'操作成功')]\"):\n logPrint('断网结束同步:PASS')\n else:\n logPrint('断网结束同步:FAIL!!')\n driver.find_element_by_xpath(\"//android.widget.Button[@text='确定']\").click()\n sleep(12)\n driver = webdriver.Remote('http://127.0.0.1:4723/wd/hub', properties.\n desired_caps)\n driver.find_element_by_xpath(\"//android.widget.TextView[@text='货道配置']\"\n ).click()\n driver.find_element_by_xpath(\"//android.widget.TextView[@text='同步商品(从平台)']\"\n ).click()\n noNetFlag = find_toast(driver, '平台')\n if noNetFlag:\n logPrint('未与平台建立连接:PASS')\n else:\n logPrint('未与平台建立连接:FAIL!!')\n wifi_enable(driver)\n",
"step-5": "# -*- coding:utf-8 -*-\nimport datetime\nimport json\nimport os\nimport urllib\nimport requests\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nimport properties\nfrom time import sleep\nfrom appium import webdriver\n\n\ndef logPrint(logstr):\n pyfileName = str(__file__).split(\".py\")[0].split(\"/\")[-1]\n filepath = \".\\\\log\\\\\" + pyfileName + '-runlog.log'\n now = str(datetime.datetime.now())\n logstr = now + ' ' + logstr\n with open(filepath, 'a', encoding='utf-8') as f:\n print(logstr)\n f.write(logstr + '\\t\\n')\n\n\ndef isElementExist(driver, xpath):\n try:\n driver.find_element_by_xpath(xpath)\n return True\n except:\n return False\n\n\ndef find_toast(driver, contains_message):\n '''判断toast信息'''\n locat = (\"xpath\", '//*[contains(@text,\"' + contains_message + '\")]')\n try:\n element = WebDriverWait(driver, 2).until(EC.presence_of_element_located(locat))\n return True\n except:\n return False\n\n\ndef restart_app(driver):\n optsRestartAPP = {'command': 'am broadcast -a',\n 'args': ['com.inhand.intent.INBOXCORE_RESTART_APP']}\n driver.execute_script(\"mobile: shell\", optsRestartAPP)\n\n\ndef wifi_disable(driver):\n opts = {'command': 'su 0',\n 'args': ['svc wifi disable']}\n driver.execute_script(\"mobile: shell\", opts)\n\ndef wifi_enable(driver):\n opts = {'command': 'su 0',\n 'args': ['svc wifi enable']}\n driver.execute_script(\"mobile: shell\", opts)\n\nif __name__ == '__main__':\n try:\n logpath = os.getcwd() + \"\\\\log\"\n # print(logpath)\n os.mkdir(logpath)\n except:\n pass\n pyfileName = str(__file__).split(\".py\")[0].split(\"/\")[-1]\n logfilepath = \".\\\\log\\\\\" + pyfileName + '-runlog.log'\n try:\n os.remove(logfilepath)\n except:\n pass\n host = 'http://182.150.21.232:10081'\n requesturl = \"/oauth2/access_token\"\n headers = {\n \"Content-Type\": \"application/x-www-form-urlencoded\",\n 
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.87 Safari/537.36'\n }\n get_token_value = {\n \"client_id\": \"000017953450251798098136\",\n \"client_secret\": \"08E9EC6793345759456CB8BAE52615F3\",\n \"grant_type\": \"password\",\n \"username\": \"chenzhiz@inhand.com.cn\",\n \"password\": \"czz123456\",\n \"password_type\": \"1\",\n \"language\": \"2\"\n }\n data = urllib.parse.urlencode(get_token_value).encode('utf-8')\n url = host + requesturl\n request = urllib.request.Request(url, data, headers)\n token_response = urllib.request.urlopen(request).read().decode('utf-8')\n logPrint(token_response)\n access_token = json.loads(token_response)['access_token']\n\n requesturl = \"/api/goods/list?cursor=0&limit=30&name=&access_token=\" + access_token\n url = host + requesturl\n response = requests.get(url=url, headers={'Content-Type': 'application/json'})\n goods_count = json.loads(response.text)['total']\n print(goods_count)\n driver = webdriver.Remote('http://127.0.0.1:4723/wd/hub', properties.desired_caps)\n sleep(0.5)\n wifi_enable(driver)\n sleep(0.5)\n opts1 = {'command': 'rm -rf',\n 'args': ['/sdcard/inbox/data/picture']}\n redata = driver.execute_script(\"mobile: shell\", opts1)\n driver.find_element_by_xpath(\"//android.widget.TextView[@text='货道配置']\").click()\n driver.find_element_by_xpath(\"//android.widget.TextView[@text='同步商品(从平台)']\").click()\n driver.find_element_by_xpath(\"//android.widget.Button[@text='确定']\").click()\n try:\n xpath = \"//android.widget.TextView[contains(@text,'总商品数 \" + str(goods_count) + \"')]\"\n logPrint(xpath)\n WebDriverWait(driver, 2, 0.5).until(lambda x: x.find_element_by_xpath(xpath))\n progressFlag = True\n except Exception as e:\n print(e)\n progressFlag = False\n if progressFlag:\n logPrint(\"同步过程:PASS\")\n else:\n logPrint(\"同步过程:FAIL!!\")\n loadmasklocator = (\"xpath\", \"//android.widget.ProgressBar\")\n try:\n WebDriverWait(driver, 
180).until_not(EC.presence_of_element_located(loadmasklocator))\n completeFlag = True\n except Exception as e:\n completeFlag = False\n if completeFlag:\n logPrint(\"同步结果出现:PASS\")\n else:\n logPrint(\"同步结果出现:FAIL!!\")\n if isElementExist(driver, \"//android.widget.TextView[contains(@text,'操作成功')]\"):\n logPrint(\"同步成功:PASS\")\n else:\n logPrint(\"同步成功:FAIL!!\")\n driver.find_element_by_xpath(\"//android.widget.Button[@text='确定']\").click()\n sleep(20)\n driver = webdriver.Remote('http://127.0.0.1:4723/wd/hub', properties.desired_caps)\n driver.find_element_by_xpath(\"//android.widget.TextView[@text='货道配置']\").click()\n driver.find_element_by_xpath(\"//android.widget.TextView[@text='同步商品(从平台)']\").click()\n driver.find_element_by_xpath(\"//android.widget.Button[@text='确定']\").click()\n try:\n WebDriverWait(driver, 180).until_not(EC.presence_of_element_located(loadmasklocator))\n completeFlag = True\n except Exception as e:\n completeFlag = False\n if completeFlag:\n logPrint(\"同步结果出现:PASS\")\n else:\n logPrint(\"同步结果出现:FAIL!!\")\n if isElementExist(driver, \"//android.widget.TextView[contains(@text,'已经是最新配置')]\"):\n logPrint(\"已经是最新配置:PASS\")\n else:\n logPrint(\"已经是最新配置:FAIL!!\")\n driver.find_element_by_xpath(\"//android.widget.Button[@text='确定']\").click()\n wifi_disable(driver)\n driver.find_element_by_xpath(\"//android.widget.TextView[@text='同步商品(从平台)']\").click()\n driver.find_element_by_xpath(\"//android.widget.Button[@text='确定']\").click()\n okdialoglocator = (\"xpath\", \"//android.widget.TextView[contains(@text,'操作失败')]\")\n try:\n WebDriverWait(driver, 3).until(EC.presence_of_element_located(okdialoglocator))\n failFlag = True\n except Exception as e:\n failFlag = False\n if failFlag:\n logPrint(\"断网同步,操作失败:PASS\")\n else:\n logPrint(\"断网同步,操作失败:FAIL!!\")\n wifi_enable(driver)\n driver.find_element_by_xpath(\"//android.widget.Button[@text='确定']\").click()\n opts1 = {'command': 'rm -rf',\n 'args': ['/sdcard/inbox/data/picture']}\n redata = 
driver.execute_script(\"mobile: shell\", opts1)\n sleep(10)\n driver.find_element_by_xpath(\"//android.widget.TextView[@text='同步商品(从平台)']\").click()\n driver.find_element_by_xpath(\"//android.widget.Button[@text='确定']\").click()\n sleep(5)\n wifi_disable(driver)\n loadmasklocator = (\"xpath\", \"//android.widget.ProgressBar\")\n try:\n WebDriverWait(driver, 180).until_not(EC.presence_of_element_located(loadmasklocator))\n completeFlag = True\n except Exception as e:\n completeFlag = False\n if completeFlag:\n logPrint(\"同步结果出现:PASS\")\n else:\n logPrint(\"同步结果出现:FAIL!!\")\n if isElementExist(driver, \"//android.widget.TextView[contains(@text,'操作成功')]\"):\n logPrint(\"断网结束同步:PASS\")\n else:\n logPrint(\"断网结束同步:FAIL!!\")\n driver.find_element_by_xpath(\"//android.widget.Button[@text='确定']\").click()\n sleep(12)\n driver = webdriver.Remote('http://127.0.0.1:4723/wd/hub', properties.desired_caps)\n driver.find_element_by_xpath(\"//android.widget.TextView[@text='货道配置']\").click()\n driver.find_element_by_xpath(\"//android.widget.TextView[@text='同步商品(从平台)']\").click()\n noNetFlag = find_toast(driver, \"平台\")\n if noNetFlag:\n logPrint(\"未与平台建立连接:PASS\")\n else:\n logPrint(\"未与平台建立连接:FAIL!!\")\n wifi_enable(driver)\n",
"step-ids": [
4,
5,
7,
8,
9
]
}
|
[
4,
5,
7,
8,
9
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.