| code (string, lengths 13–6.09M) | order_type (string, 2 values) | original_example (dict) | step_ids (list, lengths 1–5) |
|---|---|---|---|
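The rows below are easier to explore programmatically than to read inline. A minimal loading sketch, assuming the dataset is published on the Hugging Face Hub; the repo id `org/code-edit-steps` is a placeholder, not the dataset's real name:

```python
# Minimal sketch: load the dataset and inspect one row.
# NOTE: "org/code-edit-steps" is a placeholder repo id, not the real one.
from datasets import load_dataset

ds = load_dataset("org/code-edit-steps", split="train")
row = ds[0]
print(row["order_type"])                   # "normal" or "flexible"
print(row["step_ids"])                     # e.g. [0, 1, 2, 3]
print(row["original_example"]["blob_id"])  # identifier of the source file
```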
# GERALDO AMELIO DE LIMA JUNIOR
# UNIFIP - Patos
# 05 March 2020
# Question 08 - Write a program that reads an integer value and computes its cube.
n = int(input('Digite um numero:'))
t = n*3
print('O triplo de {} vale {}.'.format(n, t))
|
normal
|
{
"blob_id": "8f311e15c15fe3309218dfaed5eefa4a8fc3f453",
"index": 3234,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('O triplo de {} vale {}.'.format(n, t))\n",
"step-3": "n = int(input('Digite um numero:'))\nt = n * 3\nprint('O triplo de {} vale {}.'.format(n, t))\n",
"step-4": "# GERALDO AMELIO DE LIMA JUNIOR\n# UNIFIP - Patos\n# 05 de março de 2020\n# Questão 08 - Escreva um programa que leia um valor inteiro e calcule o seu cubo.\n\nn = int(input('Digite um numero:'))\nt = n*3\nprint('O triplo de {} vale {}.'.format(n, t))\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
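Each `original_example` stores the same file at increasing levels of completeness: earlier steps replace elided regions with the literal `<mask token>` placeholder, later steps reveal more of the file, and `step-5` is `null` in rows whose progression has fewer than five stages (the `step-ids` list records which stages exist). A minimal sketch of walking one such record, using the field names visible in the row above (`record` stands for the parsed dict):

```python
# Walk the step fields of one parsed record (the dict shown above).
def iter_steps(record):
    for i in range(1, 6):
        snippet = record.get(f"step-{i}")
        if snippet is not None:  # e.g. "step-5" is null in this row
            yield i, snippet

for i, snippet in iter_steps(record):
    masks = snippet.count("<mask token>")
    print(f"step-{i}: {len(snippet)} chars, {masks} mask placeholder(s)")
```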
"""
Package with a facade to the several expansion strategies.
"""
from acres.resolution import resolver
__all__ = ['resolver']
|
normal
|
{
"blob_id": "e31267871453d87aee409f1c751c36908f7f151a",
"index": 804,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n__all__ = ['resolver']\n",
"step-3": "<mask token>\nfrom acres.resolution import resolver\n__all__ = ['resolver']\n",
"step-4": "\"\"\"\nPackage with a facade to the several expansion strategies.\n\"\"\"\nfrom acres.resolution import resolver\n\n__all__ = ['resolver']\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def funky():
spam = 302
print(spam)
<|reserved_special_token_0|>
def sayHello(name):
print('Hello, ' + name)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def funky():
spam = 302
print(spam)
<|reserved_special_token_0|>
def sayHello(name):
print('Hello, ' + name)
<|reserved_special_token_0|>
def spam(myName):
print('Hello, ' + myName)
myName = 'Waffles'
print('Your new name is ' + myName)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print('Why not ?')
print(True and not False)
<|reserved_special_token_0|>
def funky():
spam = 302
print(spam)
funky()
print(spam)
def sayHello(name):
print('Hello, ' + name)
print('Say hello to Alice.')
<|reserved_special_token_0|>
sayHello(fizzy)
print('Do not forget to say hello to Bob.')
sayHello('Bob')
sayHello('Lee')
def spam(myName):
print('Hello, ' + myName)
myName = 'Waffles'
print('Your new name is ' + myName)
<|reserved_special_token_0|>
spam(myName)
print('Howdy, ' + myName)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print('Why not ?')
print(True and not False)
spam = 1208
def funky():
spam = 302
print(spam)
funky()
print(spam)
def sayHello(name):
print('Hello, ' + name)
print('Say hello to Alice.')
fizzy = 'Alice'
sayHello(fizzy)
print('Do not forget to say hello to Bob.')
sayHello('Bob')
sayHello('Lee')
def spam(myName):
print('Hello, ' + myName)
myName = 'Waffles'
print('Your new name is ' + myName)
myName = 'Albert'
spam(myName)
print('Howdy, ' + myName)
<|reserved_special_token_1|>
'''
# VariableScope.py
#
# Written by leezhm on 13th March, 2012.
#
# Copyright (C) leezhm(c)126.com. All Right Reserved.
#
# For Chapter 6 Dragon Realm
#
# <<Invent Your Own Computer Games with Python>>
'''
print('Why not ?')
print(True and not False)
# A global variable named "spam"
spam = 1208
# This block doesn't run until funky() is called.
def funky() :
# We read the global variable's value:
# print(spam)
# We create a local variable named "spam"
# instead of changing the value of the global variable "spam"
spam = 302
# The name "spam" now refers to the local variable only
# for the rest of this function:
print(spam)
# Call the function funky():
funky()
# The global variable was not changed in funky():
print(spam)
# Function with parameters
def sayHello(name) :
print('Hello, ' + name)
print('Say hello to Alice.')
fizzy = 'Alice'
sayHello(fizzy)
print('Do not forget to say hello to Bob.')
sayHello('Bob')
sayHello('Lee')
def spam(myName) :
print('Hello, ' + myName)
myName = 'Waffles'
print('Your new name is ' + myName)
myName = 'Albert'
spam(myName)
print('Howdy, ' + myName)
|
flexible
|
{
"blob_id": "6af5faaaa9d894dd2b882cfe1bb8b8225780743c",
"index": 630,
"step-1": "<mask token>\n\n\ndef funky():\n spam = 302\n print(spam)\n\n\n<mask token>\n\n\ndef sayHello(name):\n print('Hello, ' + name)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef funky():\n spam = 302\n print(spam)\n\n\n<mask token>\n\n\ndef sayHello(name):\n print('Hello, ' + name)\n\n\n<mask token>\n\n\ndef spam(myName):\n print('Hello, ' + myName)\n myName = 'Waffles'\n print('Your new name is ' + myName)\n\n\n<mask token>\n",
"step-3": "<mask token>\nprint('Why not ?')\nprint(True and not False)\n<mask token>\n\n\ndef funky():\n spam = 302\n print(spam)\n\n\nfunky()\nprint(spam)\n\n\ndef sayHello(name):\n print('Hello, ' + name)\n\n\nprint('Say hello to Alice.')\n<mask token>\nsayHello(fizzy)\nprint('Do not forget to say hello to Bob.')\nsayHello('Bob')\nsayHello('Lee')\n\n\ndef spam(myName):\n print('Hello, ' + myName)\n myName = 'Waffles'\n print('Your new name is ' + myName)\n\n\n<mask token>\nspam(myName)\nprint('Howdy, ' + myName)\n",
"step-4": "<mask token>\nprint('Why not ?')\nprint(True and not False)\nspam = 1208\n\n\ndef funky():\n spam = 302\n print(spam)\n\n\nfunky()\nprint(spam)\n\n\ndef sayHello(name):\n print('Hello, ' + name)\n\n\nprint('Say hello to Alice.')\nfizzy = 'Alice'\nsayHello(fizzy)\nprint('Do not forget to say hello to Bob.')\nsayHello('Bob')\nsayHello('Lee')\n\n\ndef spam(myName):\n print('Hello, ' + myName)\n myName = 'Waffles'\n print('Your new name is ' + myName)\n\n\nmyName = 'Albert'\nspam(myName)\nprint('Howdy, ' + myName)\n",
"step-5": "'''\n# VariableScope.py\n#\n# Written by leezhm on 13th March, 2012.\n#\n# Copyright (C) leezhm(c)126.com. All Right Reserved.\n#\n# For Chapter 6 Dragon Realm\n#\n# <<Invent Your Own Computer Games with Python>>\n'''\n\nprint('Why not ?')\n\nprint(True and not False)\n\n# A global variable named \"spam\"\nspam = 1208\n\n# This block doesn't run until funky() is called.\ndef funky() :\n # We read the global variable's value:\n # print(spam)\n\n # We create a local variable named \"spam\"\n # instead of changing the value of the global variable \"spam\"\n spam = 302\n\n # The name \"spam\" now refers to the local variable only\n # for the rest of this function:\n print(spam)\n\n# Call the function funky():\nfunky()\n\n# The global variable was not changed in funky():\nprint(spam)\n\n# Function with parameters\ndef sayHello(name) :\n print('Hello, ' + name)\n\nprint('Say hello to Alice.')\nfizzy = 'Alice'\nsayHello(fizzy)\nprint('Do not forget to say hello to Bob.')\nsayHello('Bob')\n\nsayHello('Lee')\n\ndef spam(myName) :\n print('Hello, ' + myName)\n myName = 'Waffles'\n print('Your new name is ' + myName)\n\nmyName = 'Albert'\nspam(myName)\nprint('Howdy, ' + myName)",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
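The sample above demonstrates that assigning to a name inside a function creates a new local variable rather than rebinding the global one. For contrast, a minimal sketch (not part of the sample) of Python's `global` statement, which does rebind the module-level name:

```python
spam = 1208

def funky_global():
    global spam  # opt in to rebinding the module-level name
    spam = 302

funky_global()
print(spam)  # 302 -- this time the global really was changed
```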
import os, tempfile, shutil
from flask import Flask, flash, request, redirect, url_for, send_from_directory, send_file
from werkzeug.utils import secure_filename
from contextlib import contextmanager
"""
Flask stores uploaded FileStorage objects in memory if they are small. Otherwise, it internally uses tempfile.gettempdir() which returns the globally
configured temporary directory that tempfile is using.
WARNING: Flask accepts an unlimited file size unless I limit it
Flask encourages the use of <FileStorage>.save() to save uploaded files on the server. Afterwards, I can interact with the files normally. There does
not appear to be an easy way to directly interact with a FileStorage object with such functions as open()
"""
#UPLOAD_FOLDER = './uploads'
ALLOWED_EXTENSIONS = set(['txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif'])
app = Flask(__name__)
# Limit the file size to 16 MB
app.config['MAX_CONTENT_LENGTH'] = 16 * 1024 * 1024
# I want each user to have their own upload folder
#app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
def allowed_file(filename):
return '.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
"""
Upload a text file and the server will process the file by writing a single line to it and returning the modified file. The temporary directory where
the file was saved (and modified) is deleted at the end of the request. It works exactly as expected! Try stepping through it.
"""
@app.route('/', methods=['GET', 'POST'])
def upload_file():
if request.method == 'POST':
# check if the post request has the file part
if 'file' not in request.files:
flash('No file part')
return redirect(request.url)
f = request.files['file']
# if the user does not select file, browser should also submit an empty part without filename
if f.filename == '':
flash('No selected file')
return redirect(request.url)
if f and allowed_file(f.filename):
"""
This code is fine because 'with' acts like a finally block. The context manager will always exit (unless the program abnormally
terminates), even if an exception is thrown or return is called within the 'with' block. Thus, I can send the processed file to the
client and then the entire directory will be deleted.
"""
filename = secure_filename(f.filename)
with TemporaryDirectory() as temp_dir:
print("temp_dir was: " + temp_dir)
path = os.path.join(temp_dir, filename)
f.save(path)
#f.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
with open(path, "r+") as my_file:
my_file.write("The server wrote this line.\n")
return send_from_directory(temp_dir, filename)
#return redirect(url_for('uploaded_file', filename=filename))
return '''
<!doctype html>
<title>Upload new File</title>
<h1>Upload new File</h1>
<form method=post enctype=multipart/form-data>
<input type=file name=file>
<input type=submit value=Upload>
</form>
'''
# Send the uploaded file right back to the user as an example. I don't do this because I process the file and spit it back to the user
"""
@app.route('/uploads/<filename>')
def uploaded_file(filename):
return send_from_directory(app.config['UPLOAD_FOLDER'], filename)
"""
# Create a context manager to deal with automatically deleting the temporary directory when the 'with' statement exits
@contextmanager
def TemporaryDirectory():
name = tempfile.mkdtemp()
try:
yield name
finally:
shutil.rmtree(name)
@app.route("/safe", methods=["POST"])
def safe():
f = request.files["file-form-param"]
name = secure_filename(f.filename)
filepath = os.path.join(os.path.dirname(__file__), "uploads", name)
f.save(filepath)
return str({
"filename": name,
"saved at": filepath
})
@app.route("/unsafe", methods=["POST"])
def unsafe():
f = request.files["file-form-param"]
filepath = os.path.join(os.path.dirname(__file__), "uploads", f.filename)
f.save(filepath)
return str({
"filename": f.filename,
"saved at": filepath
})
@app.route("/sendfile", methods=["POST"])
def send_file_py():
filename = request.form.get("filename")
return send_file(os.path.join(os.path.dirname(__file__), "uploads", filename))
@app.route("/sendfromdirectory", methods=["POST"])
def send_from_directory_py():
filename = request.form.get("filename")
return send_from_directory(os.path.join(os.path.dirname(__file__), "uploads"), filename)
|
normal
|
{
"blob_id": "9f6cfeff9e00079715827a2887263c14a1bb51ff",
"index": 7679,
"step-1": "<mask token>\n\n\n@contextmanager\ndef TemporaryDirectory():\n name = tempfile.mkdtemp()\n try:\n yield name\n finally:\n shutil.rmtree(name)\n\n\n@app.route('/safe', methods=['POST'])\ndef safe():\n f = request.files['file-form-param']\n name = secure_filename(f.filename)\n filepath = os.path.join(os.path.dirname(__file__), 'uploads', name)\n f.save(filepath)\n return str({'filename': name, 'saved at': filepath})\n\n\n@app.route('/unsafe', methods=['POST'])\ndef unsafe():\n f = request.files['file-form-param']\n filepath = os.path.join(os.path.dirname(__file__), 'uploads', f.filename)\n f.save(filepath)\n return str({'filename': f.filename, 'saved at': filepath})\n\n\n@app.route('/sendfile', methods=['POST'])\ndef send_file_py():\n filename = request.form.get('filename')\n return send_file(os.path.join(os.path.dirname(__file__), 'uploads',\n filename))\n\n\n@app.route('/sendfromdirectory', methods=['POST'])\ndef send_from_directory_py():\n filename = request.form.get('filename')\n return send_from_directory(os.path.join(os.path.dirname(__file__),\n 'uploads'), filename)\n",
"step-2": "<mask token>\n\n\ndef allowed_file(filename):\n return '.' in filename and filename.rsplit('.', 1)[1].lower(\n ) in ALLOWED_EXTENSIONS\n\n\n<mask token>\n\n\n@contextmanager\ndef TemporaryDirectory():\n name = tempfile.mkdtemp()\n try:\n yield name\n finally:\n shutil.rmtree(name)\n\n\n@app.route('/safe', methods=['POST'])\ndef safe():\n f = request.files['file-form-param']\n name = secure_filename(f.filename)\n filepath = os.path.join(os.path.dirname(__file__), 'uploads', name)\n f.save(filepath)\n return str({'filename': name, 'saved at': filepath})\n\n\n@app.route('/unsafe', methods=['POST'])\ndef unsafe():\n f = request.files['file-form-param']\n filepath = os.path.join(os.path.dirname(__file__), 'uploads', f.filename)\n f.save(filepath)\n return str({'filename': f.filename, 'saved at': filepath})\n\n\n@app.route('/sendfile', methods=['POST'])\ndef send_file_py():\n filename = request.form.get('filename')\n return send_file(os.path.join(os.path.dirname(__file__), 'uploads',\n filename))\n\n\n@app.route('/sendfromdirectory', methods=['POST'])\ndef send_from_directory_py():\n filename = request.form.get('filename')\n return send_from_directory(os.path.join(os.path.dirname(__file__),\n 'uploads'), filename)\n",
"step-3": "<mask token>\nALLOWED_EXTENSIONS = set(['txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif'])\napp = Flask(__name__)\napp.config['MAX_CONTENT_LENGTH'] = 16 * 1024 * 1024\n\n\ndef allowed_file(filename):\n return '.' in filename and filename.rsplit('.', 1)[1].lower(\n ) in ALLOWED_EXTENSIONS\n\n\n<mask token>\n\n\n@app.route('/', methods=['GET', 'POST'])\ndef upload_file():\n if request.method == 'POST':\n if 'file' not in request.files:\n flash('No file part')\n return redirect(request.url)\n f = request.files['file']\n if f.filename == '':\n flash('No selected file')\n return redirect(request.url)\n if f and allowed_file(f.filename):\n \"\"\" \n This code is fine because 'with' acts like a finally block. The context manager will always exit (unless the program abnormally\n terminates), even if an exception is thrown or return is called within the 'with' block. Thus, I can send the processed file to the\n client and then the entire directory will be deleted.\n \"\"\"\n filename = secure_filename(f.filename)\n with TemporaryDirectory() as temp_dir:\n print('temp_dir was: ' + temp_dir)\n path = os.path.join(temp_dir, filename)\n f.save(path)\n with open(path, 'r+') as my_file:\n my_file.write('The server wrote this line.\\n')\n return send_from_directory(temp_dir, filename)\n return \"\"\"\n <!doctype html>\n <title>Upload new File</title>\n <h1>Upload new File</h1>\n <form method=post enctype=multipart/form-data>\n <input type=file name=file>\n <input type=submit value=Upload>\n </form>\n \"\"\"\n\n\n<mask token>\n\n\n@contextmanager\ndef TemporaryDirectory():\n name = tempfile.mkdtemp()\n try:\n yield name\n finally:\n shutil.rmtree(name)\n\n\n@app.route('/safe', methods=['POST'])\ndef safe():\n f = request.files['file-form-param']\n name = secure_filename(f.filename)\n filepath = os.path.join(os.path.dirname(__file__), 'uploads', name)\n f.save(filepath)\n return str({'filename': name, 'saved at': filepath})\n\n\n@app.route('/unsafe', methods=['POST'])\ndef unsafe():\n f = request.files['file-form-param']\n filepath = os.path.join(os.path.dirname(__file__), 'uploads', f.filename)\n f.save(filepath)\n return str({'filename': f.filename, 'saved at': filepath})\n\n\n@app.route('/sendfile', methods=['POST'])\ndef send_file_py():\n filename = request.form.get('filename')\n return send_file(os.path.join(os.path.dirname(__file__), 'uploads',\n filename))\n\n\n@app.route('/sendfromdirectory', methods=['POST'])\ndef send_from_directory_py():\n filename = request.form.get('filename')\n return send_from_directory(os.path.join(os.path.dirname(__file__),\n 'uploads'), filename)\n",
"step-4": "import os, tempfile, shutil\nfrom flask import Flask, flash, request, redirect, url_for, send_from_directory, send_file\nfrom werkzeug.utils import secure_filename\nfrom contextlib import contextmanager\n<mask token>\nALLOWED_EXTENSIONS = set(['txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif'])\napp = Flask(__name__)\napp.config['MAX_CONTENT_LENGTH'] = 16 * 1024 * 1024\n\n\ndef allowed_file(filename):\n return '.' in filename and filename.rsplit('.', 1)[1].lower(\n ) in ALLOWED_EXTENSIONS\n\n\n<mask token>\n\n\n@app.route('/', methods=['GET', 'POST'])\ndef upload_file():\n if request.method == 'POST':\n if 'file' not in request.files:\n flash('No file part')\n return redirect(request.url)\n f = request.files['file']\n if f.filename == '':\n flash('No selected file')\n return redirect(request.url)\n if f and allowed_file(f.filename):\n \"\"\" \n This code is fine because 'with' acts like a finally block. The context manager will always exit (unless the program abnormally\n terminates), even if an exception is thrown or return is called within the 'with' block. Thus, I can send the processed file to the\n client and then the entire directory will be deleted.\n \"\"\"\n filename = secure_filename(f.filename)\n with TemporaryDirectory() as temp_dir:\n print('temp_dir was: ' + temp_dir)\n path = os.path.join(temp_dir, filename)\n f.save(path)\n with open(path, 'r+') as my_file:\n my_file.write('The server wrote this line.\\n')\n return send_from_directory(temp_dir, filename)\n return \"\"\"\n <!doctype html>\n <title>Upload new File</title>\n <h1>Upload new File</h1>\n <form method=post enctype=multipart/form-data>\n <input type=file name=file>\n <input type=submit value=Upload>\n </form>\n \"\"\"\n\n\n<mask token>\n\n\n@contextmanager\ndef TemporaryDirectory():\n name = tempfile.mkdtemp()\n try:\n yield name\n finally:\n shutil.rmtree(name)\n\n\n@app.route('/safe', methods=['POST'])\ndef safe():\n f = request.files['file-form-param']\n name = secure_filename(f.filename)\n filepath = os.path.join(os.path.dirname(__file__), 'uploads', name)\n f.save(filepath)\n return str({'filename': name, 'saved at': filepath})\n\n\n@app.route('/unsafe', methods=['POST'])\ndef unsafe():\n f = request.files['file-form-param']\n filepath = os.path.join(os.path.dirname(__file__), 'uploads', f.filename)\n f.save(filepath)\n return str({'filename': f.filename, 'saved at': filepath})\n\n\n@app.route('/sendfile', methods=['POST'])\ndef send_file_py():\n filename = request.form.get('filename')\n return send_file(os.path.join(os.path.dirname(__file__), 'uploads',\n filename))\n\n\n@app.route('/sendfromdirectory', methods=['POST'])\ndef send_from_directory_py():\n filename = request.form.get('filename')\n return send_from_directory(os.path.join(os.path.dirname(__file__),\n 'uploads'), filename)\n",
"step-5": "import os, tempfile, shutil\nfrom flask import Flask, flash, request, redirect, url_for, send_from_directory, send_file\nfrom werkzeug.utils import secure_filename\nfrom contextlib import contextmanager\n\n\n\"\"\"\nFlask stores uploaded FileStorage objects in memory if they are small. Otherwise, it internally uses tempfile.gettempdir() which returns the globally\nconfigured temporary directory that tempfile is using.\n\nWARNING: Flask accepts an unlimited file size unless I limit it\n\nFlask encourages the use of <FileStorage>.save() to save uploaded files on the server. Afterwards, I can interact with the files normally. There does\nnot appear to be an easy way to directly interact with a FileStorage object with such functions as open()\n\"\"\"\n\n\n#UPLOAD_FOLDER = './uploads'\nALLOWED_EXTENSIONS = set(['txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif'])\n\n\napp = Flask(__name__)\n# Limit the file size fo 16 MB\napp.config['MAX_CONTENT_LENGTH'] = 16 * 1024 * 1024\n# I want each user to have their own upload folder\n#app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\n\n\ndef allowed_file(filename):\n return '.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS\n\n\n\"\"\"\nUpload a text file and the server will process the file by writing a single line to it and returning the modified file. The temporary directory where\nthe file was saved (and modified) is deleted at the end of the request. It works exactly as expected! Try stepping through it.\n\"\"\"\n@app.route('/', methods=['GET', 'POST'])\ndef upload_file():\n if request.method == 'POST':\n # check if the post request has the file part\n if 'file' not in request.files:\n flash('No file part')\n return redirect(request.url)\n f = request.files['file']\n # if the user does not select file, browser should also submit an empty part without filename\n if f.filename == '':\n flash('No selected file')\n return redirect(request.url)\n if f and allowed_file(f.filename):\n \"\"\" \n This code is fine because 'with' acts like a finally block. The context manager will always exit (unless the program abnormally\n terminates), even if an exception is thrown or return is called within the 'with' block. Thus, I can send the processed file to the\n client and then the entire directory will be deleted.\n \"\"\"\n filename = secure_filename(f.filename)\n with TemporaryDirectory() as temp_dir:\n print(\"temp_dir was: \" + temp_dir)\n path = os.path.join(temp_dir, filename)\n f.save(path)\n #f.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n with open(path, \"r+\") as my_file:\n my_file.write(\"The server wrote this line.\\n\")\n return send_from_directory(temp_dir, filename)\n #return redirect(url_for('uploaded_file', filename=filename))\n return '''\n <!doctype html>\n <title>Upload new File</title>\n <h1>Upload new File</h1>\n <form method=post enctype=multipart/form-data>\n <input type=file name=file>\n <input type=submit value=Upload>\n </form>\n '''\n\n\n# Send the uploaded file right back to the user as an example. 
I don't do this because I process the file and spit it back to the user\n\"\"\"\n@app.route('/uploads/<filename>')\ndef uploaded_file(filename):\n return send_from_directory(app.config['UPLOAD_FOLDER'], filename)\n\"\"\"\n\n\n# Create a context manager to deal with automatically deleting the temporary directory when the 'with' statement exists\n@contextmanager\ndef TemporaryDirectory():\n name = tempfile.mkdtemp()\n try:\n yield name\n finally:\n shutil.rmtree(name)\n\n\n@app.route(\"/safe\", methods=[\"POST\"])\ndef safe():\n f = request.files[\"file-form-param\"]\n name = secure_filename(f.filename)\n filepath = os.path.join(os.path.dirname(__file__), \"uploads\", name)\n f.save(filepath)\n return str({\n \"filename\": name,\n \"saved at\": filepath\n })\n\n\n@app.route(\"/unsafe\", methods=[\"POST\"])\ndef unsafe():\n f = request.files[\"file-form-param\"]\n filepath = os.path.join(os.path.dirname(__file__), \"uploads\", f.filename)\n f.save(filepath)\n return str({\n \"filename\": f.filename,\n \"saved at\": filepath\n })\n\n\n@app.route(\"/sendfile\", methods=[\"POST\"])\ndef send_file_py():\n filename = request.form.get(\"filename\")\n return send_file(os.path.join(os.path.dirname(__file__), \"uploads\", filename))\n\n\n@app.route(\"/sendfromdirectory\", methods=[\"POST\"])\ndef send_from_directory_py():\n filename = request.form.get(\"filename\")\n return send_from_directory(os.path.join(os.path.dirname(__file__), \"uploads\"), filename)\n",
"step-ids": [
5,
6,
8,
9,
10
]
}
|
[
5,
6,
8,
9,
10
] |
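The `/safe` and `/unsafe` routes in the row above differ only in whether the client-supplied filename is passed through werkzeug's `secure_filename`, which is what blocks path traversal. A standalone sketch of the difference (no running server needed):

```python
from werkzeug.utils import secure_filename

# A hostile client can submit a filename that tries to escape the uploads dir.
hostile = "../../etc/passwd"
print(secure_filename(hostile))       # 'etc_passwd' -- path separators stripped
print(secure_filename("résumé.pdf"))  # 'resume.pdf' -- normalized to ASCII
```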
<|reserved_special_token_0|>
class Disengage(smach.State):
def __init__(self, flare_task):
smach.State.__init__(self, outcomes=['start_complete',
'complete_outcome', 'aborted'])
self.flare = flare_task
<|reserved_special_token_0|>
class Search(smach.State):
timeout = 10000
def __init__(self, flare_task):
smach.State.__init__(self, outcomes=['search_complete', 'aborted',
'mission_abort'])
self.flare = flare_task
if self.flare.testing:
self.flare.unregisterHeading()
def execute(self, userdata):
if self.flare.isAborted:
rospy.signal_shutdown('Bye!')
return 'aborted'
timecount = 0
while not self.flare.rectData['detected']:
if timecount > self.timeout or rospy.is_shutdown(
) or self.flare.isKilled:
self.flare.abortMission()
self.flare.failedTask()
return 'aborted'
self.flare.sendMovement(forward=1.0)
rospy.sleep(rospy.Duration(0.5))
timecount += 1
return 'search_complete'
class Manuoevre(smach.State):
def __init__(self, flare_task):
smach.State.__init__(self, outcomes=['manuoevring',
'manuoevre_complete', 'aborted', 'mission_abort'])
self.flare = flare_task
self.deltaThresh = 0.15
self.prevAngle = []
self.count = 0
self.flareSeen = True
def execute(self, userdata):
if self.flare.isAborted:
rospy.signal_shutdown('Bye!')
return 'aborted'
screenWidth = self.flare.screen['width']
screenCenterX = screenWidth / 2
deltaX = (self.flare.rectData['centroids'][0] - screenCenterX
) / screenWidth
rospy.loginfo('Area {}'.format(self.flare.rectData['area']))
rospy.loginfo('Delta X: {}'.format(deltaX))
if abs(deltaX) < 0.15:
self.flare.sendMovement(forward=self.flare.forwardOffset)
rospy.sleep(rospy.Duration(0.5))
else:
sidemove = math.copysign(deltaX * self.flare.deltaXMultiplier,
deltaX)
self.flare.sendMovement(forward=0.1, sidemove=sidemove)
rospy.sleep(rospy.Duration(0.5))
if self.flare.rectData['area'] > self.flare.headOnArea:
return 'manuoevre_complete'
return 'manuoevring'
class Completing(smach.State):
def __init__(self, flare_task):
smach.State.__init__(self, outcomes=['complete_complete',
'completing', 'aborted', 'mission_abort'])
self.flare = flare_task
self.count = 0
def execute(self, userdata):
if self.flare.isAborted:
self.flare.isKilled = True
rospy.signal_shutdown('Bye!')
return 'aborted'
screenWidth = self.flare.screen['width']
screenCenterX = screenWidth / 2
deltaX = (self.flare.rectData['centroids'][0] - screenCenterX
) / screenWidth
deltaXMult = 2.0
rospy.loginfo('Delta X:{}'.format(deltaX))
if abs(deltaX) < 0.03:
self.count += 1
rospy.loginfo('Count: {}'.format(self.count))
return 'completing'
if self.count >= 2000:
self.flare.sendMovement(forward=4.0)
rospy.loginfo('Hitting the flare')
self.flare.locomotionClient.wait_for_result()
self.flare.sendMovement(forward=-2.0)
self.flare.locomotionClient.wait_for_result()
self.flare.taskComplete()
return 'complete_complete'
else:
self.count = 0
sidemove = math.copysign(deltaX * deltaXMult, deltaX)
self.flare.sendMovement(forward=0.0, sidemove=sidemove)
rospy.sleep(rospy.Duration(0.5))
return 'completing'
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Disengage(smach.State):
def __init__(self, flare_task):
smach.State.__init__(self, outcomes=['start_complete',
'complete_outcome', 'aborted'])
self.flare = flare_task
def execute(self, userdata):
if self.flare.isKilled:
rospy.signal_shutdown('Bye')
return 'aborted'
while self.flare.isAborted:
rospy.sleep(rospy.Duration(0.2))
if self.flare.testing:
self.flare.register()
rospy.loginfo('Starting Flare')
return 'start_complete'
class Search(smach.State):
timeout = 10000
def __init__(self, flare_task):
smach.State.__init__(self, outcomes=['search_complete', 'aborted',
'mission_abort'])
self.flare = flare_task
if self.flare.testing:
self.flare.unregisterHeading()
def execute(self, userdata):
if self.flare.isAborted:
rospy.signal_shutdown('Bye!')
return 'aborted'
timecount = 0
while not self.flare.rectData['detected']:
if timecount > self.timeout or rospy.is_shutdown(
) or self.flare.isKilled:
self.flare.abortMission()
self.flare.failedTask()
return 'aborted'
self.flare.sendMovement(forward=1.0)
rospy.sleep(rospy.Duration(0.5))
timecount += 1
return 'search_complete'
class Manuoevre(smach.State):
def __init__(self, flare_task):
smach.State.__init__(self, outcomes=['manuoevring',
'manuoevre_complete', 'aborted', 'mission_abort'])
self.flare = flare_task
self.deltaThresh = 0.15
self.prevAngle = []
self.count = 0
self.flareSeen = True
def execute(self, userdata):
if self.flare.isAborted:
rospy.signal_shutdown('Bye!')
return 'aborted'
screenWidth = self.flare.screen['width']
screenCenterX = screenWidth / 2
deltaX = (self.flare.rectData['centroids'][0] - screenCenterX
) / screenWidth
rospy.loginfo('Area {}'.format(self.flare.rectData['area']))
rospy.loginfo('Delta X: {}'.format(deltaX))
if abs(deltaX) < 0.15:
self.flare.sendMovement(forward=self.flare.forwardOffset)
rospy.sleep(rospy.Duration(0.5))
else:
sidemove = math.copysign(deltaX * self.flare.deltaXMultiplier,
deltaX)
self.flare.sendMovement(forward=0.1, sidemove=sidemove)
rospy.sleep(rospy.Duration(0.5))
if self.flare.rectData['area'] > self.flare.headOnArea:
return 'manuoevre_complete'
return 'manuoevring'
class Completing(smach.State):
def __init__(self, flare_task):
smach.State.__init__(self, outcomes=['complete_complete',
'completing', 'aborted', 'mission_abort'])
self.flare = flare_task
self.count = 0
def execute(self, userdata):
if self.flare.isAborted:
self.flare.isKilled = True
rospy.signal_shutdown('Bye!')
return 'aborted'
screenWidth = self.flare.screen['width']
screenCenterX = screenWidth / 2
deltaX = (self.flare.rectData['centroids'][0] - screenCenterX
) / screenWidth
deltaXMult = 2.0
rospy.loginfo('Delta X:{}'.format(deltaX))
if abs(deltaX) < 0.03:
self.count += 1
rospy.loginfo('Count: {}'.format(self.count))
return 'completing'
if self.count >= 2000:
self.flare.sendMovement(forward=4.0)
rospy.loginfo('Hitting the flare')
self.flare.locomotionClient.wait_for_result()
self.flare.sendMovement(forward=-2.0)
self.flare.locomotionClient.wait_for_result()
self.flare.taskComplete()
return 'complete_complete'
else:
self.count = 0
sidemove = math.copysign(deltaX * deltaXMult, deltaX)
self.flare.sendMovement(forward=0.0, sidemove=sidemove)
rospy.sleep(rospy.Duration(0.5))
return 'completing'
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Disengage(smach.State):
def __init__(self, flare_task):
smach.State.__init__(self, outcomes=['start_complete',
'complete_outcome', 'aborted'])
self.flare = flare_task
def execute(self, userdata):
if self.flare.isKilled:
rospy.signal_shutdown('Bye')
return 'aborted'
while self.flare.isAborted:
rospy.sleep(rospy.Duration(0.2))
if self.flare.testing:
self.flare.register()
rospy.loginfo('Starting Flare')
return 'start_complete'
class Search(smach.State):
timeout = 10000
def __init__(self, flare_task):
smach.State.__init__(self, outcomes=['search_complete', 'aborted',
'mission_abort'])
self.flare = flare_task
if self.flare.testing:
self.flare.unregisterHeading()
def execute(self, userdata):
if self.flare.isAborted:
rospy.signal_shutdown('Bye!')
return 'aborted'
timecount = 0
while not self.flare.rectData['detected']:
if timecount > self.timeout or rospy.is_shutdown(
) or self.flare.isKilled:
self.flare.abortMission()
self.flare.failedTask()
return 'aborted'
self.flare.sendMovement(forward=1.0)
rospy.sleep(rospy.Duration(0.5))
timecount += 1
return 'search_complete'
class Manuoevre(smach.State):
def __init__(self, flare_task):
smach.State.__init__(self, outcomes=['manuoevring',
'manuoevre_complete', 'aborted', 'mission_abort'])
self.flare = flare_task
self.deltaThresh = 0.15
self.prevAngle = []
self.count = 0
self.flareSeen = True
def execute(self, userdata):
if self.flare.isAborted:
rospy.signal_shutdown('Bye!')
return 'aborted'
screenWidth = self.flare.screen['width']
screenCenterX = screenWidth / 2
deltaX = (self.flare.rectData['centroids'][0] - screenCenterX
) / screenWidth
rospy.loginfo('Area {}'.format(self.flare.rectData['area']))
rospy.loginfo('Delta X: {}'.format(deltaX))
if abs(deltaX) < 0.15:
self.flare.sendMovement(forward=self.flare.forwardOffset)
rospy.sleep(rospy.Duration(0.5))
else:
sidemove = math.copysign(deltaX * self.flare.deltaXMultiplier,
deltaX)
self.flare.sendMovement(forward=0.1, sidemove=sidemove)
rospy.sleep(rospy.Duration(0.5))
if self.flare.rectData['area'] > self.flare.headOnArea:
return 'manuoevre_complete'
return 'manuoevring'
class Completing(smach.State):
def __init__(self, flare_task):
smach.State.__init__(self, outcomes=['complete_complete',
'completing', 'aborted', 'mission_abort'])
self.flare = flare_task
self.count = 0
def execute(self, userdata):
if self.flare.isAborted:
self.flare.isKilled = True
rospy.signal_shutdown('Bye!')
return 'aborted'
screenWidth = self.flare.screen['width']
screenCenterX = screenWidth / 2
deltaX = (self.flare.rectData['centroids'][0] - screenCenterX
) / screenWidth
deltaXMult = 2.0
rospy.loginfo('Delta X:{}'.format(deltaX))
if abs(deltaX) < 0.03:
self.count += 1
rospy.loginfo('Count: {}'.format(self.count))
return 'completing'
if self.count >= 2000:
self.flare.sendMovement(forward=4.0)
rospy.loginfo('Hitting the flare')
self.flare.locomotionClient.wait_for_result()
self.flare.sendMovement(forward=-2.0)
self.flare.locomotionClient.wait_for_result()
self.flare.taskComplete()
return 'complete_complete'
else:
self.count = 0
sidemove = math.copysign(deltaX * deltaXMult, deltaX)
self.flare.sendMovement(forward=0.0, sidemove=sidemove)
rospy.sleep(rospy.Duration(0.5))
return 'completing'
<|reserved_special_token_0|>
def flareCallback(config, level):
for param in flare.yellow_params:
flare.yellow_params[param] = config['yellow_' + param]
isTestMode = config['testing']
return config
def normHeading(heading):
if heading > 360:
return heading - 360
elif heading < 0:
return heading + 360
else:
return heading
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Disengage(smach.State):
def __init__(self, flare_task):
smach.State.__init__(self, outcomes=['start_complete',
'complete_outcome', 'aborted'])
self.flare = flare_task
def execute(self, userdata):
if self.flare.isKilled:
rospy.signal_shutdown('Bye')
return 'aborted'
while self.flare.isAborted:
rospy.sleep(rospy.Duration(0.2))
if self.flare.testing:
self.flare.register()
rospy.loginfo('Starting Flare')
return 'start_complete'
class Search(smach.State):
timeout = 10000
def __init__(self, flare_task):
smach.State.__init__(self, outcomes=['search_complete', 'aborted',
'mission_abort'])
self.flare = flare_task
if self.flare.testing:
self.flare.unregisterHeading()
def execute(self, userdata):
if self.flare.isAborted:
rospy.signal_shutdown('Bye!')
return 'aborted'
timecount = 0
while not self.flare.rectData['detected']:
if timecount > self.timeout or rospy.is_shutdown(
) or self.flare.isKilled:
self.flare.abortMission()
self.flare.failedTask()
return 'aborted'
self.flare.sendMovement(forward=1.0)
rospy.sleep(rospy.Duration(0.5))
timecount += 1
return 'search_complete'
class Manuoevre(smach.State):
def __init__(self, flare_task):
smach.State.__init__(self, outcomes=['manuoevring',
'manuoevre_complete', 'aborted', 'mission_abort'])
self.flare = flare_task
self.deltaThresh = 0.15
self.prevAngle = []
self.count = 0
self.flareSeen = True
def execute(self, userdata):
if self.flare.isAborted:
rospy.signal_shutdown('Bye!')
return 'aborted'
screenWidth = self.flare.screen['width']
screenCenterX = screenWidth / 2
deltaX = (self.flare.rectData['centroids'][0] - screenCenterX
) / screenWidth
rospy.loginfo('Area {}'.format(self.flare.rectData['area']))
rospy.loginfo('Delta X: {}'.format(deltaX))
if abs(deltaX) < 0.15:
self.flare.sendMovement(forward=self.flare.forwardOffset)
rospy.sleep(rospy.Duration(0.5))
else:
sidemove = math.copysign(deltaX * self.flare.deltaXMultiplier,
deltaX)
self.flare.sendMovement(forward=0.1, sidemove=sidemove)
rospy.sleep(rospy.Duration(0.5))
if self.flare.rectData['area'] > self.flare.headOnArea:
return 'manuoevre_complete'
return 'manuoevring'
class Completing(smach.State):
def __init__(self, flare_task):
smach.State.__init__(self, outcomes=['complete_complete',
'completing', 'aborted', 'mission_abort'])
self.flare = flare_task
self.count = 0
def execute(self, userdata):
if self.flare.isAborted:
self.flare.isKilled = True
rospy.signal_shutdown('Bye!')
return 'aborted'
screenWidth = self.flare.screen['width']
screenCenterX = screenWidth / 2
deltaX = (self.flare.rectData['centroids'][0] - screenCenterX
) / screenWidth
deltaXMult = 2.0
rospy.loginfo('Delta X:{}'.format(deltaX))
if abs(deltaX) < 0.03:
self.count += 1
rospy.loginfo('Count: {}'.format(self.count))
return 'completing'
if self.count >= 2000:
self.flare.sendMovement(forward=4.0)
rospy.loginfo('Hitting the flare')
self.flare.locomotionClient.wait_for_result()
self.flare.sendMovement(forward=-2.0)
self.flare.locomotionClient.wait_for_result()
self.flare.taskComplete()
return 'complete_complete'
else:
self.count = 0
sidemove = math.copysign(deltaX * deltaXMult, deltaX)
self.flare.sendMovement(forward=0.0, sidemove=sidemove)
rospy.sleep(rospy.Duration(0.5))
return 'completing'
<|reserved_special_token_0|>
def handle_srv(req):
global isStart
global isAbort
global locomotionGoal
global flare
rospy.loginfo('Flare service handled')
if req.start_request:
rospy.loginfo('Flare is Start')
isStart = True
isAbort = False
if req.abort_reqest:
rospy.loginfo('Flare abort received')
isAbort = True
isStart = False
flare.unregister()
return mission_to_visionResponse(isStart, isAbort)
def flareCallback(config, level):
for param in flare.yellow_params:
flare.yellow_params[param] = config['yellow_' + param]
isTestMode = config['testing']
return config
def normHeading(heading):
if heading > 360:
return heading - 360
elif heading < 0:
return heading + 360
else:
return heading
<|reserved_special_token_0|>
<|reserved_special_token_1|>
#!/usr/bin/env python
'''
State Machine for the Flare task
'''
import roslib
import rospy
import actionlib
from rospy.timer import sleep
import smach
import smach_ros
from dynamic_reconfigure.server import Server
import math
import os
import sys
import numpy as np
from bbauv_msgs.msg import *
from bbauv_msgs.srv import *
from flare_vision import Flare
#Global variables
isStart = False
isEnd = False
isTestMode = False #If test mode then don't wait for mission call
rosRate = None
flare = None
VisionLoopCount = 0 #Counter for number of times the image is being processed
flareSeen = False
mani_pub = None
movement_client = None
locomotionGoal = None
flare_params = {'flare_area':0, 'centering_x':0, 'centering_y':0}
#Starts off in disengage class
class Disengage(smach.State):
def __init__(self, flare_task):
smach.State.__init__(self, outcomes=['start_complete', 'complete_outcome', 'aborted'])
self.flare = flare_task
def execute(self, userdata):
# self.flare.unregister()
if self.flare.isKilled:
rospy.signal_shutdown("Bye")
return 'aborted'
while self.flare.isAborted:
rospy.sleep(rospy.Duration(0.2))
if self.flare.testing:
self.flare.register()
rospy.loginfo("Starting Flare")
return 'start_complete'
#Searches for the flare
class Search(smach.State):
    timeout = 10000 #timeout counter (0.5s polls) before aborting task
def __init__(self, flare_task):
smach.State.__init__(self, outcomes=['search_complete', 'aborted', 'mission_abort'])
self.flare = flare_task
if self.flare.testing:
self.flare.unregisterHeading()
#rospy.loginfo(self.flare.curHeading)
def execute(self, userdata):
#Check for abort signal
if self.flare.isAborted:
rospy.signal_shutdown("Bye!")
return 'aborted'
#Check if flare found or timeout already
timecount = 0
while not self.flare.rectData['detected']:
if timecount > self.timeout or rospy.is_shutdown() or self.flare.isKilled:
self.flare.abortMission()
self.flare.failedTask();
return 'aborted'
self.flare.sendMovement(forward=1.0)
rospy.sleep(rospy.Duration(0.5))
timecount += 1
return 'search_complete'
#Bash towards the flare!
class Manuoevre(smach.State):
def __init__(self, flare_task):
smach.State.__init__(self, outcomes=['manuoevring', 'manuoevre_complete',
'aborted', 'mission_abort'])
self.flare = flare_task
self.deltaThresh = 0.15
self.prevAngle = []
self.count = 0
self.flareSeen = True
def execute(self,userdata):
#Check for aborted signal
if self.flare.isAborted:
rospy.signal_shutdown("Bye!")
return 'aborted'
# #Cannot detect already
# if not self.flare.rectData['detected']:
# self.count += 1
# if self.count > 4:
# self.flare.taskComplete()
# return 'manuoevre_complete'
# if not self.flare.rectData['detected'] and self.flareSeen:
# self.flare.sendMovement(forward=2.0)
# rospy.sleep(rospy.Duration(3))
# self.flare.taskComplete()
# return 'manuoevre_complete'
#Get to the flare
screenWidth = self.flare.screen['width']
screenCenterX = screenWidth / 2
deltaX = (self.flare.rectData['centroids'][0] - screenCenterX) / screenWidth
#rospy.loginfo("Delta X {}".format(deltaX))
rospy.loginfo("Area {}".format(self.flare.rectData['area']))
#Forward if center
rospy.loginfo("Delta X: {}".format(deltaX))
if abs(deltaX) < 0.15:
self.flare.sendMovement(forward=self.flare.forwardOffset)
rospy.sleep(rospy.Duration(0.5))
else:
#Sidemove if too far off center
sidemove = math.copysign(deltaX*self.flare.deltaXMultiplier, deltaX) #Random number
# sidemove = math.copysign(0.5, deltaX)
self.flare.sendMovement(forward=0.10, sidemove=sidemove)
rospy.sleep(rospy.Duration(0.5))
#Shoot straight and aim
if self.flare.rectData['area'] > self.flare.headOnArea:
return 'manuoevre_complete'
return 'manuoevring'
#return 'manuoevre_complete'
class Completing(smach.State):
def __init__(self, flare_task):
smach.State.__init__(self, outcomes=['complete_complete', 'completing',
'aborted', 'mission_abort'])
self.flare = flare_task
self.count = 0
def execute(self,userdata):
#Check for aborted signal
if self.flare.isAborted:
self.flare.isKilled = True
rospy.signal_shutdown("Bye!")
return 'aborted'
screenWidth = self.flare.screen['width']
screenCenterX = screenWidth / 2
deltaX = (self.flare.rectData['centroids'][0] - screenCenterX) / screenWidth
deltaXMult =2.0
rospy.loginfo("Delta X:{}".format(deltaX))
if abs(deltaX) < 0.03:
self.count += 1
rospy.loginfo("Count: {}".format(self.count))
return 'completing'
if self.count >= 2000:
self.flare.sendMovement(forward=4.0)
rospy.loginfo("Hitting the flare")
self.flare.locomotionClient.wait_for_result()
self.flare.sendMovement(forward=-2.0) #Retract
self.flare.locomotionClient.wait_for_result()
self.flare.taskComplete()
return 'complete_complete'
else:
self.count = 0
sidemove = math.copysign(deltaX*deltaXMult, deltaX) #Random number
self.flare.sendMovement(forward=0.00, sidemove=sidemove)
rospy.sleep(rospy.Duration(0.5))
return 'completing'
#self.flare.taskComplete()
#return 'complete_complete'
'''
Main python thread
'''
def handle_srv(req):
global isStart
global isAbort
global locomotionGoal
global flare
rospy.loginfo("Flare service handled")
if req.start_request:
rospy.loginfo("Flare is Start")
isStart = True
isAbort = False
#locomotionGoal = req.start_ctrl
if req.abort_reqest:
rospy.loginfo("Flare abort received")
isAbort = True
isStart = False
flare.unregister()
#To fill accordingly
return mission_to_visionResponse(isStart, isAbort)
#Param config callback
def flareCallback(config, level):
for param in flare.yellow_params:
flare.yellow_params[param] = config['yellow_' + param]
isTestMode = config["testing"]
return config
#Utility function for normalising heading
def normHeading(heading):
if heading > 360:
return heading - 360
elif heading < 0:
return heading + 360
else:
return heading
if __name__ == '__main__':
rospy.init_node("Flare", anonymous=False)
rosRate = rospy.Rate(20)
flare_task = Flare()
rospy.loginfo("Flare loaded!")
#Create state machine container
sm = smach.StateMachine(outcomes=['complete_flare', 'aborted'])
#Disengage, Search, Manuoevre
with sm:
smach.StateMachine.add("DISENGAGE", Disengage(flare_task),
transitions={'start_complete': "SEARCH",
'complete_outcome': 'complete_flare',
'aborted': 'aborted'})
smach.StateMachine.add("SEARCH", Search(flare_task),
transitions={'search_complete': "MANUOEVRE", 'aborted': 'aborted',
'mission_abort': "DISENGAGE"})
smach.StateMachine.add("MANUOEVRE", Manuoevre(flare_task),
transitions = {'manuoevring': "MANUOEVRE",
'manuoevre_complete': "COMPLETING",
'aborted': 'aborted',
'mission_abort': "DISENGAGE"})
smach.StateMachine.add("COMPLETING", Completing(flare_task),
transitions = {'complete_complete': "DISENGAGE",
'completing': "COMPLETING",
'aborted': 'aborted',
'mission_abort': "DISENGAGE"})
sis = smach_ros.IntrospectionServer('flare_task', sm, '/SM_ROOT')
sis.start()
outcomes = sm.execute()
#wait for ctrl-c
rospy.spin()
sis.stop()
|
flexible
|
{
"blob_id": "0bb2a6ebbf75fae3466c34a435a531fabdc07f62",
"index": 2984,
"step-1": "<mask token>\n\n\nclass Disengage(smach.State):\n\n def __init__(self, flare_task):\n smach.State.__init__(self, outcomes=['start_complete',\n 'complete_outcome', 'aborted'])\n self.flare = flare_task\n <mask token>\n\n\nclass Search(smach.State):\n timeout = 10000\n\n def __init__(self, flare_task):\n smach.State.__init__(self, outcomes=['search_complete', 'aborted',\n 'mission_abort'])\n self.flare = flare_task\n if self.flare.testing:\n self.flare.unregisterHeading()\n\n def execute(self, userdata):\n if self.flare.isAborted:\n rospy.signal_shutdown('Bye!')\n return 'aborted'\n timecount = 0\n while not self.flare.rectData['detected']:\n if timecount > self.timeout or rospy.is_shutdown(\n ) or self.flare.isKilled:\n self.flare.abortMission()\n self.flare.failedTask()\n return 'aborted'\n self.flare.sendMovement(forward=1.0)\n rospy.sleep(rospy.Duration(0.5))\n timecount += 1\n return 'search_complete'\n\n\nclass Manuoevre(smach.State):\n\n def __init__(self, flare_task):\n smach.State.__init__(self, outcomes=['manuoevring',\n 'manuoevre_complete', 'aborted', 'mission_abort'])\n self.flare = flare_task\n self.deltaThresh = 0.15\n self.prevAngle = []\n self.count = 0\n self.flareSeen = True\n\n def execute(self, userdata):\n if self.flare.isAborted:\n rospy.signal_shutdown('Bye!')\n return 'aborted'\n screenWidth = self.flare.screen['width']\n screenCenterX = screenWidth / 2\n deltaX = (self.flare.rectData['centroids'][0] - screenCenterX\n ) / screenWidth\n rospy.loginfo('Area {}'.format(self.flare.rectData['area']))\n rospy.loginfo('Delta X: {}'.format(deltaX))\n if abs(deltaX) < 0.15:\n self.flare.sendMovement(forward=self.flare.forwardOffset)\n rospy.sleep(rospy.Duration(0.5))\n else:\n sidemove = math.copysign(deltaX * self.flare.deltaXMultiplier,\n deltaX)\n self.flare.sendMovement(forward=0.1, sidemove=sidemove)\n rospy.sleep(rospy.Duration(0.5))\n if self.flare.rectData['area'] > self.flare.headOnArea:\n return 'manuoevre_complete'\n return 'manuoevring'\n\n\nclass Completing(smach.State):\n\n def __init__(self, flare_task):\n smach.State.__init__(self, outcomes=['complete_complete',\n 'completing', 'aborted', 'mission_abort'])\n self.flare = flare_task\n self.count = 0\n\n def execute(self, userdata):\n if self.flare.isAborted:\n self.flare.isKilled = True\n rospy.signal_shutdown('Bye!')\n return 'aborted'\n screenWidth = self.flare.screen['width']\n screenCenterX = screenWidth / 2\n deltaX = (self.flare.rectData['centroids'][0] - screenCenterX\n ) / screenWidth\n deltaXMult = 2.0\n rospy.loginfo('Delta X:{}'.format(deltaX))\n if abs(deltaX) < 0.03:\n self.count += 1\n rospy.loginfo('Count: {}'.format(self.count))\n return 'completing'\n if self.count >= 2000:\n self.flare.sendMovement(forward=4.0)\n rospy.loginfo('Hitting the flare')\n self.flare.locomotionClient.wait_for_result()\n self.flare.sendMovement(forward=-2.0)\n self.flare.locomotionClient.wait_for_result()\n self.flare.taskComplete()\n return 'complete_complete'\n else:\n self.count = 0\n sidemove = math.copysign(deltaX * deltaXMult, deltaX)\n self.flare.sendMovement(forward=0.0, sidemove=sidemove)\n rospy.sleep(rospy.Duration(0.5))\n return 'completing'\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Disengage(smach.State):\n\n def __init__(self, flare_task):\n smach.State.__init__(self, outcomes=['start_complete',\n 'complete_outcome', 'aborted'])\n self.flare = flare_task\n\n def execute(self, userdata):\n if self.flare.isKilled:\n rospy.signal_shutdown('Bye')\n return 'aborted'\n while self.flare.isAborted:\n rospy.sleep(rospy.Duration(0.2))\n if self.flare.testing:\n self.flare.register()\n rospy.loginfo('Starting Flare')\n return 'start_complete'\n\n\nclass Search(smach.State):\n timeout = 10000\n\n def __init__(self, flare_task):\n smach.State.__init__(self, outcomes=['search_complete', 'aborted',\n 'mission_abort'])\n self.flare = flare_task\n if self.flare.testing:\n self.flare.unregisterHeading()\n\n def execute(self, userdata):\n if self.flare.isAborted:\n rospy.signal_shutdown('Bye!')\n return 'aborted'\n timecount = 0\n while not self.flare.rectData['detected']:\n if timecount > self.timeout or rospy.is_shutdown(\n ) or self.flare.isKilled:\n self.flare.abortMission()\n self.flare.failedTask()\n return 'aborted'\n self.flare.sendMovement(forward=1.0)\n rospy.sleep(rospy.Duration(0.5))\n timecount += 1\n return 'search_complete'\n\n\nclass Manuoevre(smach.State):\n\n def __init__(self, flare_task):\n smach.State.__init__(self, outcomes=['manuoevring',\n 'manuoevre_complete', 'aborted', 'mission_abort'])\n self.flare = flare_task\n self.deltaThresh = 0.15\n self.prevAngle = []\n self.count = 0\n self.flareSeen = True\n\n def execute(self, userdata):\n if self.flare.isAborted:\n rospy.signal_shutdown('Bye!')\n return 'aborted'\n screenWidth = self.flare.screen['width']\n screenCenterX = screenWidth / 2\n deltaX = (self.flare.rectData['centroids'][0] - screenCenterX\n ) / screenWidth\n rospy.loginfo('Area {}'.format(self.flare.rectData['area']))\n rospy.loginfo('Delta X: {}'.format(deltaX))\n if abs(deltaX) < 0.15:\n self.flare.sendMovement(forward=self.flare.forwardOffset)\n rospy.sleep(rospy.Duration(0.5))\n else:\n sidemove = math.copysign(deltaX * self.flare.deltaXMultiplier,\n deltaX)\n self.flare.sendMovement(forward=0.1, sidemove=sidemove)\n rospy.sleep(rospy.Duration(0.5))\n if self.flare.rectData['area'] > self.flare.headOnArea:\n return 'manuoevre_complete'\n return 'manuoevring'\n\n\nclass Completing(smach.State):\n\n def __init__(self, flare_task):\n smach.State.__init__(self, outcomes=['complete_complete',\n 'completing', 'aborted', 'mission_abort'])\n self.flare = flare_task\n self.count = 0\n\n def execute(self, userdata):\n if self.flare.isAborted:\n self.flare.isKilled = True\n rospy.signal_shutdown('Bye!')\n return 'aborted'\n screenWidth = self.flare.screen['width']\n screenCenterX = screenWidth / 2\n deltaX = (self.flare.rectData['centroids'][0] - screenCenterX\n ) / screenWidth\n deltaXMult = 2.0\n rospy.loginfo('Delta X:{}'.format(deltaX))\n if abs(deltaX) < 0.03:\n self.count += 1\n rospy.loginfo('Count: {}'.format(self.count))\n return 'completing'\n if self.count >= 2000:\n self.flare.sendMovement(forward=4.0)\n rospy.loginfo('Hitting the flare')\n self.flare.locomotionClient.wait_for_result()\n self.flare.sendMovement(forward=-2.0)\n self.flare.locomotionClient.wait_for_result()\n self.flare.taskComplete()\n return 'complete_complete'\n else:\n self.count = 0\n sidemove = math.copysign(deltaX * deltaXMult, deltaX)\n self.flare.sendMovement(forward=0.0, sidemove=sidemove)\n rospy.sleep(rospy.Duration(0.5))\n return 'completing'\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Disengage(smach.State):\n\n def __init__(self, flare_task):\n smach.State.__init__(self, outcomes=['start_complete',\n 'complete_outcome', 'aborted'])\n self.flare = flare_task\n\n def execute(self, userdata):\n if self.flare.isKilled:\n rospy.signal_shutdown('Bye')\n return 'aborted'\n while self.flare.isAborted:\n rospy.sleep(rospy.Duration(0.2))\n if self.flare.testing:\n self.flare.register()\n rospy.loginfo('Starting Flare')\n return 'start_complete'\n\n\nclass Search(smach.State):\n timeout = 10000\n\n def __init__(self, flare_task):\n smach.State.__init__(self, outcomes=['search_complete', 'aborted',\n 'mission_abort'])\n self.flare = flare_task\n if self.flare.testing:\n self.flare.unregisterHeading()\n\n def execute(self, userdata):\n if self.flare.isAborted:\n rospy.signal_shutdown('Bye!')\n return 'aborted'\n timecount = 0\n while not self.flare.rectData['detected']:\n if timecount > self.timeout or rospy.is_shutdown(\n ) or self.flare.isKilled:\n self.flare.abortMission()\n self.flare.failedTask()\n return 'aborted'\n self.flare.sendMovement(forward=1.0)\n rospy.sleep(rospy.Duration(0.5))\n timecount += 1\n return 'search_complete'\n\n\nclass Manuoevre(smach.State):\n\n def __init__(self, flare_task):\n smach.State.__init__(self, outcomes=['manuoevring',\n 'manuoevre_complete', 'aborted', 'mission_abort'])\n self.flare = flare_task\n self.deltaThresh = 0.15\n self.prevAngle = []\n self.count = 0\n self.flareSeen = True\n\n def execute(self, userdata):\n if self.flare.isAborted:\n rospy.signal_shutdown('Bye!')\n return 'aborted'\n screenWidth = self.flare.screen['width']\n screenCenterX = screenWidth / 2\n deltaX = (self.flare.rectData['centroids'][0] - screenCenterX\n ) / screenWidth\n rospy.loginfo('Area {}'.format(self.flare.rectData['area']))\n rospy.loginfo('Delta X: {}'.format(deltaX))\n if abs(deltaX) < 0.15:\n self.flare.sendMovement(forward=self.flare.forwardOffset)\n rospy.sleep(rospy.Duration(0.5))\n else:\n sidemove = math.copysign(deltaX * self.flare.deltaXMultiplier,\n deltaX)\n self.flare.sendMovement(forward=0.1, sidemove=sidemove)\n rospy.sleep(rospy.Duration(0.5))\n if self.flare.rectData['area'] > self.flare.headOnArea:\n return 'manuoevre_complete'\n return 'manuoevring'\n\n\nclass Completing(smach.State):\n\n def __init__(self, flare_task):\n smach.State.__init__(self, outcomes=['complete_complete',\n 'completing', 'aborted', 'mission_abort'])\n self.flare = flare_task\n self.count = 0\n\n def execute(self, userdata):\n if self.flare.isAborted:\n self.flare.isKilled = True\n rospy.signal_shutdown('Bye!')\n return 'aborted'\n screenWidth = self.flare.screen['width']\n screenCenterX = screenWidth / 2\n deltaX = (self.flare.rectData['centroids'][0] - screenCenterX\n ) / screenWidth\n deltaXMult = 2.0\n rospy.loginfo('Delta X:{}'.format(deltaX))\n if abs(deltaX) < 0.03:\n self.count += 1\n rospy.loginfo('Count: {}'.format(self.count))\n return 'completing'\n if self.count >= 2000:\n self.flare.sendMovement(forward=4.0)\n rospy.loginfo('Hitting the flare')\n self.flare.locomotionClient.wait_for_result()\n self.flare.sendMovement(forward=-2.0)\n self.flare.locomotionClient.wait_for_result()\n self.flare.taskComplete()\n return 'complete_complete'\n else:\n self.count = 0\n sidemove = math.copysign(deltaX * deltaXMult, deltaX)\n self.flare.sendMovement(forward=0.0, sidemove=sidemove)\n rospy.sleep(rospy.Duration(0.5))\n return 'completing'\n\n\n<mask token>\n\n\ndef flareCallback(conig, level):\n for param in 
flare.yellow_params:\n flare.yellow_params[param] = config['yellow_' + param]\n isTestMode = config['testing']\n return config\n\n\ndef normHeading(heading):\n if heading > 360:\n return heading - 360\n elif heading < 0:\n return heading + 360\n else:\n return heading\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass Disengage(smach.State):\n\n def __init__(self, flare_task):\n smach.State.__init__(self, outcomes=['start_complete',\n 'complete_outcome', 'aborted'])\n self.flare = flare_task\n\n def execute(self, userdata):\n if self.flare.isKilled:\n rospy.signal_shutdown('Bye')\n return 'aborted'\n while self.flare.isAborted:\n rospy.sleep(rospy.Duration(0.2))\n if self.flare.testing:\n self.flare.register()\n rospy.loginfo('Starting Flare')\n return 'start_complete'\n\n\nclass Search(smach.State):\n timeout = 10000\n\n def __init__(self, flare_task):\n smach.State.__init__(self, outcomes=['search_complete', 'aborted',\n 'mission_abort'])\n self.flare = flare_task\n if self.flare.testing:\n self.flare.unregisterHeading()\n\n def execute(self, userdata):\n if self.flare.isAborted:\n rospy.signal_shutdown('Bye!')\n return 'aborted'\n timecount = 0\n while not self.flare.rectData['detected']:\n if timecount > self.timeout or rospy.is_shutdown(\n ) or self.flare.isKilled:\n self.flare.abortMission()\n self.flare.failedTask()\n return 'aborted'\n self.flare.sendMovement(forward=1.0)\n rospy.sleep(rospy.Duration(0.5))\n timecount += 1\n return 'search_complete'\n\n\nclass Manuoevre(smach.State):\n\n def __init__(self, flare_task):\n smach.State.__init__(self, outcomes=['manuoevring',\n 'manuoevre_complete', 'aborted', 'mission_abort'])\n self.flare = flare_task\n self.deltaThresh = 0.15\n self.prevAngle = []\n self.count = 0\n self.flareSeen = True\n\n def execute(self, userdata):\n if self.flare.isAborted:\n rospy.signal_shutdown('Bye!')\n return 'aborted'\n screenWidth = self.flare.screen['width']\n screenCenterX = screenWidth / 2\n deltaX = (self.flare.rectData['centroids'][0] - screenCenterX\n ) / screenWidth\n rospy.loginfo('Area {}'.format(self.flare.rectData['area']))\n rospy.loginfo('Delta X: {}'.format(deltaX))\n if abs(deltaX) < 0.15:\n self.flare.sendMovement(forward=self.flare.forwardOffset)\n rospy.sleep(rospy.Duration(0.5))\n else:\n sidemove = math.copysign(deltaX * self.flare.deltaXMultiplier,\n deltaX)\n self.flare.sendMovement(forward=0.1, sidemove=sidemove)\n rospy.sleep(rospy.Duration(0.5))\n if self.flare.rectData['area'] > self.flare.headOnArea:\n return 'manuoevre_complete'\n return 'manuoevring'\n\n\nclass Completing(smach.State):\n\n def __init__(self, flare_task):\n smach.State.__init__(self, outcomes=['complete_complete',\n 'completing', 'aborted', 'mission_abort'])\n self.flare = flare_task\n self.count = 0\n\n def execute(self, userdata):\n if self.flare.isAborted:\n self.flare.isKilled = True\n rospy.signal_shutdown('Bye!')\n return 'aborted'\n screenWidth = self.flare.screen['width']\n screenCenterX = screenWidth / 2\n deltaX = (self.flare.rectData['centroids'][0] - screenCenterX\n ) / screenWidth\n deltaXMult = 2.0\n rospy.loginfo('Delta X:{}'.format(deltaX))\n if abs(deltaX) < 0.03:\n self.count += 1\n rospy.loginfo('Count: {}'.format(self.count))\n return 'completing'\n if self.count >= 2000:\n self.flare.sendMovement(forward=4.0)\n rospy.loginfo('Hitting the flare')\n self.flare.locomotionClient.wait_for_result()\n self.flare.sendMovement(forward=-2.0)\n self.flare.locomotionClient.wait_for_result()\n self.flare.taskComplete()\n return 'complete_complete'\n else:\n self.count = 0\n sidemove = math.copysign(deltaX * deltaXMult, deltaX)\n self.flare.sendMovement(forward=0.0, sidemove=sidemove)\n rospy.sleep(rospy.Duration(0.5))\n return 'completing'\n\n\n<mask token>\n\n\ndef handle_srv(req):\n global isStart\n global isAbort\n global 
locomotionGoal\n global flare\n rospy.loginfo('Flare service handled')\n if req.start_request:\n rospy.loginfo('Flare is Start')\n isStart = True\n isAbort = False\n if req.abort_reqest:\n rospy.loginfo('Flare abort received')\n isAbort = True\n isStart = False\n flare.unregister()\n return mission_to_visionResponse(isStart, isAbort)\n\n\ndef flareCallback(conig, level):\n for param in flare.yellow_params:\n flare.yellow_params[param] = config['yellow_' + param]\n isTestMode = config['testing']\n return config\n\n\ndef normHeading(heading):\n if heading > 360:\n return heading - 360\n elif heading < 0:\n return heading + 360\n else:\n return heading\n\n\n<mask token>\n",
"step-5": "#!/usr/bin/env python\n'''\nState Machine for the Flare task\n'''\n\nimport roslib\nimport rospy\nimport actionlib\nfrom rospy.timer import sleep\n\nimport smach\nimport smach_ros\n\nfrom dynamic_reconfigure.server import Server\n\nimport math\nimport os\nimport sys\n\n\nimport numpy as np\n\nfrom bbauv_msgs.msg import *\nfrom bbauv_msgs.srv import *\nfrom flare_vision import Flare\n\n#Global variables \nisStart = False\nisEnd = False\nisTestMode = False #If test mode then don't wait for mission call \nrosRate = None \nflare = None\nVisionLoopCount = 0 #Counter for number of times the image is being processed\nflareSeen = False\n\nmani_pub = None\nmovement_client = None\nlocomotionGoal = None\n\nflare_params = {'flare_area':0, 'centering_x':0, 'centering_y':0}\n\n\n#Starts off in disengage class\nclass Disengage(smach.State):\n \n def __init__(self, flare_task):\n smach.State.__init__(self, outcomes=['start_complete', 'complete_outcome', 'aborted'])\n self.flare = flare_task\n \n def execute(self, userdata):\n# self.flare.unregister()\n\n if self.flare.isKilled:\n rospy.signal_shutdown(\"Bye\")\n return 'aborted'\n\n while self.flare.isAborted:\n rospy.sleep(rospy.Duration(0.2))\n \n if self.flare.testing:\n self.flare.register()\n rospy.loginfo(\"Starting Flare\")\n \n return 'start_complete'\n \n#Searches for the flare\nclass Search(smach.State):\n timeout = 10000 #5s timeout before aborting task\n def __init__(self, flare_task):\n smach.State.__init__(self, outcomes=['search_complete', 'aborted', 'mission_abort'])\n self.flare = flare_task\n \n if self.flare.testing:\n self.flare.unregisterHeading()\n #rospy.loginfo(self.flare.curHeading)\n \n def execute(self, userdata):\n #Check for abort signal\n if self.flare.isAborted:\n rospy.signal_shutdown(\"Bye!\")\n return 'aborted'\n \n #Check if flare found or timeout already\n timecount = 0\n while not self.flare.rectData['detected']:\n if timecount > self.timeout or rospy.is_shutdown() or self.flare.isKilled:\n self.flare.abortMission()\n self.flare.failedTask();\n return 'aborted'\n self.flare.sendMovement(forward=1.0)\n rospy.sleep(rospy.Duration(0.5))\n timecount += 1\n \n return 'search_complete'\n\n#Bash towards the flare!\nclass Manuoevre(smach.State):\n def __init__(self, flare_task):\n smach.State.__init__(self, outcomes=['manuoevring', 'manuoevre_complete',\n 'aborted', 'mission_abort'])\n self.flare = flare_task\n self.deltaThresh = 0.15\n self.prevAngle = []\n self.count = 0\n self.flareSeen = True\n \n def execute(self,userdata):\n #Check for aborted signal\n if self.flare.isAborted:\n rospy.signal_shutdown(\"Bye!\")\n return 'aborted'\n \n# #Cannot detect already\n# if not self.flare.rectData['detected']:\n# self.count += 1\n# if self.count > 4:\n# self.flare.taskComplete()\n# return 'manuoevre_complete'\n \n# if not self.flare.rectData['detected'] and self.flareSeen:\n# self.flare.sendMovement(forward=2.0)\n# rospy.sleep(rospy.Duration(3))\n# self.flare.taskComplete()\n# return 'manuoevre_complete'\n \n #Get to the flare\n screenWidth = self.flare.screen['width']\n screenCenterX = screenWidth / 2\n deltaX = (self.flare.rectData['centroids'][0] - screenCenterX) / screenWidth\n #rospy.loginfo(\"Delta X {}\".format(deltaX))\n rospy.loginfo(\"Area {}\".format(self.flare.rectData['area']))\n \n #Forward if center\n rospy.loginfo(\"Delta X: {}\".format(deltaX))\n if abs(deltaX) < 0.15:\n self.flare.sendMovement(forward=self.flare.forwardOffset)\n rospy.sleep(rospy.Duration(0.5))\n else:\n #Sidemove if too far off 
center\n sidemove = math.copysign(deltaX*self.flare.deltaXMultiplier, deltaX) #Random number\n# sidemove = math.copysign(0.5, deltaX)\n self.flare.sendMovement(forward=0.10, sidemove=sidemove)\n rospy.sleep(rospy.Duration(0.5))\n \n #Shoot straight and aim\n if self.flare.rectData['area'] > self.flare.headOnArea:\n return 'manuoevre_complete'\n \n return 'manuoevring'\n\n #return 'manuoevre_complete'\n \nclass Completing(smach.State):\n def __init__(self, flare_task):\n smach.State.__init__(self, outcomes=['complete_complete', 'completing',\n 'aborted', 'mission_abort'])\n self.flare = flare_task\n self.count = 0\n \n def execute(self,userdata):\n #Check for aborted signal\n if self.flare.isAborted:\n self.flare.isKilled = True\n rospy.signal_shutdown(\"Bye!\")\n return 'aborted'\n \n screenWidth = self.flare.screen['width']\n screenCenterX = screenWidth / 2\n deltaX = (self.flare.rectData['centroids'][0] - screenCenterX) / screenWidth\n \n deltaXMult =2.0\n rospy.loginfo(\"Delta X:{}\".format(deltaX))\n \n if abs(deltaX) < 0.03:\n self.count += 1\n rospy.loginfo(\"Count: {}\".format(self.count))\n return 'completing'\n \n if self.count >= 2000:\n self.flare.sendMovement(forward=4.0)\n rospy.loginfo(\"Hitting the flare\")\n self.flare.locomotionClient.wait_for_result()\n self.flare.sendMovement(forward=-2.0) #Retract\n self.flare.locomotionClient.wait_for_result()\n self.flare.taskComplete()\n return 'complete_complete'\n \n else:\n self.count = 0\n sidemove = math.copysign(deltaX*deltaXMult, deltaX) #Random number\n self.flare.sendMovement(forward=0.00, sidemove=sidemove)\n rospy.sleep(rospy.Duration(0.5))\n return 'completing'\n\n #self.flare.taskComplete()\n #return 'complete_complete'\n\n'''\nMain python thread\n'''\n \ndef handle_srv(req):\n global isStart\n global isAbort\n global locomotionGoal\n global flare\n \n rospy.loginfo(\"Flare service handled\")\n \n if req.start_request:\n rospy.loginfo(\"Flare is Start\")\n isStart = True\n isAbort = False \n #locomotionGoal = req.start_ctrl\n if req.abort_reqest:\n rospy.loginfo(\"Flare abort received\")\n isAbort = True\n isStart = False\n flare.unregister()\n \n #To fill accordingly\n return mission_to_visionResponse(isStart, isAbort)\n \n#Param config callback\ndef flareCallback(conig, level):\n for param in flare.yellow_params:\n flare.yellow_params[param] = config['yellow_' + param]\n isTestMode = config[\"testing\"]\n return config\n\n#Utility function for normalising heading \ndef normHeading(heading):\n if heading > 360:\n return heading - 360\n elif heading < 0:\n return heading + 360\n else:\n return heading \n\nif __name__ == '__main__':\n rospy.init_node(\"Flare\", anonymous=False)\n rosRate = rospy.Rate(20)\n flare_task = Flare()\n rospy.loginfo(\"Flare loaded!\")\n \n #Create state machine container \n sm = smach.StateMachine(outcomes=['complete_flare', 'aborted'])\n \n #Disengage, Search, Manuoevre\n with sm:\n smach.StateMachine.add(\"DISENGAGE\", Disengage(flare_task),\n transitions={'start_complete': \"SEARCH\", \n 'complete_outcome': 'complete_flare', \n 'aborted': 'aborted'})\n \n smach.StateMachine.add(\"SEARCH\", Search(flare_task),\n transitions={'search_complete': \"MANUOEVRE\", 'aborted': 'aborted', \n 'mission_abort': \"DISENGAGE\"})\n \n smach.StateMachine.add(\"MANUOEVRE\", Manuoevre(flare_task),\n transitions = {'manuoevring': \"MANUOEVRE\",\n 'manuoevre_complete': \"COMPLETING\",\n 'aborted': 'aborted',\n 'mission_abort': \"DISENGAGE\"})\n \n smach.StateMachine.add(\"COMPLETING\", Completing(flare_task),\n 
transitions = {'complete_complete': \"DISENGAGE\",\n 'completing': \"COMPLETING\",\n 'aborted': 'aborted',\n 'mission_abort': \"DISENGAGE\"})\n \n sis = smach_ros.IntrospectionServer('flare_task', sm, '/SM_ROOT')\n sis.start()\n outcomes = sm.execute()\n \n #wait for ctrl-c\n rospy.spin()\n sis.stop()\n \n",
"step-ids": [
12,
13,
15,
16,
20
]
}
|
[
12,
13,
15,
16,
20
] |
from common import *
import serial
CMD_BAUD = chr(129)
BAUD_RATES = [300, 600, 1200, 2400, 4800, 9600, 14400, 19200, 28800, 38400, 57600, 115200]
class Communication(Module):
def __init__(self, parent, port_name, baud_rate):
self.parent = parent
if not isinstance(port_name, str):
raise Exception("Port name must be a string.")
if not isinstance(baud_rate, int):
raise Exception("Baud rate must be an integer.")
if baud_rate not in BAUD_RATES:
raise Exception("%d is not a valid baud rate; check the SCI Specification for acceptable values." % baud_rate)
self.port = serial.Serial(port_name, baud_rate)
def send(self, data):
if not isinstance(data, str):
raise Exception("Data must be a string.")
self.port.write(data)
def receive(self, length):
if not isinstance(length, int):
raise Exception("Receive length must be an integer.")
return self.port.read(length)
_port = None
@property
def port(self):
return self._port
@port.setter
def port(self, value):
self._port = value
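
# A minimal usage sketch (the port path and the single-byte opcode below are
# hypothetical examples, not defined by this module):
#
#   comm = Communication(parent=None, port_name='/dev/ttyUSB0', baud_rate=57600)
#   comm.send(chr(128))       # transmit one command byte
#   reply = comm.receive(1)   # block until one byte is read back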
|
normal
|
{
"blob_id": "eab5bf4776582349615ad56ee1ed93bc8f868565",
"index": 768,
"step-1": "<mask token>\n\n\nclass Communication(Module):\n <mask token>\n <mask token>\n\n def receive(self, length):\n if not isinstance(length, int):\n raise Exception('Receive length must be an integer.')\n return self.port.read(length)\n <mask token>\n\n @property\n def port(self):\n return self._port\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Communication(Module):\n\n def __init__(self, parent, port_name, baud_rate):\n self.parent = parent\n if not isinstance(port_name, str):\n raise Exception('Port name must be a string.')\n if not isinstance(baud_rate, int):\n raise Exception('Baud rate must be an integer.')\n if baud_rate not in BAUD_RATES:\n raise Exception(\n '%d is not a valid baud rate; check the SCI Specification for acceptable values.'\n % baud_rate)\n self.port = serial.Serial(port_name, baud_rate)\n\n def send(self, data):\n if not isinstance(data, str):\n raise Exception('Data must be a string.')\n self.port.write(data)\n\n def receive(self, length):\n if not isinstance(length, int):\n raise Exception('Receive length must be an integer.')\n return self.port.read(length)\n <mask token>\n\n @property\n def port(self):\n return self._port\n\n @port.setter\n def port(self, value):\n self._port = value\n",
"step-3": "<mask token>\n\n\nclass Communication(Module):\n\n def __init__(self, parent, port_name, baud_rate):\n self.parent = parent\n if not isinstance(port_name, str):\n raise Exception('Port name must be a string.')\n if not isinstance(baud_rate, int):\n raise Exception('Baud rate must be an integer.')\n if baud_rate not in BAUD_RATES:\n raise Exception(\n '%d is not a valid baud rate; check the SCI Specification for acceptable values.'\n % baud_rate)\n self.port = serial.Serial(port_name, baud_rate)\n\n def send(self, data):\n if not isinstance(data, str):\n raise Exception('Data must be a string.')\n self.port.write(data)\n\n def receive(self, length):\n if not isinstance(length, int):\n raise Exception('Receive length must be an integer.')\n return self.port.read(length)\n _port = None\n\n @property\n def port(self):\n return self._port\n\n @port.setter\n def port(self, value):\n self._port = value\n",
"step-4": "from common import *\nimport serial\nCMD_BAUD = chr(129)\nBAUD_RATES = [300, 600, 1200, 2400, 4800, 9600, 14400, 19200, 28800, 38400,\n 57600, 115200]\n\n\nclass Communication(Module):\n\n def __init__(self, parent, port_name, baud_rate):\n self.parent = parent\n if not isinstance(port_name, str):\n raise Exception('Port name must be a string.')\n if not isinstance(baud_rate, int):\n raise Exception('Baud rate must be an integer.')\n if baud_rate not in BAUD_RATES:\n raise Exception(\n '%d is not a valid baud rate; check the SCI Specification for acceptable values.'\n % baud_rate)\n self.port = serial.Serial(port_name, baud_rate)\n\n def send(self, data):\n if not isinstance(data, str):\n raise Exception('Data must be a string.')\n self.port.write(data)\n\n def receive(self, length):\n if not isinstance(length, int):\n raise Exception('Receive length must be an integer.')\n return self.port.read(length)\n _port = None\n\n @property\n def port(self):\n return self._port\n\n @port.setter\n def port(self, value):\n self._port = value\n",
"step-5": "from common import *\n\nimport serial\n\nCMD_BAUD = chr(129)\n\nBAUD_RATES = [300, 600, 1200, 2400, 4800, 9600, 14400, 19200, 28800, 38400, 57600, 115200]\n\nclass Communication(Module):\n def __init__(self, parent, port_name, baud_rate):\n self.parent = parent\n\n if not isinstance(port_name, str):\n raise Exception(\"Port name must be a string.\")\n if not isinstance(baud_rate, int):\n raise Exception(\"Baud rate must be an integer.\")\n if baud_rate not in BAUD_RATES:\n raise Exception(\"%d is not a valid baud rate; check the SCI Specification for acceptable values.\" % baud_rate)\n\n self.port = serial.Serial(port_name, baud_rate)\n\n def send(self, data):\n if not isinstance(data, str):\n raise Exception(\"Data must be a string.\")\n self.port.write(data)\n\n def receive(self, length):\n if not isinstance(length, int):\n raise Exception(\"Receive length must be an integer.\")\n return self.port.read(length)\n\n _port = None\n @property\n def port(self):\n return self._port\n @port.setter\n def port(self, value):\n self._port = value\n",
"step-ids": [
3,
6,
7,
9,
10
]
}
|
[
3,
6,
7,
9,
10
] |
<|reserved_special_token_0|>
def process(trace_dir, out_dir):
trace_files = os.listdir(trace_dir)
trace_files = sorted(trace_files)
if trace_files[0] == 'error.log':
print('Rotating to properly order logs.')
trace_files = collections.deque(trace_files)
trace_files.rotate(-1)
    full_trace = ''
all_lines = ''
for file_name in trace_files:
print('Processing: ' + str(file_name))
with open(os.path.join(trace_dir, file_name), 'rb') as f:
for line in f:
try:
all_lines += line.decode('utf-8')
except UnicodeDecodeError:
print('weird text')
full_trace = re.sub('(?<!\\r)\\n', '\r\n\r\n', all_lines)
"""
Is the issue with the input or my processing?
tmp_file = open('full_trace.json', 'wb')
json.dump(full_trace, tmp_file)
tmp_file.close()
INPUT Issue
"""
print('Collecting raw sessions')
raw_sessions = dict()
full_trace_iterator = iter(full_trace.splitlines(full_trace.count('\n')))
for line in full_trace_iterator:
send_recv = re.findall('(SEND|RECV)', line)
ipv4_port = re.findall('[0-9]+(?:\\.[0-9]+){3}:[0-9]+', line)
if ipv4_port:
port = re.findall(':[0-9]+$', ipv4_port[0])
if port:
if port[0] == ':443' or port[0] == ':80':
continue
if send_recv and ipv4_port:
ip_port_key = ipv4_port[0]
this_trace = line
while True:
try:
next_line = next(full_trace_iterator)
this_trace += next_line
end_trace = re.findall('\\[End Trace\\]', next_line)
if end_trace:
break
except Exception as e:
print(e)
break
if ip_port_key not in raw_sessions:
raw_sessions[ip_port_key] = this_trace
print(ip_port_key)
else:
raw_sessions[ip_port_key] += this_trace
print('Constructing session JSONs')
session_JSONs = dict()
for session, raw_traces in raw_sessions.items():
session_JSONs[session] = dict()
session_JSONs[session]['version'] = PROCESSOR_VERSION
session_JSONs[session]['encoding'] = 'url_encoded'
raw_text = ''
timestamp = ''
timestamp_list = list()
for line in raw_traces.splitlines(raw_traces.count('\n')):
trace_line = re.findall('^\\d{8}\\.\\d{2}h\\d{2}m\\d{2}s', line)
timestamp = re.findall('\\[\\d{10}\\.\\d{3}\\]', line)
if timestamp:
timestamp_list.append(timestamp[0][1:-1])
if not trace_line:
raw_text += line
session_JSONs[session]['timestamp'] = timestamp_list[0]
count = -1
delimiter = '\r\n\r\n'
is_request_chunk = True
raw_text_chunks = iter(raw_text.split(delimiter))
session_JSONs[session]['txns'] = list()
for chunk in raw_text_chunks:
request_chunk = re.findall('^\\S+\\s/\\S+\\sHTTP/\\d\\.\\d\\r\\n',
chunk)
response_chunk = re.findall(
'^HTTP/\\d\\.\\d\\s\\d{3}\\s[\\s\\S]+\\r\\n', chunk)
if request_chunk:
count += 1
                is_request_chunk = True
chunk += delimiter
if count <= len(session_JSONs[session]['txns']):
session_JSONs[session]['txns'].append(dict())
session_JSONs[session]['txns'][count]['request'] = dict()
session_JSONs[session]['txns'][count]['request']['timestamp'
] = timestamp_list[count - 1]
session_JSONs[session]['txns'][count]['request']['headers'
] = chunk
session_JSONs[session]['txns'][count]['uuid'] = uuid.uuid4(
).hex
elif response_chunk:
is_request_chunk = False
chunk += delimiter
if count <= len(session_JSONs[session]['txns']):
session_JSONs[session]['txns'].append(dict())
session_JSONs[session]['txns'][count]['response'] = dict()
session_JSONs[session]['txns'][count]['response']['timestamp'
] = timestamp_list[count - 1]
session_JSONs[session]['txns'][count]['response']['headers'
] = chunk
else:
try:
if count == -1:
continue
chunk = urllib.parse.quote(chunk)
if is_request_chunk:
if 'body' not in session_JSONs[session]['txns'][count][
'request']:
session_JSONs[session]['txns'][count]['request'][
'body'] = chunk
else:
session_JSONs[session]['txns'][count]['request'][
'body'] += chunk
elif 'body' not in session_JSONs[session]['txns'][count][
'response']:
session_JSONs[session]['txns'][count]['response'][
'body'] = chunk
else:
session_JSONs[session]['txns'][count]['response'][
'body'] += chunk
except KeyError as k:
continue
print(len(session_JSONs[session]['txns']))
session_JSONs[session]['txns'] = list(filter(bool, session_JSONs[
session]['txns']))
if len(session_JSONs[session]['txns']) == 0:
del session_JSONs[session]
unicode_errors = 0
print('Writing sessions to disk')
out_files = dict()
for session, data in session_JSONs.items():
out_files[session] = open(os.path.join(out_dir, 'session_' + str(
session)) + '.json', 'w')
try:
json.dump(data, out_files[session])
out_files[session].close()
except:
unicode_errors += 1
out_files[session].close()
os.remove(os.path.join(out_dir, 'session_' + str(session)) +
'.json')
print(str(unicode_errors) + ' unicode errors')
def main(argv):
if len(argv) != 3:
print('Script to preprocess trace logs for client.')
print("Outputs JSONs to directory 'sessions'")
print('Usage: python ' + str(argv[0]) +
' <in directory> <out directory>')
return
if not os.path.isdir(argv[1]):
print(str(argv[1]) + ' is not a directory. Aborting.')
return
if not os.path.exists(argv[2]):
os.makedirs(argv[2])
else:
print(str(argv[2]) +
' already exists, choose another output directory!')
return
t1 = time.time()
process(argv[1], argv[2])
t2 = time.time()
print('time taken:', t2 - t1)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def process(trace_dir, out_dir):
trace_files = os.listdir(trace_dir)
trace_files = sorted(trace_files)
if trace_files[0] == 'error.log':
print('Rotating to properly order logs.')
trace_files = collections.deque(trace_files)
trace_files.rotate(-1)
    full_trace = ''
all_lines = ''
for file_name in trace_files:
print('Processing: ' + str(file_name))
with open(os.path.join(trace_dir, file_name), 'rb') as f:
for line in f:
try:
all_lines += line.decode('utf-8')
except UnicodeDecodeError:
print('weird text')
full_trace = re.sub('(?<!\\r)\\n', '\r\n\r\n', all_lines)
"""
Is the issue with the input or my processing?
tmp_file = open('full_trace.json', 'wb')
json.dump(full_trace, tmp_file)
tmp_file.close()
INPUT Issue
"""
print('Collecting raw sessions')
raw_sessions = dict()
full_trace_iterator = iter(full_trace.splitlines(full_trace.count('\n')))
for line in full_trace_iterator:
send_recv = re.findall('(SEND|RECV)', line)
ipv4_port = re.findall('[0-9]+(?:\\.[0-9]+){3}:[0-9]+', line)
if ipv4_port:
port = re.findall(':[0-9]+$', ipv4_port[0])
if port:
if port[0] == ':443' or port[0] == ':80':
continue
if send_recv and ipv4_port:
ip_port_key = ipv4_port[0]
this_trace = line
while True:
try:
next_line = next(full_trace_iterator)
this_trace += next_line
end_trace = re.findall('\\[End Trace\\]', next_line)
if end_trace:
break
except Exception as e:
print(e)
break
if ip_port_key not in raw_sessions:
raw_sessions[ip_port_key] = this_trace
print(ip_port_key)
else:
raw_sessions[ip_port_key] += this_trace
print('Constructing session JSONs')
session_JSONs = dict()
for session, raw_traces in raw_sessions.items():
session_JSONs[session] = dict()
session_JSONs[session]['version'] = PROCESSOR_VERSION
session_JSONs[session]['encoding'] = 'url_encoded'
raw_text = ''
timestamp = ''
timestamp_list = list()
for line in raw_traces.splitlines(raw_traces.count('\n')):
trace_line = re.findall('^\\d{8}\\.\\d{2}h\\d{2}m\\d{2}s', line)
timestamp = re.findall('\\[\\d{10}\\.\\d{3}\\]', line)
if timestamp:
timestamp_list.append(timestamp[0][1:-1])
if not trace_line:
raw_text += line
session_JSONs[session]['timestamp'] = timestamp_list[0]
count = -1
delimiter = '\r\n\r\n'
is_request_chunk = True
raw_text_chunks = iter(raw_text.split(delimiter))
session_JSONs[session]['txns'] = list()
for chunk in raw_text_chunks:
request_chunk = re.findall('^\\S+\\s/\\S+\\sHTTP/\\d\\.\\d\\r\\n',
chunk)
response_chunk = re.findall(
'^HTTP/\\d\\.\\d\\s\\d{3}\\s[\\s\\S]+\\r\\n', chunk)
if request_chunk:
count += 1
                is_request_chunk = True
chunk += delimiter
if count <= len(session_JSONs[session]['txns']):
session_JSONs[session]['txns'].append(dict())
session_JSONs[session]['txns'][count]['request'] = dict()
session_JSONs[session]['txns'][count]['request']['timestamp'
] = timestamp_list[count - 1]
session_JSONs[session]['txns'][count]['request']['headers'
] = chunk
session_JSONs[session]['txns'][count]['uuid'] = uuid.uuid4(
).hex
elif response_chunk:
is_request_chunk = False
chunk += delimiter
if count <= len(session_JSONs[session]['txns']):
session_JSONs[session]['txns'].append(dict())
session_JSONs[session]['txns'][count]['response'] = dict()
session_JSONs[session]['txns'][count]['response']['timestamp'
] = timestamp_list[count - 1]
session_JSONs[session]['txns'][count]['response']['headers'
] = chunk
else:
try:
if count == -1:
continue
chunk = urllib.parse.quote(chunk)
if is_request_chunk:
if 'body' not in session_JSONs[session]['txns'][count][
'request']:
session_JSONs[session]['txns'][count]['request'][
'body'] = chunk
else:
session_JSONs[session]['txns'][count]['request'][
'body'] += chunk
elif 'body' not in session_JSONs[session]['txns'][count][
'response']:
session_JSONs[session]['txns'][count]['response'][
'body'] = chunk
else:
session_JSONs[session]['txns'][count]['response'][
'body'] += chunk
except KeyError as k:
continue
print(len(session_JSONs[session]['txns']))
session_JSONs[session]['txns'] = list(filter(bool, session_JSONs[
session]['txns']))
if len(session_JSONs[session]['txns']) == 0:
del session_JSONs[session]
unicode_errors = 0
print('Writing sessions to disk')
out_files = dict()
for session, data in session_JSONs.items():
out_files[session] = open(os.path.join(out_dir, 'session_' + str(
session)) + '.json', 'w')
try:
json.dump(data, out_files[session])
out_files[session].close()
except:
unicode_errors += 1
out_files[session].close()
os.remove(os.path.join(out_dir, 'session_' + str(session)) +
'.json')
print(str(unicode_errors) + ' unicode errors')
def main(argv):
if len(argv) != 3:
print('Script to preprocess trace logs for client.')
print("Outputs JSONs to directory 'sessions'")
print('Usage: python ' + str(argv[0]) +
' <in directory> <out directory>')
return
if not os.path.isdir(argv[1]):
print(str(argv[1]) + ' is not a directory. Aborting.')
return
if not os.path.exists(argv[2]):
os.makedirs(argv[2])
else:
print(str(argv[2]) +
' already exists, choose another output directory!')
return
t1 = time.time()
process(argv[1], argv[2])
t2 = time.time()
print('time taken:', t2 - t1)
if __name__ == '__main__':
main(sys.argv)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
PROCESSOR_VERSION = '0.1'
def process(trace_dir, out_dir):
trace_files = os.listdir(trace_dir)
trace_files = sorted(trace_files)
if trace_files[0] == 'error.log':
print('Rotating to properly order logs.')
trace_files = collections.deque(trace_files)
trace_files.rotate(-1)
    full_trace = ''
all_lines = ''
for file_name in trace_files:
print('Processing: ' + str(file_name))
with open(os.path.join(trace_dir, file_name), 'rb') as f:
for line in f:
try:
all_lines += line.decode('utf-8')
except UnicodeDecodeError:
print('weird text')
full_trace = re.sub('(?<!\\r)\\n', '\r\n\r\n', all_lines)
"""
Is the issue with the input or my processing?
tmp_file = open('full_trace.json', 'wb')
json.dump(full_trace, tmp_file)
tmp_file.close()
INPUT Issue
"""
print('Collecting raw sessions')
raw_sessions = dict()
full_trace_iterator = iter(full_trace.splitlines(full_trace.count('\n')))
for line in full_trace_iterator:
send_recv = re.findall('(SEND|RECV)', line)
ipv4_port = re.findall('[0-9]+(?:\\.[0-9]+){3}:[0-9]+', line)
if ipv4_port:
port = re.findall(':[0-9]+$', ipv4_port[0])
if port:
if port[0] == ':443' or port[0] == ':80':
continue
if send_recv and ipv4_port:
ip_port_key = ipv4_port[0]
this_trace = line
while True:
try:
next_line = next(full_trace_iterator)
this_trace += next_line
end_trace = re.findall('\\[End Trace\\]', next_line)
if end_trace:
break
except Exception as e:
print(e)
break
if ip_port_key not in raw_sessions:
raw_sessions[ip_port_key] = this_trace
print(ip_port_key)
else:
raw_sessions[ip_port_key] += this_trace
print('Constructing session JSONs')
session_JSONs = dict()
for session, raw_traces in raw_sessions.items():
session_JSONs[session] = dict()
session_JSONs[session]['version'] = PROCESSOR_VERSION
session_JSONs[session]['encoding'] = 'url_encoded'
raw_text = ''
timestamp = ''
timestamp_list = list()
for line in raw_traces.splitlines(raw_traces.count('\n')):
trace_line = re.findall('^\\d{8}\\.\\d{2}h\\d{2}m\\d{2}s', line)
timestamp = re.findall('\\[\\d{10}\\.\\d{3}\\]', line)
if timestamp:
timestamp_list.append(timestamp[0][1:-1])
if not trace_line:
raw_text += line
session_JSONs[session]['timestamp'] = timestamp_list[0]
count = -1
delimiter = '\r\n\r\n'
is_request_chunk = True
raw_text_chunks = iter(raw_text.split(delimiter))
session_JSONs[session]['txns'] = list()
for chunk in raw_text_chunks:
request_chunk = re.findall('^\\S+\\s/\\S+\\sHTTP/\\d\\.\\d\\r\\n',
chunk)
response_chunk = re.findall(
'^HTTP/\\d\\.\\d\\s\\d{3}\\s[\\s\\S]+\\r\\n', chunk)
if request_chunk:
count += 1
                is_request_chunk = True
chunk += delimiter
if count <= len(session_JSONs[session]['txns']):
session_JSONs[session]['txns'].append(dict())
session_JSONs[session]['txns'][count]['request'] = dict()
session_JSONs[session]['txns'][count]['request']['timestamp'
] = timestamp_list[count - 1]
session_JSONs[session]['txns'][count]['request']['headers'
] = chunk
session_JSONs[session]['txns'][count]['uuid'] = uuid.uuid4(
).hex
elif response_chunk:
is_request_chunk = False
chunk += delimiter
if count <= len(session_JSONs[session]['txns']):
session_JSONs[session]['txns'].append(dict())
session_JSONs[session]['txns'][count]['response'] = dict()
session_JSONs[session]['txns'][count]['response']['timestamp'
] = timestamp_list[count - 1]
session_JSONs[session]['txns'][count]['response']['headers'
] = chunk
else:
try:
if count == -1:
continue
chunk = urllib.parse.quote(chunk)
if is_request_chunk:
if 'body' not in session_JSONs[session]['txns'][count][
'request']:
session_JSONs[session]['txns'][count]['request'][
'body'] = chunk
else:
session_JSONs[session]['txns'][count]['request'][
'body'] += chunk
elif 'body' not in session_JSONs[session]['txns'][count][
'response']:
session_JSONs[session]['txns'][count]['response'][
'body'] = chunk
else:
session_JSONs[session]['txns'][count]['response'][
'body'] += chunk
except KeyError as k:
continue
print(len(session_JSONs[session]['txns']))
session_JSONs[session]['txns'] = list(filter(bool, session_JSONs[
session]['txns']))
if len(session_JSONs[session]['txns']) == 0:
del session_JSONs[session]
unicode_errors = 0
print('Writing sessions to disk')
out_files = dict()
for session, data in session_JSONs.items():
out_files[session] = open(os.path.join(out_dir, 'session_' + str(
session)) + '.json', 'w')
try:
json.dump(data, out_files[session])
out_files[session].close()
except:
unicode_errors += 1
out_files[session].close()
os.remove(os.path.join(out_dir, 'session_' + str(session)) +
'.json')
print(str(unicode_errors) + ' unicode errors')
def main(argv):
if len(argv) != 3:
print('Script to preprocess trace logs for client.')
print("Outputs JSONs to directory 'sessions'")
print('Usage: python ' + str(argv[0]) +
' <in directory> <out directory>')
return
if not os.path.isdir(argv[1]):
print(str(argv[1]) + ' is not a directory. Aborting.')
return
if not os.path.exists(argv[2]):
os.makedirs(argv[2])
else:
print(str(argv[2]) +
' already exists, choose another output directory!')
return
t1 = time.time()
process(argv[1], argv[2])
t2 = time.time()
print('time taken:', t2 - t1)
if __name__ == '__main__':
main(sys.argv)
<|reserved_special_token_1|>
import sys
import os
import collections
import re
import json
import urllib
import urllib.request
import urllib.parse
import uuid
import time
PROCESSOR_VERSION = '0.1'
def process(trace_dir, out_dir):
trace_files = os.listdir(trace_dir)
trace_files = sorted(trace_files)
if trace_files[0] == 'error.log':
print('Rotating to properly order logs.')
trace_files = collections.deque(trace_files)
trace_files.rotate(-1)
    full_trace = ''
all_lines = ''
for file_name in trace_files:
print('Processing: ' + str(file_name))
with open(os.path.join(trace_dir, file_name), 'rb') as f:
for line in f:
try:
all_lines += line.decode('utf-8')
except UnicodeDecodeError:
print('weird text')
full_trace = re.sub('(?<!\\r)\\n', '\r\n\r\n', all_lines)
"""
Is the issue with the input or my processing?
tmp_file = open('full_trace.json', 'wb')
json.dump(full_trace, tmp_file)
tmp_file.close()
INPUT Issue
"""
print('Collecting raw sessions')
raw_sessions = dict()
full_trace_iterator = iter(full_trace.splitlines(full_trace.count('\n')))
for line in full_trace_iterator:
send_recv = re.findall('(SEND|RECV)', line)
ipv4_port = re.findall('[0-9]+(?:\\.[0-9]+){3}:[0-9]+', line)
if ipv4_port:
port = re.findall(':[0-9]+$', ipv4_port[0])
if port:
if port[0] == ':443' or port[0] == ':80':
continue
if send_recv and ipv4_port:
ip_port_key = ipv4_port[0]
this_trace = line
while True:
try:
next_line = next(full_trace_iterator)
this_trace += next_line
end_trace = re.findall('\\[End Trace\\]', next_line)
if end_trace:
break
except Exception as e:
print(e)
break
if ip_port_key not in raw_sessions:
raw_sessions[ip_port_key] = this_trace
print(ip_port_key)
else:
raw_sessions[ip_port_key] += this_trace
print('Constructing session JSONs')
session_JSONs = dict()
for session, raw_traces in raw_sessions.items():
session_JSONs[session] = dict()
session_JSONs[session]['version'] = PROCESSOR_VERSION
session_JSONs[session]['encoding'] = 'url_encoded'
raw_text = ''
timestamp = ''
timestamp_list = list()
for line in raw_traces.splitlines(raw_traces.count('\n')):
trace_line = re.findall('^\\d{8}\\.\\d{2}h\\d{2}m\\d{2}s', line)
timestamp = re.findall('\\[\\d{10}\\.\\d{3}\\]', line)
if timestamp:
timestamp_list.append(timestamp[0][1:-1])
if not trace_line:
raw_text += line
session_JSONs[session]['timestamp'] = timestamp_list[0]
count = -1
delimiter = '\r\n\r\n'
is_request_chunk = True
raw_text_chunks = iter(raw_text.split(delimiter))
session_JSONs[session]['txns'] = list()
for chunk in raw_text_chunks:
request_chunk = re.findall('^\\S+\\s/\\S+\\sHTTP/\\d\\.\\d\\r\\n',
chunk)
response_chunk = re.findall(
'^HTTP/\\d\\.\\d\\s\\d{3}\\s[\\s\\S]+\\r\\n', chunk)
if request_chunk:
count += 1
                is_request_chunk = True
chunk += delimiter
if count <= len(session_JSONs[session]['txns']):
session_JSONs[session]['txns'].append(dict())
session_JSONs[session]['txns'][count]['request'] = dict()
session_JSONs[session]['txns'][count]['request']['timestamp'
] = timestamp_list[count - 1]
session_JSONs[session]['txns'][count]['request']['headers'
] = chunk
session_JSONs[session]['txns'][count]['uuid'] = uuid.uuid4(
).hex
elif response_chunk:
is_request_chunk = False
chunk += delimiter
if count <= len(session_JSONs[session]['txns']):
session_JSONs[session]['txns'].append(dict())
session_JSONs[session]['txns'][count]['response'] = dict()
session_JSONs[session]['txns'][count]['response']['timestamp'
] = timestamp_list[count - 1]
session_JSONs[session]['txns'][count]['response']['headers'
] = chunk
else:
try:
if count == -1:
continue
chunk = urllib.parse.quote(chunk)
if is_request_chunk:
if 'body' not in session_JSONs[session]['txns'][count][
'request']:
session_JSONs[session]['txns'][count]['request'][
'body'] = chunk
else:
session_JSONs[session]['txns'][count]['request'][
'body'] += chunk
elif 'body' not in session_JSONs[session]['txns'][count][
'response']:
session_JSONs[session]['txns'][count]['response'][
'body'] = chunk
else:
session_JSONs[session]['txns'][count]['response'][
'body'] += chunk
except KeyError as k:
continue
print(len(session_JSONs[session]['txns']))
session_JSONs[session]['txns'] = list(filter(bool, session_JSONs[
session]['txns']))
if len(session_JSONs[session]['txns']) == 0:
del session_JSONs[session]
unicode_errors = 0
print('Writing sessions to disk')
out_files = dict()
for session, data in session_JSONs.items():
out_files[session] = open(os.path.join(out_dir, 'session_' + str(
session)) + '.json', 'w')
try:
json.dump(data, out_files[session])
out_files[session].close()
except:
unicode_errors += 1
out_files[session].close()
os.remove(os.path.join(out_dir, 'session_' + str(session)) +
'.json')
print(str(unicode_errors) + ' unicode errors')
def main(argv):
if len(argv) != 3:
print('Script to preprocess trace logs for client.')
print("Outputs JSONs to directory 'sessions'")
print('Usage: python ' + str(argv[0]) +
' <in directory> <out directory>')
return
if not os.path.isdir(argv[1]):
print(str(argv[1]) + ' is not a directory. Aborting.')
return
if not os.path.exists(argv[2]):
os.makedirs(argv[2])
else:
print(str(argv[2]) +
' already exists, choose another output directory!')
return
t1 = time.time()
process(argv[1], argv[2])
t2 = time.time()
print('time taken:', t2 - t1)
if __name__ == '__main__':
main(sys.argv)
<|reserved_special_token_1|>
#!/usr/bin/env python
import sys
import os
import collections
import re
import json
import urllib
import urllib.request
import urllib.parse
import uuid
import time
PROCESSOR_VERSION = "0.1"
def process(trace_dir, out_dir):
#order files
trace_files = os.listdir(trace_dir)
trace_files = sorted(trace_files)
if trace_files[0] == "error.log": #we need to do this in case the last traces are in an error log file that wasn't rotated yet
print ("Rotating to properly order logs.")
trace_files = collections.deque(trace_files)
trace_files.rotate(-1)
#combine
full_trace = b""
all_lines= ""
for file_name in trace_files:
print ("Processing: " + str(file_name))
with open(os.path.join(trace_dir, file_name), "rb") as f:
for line in f:
try:
#print(line.decode('utf-8'))
all_lines += line.decode('utf-8')
except UnicodeDecodeError:
print("weird text")
# let's fix any pesky solitary \n's (these are at the end of all the bodies)
full_trace = re.sub(r'(?<!\r)\n', '\r\n\r\n', all_lines)
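    # e.g. a chunk ending in a bare "\n" becomes "...\r\n\r\n", while the negative
    # lookbehind (?<!\r) leaves existing "\r\n" line endings untouched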
'''
Is the issue with the input or my processing?
tmp_file = open('full_trace.json', 'wb')
json.dump(full_trace, tmp_file)
tmp_file.close()
INPUT Issue
'''
#do the first step of preprocessing, getting raw sessions
print( "Collecting raw sessions")
raw_sessions = dict()
full_trace_iterator = iter(full_trace.splitlines(full_trace.count('\n')))
for line in full_trace_iterator:
#TODO IPv6
#TODO Responses (we get them but do we want to do this a different way)
send_recv = re.findall(r'(SEND|RECV)', line)
ipv4_port = re.findall(r'[0-9]+(?:\.[0-9]+){3}:[0-9]+', line)
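        # e.g. a trace line containing "SEND 10.0.0.5:51324" (an example address,
        # not from any real trace) would be keyed under "10.0.0.5:51324" below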
if ipv4_port:
port = re.findall(r':[0-9]+$', ipv4_port[0])
if port:
if port[0] == ":443" or port[0] == ":80":
continue # we don't want the server conn side stuff yet
if send_recv and ipv4_port:
ip_port_key = ipv4_port[0]
this_trace = line
while True:
try:
next_line = next(full_trace_iterator)
this_trace += next_line
end_trace = re.findall(r'\[End Trace\]', next_line)
if end_trace:
break
except Exception as e:
#reached the end of the file
print( e)
break
if ip_port_key not in raw_sessions:
raw_sessions[ip_port_key] = this_trace
print(ip_port_key)
else:
raw_sessions[ip_port_key] += this_trace
#do the second step of preprocessing, getting JSONs from raw sessions
print( "Constructing session JSONs")
session_JSONs = dict()
for session, raw_traces in raw_sessions.items():
#basic data
session_JSONs[session] = dict()
session_JSONs[session]["version"] = PROCESSOR_VERSION
session_JSONs[session]["encoding"] = "url_encoded"
# let's get the raw text from the traces
raw_text = ""
timestamp = ""
timestamp_list = list()
for line in raw_traces.splitlines(raw_traces.count('\n')):
trace_line = re.findall(r'^\d{8}\.\d{2}h\d{2}m\d{2}s', line)
timestamp = re.findall(r'\[\d{10}\.\d{3}\]', line)
if timestamp:
timestamp_list.append(timestamp[0][1:-1])
if not trace_line:
raw_text += line
#get session start timestamp
session_JSONs[session]["timestamp"] = timestamp_list[0]
# let's parse out requests and responses
count = -1
delimiter = "\r\n\r\n"
is_request_chunk = True
raw_text_chunks = iter(raw_text.split(delimiter))
session_JSONs[session]["txns"] = list()
for chunk in raw_text_chunks:
#check if each chunk is request or response if it is do so accordingly
#otherwise append it to the previous chunk's data
request_chunk = re.findall(r'^\S+\s/\S+\sHTTP/\d\.\d\r\n', chunk)
response_chunk = re.findall(r'^HTTP/\d\.\d\s\d{3}\s[\s\S]+\r\n', chunk)
if request_chunk:
count += 1
                is_request_chunk = True
chunk += delimiter
if count <= len(session_JSONs[session]["txns"]):
session_JSONs[session]["txns"].append(dict())
session_JSONs[session]["txns"][count]["request"] = dict()
session_JSONs[session]["txns"][count]["request"]["timestamp"] = timestamp_list[count - 1]
session_JSONs[session]["txns"][count]["request"]["headers"] = chunk
session_JSONs[session]["txns"][count]["uuid"] = uuid.uuid4().hex
elif response_chunk:
is_request_chunk = False
chunk += delimiter
if count <= len(session_JSONs[session]["txns"]):
session_JSONs[session]["txns"].append(dict())
session_JSONs[session]["txns"][count]["response"] = dict()
session_JSONs[session]["txns"][count]["response"]["timestamp"] = timestamp_list[count - 1]
session_JSONs[session]["txns"][count]["response"]["headers"] = chunk
else: #is body chunk
try:
if count == -1: continue #if we have garbage at the front
chunk = urllib.parse.quote(chunk)
if is_request_chunk:
if "body" not in session_JSONs[session]["txns"][count]["request"]:
session_JSONs[session]["txns"][count]["request"]["body"] = chunk
else:
session_JSONs[session]["txns"][count]["request"]["body"] += chunk
else:
if "body" not in session_JSONs[session]["txns"][count]["response"]:
session_JSONs[session]["txns"][count]["response"]["body"] = chunk
else:
session_JSONs[session]["txns"][count]["response"]["body"] += chunk
except KeyError as k:
                    continue # for now we're dropping malformed bodies; we won't be able to do that once we're validating. We might have to edit the wiretracing code to give us better delimiters here, since parsing right now isn't particularly straightforward
print(len(session_JSONs[session]["txns"]))
session_JSONs[session]["txns"] = list(filter(bool, session_JSONs[session]["txns"]))
if len(session_JSONs[session]["txns"]) == 0:
del session_JSONs[session]
#write out
unicode_errors = 0
print( "Writing sessions to disk")
out_files = dict()
for session, data in session_JSONs.items():
out_files[session] = open(os.path.join(out_dir, 'session_' + str(session)) + '.json', 'w')
try:
json.dump(data, out_files[session])
out_files[session].close()
except:
unicode_errors += 1
out_files[session].close()
os.remove(os.path.join(out_dir, 'session_' + str(session)) + '.json')
print( str(unicode_errors) + " unicode errors")
def main(argv):
if len(argv) != 3:
print( "Script to preprocess trace logs for client.")
print( "Outputs JSONs to directory 'sessions'")
print( "Usage: python " + str(argv[0]) + " <in directory> <out directory>")
return
if not os.path.isdir(argv[1]):
print( str(argv[1]) + " is not a directory. Aborting.")
return
if not os.path.exists(argv[2]):
os.makedirs(argv[2])
else:
print( str(argv[2]) + " already exists, choose another output directory!")
return
t1=time.time()
process(argv[1], argv[2])
t2=time.time()
print("time taken:",(t2-t1))
if __name__ == "__main__":
main(sys.argv)
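
# Example invocation (script and directory names are hypothetical):
#   python preprocess.py ./trace_logs ./sessions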
|
flexible
|
{
"blob_id": "4b83887e8d8e5c5dc7065354d24044d3c3a48714",
"index": 3387,
"step-1": "<mask token>\n\n\ndef process(trace_dir, out_dir):\n trace_files = os.listdir(trace_dir)\n trace_files = sorted(trace_files)\n if trace_files[0] == 'error.log':\n print('Rotating to properly order logs.')\n trace_files = collections.deque(trace_files)\n trace_files.rotate(-1)\n full_trace = b''\n all_lines = ''\n for file_name in trace_files:\n print('Processing: ' + str(file_name))\n with open(os.path.join(trace_dir, file_name), 'rb') as f:\n for line in f:\n try:\n all_lines += line.decode('utf-8')\n except UnicodeDecodeError:\n print('weird text')\n full_trace = re.sub('(?<!\\\\r)\\\\n', '\\r\\n\\r\\n', all_lines)\n \"\"\"\n Is the issue with the input or my processing? \n tmp_file = open('full_trace.json', 'wb')\n json.dump(full_trace, tmp_file)\n tmp_file.close()\n INPUT Issue\n \"\"\"\n print('Collecting raw sessions')\n raw_sessions = dict()\n full_trace_iterator = iter(full_trace.splitlines(full_trace.count('\\n')))\n for line in full_trace_iterator:\n send_recv = re.findall('(SEND|RECV)', line)\n ipv4_port = re.findall('[0-9]+(?:\\\\.[0-9]+){3}:[0-9]+', line)\n if ipv4_port:\n port = re.findall(':[0-9]+$', ipv4_port[0])\n if port:\n if port[0] == ':443' or port[0] == ':80':\n continue\n if send_recv and ipv4_port:\n ip_port_key = ipv4_port[0]\n this_trace = line\n while True:\n try:\n next_line = next(full_trace_iterator)\n this_trace += next_line\n end_trace = re.findall('\\\\[End Trace\\\\]', next_line)\n if end_trace:\n break\n except Exception as e:\n print(e)\n break\n if ip_port_key not in raw_sessions:\n raw_sessions[ip_port_key] = this_trace\n print(ip_port_key)\n else:\n raw_sessions[ip_port_key] += this_trace\n print('Constructing session JSONs')\n session_JSONs = dict()\n for session, raw_traces in raw_sessions.items():\n session_JSONs[session] = dict()\n session_JSONs[session]['version'] = PROCESSOR_VERSION\n session_JSONs[session]['encoding'] = 'url_encoded'\n raw_text = ''\n timestamp = ''\n timestamp_list = list()\n for line in raw_traces.splitlines(raw_traces.count('\\n')):\n trace_line = re.findall('^\\\\d{8}\\\\.\\\\d{2}h\\\\d{2}m\\\\d{2}s', line)\n timestamp = re.findall('\\\\[\\\\d{10}\\\\.\\\\d{3}\\\\]', line)\n if timestamp:\n timestamp_list.append(timestamp[0][1:-1])\n if not trace_line:\n raw_text += line\n session_JSONs[session]['timestamp'] = timestamp_list[0]\n count = -1\n delimiter = '\\r\\n\\r\\n'\n is_request_chunk = True\n raw_text_chunks = iter(raw_text.split(delimiter))\n session_JSONs[session]['txns'] = list()\n for chunk in raw_text_chunks:\n request_chunk = re.findall('^\\\\S+\\\\s/\\\\S+\\\\sHTTP/\\\\d\\\\.\\\\d\\\\r\\\\n',\n chunk)\n response_chunk = re.findall(\n '^HTTP/\\\\d\\\\.\\\\d\\\\s\\\\d{3}\\\\s[\\\\s\\\\S]+\\\\r\\\\n', chunk)\n if request_chunk:\n count += 1\n is_reqeust_chunk = True\n chunk += delimiter\n if count <= len(session_JSONs[session]['txns']):\n session_JSONs[session]['txns'].append(dict())\n session_JSONs[session]['txns'][count]['request'] = dict()\n session_JSONs[session]['txns'][count]['request']['timestamp'\n ] = timestamp_list[count - 1]\n session_JSONs[session]['txns'][count]['request']['headers'\n ] = chunk\n session_JSONs[session]['txns'][count]['uuid'] = uuid.uuid4(\n ).hex\n elif response_chunk:\n is_request_chunk = False\n chunk += delimiter\n if count <= len(session_JSONs[session]['txns']):\n session_JSONs[session]['txns'].append(dict())\n session_JSONs[session]['txns'][count]['response'] = dict()\n session_JSONs[session]['txns'][count]['response']['timestamp'\n ] = timestamp_list[count - 1]\n 
session_JSONs[session]['txns'][count]['response']['headers'\n ] = chunk\n else:\n try:\n if count == -1:\n continue\n chunk = urllib.parse.quote(chunk)\n if is_request_chunk:\n if 'body' not in session_JSONs[session]['txns'][count][\n 'request']:\n session_JSONs[session]['txns'][count]['request'][\n 'body'] = chunk\n else:\n session_JSONs[session]['txns'][count]['request'][\n 'body'] += chunk\n elif 'body' not in session_JSONs[session]['txns'][count][\n 'response']:\n session_JSONs[session]['txns'][count]['response'][\n 'body'] = chunk\n else:\n session_JSONs[session]['txns'][count]['response'][\n 'body'] += chunk\n except KeyError as k:\n continue\n print(len(session_JSONs[session]['txns']))\n session_JSONs[session]['txns'] = list(filter(bool, session_JSONs[\n session]['txns']))\n if len(session_JSONs[session]['txns']) == 0:\n del session_JSONs[session]\n unicode_errors = 0\n print('Writing sessions to disk')\n out_files = dict()\n for session, data in session_JSONs.items():\n out_files[session] = open(os.path.join(out_dir, 'session_' + str(\n session)) + '.json', 'w')\n try:\n json.dump(data, out_files[session])\n out_files[session].close()\n except:\n unicode_errors += 1\n out_files[session].close()\n os.remove(os.path.join(out_dir, 'session_' + str(session)) +\n '.json')\n print(str(unicode_errors) + ' unicode errors')\n\n\ndef main(argv):\n if len(argv) != 3:\n print('Script to preprocess trace logs for client.')\n print(\"Outputs JSONs to directory 'sessions'\")\n print('Usage: python ' + str(argv[0]) +\n ' <in directory> <out directory>')\n return\n if not os.path.isdir(argv[1]):\n print(str(argv[1]) + ' is not a directory. Aborting.')\n return\n if not os.path.exists(argv[2]):\n os.makedirs(argv[2])\n else:\n print(str(argv[2]) +\n ' already exists, choose another output directory!')\n return\n t1 = time.time()\n process(argv[1], argv[2])\n t2 = time.time()\n print('time taken:', t2 - t1)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef process(trace_dir, out_dir):\n trace_files = os.listdir(trace_dir)\n trace_files = sorted(trace_files)\n if trace_files[0] == 'error.log':\n print('Rotating to properly order logs.')\n trace_files = collections.deque(trace_files)\n trace_files.rotate(-1)\n full_trace = b''\n all_lines = ''\n for file_name in trace_files:\n print('Processing: ' + str(file_name))\n with open(os.path.join(trace_dir, file_name), 'rb') as f:\n for line in f:\n try:\n all_lines += line.decode('utf-8')\n except UnicodeDecodeError:\n print('weird text')\n full_trace = re.sub('(?<!\\\\r)\\\\n', '\\r\\n\\r\\n', all_lines)\n \"\"\"\n Is the issue with the input or my processing? \n tmp_file = open('full_trace.json', 'wb')\n json.dump(full_trace, tmp_file)\n tmp_file.close()\n INPUT Issue\n \"\"\"\n print('Collecting raw sessions')\n raw_sessions = dict()\n full_trace_iterator = iter(full_trace.splitlines(full_trace.count('\\n')))\n for line in full_trace_iterator:\n send_recv = re.findall('(SEND|RECV)', line)\n ipv4_port = re.findall('[0-9]+(?:\\\\.[0-9]+){3}:[0-9]+', line)\n if ipv4_port:\n port = re.findall(':[0-9]+$', ipv4_port[0])\n if port:\n if port[0] == ':443' or port[0] == ':80':\n continue\n if send_recv and ipv4_port:\n ip_port_key = ipv4_port[0]\n this_trace = line\n while True:\n try:\n next_line = next(full_trace_iterator)\n this_trace += next_line\n end_trace = re.findall('\\\\[End Trace\\\\]', next_line)\n if end_trace:\n break\n except Exception as e:\n print(e)\n break\n if ip_port_key not in raw_sessions:\n raw_sessions[ip_port_key] = this_trace\n print(ip_port_key)\n else:\n raw_sessions[ip_port_key] += this_trace\n print('Constructing session JSONs')\n session_JSONs = dict()\n for session, raw_traces in raw_sessions.items():\n session_JSONs[session] = dict()\n session_JSONs[session]['version'] = PROCESSOR_VERSION\n session_JSONs[session]['encoding'] = 'url_encoded'\n raw_text = ''\n timestamp = ''\n timestamp_list = list()\n for line in raw_traces.splitlines(raw_traces.count('\\n')):\n trace_line = re.findall('^\\\\d{8}\\\\.\\\\d{2}h\\\\d{2}m\\\\d{2}s', line)\n timestamp = re.findall('\\\\[\\\\d{10}\\\\.\\\\d{3}\\\\]', line)\n if timestamp:\n timestamp_list.append(timestamp[0][1:-1])\n if not trace_line:\n raw_text += line\n session_JSONs[session]['timestamp'] = timestamp_list[0]\n count = -1\n delimiter = '\\r\\n\\r\\n'\n is_request_chunk = True\n raw_text_chunks = iter(raw_text.split(delimiter))\n session_JSONs[session]['txns'] = list()\n for chunk in raw_text_chunks:\n request_chunk = re.findall('^\\\\S+\\\\s/\\\\S+\\\\sHTTP/\\\\d\\\\.\\\\d\\\\r\\\\n',\n chunk)\n response_chunk = re.findall(\n '^HTTP/\\\\d\\\\.\\\\d\\\\s\\\\d{3}\\\\s[\\\\s\\\\S]+\\\\r\\\\n', chunk)\n if request_chunk:\n count += 1\n is_reqeust_chunk = True\n chunk += delimiter\n if count <= len(session_JSONs[session]['txns']):\n session_JSONs[session]['txns'].append(dict())\n session_JSONs[session]['txns'][count]['request'] = dict()\n session_JSONs[session]['txns'][count]['request']['timestamp'\n ] = timestamp_list[count - 1]\n session_JSONs[session]['txns'][count]['request']['headers'\n ] = chunk\n session_JSONs[session]['txns'][count]['uuid'] = uuid.uuid4(\n ).hex\n elif response_chunk:\n is_request_chunk = False\n chunk += delimiter\n if count <= len(session_JSONs[session]['txns']):\n session_JSONs[session]['txns'].append(dict())\n session_JSONs[session]['txns'][count]['response'] = dict()\n session_JSONs[session]['txns'][count]['response']['timestamp'\n ] = timestamp_list[count - 1]\n 
session_JSONs[session]['txns'][count]['response']['headers'\n ] = chunk\n else:\n try:\n if count == -1:\n continue\n chunk = urllib.parse.quote(chunk)\n if is_request_chunk:\n if 'body' not in session_JSONs[session]['txns'][count][\n 'request']:\n session_JSONs[session]['txns'][count]['request'][\n 'body'] = chunk\n else:\n session_JSONs[session]['txns'][count]['request'][\n 'body'] += chunk\n elif 'body' not in session_JSONs[session]['txns'][count][\n 'response']:\n session_JSONs[session]['txns'][count]['response'][\n 'body'] = chunk\n else:\n session_JSONs[session]['txns'][count]['response'][\n 'body'] += chunk\n except KeyError as k:\n continue\n print(len(session_JSONs[session]['txns']))\n session_JSONs[session]['txns'] = list(filter(bool, session_JSONs[\n session]['txns']))\n if len(session_JSONs[session]['txns']) == 0:\n del session_JSONs[session]\n unicode_errors = 0\n print('Writing sessions to disk')\n out_files = dict()\n for session, data in session_JSONs.items():\n out_files[session] = open(os.path.join(out_dir, 'session_' + str(\n session)) + '.json', 'w')\n try:\n json.dump(data, out_files[session])\n out_files[session].close()\n except:\n unicode_errors += 1\n out_files[session].close()\n os.remove(os.path.join(out_dir, 'session_' + str(session)) +\n '.json')\n print(str(unicode_errors) + ' unicode errors')\n\n\ndef main(argv):\n if len(argv) != 3:\n print('Script to preprocess trace logs for client.')\n print(\"Outputs JSONs to directory 'sessions'\")\n print('Usage: python ' + str(argv[0]) +\n ' <in directory> <out directory>')\n return\n if not os.path.isdir(argv[1]):\n print(str(argv[1]) + ' is not a directory. Aborting.')\n return\n if not os.path.exists(argv[2]):\n os.makedirs(argv[2])\n else:\n print(str(argv[2]) +\n ' already exists, choose another output directory!')\n return\n t1 = time.time()\n process(argv[1], argv[2])\n t2 = time.time()\n print('time taken:', t2 - t1)\n\n\nif __name__ == '__main__':\n main(sys.argv)\n",
"step-3": "<mask token>\nPROCESSOR_VERSION = '0.1'\n\n\ndef process(trace_dir, out_dir):\n trace_files = os.listdir(trace_dir)\n trace_files = sorted(trace_files)\n if trace_files[0] == 'error.log':\n print('Rotating to properly order logs.')\n trace_files = collections.deque(trace_files)\n trace_files.rotate(-1)\n full_trace = b''\n all_lines = ''\n for file_name in trace_files:\n print('Processing: ' + str(file_name))\n with open(os.path.join(trace_dir, file_name), 'rb') as f:\n for line in f:\n try:\n all_lines += line.decode('utf-8')\n except UnicodeDecodeError:\n print('weird text')\n full_trace = re.sub('(?<!\\\\r)\\\\n', '\\r\\n\\r\\n', all_lines)\n \"\"\"\n Is the issue with the input or my processing? \n tmp_file = open('full_trace.json', 'wb')\n json.dump(full_trace, tmp_file)\n tmp_file.close()\n INPUT Issue\n \"\"\"\n print('Collecting raw sessions')\n raw_sessions = dict()\n full_trace_iterator = iter(full_trace.splitlines(full_trace.count('\\n')))\n for line in full_trace_iterator:\n send_recv = re.findall('(SEND|RECV)', line)\n ipv4_port = re.findall('[0-9]+(?:\\\\.[0-9]+){3}:[0-9]+', line)\n if ipv4_port:\n port = re.findall(':[0-9]+$', ipv4_port[0])\n if port:\n if port[0] == ':443' or port[0] == ':80':\n continue\n if send_recv and ipv4_port:\n ip_port_key = ipv4_port[0]\n this_trace = line\n while True:\n try:\n next_line = next(full_trace_iterator)\n this_trace += next_line\n end_trace = re.findall('\\\\[End Trace\\\\]', next_line)\n if end_trace:\n break\n except Exception as e:\n print(e)\n break\n if ip_port_key not in raw_sessions:\n raw_sessions[ip_port_key] = this_trace\n print(ip_port_key)\n else:\n raw_sessions[ip_port_key] += this_trace\n print('Constructing session JSONs')\n session_JSONs = dict()\n for session, raw_traces in raw_sessions.items():\n session_JSONs[session] = dict()\n session_JSONs[session]['version'] = PROCESSOR_VERSION\n session_JSONs[session]['encoding'] = 'url_encoded'\n raw_text = ''\n timestamp = ''\n timestamp_list = list()\n for line in raw_traces.splitlines(raw_traces.count('\\n')):\n trace_line = re.findall('^\\\\d{8}\\\\.\\\\d{2}h\\\\d{2}m\\\\d{2}s', line)\n timestamp = re.findall('\\\\[\\\\d{10}\\\\.\\\\d{3}\\\\]', line)\n if timestamp:\n timestamp_list.append(timestamp[0][1:-1])\n if not trace_line:\n raw_text += line\n session_JSONs[session]['timestamp'] = timestamp_list[0]\n count = -1\n delimiter = '\\r\\n\\r\\n'\n is_request_chunk = True\n raw_text_chunks = iter(raw_text.split(delimiter))\n session_JSONs[session]['txns'] = list()\n for chunk in raw_text_chunks:\n request_chunk = re.findall('^\\\\S+\\\\s/\\\\S+\\\\sHTTP/\\\\d\\\\.\\\\d\\\\r\\\\n',\n chunk)\n response_chunk = re.findall(\n '^HTTP/\\\\d\\\\.\\\\d\\\\s\\\\d{3}\\\\s[\\\\s\\\\S]+\\\\r\\\\n', chunk)\n if request_chunk:\n count += 1\n is_reqeust_chunk = True\n chunk += delimiter\n if count <= len(session_JSONs[session]['txns']):\n session_JSONs[session]['txns'].append(dict())\n session_JSONs[session]['txns'][count]['request'] = dict()\n session_JSONs[session]['txns'][count]['request']['timestamp'\n ] = timestamp_list[count - 1]\n session_JSONs[session]['txns'][count]['request']['headers'\n ] = chunk\n session_JSONs[session]['txns'][count]['uuid'] = uuid.uuid4(\n ).hex\n elif response_chunk:\n is_request_chunk = False\n chunk += delimiter\n if count <= len(session_JSONs[session]['txns']):\n session_JSONs[session]['txns'].append(dict())\n session_JSONs[session]['txns'][count]['response'] = dict()\n session_JSONs[session]['txns'][count]['response']['timestamp'\n ] = 
timestamp_list[count - 1]\n session_JSONs[session]['txns'][count]['response']['headers'\n ] = chunk\n else:\n try:\n if count == -1:\n continue\n chunk = urllib.parse.quote(chunk)\n if is_request_chunk:\n if 'body' not in session_JSONs[session]['txns'][count][\n 'request']:\n session_JSONs[session]['txns'][count]['request'][\n 'body'] = chunk\n else:\n session_JSONs[session]['txns'][count]['request'][\n 'body'] += chunk\n elif 'body' not in session_JSONs[session]['txns'][count][\n 'response']:\n session_JSONs[session]['txns'][count]['response'][\n 'body'] = chunk\n else:\n session_JSONs[session]['txns'][count]['response'][\n 'body'] += chunk\n except KeyError as k:\n continue\n print(len(session_JSONs[session]['txns']))\n session_JSONs[session]['txns'] = list(filter(bool, session_JSONs[\n session]['txns']))\n if len(session_JSONs[session]['txns']) == 0:\n del session_JSONs[session]\n unicode_errors = 0\n print('Writing sessions to disk')\n out_files = dict()\n for session, data in session_JSONs.items():\n out_files[session] = open(os.path.join(out_dir, 'session_' + str(\n session)) + '.json', 'w')\n try:\n json.dump(data, out_files[session])\n out_files[session].close()\n except:\n unicode_errors += 1\n out_files[session].close()\n os.remove(os.path.join(out_dir, 'session_' + str(session)) +\n '.json')\n print(str(unicode_errors) + ' unicode errors')\n\n\ndef main(argv):\n if len(argv) != 3:\n print('Script to preprocess trace logs for client.')\n print(\"Outputs JSONs to directory 'sessions'\")\n print('Usage: python ' + str(argv[0]) +\n ' <in directory> <out directory>')\n return\n if not os.path.isdir(argv[1]):\n print(str(argv[1]) + ' is not a directory. Aborting.')\n return\n if not os.path.exists(argv[2]):\n os.makedirs(argv[2])\n else:\n print(str(argv[2]) +\n ' already exists, choose another output directory!')\n return\n t1 = time.time()\n process(argv[1], argv[2])\n t2 = time.time()\n print('time taken:', t2 - t1)\n\n\nif __name__ == '__main__':\n main(sys.argv)\n",
"step-4": "import sys\nimport os\nimport collections\nimport re\nimport json\nimport urllib\nimport urllib.request\nimport uuid\nimport time\nPROCESSOR_VERSION = '0.1'\n\n\ndef process(trace_dir, out_dir):\n trace_files = os.listdir(trace_dir)\n trace_files = sorted(trace_files)\n if trace_files[0] == 'error.log':\n print('Rotating to properly order logs.')\n trace_files = collections.deque(trace_files)\n trace_files.rotate(-1)\n full_trace = b''\n all_lines = ''\n for file_name in trace_files:\n print('Processing: ' + str(file_name))\n with open(os.path.join(trace_dir, file_name), 'rb') as f:\n for line in f:\n try:\n all_lines += line.decode('utf-8')\n except UnicodeDecodeError:\n print('weird text')\n full_trace = re.sub('(?<!\\\\r)\\\\n', '\\r\\n\\r\\n', all_lines)\n \"\"\"\n Is the issue with the input or my processing? \n tmp_file = open('full_trace.json', 'wb')\n json.dump(full_trace, tmp_file)\n tmp_file.close()\n INPUT Issue\n \"\"\"\n print('Collecting raw sessions')\n raw_sessions = dict()\n full_trace_iterator = iter(full_trace.splitlines(full_trace.count('\\n')))\n for line in full_trace_iterator:\n send_recv = re.findall('(SEND|RECV)', line)\n ipv4_port = re.findall('[0-9]+(?:\\\\.[0-9]+){3}:[0-9]+', line)\n if ipv4_port:\n port = re.findall(':[0-9]+$', ipv4_port[0])\n if port:\n if port[0] == ':443' or port[0] == ':80':\n continue\n if send_recv and ipv4_port:\n ip_port_key = ipv4_port[0]\n this_trace = line\n while True:\n try:\n next_line = next(full_trace_iterator)\n this_trace += next_line\n end_trace = re.findall('\\\\[End Trace\\\\]', next_line)\n if end_trace:\n break\n except Exception as e:\n print(e)\n break\n if ip_port_key not in raw_sessions:\n raw_sessions[ip_port_key] = this_trace\n print(ip_port_key)\n else:\n raw_sessions[ip_port_key] += this_trace\n print('Constructing session JSONs')\n session_JSONs = dict()\n for session, raw_traces in raw_sessions.items():\n session_JSONs[session] = dict()\n session_JSONs[session]['version'] = PROCESSOR_VERSION\n session_JSONs[session]['encoding'] = 'url_encoded'\n raw_text = ''\n timestamp = ''\n timestamp_list = list()\n for line in raw_traces.splitlines(raw_traces.count('\\n')):\n trace_line = re.findall('^\\\\d{8}\\\\.\\\\d{2}h\\\\d{2}m\\\\d{2}s', line)\n timestamp = re.findall('\\\\[\\\\d{10}\\\\.\\\\d{3}\\\\]', line)\n if timestamp:\n timestamp_list.append(timestamp[0][1:-1])\n if not trace_line:\n raw_text += line\n session_JSONs[session]['timestamp'] = timestamp_list[0]\n count = -1\n delimiter = '\\r\\n\\r\\n'\n is_request_chunk = True\n raw_text_chunks = iter(raw_text.split(delimiter))\n session_JSONs[session]['txns'] = list()\n for chunk in raw_text_chunks:\n request_chunk = re.findall('^\\\\S+\\\\s/\\\\S+\\\\sHTTP/\\\\d\\\\.\\\\d\\\\r\\\\n',\n chunk)\n response_chunk = re.findall(\n '^HTTP/\\\\d\\\\.\\\\d\\\\s\\\\d{3}\\\\s[\\\\s\\\\S]+\\\\r\\\\n', chunk)\n if request_chunk:\n count += 1\n is_reqeust_chunk = True\n chunk += delimiter\n if count <= len(session_JSONs[session]['txns']):\n session_JSONs[session]['txns'].append(dict())\n session_JSONs[session]['txns'][count]['request'] = dict()\n session_JSONs[session]['txns'][count]['request']['timestamp'\n ] = timestamp_list[count - 1]\n session_JSONs[session]['txns'][count]['request']['headers'\n ] = chunk\n session_JSONs[session]['txns'][count]['uuid'] = uuid.uuid4(\n ).hex\n elif response_chunk:\n is_request_chunk = False\n chunk += delimiter\n if count <= len(session_JSONs[session]['txns']):\n session_JSONs[session]['txns'].append(dict())\n 
session_JSONs[session]['txns'][count]['response'] = dict()\n session_JSONs[session]['txns'][count]['response']['timestamp'\n ] = timestamp_list[count - 1]\n session_JSONs[session]['txns'][count]['response']['headers'\n ] = chunk\n else:\n try:\n if count == -1:\n continue\n chunk = urllib.parse.quote(chunk)\n if is_request_chunk:\n if 'body' not in session_JSONs[session]['txns'][count][\n 'request']:\n session_JSONs[session]['txns'][count]['request'][\n 'body'] = chunk\n else:\n session_JSONs[session]['txns'][count]['request'][\n 'body'] += chunk\n elif 'body' not in session_JSONs[session]['txns'][count][\n 'response']:\n session_JSONs[session]['txns'][count]['response'][\n 'body'] = chunk\n else:\n session_JSONs[session]['txns'][count]['response'][\n 'body'] += chunk\n except KeyError as k:\n continue\n print(len(session_JSONs[session]['txns']))\n session_JSONs[session]['txns'] = list(filter(bool, session_JSONs[\n session]['txns']))\n if len(session_JSONs[session]['txns']) == 0:\n del session_JSONs[session]\n unicode_errors = 0\n print('Writing sessions to disk')\n out_files = dict()\n for session, data in session_JSONs.items():\n out_files[session] = open(os.path.join(out_dir, 'session_' + str(\n session)) + '.json', 'w')\n try:\n json.dump(data, out_files[session])\n out_files[session].close()\n except:\n unicode_errors += 1\n out_files[session].close()\n os.remove(os.path.join(out_dir, 'session_' + str(session)) +\n '.json')\n print(str(unicode_errors) + ' unicode errors')\n\n\ndef main(argv):\n if len(argv) != 3:\n print('Script to preprocess trace logs for client.')\n print(\"Outputs JSONs to directory 'sessions'\")\n print('Usage: python ' + str(argv[0]) +\n ' <in directory> <out directory>')\n return\n if not os.path.isdir(argv[1]):\n print(str(argv[1]) + ' is not a directory. Aborting.')\n return\n if not os.path.exists(argv[2]):\n os.makedirs(argv[2])\n else:\n print(str(argv[2]) +\n ' already exists, choose another output directory!')\n return\n t1 = time.time()\n process(argv[1], argv[2])\n t2 = time.time()\n print('time taken:', t2 - t1)\n\n\nif __name__ == '__main__':\n main(sys.argv)\n",
"step-5": "#!/bin/env python\n\nimport sys\nimport os\nimport collections\nimport re\nimport json\nimport urllib\nimport urllib.request\nimport uuid\nimport time\nPROCESSOR_VERSION = \"0.1\"\n\ndef process(trace_dir, out_dir):\n #order files\n trace_files = os.listdir(trace_dir)\n trace_files = sorted(trace_files)\n if trace_files[0] == \"error.log\": #we need to do this in case the last traces are in an error log file that wasn't rotated yet\n print (\"Rotating to properly order logs.\")\n trace_files = collections.deque(trace_files)\n trace_files.rotate(-1)\n\n #combine\n full_trace = b\"\"\n all_lines= \"\"\n for file_name in trace_files:\n print (\"Processing: \" + str(file_name))\n with open(os.path.join(trace_dir, file_name), \"rb\") as f:\n for line in f:\n try:\n #print(line.decode('utf-8'))\n all_lines += line.decode('utf-8')\n except UnicodeDecodeError:\n print(\"weird text\")\n # let's fix any pesky solitary \\n's (these are at the end of all the bodies)\n full_trace = re.sub(r'(?<!\\r)\\n', '\\r\\n\\r\\n', all_lines)\n \n '''\n Is the issue with the input or my processing? \n tmp_file = open('full_trace.json', 'wb')\n json.dump(full_trace, tmp_file)\n tmp_file.close()\n INPUT Issue\n '''\n\n #do the first step of preprocessing, getting raw sessions\n print( \"Collecting raw sessions\")\n raw_sessions = dict()\n full_trace_iterator = iter(full_trace.splitlines(full_trace.count('\\n')))\n for line in full_trace_iterator:\n #TODO IPv6\n #TODO Responses (we get them but do we want to do this a different way)\n send_recv = re.findall(r'(SEND|RECV)', line)\n ipv4_port = re.findall(r'[0-9]+(?:\\.[0-9]+){3}:[0-9]+', line)\n if ipv4_port:\n port = re.findall(r':[0-9]+$', ipv4_port[0])\n if port:\n if port[0] == \":443\" or port[0] == \":80\":\n continue # we don't want the server conn side stuff yet\n if send_recv and ipv4_port:\n ip_port_key = ipv4_port[0]\n this_trace = line\n while True:\n try:\n next_line = next(full_trace_iterator)\n this_trace += next_line\n end_trace = re.findall(r'\\[End Trace\\]', next_line)\n if end_trace:\n break\n except Exception as e:\n #reached the end of the file\n print( e)\n break\n\n if ip_port_key not in raw_sessions:\n raw_sessions[ip_port_key] = this_trace\n print(ip_port_key)\n else:\n raw_sessions[ip_port_key] += this_trace\n\n #do the second step of preprocessing, getting JSONs from raw sessions\n print( \"Constructing session JSONs\")\n session_JSONs = dict()\n for session, raw_traces in raw_sessions.items():\n #basic data\n session_JSONs[session] = dict()\n session_JSONs[session][\"version\"] = PROCESSOR_VERSION\n session_JSONs[session][\"encoding\"] = \"url_encoded\"\n\n # let's get the raw text from the traces\n raw_text = \"\"\n timestamp = \"\"\n timestamp_list = list()\n for line in raw_traces.splitlines(raw_traces.count('\\n')):\n trace_line = re.findall(r'^\\d{8}\\.\\d{2}h\\d{2}m\\d{2}s', line)\n timestamp = re.findall(r'\\[\\d{10}\\.\\d{3}\\]', line)\n if timestamp:\n timestamp_list.append(timestamp[0][1:-1])\n if not trace_line:\n raw_text += line\n \n #get session start timestamp\n session_JSONs[session][\"timestamp\"] = timestamp_list[0]\n \n # let's parse out requests and responses\n count = -1\n delimiter = \"\\r\\n\\r\\n\"\n is_request_chunk = True\n raw_text_chunks = iter(raw_text.split(delimiter))\n session_JSONs[session][\"txns\"] = list()\n for chunk in raw_text_chunks:\n #check if each chunk is request or response if it is do so accordingly\n #otherwise append it to the previous chunk's data\n request_chunk = 
re.findall(r'^\\S+\\s/\\S+\\sHTTP/\\d\\.\\d\\r\\n', chunk)\n response_chunk = re.findall(r'^HTTP/\\d\\.\\d\\s\\d{3}\\s[\\s\\S]+\\r\\n', chunk)\n if request_chunk:\n count += 1\n is_reqeust_chunk = True\n chunk += delimiter\n if count <= len(session_JSONs[session][\"txns\"]):\n session_JSONs[session][\"txns\"].append(dict())\n session_JSONs[session][\"txns\"][count][\"request\"] = dict()\n session_JSONs[session][\"txns\"][count][\"request\"][\"timestamp\"] = timestamp_list[count - 1] \n session_JSONs[session][\"txns\"][count][\"request\"][\"headers\"] = chunk\n session_JSONs[session][\"txns\"][count][\"uuid\"] = uuid.uuid4().hex\n elif response_chunk:\n is_request_chunk = False\n chunk += delimiter\n if count <= len(session_JSONs[session][\"txns\"]):\n session_JSONs[session][\"txns\"].append(dict())\n session_JSONs[session][\"txns\"][count][\"response\"] = dict()\n session_JSONs[session][\"txns\"][count][\"response\"][\"timestamp\"] = timestamp_list[count - 1] \n session_JSONs[session][\"txns\"][count][\"response\"][\"headers\"] = chunk\n else: #is body chunk\n try:\n if count == -1: continue #if we have garbage at the front\n chunk = urllib.parse.quote(chunk)\n if is_request_chunk:\n if \"body\" not in session_JSONs[session][\"txns\"][count][\"request\"]:\n session_JSONs[session][\"txns\"][count][\"request\"][\"body\"] = chunk\n else:\n session_JSONs[session][\"txns\"][count][\"request\"][\"body\"] += chunk\n else:\n if \"body\" not in session_JSONs[session][\"txns\"][count][\"response\"]:\n session_JSONs[session][\"txns\"][count][\"response\"][\"body\"] = chunk\n else:\n session_JSONs[session][\"txns\"][count][\"response\"][\"body\"] += chunk\n except KeyError as k:\n continue # for now we're dropping malformed bodies. will not be able to do this when we're validating. might have to go edit wiretracing code to give us better delimiters here for parsing. right now isn't particularly straightforward\n print(len(session_JSONs[session][\"txns\"]))\n session_JSONs[session][\"txns\"] = list(filter(bool, session_JSONs[session][\"txns\"]))\n if len(session_JSONs[session][\"txns\"]) == 0:\n del session_JSONs[session] \n\n #write out\n unicode_errors = 0\n print( \"Writing sessions to disk\")\n out_files = dict()\n for session, data in session_JSONs.items():\n out_files[session] = open(os.path.join(out_dir, 'session_' + str(session)) + '.json', 'w')\n try:\n json.dump(data, out_files[session])\n out_files[session].close() \n except:\n unicode_errors += 1\n out_files[session].close()\n os.remove(os.path.join(out_dir, 'session_' + str(session)) + '.json') \n\n print( str(unicode_errors) + \" unicode errors\")\n\ndef main(argv):\n if len(argv) != 3:\n print( \"Script to preprocess trace logs for client.\")\n print( \"Outputs JSONs to directory 'sessions'\")\n print( \"Usage: python \" + str(argv[0]) + \" <in directory> <out directory>\")\n return\n\n if not os.path.isdir(argv[1]):\n print( str(argv[1]) + \" is not a directory. Aborting.\")\n return\n if not os.path.exists(argv[2]):\n os.makedirs(argv[2])\n else:\n print( str(argv[2]) + \" already exists, choose another output directory!\")\n return\n t1=time.time()\n process(argv[1], argv[2])\n t2=time.time()\n print(\"time taken:\",(t2-t1))\nif __name__ == \"__main__\":\n main(sys.argv)\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
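
Worth flagging in the record above: the step-3 through step-5 variants assign `is_reqeust_chunk = True` (note the typo) in the request branch, so `is_request_chunk` can never flip back to True after a response, and request bodies that arrive later in a session get filed under 'response'. Below is a minimal sketch of the chunk classifier with that fixed, assuming the same delimiting regexes as the record; `classify_chunk` and the demo chunks are illustrative, not part of the original script.

import re

# Same request/response delimiters as the record above.
REQUEST_RE = re.compile(r'^\S+\s/\S+\sHTTP/\d\.\d\r\n')
RESPONSE_RE = re.compile(r'^HTTP/\d\.\d\s\d{3}\s[\s\S]+\r\n')

def classify_chunk(chunk, is_request_chunk):
    """Return (kind, updated_flag) for one delimiter-split chunk."""
    if REQUEST_RE.match(chunk):
        return 'request', True   # corrected: flip the flag back to True
    if RESPONSE_RE.match(chunk):
        return 'response', False
    # Body chunks inherit the side of the last header block seen.
    return 'body', is_request_chunk

flag = True
for c in ['GET /a HTTP/1.1\r\nHost: x\r\n', 'payload',
          'HTTP/1.1 200 OK\r\nServer: y\r\n', 'payload']:
    kind, flag = classify_chunk(c, flag)
    print(kind, flag)
# prints: request True / body True / response False / body False
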
<|reserved_special_token_0|>
class RunViewSet(ModelViewSet):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
@property
def template_name(self):
if self.action == 'retrieve':
template = 'detail'
else:
template = self.action
return 'data_wizard/run_{}.html'.format(template)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def run_task(self, name, use_async=False, post=None):
run = self.get_object()
return run.run_task(name, use_async=use_async, post=post, backend=
self.backend, user=self.request.user)
def retrieve_and_run(self, task_name, use_async=False, post=None):
response = self.retrieve(self.request, **self.kwargs)
result = self.run_task(task_name, use_async, post)
response.data.update(result)
return response
@action(detail=True)
def serializers(self, request, *args, **kwargs):
response = self.retrieve(request, **self.kwargs)
response.data['serializer_choices'] = [{'name': s['class_name'],
'label': s['name']} for s in registry.get_serializers() if s[
'options'].get('show_in_list', True)]
return response
@action(detail=True, methods=['post'])
def updateserializer(self, request, *args, **kwargs):
run = self.get_object()
self.action = 'serializers'
name = request.POST.get('serializer', None)
if name and registry.get_serializer(name):
run.serializer = name
run.save()
run.add_event('update_serializer')
return self.serializers(request)
@action(detail=True)
def columns(self, request, *args, **kwargs):
return self.retrieve_and_run('read_columns')
<|reserved_special_token_0|>
<|reserved_special_token_0|>
@action(detail=True, methods=['post'])
def updateids(self, request, *args, **kwargs):
response = self.retrieve_and_run('read_row_identifiers')
self.action = 'ids'
result = self.run_task('update_row_identifiers', post=request.POST)
response.data.update(result)
return response
@action(detail=True, methods=['post'])
def data(self, request, *args, **kwargs):
return self.retrieve_and_run('import_data', use_async=True)
@action(detail=True, methods=['post', 'get'])
def auto(self, request, *args, **kwargs):
if request.method == 'GET':
response = self.retrieve(request, **kwargs)
task_id = request.GET.get('task', None)
if task_id:
response.data['task_id'] = task_id
else:
self.action = 'retrieve'
return response
return self.retrieve_and_run('auto_import', use_async=True)
@action(detail=True)
def records(self, request, *args, **kwargs):
response = self.retrieve(self.request, **kwargs)
response.data['records'] = self.record_serializer_class(self.
get_object().record_set.all(), many=True).data
return response
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class RunViewSet(ModelViewSet):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
@property
def backend(self):
from . import backend as data_wizard_backend
return data_wizard_backend
@property
def template_name(self):
if self.action == 'retrieve':
template = 'detail'
else:
template = self.action
return 'data_wizard/run_{}.html'.format(template)
<|reserved_special_token_0|>
@action(detail=True)
def status(self, request, *args, **kwargs):
task_id = request.GET.get('task', None)
result = self.backend.get_async_status(task_id)
status = result.get('status', 'UNKNOWN')
action = result.get('action', None)
if not action and status == 'SUCCESS':
action = 'records'
if action:
result['location'] = self.get_action_url(action)
elif status == 'FAILURE' and not result.get('error'):
result['error'] = 'Unknown Error'
result['status'] = status
return Response(result)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def run_task(self, name, use_async=False, post=None):
run = self.get_object()
return run.run_task(name, use_async=use_async, post=post, backend=
self.backend, user=self.request.user)
def retrieve_and_run(self, task_name, use_async=False, post=None):
response = self.retrieve(self.request, **self.kwargs)
result = self.run_task(task_name, use_async, post)
response.data.update(result)
return response
@action(detail=True)
def serializers(self, request, *args, **kwargs):
response = self.retrieve(request, **self.kwargs)
response.data['serializer_choices'] = [{'name': s['class_name'],
'label': s['name']} for s in registry.get_serializers() if s[
'options'].get('show_in_list', True)]
return response
@action(detail=True, methods=['post'])
def updateserializer(self, request, *args, **kwargs):
run = self.get_object()
self.action = 'serializers'
name = request.POST.get('serializer', None)
if name and registry.get_serializer(name):
run.serializer = name
run.save()
run.add_event('update_serializer')
return self.serializers(request)
@action(detail=True)
def columns(self, request, *args, **kwargs):
return self.retrieve_and_run('read_columns')
<|reserved_special_token_0|>
<|reserved_special_token_0|>
@action(detail=True, methods=['post'])
def updateids(self, request, *args, **kwargs):
response = self.retrieve_and_run('read_row_identifiers')
self.action = 'ids'
result = self.run_task('update_row_identifiers', post=request.POST)
response.data.update(result)
return response
@action(detail=True, methods=['post'])
def data(self, request, *args, **kwargs):
return self.retrieve_and_run('import_data', use_async=True)
@action(detail=True, methods=['post', 'get'])
def auto(self, request, *args, **kwargs):
if request.method == 'GET':
response = self.retrieve(request, **kwargs)
task_id = request.GET.get('task', None)
if task_id:
response.data['task_id'] = task_id
else:
self.action = 'retrieve'
return response
return self.retrieve_and_run('auto_import', use_async=True)
@action(detail=True)
def records(self, request, *args, **kwargs):
response = self.retrieve(self.request, **kwargs)
response.data['records'] = self.record_serializer_class(self.
get_object().record_set.all(), many=True).data
return response
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class RunViewSet(ModelViewSet):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
@property
def backend(self):
from . import backend as data_wizard_backend
return data_wizard_backend
@property
def template_name(self):
if self.action == 'retrieve':
template = 'detail'
else:
template = self.action
return 'data_wizard/run_{}.html'.format(template)
def get_renderers(self):
if self.action == 'status':
return [renderers.JSONRenderer()]
else:
return super(RunViewSet, self).get_renderers()
@action(detail=True)
def status(self, request, *args, **kwargs):
task_id = request.GET.get('task', None)
result = self.backend.get_async_status(task_id)
status = result.get('status', 'UNKNOWN')
action = result.get('action', None)
if not action and status == 'SUCCESS':
action = 'records'
if action:
result['location'] = self.get_action_url(action)
elif status == 'FAILURE' and not result.get('error'):
result['error'] = 'Unknown Error'
result['status'] = status
return Response(result)
<|reserved_special_token_0|>
def get_action_url(self, action):
name = self._namespace + ':run-' + action
return reverse(name, kwargs={'pk': self.get_object().pk})
def run_task(self, name, use_async=False, post=None):
run = self.get_object()
return run.run_task(name, use_async=use_async, post=post, backend=
self.backend, user=self.request.user)
def retrieve_and_run(self, task_name, use_async=False, post=None):
response = self.retrieve(self.request, **self.kwargs)
result = self.run_task(task_name, use_async, post)
response.data.update(result)
return response
@action(detail=True)
def serializers(self, request, *args, **kwargs):
response = self.retrieve(request, **self.kwargs)
response.data['serializer_choices'] = [{'name': s['class_name'],
'label': s['name']} for s in registry.get_serializers() if s[
'options'].get('show_in_list', True)]
return response
@action(detail=True, methods=['post'])
def updateserializer(self, request, *args, **kwargs):
run = self.get_object()
self.action = 'serializers'
name = request.POST.get('serializer', None)
if name and registry.get_serializer(name):
run.serializer = name
run.save()
run.add_event('update_serializer')
return self.serializers(request)
@action(detail=True)
def columns(self, request, *args, **kwargs):
return self.retrieve_and_run('read_columns')
@action(detail=True, methods=['post'])
def updatecolumns(self, request, *args, **kwargs):
response = self.retrieve_and_run('read_columns')
self.action = 'columns'
result = self.run_task('update_columns', post=request.POST)
response.data.update(result)
return response
@action(detail=True)
def ids(self, request, *args, **kwargs):
return self.retrieve_and_run('read_row_identifiers')
@action(detail=True, methods=['post'])
def updateids(self, request, *args, **kwargs):
response = self.retrieve_and_run('read_row_identifiers')
self.action = 'ids'
result = self.run_task('update_row_identifiers', post=request.POST)
response.data.update(result)
return response
@action(detail=True, methods=['post'])
def data(self, request, *args, **kwargs):
return self.retrieve_and_run('import_data', use_async=True)
@action(detail=True, methods=['post', 'get'])
def auto(self, request, *args, **kwargs):
if request.method == 'GET':
response = self.retrieve(request, **kwargs)
task_id = request.GET.get('task', None)
if task_id:
response.data['task_id'] = task_id
else:
self.action = 'retrieve'
return response
return self.retrieve_and_run('auto_import', use_async=True)
@action(detail=True)
def records(self, request, *args, **kwargs):
response = self.retrieve(self.request, **kwargs)
response.data['records'] = self.record_serializer_class(self.
get_object().record_set.all(), many=True).data
return response
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class RunViewSet(ModelViewSet):
serializer_class = RunSerializer
pagination_class = PageNumberPagination
renderer_classes = [renderers.TemplateHTMLRenderer, renderers.
JSONRenderer, renderers.BrowsableAPIRenderer]
authentication_classes = [import_setting('AUTHENTICATION')]
permission_classes = [import_setting('PERMISSION')]
record_serializer_class = RecordSerializer
queryset = Run.objects.all()
@property
def backend(self):
from . import backend as data_wizard_backend
return data_wizard_backend
@property
def template_name(self):
if self.action == 'retrieve':
template = 'detail'
else:
template = self.action
return 'data_wizard/run_{}.html'.format(template)
def get_renderers(self):
if self.action == 'status':
return [renderers.JSONRenderer()]
else:
return super(RunViewSet, self).get_renderers()
@action(detail=True)
def status(self, request, *args, **kwargs):
task_id = request.GET.get('task', None)
result = self.backend.get_async_status(task_id)
status = result.get('status', 'UNKNOWN')
action = result.get('action', None)
if not action and status == 'SUCCESS':
action = 'records'
if action:
result['location'] = self.get_action_url(action)
elif status == 'FAILURE' and not result.get('error'):
result['error'] = 'Unknown Error'
result['status'] = status
return Response(result)
_namespace = 'data_wizard'
def get_action_url(self, action):
name = self._namespace + ':run-' + action
return reverse(name, kwargs={'pk': self.get_object().pk})
def run_task(self, name, use_async=False, post=None):
run = self.get_object()
return run.run_task(name, use_async=use_async, post=post, backend=
self.backend, user=self.request.user)
def retrieve_and_run(self, task_name, use_async=False, post=None):
response = self.retrieve(self.request, **self.kwargs)
result = self.run_task(task_name, use_async, post)
response.data.update(result)
return response
@action(detail=True)
def serializers(self, request, *args, **kwargs):
response = self.retrieve(request, **self.kwargs)
response.data['serializer_choices'] = [{'name': s['class_name'],
'label': s['name']} for s in registry.get_serializers() if s[
'options'].get('show_in_list', True)]
return response
@action(detail=True, methods=['post'])
def updateserializer(self, request, *args, **kwargs):
run = self.get_object()
self.action = 'serializers'
name = request.POST.get('serializer', None)
if name and registry.get_serializer(name):
run.serializer = name
run.save()
run.add_event('update_serializer')
return self.serializers(request)
@action(detail=True)
def columns(self, request, *args, **kwargs):
return self.retrieve_and_run('read_columns')
@action(detail=True, methods=['post'])
def updatecolumns(self, request, *args, **kwargs):
response = self.retrieve_and_run('read_columns')
self.action = 'columns'
result = self.run_task('update_columns', post=request.POST)
response.data.update(result)
return response
@action(detail=True)
def ids(self, request, *args, **kwargs):
return self.retrieve_and_run('read_row_identifiers')
@action(detail=True, methods=['post'])
def updateids(self, request, *args, **kwargs):
response = self.retrieve_and_run('read_row_identifiers')
self.action = 'ids'
result = self.run_task('update_row_identifiers', post=request.POST)
response.data.update(result)
return response
@action(detail=True, methods=['post'])
def data(self, request, *args, **kwargs):
return self.retrieve_and_run('import_data', use_async=True)
@action(detail=True, methods=['post', 'get'])
def auto(self, request, *args, **kwargs):
if request.method == 'GET':
response = self.retrieve(request, **kwargs)
task_id = request.GET.get('task', None)
if task_id:
response.data['task_id'] = task_id
else:
self.action = 'retrieve'
return response
return self.retrieve_and_run('auto_import', use_async=True)
@action(detail=True)
def records(self, request, *args, **kwargs):
response = self.retrieve(self.request, **kwargs)
response.data['records'] = self.record_serializer_class(self.
get_object().record_set.all(), many=True).data
return response
<|reserved_special_token_1|>
from .compat import reverse, action
from rest_framework.response import Response
from rest_framework.viewsets import ModelViewSet
from rest_framework import pagination
from rest_framework import renderers
from . import registry
from .serializers import RunSerializer, RecordSerializer
from .models import Run
from .settings import import_setting
class PageNumberPagination(pagination.PageNumberPagination):
page_size = 50
class RunViewSet(ModelViewSet):
serializer_class = RunSerializer
pagination_class = PageNumberPagination
renderer_classes = [
renderers.TemplateHTMLRenderer,
renderers.JSONRenderer,
renderers.BrowsableAPIRenderer,
]
authentication_classes = [
import_setting('AUTHENTICATION'),
]
permission_classes = [
import_setting('PERMISSION'),
]
record_serializer_class = RecordSerializer
queryset = Run.objects.all()
@property
def backend(self):
from . import backend as data_wizard_backend
return data_wizard_backend
@property
def template_name(self):
if self.action == 'retrieve':
template = 'detail'
else:
template = self.action
return 'data_wizard/run_{}.html'.format(template)
def get_renderers(self):
if self.action == 'status':
return [renderers.JSONRenderer()]
else:
return super(RunViewSet, self).get_renderers()
@action(detail=True)
def status(self, request, *args, **kwargs):
task_id = request.GET.get('task', None)
result = self.backend.get_async_status(task_id)
status = result.get('status', 'UNKNOWN')
action = result.get('action', None)
if not action and status == 'SUCCESS':
action = 'records'
if action:
result['location'] = self.get_action_url(action)
elif status == 'FAILURE' and not result.get('error'):
result['error'] = "Unknown Error"
result['status'] = status
return Response(result)
_namespace = 'data_wizard'
def get_action_url(self, action):
name = self._namespace + ':run-' + action
return reverse(name, kwargs={'pk': self.get_object().pk})
def run_task(self, name, use_async=False, post=None):
run = self.get_object()
return run.run_task(
name,
use_async=use_async,
post=post,
backend=self.backend,
user=self.request.user
)
def retrieve_and_run(self, task_name, use_async=False, post=None):
response = self.retrieve(self.request, **self.kwargs)
result = self.run_task(task_name, use_async, post)
response.data.update(result)
return response
@action(detail=True)
def serializers(self, request, *args, **kwargs):
response = self.retrieve(request, **self.kwargs)
response.data['serializer_choices'] = [
{
'name': s['class_name'],
'label': s['name'],
} for s in registry.get_serializers()
if s['options'].get('show_in_list', True)
]
return response
@action(detail=True, methods=['post'])
def updateserializer(self, request, *args, **kwargs):
run = self.get_object()
self.action = 'serializers'
name = request.POST.get('serializer', None)
if name and registry.get_serializer(name):
run.serializer = name
run.save()
run.add_event('update_serializer')
return self.serializers(request)
@action(detail=True)
def columns(self, request, *args, **kwargs):
return self.retrieve_and_run('read_columns')
@action(detail=True, methods=['post'])
def updatecolumns(self, request, *args, **kwargs):
response = self.retrieve_and_run('read_columns')
self.action = 'columns'
result = self.run_task('update_columns', post=request.POST)
response.data.update(result)
return response
@action(detail=True)
def ids(self, request, *args, **kwargs):
return self.retrieve_and_run('read_row_identifiers')
@action(detail=True, methods=['post'])
def updateids(self, request, *args, **kwargs):
response = self.retrieve_and_run('read_row_identifiers')
self.action = 'ids'
result = self.run_task('update_row_identifiers', post=request.POST)
response.data.update(result)
return response
@action(detail=True, methods=['post'])
def data(self, request, *args, **kwargs):
return self.retrieve_and_run('import_data', use_async=True)
@action(detail=True, methods=['post', 'get'])
def auto(self, request, *args, **kwargs):
if request.method == 'GET':
response = self.retrieve(request, **kwargs)
task_id = request.GET.get('task', None)
if task_id:
response.data['task_id'] = task_id
else:
self.action = 'retrieve'
return response
return self.retrieve_and_run('auto_import', use_async=True)
@action(detail=True)
def records(self, request, *args, **kwargs):
response = self.retrieve(self.request, **kwargs)
response.data['records'] = self.record_serializer_class(
self.get_object().record_set.all(),
many=True
).data
return response
|
flexible
|
{
"blob_id": "11a0c3307994a90d1d4de67d442ffa355e11e13b",
"index": 6836,
"step-1": "<mask token>\n\n\nclass RunViewSet(ModelViewSet):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @property\n def template_name(self):\n if self.action == 'retrieve':\n template = 'detail'\n else:\n template = self.action\n return 'data_wizard/run_{}.html'.format(template)\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def run_task(self, name, use_async=False, post=None):\n run = self.get_object()\n return run.run_task(name, use_async=use_async, post=post, backend=\n self.backend, user=self.request.user)\n\n def retrieve_and_run(self, task_name, use_async=False, post=None):\n response = self.retrieve(self.request, **self.kwargs)\n result = self.run_task(task_name, use_async, post)\n response.data.update(result)\n return response\n\n @action(detail=True)\n def serializers(self, request, *args, **kwargs):\n response = self.retrieve(request, **self.kwargs)\n response.data['serializer_choices'] = [{'name': s['class_name'],\n 'label': s['name']} for s in registry.get_serializers() if s[\n 'options'].get('show_in_list', True)]\n return response\n\n @action(detail=True, methods=['post'])\n def updateserializer(self, request, *args, **kwargs):\n run = self.get_object()\n self.action = 'serializers'\n name = request.POST.get('serializer', None)\n if name and registry.get_serializer(name):\n run.serializer = name\n run.save()\n run.add_event('update_serializer')\n return self.serializers(request)\n\n @action(detail=True)\n def columns(self, request, *args, **kwargs):\n return self.retrieve_and_run('read_columns')\n <mask token>\n <mask token>\n\n @action(detail=True, methods=['post'])\n def updateids(self, request, *args, **kwargs):\n response = self.retrieve_and_run('read_row_identifiers')\n self.action = 'ids'\n result = self.run_task('update_row_identifiers', post=request.POST)\n response.data.update(result)\n return response\n\n @action(detail=True, methods=['post'])\n def data(self, request, *args, **kwargs):\n return self.retrieve_and_run('import_data', use_async=True)\n\n @action(detail=True, methods=['post', 'get'])\n def auto(self, request, *args, **kwargs):\n if request.method == 'GET':\n response = self.retrieve(request, **kwargs)\n task_id = request.GET.get('task', None)\n if task_id:\n response.data['task_id'] = task_id\n else:\n self.action = 'retrieve'\n return response\n return self.retrieve_and_run('auto_import', use_async=True)\n\n @action(detail=True)\n def records(self, request, *args, **kwargs):\n response = self.retrieve(self.request, **kwargs)\n response.data['records'] = self.record_serializer_class(self.\n get_object().record_set.all(), many=True).data\n return response\n",
"step-2": "<mask token>\n\n\nclass RunViewSet(ModelViewSet):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @property\n def backend(self):\n from . import backend as data_wizard_backend\n return data_wizard_backend\n\n @property\n def template_name(self):\n if self.action == 'retrieve':\n template = 'detail'\n else:\n template = self.action\n return 'data_wizard/run_{}.html'.format(template)\n <mask token>\n\n @action(detail=True)\n def status(self, request, *args, **kwargs):\n task_id = request.GET.get('task', None)\n result = self.backend.get_async_status(task_id)\n status = result.get('status', 'UNKNOWN')\n action = result.get('action', None)\n if not action and status == 'SUCCESS':\n action = 'records'\n if action:\n result['location'] = self.get_action_url(action)\n elif status == 'FAILURE' and not result.get('error'):\n result['error'] = 'Unknown Error'\n result['status'] = status\n return Response(result)\n <mask token>\n <mask token>\n\n def run_task(self, name, use_async=False, post=None):\n run = self.get_object()\n return run.run_task(name, use_async=use_async, post=post, backend=\n self.backend, user=self.request.user)\n\n def retrieve_and_run(self, task_name, use_async=False, post=None):\n response = self.retrieve(self.request, **self.kwargs)\n result = self.run_task(task_name, use_async, post)\n response.data.update(result)\n return response\n\n @action(detail=True)\n def serializers(self, request, *args, **kwargs):\n response = self.retrieve(request, **self.kwargs)\n response.data['serializer_choices'] = [{'name': s['class_name'],\n 'label': s['name']} for s in registry.get_serializers() if s[\n 'options'].get('show_in_list', True)]\n return response\n\n @action(detail=True, methods=['post'])\n def updateserializer(self, request, *args, **kwargs):\n run = self.get_object()\n self.action = 'serializers'\n name = request.POST.get('serializer', None)\n if name and registry.get_serializer(name):\n run.serializer = name\n run.save()\n run.add_event('update_serializer')\n return self.serializers(request)\n\n @action(detail=True)\n def columns(self, request, *args, **kwargs):\n return self.retrieve_and_run('read_columns')\n <mask token>\n <mask token>\n\n @action(detail=True, methods=['post'])\n def updateids(self, request, *args, **kwargs):\n response = self.retrieve_and_run('read_row_identifiers')\n self.action = 'ids'\n result = self.run_task('update_row_identifiers', post=request.POST)\n response.data.update(result)\n return response\n\n @action(detail=True, methods=['post'])\n def data(self, request, *args, **kwargs):\n return self.retrieve_and_run('import_data', use_async=True)\n\n @action(detail=True, methods=['post', 'get'])\n def auto(self, request, *args, **kwargs):\n if request.method == 'GET':\n response = self.retrieve(request, **kwargs)\n task_id = request.GET.get('task', None)\n if task_id:\n response.data['task_id'] = task_id\n else:\n self.action = 'retrieve'\n return response\n return self.retrieve_and_run('auto_import', use_async=True)\n\n @action(detail=True)\n def records(self, request, *args, **kwargs):\n response = self.retrieve(self.request, **kwargs)\n response.data['records'] = self.record_serializer_class(self.\n get_object().record_set.all(), many=True).data\n return response\n",
"step-3": "<mask token>\n\n\nclass RunViewSet(ModelViewSet):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @property\n def backend(self):\n from . import backend as data_wizard_backend\n return data_wizard_backend\n\n @property\n def template_name(self):\n if self.action == 'retrieve':\n template = 'detail'\n else:\n template = self.action\n return 'data_wizard/run_{}.html'.format(template)\n\n def get_renderers(self):\n if self.action == 'status':\n return [renderers.JSONRenderer()]\n else:\n return super(RunViewSet, self).get_renderers()\n\n @action(detail=True)\n def status(self, request, *args, **kwargs):\n task_id = request.GET.get('task', None)\n result = self.backend.get_async_status(task_id)\n status = result.get('status', 'UNKNOWN')\n action = result.get('action', None)\n if not action and status == 'SUCCESS':\n action = 'records'\n if action:\n result['location'] = self.get_action_url(action)\n elif status == 'FAILURE' and not result.get('error'):\n result['error'] = 'Unknown Error'\n result['status'] = status\n return Response(result)\n <mask token>\n\n def get_action_url(self, action):\n name = self._namespace + ':run-' + action\n return reverse(name, kwargs={'pk': self.get_object().pk})\n\n def run_task(self, name, use_async=False, post=None):\n run = self.get_object()\n return run.run_task(name, use_async=use_async, post=post, backend=\n self.backend, user=self.request.user)\n\n def retrieve_and_run(self, task_name, use_async=False, post=None):\n response = self.retrieve(self.request, **self.kwargs)\n result = self.run_task(task_name, use_async, post)\n response.data.update(result)\n return response\n\n @action(detail=True)\n def serializers(self, request, *args, **kwargs):\n response = self.retrieve(request, **self.kwargs)\n response.data['serializer_choices'] = [{'name': s['class_name'],\n 'label': s['name']} for s in registry.get_serializers() if s[\n 'options'].get('show_in_list', True)]\n return response\n\n @action(detail=True, methods=['post'])\n def updateserializer(self, request, *args, **kwargs):\n run = self.get_object()\n self.action = 'serializers'\n name = request.POST.get('serializer', None)\n if name and registry.get_serializer(name):\n run.serializer = name\n run.save()\n run.add_event('update_serializer')\n return self.serializers(request)\n\n @action(detail=True)\n def columns(self, request, *args, **kwargs):\n return self.retrieve_and_run('read_columns')\n\n @action(detail=True, methods=['post'])\n def updatecolumns(self, request, *args, **kwargs):\n response = self.retrieve_and_run('read_columns')\n self.action = 'columns'\n result = self.run_task('update_columns', post=request.POST)\n response.data.update(result)\n return response\n\n @action(detail=True)\n def ids(self, request, *args, **kwargs):\n return self.retrieve_and_run('read_row_identifiers')\n\n @action(detail=True, methods=['post'])\n def updateids(self, request, *args, **kwargs):\n response = self.retrieve_and_run('read_row_identifiers')\n self.action = 'ids'\n result = self.run_task('update_row_identifiers', post=request.POST)\n response.data.update(result)\n return response\n\n @action(detail=True, methods=['post'])\n def data(self, request, *args, **kwargs):\n return self.retrieve_and_run('import_data', use_async=True)\n\n @action(detail=True, methods=['post', 'get'])\n def auto(self, request, *args, **kwargs):\n if request.method == 'GET':\n response = self.retrieve(request, **kwargs)\n task_id = request.GET.get('task', 
None)\n if task_id:\n response.data['task_id'] = task_id\n else:\n self.action = 'retrieve'\n return response\n return self.retrieve_and_run('auto_import', use_async=True)\n\n @action(detail=True)\n def records(self, request, *args, **kwargs):\n response = self.retrieve(self.request, **kwargs)\n response.data['records'] = self.record_serializer_class(self.\n get_object().record_set.all(), many=True).data\n return response\n",
"step-4": "<mask token>\n\n\nclass RunViewSet(ModelViewSet):\n serializer_class = RunSerializer\n pagination_class = PageNumberPagination\n renderer_classes = [renderers.TemplateHTMLRenderer, renderers.\n JSONRenderer, renderers.BrowsableAPIRenderer]\n authentication_classes = [import_setting('AUTHENTICATION')]\n permission_classes = [import_setting('PERMISSION')]\n record_serializer_class = RecordSerializer\n queryset = Run.objects.all()\n\n @property\n def backend(self):\n from . import backend as data_wizard_backend\n return data_wizard_backend\n\n @property\n def template_name(self):\n if self.action == 'retrieve':\n template = 'detail'\n else:\n template = self.action\n return 'data_wizard/run_{}.html'.format(template)\n\n def get_renderers(self):\n if self.action == 'status':\n return [renderers.JSONRenderer()]\n else:\n return super(RunViewSet, self).get_renderers()\n\n @action(detail=True)\n def status(self, request, *args, **kwargs):\n task_id = request.GET.get('task', None)\n result = self.backend.get_async_status(task_id)\n status = result.get('status', 'UNKNOWN')\n action = result.get('action', None)\n if not action and status == 'SUCCESS':\n action = 'records'\n if action:\n result['location'] = self.get_action_url(action)\n elif status == 'FAILURE' and not result.get('error'):\n result['error'] = 'Unknown Error'\n result['status'] = status\n return Response(result)\n _namespace = 'data_wizard'\n\n def get_action_url(self, action):\n name = self._namespace + ':run-' + action\n return reverse(name, kwargs={'pk': self.get_object().pk})\n\n def run_task(self, name, use_async=False, post=None):\n run = self.get_object()\n return run.run_task(name, use_async=use_async, post=post, backend=\n self.backend, user=self.request.user)\n\n def retrieve_and_run(self, task_name, use_async=False, post=None):\n response = self.retrieve(self.request, **self.kwargs)\n result = self.run_task(task_name, use_async, post)\n response.data.update(result)\n return response\n\n @action(detail=True)\n def serializers(self, request, *args, **kwargs):\n response = self.retrieve(request, **self.kwargs)\n response.data['serializer_choices'] = [{'name': s['class_name'],\n 'label': s['name']} for s in registry.get_serializers() if s[\n 'options'].get('show_in_list', True)]\n return response\n\n @action(detail=True, methods=['post'])\n def updateserializer(self, request, *args, **kwargs):\n run = self.get_object()\n self.action = 'serializers'\n name = request.POST.get('serializer', None)\n if name and registry.get_serializer(name):\n run.serializer = name\n run.save()\n run.add_event('update_serializer')\n return self.serializers(request)\n\n @action(detail=True)\n def columns(self, request, *args, **kwargs):\n return self.retrieve_and_run('read_columns')\n\n @action(detail=True, methods=['post'])\n def updatecolumns(self, request, *args, **kwargs):\n response = self.retrieve_and_run('read_columns')\n self.action = 'columns'\n result = self.run_task('update_columns', post=request.POST)\n response.data.update(result)\n return response\n\n @action(detail=True)\n def ids(self, request, *args, **kwargs):\n return self.retrieve_and_run('read_row_identifiers')\n\n @action(detail=True, methods=['post'])\n def updateids(self, request, *args, **kwargs):\n response = self.retrieve_and_run('read_row_identifiers')\n self.action = 'ids'\n result = self.run_task('update_row_identifiers', post=request.POST)\n response.data.update(result)\n return response\n\n @action(detail=True, methods=['post'])\n def data(self, request, 
*args, **kwargs):\n return self.retrieve_and_run('import_data', use_async=True)\n\n @action(detail=True, methods=['post', 'get'])\n def auto(self, request, *args, **kwargs):\n if request.method == 'GET':\n response = self.retrieve(request, **kwargs)\n task_id = request.GET.get('task', None)\n if task_id:\n response.data['task_id'] = task_id\n else:\n self.action = 'retrieve'\n return response\n return self.retrieve_and_run('auto_import', use_async=True)\n\n @action(detail=True)\n def records(self, request, *args, **kwargs):\n response = self.retrieve(self.request, **kwargs)\n response.data['records'] = self.record_serializer_class(self.\n get_object().record_set.all(), many=True).data\n return response\n",
"step-5": "from .compat import reverse, action\nfrom rest_framework.response import Response\nfrom rest_framework.viewsets import ModelViewSet\nfrom rest_framework import pagination\nfrom rest_framework import renderers\nfrom . import registry\nfrom .serializers import RunSerializer, RecordSerializer\nfrom .models import Run\nfrom .settings import import_setting\n\n\nclass PageNumberPagination(pagination.PageNumberPagination):\n page_size = 50\n\n\nclass RunViewSet(ModelViewSet):\n serializer_class = RunSerializer\n pagination_class = PageNumberPagination\n renderer_classes = [\n renderers.TemplateHTMLRenderer,\n renderers.JSONRenderer,\n renderers.BrowsableAPIRenderer,\n ]\n authentication_classes = [\n import_setting('AUTHENTICATION'),\n ]\n permission_classes = [\n import_setting('PERMISSION'),\n ]\n record_serializer_class = RecordSerializer\n queryset = Run.objects.all()\n\n @property\n def backend(self):\n from . import backend as data_wizard_backend\n return data_wizard_backend\n\n @property\n def template_name(self):\n if self.action == 'retrieve':\n template = 'detail'\n else:\n template = self.action\n return 'data_wizard/run_{}.html'.format(template)\n\n def get_renderers(self):\n if self.action == 'status':\n return [renderers.JSONRenderer()]\n else:\n return super(RunViewSet, self).get_renderers()\n\n @action(detail=True)\n def status(self, request, *args, **kwargs):\n task_id = request.GET.get('task', None)\n result = self.backend.get_async_status(task_id)\n status = result.get('status', 'UNKNOWN')\n action = result.get('action', None)\n if not action and status == 'SUCCESS':\n action = 'records'\n if action:\n result['location'] = self.get_action_url(action)\n elif status == 'FAILURE' and not result.get('error'):\n result['error'] = \"Unknown Error\"\n result['status'] = status\n return Response(result)\n\n _namespace = 'data_wizard'\n\n def get_action_url(self, action):\n name = self._namespace + ':run-' + action\n return reverse(name, kwargs={'pk': self.get_object().pk})\n\n def run_task(self, name, use_async=False, post=None):\n run = self.get_object()\n return run.run_task(\n name,\n use_async=use_async,\n post=post,\n backend=self.backend,\n user=self.request.user\n )\n\n def retrieve_and_run(self, task_name, use_async=False, post=None):\n response = self.retrieve(self.request, **self.kwargs)\n result = self.run_task(task_name, use_async, post)\n response.data.update(result)\n return response\n\n @action(detail=True)\n def serializers(self, request, *args, **kwargs):\n response = self.retrieve(request, **self.kwargs)\n response.data['serializer_choices'] = [\n {\n 'name': s['class_name'],\n 'label': s['name'],\n } for s in registry.get_serializers()\n if s['options'].get('show_in_list', True)\n ]\n return response\n\n @action(detail=True, methods=['post'])\n def updateserializer(self, request, *args, **kwargs):\n run = self.get_object()\n self.action = 'serializers'\n name = request.POST.get('serializer', None)\n if name and registry.get_serializer(name):\n run.serializer = name\n run.save()\n run.add_event('update_serializer')\n return self.serializers(request)\n\n @action(detail=True)\n def columns(self, request, *args, **kwargs):\n return self.retrieve_and_run('read_columns')\n\n @action(detail=True, methods=['post'])\n def updatecolumns(self, request, *args, **kwargs):\n response = self.retrieve_and_run('read_columns')\n self.action = 'columns'\n result = self.run_task('update_columns', post=request.POST)\n response.data.update(result)\n return response\n\n 
@action(detail=True)\n def ids(self, request, *args, **kwargs):\n return self.retrieve_and_run('read_row_identifiers')\n\n @action(detail=True, methods=['post'])\n def updateids(self, request, *args, **kwargs):\n response = self.retrieve_and_run('read_row_identifiers')\n self.action = 'ids'\n result = self.run_task('update_row_identifiers', post=request.POST)\n response.data.update(result)\n return response\n\n @action(detail=True, methods=['post'])\n def data(self, request, *args, **kwargs):\n return self.retrieve_and_run('import_data', use_async=True)\n\n @action(detail=True, methods=['post', 'get'])\n def auto(self, request, *args, **kwargs):\n if request.method == 'GET':\n response = self.retrieve(request, **kwargs)\n task_id = request.GET.get('task', None)\n if task_id:\n response.data['task_id'] = task_id\n else:\n self.action = 'retrieve'\n return response\n return self.retrieve_and_run('auto_import', use_async=True)\n\n @action(detail=True)\n def records(self, request, *args, **kwargs):\n response = self.retrieve(self.request, **kwargs)\n response.data['records'] = self.record_serializer_class(\n self.get_object().record_set.all(),\n many=True\n ).data\n return response\n",
"step-ids": [
11,
13,
17,
18,
22
]
}
|
[
11,
13,
17,
18,
22
] |
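
The RunViewSet above reverses its action URLs through the `data_wizard` namespace (`_namespace = 'data_wizard'`), so it only resolves when mounted under that namespace. A hedged sketch of the URL wiring follows, assuming DRF >= 3.9 and a standard DefaultRouter; the `datawizard` prefix and the import path are assumptions, while the `run-<action>` route names follow from `basename='run'` plus DRF's @action routing.

# Hypothetical URLconf sketch; RunViewSet is the class from the record above.
from django.urls import include, path
from rest_framework.routers import DefaultRouter

from data_wizard.views import RunViewSet  # assumed module path

router = DefaultRouter()
# basename='run' makes DRF name the @action routes 'run-status',
# 'run-records', 'run-auto', ... which get_action_url() reverses.
router.register(r'datawizard', RunViewSet, basename='run')

urlpatterns = [
    # The namespace must match RunViewSet._namespace ('data_wizard').
    path('', include((router.urls, 'data_wizard'), namespace='data_wizard')),
]
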
<|reserved_special_token_0|>
class SummarizationTest(ArkoudaTest):
def setUp(self):
ArkoudaTest.setUp(self)
self.na = np.linspace(1, 10, 10)
self.pda = ak.array(self.na)
<|reserved_special_token_0|>
def testMin(self):
self.assertEqual(self.na.min(), self.pda.min())
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def testVar(self):
self.assertEqual(self.na.var(), self.pda.var())
def testAny(self):
self.assertEqual(self.na.any(), self.pda.any())
def testAll(self):
self.assertEqual(self.na.all(), self.pda.all())
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class SummarizationTest(ArkoudaTest):
def setUp(self):
ArkoudaTest.setUp(self)
self.na = np.linspace(1, 10, 10)
self.pda = ak.array(self.na)
<|reserved_special_token_0|>
def testMin(self):
self.assertEqual(self.na.min(), self.pda.min())
<|reserved_special_token_0|>
def testMean(self):
self.assertEqual(self.na.mean(), self.pda.mean())
def testVar(self):
self.assertEqual(self.na.var(), self.pda.var())
def testAny(self):
self.assertEqual(self.na.any(), self.pda.any())
def testAll(self):
self.assertEqual(self.na.all(), self.pda.all())
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class SummarizationTest(ArkoudaTest):
def setUp(self):
ArkoudaTest.setUp(self)
self.na = np.linspace(1, 10, 10)
self.pda = ak.array(self.na)
<|reserved_special_token_0|>
def testMin(self):
self.assertEqual(self.na.min(), self.pda.min())
def testMax(self):
self.assertEqual(self.na.max(), self.pda.max())
def testMean(self):
self.assertEqual(self.na.mean(), self.pda.mean())
def testVar(self):
self.assertEqual(self.na.var(), self.pda.var())
def testAny(self):
self.assertEqual(self.na.any(), self.pda.any())
def testAll(self):
self.assertEqual(self.na.all(), self.pda.all())
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class SummarizationTest(ArkoudaTest):
def setUp(self):
ArkoudaTest.setUp(self)
self.na = np.linspace(1, 10, 10)
self.pda = ak.array(self.na)
def testStd(self):
self.assertEqual(self.na.std(), self.pda.std())
def testMin(self):
self.assertEqual(self.na.min(), self.pda.min())
def testMax(self):
self.assertEqual(self.na.max(), self.pda.max())
def testMean(self):
self.assertEqual(self.na.mean(), self.pda.mean())
def testVar(self):
self.assertEqual(self.na.var(), self.pda.var())
def testAny(self):
self.assertEqual(self.na.any(), self.pda.any())
def testAll(self):
self.assertEqual(self.na.all(), self.pda.all())
<|reserved_special_token_1|>
import numpy as np
from base_test import ArkoudaTest
from context import arkouda as ak
"""
Encapsulates unit tests for the pdarrayclass module that provide
summarized values via reduction methods
"""
class SummarizationTest(ArkoudaTest):
def setUp(self):
ArkoudaTest.setUp(self)
self.na = np.linspace(1, 10, 10)
self.pda = ak.array(self.na)
def testStd(self):
self.assertEqual(self.na.std(), self.pda.std())
def testMin(self):
self.assertEqual(self.na.min(), self.pda.min())
def testMax(self):
self.assertEqual(self.na.max(), self.pda.max())
def testMean(self):
self.assertEqual(self.na.mean(), self.pda.mean())
def testVar(self):
self.assertEqual(self.na.var(), self.pda.var())
def testAny(self):
self.assertEqual(self.na.any(), self.pda.any())
def testAll(self):
self.assertEqual(self.na.all(), self.pda.all())
|
flexible
|
{
"blob_id": "88109909d0c80f25373f917426c3c3634bfc8114",
"index": 6267,
"step-1": "<mask token>\n\n\nclass SummarizationTest(ArkoudaTest):\n\n def setUp(self):\n ArkoudaTest.setUp(self)\n self.na = np.linspace(1, 10, 10)\n self.pda = ak.array(self.na)\n <mask token>\n\n def testMin(self):\n self.assertEqual(self.na.min(), self.pda.min())\n <mask token>\n <mask token>\n\n def testVar(self):\n self.assertEqual(self.na.var(), self.pda.var())\n\n def testAny(self):\n self.assertEqual(self.na.any(), self.pda.any())\n\n def testAll(self):\n self.assertEqual(self.na.all(), self.pda.all())\n",
"step-2": "<mask token>\n\n\nclass SummarizationTest(ArkoudaTest):\n\n def setUp(self):\n ArkoudaTest.setUp(self)\n self.na = np.linspace(1, 10, 10)\n self.pda = ak.array(self.na)\n <mask token>\n\n def testMin(self):\n self.assertEqual(self.na.min(), self.pda.min())\n <mask token>\n\n def testMean(self):\n self.assertEqual(self.na.mean(), self.pda.mean())\n\n def testVar(self):\n self.assertEqual(self.na.var(), self.pda.var())\n\n def testAny(self):\n self.assertEqual(self.na.any(), self.pda.any())\n\n def testAll(self):\n self.assertEqual(self.na.all(), self.pda.all())\n",
"step-3": "<mask token>\n\n\nclass SummarizationTest(ArkoudaTest):\n\n def setUp(self):\n ArkoudaTest.setUp(self)\n self.na = np.linspace(1, 10, 10)\n self.pda = ak.array(self.na)\n <mask token>\n\n def testMin(self):\n self.assertEqual(self.na.min(), self.pda.min())\n\n def testMax(self):\n self.assertEqual(self.na.max(), self.pda.max())\n\n def testMean(self):\n self.assertEqual(self.na.mean(), self.pda.mean())\n\n def testVar(self):\n self.assertEqual(self.na.var(), self.pda.var())\n\n def testAny(self):\n self.assertEqual(self.na.any(), self.pda.any())\n\n def testAll(self):\n self.assertEqual(self.na.all(), self.pda.all())\n",
"step-4": "<mask token>\n\n\nclass SummarizationTest(ArkoudaTest):\n\n def setUp(self):\n ArkoudaTest.setUp(self)\n self.na = np.linspace(1, 10, 10)\n self.pda = ak.array(self.na)\n\n def testStd(self):\n self.assertEqual(self.na.std(), self.pda.std())\n\n def testMin(self):\n self.assertEqual(self.na.min(), self.pda.min())\n\n def testMax(self):\n self.assertEqual(self.na.max(), self.pda.max())\n\n def testMean(self):\n self.assertEqual(self.na.mean(), self.pda.mean())\n\n def testVar(self):\n self.assertEqual(self.na.var(), self.pda.var())\n\n def testAny(self):\n self.assertEqual(self.na.any(), self.pda.any())\n\n def testAll(self):\n self.assertEqual(self.na.all(), self.pda.all())\n",
"step-5": "import numpy as np\nfrom base_test import ArkoudaTest\nfrom context import arkouda as ak\n\n\"\"\"\nEncapsulates unit tests for the pdarrayclass module that provide\nsummarized values via reduction methods\n\"\"\"\n\n\nclass SummarizationTest(ArkoudaTest):\n def setUp(self):\n ArkoudaTest.setUp(self)\n self.na = np.linspace(1, 10, 10)\n self.pda = ak.array(self.na)\n\n def testStd(self):\n self.assertEqual(self.na.std(), self.pda.std())\n\n def testMin(self):\n self.assertEqual(self.na.min(), self.pda.min())\n\n def testMax(self):\n self.assertEqual(self.na.max(), self.pda.max())\n\n def testMean(self):\n self.assertEqual(self.na.mean(), self.pda.mean())\n\n def testVar(self):\n self.assertEqual(self.na.var(), self.pda.var())\n\n def testAny(self):\n self.assertEqual(self.na.any(), self.pda.any())\n\n def testAll(self):\n self.assertEqual(self.na.all(), self.pda.all())\n",
"step-ids": [
6,
7,
8,
9,
11
]
}
|
[
6,
7,
8,
9,
11
] |
j= float(input("interest rate"))
Q0= 1500
t= 36
Qf=Q0*(1+j)**t
print(round(Qf,2))
|
normal
|
{
"blob_id": "700d6e0c7dab58ed0157265ff78021923c17e397",
"index": 5619,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(round(Qf, 2))\n",
"step-3": "j = float(input('juros'))\nQ0 = 1500\nt = 36\nQf = Q0 * (1 + j) ** t\nprint(round(Qf, 2))\n",
"step-4": "j= float(input(\"juros\"))\nQ0= 1500\nt= 36\nQf=Q0*(1+j)**t\nprint(round(Qf,2))",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class Lsoda(sim.SimulatorMG):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def _compile(self, step_code):
self._beta = 1
fc = open(os.path.join(os.path.split(os.path.realpath(__file__))[0],
'cuLsoda_all.cu'), 'r')
_sourceFromFile_ = fc.read()
_isize_ = '#define ISIZE ' + repr(20 + self._speciesNumber) + '\n'
_rsize_ = '#define RSIZE ' + repr(22 + self._speciesNumber * max(16,
self._speciesNumber + 9)) + '\n'
_textures_ = 'texture<float, 2, cudaReadModeElementType> param_tex;\n'
_common_block_ = '__device__ struct cuLsodaCommonBlock common[' + repr(
1 * 1) + '];\n'
_code_ = (_isize_ + _rsize_ + _textures_ + step_code +
_sourceFromFile_ + _common_block_ + self._lsoda_source_)
if self._dump:
of = open('full_ode_code.cu', 'w')
print >> of, _code_
compiled = pycuda.compiler.SourceModule(_code_, nvcc='nvcc',
options=[], no_extern_c=True, keep=False)
blocks, threads = self._getOptimalGPUParam(compiled.get_function(
'cuLsoda'))
blocks = self._MAXBLOCKSPERDEVICE
_common_block_ = '__device__ struct cuLsodaCommonBlock common[' + repr(
blocks * threads) + '];\n'
_code_ = (_isize_ + _rsize_ + _textures_ + step_code +
_sourceFromFile_ + _common_block_ + self._lsoda_source_)
if self._dump:
of = open('full_ode_code.cu', 'w')
print >> of, _code_
compiled = pycuda.compiler.SourceModule(_code_, nvcc='nvcc',
options=[], no_extern_c=True, keep=False)
self._param_tex = compiled.get_texref('param_tex')
lsoda_kernel = compiled.get_function('cuLsoda')
return compiled, lsoda_kernel
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Lsoda(sim.SimulatorMG):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def _compile(self, step_code):
self._beta = 1
fc = open(os.path.join(os.path.split(os.path.realpath(__file__))[0],
'cuLsoda_all.cu'), 'r')
_sourceFromFile_ = fc.read()
_isize_ = '#define ISIZE ' + repr(20 + self._speciesNumber) + '\n'
_rsize_ = '#define RSIZE ' + repr(22 + self._speciesNumber * max(16,
self._speciesNumber + 9)) + '\n'
_textures_ = 'texture<float, 2, cudaReadModeElementType> param_tex;\n'
_common_block_ = '__device__ struct cuLsodaCommonBlock common[' + repr(
1 * 1) + '];\n'
_code_ = (_isize_ + _rsize_ + _textures_ + step_code +
_sourceFromFile_ + _common_block_ + self._lsoda_source_)
if self._dump:
of = open('full_ode_code.cu', 'w')
print >> of, _code_
compiled = pycuda.compiler.SourceModule(_code_, nvcc='nvcc',
options=[], no_extern_c=True, keep=False)
blocks, threads = self._getOptimalGPUParam(compiled.get_function(
'cuLsoda'))
blocks = self._MAXBLOCKSPERDEVICE
_common_block_ = '__device__ struct cuLsodaCommonBlock common[' + repr(
blocks * threads) + '];\n'
_code_ = (_isize_ + _rsize_ + _textures_ + step_code +
_sourceFromFile_ + _common_block_ + self._lsoda_source_)
if self._dump:
of = open('full_ode_code.cu', 'w')
print >> of, _code_
compiled = pycuda.compiler.SourceModule(_code_, nvcc='nvcc',
options=[], no_extern_c=True, keep=False)
self._param_tex = compiled.get_texref('param_tex')
lsoda_kernel = compiled.get_function('cuLsoda')
return compiled, lsoda_kernel
def _run_simulation(self, parameters, init_values, blocks, threads,
in_atol=1e-06, in_rtol=1e-06):
total_threads = threads * blocks
experiments = len(parameters)
neqn = self._speciesNumber
init_common_kernel = self._completeCode.get_function('init_common')
init_common_kernel(block=(threads, 1, 1), grid=(blocks, 1))
ret_xt = np.zeros([total_threads, 1, self._resultNumber, self.
_speciesNumber])
ret_istate = np.ones([total_threads], dtype=np.int32)
isize = 20 + self._speciesNumber
rsize = 22 + self._speciesNumber * max(16, self._speciesNumber + 9)
t = np.zeros([total_threads], dtype=np.float64)
jt = np.zeros([total_threads], dtype=np.int32)
neq = np.zeros([total_threads], dtype=np.int32)
itol = np.zeros([total_threads], dtype=np.int32)
iopt = np.zeros([total_threads], dtype=np.int32)
rtol = np.zeros([total_threads], dtype=np.float64)
iout = np.zeros([total_threads], dtype=np.int32)
tout = np.zeros([total_threads], dtype=np.float64)
itask = np.zeros([total_threads], dtype=np.int32)
istate = np.zeros([total_threads], dtype=np.int32)
atol = np.zeros([total_threads], dtype=np.float64)
liw = np.zeros([total_threads], dtype=np.int32)
lrw = np.zeros([total_threads], dtype=np.int32)
iwork = np.zeros([isize * total_threads], dtype=np.int32)
rwork = np.zeros([rsize * total_threads], dtype=np.float64)
y = np.zeros([self._speciesNumber * total_threads], dtype=np.float64)
for i in range(total_threads):
neq[i] = neqn
t[i] = 0
itol[i] = 1
itask[i] = 1
istate[i] = 1
iopt[i] = 0
jt[i] = 2
atol[i] = in_atol
rtol[i] = in_rtol
liw[i] = isize
lrw[i] = rsize
try:
for j in range(self._speciesNumber):
y[i * self._speciesNumber + j] = init_values[i][j]
ret_xt[i, 0, 0, j] = init_values[i][j]
except IndexError:
pass
d_t = driver.mem_alloc(t.size * t.dtype.itemsize)
d_jt = driver.mem_alloc(jt.size * jt.dtype.itemsize)
d_neq = driver.mem_alloc(neq.size * neq.dtype.itemsize)
d_liw = driver.mem_alloc(liw.size * liw.dtype.itemsize)
d_lrw = driver.mem_alloc(lrw.size * lrw.dtype.itemsize)
d_itol = driver.mem_alloc(itol.size * itol.dtype.itemsize)
d_iopt = driver.mem_alloc(iopt.size * iopt.dtype.itemsize)
d_rtol = driver.mem_alloc(rtol.size * rtol.dtype.itemsize)
d_iout = driver.mem_alloc(iout.size * iout.dtype.itemsize)
d_tout = driver.mem_alloc(tout.size * tout.dtype.itemsize)
d_itask = driver.mem_alloc(itask.size * itask.dtype.itemsize)
d_istate = driver.mem_alloc(istate.size * istate.dtype.itemsize)
d_y = driver.mem_alloc(y.size * y.dtype.itemsize)
d_atol = driver.mem_alloc(atol.size * atol.dtype.itemsize)
d_iwork = driver.mem_alloc(iwork.size * iwork.dtype.itemsize)
d_rwork = driver.mem_alloc(rwork.size * rwork.dtype.itemsize)
driver.memcpy_htod(d_t, t)
driver.memcpy_htod(d_jt, jt)
driver.memcpy_htod(d_neq, neq)
driver.memcpy_htod(d_liw, liw)
driver.memcpy_htod(d_lrw, lrw)
driver.memcpy_htod(d_itol, itol)
driver.memcpy_htod(d_iopt, iopt)
driver.memcpy_htod(d_rtol, rtol)
driver.memcpy_htod(d_iout, iout)
driver.memcpy_htod(d_tout, tout)
driver.memcpy_htod(d_itask, itask)
driver.memcpy_htod(d_istate, istate)
driver.memcpy_htod(d_y, y)
driver.memcpy_htod(d_atol, atol)
driver.memcpy_htod(d_iwork, iwork)
driver.memcpy_htod(d_rwork, rwork)
param = np.zeros((total_threads, self._parameterNumber), dtype=np.
float32)
try:
for i in range(len(parameters)):
for j in range(self._parameterNumber):
param[i][j] = parameters[i][j]
except IndexError:
pass
ary = sim.create_2D_array(param)
sim.copy2D_host_to_array(ary, param, self._parameterNumber * 4,
total_threads)
self._param_tex.set_array(ary)
if self._dt <= 0:
for i in range(self._resultNumber):
for j in range(total_threads):
tout[j] = self._timepoints[i]
driver.memcpy_htod(d_tout, tout)
self._compiledRunMethod(d_neq, d_y, d_t, d_tout, d_itol,
d_rtol, d_atol, d_itask, d_istate, d_iopt, d_rwork,
d_lrw, d_iwork, d_liw, d_jt, block=(threads, 1, 1),
grid=(blocks, 1))
driver.memcpy_dtoh(t, d_t)
driver.memcpy_dtoh(y, d_y)
driver.memcpy_dtoh(istate, d_istate)
for j in range(total_threads):
for k in range(self._speciesNumber):
ret_xt[j, 0, i, k] = y[j * self._speciesNumber + k]
if istate[j] < 0:
ret_istate[j] = 0
else:
tt = self._timepoints[0]
for i in range(self._resultNumber):
while 1:
next_time = min(tt + self._dt, self._timepoints[i])
for j in range(total_threads):
tout[j] = next_time
driver.memcpy_htod(d_tout, tout)
self._compiledRunMethod(d_neq, d_y, d_t, d_tout, d_itol,
d_rtol, d_atol, d_itask, d_istate, d_iopt, d_rwork,
d_lrw, d_iwork, d_liw, d_jt, block=(threads, 1, 1),
grid=(blocks, 1))
driver.memcpy_dtoh(t, d_t)
driver.memcpy_dtoh(y, d_y)
driver.memcpy_dtoh(istate, d_istate)
if np.abs(next_time - self._timepoints[i]) < 1e-05:
tt = next_time
break
tt = next_time
for j in range(total_threads):
for k in range(self._speciesNumber):
ret_xt[j, 0, i, k] = y[j * self._speciesNumber + k]
if istate[j] < 0:
ret_istate[j] = 0
for j in range(total_threads):
if ret_istate[j] == 0:
for i in range(self._resultNumber):
for k in range(self._speciesNumber):
ret_xt[j, 0, i, k] = float('NaN')
return ret_xt[0:experiments]
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Lsoda(sim.SimulatorMG):
_param_tex = None
_step_code = None
_runtimeCompile = True
_lsoda_source_ = """
extern "C"{
#include <stdio.h>
__device__ myFex myfex;
__device__ myJex myjex;
__global__ void init_common(){
int tid = blockDim.x * blockIdx.x + threadIdx.x;
cuLsodaCommonBlockInit( &(common[tid]) );
}
__global__ void cuLsoda(int *neq, double *y, double *t, double *tout, int *itol,
double *rtol, double *atol, int *itask, int *istate, int *iopt,
double *rwork, int *lrw, int *iwork, int *liw, int *jt)
{
int tid = blockDim.x * blockIdx.x + threadIdx.x;
//if(tid==0){
//printf("I am thread time %d %f\\n", tid, t[0] );
//}
dlsoda_(myfex, neq+tid, y+tid*NSPECIES, t+tid, tout+tid, itol+tid, rtol+tid, atol+tid, itask+tid,
istate+tid, iopt+tid, rwork+tid*RSIZE, lrw+tid, iwork+tid*ISIZE, liw+tid, myjex, jt+tid, &(common[tid]) );
//if(tid==0){
//printf("I am done %d %f\\n", tid, t[0] );
//}
}
}
"""
def _compile(self, step_code):
self._beta = 1
fc = open(os.path.join(os.path.split(os.path.realpath(__file__))[0],
'cuLsoda_all.cu'), 'r')
_sourceFromFile_ = fc.read()
_isize_ = '#define ISIZE ' + repr(20 + self._speciesNumber) + '\n'
_rsize_ = '#define RSIZE ' + repr(22 + self._speciesNumber * max(16,
self._speciesNumber + 9)) + '\n'
_textures_ = 'texture<float, 2, cudaReadModeElementType> param_tex;\n'
_common_block_ = '__device__ struct cuLsodaCommonBlock common[' + repr(
1 * 1) + '];\n'
_code_ = (_isize_ + _rsize_ + _textures_ + step_code +
_sourceFromFile_ + _common_block_ + self._lsoda_source_)
if self._dump:
of = open('full_ode_code.cu', 'w')
print >> of, _code_
compiled = pycuda.compiler.SourceModule(_code_, nvcc='nvcc',
options=[], no_extern_c=True, keep=False)
blocks, threads = self._getOptimalGPUParam(compiled.get_function(
'cuLsoda'))
blocks = self._MAXBLOCKSPERDEVICE
_common_block_ = '__device__ struct cuLsodaCommonBlock common[' + repr(
blocks * threads) + '];\n'
_code_ = (_isize_ + _rsize_ + _textures_ + step_code +
_sourceFromFile_ + _common_block_ + self._lsoda_source_)
if self._dump:
of = open('full_ode_code.cu', 'w')
print >> of, _code_
compiled = pycuda.compiler.SourceModule(_code_, nvcc='nvcc',
options=[], no_extern_c=True, keep=False)
self._param_tex = compiled.get_texref('param_tex')
lsoda_kernel = compiled.get_function('cuLsoda')
return compiled, lsoda_kernel
def _run_simulation(self, parameters, init_values, blocks, threads,
in_atol=1e-06, in_rtol=1e-06):
total_threads = threads * blocks
experiments = len(parameters)
neqn = self._speciesNumber
init_common_kernel = self._completeCode.get_function('init_common')
init_common_kernel(block=(threads, 1, 1), grid=(blocks, 1))
ret_xt = np.zeros([total_threads, 1, self._resultNumber, self.
_speciesNumber])
ret_istate = np.ones([total_threads], dtype=np.int32)
isize = 20 + self._speciesNumber
rsize = 22 + self._speciesNumber * max(16, self._speciesNumber + 9)
t = np.zeros([total_threads], dtype=np.float64)
jt = np.zeros([total_threads], dtype=np.int32)
neq = np.zeros([total_threads], dtype=np.int32)
itol = np.zeros([total_threads], dtype=np.int32)
iopt = np.zeros([total_threads], dtype=np.int32)
rtol = np.zeros([total_threads], dtype=np.float64)
iout = np.zeros([total_threads], dtype=np.int32)
tout = np.zeros([total_threads], dtype=np.float64)
itask = np.zeros([total_threads], dtype=np.int32)
istate = np.zeros([total_threads], dtype=np.int32)
atol = np.zeros([total_threads], dtype=np.float64)
liw = np.zeros([total_threads], dtype=np.int32)
lrw = np.zeros([total_threads], dtype=np.int32)
iwork = np.zeros([isize * total_threads], dtype=np.int32)
rwork = np.zeros([rsize * total_threads], dtype=np.float64)
y = np.zeros([self._speciesNumber * total_threads], dtype=np.float64)
for i in range(total_threads):
neq[i] = neqn
t[i] = 0
itol[i] = 1
itask[i] = 1
istate[i] = 1
iopt[i] = 0
jt[i] = 2
atol[i] = in_atol
rtol[i] = in_rtol
liw[i] = isize
lrw[i] = rsize
try:
for j in range(self._speciesNumber):
y[i * self._speciesNumber + j] = init_values[i][j]
ret_xt[i, 0, 0, j] = init_values[i][j]
except IndexError:
pass
d_t = driver.mem_alloc(t.size * t.dtype.itemsize)
d_jt = driver.mem_alloc(jt.size * jt.dtype.itemsize)
d_neq = driver.mem_alloc(neq.size * neq.dtype.itemsize)
d_liw = driver.mem_alloc(liw.size * liw.dtype.itemsize)
d_lrw = driver.mem_alloc(lrw.size * lrw.dtype.itemsize)
d_itol = driver.mem_alloc(itol.size * itol.dtype.itemsize)
d_iopt = driver.mem_alloc(iopt.size * iopt.dtype.itemsize)
d_rtol = driver.mem_alloc(rtol.size * rtol.dtype.itemsize)
d_iout = driver.mem_alloc(iout.size * iout.dtype.itemsize)
d_tout = driver.mem_alloc(tout.size * tout.dtype.itemsize)
d_itask = driver.mem_alloc(itask.size * itask.dtype.itemsize)
d_istate = driver.mem_alloc(istate.size * istate.dtype.itemsize)
d_y = driver.mem_alloc(y.size * y.dtype.itemsize)
d_atol = driver.mem_alloc(atol.size * atol.dtype.itemsize)
d_iwork = driver.mem_alloc(iwork.size * iwork.dtype.itemsize)
d_rwork = driver.mem_alloc(rwork.size * rwork.dtype.itemsize)
driver.memcpy_htod(d_t, t)
driver.memcpy_htod(d_jt, jt)
driver.memcpy_htod(d_neq, neq)
driver.memcpy_htod(d_liw, liw)
driver.memcpy_htod(d_lrw, lrw)
driver.memcpy_htod(d_itol, itol)
driver.memcpy_htod(d_iopt, iopt)
driver.memcpy_htod(d_rtol, rtol)
driver.memcpy_htod(d_iout, iout)
driver.memcpy_htod(d_tout, tout)
driver.memcpy_htod(d_itask, itask)
driver.memcpy_htod(d_istate, istate)
driver.memcpy_htod(d_y, y)
driver.memcpy_htod(d_atol, atol)
driver.memcpy_htod(d_iwork, iwork)
driver.memcpy_htod(d_rwork, rwork)
param = np.zeros((total_threads, self._parameterNumber), dtype=np.
float32)
try:
for i in range(len(parameters)):
for j in range(self._parameterNumber):
param[i][j] = parameters[i][j]
except IndexError:
pass
ary = sim.create_2D_array(param)
sim.copy2D_host_to_array(ary, param, self._parameterNumber * 4,
total_threads)
self._param_tex.set_array(ary)
if self._dt <= 0:
for i in range(self._resultNumber):
for j in range(total_threads):
tout[j] = self._timepoints[i]
driver.memcpy_htod(d_tout, tout)
self._compiledRunMethod(d_neq, d_y, d_t, d_tout, d_itol,
d_rtol, d_atol, d_itask, d_istate, d_iopt, d_rwork,
d_lrw, d_iwork, d_liw, d_jt, block=(threads, 1, 1),
grid=(blocks, 1))
driver.memcpy_dtoh(t, d_t)
driver.memcpy_dtoh(y, d_y)
driver.memcpy_dtoh(istate, d_istate)
for j in range(total_threads):
for k in range(self._speciesNumber):
ret_xt[j, 0, i, k] = y[j * self._speciesNumber + k]
if istate[j] < 0:
ret_istate[j] = 0
else:
tt = self._timepoints[0]
for i in range(self._resultNumber):
while 1:
next_time = min(tt + self._dt, self._timepoints[i])
for j in range(total_threads):
tout[j] = next_time
driver.memcpy_htod(d_tout, tout)
self._compiledRunMethod(d_neq, d_y, d_t, d_tout, d_itol,
d_rtol, d_atol, d_itask, d_istate, d_iopt, d_rwork,
d_lrw, d_iwork, d_liw, d_jt, block=(threads, 1, 1),
grid=(blocks, 1))
driver.memcpy_dtoh(t, d_t)
driver.memcpy_dtoh(y, d_y)
driver.memcpy_dtoh(istate, d_istate)
if np.abs(next_time - self._timepoints[i]) < 1e-05:
tt = next_time
break
tt = next_time
for j in range(total_threads):
for k in range(self._speciesNumber):
ret_xt[j, 0, i, k] = y[j * self._speciesNumber + k]
if istate[j] < 0:
ret_istate[j] = 0
for j in range(total_threads):
if ret_istate[j] == 0:
for i in range(self._resultNumber):
for k in range(self._speciesNumber):
ret_xt[j, 0, i, k] = float('NaN')
return ret_xt[0:experiments]
<|reserved_special_token_1|>
import os
import numpy as np
import pycuda
import pycuda.driver as driver
import cudasim.solvers.cuda.Simulator_mg as sim
import cudasim
class Lsoda(sim.SimulatorMG):
_param_tex = None
_step_code = None
_runtimeCompile = True
_lsoda_source_ = """
extern "C"{
#include <stdio.h>
__device__ myFex myfex;
__device__ myJex myjex;
__global__ void init_common(){
int tid = blockDim.x * blockIdx.x + threadIdx.x;
cuLsodaCommonBlockInit( &(common[tid]) );
}
__global__ void cuLsoda(int *neq, double *y, double *t, double *tout, int *itol,
double *rtol, double *atol, int *itask, int *istate, int *iopt,
double *rwork, int *lrw, int *iwork, int *liw, int *jt)
{
int tid = blockDim.x * blockIdx.x + threadIdx.x;
//if(tid==0){
//printf("I am thread time %d %f\\n", tid, t[0] );
//}
dlsoda_(myfex, neq+tid, y+tid*NSPECIES, t+tid, tout+tid, itol+tid, rtol+tid, atol+tid, itask+tid,
istate+tid, iopt+tid, rwork+tid*RSIZE, lrw+tid, iwork+tid*ISIZE, liw+tid, myjex, jt+tid, &(common[tid]) );
//if(tid==0){
//printf("I am done %d %f\\n", tid, t[0] );
//}
}
}
"""
def _compile(self, step_code):
self._beta = 1
fc = open(os.path.join(os.path.split(os.path.realpath(__file__))[0],
'cuLsoda_all.cu'), 'r')
_sourceFromFile_ = fc.read()
_isize_ = '#define ISIZE ' + repr(20 + self._speciesNumber) + '\n'
_rsize_ = '#define RSIZE ' + repr(22 + self._speciesNumber * max(16,
self._speciesNumber + 9)) + '\n'
_textures_ = 'texture<float, 2, cudaReadModeElementType> param_tex;\n'
_common_block_ = '__device__ struct cuLsodaCommonBlock common[' + repr(
1 * 1) + '];\n'
_code_ = (_isize_ + _rsize_ + _textures_ + step_code +
_sourceFromFile_ + _common_block_ + self._lsoda_source_)
if self._dump:
of = open('full_ode_code.cu', 'w')
print >> of, _code_
compiled = pycuda.compiler.SourceModule(_code_, nvcc='nvcc',
options=[], no_extern_c=True, keep=False)
blocks, threads = self._getOptimalGPUParam(compiled.get_function(
'cuLsoda'))
blocks = self._MAXBLOCKSPERDEVICE
_common_block_ = '__device__ struct cuLsodaCommonBlock common[' + repr(
blocks * threads) + '];\n'
_code_ = (_isize_ + _rsize_ + _textures_ + step_code +
_sourceFromFile_ + _common_block_ + self._lsoda_source_)
if self._dump:
of = open('full_ode_code.cu', 'w')
print >> of, _code_
compiled = pycuda.compiler.SourceModule(_code_, nvcc='nvcc',
options=[], no_extern_c=True, keep=False)
self._param_tex = compiled.get_texref('param_tex')
lsoda_kernel = compiled.get_function('cuLsoda')
return compiled, lsoda_kernel
def _run_simulation(self, parameters, init_values, blocks, threads,
in_atol=1e-06, in_rtol=1e-06):
total_threads = threads * blocks
experiments = len(parameters)
neqn = self._speciesNumber
init_common_kernel = self._completeCode.get_function('init_common')
init_common_kernel(block=(threads, 1, 1), grid=(blocks, 1))
ret_xt = np.zeros([total_threads, 1, self._resultNumber, self.
_speciesNumber])
ret_istate = np.ones([total_threads], dtype=np.int32)
isize = 20 + self._speciesNumber
rsize = 22 + self._speciesNumber * max(16, self._speciesNumber + 9)
t = np.zeros([total_threads], dtype=np.float64)
jt = np.zeros([total_threads], dtype=np.int32)
neq = np.zeros([total_threads], dtype=np.int32)
itol = np.zeros([total_threads], dtype=np.int32)
iopt = np.zeros([total_threads], dtype=np.int32)
rtol = np.zeros([total_threads], dtype=np.float64)
iout = np.zeros([total_threads], dtype=np.int32)
tout = np.zeros([total_threads], dtype=np.float64)
itask = np.zeros([total_threads], dtype=np.int32)
istate = np.zeros([total_threads], dtype=np.int32)
atol = np.zeros([total_threads], dtype=np.float64)
liw = np.zeros([total_threads], dtype=np.int32)
lrw = np.zeros([total_threads], dtype=np.int32)
iwork = np.zeros([isize * total_threads], dtype=np.int32)
rwork = np.zeros([rsize * total_threads], dtype=np.float64)
y = np.zeros([self._speciesNumber * total_threads], dtype=np.float64)
for i in range(total_threads):
neq[i] = neqn
t[i] = 0
itol[i] = 1
itask[i] = 1
istate[i] = 1
iopt[i] = 0
jt[i] = 2
atol[i] = in_atol
rtol[i] = in_rtol
liw[i] = isize
lrw[i] = rsize
try:
for j in range(self._speciesNumber):
y[i * self._speciesNumber + j] = init_values[i][j]
ret_xt[i, 0, 0, j] = init_values[i][j]
except IndexError:
pass
d_t = driver.mem_alloc(t.size * t.dtype.itemsize)
d_jt = driver.mem_alloc(jt.size * jt.dtype.itemsize)
d_neq = driver.mem_alloc(neq.size * neq.dtype.itemsize)
d_liw = driver.mem_alloc(liw.size * liw.dtype.itemsize)
d_lrw = driver.mem_alloc(lrw.size * lrw.dtype.itemsize)
d_itol = driver.mem_alloc(itol.size * itol.dtype.itemsize)
d_iopt = driver.mem_alloc(iopt.size * iopt.dtype.itemsize)
d_rtol = driver.mem_alloc(rtol.size * rtol.dtype.itemsize)
d_iout = driver.mem_alloc(iout.size * iout.dtype.itemsize)
d_tout = driver.mem_alloc(tout.size * tout.dtype.itemsize)
d_itask = driver.mem_alloc(itask.size * itask.dtype.itemsize)
d_istate = driver.mem_alloc(istate.size * istate.dtype.itemsize)
d_y = driver.mem_alloc(y.size * y.dtype.itemsize)
d_atol = driver.mem_alloc(atol.size * atol.dtype.itemsize)
d_iwork = driver.mem_alloc(iwork.size * iwork.dtype.itemsize)
d_rwork = driver.mem_alloc(rwork.size * rwork.dtype.itemsize)
driver.memcpy_htod(d_t, t)
driver.memcpy_htod(d_jt, jt)
driver.memcpy_htod(d_neq, neq)
driver.memcpy_htod(d_liw, liw)
driver.memcpy_htod(d_lrw, lrw)
driver.memcpy_htod(d_itol, itol)
driver.memcpy_htod(d_iopt, iopt)
driver.memcpy_htod(d_rtol, rtol)
driver.memcpy_htod(d_iout, iout)
driver.memcpy_htod(d_tout, tout)
driver.memcpy_htod(d_itask, itask)
driver.memcpy_htod(d_istate, istate)
driver.memcpy_htod(d_y, y)
driver.memcpy_htod(d_atol, atol)
driver.memcpy_htod(d_iwork, iwork)
driver.memcpy_htod(d_rwork, rwork)
param = np.zeros((total_threads, self._parameterNumber), dtype=np.
float32)
try:
for i in range(len(parameters)):
for j in range(self._parameterNumber):
param[i][j] = parameters[i][j]
except IndexError:
pass
ary = sim.create_2D_array(param)
sim.copy2D_host_to_array(ary, param, self._parameterNumber * 4,
total_threads)
self._param_tex.set_array(ary)
if self._dt <= 0:
for i in range(self._resultNumber):
for j in range(total_threads):
tout[j] = self._timepoints[i]
driver.memcpy_htod(d_tout, tout)
self._compiledRunMethod(d_neq, d_y, d_t, d_tout, d_itol,
d_rtol, d_atol, d_itask, d_istate, d_iopt, d_rwork,
d_lrw, d_iwork, d_liw, d_jt, block=(threads, 1, 1),
grid=(blocks, 1))
driver.memcpy_dtoh(t, d_t)
driver.memcpy_dtoh(y, d_y)
driver.memcpy_dtoh(istate, d_istate)
for j in range(total_threads):
for k in range(self._speciesNumber):
ret_xt[j, 0, i, k] = y[j * self._speciesNumber + k]
if istate[j] < 0:
ret_istate[j] = 0
else:
tt = self._timepoints[0]
for i in range(self._resultNumber):
while 1:
next_time = min(tt + self._dt, self._timepoints[i])
for j in range(total_threads):
tout[j] = next_time
driver.memcpy_htod(d_tout, tout)
self._compiledRunMethod(d_neq, d_y, d_t, d_tout, d_itol,
d_rtol, d_atol, d_itask, d_istate, d_iopt, d_rwork,
d_lrw, d_iwork, d_liw, d_jt, block=(threads, 1, 1),
grid=(blocks, 1))
driver.memcpy_dtoh(t, d_t)
driver.memcpy_dtoh(y, d_y)
driver.memcpy_dtoh(istate, d_istate)
if np.abs(next_time - self._timepoints[i]) < 1e-05:
tt = next_time
break
tt = next_time
for j in range(total_threads):
for k in range(self._speciesNumber):
ret_xt[j, 0, i, k] = y[j * self._speciesNumber + k]
if istate[j] < 0:
ret_istate[j] = 0
for j in range(total_threads):
if ret_istate[j] == 0:
for i in range(self._resultNumber):
for k in range(self._speciesNumber):
ret_xt[j, 0, i, k] = float('NaN')
return ret_xt[0:experiments]
<|reserved_special_token_1|>
import os
import numpy as np
import pycuda
import pycuda.driver as driver
import cudasim.solvers.cuda.Simulator_mg as sim
import cudasim
class Lsoda(sim.SimulatorMG):
_param_tex = None
_step_code = None
_runtimeCompile = True
_lsoda_source_ = """
extern "C"{
#include <stdio.h>
__device__ myFex myfex;
__device__ myJex myjex;
__global__ void init_common(){
int tid = blockDim.x * blockIdx.x + threadIdx.x;
cuLsodaCommonBlockInit( &(common[tid]) );
}
__global__ void cuLsoda(int *neq, double *y, double *t, double *tout, int *itol,
double *rtol, double *atol, int *itask, int *istate, int *iopt,
double *rwork, int *lrw, int *iwork, int *liw, int *jt)
{
int tid = blockDim.x * blockIdx.x + threadIdx.x;
//if(tid==0){
//printf("I am thread time %d %f\\n", tid, t[0] );
//}
dlsoda_(myfex, neq+tid, y+tid*NSPECIES, t+tid, tout+tid, itol+tid, rtol+tid, atol+tid, itask+tid,
istate+tid, iopt+tid, rwork+tid*RSIZE, lrw+tid, iwork+tid*ISIZE, liw+tid, myjex, jt+tid, &(common[tid]) );
//if(tid==0){
//printf("I am done %d %f\\n", tid, t[0] );
//}
}
}
"""
def _compile(self, step_code):
# set beta to 1: repeats are pointless as simulation is deterministic
self._beta = 1
fc = open(os.path.join(os.path.split(os.path.realpath(__file__))[0], 'cuLsoda_all.cu'), 'r')
_sourceFromFile_ = fc.read()
_isize_ = "#define ISIZE " + repr(20 + self._speciesNumber) + "\n"
_rsize_ = "#define RSIZE " + repr(22 + self._speciesNumber * max(16, self._speciesNumber + 9)) + "\n"
_textures_ = "texture<float, 2, cudaReadModeElementType> param_tex;\n"
_common_block_ = "__device__ struct cuLsodaCommonBlock common[" + repr(1 * 1) + "];\n"
_code_ = _isize_ + _rsize_ + _textures_ + step_code + _sourceFromFile_ + _common_block_ + self._lsoda_source_
if self._dump:
of = open("full_ode_code.cu", "w")
print >> of, _code_
# dummy compile to determine optimal blockSize and gridSize
compiled = pycuda.compiler.SourceModule(_code_, nvcc="nvcc", options=[], no_extern_c=True, keep=False)
blocks, threads = self._getOptimalGPUParam(compiled.get_function("cuLsoda"))
blocks = self._MAXBLOCKSPERDEVICE
# real compile
_common_block_ = "__device__ struct cuLsodaCommonBlock common[" + repr(blocks * threads) + "];\n"
_code_ = _isize_ + _rsize_ + _textures_ + step_code + _sourceFromFile_ + _common_block_ + self._lsoda_source_
if self._dump:
of = open("full_ode_code.cu", "w")
print >> of, _code_
compiled = pycuda.compiler.SourceModule(_code_, nvcc="nvcc", options=[], no_extern_c=True, keep=False)
self._param_tex = compiled.get_texref("param_tex")
lsoda_kernel = compiled.get_function("cuLsoda")
return compiled, lsoda_kernel
def _run_simulation(self, parameters, init_values, blocks, threads, in_atol=1e-6, in_rtol=1e-6):
total_threads = threads * blocks
experiments = len(parameters)
neqn = self._speciesNumber
# compile
init_common_kernel = self._completeCode.get_function("init_common")
init_common_kernel(block=(threads, 1, 1), grid=(blocks, 1))
# output array
ret_xt = np.zeros([total_threads, 1, self._resultNumber, self._speciesNumber])
ret_istate = np.ones([total_threads], dtype=np.int32)
# calculate sizes of work spaces
isize = 20 + self._speciesNumber
rsize = 22 + self._speciesNumber * max(16, self._speciesNumber + 9)
# local variables
t = np.zeros([total_threads], dtype=np.float64)
jt = np.zeros([total_threads], dtype=np.int32)
neq = np.zeros([total_threads], dtype=np.int32)
itol = np.zeros([total_threads], dtype=np.int32)
iopt = np.zeros([total_threads], dtype=np.int32)
rtol = np.zeros([total_threads], dtype=np.float64)
iout = np.zeros([total_threads], dtype=np.int32)
tout = np.zeros([total_threads], dtype=np.float64)
itask = np.zeros([total_threads], dtype=np.int32)
istate = np.zeros([total_threads], dtype=np.int32)
atol = np.zeros([total_threads], dtype=np.float64)
liw = np.zeros([total_threads], dtype=np.int32)
lrw = np.zeros([total_threads], dtype=np.int32)
iwork = np.zeros([isize * total_threads], dtype=np.int32)
rwork = np.zeros([rsize * total_threads], dtype=np.float64)
y = np.zeros([self._speciesNumber * total_threads], dtype=np.float64)
for i in range(total_threads):
neq[i] = neqn
t[i] = 0
itol[i] = 1
itask[i] = 1
istate[i] = 1
iopt[i] = 0
jt[i] = 2
atol[i] = in_atol
rtol[i] = in_rtol
liw[i] = isize
lrw[i] = rsize
try:
# initial conditions
for j in range(self._speciesNumber):
# loop over species
y[i * self._speciesNumber + j] = init_values[i][j]
ret_xt[i, 0, 0, j] = init_values[i][j]
except IndexError:
pass
# allocate on device
d_t = driver.mem_alloc(t.size * t.dtype.itemsize)
d_jt = driver.mem_alloc(jt.size * jt.dtype.itemsize)
d_neq = driver.mem_alloc(neq.size * neq.dtype.itemsize)
d_liw = driver.mem_alloc(liw.size * liw.dtype.itemsize)
d_lrw = driver.mem_alloc(lrw.size * lrw.dtype.itemsize)
d_itol = driver.mem_alloc(itol.size * itol.dtype.itemsize)
d_iopt = driver.mem_alloc(iopt.size * iopt.dtype.itemsize)
d_rtol = driver.mem_alloc(rtol.size * rtol.dtype.itemsize)
d_iout = driver.mem_alloc(iout.size * iout.dtype.itemsize)
d_tout = driver.mem_alloc(tout.size * tout.dtype.itemsize)
d_itask = driver.mem_alloc(itask.size * itask.dtype.itemsize)
d_istate = driver.mem_alloc(istate.size * istate.dtype.itemsize)
d_y = driver.mem_alloc(y.size * y.dtype.itemsize)
d_atol = driver.mem_alloc(atol.size * atol.dtype.itemsize)
d_iwork = driver.mem_alloc(iwork.size * iwork.dtype.itemsize)
d_rwork = driver.mem_alloc(rwork.size * rwork.dtype.itemsize)
# copy to device
driver.memcpy_htod(d_t, t)
driver.memcpy_htod(d_jt, jt)
driver.memcpy_htod(d_neq, neq)
driver.memcpy_htod(d_liw, liw)
driver.memcpy_htod(d_lrw, lrw)
driver.memcpy_htod(d_itol, itol)
driver.memcpy_htod(d_iopt, iopt)
driver.memcpy_htod(d_rtol, rtol)
driver.memcpy_htod(d_iout, iout)
driver.memcpy_htod(d_tout, tout)
driver.memcpy_htod(d_itask, itask)
driver.memcpy_htod(d_istate, istate)
driver.memcpy_htod(d_y, y)
driver.memcpy_htod(d_atol, atol)
driver.memcpy_htod(d_iwork, iwork)
driver.memcpy_htod(d_rwork, rwork)
param = np.zeros((total_threads, self._parameterNumber), dtype=np.float32)
try:
for i in range(len(parameters)):
for j in range(self._parameterNumber):
param[i][j] = parameters[i][j]
except IndexError:
pass
# parameter texture
ary = sim.create_2D_array(param)
sim.copy2D_host_to_array(ary, param, self._parameterNumber * 4, total_threads)
self._param_tex.set_array(ary)
if self._dt <= 0:
for i in range(self._resultNumber):
for j in range(total_threads):
tout[j] = self._timepoints[i]
driver.memcpy_htod(d_tout, tout)
self._compiledRunMethod(d_neq, d_y, d_t, d_tout, d_itol, d_rtol, d_atol, d_itask, d_istate,
d_iopt, d_rwork, d_lrw, d_iwork, d_liw, d_jt, block=(threads, 1, 1),
grid=(blocks, 1))
driver.memcpy_dtoh(t, d_t)
driver.memcpy_dtoh(y, d_y)
driver.memcpy_dtoh(istate, d_istate)
for j in range(total_threads):
for k in range(self._speciesNumber):
ret_xt[j, 0, i, k] = y[j * self._speciesNumber + k]
if istate[j] < 0:
ret_istate[j] = 0
# end of loop over time points
else:
tt = self._timepoints[0]
for i in range(self._resultNumber):
while 1:
next_time = min(tt + self._dt, self._timepoints[i])
for j in range(total_threads):
tout[j] = next_time
driver.memcpy_htod(d_tout, tout)
self._compiledRunMethod(d_neq, d_y, d_t, d_tout, d_itol, d_rtol, d_atol, d_itask, d_istate,
d_iopt, d_rwork, d_lrw, d_iwork, d_liw, d_jt, block=(threads, 1, 1),
grid=(blocks, 1))
driver.memcpy_dtoh(t, d_t)
driver.memcpy_dtoh(y, d_y)
driver.memcpy_dtoh(istate, d_istate)
if np.abs(next_time - self._timepoints[i]) < 1e-5:
tt = next_time
break
tt = next_time
for j in range(total_threads):
for k in range(self._speciesNumber):
ret_xt[j, 0, i, k] = y[j * self._speciesNumber + k]
if istate[j] < 0:
ret_istate[j] = 0
# loop over and check ret_istate
# it will will be zero if there was problems
for j in range(total_threads):
if ret_istate[j] == 0:
for i in range(self._resultNumber):
for k in range(self._speciesNumber):
ret_xt[j, 0, i, k] = float('NaN')
return ret_xt[0:experiments]
|
flexible
|
{
"blob_id": "e9754530bef7614c16cdba0e818c1fa188e2d9a2",
"index": 9940,
"step-1": "<mask token>\n\n\nclass Lsoda(sim.SimulatorMG):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def _compile(self, step_code):\n self._beta = 1\n fc = open(os.path.join(os.path.split(os.path.realpath(__file__))[0],\n 'cuLsoda_all.cu'), 'r')\n _sourceFromFile_ = fc.read()\n _isize_ = '#define ISIZE ' + repr(20 + self._speciesNumber) + '\\n'\n _rsize_ = '#define RSIZE ' + repr(22 + self._speciesNumber * max(16,\n self._speciesNumber + 9)) + '\\n'\n _textures_ = 'texture<float, 2, cudaReadModeElementType> param_tex;\\n'\n _common_block_ = '__device__ struct cuLsodaCommonBlock common[' + repr(\n 1 * 1) + '];\\n'\n _code_ = (_isize_ + _rsize_ + _textures_ + step_code +\n _sourceFromFile_ + _common_block_ + self._lsoda_source_)\n if self._dump:\n of = open('full_ode_code.cu', 'w')\n print >> of, _code_\n compiled = pycuda.compiler.SourceModule(_code_, nvcc='nvcc',\n options=[], no_extern_c=True, keep=False)\n blocks, threads = self._getOptimalGPUParam(compiled.get_function(\n 'cuLsoda'))\n blocks = self._MAXBLOCKSPERDEVICE\n _common_block_ = '__device__ struct cuLsodaCommonBlock common[' + repr(\n blocks * threads) + '];\\n'\n _code_ = (_isize_ + _rsize_ + _textures_ + step_code +\n _sourceFromFile_ + _common_block_ + self._lsoda_source_)\n if self._dump:\n of = open('full_ode_code.cu', 'w')\n print >> of, _code_\n compiled = pycuda.compiler.SourceModule(_code_, nvcc='nvcc',\n options=[], no_extern_c=True, keep=False)\n self._param_tex = compiled.get_texref('param_tex')\n lsoda_kernel = compiled.get_function('cuLsoda')\n return compiled, lsoda_kernel\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Lsoda(sim.SimulatorMG):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def _compile(self, step_code):\n self._beta = 1\n fc = open(os.path.join(os.path.split(os.path.realpath(__file__))[0],\n 'cuLsoda_all.cu'), 'r')\n _sourceFromFile_ = fc.read()\n _isize_ = '#define ISIZE ' + repr(20 + self._speciesNumber) + '\\n'\n _rsize_ = '#define RSIZE ' + repr(22 + self._speciesNumber * max(16,\n self._speciesNumber + 9)) + '\\n'\n _textures_ = 'texture<float, 2, cudaReadModeElementType> param_tex;\\n'\n _common_block_ = '__device__ struct cuLsodaCommonBlock common[' + repr(\n 1 * 1) + '];\\n'\n _code_ = (_isize_ + _rsize_ + _textures_ + step_code +\n _sourceFromFile_ + _common_block_ + self._lsoda_source_)\n if self._dump:\n of = open('full_ode_code.cu', 'w')\n print >> of, _code_\n compiled = pycuda.compiler.SourceModule(_code_, nvcc='nvcc',\n options=[], no_extern_c=True, keep=False)\n blocks, threads = self._getOptimalGPUParam(compiled.get_function(\n 'cuLsoda'))\n blocks = self._MAXBLOCKSPERDEVICE\n _common_block_ = '__device__ struct cuLsodaCommonBlock common[' + repr(\n blocks * threads) + '];\\n'\n _code_ = (_isize_ + _rsize_ + _textures_ + step_code +\n _sourceFromFile_ + _common_block_ + self._lsoda_source_)\n if self._dump:\n of = open('full_ode_code.cu', 'w')\n print >> of, _code_\n compiled = pycuda.compiler.SourceModule(_code_, nvcc='nvcc',\n options=[], no_extern_c=True, keep=False)\n self._param_tex = compiled.get_texref('param_tex')\n lsoda_kernel = compiled.get_function('cuLsoda')\n return compiled, lsoda_kernel\n\n def _run_simulation(self, parameters, init_values, blocks, threads,\n in_atol=1e-06, in_rtol=1e-06):\n total_threads = threads * blocks\n experiments = len(parameters)\n neqn = self._speciesNumber\n init_common_kernel = self._completeCode.get_function('init_common')\n init_common_kernel(block=(threads, 1, 1), grid=(blocks, 1))\n ret_xt = np.zeros([total_threads, 1, self._resultNumber, self.\n _speciesNumber])\n ret_istate = np.ones([total_threads], dtype=np.int32)\n isize = 20 + self._speciesNumber\n rsize = 22 + self._speciesNumber * max(16, self._speciesNumber + 9)\n t = np.zeros([total_threads], dtype=np.float64)\n jt = np.zeros([total_threads], dtype=np.int32)\n neq = np.zeros([total_threads], dtype=np.int32)\n itol = np.zeros([total_threads], dtype=np.int32)\n iopt = np.zeros([total_threads], dtype=np.int32)\n rtol = np.zeros([total_threads], dtype=np.float64)\n iout = np.zeros([total_threads], dtype=np.int32)\n tout = np.zeros([total_threads], dtype=np.float64)\n itask = np.zeros([total_threads], dtype=np.int32)\n istate = np.zeros([total_threads], dtype=np.int32)\n atol = np.zeros([total_threads], dtype=np.float64)\n liw = np.zeros([total_threads], dtype=np.int32)\n lrw = np.zeros([total_threads], dtype=np.int32)\n iwork = np.zeros([isize * total_threads], dtype=np.int32)\n rwork = np.zeros([rsize * total_threads], dtype=np.float64)\n y = np.zeros([self._speciesNumber * total_threads], dtype=np.float64)\n for i in range(total_threads):\n neq[i] = neqn\n t[i] = 0\n itol[i] = 1\n itask[i] = 1\n istate[i] = 1\n iopt[i] = 0\n jt[i] = 2\n atol[i] = in_atol\n rtol[i] = in_rtol\n liw[i] = isize\n lrw[i] = rsize\n try:\n for j in range(self._speciesNumber):\n y[i * self._speciesNumber + j] = init_values[i][j]\n ret_xt[i, 0, 0, j] = init_values[i][j]\n except IndexError:\n pass\n d_t = driver.mem_alloc(t.size * t.dtype.itemsize)\n d_jt = driver.mem_alloc(jt.size * jt.dtype.itemsize)\n d_neq = driver.mem_alloc(neq.size * 
neq.dtype.itemsize)\n d_liw = driver.mem_alloc(liw.size * liw.dtype.itemsize)\n d_lrw = driver.mem_alloc(lrw.size * lrw.dtype.itemsize)\n d_itol = driver.mem_alloc(itol.size * itol.dtype.itemsize)\n d_iopt = driver.mem_alloc(iopt.size * iopt.dtype.itemsize)\n d_rtol = driver.mem_alloc(rtol.size * rtol.dtype.itemsize)\n d_iout = driver.mem_alloc(iout.size * iout.dtype.itemsize)\n d_tout = driver.mem_alloc(tout.size * tout.dtype.itemsize)\n d_itask = driver.mem_alloc(itask.size * itask.dtype.itemsize)\n d_istate = driver.mem_alloc(istate.size * istate.dtype.itemsize)\n d_y = driver.mem_alloc(y.size * y.dtype.itemsize)\n d_atol = driver.mem_alloc(atol.size * atol.dtype.itemsize)\n d_iwork = driver.mem_alloc(iwork.size * iwork.dtype.itemsize)\n d_rwork = driver.mem_alloc(rwork.size * rwork.dtype.itemsize)\n driver.memcpy_htod(d_t, t)\n driver.memcpy_htod(d_jt, jt)\n driver.memcpy_htod(d_neq, neq)\n driver.memcpy_htod(d_liw, liw)\n driver.memcpy_htod(d_lrw, lrw)\n driver.memcpy_htod(d_itol, itol)\n driver.memcpy_htod(d_iopt, iopt)\n driver.memcpy_htod(d_rtol, rtol)\n driver.memcpy_htod(d_iout, iout)\n driver.memcpy_htod(d_tout, tout)\n driver.memcpy_htod(d_itask, itask)\n driver.memcpy_htod(d_istate, istate)\n driver.memcpy_htod(d_y, y)\n driver.memcpy_htod(d_atol, atol)\n driver.memcpy_htod(d_iwork, iwork)\n driver.memcpy_htod(d_rwork, rwork)\n param = np.zeros((total_threads, self._parameterNumber), dtype=np.\n float32)\n try:\n for i in range(len(parameters)):\n for j in range(self._parameterNumber):\n param[i][j] = parameters[i][j]\n except IndexError:\n pass\n ary = sim.create_2D_array(param)\n sim.copy2D_host_to_array(ary, param, self._parameterNumber * 4,\n total_threads)\n self._param_tex.set_array(ary)\n if self._dt <= 0:\n for i in range(self._resultNumber):\n for j in range(total_threads):\n tout[j] = self._timepoints[i]\n driver.memcpy_htod(d_tout, tout)\n self._compiledRunMethod(d_neq, d_y, d_t, d_tout, d_itol,\n d_rtol, d_atol, d_itask, d_istate, d_iopt, d_rwork,\n d_lrw, d_iwork, d_liw, d_jt, block=(threads, 1, 1),\n grid=(blocks, 1))\n driver.memcpy_dtoh(t, d_t)\n driver.memcpy_dtoh(y, d_y)\n driver.memcpy_dtoh(istate, d_istate)\n for j in range(total_threads):\n for k in range(self._speciesNumber):\n ret_xt[j, 0, i, k] = y[j * self._speciesNumber + k]\n if istate[j] < 0:\n ret_istate[j] = 0\n else:\n tt = self._timepoints[0]\n for i in range(self._resultNumber):\n while 1:\n next_time = min(tt + self._dt, self._timepoints[i])\n for j in range(total_threads):\n tout[j] = next_time\n driver.memcpy_htod(d_tout, tout)\n self._compiledRunMethod(d_neq, d_y, d_t, d_tout, d_itol,\n d_rtol, d_atol, d_itask, d_istate, d_iopt, d_rwork,\n d_lrw, d_iwork, d_liw, d_jt, block=(threads, 1, 1),\n grid=(blocks, 1))\n driver.memcpy_dtoh(t, d_t)\n driver.memcpy_dtoh(y, d_y)\n driver.memcpy_dtoh(istate, d_istate)\n if np.abs(next_time - self._timepoints[i]) < 1e-05:\n tt = next_time\n break\n tt = next_time\n for j in range(total_threads):\n for k in range(self._speciesNumber):\n ret_xt[j, 0, i, k] = y[j * self._speciesNumber + k]\n if istate[j] < 0:\n ret_istate[j] = 0\n for j in range(total_threads):\n if ret_istate[j] == 0:\n for i in range(self._resultNumber):\n for k in range(self._speciesNumber):\n ret_xt[j, 0, i, k] = float('NaN')\n return ret_xt[0:experiments]\n",
"step-3": "<mask token>\n\n\nclass Lsoda(sim.SimulatorMG):\n _param_tex = None\n _step_code = None\n _runtimeCompile = True\n _lsoda_source_ = \"\"\"\n \n extern \"C\"{\n\n #include <stdio.h>\n \n __device__ myFex myfex;\n __device__ myJex myjex;\n \n __global__ void init_common(){\n int tid = blockDim.x * blockIdx.x + threadIdx.x;\n cuLsodaCommonBlockInit( &(common[tid]) );\n }\n \n __global__ void cuLsoda(int *neq, double *y, double *t, double *tout, int *itol, \n double *rtol, double *atol, int *itask, int *istate, int *iopt, \n double *rwork, int *lrw, int *iwork, int *liw, int *jt)\n {\n int tid = blockDim.x * blockIdx.x + threadIdx.x;\n\n //if(tid==0){\n //printf(\"I am thread time %d %f\\\\n\", tid, t[0] );\n //}\n\n dlsoda_(myfex, neq+tid, y+tid*NSPECIES, t+tid, tout+tid, itol+tid, rtol+tid, atol+tid, itask+tid, \n istate+tid, iopt+tid, rwork+tid*RSIZE, lrw+tid, iwork+tid*ISIZE, liw+tid, myjex, jt+tid, &(common[tid]) );\n\n //if(tid==0){\n //printf(\"I am done %d %f\\\\n\", tid, t[0] );\n //}\n }\n }\n \n \"\"\"\n\n def _compile(self, step_code):\n self._beta = 1\n fc = open(os.path.join(os.path.split(os.path.realpath(__file__))[0],\n 'cuLsoda_all.cu'), 'r')\n _sourceFromFile_ = fc.read()\n _isize_ = '#define ISIZE ' + repr(20 + self._speciesNumber) + '\\n'\n _rsize_ = '#define RSIZE ' + repr(22 + self._speciesNumber * max(16,\n self._speciesNumber + 9)) + '\\n'\n _textures_ = 'texture<float, 2, cudaReadModeElementType> param_tex;\\n'\n _common_block_ = '__device__ struct cuLsodaCommonBlock common[' + repr(\n 1 * 1) + '];\\n'\n _code_ = (_isize_ + _rsize_ + _textures_ + step_code +\n _sourceFromFile_ + _common_block_ + self._lsoda_source_)\n if self._dump:\n of = open('full_ode_code.cu', 'w')\n print >> of, _code_\n compiled = pycuda.compiler.SourceModule(_code_, nvcc='nvcc',\n options=[], no_extern_c=True, keep=False)\n blocks, threads = self._getOptimalGPUParam(compiled.get_function(\n 'cuLsoda'))\n blocks = self._MAXBLOCKSPERDEVICE\n _common_block_ = '__device__ struct cuLsodaCommonBlock common[' + repr(\n blocks * threads) + '];\\n'\n _code_ = (_isize_ + _rsize_ + _textures_ + step_code +\n _sourceFromFile_ + _common_block_ + self._lsoda_source_)\n if self._dump:\n of = open('full_ode_code.cu', 'w')\n print >> of, _code_\n compiled = pycuda.compiler.SourceModule(_code_, nvcc='nvcc',\n options=[], no_extern_c=True, keep=False)\n self._param_tex = compiled.get_texref('param_tex')\n lsoda_kernel = compiled.get_function('cuLsoda')\n return compiled, lsoda_kernel\n\n def _run_simulation(self, parameters, init_values, blocks, threads,\n in_atol=1e-06, in_rtol=1e-06):\n total_threads = threads * blocks\n experiments = len(parameters)\n neqn = self._speciesNumber\n init_common_kernel = self._completeCode.get_function('init_common')\n init_common_kernel(block=(threads, 1, 1), grid=(blocks, 1))\n ret_xt = np.zeros([total_threads, 1, self._resultNumber, self.\n _speciesNumber])\n ret_istate = np.ones([total_threads], dtype=np.int32)\n isize = 20 + self._speciesNumber\n rsize = 22 + self._speciesNumber * max(16, self._speciesNumber + 9)\n t = np.zeros([total_threads], dtype=np.float64)\n jt = np.zeros([total_threads], dtype=np.int32)\n neq = np.zeros([total_threads], dtype=np.int32)\n itol = np.zeros([total_threads], dtype=np.int32)\n iopt = np.zeros([total_threads], dtype=np.int32)\n rtol = np.zeros([total_threads], dtype=np.float64)\n iout = np.zeros([total_threads], dtype=np.int32)\n tout = np.zeros([total_threads], dtype=np.float64)\n itask = np.zeros([total_threads], dtype=np.int32)\n 
istate = np.zeros([total_threads], dtype=np.int32)\n atol = np.zeros([total_threads], dtype=np.float64)\n liw = np.zeros([total_threads], dtype=np.int32)\n lrw = np.zeros([total_threads], dtype=np.int32)\n iwork = np.zeros([isize * total_threads], dtype=np.int32)\n rwork = np.zeros([rsize * total_threads], dtype=np.float64)\n y = np.zeros([self._speciesNumber * total_threads], dtype=np.float64)\n for i in range(total_threads):\n neq[i] = neqn\n t[i] = 0\n itol[i] = 1\n itask[i] = 1\n istate[i] = 1\n iopt[i] = 0\n jt[i] = 2\n atol[i] = in_atol\n rtol[i] = in_rtol\n liw[i] = isize\n lrw[i] = rsize\n try:\n for j in range(self._speciesNumber):\n y[i * self._speciesNumber + j] = init_values[i][j]\n ret_xt[i, 0, 0, j] = init_values[i][j]\n except IndexError:\n pass\n d_t = driver.mem_alloc(t.size * t.dtype.itemsize)\n d_jt = driver.mem_alloc(jt.size * jt.dtype.itemsize)\n d_neq = driver.mem_alloc(neq.size * neq.dtype.itemsize)\n d_liw = driver.mem_alloc(liw.size * liw.dtype.itemsize)\n d_lrw = driver.mem_alloc(lrw.size * lrw.dtype.itemsize)\n d_itol = driver.mem_alloc(itol.size * itol.dtype.itemsize)\n d_iopt = driver.mem_alloc(iopt.size * iopt.dtype.itemsize)\n d_rtol = driver.mem_alloc(rtol.size * rtol.dtype.itemsize)\n d_iout = driver.mem_alloc(iout.size * iout.dtype.itemsize)\n d_tout = driver.mem_alloc(tout.size * tout.dtype.itemsize)\n d_itask = driver.mem_alloc(itask.size * itask.dtype.itemsize)\n d_istate = driver.mem_alloc(istate.size * istate.dtype.itemsize)\n d_y = driver.mem_alloc(y.size * y.dtype.itemsize)\n d_atol = driver.mem_alloc(atol.size * atol.dtype.itemsize)\n d_iwork = driver.mem_alloc(iwork.size * iwork.dtype.itemsize)\n d_rwork = driver.mem_alloc(rwork.size * rwork.dtype.itemsize)\n driver.memcpy_htod(d_t, t)\n driver.memcpy_htod(d_jt, jt)\n driver.memcpy_htod(d_neq, neq)\n driver.memcpy_htod(d_liw, liw)\n driver.memcpy_htod(d_lrw, lrw)\n driver.memcpy_htod(d_itol, itol)\n driver.memcpy_htod(d_iopt, iopt)\n driver.memcpy_htod(d_rtol, rtol)\n driver.memcpy_htod(d_iout, iout)\n driver.memcpy_htod(d_tout, tout)\n driver.memcpy_htod(d_itask, itask)\n driver.memcpy_htod(d_istate, istate)\n driver.memcpy_htod(d_y, y)\n driver.memcpy_htod(d_atol, atol)\n driver.memcpy_htod(d_iwork, iwork)\n driver.memcpy_htod(d_rwork, rwork)\n param = np.zeros((total_threads, self._parameterNumber), dtype=np.\n float32)\n try:\n for i in range(len(parameters)):\n for j in range(self._parameterNumber):\n param[i][j] = parameters[i][j]\n except IndexError:\n pass\n ary = sim.create_2D_array(param)\n sim.copy2D_host_to_array(ary, param, self._parameterNumber * 4,\n total_threads)\n self._param_tex.set_array(ary)\n if self._dt <= 0:\n for i in range(self._resultNumber):\n for j in range(total_threads):\n tout[j] = self._timepoints[i]\n driver.memcpy_htod(d_tout, tout)\n self._compiledRunMethod(d_neq, d_y, d_t, d_tout, d_itol,\n d_rtol, d_atol, d_itask, d_istate, d_iopt, d_rwork,\n d_lrw, d_iwork, d_liw, d_jt, block=(threads, 1, 1),\n grid=(blocks, 1))\n driver.memcpy_dtoh(t, d_t)\n driver.memcpy_dtoh(y, d_y)\n driver.memcpy_dtoh(istate, d_istate)\n for j in range(total_threads):\n for k in range(self._speciesNumber):\n ret_xt[j, 0, i, k] = y[j * self._speciesNumber + k]\n if istate[j] < 0:\n ret_istate[j] = 0\n else:\n tt = self._timepoints[0]\n for i in range(self._resultNumber):\n while 1:\n next_time = min(tt + self._dt, self._timepoints[i])\n for j in range(total_threads):\n tout[j] = next_time\n driver.memcpy_htod(d_tout, tout)\n self._compiledRunMethod(d_neq, d_y, d_t, d_tout, d_itol,\n 
d_rtol, d_atol, d_itask, d_istate, d_iopt, d_rwork,\n d_lrw, d_iwork, d_liw, d_jt, block=(threads, 1, 1),\n grid=(blocks, 1))\n driver.memcpy_dtoh(t, d_t)\n driver.memcpy_dtoh(y, d_y)\n driver.memcpy_dtoh(istate, d_istate)\n if np.abs(next_time - self._timepoints[i]) < 1e-05:\n tt = next_time\n break\n tt = next_time\n for j in range(total_threads):\n for k in range(self._speciesNumber):\n ret_xt[j, 0, i, k] = y[j * self._speciesNumber + k]\n if istate[j] < 0:\n ret_istate[j] = 0\n for j in range(total_threads):\n if ret_istate[j] == 0:\n for i in range(self._resultNumber):\n for k in range(self._speciesNumber):\n ret_xt[j, 0, i, k] = float('NaN')\n return ret_xt[0:experiments]\n",
"step-4": "import os\nimport numpy as np\nimport pycuda\nimport pycuda.driver as driver\nimport cudasim.solvers.cuda.Simulator_mg as sim\nimport cudasim\n\n\nclass Lsoda(sim.SimulatorMG):\n _param_tex = None\n _step_code = None\n _runtimeCompile = True\n _lsoda_source_ = \"\"\"\n \n extern \"C\"{\n\n #include <stdio.h>\n \n __device__ myFex myfex;\n __device__ myJex myjex;\n \n __global__ void init_common(){\n int tid = blockDim.x * blockIdx.x + threadIdx.x;\n cuLsodaCommonBlockInit( &(common[tid]) );\n }\n \n __global__ void cuLsoda(int *neq, double *y, double *t, double *tout, int *itol, \n double *rtol, double *atol, int *itask, int *istate, int *iopt, \n double *rwork, int *lrw, int *iwork, int *liw, int *jt)\n {\n int tid = blockDim.x * blockIdx.x + threadIdx.x;\n\n //if(tid==0){\n //printf(\"I am thread time %d %f\\\\n\", tid, t[0] );\n //}\n\n dlsoda_(myfex, neq+tid, y+tid*NSPECIES, t+tid, tout+tid, itol+tid, rtol+tid, atol+tid, itask+tid, \n istate+tid, iopt+tid, rwork+tid*RSIZE, lrw+tid, iwork+tid*ISIZE, liw+tid, myjex, jt+tid, &(common[tid]) );\n\n //if(tid==0){\n //printf(\"I am done %d %f\\\\n\", tid, t[0] );\n //}\n }\n }\n \n \"\"\"\n\n def _compile(self, step_code):\n self._beta = 1\n fc = open(os.path.join(os.path.split(os.path.realpath(__file__))[0],\n 'cuLsoda_all.cu'), 'r')\n _sourceFromFile_ = fc.read()\n _isize_ = '#define ISIZE ' + repr(20 + self._speciesNumber) + '\\n'\n _rsize_ = '#define RSIZE ' + repr(22 + self._speciesNumber * max(16,\n self._speciesNumber + 9)) + '\\n'\n _textures_ = 'texture<float, 2, cudaReadModeElementType> param_tex;\\n'\n _common_block_ = '__device__ struct cuLsodaCommonBlock common[' + repr(\n 1 * 1) + '];\\n'\n _code_ = (_isize_ + _rsize_ + _textures_ + step_code +\n _sourceFromFile_ + _common_block_ + self._lsoda_source_)\n if self._dump:\n of = open('full_ode_code.cu', 'w')\n print >> of, _code_\n compiled = pycuda.compiler.SourceModule(_code_, nvcc='nvcc',\n options=[], no_extern_c=True, keep=False)\n blocks, threads = self._getOptimalGPUParam(compiled.get_function(\n 'cuLsoda'))\n blocks = self._MAXBLOCKSPERDEVICE\n _common_block_ = '__device__ struct cuLsodaCommonBlock common[' + repr(\n blocks * threads) + '];\\n'\n _code_ = (_isize_ + _rsize_ + _textures_ + step_code +\n _sourceFromFile_ + _common_block_ + self._lsoda_source_)\n if self._dump:\n of = open('full_ode_code.cu', 'w')\n print >> of, _code_\n compiled = pycuda.compiler.SourceModule(_code_, nvcc='nvcc',\n options=[], no_extern_c=True, keep=False)\n self._param_tex = compiled.get_texref('param_tex')\n lsoda_kernel = compiled.get_function('cuLsoda')\n return compiled, lsoda_kernel\n\n def _run_simulation(self, parameters, init_values, blocks, threads,\n in_atol=1e-06, in_rtol=1e-06):\n total_threads = threads * blocks\n experiments = len(parameters)\n neqn = self._speciesNumber\n init_common_kernel = self._completeCode.get_function('init_common')\n init_common_kernel(block=(threads, 1, 1), grid=(blocks, 1))\n ret_xt = np.zeros([total_threads, 1, self._resultNumber, self.\n _speciesNumber])\n ret_istate = np.ones([total_threads], dtype=np.int32)\n isize = 20 + self._speciesNumber\n rsize = 22 + self._speciesNumber * max(16, self._speciesNumber + 9)\n t = np.zeros([total_threads], dtype=np.float64)\n jt = np.zeros([total_threads], dtype=np.int32)\n neq = np.zeros([total_threads], dtype=np.int32)\n itol = np.zeros([total_threads], dtype=np.int32)\n iopt = np.zeros([total_threads], dtype=np.int32)\n rtol = np.zeros([total_threads], dtype=np.float64)\n iout = 
np.zeros([total_threads], dtype=np.int32)\n tout = np.zeros([total_threads], dtype=np.float64)\n itask = np.zeros([total_threads], dtype=np.int32)\n istate = np.zeros([total_threads], dtype=np.int32)\n atol = np.zeros([total_threads], dtype=np.float64)\n liw = np.zeros([total_threads], dtype=np.int32)\n lrw = np.zeros([total_threads], dtype=np.int32)\n iwork = np.zeros([isize * total_threads], dtype=np.int32)\n rwork = np.zeros([rsize * total_threads], dtype=np.float64)\n y = np.zeros([self._speciesNumber * total_threads], dtype=np.float64)\n for i in range(total_threads):\n neq[i] = neqn\n t[i] = 0\n itol[i] = 1\n itask[i] = 1\n istate[i] = 1\n iopt[i] = 0\n jt[i] = 2\n atol[i] = in_atol\n rtol[i] = in_rtol\n liw[i] = isize\n lrw[i] = rsize\n try:\n for j in range(self._speciesNumber):\n y[i * self._speciesNumber + j] = init_values[i][j]\n ret_xt[i, 0, 0, j] = init_values[i][j]\n except IndexError:\n pass\n d_t = driver.mem_alloc(t.size * t.dtype.itemsize)\n d_jt = driver.mem_alloc(jt.size * jt.dtype.itemsize)\n d_neq = driver.mem_alloc(neq.size * neq.dtype.itemsize)\n d_liw = driver.mem_alloc(liw.size * liw.dtype.itemsize)\n d_lrw = driver.mem_alloc(lrw.size * lrw.dtype.itemsize)\n d_itol = driver.mem_alloc(itol.size * itol.dtype.itemsize)\n d_iopt = driver.mem_alloc(iopt.size * iopt.dtype.itemsize)\n d_rtol = driver.mem_alloc(rtol.size * rtol.dtype.itemsize)\n d_iout = driver.mem_alloc(iout.size * iout.dtype.itemsize)\n d_tout = driver.mem_alloc(tout.size * tout.dtype.itemsize)\n d_itask = driver.mem_alloc(itask.size * itask.dtype.itemsize)\n d_istate = driver.mem_alloc(istate.size * istate.dtype.itemsize)\n d_y = driver.mem_alloc(y.size * y.dtype.itemsize)\n d_atol = driver.mem_alloc(atol.size * atol.dtype.itemsize)\n d_iwork = driver.mem_alloc(iwork.size * iwork.dtype.itemsize)\n d_rwork = driver.mem_alloc(rwork.size * rwork.dtype.itemsize)\n driver.memcpy_htod(d_t, t)\n driver.memcpy_htod(d_jt, jt)\n driver.memcpy_htod(d_neq, neq)\n driver.memcpy_htod(d_liw, liw)\n driver.memcpy_htod(d_lrw, lrw)\n driver.memcpy_htod(d_itol, itol)\n driver.memcpy_htod(d_iopt, iopt)\n driver.memcpy_htod(d_rtol, rtol)\n driver.memcpy_htod(d_iout, iout)\n driver.memcpy_htod(d_tout, tout)\n driver.memcpy_htod(d_itask, itask)\n driver.memcpy_htod(d_istate, istate)\n driver.memcpy_htod(d_y, y)\n driver.memcpy_htod(d_atol, atol)\n driver.memcpy_htod(d_iwork, iwork)\n driver.memcpy_htod(d_rwork, rwork)\n param = np.zeros((total_threads, self._parameterNumber), dtype=np.\n float32)\n try:\n for i in range(len(parameters)):\n for j in range(self._parameterNumber):\n param[i][j] = parameters[i][j]\n except IndexError:\n pass\n ary = sim.create_2D_array(param)\n sim.copy2D_host_to_array(ary, param, self._parameterNumber * 4,\n total_threads)\n self._param_tex.set_array(ary)\n if self._dt <= 0:\n for i in range(self._resultNumber):\n for j in range(total_threads):\n tout[j] = self._timepoints[i]\n driver.memcpy_htod(d_tout, tout)\n self._compiledRunMethod(d_neq, d_y, d_t, d_tout, d_itol,\n d_rtol, d_atol, d_itask, d_istate, d_iopt, d_rwork,\n d_lrw, d_iwork, d_liw, d_jt, block=(threads, 1, 1),\n grid=(blocks, 1))\n driver.memcpy_dtoh(t, d_t)\n driver.memcpy_dtoh(y, d_y)\n driver.memcpy_dtoh(istate, d_istate)\n for j in range(total_threads):\n for k in range(self._speciesNumber):\n ret_xt[j, 0, i, k] = y[j * self._speciesNumber + k]\n if istate[j] < 0:\n ret_istate[j] = 0\n else:\n tt = self._timepoints[0]\n for i in range(self._resultNumber):\n while 1:\n next_time = min(tt + self._dt, self._timepoints[i])\n for j 
in range(total_threads):\n tout[j] = next_time\n driver.memcpy_htod(d_tout, tout)\n self._compiledRunMethod(d_neq, d_y, d_t, d_tout, d_itol,\n d_rtol, d_atol, d_itask, d_istate, d_iopt, d_rwork,\n d_lrw, d_iwork, d_liw, d_jt, block=(threads, 1, 1),\n grid=(blocks, 1))\n driver.memcpy_dtoh(t, d_t)\n driver.memcpy_dtoh(y, d_y)\n driver.memcpy_dtoh(istate, d_istate)\n if np.abs(next_time - self._timepoints[i]) < 1e-05:\n tt = next_time\n break\n tt = next_time\n for j in range(total_threads):\n for k in range(self._speciesNumber):\n ret_xt[j, 0, i, k] = y[j * self._speciesNumber + k]\n if istate[j] < 0:\n ret_istate[j] = 0\n for j in range(total_threads):\n if ret_istate[j] == 0:\n for i in range(self._resultNumber):\n for k in range(self._speciesNumber):\n ret_xt[j, 0, i, k] = float('NaN')\n return ret_xt[0:experiments]\n",
"step-5": "import os\n\nimport numpy as np\nimport pycuda\nimport pycuda.driver as driver\n\nimport cudasim.solvers.cuda.Simulator_mg as sim\nimport cudasim\n\nclass Lsoda(sim.SimulatorMG):\n _param_tex = None\n\n _step_code = None\n _runtimeCompile = True\n\n _lsoda_source_ = \"\"\"\n \n extern \"C\"{\n\n #include <stdio.h>\n \n __device__ myFex myfex;\n __device__ myJex myjex;\n \n __global__ void init_common(){\n int tid = blockDim.x * blockIdx.x + threadIdx.x;\n cuLsodaCommonBlockInit( &(common[tid]) );\n }\n \n __global__ void cuLsoda(int *neq, double *y, double *t, double *tout, int *itol, \n double *rtol, double *atol, int *itask, int *istate, int *iopt, \n double *rwork, int *lrw, int *iwork, int *liw, int *jt)\n {\n int tid = blockDim.x * blockIdx.x + threadIdx.x;\n\n //if(tid==0){\n //printf(\"I am thread time %d %f\\\\n\", tid, t[0] );\n //}\n\n dlsoda_(myfex, neq+tid, y+tid*NSPECIES, t+tid, tout+tid, itol+tid, rtol+tid, atol+tid, itask+tid, \n istate+tid, iopt+tid, rwork+tid*RSIZE, lrw+tid, iwork+tid*ISIZE, liw+tid, myjex, jt+tid, &(common[tid]) );\n\n //if(tid==0){\n //printf(\"I am done %d %f\\\\n\", tid, t[0] );\n //}\n }\n }\n \n \"\"\"\n\n def _compile(self, step_code):\n # set beta to 1: repeats are pointless as simulation is deterministic\n self._beta = 1\n\n fc = open(os.path.join(os.path.split(os.path.realpath(__file__))[0], 'cuLsoda_all.cu'), 'r')\n\n _sourceFromFile_ = fc.read()\n\n _isize_ = \"#define ISIZE \" + repr(20 + self._speciesNumber) + \"\\n\"\n _rsize_ = \"#define RSIZE \" + repr(22 + self._speciesNumber * max(16, self._speciesNumber + 9)) + \"\\n\"\n\n _textures_ = \"texture<float, 2, cudaReadModeElementType> param_tex;\\n\"\n _common_block_ = \"__device__ struct cuLsodaCommonBlock common[\" + repr(1 * 1) + \"];\\n\"\n _code_ = _isize_ + _rsize_ + _textures_ + step_code + _sourceFromFile_ + _common_block_ + self._lsoda_source_\n\n if self._dump:\n of = open(\"full_ode_code.cu\", \"w\")\n print >> of, _code_\n\n # dummy compile to determine optimal blockSize and gridSize\n compiled = pycuda.compiler.SourceModule(_code_, nvcc=\"nvcc\", options=[], no_extern_c=True, keep=False)\n\n blocks, threads = self._getOptimalGPUParam(compiled.get_function(\"cuLsoda\"))\n blocks = self._MAXBLOCKSPERDEVICE\n\n # real compile\n _common_block_ = \"__device__ struct cuLsodaCommonBlock common[\" + repr(blocks * threads) + \"];\\n\"\n _code_ = _isize_ + _rsize_ + _textures_ + step_code + _sourceFromFile_ + _common_block_ + self._lsoda_source_\n\n if self._dump:\n of = open(\"full_ode_code.cu\", \"w\")\n print >> of, _code_\n\n compiled = pycuda.compiler.SourceModule(_code_, nvcc=\"nvcc\", options=[], no_extern_c=True, keep=False)\n\n self._param_tex = compiled.get_texref(\"param_tex\")\n\n lsoda_kernel = compiled.get_function(\"cuLsoda\")\n return compiled, lsoda_kernel\n\n def _run_simulation(self, parameters, init_values, blocks, threads, in_atol=1e-6, in_rtol=1e-6):\n\n total_threads = threads * blocks\n experiments = len(parameters)\n\n neqn = self._speciesNumber\n\n # compile\n init_common_kernel = self._completeCode.get_function(\"init_common\")\n init_common_kernel(block=(threads, 1, 1), grid=(blocks, 1))\n\n # output array\n ret_xt = np.zeros([total_threads, 1, self._resultNumber, self._speciesNumber])\n ret_istate = np.ones([total_threads], dtype=np.int32)\n\n # calculate sizes of work spaces\n isize = 20 + self._speciesNumber\n rsize = 22 + self._speciesNumber * max(16, self._speciesNumber + 9)\n\n # local variables\n t = np.zeros([total_threads], 
dtype=np.float64)\n jt = np.zeros([total_threads], dtype=np.int32)\n neq = np.zeros([total_threads], dtype=np.int32)\n itol = np.zeros([total_threads], dtype=np.int32)\n iopt = np.zeros([total_threads], dtype=np.int32)\n rtol = np.zeros([total_threads], dtype=np.float64)\n iout = np.zeros([total_threads], dtype=np.int32)\n tout = np.zeros([total_threads], dtype=np.float64)\n itask = np.zeros([total_threads], dtype=np.int32)\n istate = np.zeros([total_threads], dtype=np.int32)\n atol = np.zeros([total_threads], dtype=np.float64)\n\n liw = np.zeros([total_threads], dtype=np.int32)\n lrw = np.zeros([total_threads], dtype=np.int32)\n iwork = np.zeros([isize * total_threads], dtype=np.int32)\n rwork = np.zeros([rsize * total_threads], dtype=np.float64)\n y = np.zeros([self._speciesNumber * total_threads], dtype=np.float64)\n\n for i in range(total_threads):\n neq[i] = neqn\n t[i] = 0\n itol[i] = 1\n itask[i] = 1\n istate[i] = 1\n iopt[i] = 0\n jt[i] = 2\n atol[i] = in_atol\n rtol[i] = in_rtol\n\n liw[i] = isize\n lrw[i] = rsize\n\n try:\n # initial conditions\n for j in range(self._speciesNumber):\n # loop over species\n y[i * self._speciesNumber + j] = init_values[i][j]\n ret_xt[i, 0, 0, j] = init_values[i][j]\n except IndexError:\n pass\n\n # allocate on device\n d_t = driver.mem_alloc(t.size * t.dtype.itemsize)\n d_jt = driver.mem_alloc(jt.size * jt.dtype.itemsize)\n d_neq = driver.mem_alloc(neq.size * neq.dtype.itemsize)\n d_liw = driver.mem_alloc(liw.size * liw.dtype.itemsize)\n d_lrw = driver.mem_alloc(lrw.size * lrw.dtype.itemsize)\n d_itol = driver.mem_alloc(itol.size * itol.dtype.itemsize)\n d_iopt = driver.mem_alloc(iopt.size * iopt.dtype.itemsize)\n d_rtol = driver.mem_alloc(rtol.size * rtol.dtype.itemsize)\n d_iout = driver.mem_alloc(iout.size * iout.dtype.itemsize)\n d_tout = driver.mem_alloc(tout.size * tout.dtype.itemsize)\n d_itask = driver.mem_alloc(itask.size * itask.dtype.itemsize)\n d_istate = driver.mem_alloc(istate.size * istate.dtype.itemsize)\n d_y = driver.mem_alloc(y.size * y.dtype.itemsize)\n d_atol = driver.mem_alloc(atol.size * atol.dtype.itemsize)\n d_iwork = driver.mem_alloc(iwork.size * iwork.dtype.itemsize)\n d_rwork = driver.mem_alloc(rwork.size * rwork.dtype.itemsize)\n\n # copy to device\n driver.memcpy_htod(d_t, t)\n driver.memcpy_htod(d_jt, jt)\n driver.memcpy_htod(d_neq, neq)\n driver.memcpy_htod(d_liw, liw)\n driver.memcpy_htod(d_lrw, lrw)\n driver.memcpy_htod(d_itol, itol)\n driver.memcpy_htod(d_iopt, iopt)\n driver.memcpy_htod(d_rtol, rtol)\n driver.memcpy_htod(d_iout, iout)\n driver.memcpy_htod(d_tout, tout)\n driver.memcpy_htod(d_itask, itask)\n driver.memcpy_htod(d_istate, istate)\n driver.memcpy_htod(d_y, y)\n driver.memcpy_htod(d_atol, atol)\n driver.memcpy_htod(d_iwork, iwork)\n driver.memcpy_htod(d_rwork, rwork)\n\n param = np.zeros((total_threads, self._parameterNumber), dtype=np.float32)\n try:\n for i in range(len(parameters)):\n for j in range(self._parameterNumber):\n param[i][j] = parameters[i][j]\n except IndexError:\n pass\n\n # parameter texture\n ary = sim.create_2D_array(param)\n sim.copy2D_host_to_array(ary, param, self._parameterNumber * 4, total_threads)\n self._param_tex.set_array(ary)\n\n if self._dt <= 0:\n for i in range(self._resultNumber):\n\n for j in range(total_threads):\n tout[j] = self._timepoints[i]\n driver.memcpy_htod(d_tout, tout)\n\n self._compiledRunMethod(d_neq, d_y, d_t, d_tout, d_itol, d_rtol, d_atol, d_itask, d_istate,\n d_iopt, d_rwork, d_lrw, d_iwork, d_liw, d_jt, block=(threads, 1, 1),\n grid=(blocks, 1))\n\n 
driver.memcpy_dtoh(t, d_t)\n driver.memcpy_dtoh(y, d_y)\n driver.memcpy_dtoh(istate, d_istate)\n\n for j in range(total_threads):\n for k in range(self._speciesNumber):\n ret_xt[j, 0, i, k] = y[j * self._speciesNumber + k]\n\n if istate[j] < 0:\n ret_istate[j] = 0\n\n # end of loop over time points\n\n else:\n tt = self._timepoints[0]\n\n for i in range(self._resultNumber):\n while 1:\n\n next_time = min(tt + self._dt, self._timepoints[i])\n\n for j in range(total_threads):\n tout[j] = next_time\n driver.memcpy_htod(d_tout, tout)\n\n self._compiledRunMethod(d_neq, d_y, d_t, d_tout, d_itol, d_rtol, d_atol, d_itask, d_istate,\n d_iopt, d_rwork, d_lrw, d_iwork, d_liw, d_jt, block=(threads, 1, 1),\n grid=(blocks, 1))\n\n driver.memcpy_dtoh(t, d_t)\n driver.memcpy_dtoh(y, d_y)\n driver.memcpy_dtoh(istate, d_istate)\n\n if np.abs(next_time - self._timepoints[i]) < 1e-5:\n tt = next_time\n break\n\n tt = next_time\n\n for j in range(total_threads):\n for k in range(self._speciesNumber):\n ret_xt[j, 0, i, k] = y[j * self._speciesNumber + k]\n\n if istate[j] < 0:\n ret_istate[j] = 0\n\n # loop over and check ret_istate\n # it will will be zero if there was problems\n for j in range(total_threads):\n if ret_istate[j] == 0:\n for i in range(self._resultNumber):\n for k in range(self._speciesNumber):\n ret_xt[j, 0, i, k] = float('NaN')\n\n return ret_xt[0:experiments]\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
# import random module from Python standard library
# define a dictionary with image urls and number of flucks
# set the served img variable to be a random element from imgs
# hints:
# to put dict keys in a list: list(dict.keys())
# to choose a random item from a list: random.choice(lst)
# keep asking user if they want to fluck the image until
# they say either 'yes' or 'no'
# if they say 'yes', output a message and increment the flucks
# if they say 'no', serve another image?
# repeat process for another image...
# hint: group blocks of task-specific code into functions?
import random
imgs = {"img_1":1,"img_2":2,"img_3":3,"img_4":4}
img = imgs.keys()
random.choice(imgs)
served_img = imgs[random.randrange(0,len(imgs)-1)]
print(served_img)
input = raw_input("Would you like to fluck it?!")
if input == "yes":
print("YOU FLUCKED IT")
elif input == "no":
print("WHAT ARE YOU???..")
|
normal
|
{
"blob_id": "4ae611ee8c019c76bb5d7c1d733ffb4bd06e2e8d",
"index": 5508,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nrandom.choice(imgs)\n<mask token>\nprint(served_img)\n<mask token>\nif input == 'yes':\n print('YOU FLUCKED IT')\nelif input == 'no':\n print('WHAT ARE YOU???..')\n",
"step-3": "<mask token>\nimgs = {'img_1': 1, 'img_2': 2, 'img_3': 3, 'img_4': 4}\nimg = imgs.keys()\nrandom.choice(imgs)\nserved_img = imgs[random.randrange(0, len(imgs) - 1)]\nprint(served_img)\ninput = raw_input('Would you like to fluck it?!')\nif input == 'yes':\n print('YOU FLUCKED IT')\nelif input == 'no':\n print('WHAT ARE YOU???..')\n",
"step-4": "import random\nimgs = {'img_1': 1, 'img_2': 2, 'img_3': 3, 'img_4': 4}\nimg = imgs.keys()\nrandom.choice(imgs)\nserved_img = imgs[random.randrange(0, len(imgs) - 1)]\nprint(served_img)\ninput = raw_input('Would you like to fluck it?!')\nif input == 'yes':\n print('YOU FLUCKED IT')\nelif input == 'no':\n print('WHAT ARE YOU???..')\n",
"step-5": "# import random module from Python standard library\n\n# define a dictionary with image urls and number of flucks\n\n# set the served img variable to be a random element from imgs\n# hints: \n#\tto put dict keys in a list: list(dict.keys())\n#\tto choose a random item from a list: random.choice(lst)\n\n# keep asking user if they want to fluck the image until\n# they say either 'yes' or 'no'\n\n# if they say 'yes', output a message and increment the flucks\n# if they say 'no', serve another image?\n\n# repeat process for another image...\n# hint: group blocks of task-specific code into functions?\n\nimport random\n\nimgs = {\"img_1\":1,\"img_2\":2,\"img_3\":3,\"img_4\":4}\nimg = imgs.keys()\nrandom.choice(imgs)\nserved_img = imgs[random.randrange(0,len(imgs)-1)]\n\nprint(served_img)\n\ninput = raw_input(\"Would you like to fluck it?!\")\n\nif input == \"yes\":\n print(\"YOU FLUCKED IT\")\n \nelif input == \"no\":\n print(\"WHAT ARE YOU???..\")\n \n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
print('Different Code!!!')
<|reserved_special_token_1|>
#Sample Python Code
print("Different Code!!!")
#print("Hello World!")
|
flexible
|
{
"blob_id": "1e24952006afebb7bf10a83077fc4effd5cc9c58",
"index": 1301,
"step-1": "<mask token>\n",
"step-2": "print('Different Code!!!')\n",
"step-3": "#Sample Python Code\nprint(\"Different Code!!!\")\n#print(\"Hello World!\")\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
from FluidStream import *
# List of chemicals and their constant properties
CHEMICALS_KEY_GUIDE = ['MW' , 'Density']
CHEMICALS = {
'Bacteria' : ['NA' , 1.05 ],
'Calcium Carbonate' : [100.087 , 2.71 ],
'Calcium Lactate' : [218.22 , 1.494 ],
'Corn Steep Liquor' : ['NA' , 1.2326],
'Glucose' : [180.156 , 1.54 ],
'Lactic Acid' : [90.08 , 1.206 ],
'Octanol' : [130.231 , .824 ],
'Tween 80' : ['NA' , 1.07 ],
'Water' : [18.015 , .995 ],
'Water/Glucose 10%' : [34.2291 , 1.0375]
}
SOLVE_FOR_PRODUCTION = True
PRODUCTION_TO_SOLVE = 100000000
def convert_mass_to_concentration(fluidStream, component):
    # TODO: incomplete -- reads the stream's total mass but never
    # derives or returns a concentration from it
    total_mass = fluidStream.TotalMass
def component_mass_to_volume(mass, component):
    component_density = CHEMICALS[component][1]
    component_volume = mass / component_density  # volume = mass / density
    return component_volume
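# e.g. 154 g of glucose at 1.54 g/mL -> 154 / 1.54 = 100 mL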
# Bacterial Growth Curve
# TIME_INIT --> hours
TIME_INIT = 0
# C_BACT_INIT --> g/L
C_BACT_INIT = .7
# C_GLUC_INIT --> g/L
C_GLUC_INIT = 100.0
# C_LA_INIT --> g/L
C_LA_INIT = 12.57
# C_TWEEN_INIT --> g/L
C_TWEEN_INIT = 1.0
# dBACT_dT -- > g/L*h
dBACT_dT_INIT = 0.0
FERMENT_IN = {
'Bacteria Concentration' : C_BACT_INIT,
'Glucose Concentration' : C_GLUC_INIT,
'Lactic Acid Concentration' : C_LA_INIT,
'Tween 80 Concentration' : C_TWEEN_INIT
}
# HOLDING TANK SPECS
# Initial Fermentation Water Charge in Liters
FERMENT_WATER_VOL = 750000
# Number of Fermentation Vessels
FERMENT_VESSEL_COUNT = 4
# Runtime of Fermentation Process
FERMENT_RUNTIME = 32
# Downtime of Fermentation Process
FERMENT_DOWNTIME = 8
# Total Runtime of Each Fermentation Batch
FERMENT_BATCH_TIME = FERMENT_RUNTIME + FERMENT_DOWNTIME
FERMENT_CONST = {
'Water Volume' : FERMENT_WATER_VOL,
'Vessel Count' : FERMENT_VESSEL_COUNT,
'Runtime' : FERMENT_RUNTIME,
'Downtime' : FERMENT_DOWNTIME,
'Batch Time' : FERMENT_BATCH_TIME }
# Acid Dissociation Constant Ka
SALTS_pKa = 3.86
SALTS_Ka = pow(10, (-1*SALTS_pKa))
MAX_pH = 3.8
pKa_pH_CALC = pow(10, (SALTS_pKa - MAX_pH))
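# Henderson-Hasselbalch ratio: [HA]/[A-] = 10^(pKa - pH) = 10^(3.86 - 3.8) ~= 1.15 at the max pH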
MW_SALT = CHEMICALS['Calcium Lactate'][0]
MW_LA = CHEMICALS['Lactic Acid'][0]
|
normal
|
{
"blob_id": "3471f02f507104202c1e49440172f120ba17730f",
"index": 9263,
"step-1": "<mask token>\n\n\ndef convert_mass_to_concentration(fluidStream, component):\n total_mass = fluidStream.TotalMass\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef convert_mass_to_concentration(fluidStream, component):\n total_mass = fluidStream.TotalMass\n\n\ndef component_mass_to_volume(mass, component):\n component_density = CHEMICALS[component][1]\n component_volume = mass * component_density\n return component_volume\n\n\n<mask token>\n",
"step-3": "<mask token>\nCHEMICALS_KEY_GUIDE = ['MW', 'Density']\nCHEMICALS = {'Bacteria': ['NA', 1.05], 'Calcium Carbonate': [100.087, 2.71],\n 'Calcium Lactate': [218.22, 1.494], 'Corn Steep Liquor': ['NA', 1.2326],\n 'Glucose': [180.156, 1.54], 'Lactic Acid': [90.08, 1.206], 'Octanol': [\n 130.231, 0.824], 'Tween 80': ['NA', 1.07], 'Water': [18.015, 0.995],\n 'Water/Glucose 10%': [34.2291, 1.0375]}\nSOLVE_FOR_PRODUCTION = True\nPRODUCTION_TO_SOLVE = 100000000\n\n\ndef convert_mass_to_concentration(fluidStream, component):\n total_mass = fluidStream.TotalMass\n\n\ndef component_mass_to_volume(mass, component):\n component_density = CHEMICALS[component][1]\n component_volume = mass * component_density\n return component_volume\n\n\nTIME_INIT = 0\nC_BACT_INIT = 0.7\nC_GLUC_INIT = 100.0\nC_LA_INIT = 12.57\nC_TWEEN_INIT = 1.0\ndBACT_dT_INIT = 0.0\nFERMENT_IN = {'Bacteria Concentration': C_BACT_INIT,\n 'Glucose Concentration': C_GLUC_INIT, 'Lactic Acid Concentration':\n C_LA_INIT, 'Tween 80 Concentration': C_TWEEN_INIT}\nFERMENT_WATER_VOL = 750000\nFERMENT_VESSEL_COUNT = 4\nFERMENT_RUNTIME = 32\nFERMENT_DOWNTIME = 8\nFERMENT_BATCH_TIME = FERMENT_RUNTIME + FERMENT_DOWNTIME\nFERMENT_CONST = {'Water Volume': FERMENT_WATER_VOL, 'Vessel Count':\n FERMENT_VESSEL_COUNT, 'Runtime': FERMENT_RUNTIME, 'Downtime':\n FERMENT_DOWNTIME, 'Batch Time': FERMENT_BATCH_TIME}\nSALTS_pKa = 3.86\nSALTS_Ka = pow(10, -1 * SALTS_pKa)\nMAX_pH = 3.8\npKa_pH_CALC = pow(10, SALTS_pKa - MAX_pH)\nMW_SALT = CHEMICALS['Calcium Lactate'][0]\nMW_LA = CHEMICALS['Lactic Acid'][0]\n",
"step-4": "from FluidStream import *\nCHEMICALS_KEY_GUIDE = ['MW', 'Density']\nCHEMICALS = {'Bacteria': ['NA', 1.05], 'Calcium Carbonate': [100.087, 2.71],\n 'Calcium Lactate': [218.22, 1.494], 'Corn Steep Liquor': ['NA', 1.2326],\n 'Glucose': [180.156, 1.54], 'Lactic Acid': [90.08, 1.206], 'Octanol': [\n 130.231, 0.824], 'Tween 80': ['NA', 1.07], 'Water': [18.015, 0.995],\n 'Water/Glucose 10%': [34.2291, 1.0375]}\nSOLVE_FOR_PRODUCTION = True\nPRODUCTION_TO_SOLVE = 100000000\n\n\ndef convert_mass_to_concentration(fluidStream, component):\n total_mass = fluidStream.TotalMass\n\n\ndef component_mass_to_volume(mass, component):\n component_density = CHEMICALS[component][1]\n component_volume = mass * component_density\n return component_volume\n\n\nTIME_INIT = 0\nC_BACT_INIT = 0.7\nC_GLUC_INIT = 100.0\nC_LA_INIT = 12.57\nC_TWEEN_INIT = 1.0\ndBACT_dT_INIT = 0.0\nFERMENT_IN = {'Bacteria Concentration': C_BACT_INIT,\n 'Glucose Concentration': C_GLUC_INIT, 'Lactic Acid Concentration':\n C_LA_INIT, 'Tween 80 Concentration': C_TWEEN_INIT}\nFERMENT_WATER_VOL = 750000\nFERMENT_VESSEL_COUNT = 4\nFERMENT_RUNTIME = 32\nFERMENT_DOWNTIME = 8\nFERMENT_BATCH_TIME = FERMENT_RUNTIME + FERMENT_DOWNTIME\nFERMENT_CONST = {'Water Volume': FERMENT_WATER_VOL, 'Vessel Count':\n FERMENT_VESSEL_COUNT, 'Runtime': FERMENT_RUNTIME, 'Downtime':\n FERMENT_DOWNTIME, 'Batch Time': FERMENT_BATCH_TIME}\nSALTS_pKa = 3.86\nSALTS_Ka = pow(10, -1 * SALTS_pKa)\nMAX_pH = 3.8\npKa_pH_CALC = pow(10, SALTS_pKa - MAX_pH)\nMW_SALT = CHEMICALS['Calcium Lactate'][0]\nMW_LA = CHEMICALS['Lactic Acid'][0]\n",
"step-5": "from FluidStream import *\n# List of chemicals and their constant properties\n\nCHEMICALS_KEY_GUIDE = ['MW' , 'Density']\nCHEMICALS = {\n'Bacteria'\t\t\t: ['NA' , 1.05 ],\n'Calcium Carbonate' : [100.087 , 2.71 ],\n'Calcium Lactate' : [218.22 , 1.494 ],\n'Corn Steep Liquor' : ['NA'\t , 1.2326],\n'Glucose'\t\t\t: [180.156 , 1.54 ],\n'Lactic Acid'\t\t: [90.08 , 1.206 ],\n'Octanol' : [130.231 , .824 ],\n'Tween 80'\t\t\t: ['NA'\t , 1.07 ],\n'Water'\t\t\t\t: [18.015 , .995 ],\n'Water/Glucose 10%'\t: [34.2291 , 1.0375]\n}\n\nSOLVE_FOR_PRODUCTION = True\nPRODUCTION_TO_SOLVE = 100000000\n\n\ndef convert_mass_to_concentration(fluidStream, component):\n total_mass = fluidStream.TotalMass\n\n\ndef component_mass_to_volume(mass, component):\n component_density = CHEMICALS[component][1]\n component_volume = mass*component_density\n return component_volume\n\n\n# Bacterial Growth Curve\n\n# TIME_INIT --> hours\nTIME_INIT = 0\n\n# C_BACT_INIT --> g/L\nC_BACT_INIT = .7\n\n# C_GLUC_INIT --> g/L\nC_GLUC_INIT = 100.0\n\n# C_LA_INIT --> g/L\nC_LA_INIT = 12.57\n\n# C_TWEEN_INIT --> g/L\nC_TWEEN_INIT = 1.0\n\n# dBACT_dT -- > g/L*h\ndBACT_dT_INIT = 0.0\n\nFERMENT_IN = {\n'Bacteria Concentration' : C_BACT_INIT,\n'Glucose Concentration' : C_GLUC_INIT,\n'Lactic Acid Concentration' : C_LA_INIT,\n'Tween 80 Concentration' : C_TWEEN_INIT\n}\n\n# HOLDING TANK SPECS\n# Initial Fermentation Water Charge in Liters\nFERMENT_WATER_VOL = 750000\n# Number of Fermentation Vessels\nFERMENT_VESSEL_COUNT = 4\n# Runtime of Fermentation Process\nFERMENT_RUNTIME = 32\n# Downtime of Fermentation Process\nFERMENT_DOWNTIME = 8\n# Total Runtime of Each Fermentation Batch\nFERMENT_BATCH_TIME = FERMENT_RUNTIME + FERMENT_DOWNTIME\n\nFERMENT_CONST = {\n'Water Volume' : FERMENT_WATER_VOL,\n'Vessel Count' : FERMENT_VESSEL_COUNT,\n'Runtime' : FERMENT_RUNTIME,\n'Downtime' : FERMENT_DOWNTIME,\n'Batch Time' : FERMENT_BATCH_TIME }\n\n# Acid Dissociation Constant Ka\nSALTS_pKa = 3.86\nSALTS_Ka = pow(10, (-1*SALTS_pKa))\nMAX_pH = 3.8\npKa_pH_CALC = pow(10, (SALTS_pKa - MAX_pH))\nMW_SALT = CHEMICALS['Calcium Lactate'][0]\nMW_LA = CHEMICALS['Lactic Acid'][0]\n\n\n\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
class PrimerForm(forms.Form):
<|reserved_special_token_0|>
fasta = forms.CharField(initial='')
primer_min = forms.IntegerField(initial=18, max_value=35)
primer_max = forms.IntegerField(initial=27, max_value=35)
primer_optimum = forms.IntegerField(initial=20, max_value=35)
amplicon_min = forms.IntegerField(initial=60, min_value=50, max_value=20000
)
amplicon_max = forms.IntegerField(initial=80, min_value=50, max_value=20000
)
tm_min = forms.FloatField(initial=59, min_value=0, max_value=100)
tm_max = forms.FloatField(initial=61, min_value=0, max_value=100)
tm_optimum = forms.FloatField(initial=60, min_value=0, max_value=100)
self_dimer_any = forms.FloatField(initial=8.0, min_value=0, max_value=
9999.99)
self_dimer_end = forms.FloatField(initial=3.0, min_value=0, max_value=
9999.99)
gc_min = forms.FloatField(initial=20.0, min_value=0, max_value=100)
gc_clamp = forms.IntegerField(initial=0)
def clean(self):
"""Validate and return user input."""
data = self.cleaned_data
data['fasta'] = Fasta.from_string(data['fasta'])
validate_fasta(data)
return data
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class PrimerForm(forms.Form):
"""Collect user input to run primer prediction."""
fasta = forms.CharField(initial='')
primer_min = forms.IntegerField(initial=18, max_value=35)
primer_max = forms.IntegerField(initial=27, max_value=35)
primer_optimum = forms.IntegerField(initial=20, max_value=35)
amplicon_min = forms.IntegerField(initial=60, min_value=50, max_value=20000
)
amplicon_max = forms.IntegerField(initial=80, min_value=50, max_value=20000
)
tm_min = forms.FloatField(initial=59, min_value=0, max_value=100)
tm_max = forms.FloatField(initial=61, min_value=0, max_value=100)
tm_optimum = forms.FloatField(initial=60, min_value=0, max_value=100)
self_dimer_any = forms.FloatField(initial=8.0, min_value=0, max_value=
9999.99)
self_dimer_end = forms.FloatField(initial=3.0, min_value=0, max_value=
9999.99)
gc_min = forms.FloatField(initial=20.0, min_value=0, max_value=100)
gc_clamp = forms.IntegerField(initial=0)
def clean(self):
"""Validate and return user input."""
data = self.cleaned_data
data['fasta'] = Fasta.from_string(data['fasta'])
validate_fasta(data)
return data
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class PrimerForm(forms.Form):
"""Collect user input to run primer prediction."""
fasta = forms.CharField(initial='')
primer_min = forms.IntegerField(initial=18, max_value=35)
primer_max = forms.IntegerField(initial=27, max_value=35)
primer_optimum = forms.IntegerField(initial=20, max_value=35)
amplicon_min = forms.IntegerField(initial=60, min_value=50, max_value=20000
)
amplicon_max = forms.IntegerField(initial=80, min_value=50, max_value=20000
)
tm_min = forms.FloatField(initial=59, min_value=0, max_value=100)
tm_max = forms.FloatField(initial=61, min_value=0, max_value=100)
tm_optimum = forms.FloatField(initial=60, min_value=0, max_value=100)
self_dimer_any = forms.FloatField(initial=8.0, min_value=0, max_value=
9999.99)
self_dimer_end = forms.FloatField(initial=3.0, min_value=0, max_value=
9999.99)
gc_min = forms.FloatField(initial=20.0, min_value=0, max_value=100)
gc_clamp = forms.IntegerField(initial=0)
def clean(self):
"""Validate and return user input."""
data = self.cleaned_data
data['fasta'] = Fasta.from_string(data['fasta'])
validate_fasta(data)
return data
def validate_fasta(data):
"""Validate input sequence lengths."""
for sequence in data['fasta'].values():
print(f'Sequence length {len(sequence)} nt')
if len(sequence) < data['amplicon_min']:
raise ValidationError({'fasta':
f'Input sequence must be longer than minimum' +
f" amplicon length parameter ({data['amplicon_min']} nt)"})
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from django import forms
from django.core.exceptions import ValidationError
from .fasta import Fasta
class PrimerForm(forms.Form):
"""Collect user input to run primer prediction."""
fasta = forms.CharField(initial='')
primer_min = forms.IntegerField(initial=18, max_value=35)
primer_max = forms.IntegerField(initial=27, max_value=35)
primer_optimum = forms.IntegerField(initial=20, max_value=35)
amplicon_min = forms.IntegerField(initial=60, min_value=50, max_value=20000
)
amplicon_max = forms.IntegerField(initial=80, min_value=50, max_value=20000
)
tm_min = forms.FloatField(initial=59, min_value=0, max_value=100)
tm_max = forms.FloatField(initial=61, min_value=0, max_value=100)
tm_optimum = forms.FloatField(initial=60, min_value=0, max_value=100)
self_dimer_any = forms.FloatField(initial=8.0, min_value=0, max_value=
9999.99)
self_dimer_end = forms.FloatField(initial=3.0, min_value=0, max_value=
9999.99)
gc_min = forms.FloatField(initial=20.0, min_value=0, max_value=100)
gc_clamp = forms.IntegerField(initial=0)
def clean(self):
"""Validate and return user input."""
data = self.cleaned_data
data['fasta'] = Fasta.from_string(data['fasta'])
validate_fasta(data)
return data
def validate_fasta(data):
"""Validate input sequence lengths."""
for sequence in data['fasta'].values():
print(f'Sequence length {len(sequence)} nt')
if len(sequence) < data['amplicon_min']:
raise ValidationError({'fasta':
f'Input sequence must be longer than minimum' +
f" amplicon length parameter ({data['amplicon_min']} nt)"})
<|reserved_special_token_1|>
"""Primer3 input form.
For details on input params see:
https://primer3.org/manual.html#globalTags
"""
from django import forms
from django.core.exceptions import ValidationError
from .fasta import Fasta
class PrimerForm(forms.Form):
"""Collect user input to run primer prediction."""
fasta = forms.CharField(initial="")
# Primer size range
primer_min = forms.IntegerField(initial=18, max_value=35)
primer_max = forms.IntegerField(initial=27, max_value=35)
primer_optimum = forms.IntegerField(initial=20, max_value=35)
# Amplicon size range
amplicon_min = forms.IntegerField(
initial=60, min_value=50, max_value=20000)
amplicon_max = forms.IntegerField(
initial=80, min_value=50, max_value=20000)
# Primer melting temperature range
tm_min = forms.FloatField(initial=59, min_value=0, max_value=100)
tm_max = forms.FloatField(initial=61, min_value=0, max_value=100)
tm_optimum = forms.FloatField(initial=60, min_value=0, max_value=100)
# Max self complement
self_dimer_any = forms.FloatField(
initial=8.0, min_value=0, max_value=9999.99)
# Max self complement 3'
self_dimer_end = forms.FloatField(
initial=3.0, min_value=0, max_value=9999.99)
# GC content
gc_min = forms.FloatField(initial=20.0, min_value=0, max_value=100)
gc_clamp = forms.IntegerField(initial=0)
def clean(self):
"""Validate and return user input."""
data = self.cleaned_data
data['fasta'] = Fasta.from_string(data['fasta'])
validate_fasta(data)
return data
def validate_fasta(data):
"""Validate input sequence lengths."""
for sequence in data['fasta'].values():
print(f'Sequence length {len(sequence)} nt')
if len(sequence) < data['amplicon_min']:
raise ValidationError({'fasta':
f'Input sequence must be longer than minimum'
+ f' amplicon length parameter ({data["amplicon_min"]} nt)'
})
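
# usage sketch (hypothetical Django view code, not part of this module):
#   form = PrimerForm(request.POST)
#   if form.is_valid():
#       params = form.cleaned_data  # includes the parsed Fasta object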
|
flexible
|
{
"blob_id": "6291375738db7914d551f9a1c6d2897b7d236b87",
"index": 1742,
"step-1": "<mask token>\n\n\nclass PrimerForm(forms.Form):\n <mask token>\n fasta = forms.CharField(initial='')\n primer_min = forms.IntegerField(initial=18, max_value=35)\n primer_max = forms.IntegerField(initial=27, max_value=35)\n primer_optimum = forms.IntegerField(initial=20, max_value=35)\n amplicon_min = forms.IntegerField(initial=60, min_value=50, max_value=20000\n )\n amplicon_max = forms.IntegerField(initial=80, min_value=50, max_value=20000\n )\n tm_min = forms.FloatField(initial=59, min_value=0, max_value=100)\n tm_max = forms.FloatField(initial=61, min_value=0, max_value=100)\n tm_optimum = forms.FloatField(initial=60, min_value=0, max_value=100)\n self_dimer_any = forms.FloatField(initial=8.0, min_value=0, max_value=\n 9999.99)\n self_dimer_end = forms.FloatField(initial=3.0, min_value=0, max_value=\n 9999.99)\n gc_min = forms.FloatField(initial=20.0, min_value=0, max_value=100)\n gc_clamp = forms.IntegerField(initial=0)\n\n def clean(self):\n \"\"\"Validate and return user input.\"\"\"\n data = self.cleaned_data\n data['fasta'] = Fasta.from_string(data['fasta'])\n validate_fasta(data)\n return data\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass PrimerForm(forms.Form):\n \"\"\"Collect user input to run primer prediction.\"\"\"\n fasta = forms.CharField(initial='')\n primer_min = forms.IntegerField(initial=18, max_value=35)\n primer_max = forms.IntegerField(initial=27, max_value=35)\n primer_optimum = forms.IntegerField(initial=20, max_value=35)\n amplicon_min = forms.IntegerField(initial=60, min_value=50, max_value=20000\n )\n amplicon_max = forms.IntegerField(initial=80, min_value=50, max_value=20000\n )\n tm_min = forms.FloatField(initial=59, min_value=0, max_value=100)\n tm_max = forms.FloatField(initial=61, min_value=0, max_value=100)\n tm_optimum = forms.FloatField(initial=60, min_value=0, max_value=100)\n self_dimer_any = forms.FloatField(initial=8.0, min_value=0, max_value=\n 9999.99)\n self_dimer_end = forms.FloatField(initial=3.0, min_value=0, max_value=\n 9999.99)\n gc_min = forms.FloatField(initial=20.0, min_value=0, max_value=100)\n gc_clamp = forms.IntegerField(initial=0)\n\n def clean(self):\n \"\"\"Validate and return user input.\"\"\"\n data = self.cleaned_data\n data['fasta'] = Fasta.from_string(data['fasta'])\n validate_fasta(data)\n return data\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass PrimerForm(forms.Form):\n \"\"\"Collect user input to run primer prediction.\"\"\"\n fasta = forms.CharField(initial='')\n primer_min = forms.IntegerField(initial=18, max_value=35)\n primer_max = forms.IntegerField(initial=27, max_value=35)\n primer_optimum = forms.IntegerField(initial=20, max_value=35)\n amplicon_min = forms.IntegerField(initial=60, min_value=50, max_value=20000\n )\n amplicon_max = forms.IntegerField(initial=80, min_value=50, max_value=20000\n )\n tm_min = forms.FloatField(initial=59, min_value=0, max_value=100)\n tm_max = forms.FloatField(initial=61, min_value=0, max_value=100)\n tm_optimum = forms.FloatField(initial=60, min_value=0, max_value=100)\n self_dimer_any = forms.FloatField(initial=8.0, min_value=0, max_value=\n 9999.99)\n self_dimer_end = forms.FloatField(initial=3.0, min_value=0, max_value=\n 9999.99)\n gc_min = forms.FloatField(initial=20.0, min_value=0, max_value=100)\n gc_clamp = forms.IntegerField(initial=0)\n\n def clean(self):\n \"\"\"Validate and return user input.\"\"\"\n data = self.cleaned_data\n data['fasta'] = Fasta.from_string(data['fasta'])\n validate_fasta(data)\n return data\n\n\ndef validate_fasta(data):\n \"\"\"Validate input sequence lengths.\"\"\"\n for sequence in data['fasta'].values():\n print(f'Sequence length {len(sequence)} nt')\n if len(sequence) < data['amplicon_min']:\n raise ValidationError({'fasta': \n f'Input sequence must be longer than minimum' +\n f\" amplicon length parameter ({data['amplicon_min']} nt)\"})\n",
"step-4": "<mask token>\nfrom django import forms\nfrom django.core.exceptions import ValidationError\nfrom .fasta import Fasta\n\n\nclass PrimerForm(forms.Form):\n \"\"\"Collect user input to run primer prediction.\"\"\"\n fasta = forms.CharField(initial='')\n primer_min = forms.IntegerField(initial=18, max_value=35)\n primer_max = forms.IntegerField(initial=27, max_value=35)\n primer_optimum = forms.IntegerField(initial=20, max_value=35)\n amplicon_min = forms.IntegerField(initial=60, min_value=50, max_value=20000\n )\n amplicon_max = forms.IntegerField(initial=80, min_value=50, max_value=20000\n )\n tm_min = forms.FloatField(initial=59, min_value=0, max_value=100)\n tm_max = forms.FloatField(initial=61, min_value=0, max_value=100)\n tm_optimum = forms.FloatField(initial=60, min_value=0, max_value=100)\n self_dimer_any = forms.FloatField(initial=8.0, min_value=0, max_value=\n 9999.99)\n self_dimer_end = forms.FloatField(initial=3.0, min_value=0, max_value=\n 9999.99)\n gc_min = forms.FloatField(initial=20.0, min_value=0, max_value=100)\n gc_clamp = forms.IntegerField(initial=0)\n\n def clean(self):\n \"\"\"Validate and return user input.\"\"\"\n data = self.cleaned_data\n data['fasta'] = Fasta.from_string(data['fasta'])\n validate_fasta(data)\n return data\n\n\ndef validate_fasta(data):\n \"\"\"Validate input sequence lengths.\"\"\"\n for sequence in data['fasta'].values():\n print(f'Sequence length {len(sequence)} nt')\n if len(sequence) < data['amplicon_min']:\n raise ValidationError({'fasta': \n f'Input sequence must be longer than minimum' +\n f\" amplicon length parameter ({data['amplicon_min']} nt)\"})\n",
"step-5": "\"\"\"Primer3 input form.\n\nFor details on input params see:\nhttps://primer3.org/manual.html#globalTags\n\"\"\"\n\nfrom django import forms\nfrom django.core.exceptions import ValidationError\n\nfrom .fasta import Fasta\n\n\nclass PrimerForm(forms.Form):\n \"\"\"Collect user input to run primer prediction.\"\"\"\n\n fasta = forms.CharField(initial=\"\")\n # Primer size range\n primer_min = forms.IntegerField(initial=18, max_value=35)\n primer_max = forms.IntegerField(initial=27, max_value=35)\n primer_optimum = forms.IntegerField(initial=20, max_value=35)\n # Amplicon size range\n amplicon_min = forms.IntegerField(\n initial=60, min_value=50, max_value=20000)\n amplicon_max = forms.IntegerField(\n initial=80, min_value=50, max_value=20000)\n # Primer melting temperature range\n tm_min = forms.FloatField(initial=59, min_value=0, max_value=100)\n tm_max = forms.FloatField(initial=61, min_value=0, max_value=100)\n tm_optimum = forms.FloatField(initial=60, min_value=0, max_value=100)\n # Max self complement\n self_dimer_any = forms.FloatField(\n initial=8.0, min_value=0, max_value=9999.99)\n # Max self complement 3'\n self_dimer_end = forms.FloatField(\n initial=3.0, min_value=0, max_value=9999.99)\n # GC content\n gc_min = forms.FloatField(initial=20.0, min_value=0, max_value=100)\n gc_clamp = forms.IntegerField(initial=0)\n\n def clean(self):\n \"\"\"Validate and return user input.\"\"\"\n data = self.cleaned_data\n data['fasta'] = Fasta.from_string(data['fasta'])\n validate_fasta(data)\n return data\n\n\ndef validate_fasta(data):\n \"\"\"Validate input sequence lengths.\"\"\"\n for sequence in data['fasta'].values():\n print(f'Sequence length {len(sequence)} nt')\n if len(sequence) < data['amplicon_min']:\n raise ValidationError({'fasta':\n f'Input sequence must be longer than minimum'\n + f' amplicon length parameter ({data[\"amplicon_min\"]} nt)'\n })\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
import os, pickle, logging, numpy as np
from .. import utils as U
class CMU_Generator:
def __init__(self, args, dataset_args):
self.in_path = dataset_args['cmu_data_path']
self.out_path = '{}/{}'.format(dataset_args['path'], args.dataset)
self.actions = ['walking', 'running', 'directing_traffic', 'soccer',
'basketball', 'washwindow', 'jumping', 'basketball_signal']
self.dim_ignore = [0, 1, 2, 3, 4, 5, 6, 7, 8, 21, 22, 23, 24, 25, 26,
39, 40, 41, 60, 61, 62, 63, 64, 65, 81, 82, 83,
87, 88, 89, 90, 91, 92, 108, 109, 110, 114, 115, 116]
self.dim_use = list(set(range(39*3)).difference(set(self.dim_ignore)))
U.create_folder(self.out_path)
def start(self):
logging.info('Reading data ...')
self.all_train_data, train_data = self.read_data('train')
_, eval_data = self.read_data('test')
logging.info('Normalizing data ...')
self.data_mean, self.data_std, self.dim_zero, self.dim_nonzero = self.normalize_state()
train_data = self.normalize_data(train_data)
eval_data = self.normalize_data(eval_data)
logging.info('Saving data ...')
with open('{}/data.pkl'.format(self.out_path), 'wb') as f:
pickle.dump((train_data, eval_data, self.actions), f)
with open('{}/normalization.pkl'.format(self.out_path), 'wb') as f:
pickle.dump((self.data_mean, self.data_std, self.dim_zero, self.dim_nonzero), f)
with open('{}/ignore.pkl'.format(self.out_path), 'wb') as f:
pickle.dump((self.dim_use, self.dim_ignore), f)
def read_data(self, phase):
all_data, even_data = [], {}
for action_idx, action in enumerate(self.actions):
action_path = '{}/{}/{}'.format(self.in_path, phase, action)
for sequence_idx, file in enumerate(os.listdir(action_path)):
sequence = []
with open('{}/{}'.format(action_path, file), 'r') as f:
for line in f.readlines():
line = line.strip().split(',')
if len(line) > 0:
sequence.append(np.array([np.float32(x) for x in line]))
sequence = np.array(sequence)
all_data.append(sequence)
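                # downsample by keeping every second frame of the raw sequence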
even_data[(action_idx, sequence_idx)] = sequence[range(0,sequence.shape[0],2),:]
return np.concatenate(all_data, axis=0), even_data
def normalize_state(self):
data_mean = np.mean(self.all_train_data, axis=0)
data_std = np.std(self.all_train_data, axis=0)
dim_zero = list(np.where(data_std < 0.0001)[0])
dim_nonzero = list(np.where(data_std >= 0.0001)[0])
data_std[dim_zero] = 1.0
return data_mean, data_std, dim_zero, dim_nonzero
def normalize_data(self, data):
for key in data.keys():
data[key] = np.divide((data[key] - self.data_mean), self.data_std)
data[key] = data[key][:, self.dim_use]
return data
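
# usage sketch (assuming the surrounding pipeline supplies args/dataset_args):
#   generator = CMU_Generator(args, dataset_args)
#   generator.start()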
|
normal
|
{
"blob_id": "2c58a9e83f80d437160b87ec64c7631e7a35bf90",
"index": 6315,
"step-1": "<mask token>\n\n\nclass CMU_Generator:\n <mask token>\n <mask token>\n\n def read_data(self, phase):\n all_data, even_data = [], {}\n for action_idx, action in enumerate(self.actions):\n action_path = '{}/{}/{}'.format(self.in_path, phase, action)\n for sequence_idx, file in enumerate(os.listdir(action_path)):\n sequence = []\n with open('{}/{}'.format(action_path, file), 'r') as f:\n for line in f.readlines():\n line = line.strip().split(',')\n if len(line) > 0:\n sequence.append(np.array([np.float32(x) for x in\n line]))\n sequence = np.array(sequence)\n all_data.append(sequence)\n even_data[action_idx, sequence_idx] = sequence[range(0,\n sequence.shape[0], 2), :]\n return np.concatenate(all_data, axis=0), even_data\n <mask token>\n\n def normalize_data(self, data):\n for key in data.keys():\n data[key] = np.divide(data[key] - self.data_mean, self.data_std)\n data[key] = data[key][:, self.dim_use]\n return data\n",
"step-2": "<mask token>\n\n\nclass CMU_Generator:\n\n def __init__(self, args, dataset_args):\n self.in_path = dataset_args['cmu_data_path']\n self.out_path = '{}/{}'.format(dataset_args['path'], args.dataset)\n self.actions = ['walking', 'running', 'directing_traffic', 'soccer',\n 'basketball', 'washwindow', 'jumping', 'basketball_signal']\n self.dim_ignore = [0, 1, 2, 3, 4, 5, 6, 7, 8, 21, 22, 23, 24, 25, \n 26, 39, 40, 41, 60, 61, 62, 63, 64, 65, 81, 82, 83, 87, 88, 89,\n 90, 91, 92, 108, 109, 110, 114, 115, 116]\n self.dim_use = list(set(range(39 * 3)).difference(set(self.dim_ignore))\n )\n U.create_folder(self.out_path)\n <mask token>\n\n def read_data(self, phase):\n all_data, even_data = [], {}\n for action_idx, action in enumerate(self.actions):\n action_path = '{}/{}/{}'.format(self.in_path, phase, action)\n for sequence_idx, file in enumerate(os.listdir(action_path)):\n sequence = []\n with open('{}/{}'.format(action_path, file), 'r') as f:\n for line in f.readlines():\n line = line.strip().split(',')\n if len(line) > 0:\n sequence.append(np.array([np.float32(x) for x in\n line]))\n sequence = np.array(sequence)\n all_data.append(sequence)\n even_data[action_idx, sequence_idx] = sequence[range(0,\n sequence.shape[0], 2), :]\n return np.concatenate(all_data, axis=0), even_data\n\n def normalize_state(self):\n data_mean = np.mean(self.all_train_data, axis=0)\n data_std = np.std(self.all_train_data, axis=0)\n dim_zero = list(np.where(data_std < 0.0001)[0])\n dim_nonzero = list(np.where(data_std >= 0.0001)[0])\n data_std[dim_zero] = 1.0\n return data_mean, data_std, dim_zero, dim_nonzero\n\n def normalize_data(self, data):\n for key in data.keys():\n data[key] = np.divide(data[key] - self.data_mean, self.data_std)\n data[key] = data[key][:, self.dim_use]\n return data\n",
"step-3": "<mask token>\n\n\nclass CMU_Generator:\n\n def __init__(self, args, dataset_args):\n self.in_path = dataset_args['cmu_data_path']\n self.out_path = '{}/{}'.format(dataset_args['path'], args.dataset)\n self.actions = ['walking', 'running', 'directing_traffic', 'soccer',\n 'basketball', 'washwindow', 'jumping', 'basketball_signal']\n self.dim_ignore = [0, 1, 2, 3, 4, 5, 6, 7, 8, 21, 22, 23, 24, 25, \n 26, 39, 40, 41, 60, 61, 62, 63, 64, 65, 81, 82, 83, 87, 88, 89,\n 90, 91, 92, 108, 109, 110, 114, 115, 116]\n self.dim_use = list(set(range(39 * 3)).difference(set(self.dim_ignore))\n )\n U.create_folder(self.out_path)\n\n def start(self):\n logging.info('Reading data ...')\n self.all_train_data, train_data = self.read_data('train')\n _, eval_data = self.read_data('test')\n logging.info('Normalizing data ...')\n self.data_mean, self.data_std, self.dim_zero, self.dim_nonzero = (self\n .normalize_state())\n train_data = self.normalize_data(train_data)\n eval_data = self.normalize_data(eval_data)\n logging.info('Saving data ...')\n with open('{}/data.pkl'.format(self.out_path), 'wb') as f:\n pickle.dump((train_data, eval_data, self.actions), f)\n with open('{}/normalization.pkl'.format(self.out_path), 'wb') as f:\n pickle.dump((self.data_mean, self.data_std, self.dim_zero, self\n .dim_nonzero), f)\n with open('{}/ignore.pkl'.format(self.out_path), 'wb') as f:\n pickle.dump((self.dim_use, self.dim_ignore), f)\n\n def read_data(self, phase):\n all_data, even_data = [], {}\n for action_idx, action in enumerate(self.actions):\n action_path = '{}/{}/{}'.format(self.in_path, phase, action)\n for sequence_idx, file in enumerate(os.listdir(action_path)):\n sequence = []\n with open('{}/{}'.format(action_path, file), 'r') as f:\n for line in f.readlines():\n line = line.strip().split(',')\n if len(line) > 0:\n sequence.append(np.array([np.float32(x) for x in\n line]))\n sequence = np.array(sequence)\n all_data.append(sequence)\n even_data[action_idx, sequence_idx] = sequence[range(0,\n sequence.shape[0], 2), :]\n return np.concatenate(all_data, axis=0), even_data\n\n def normalize_state(self):\n data_mean = np.mean(self.all_train_data, axis=0)\n data_std = np.std(self.all_train_data, axis=0)\n dim_zero = list(np.where(data_std < 0.0001)[0])\n dim_nonzero = list(np.where(data_std >= 0.0001)[0])\n data_std[dim_zero] = 1.0\n return data_mean, data_std, dim_zero, dim_nonzero\n\n def normalize_data(self, data):\n for key in data.keys():\n data[key] = np.divide(data[key] - self.data_mean, self.data_std)\n data[key] = data[key][:, self.dim_use]\n return data\n",
"step-4": "import os, pickle, logging, numpy as np\nfrom .. import utils as U\n\n\nclass CMU_Generator:\n\n def __init__(self, args, dataset_args):\n self.in_path = dataset_args['cmu_data_path']\n self.out_path = '{}/{}'.format(dataset_args['path'], args.dataset)\n self.actions = ['walking', 'running', 'directing_traffic', 'soccer',\n 'basketball', 'washwindow', 'jumping', 'basketball_signal']\n self.dim_ignore = [0, 1, 2, 3, 4, 5, 6, 7, 8, 21, 22, 23, 24, 25, \n 26, 39, 40, 41, 60, 61, 62, 63, 64, 65, 81, 82, 83, 87, 88, 89,\n 90, 91, 92, 108, 109, 110, 114, 115, 116]\n self.dim_use = list(set(range(39 * 3)).difference(set(self.dim_ignore))\n )\n U.create_folder(self.out_path)\n\n def start(self):\n logging.info('Reading data ...')\n self.all_train_data, train_data = self.read_data('train')\n _, eval_data = self.read_data('test')\n logging.info('Normalizing data ...')\n self.data_mean, self.data_std, self.dim_zero, self.dim_nonzero = (self\n .normalize_state())\n train_data = self.normalize_data(train_data)\n eval_data = self.normalize_data(eval_data)\n logging.info('Saving data ...')\n with open('{}/data.pkl'.format(self.out_path), 'wb') as f:\n pickle.dump((train_data, eval_data, self.actions), f)\n with open('{}/normalization.pkl'.format(self.out_path), 'wb') as f:\n pickle.dump((self.data_mean, self.data_std, self.dim_zero, self\n .dim_nonzero), f)\n with open('{}/ignore.pkl'.format(self.out_path), 'wb') as f:\n pickle.dump((self.dim_use, self.dim_ignore), f)\n\n def read_data(self, phase):\n all_data, even_data = [], {}\n for action_idx, action in enumerate(self.actions):\n action_path = '{}/{}/{}'.format(self.in_path, phase, action)\n for sequence_idx, file in enumerate(os.listdir(action_path)):\n sequence = []\n with open('{}/{}'.format(action_path, file), 'r') as f:\n for line in f.readlines():\n line = line.strip().split(',')\n if len(line) > 0:\n sequence.append(np.array([np.float32(x) for x in\n line]))\n sequence = np.array(sequence)\n all_data.append(sequence)\n even_data[action_idx, sequence_idx] = sequence[range(0,\n sequence.shape[0], 2), :]\n return np.concatenate(all_data, axis=0), even_data\n\n def normalize_state(self):\n data_mean = np.mean(self.all_train_data, axis=0)\n data_std = np.std(self.all_train_data, axis=0)\n dim_zero = list(np.where(data_std < 0.0001)[0])\n dim_nonzero = list(np.where(data_std >= 0.0001)[0])\n data_std[dim_zero] = 1.0\n return data_mean, data_std, dim_zero, dim_nonzero\n\n def normalize_data(self, data):\n for key in data.keys():\n data[key] = np.divide(data[key] - self.data_mean, self.data_std)\n data[key] = data[key][:, self.dim_use]\n return data\n",
"step-5": "import os, pickle, logging, numpy as np\r\n\r\nfrom .. import utils as U\r\n\r\n\r\nclass CMU_Generator():\r\n def __init__(self, args, dataset_args):\r\n self.in_path = dataset_args['cmu_data_path']\r\n self.out_path = '{}/{}'.format(dataset_args['path'], args.dataset)\r\n self.actions = ['walking', 'running', 'directing_traffic', 'soccer',\r\n 'basketball', 'washwindow', 'jumping', 'basketball_signal']\r\n self.dim_ignore = [0, 1, 2, 3, 4, 5, 6, 7, 8, 21, 22, 23, 24, 25, 26,\r\n 39, 40, 41, 60, 61, 62, 63, 64, 65, 81, 82, 83,\r\n 87, 88, 89, 90, 91, 92, 108, 109, 110, 114, 115, 116]\r\n self.dim_use = list(set(range(39*3)).difference(set(self.dim_ignore)))\r\n U.create_folder(self.out_path)\r\n\r\n def start(self):\r\n logging.info('Reading data ...')\r\n self.all_train_data, train_data = self.read_data('train')\r\n _, eval_data = self.read_data('test')\r\n\r\n logging.info('Normalizing data ...')\r\n self.data_mean, self.data_std, self.dim_zero, self.dim_nonzero = self.normalize_state()\r\n train_data = self.normalize_data(train_data)\r\n eval_data = self.normalize_data(eval_data)\r\n\r\n logging.info('Saving data ...')\r\n with open('{}/data.pkl'.format(self.out_path), 'wb') as f:\r\n pickle.dump((train_data, eval_data, self.actions), f)\r\n with open('{}/normalization.pkl'.format(self.out_path), 'wb') as f:\r\n pickle.dump((self.data_mean, self.data_std, self.dim_zero, self.dim_nonzero), f)\r\n with open('{}/ignore.pkl'.format(self.out_path), 'wb') as f:\r\n pickle.dump((self.dim_use, self.dim_ignore), f)\r\n\r\n def read_data(self, phase):\r\n all_data, even_data = [], {}\r\n for action_idx, action in enumerate(self.actions):\r\n action_path = '{}/{}/{}'.format(self.in_path, phase, action)\r\n for sequence_idx, file in enumerate(os.listdir(action_path)):\r\n sequence = []\r\n with open('{}/{}'.format(action_path, file), 'r') as f:\r\n for line in f.readlines():\r\n line = line.strip().split(',')\r\n if len(line) > 0:\r\n sequence.append(np.array([np.float32(x) for x in line]))\r\n sequence = np.array(sequence)\r\n all_data.append(sequence)\r\n even_data[(action_idx, sequence_idx)] = sequence[range(0,sequence.shape[0],2),:]\r\n return np.concatenate(all_data, axis=0), even_data\r\n\r\n def normalize_state(self):\r\n data_mean = np.mean(self.all_train_data, axis=0)\r\n data_std = np.std(self.all_train_data, axis=0)\r\n dim_zero = list(np.where(data_std < 0.0001)[0])\r\n dim_nonzero = list(np.where(data_std >= 0.0001)[0])\r\n data_std[dim_zero] = 1.0\r\n return data_mean, data_std, dim_zero, dim_nonzero\r\n\r\n def normalize_data(self, data):\r\n for key in data.keys():\r\n data[key] = np.divide((data[key] - self.data_mean), self.data_std)\r\n data[key] = data[key][:, self.dim_use]\r\n return data\r\n",
"step-ids": [
3,
5,
6,
7,
8
]
}
|
[
3,
5,
6,
7,
8
] |
import os
import requests
def download(url: str, dest_folder: str):
#https://stackoverflow.com/a/56951135/8761164
if not os.path.exists(dest_folder):
os.makedirs(dest_folder) # create folder if it does not exist
filename = url.split('/')[-1].replace(" ", "_") # be careful with file names
file_path = os.path.join(dest_folder, filename)
r = requests.get(url, stream=True)
if r.ok:
print("saving to", os.path.abspath(file_path))
with open(file_path, 'wb') as f:
for chunk in r.iter_content(chunk_size=1024 * 8):
if chunk:
f.write(chunk)
f.flush()
os.fsync(f.fileno())
else:
print("Download failed: status code {}\n{}".format(r.status_code, r.text))
def parse_lat(lat: int):
lat_str = 'N' if lat >= 0 else 'S'
if 10 > lat > -10:
lat_str += '0'
lat_str += str(abs(lat))
return lat_str
def parse_long(long: int):
long_str = 'E' if long >= 0 else 'W'
if 100 > long > -100:
long_str += '0'
if 10 > long > -10:
long_str += '0'
long_str += str(abs(long))
return long_str
if __name__ == '__main__':
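    # sweep the 1-degree ASTER GDEM tile grid: lat 47..22 N, long 14 W..42 E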
for lat in range(47, 21, -1):
for long in range(-14, 43, 1):
#print(parse_lat(lat), parse_long(long))
#print(f"https://gdemdl.aster.jspacesystems.or.jp/download/Download_{parse_lat(lat)}{parse_long(long)}.zip")
download(f"https://gdemdl.aster.jspacesystems.or.jp/download/Download_{parse_lat(lat)}{parse_long(long)}.zip", dest_folder="/media/data-ext/aster-gdem")
|
normal
|
{
"blob_id": "0726a4fa3af196e2ba1592019f09afb0e7bb47d7",
"index": 9731,
"step-1": "<mask token>\n\n\ndef parse_lat(lat: int):\n lat_str = 'N' if lat >= 0 else 'S'\n if 10 > lat > -10:\n lat_str += '0'\n lat_str += str(abs(lat))\n return lat_str\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef download(url: str, dest_folder: str):\n if not os.path.exists(dest_folder):\n os.makedirs(dest_folder)\n filename = url.split('/')[-1].replace(' ', '_')\n file_path = os.path.join(dest_folder, filename)\n r = requests.get(url, stream=True)\n if r.ok:\n print('saving to', os.path.abspath(file_path))\n with open(file_path, 'wb') as f:\n for chunk in r.iter_content(chunk_size=1024 * 8):\n if chunk:\n f.write(chunk)\n f.flush()\n os.fsync(f.fileno())\n else:\n print('Download failed: status code {}\\n{}'.format(r.status_code, r\n .text))\n\n\ndef parse_lat(lat: int):\n lat_str = 'N' if lat >= 0 else 'S'\n if 10 > lat > -10:\n lat_str += '0'\n lat_str += str(abs(lat))\n return lat_str\n\n\ndef parse_long(long: int):\n long_str = 'E' if long >= 0 else 'W'\n if 100 > long > -100:\n long_str += '0'\n if 10 > long > -10:\n long_str += '0'\n long_str += str(abs(long))\n return long_str\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef download(url: str, dest_folder: str):\n if not os.path.exists(dest_folder):\n os.makedirs(dest_folder)\n filename = url.split('/')[-1].replace(' ', '_')\n file_path = os.path.join(dest_folder, filename)\n r = requests.get(url, stream=True)\n if r.ok:\n print('saving to', os.path.abspath(file_path))\n with open(file_path, 'wb') as f:\n for chunk in r.iter_content(chunk_size=1024 * 8):\n if chunk:\n f.write(chunk)\n f.flush()\n os.fsync(f.fileno())\n else:\n print('Download failed: status code {}\\n{}'.format(r.status_code, r\n .text))\n\n\ndef parse_lat(lat: int):\n lat_str = 'N' if lat >= 0 else 'S'\n if 10 > lat > -10:\n lat_str += '0'\n lat_str += str(abs(lat))\n return lat_str\n\n\ndef parse_long(long: int):\n long_str = 'E' if long >= 0 else 'W'\n if 100 > long > -100:\n long_str += '0'\n if 10 > long > -10:\n long_str += '0'\n long_str += str(abs(long))\n return long_str\n\n\nif __name__ == '__main__':\n for lat in range(47, 21, -1):\n for long in range(-14, 43, 1):\n download(\n f'https://gdemdl.aster.jspacesystems.or.jp/download/Download_{parse_lat(lat)}{parse_long(long)}.zip'\n , dest_folder='/media/data-ext/aster-gdem')\n",
"step-4": "import os\nimport requests\n\n\ndef download(url: str, dest_folder: str):\n if not os.path.exists(dest_folder):\n os.makedirs(dest_folder)\n filename = url.split('/')[-1].replace(' ', '_')\n file_path = os.path.join(dest_folder, filename)\n r = requests.get(url, stream=True)\n if r.ok:\n print('saving to', os.path.abspath(file_path))\n with open(file_path, 'wb') as f:\n for chunk in r.iter_content(chunk_size=1024 * 8):\n if chunk:\n f.write(chunk)\n f.flush()\n os.fsync(f.fileno())\n else:\n print('Download failed: status code {}\\n{}'.format(r.status_code, r\n .text))\n\n\ndef parse_lat(lat: int):\n lat_str = 'N' if lat >= 0 else 'S'\n if 10 > lat > -10:\n lat_str += '0'\n lat_str += str(abs(lat))\n return lat_str\n\n\ndef parse_long(long: int):\n long_str = 'E' if long >= 0 else 'W'\n if 100 > long > -100:\n long_str += '0'\n if 10 > long > -10:\n long_str += '0'\n long_str += str(abs(long))\n return long_str\n\n\nif __name__ == '__main__':\n for lat in range(47, 21, -1):\n for long in range(-14, 43, 1):\n download(\n f'https://gdemdl.aster.jspacesystems.or.jp/download/Download_{parse_lat(lat)}{parse_long(long)}.zip'\n , dest_folder='/media/data-ext/aster-gdem')\n",
"step-5": "import os\nimport requests\n\ndef download(url: str, dest_folder: str):\n #https://stackoverflow.com/a/56951135/8761164\n if not os.path.exists(dest_folder):\n os.makedirs(dest_folder) # create folder if it does not exist\n\n filename = url.split('/')[-1].replace(\" \", \"_\") # be careful with file names\n file_path = os.path.join(dest_folder, filename)\n\n r = requests.get(url, stream=True)\n\n if r.ok:\n print(\"saving to\", os.path.abspath(file_path))\n with open(file_path, 'wb') as f:\n for chunk in r.iter_content(chunk_size=1024 * 8):\n if chunk:\n f.write(chunk)\n f.flush()\n os.fsync(f.fileno())\n else:\n print(\"Download failed: status code {}\\n{}\".format(r.status_code, r.text))\n\n\ndef parse_lat(lat: int):\n lat_str = 'N' if lat >= 0 else 'S'\n if 10 > lat > -10:\n lat_str += '0'\n lat_str += str(abs(lat))\n return lat_str\n\ndef parse_long(long: int):\n long_str = 'E' if long >= 0 else 'W'\n if 100 > long > -100:\n long_str += '0'\n if 10 > long > -10:\n long_str += '0'\n long_str += str(abs(long))\n return long_str\n\n\nif __name__=='__main__':\n\n for lat in range(47, 21, -1):\n for long in range(-14, 43, 1):\n #print(parse_lat(lat), parse_long(long))\n #print(f\"https://gdemdl.aster.jspacesystems.or.jp/download/Download_{parse_lat(lat)}{parse_long(long)}.zip\")\n download(f\"https://gdemdl.aster.jspacesystems.or.jp/download/Download_{parse_lat(lat)}{parse_long(long)}.zip\", dest_folder=\"/media/data-ext/aster-gdem\")",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
# Create a pandas DataFrame from the DarkSage output arrays G[...]
import pandas as pd
import numpy as np
# This is a way to convert multi-dimensional fields into pd.Series objects and then load these into the pandas DataFrame
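# (a compact alternative, assuming each field of G is a numpy array:
#    def to_obj_series(arr):
#        return pd.Series(list(arr), dtype=np.dtype("object"))
#  would replace every per-field loop below)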
Pos = []
for p in G['Pos']:
Pos.append(p)
Pos_df = pd.Series(Pos, dtype=np.dtype("object"))
Vel = []
for v in G['Vel']:
Vel.append(v)
Vel_df = pd.Series(Vel, dtype=np.dtype("object"))
Spin = []
for s in G['Spin']:
Spin.append(s)
Spin_df = pd.Series(Spin, dtype=np.dtype("object"))
Disc_r = []
for d in G['DiscRadii']:
Disc_r.append(d)
Disc_df = pd.Series(Disc_r, dtype=np.dtype("object"))
Disc_gas = []
for g in G['DiscGas']:
Disc_gas.append(g)
Disc_gas_df = pd.Series(Disc_gas, dtype=np.dtype("object"))
Disc_stars = []
for g in G['DiscStars']:
Disc_stars.append(g)
Disc_stars_df = pd.Series(Disc_stars, dtype=np.dtype("object"))
SpinStars = []
for g in G['SpinStars']:
SpinStars.append(g)
SpinStars_df = pd.Series(SpinStars, dtype=np.dtype("object"))
SpinGas = []
for g in G['SpinGas']:
SpinGas.append(g)
SpinGas_df = pd.Series(SpinGas, dtype=np.dtype("object"))
SpinClassicalBulge = []
for g in G['SpinClassicalBulge']:
SpinClassicalBulge.append(g)
SpinClassicalBulge_df = pd.Series(SpinClassicalBulge, dtype=np.dtype("object"))
DiscHI = []
for g in G['DiscHI']:
DiscHI.append(g)
DiscHI_df = pd.Series(DiscHI, dtype=np.dtype("object"))
DiscH2 = []
for g in G['DiscH2']:
DiscH2.append(g)
DiscH2_df = pd.Series(DiscH2, dtype=np.dtype("object"))
DiscSFR = []
for g in G['DiscSFR']:
DiscSFR.append(g)
DiscSFR_df = pd.Series(DiscSFR, dtype=np.dtype("object"))
DiscGasMetals = []
for g in G['DiscGasMetals']:
DiscGasMetals.append(g)
DiscGasMetals_df = pd.Series(DiscGasMetals, dtype=np.dtype("object"))
DiscStarsMetals = []
for g in G['DiscStarsMetals']:
DiscStarsMetals.append(g)
DiscStarsMetals_df = pd.Series(DiscStarsMetals, dtype=np.dtype("object"))
######################################
DS = pd.DataFrame({'Type' : G['Type' ],
'GalaxyIndex' : G['GalaxyIndex' ],
'HaloIndex' : G['HaloIndex' ],
'SimulationHaloIndex' : G['SimulationHaloIndex' ],
'TreeIndex' : G['TreeIndex' ],
'SnapNum' : G['SnapNum' ],
'CentralGalaxyIndex' : G['CentralGalaxyIndex' ],
'CentralMvir' : G['CentralMvir' ],
'mergeType' : G['mergeType' ],
'mergeIntoID' : G['mergeIntoID' ],
'mergeIntoSnapNum' : G['mergeIntoSnapNum' ],
'dT' : G['dT' ],
'Pos' : Pos_df,
'Vel' : Vel_df ,
'Spin' : Spin_df ,
'Len' : G['Len' ],
'LenMax' : G['LenMax' ],
'Mvir' : G['Mvir' ],
'Rvir' : G['Rvir' ],
'Vvir' : G['Vvir' ],
'Vmax' : G['Vmax' ],
'VelDisp' : G['VelDisp' ],
'DiscRadii' : Disc_df,
'ColdGas' : G['ColdGas' ],
'StellarMass' : G['StellarMass' ],
'MergerBulgeMass' : G['MergerBulgeMass' ],
'InstabilityBulgeMass' : G['InstabilityBulgeMass' ],
'HotGas' : G['HotGas' ],
'EjectedMass' : G['EjectedMass' ],
'BlackHoleMass' : G['BlackHoleMass' ],
'IntraClusterStars' : G['IntraClusterStars' ],
'DiscGas' : Disc_gas_df,
'DiscStars' : Disc_stars_df,
'SpinStars' : SpinStars_df,
'SpinGas' : SpinGas_df,
'SpinClassicalBulge' : SpinClassicalBulge_df,
'StarsInSitu' : G['StarsInSitu' ],
'StarsInstability' : G['StarsInstability' ],
'StarsMergeBurst' : G['StarsMergeBurst' ],
'DiscHI' : DiscHI_df,
'DiscH2' : DiscH2_df,
'DiscSFR' : DiscSFR_df,
'MetalsColdGas' : G['MetalsColdGas' ],
'MetalsStellarMass' : G['MetalsStellarMass' ],
'ClassicalMetalsBulgeMass' : G['ClassicalMetalsBulgeMass' ],
'SecularMetalsBulgeMass' : G['SecularMetalsBulgeMass' ],
'MetalsHotGas' : G['MetalsHotGas' ],
'MetalsEjectedMass' : G['MetalsEjectedMass' ],
'MetalsIntraClusterStars' : G['MetalsIntraClusterStars' ],
'DiscGasMetals' : DiscGasMetals_df,
'DiscStarsMetals' : DiscStarsMetals_df,
'SfrFromH2' : G['SfrFromH2' ],
'SfrInstab' : G['SfrInstab' ],
'SfrMergeBurst' : G['SfrMergeBurst' ],
'SfrDiskZ' : G['SfrDiskZ' ],
'SfrBulgeZ' : G['SfrBulgeZ' ],
'DiskScaleRadius' : G['DiskScaleRadius' ],
'CoolScaleRadius' : G['CoolScaleRadius' ],
'StellarDiscScaleRadius' : G['StellarDiscScaleRadius' ],
'Cooling' : G['Cooling' ],
'Heating' : G['Heating' ],
'LastMajorMerger' : G['LastMajorMerger' ],
'LastMinorMerger' : G['LastMinorMerger' ],
'OutflowRate' : G['OutflowRate' ],
'infallMvir' : G['infallMvir' ],
'infallVvir' : G['infallVvir' ],
'infallVmax' : G['infallVmax' ]})
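
# quick sanity checks on the assembled frame (standard pandas calls):
#   DS.shape, DS.dtypes.head(), DS['StellarMass'].describe()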
|
normal
|
{
"blob_id": "0d565c9f92a60d25f28c903c0a27e7b93d547a4f",
"index": 2971,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor p in G['Pos']:\n Pos.append(p)\n<mask token>\nfor v in G['Vel']:\n Vel.append(v)\n<mask token>\nfor s in G['Spin']:\n Spin.append(s)\n<mask token>\nfor d in G['DiscRadii']:\n Disc_r.append(d)\n<mask token>\nfor g in G['DiscGas']:\n Disc_gas.append(g)\n<mask token>\nfor g in G['DiscStars']:\n Disc_stars.append(g)\n<mask token>\nfor g in G['SpinStars']:\n SpinStars.append(g)\n<mask token>\nfor g in G['SpinGas']:\n SpinGas.append(g)\n<mask token>\nfor g in G['SpinClassicalBulge']:\n SpinClassicalBulge.append(g)\n<mask token>\nfor g in G['DiscHI']:\n DiscHI.append(g)\n<mask token>\nfor g in G['DiscH2']:\n DiscH2.append(g)\n<mask token>\nfor g in G['DiscSFR']:\n DiscSFR.append(g)\n<mask token>\nfor g in G['DiscGasMetals']:\n DiscGasMetals.append(g)\n<mask token>\nfor g in G['DiscStarsMetals']:\n DiscStarsMetals.append(g)\n<mask token>\n",
"step-3": "<mask token>\nPos = []\nfor p in G['Pos']:\n Pos.append(p)\nPos_df = pd.Series(Pos, dtype=np.dtype('object'))\nVel = []\nfor v in G['Vel']:\n Vel.append(v)\nVel_df = pd.Series(Vel, dtype=np.dtype('object'))\nSpin = []\nfor s in G['Spin']:\n Spin.append(s)\nSpin_df = pd.Series(Spin, dtype=np.dtype('object'))\nDisc_r = []\nfor d in G['DiscRadii']:\n Disc_r.append(d)\nDisc_df = pd.Series(Disc_r, dtype=np.dtype('object'))\nDisc_gas = []\nfor g in G['DiscGas']:\n Disc_gas.append(g)\nDisc_gas_df = pd.Series(Disc_gas, dtype=np.dtype('object'))\nDisc_stars = []\nfor g in G['DiscStars']:\n Disc_stars.append(g)\nDisc_stars_df = pd.Series(Disc_stars, dtype=np.dtype('object'))\nSpinStars = []\nfor g in G['SpinStars']:\n SpinStars.append(g)\nSpinStars_df = pd.Series(SpinStars, dtype=np.dtype('object'))\nSpinGas = []\nfor g in G['SpinGas']:\n SpinGas.append(g)\nSpinGas_df = pd.Series(SpinGas, dtype=np.dtype('object'))\nSpinClassicalBulge = []\nfor g in G['SpinClassicalBulge']:\n SpinClassicalBulge.append(g)\nSpinClassicalBulge_df = pd.Series(SpinClassicalBulge, dtype=np.dtype('object'))\nDiscHI = []\nfor g in G['DiscHI']:\n DiscHI.append(g)\nDiscHI_df = pd.Series(DiscHI, dtype=np.dtype('object'))\nDiscH2 = []\nfor g in G['DiscH2']:\n DiscH2.append(g)\nDiscH2_df = pd.Series(DiscH2, dtype=np.dtype('object'))\nDiscSFR = []\nfor g in G['DiscSFR']:\n DiscSFR.append(g)\nDiscSFR_df = pd.Series(DiscSFR, dtype=np.dtype('object'))\nDiscGasMetals = []\nfor g in G['DiscGasMetals']:\n DiscGasMetals.append(g)\nDiscGasMetals_df = pd.Series(DiscGasMetals, dtype=np.dtype('object'))\nDiscStarsMetals = []\nfor g in G['DiscStarsMetals']:\n DiscStarsMetals.append(g)\nDiscStarsMetals_df = pd.Series(DiscStarsMetals, dtype=np.dtype('object'))\nDS = pd.DataFrame({'Type': G['Type'], 'GalaxyIndex': G['GalaxyIndex'],\n 'HaloIndex': G['HaloIndex'], 'SimulationHaloIndex': G[\n 'SimulationHaloIndex'], 'TreeIndex': G['TreeIndex'], 'SnapNum': G[\n 'SnapNum'], 'CentralGalaxyIndex': G['CentralGalaxyIndex'],\n 'CentralMvir': G['CentralMvir'], 'mergeType': G['mergeType'],\n 'mergeIntoID': G['mergeIntoID'], 'mergeIntoSnapNum': G[\n 'mergeIntoSnapNum'], 'dT': G['dT'], 'Pos': Pos_df, 'Vel': Vel_df,\n 'Spin': Spin_df, 'Len': G['Len'], 'LenMax': G['LenMax'], 'Mvir': G[\n 'Mvir'], 'Rvir': G['Rvir'], 'Vvir': G['Vvir'], 'Vmax': G['Vmax'],\n 'VelDisp': G['VelDisp'], 'DiscRadii': Disc_df, 'ColdGas': G['ColdGas'],\n 'StellarMass': G['StellarMass'], 'MergerBulgeMass': G['MergerBulgeMass'\n ], 'InstabilityBulgeMass': G['InstabilityBulgeMass'], 'HotGas': G[\n 'HotGas'], 'EjectedMass': G['EjectedMass'], 'BlackHoleMass': G[\n 'BlackHoleMass'], 'IntraClusterStars': G['IntraClusterStars'],\n 'DiscGas': Disc_gas_df, 'DiscStars': Disc_stars_df, 'SpinStars':\n SpinStars_df, 'SpinGas': SpinGas_df, 'SpinClassicalBulge':\n SpinClassicalBulge_df, 'StarsInSitu': G['StarsInSitu'],\n 'StarsInstability': G['StarsInstability'], 'StarsMergeBurst': G[\n 'StarsMergeBurst'], 'DiscHI': DiscHI_df, 'DiscH2': DiscH2_df, 'DiscSFR':\n DiscSFR_df, 'MetalsColdGas': G['MetalsColdGas'], 'MetalsStellarMass': G\n ['MetalsStellarMass'], 'ClassicalMetalsBulgeMass': G[\n 'ClassicalMetalsBulgeMass'], 'SecularMetalsBulgeMass': G[\n 'SecularMetalsBulgeMass'], 'MetalsHotGas': G['MetalsHotGas'],\n 'MetalsEjectedMass': G['MetalsEjectedMass'], 'MetalsIntraClusterStars':\n G['MetalsIntraClusterStars'], 'DiscGasMetals': DiscGasMetals_df,\n 'DiscStarsMetals': DiscStarsMetals_df, 'SfrFromH2': G['SfrFromH2'],\n 'SfrInstab': G['SfrInstab'], 'SfrMergeBurst': G['SfrMergeBurst'],\n 
'SfrDiskZ': G['SfrDiskZ'], 'SfrBulgeZ': G['SfrBulgeZ'],\n 'DiskScaleRadius': G['DiskScaleRadius'], 'CoolScaleRadius': G[\n 'CoolScaleRadius'], 'StellarDiscScaleRadius': G[\n 'StellarDiscScaleRadius'], 'Cooling': G['Cooling'], 'Heating': G[\n 'Heating'], 'LastMajorMerger': G['LastMajorMerger'], 'LastMinorMerger':\n G['LastMinorMerger'], 'OutflowRate': G['OutflowRate'], 'infallMvir': G[\n 'infallMvir'], 'infallVvir': G['infallVvir'], 'infallVmax': G[\n 'infallVmax']})\n",
"step-4": "import pandas as pd\nimport numpy as np\nPos = []\nfor p in G['Pos']:\n Pos.append(p)\nPos_df = pd.Series(Pos, dtype=np.dtype('object'))\nVel = []\nfor v in G['Vel']:\n Vel.append(v)\nVel_df = pd.Series(Vel, dtype=np.dtype('object'))\nSpin = []\nfor s in G['Spin']:\n Spin.append(s)\nSpin_df = pd.Series(Spin, dtype=np.dtype('object'))\nDisc_r = []\nfor d in G['DiscRadii']:\n Disc_r.append(d)\nDisc_df = pd.Series(Disc_r, dtype=np.dtype('object'))\nDisc_gas = []\nfor g in G['DiscGas']:\n Disc_gas.append(g)\nDisc_gas_df = pd.Series(Disc_gas, dtype=np.dtype('object'))\nDisc_stars = []\nfor g in G['DiscStars']:\n Disc_stars.append(g)\nDisc_stars_df = pd.Series(Disc_stars, dtype=np.dtype('object'))\nSpinStars = []\nfor g in G['SpinStars']:\n SpinStars.append(g)\nSpinStars_df = pd.Series(SpinStars, dtype=np.dtype('object'))\nSpinGas = []\nfor g in G['SpinGas']:\n SpinGas.append(g)\nSpinGas_df = pd.Series(SpinGas, dtype=np.dtype('object'))\nSpinClassicalBulge = []\nfor g in G['SpinClassicalBulge']:\n SpinClassicalBulge.append(g)\nSpinClassicalBulge_df = pd.Series(SpinClassicalBulge, dtype=np.dtype('object'))\nDiscHI = []\nfor g in G['DiscHI']:\n DiscHI.append(g)\nDiscHI_df = pd.Series(DiscHI, dtype=np.dtype('object'))\nDiscH2 = []\nfor g in G['DiscH2']:\n DiscH2.append(g)\nDiscH2_df = pd.Series(DiscH2, dtype=np.dtype('object'))\nDiscSFR = []\nfor g in G['DiscSFR']:\n DiscSFR.append(g)\nDiscSFR_df = pd.Series(DiscSFR, dtype=np.dtype('object'))\nDiscGasMetals = []\nfor g in G['DiscGasMetals']:\n DiscGasMetals.append(g)\nDiscGasMetals_df = pd.Series(DiscGasMetals, dtype=np.dtype('object'))\nDiscStarsMetals = []\nfor g in G['DiscStarsMetals']:\n DiscStarsMetals.append(g)\nDiscStarsMetals_df = pd.Series(DiscStarsMetals, dtype=np.dtype('object'))\nDS = pd.DataFrame({'Type': G['Type'], 'GalaxyIndex': G['GalaxyIndex'],\n 'HaloIndex': G['HaloIndex'], 'SimulationHaloIndex': G[\n 'SimulationHaloIndex'], 'TreeIndex': G['TreeIndex'], 'SnapNum': G[\n 'SnapNum'], 'CentralGalaxyIndex': G['CentralGalaxyIndex'],\n 'CentralMvir': G['CentralMvir'], 'mergeType': G['mergeType'],\n 'mergeIntoID': G['mergeIntoID'], 'mergeIntoSnapNum': G[\n 'mergeIntoSnapNum'], 'dT': G['dT'], 'Pos': Pos_df, 'Vel': Vel_df,\n 'Spin': Spin_df, 'Len': G['Len'], 'LenMax': G['LenMax'], 'Mvir': G[\n 'Mvir'], 'Rvir': G['Rvir'], 'Vvir': G['Vvir'], 'Vmax': G['Vmax'],\n 'VelDisp': G['VelDisp'], 'DiscRadii': Disc_df, 'ColdGas': G['ColdGas'],\n 'StellarMass': G['StellarMass'], 'MergerBulgeMass': G['MergerBulgeMass'\n ], 'InstabilityBulgeMass': G['InstabilityBulgeMass'], 'HotGas': G[\n 'HotGas'], 'EjectedMass': G['EjectedMass'], 'BlackHoleMass': G[\n 'BlackHoleMass'], 'IntraClusterStars': G['IntraClusterStars'],\n 'DiscGas': Disc_gas_df, 'DiscStars': Disc_stars_df, 'SpinStars':\n SpinStars_df, 'SpinGas': SpinGas_df, 'SpinClassicalBulge':\n SpinClassicalBulge_df, 'StarsInSitu': G['StarsInSitu'],\n 'StarsInstability': G['StarsInstability'], 'StarsMergeBurst': G[\n 'StarsMergeBurst'], 'DiscHI': DiscHI_df, 'DiscH2': DiscH2_df, 'DiscSFR':\n DiscSFR_df, 'MetalsColdGas': G['MetalsColdGas'], 'MetalsStellarMass': G\n ['MetalsStellarMass'], 'ClassicalMetalsBulgeMass': G[\n 'ClassicalMetalsBulgeMass'], 'SecularMetalsBulgeMass': G[\n 'SecularMetalsBulgeMass'], 'MetalsHotGas': G['MetalsHotGas'],\n 'MetalsEjectedMass': G['MetalsEjectedMass'], 'MetalsIntraClusterStars':\n G['MetalsIntraClusterStars'], 'DiscGasMetals': DiscGasMetals_df,\n 'DiscStarsMetals': DiscStarsMetals_df, 'SfrFromH2': G['SfrFromH2'],\n 'SfrInstab': G['SfrInstab'], 'SfrMergeBurst': 
G['SfrMergeBurst'],\n 'SfrDiskZ': G['SfrDiskZ'], 'SfrBulgeZ': G['SfrBulgeZ'],\n 'DiskScaleRadius': G['DiskScaleRadius'], 'CoolScaleRadius': G[\n 'CoolScaleRadius'], 'StellarDiscScaleRadius': G[\n 'StellarDiscScaleRadius'], 'Cooling': G['Cooling'], 'Heating': G[\n 'Heating'], 'LastMajorMerger': G['LastMajorMerger'], 'LastMinorMerger':\n G['LastMinorMerger'], 'OutflowRate': G['OutflowRate'], 'infallMvir': G[\n 'infallMvir'], 'infallVvir': G['infallVvir'], 'infallVmax': G[\n 'infallVmax']})\n",
"step-5": "#Create Pandas dataframe from the DarkSage output G['']\n\nimport pandas as pd\nimport numpy as np\n\n\n# This is a way to converte multi dimensional data into pd.Series and then load these into the pandas dataframe\nPos = []\nfor p in G['Pos']:\n Pos.append(p)\nPos_df = pd.Series(Pos, dtype=np.dtype(\"object\"))\n\nVel = []\nfor v in G['Vel']:\n Vel.append(v)\nVel_df = pd.Series(Vel, dtype=np.dtype(\"object\"))\n\nSpin = []\nfor s in G['Spin']:\n Spin.append(s)\nSpin_df = pd.Series(Spin, dtype=np.dtype(\"object\"))\n\nDisc_r = []\nfor d in G['DiscRadii']:\n Disc_r.append(d)\nDisc_df = pd.Series(Disc_r, dtype=np.dtype(\"object\"))\n\nDisc_gas = []\nfor g in G['DiscGas']:\n Disc_gas.append(g)\nDisc_gas_df = pd.Series(Disc_gas, dtype=np.dtype(\"object\"))\n\nDisc_stars = []\nfor g in G['DiscStars']:\n Disc_stars.append(g)\nDisc_stars_df = pd.Series(Disc_stars, dtype=np.dtype(\"object\"))\n\nSpinStars = []\nfor g in G['SpinStars']:\n SpinStars.append(g)\nSpinStars_df = pd.Series(SpinStars, dtype=np.dtype(\"object\"))\n\nSpinGas = []\nfor g in G['SpinGas']:\n SpinGas.append(g)\nSpinGas_df = pd.Series(SpinGas , dtype=np.dtype(\"object\"))\n\nSpinClassicalBulge = []\nfor g in G['SpinClassicalBulge']:\n SpinClassicalBulge.append(g)\nSpinClassicalBulge_df = pd.Series(SpinClassicalBulge, dtype=np.dtype(\"object\"))\n\nDiscHI = []\nfor g in G['DiscHI']:\n DiscHI.append(g)\nDiscHI_df = pd.Series(DiscHI, dtype=np.dtype(\"object\"))\n\nDiscH2 = []\nfor g in G['DiscH2']:\n DiscH2.append(g)\nDiscH2_df = pd.Series(DiscH2, dtype=np.dtype(\"object\"))\n\nDiscSFR = []\nfor g in G['DiscSFR']:\n DiscSFR.append(g)\nDiscSFR_df = pd.Series(DiscSFR, dtype=np.dtype(\"object\"))\n\nDiscGasMetals = []\nfor g in G['DiscGasMetals']:\n DiscGasMetals.append(g)\nDiscGasMetals_df = pd.Series(DiscGasMetals, dtype=np.dtype(\"object\"))\n\nDiscStarsMetals = []\nfor g in G['DiscStarsMetals']:\n DiscStarsMetals.append(g)\nDiscStarsMetals_df = pd.Series(DiscStarsMetals, dtype=np.dtype(\"object\"))\n\n\n\n\n######################################\n\n\nDS = pd.DataFrame({'Type' : G['Type' ],\n'GalaxyIndex' : G['GalaxyIndex' ],\n'HaloIndex' : G['HaloIndex' ],\n'SimulationHaloIndex' : G['SimulationHaloIndex' ],\n'TreeIndex' : G['TreeIndex' ],\n'SnapNum' : G['SnapNum' ],\n'CentralGalaxyIndex' : G['CentralGalaxyIndex' ],\n'CentralMvir' : G['CentralMvir' ],\n'mergeType' : G['mergeType' ],\n'mergeIntoID' : G['mergeIntoID' ],\n'mergeIntoSnapNum' : G['mergeIntoSnapNum' ],\n'dT' : G['dT' ],\n'Pos' : Pos_df,\n'Vel' : Vel_df ,\n'Spin' : Spin_df ,\n'Len' : G['Len' ],\n'LenMax' : G['LenMax' ],\n'Mvir' : G['Mvir' ],\n'Rvir' : G['Rvir' ],\n'Vvir' : G['Vvir' ],\n'Vmax' : G['Vmax' ],\n'VelDisp' : G['VelDisp' ],\n'DiscRadii' : Disc_df,\n'ColdGas' : G['ColdGas' ],\n'StellarMass' : G['StellarMass' ],\n'MergerBulgeMass' : G['MergerBulgeMass' ],\n'InstabilityBulgeMass' : G['InstabilityBulgeMass' ],\n'HotGas' : G['HotGas' ],\n'EjectedMass' : G['EjectedMass' ],\n'BlackHoleMass' : G['BlackHoleMass' ],\n'IntraClusterStars' : G['IntraClusterStars' ],\n'DiscGas' : Disc_gas_df,\n'DiscStars' : Disc_stars_df,\n'SpinStars' : SpinStars_df,\n'SpinGas' : SpinGas_df,\n'SpinClassicalBulge' : SpinClassicalBulge_df,\n'StarsInSitu' : G['StarsInSitu' ],\n'StarsInstability' : G['StarsInstability' ],\n'StarsMergeBurst' : G['StarsMergeBurst' ],\n'DiscHI' : DiscHI_df,\n'DiscH2' : DiscH2_df,\n'DiscSFR' : DiscSFR_df,\n'MetalsColdGas' : G['MetalsColdGas' ],\n'MetalsStellarMass' : G['MetalsStellarMass' ],\n'ClassicalMetalsBulgeMass' : G['ClassicalMetalsBulgeMass' 
],\n'SecularMetalsBulgeMass' : G['SecularMetalsBulgeMass' ],\n'MetalsHotGas' : G['MetalsHotGas' ],\n'MetalsEjectedMass' : G['MetalsEjectedMass' ],\n'MetalsIntraClusterStars' : G['MetalsIntraClusterStars' ],\n'DiscGasMetals' : DiscGasMetals_df,\n'DiscStarsMetals' : DiscStarsMetals_df,\n'SfrFromH2' : G['SfrFromH2' ],\n'SfrInstab' : G['SfrInstab' ],\n'SfrMergeBurst' : G['SfrMergeBurst' ],\n'SfrDiskZ' : G['SfrDiskZ' ],\n'SfrBulgeZ' : G['SfrBulgeZ' ],\n'DiskScaleRadius' : G['DiskScaleRadius' ],\n'CoolScaleRadius' : G['CoolScaleRadius' ],\n'StellarDiscScaleRadius' : G['StellarDiscScaleRadius' ],\n'Cooling' : G['Cooling' ],\n'Heating' : G['Heating' ],\n'LastMajorMerger' : G['LastMajorMerger' ],\n'LastMinorMerger' : G['LastMinorMerger' ],\n'OutflowRate' : G['OutflowRate' ],\n'infallMvir' : G['infallMvir' ],\n'infallVvir' : G['infallVvir' ],\n'infallVmax' : G['infallVmax' ]})\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def print_duplicates(arr):
uniques = set()
for elem in arr:
if elem in uniques:
print(elem, end=' ')
else:
uniques.add(elem)
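
# Example: print_duplicates([1, 2, 3, 2, 1]) prints "2 1": every repeat
# occurrence is reported as it is seen, using O(1) set membership checks.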
|
flexible
|
{
"blob_id": "420c3944de0a5436a9824604fd6caf27706eb99c",
"index": 4102,
"step-1": "<mask token>\n",
"step-2": "def print_duplicates(arr):\n uniques = set()\n for elem in arr:\n if elem in uniques:\n print(elem, end=' ')\n else:\n uniques.add(elem)\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
# importing libraries
import cv2
import numpy as np
import argparse
aq = argparse.ArgumentParser()
aq.add_argument('-i', '--input', required=True, help="input image path")
aq.add_argument('-o', '--output', help="path where you want to download the image")
args = vars(aq.parse_args())
# reading image
img = cv2.imread(args['input'])
# Edges
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
gray = cv2.medianBlur(gray, 5)
edges = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
cv2.THRESH_BINARY, 9, 9)
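# ADAPTIVE_THRESH_MEAN_C + THRESH_BINARY: pixels at least C=9 below their local
# 9x9 mean become 0, leaving black edge lines on an otherwise white mask.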
# Cartoonization
color = cv2.bilateralFilter(img, 2, 250, 250)
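# bilateralFilter flattens color regions while preserving strong edges; the
# bitwise_or below keeps color only where the mask is nonzero, so the detected
# edges come out as black cartoon outlines.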
cartoon = cv2.bitwise_or(color, color, mask=edges)
if args['output']:
cv2.imwrite(args['output'], cartoon)
cv2.imshow("Cartoon", cartoon)
cv2.waitKey(0)
cv2.destroyAllWindows()
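
# Example invocation (script name assumed): python cartoonify.py -i photo.jpg -o toon.jpg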
|
normal
|
{
"blob_id": "10cefb1cf2392fdcd368f11d0d69774a9ffa73ec",
"index": 2816,
"step-1": "<mask token>\n",
"step-2": "<mask token>\naq.add_argument('-i', '--input', required=True, help='input image path')\naq.add_argument('-o', '--output', help=\n 'path where you want to download the image')\n<mask token>\nif args['output']:\n cv2.imwrite(args['output'], cartoon)\ncv2.imshow('Cartoon', cartoon)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n",
"step-3": "<mask token>\naq = argparse.ArgumentParser()\naq.add_argument('-i', '--input', required=True, help='input image path')\naq.add_argument('-o', '--output', help=\n 'path where you want to download the image')\nargs = vars(aq.parse_args())\nimg = cv2.imread(args['input'])\ngray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\ngray = cv2.medianBlur(gray, 5)\nedges = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.\n THRESH_BINARY, 9, 9)\ncolor = cv2.bilateralFilter(img, 2, 250, 250)\ncartoon = cv2.bitwise_or(color, color, mask=edges)\nif args['output']:\n cv2.imwrite(args['output'], cartoon)\ncv2.imshow('Cartoon', cartoon)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n",
"step-4": "import cv2\nimport numpy as np\nimport argparse\naq = argparse.ArgumentParser()\naq.add_argument('-i', '--input', required=True, help='input image path')\naq.add_argument('-o', '--output', help=\n 'path where you want to download the image')\nargs = vars(aq.parse_args())\nimg = cv2.imread(args['input'])\ngray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\ngray = cv2.medianBlur(gray, 5)\nedges = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.\n THRESH_BINARY, 9, 9)\ncolor = cv2.bilateralFilter(img, 2, 250, 250)\ncartoon = cv2.bitwise_or(color, color, mask=edges)\nif args['output']:\n cv2.imwrite(args['output'], cartoon)\ncv2.imshow('Cartoon', cartoon)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n",
"step-5": "# importing libraries \nimport cv2 \nimport numpy as np \nimport argparse\n\naq = argparse.ArgumentParser()\n\naq.add_argument('-i', '--input', required=True, help=\"input image path\")\n\naq.add_argument('-o', '--output', help=\"path where you want to download the image\")\n\nargs = vars(aq.parse_args())\n# reading image \nimg = cv2.imread(args['input']) \n \n# Edges \ngray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) \ngray = cv2.medianBlur(gray, 5) \nedges = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C, \n cv2.THRESH_BINARY, 9, 9) \n \n# Cartoonization \ncolor = cv2.bilateralFilter(img, 2, 250, 250) \ncartoon = cv2.bitwise_or(color, color, mask=edges) \n \nif(args['output']):\n\tcv2.imwrite(args['output'], cartoon)\n\n\ncv2.imshow(\"Cartoon\", cartoon) \ncv2.waitKey(0) \ncv2.destroyAllWindows() ",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def mkdir_p(mypath):
"""Creates a directory. equivalent to using mkdir -p on the command line"""
from errno import EEXIST
from os import makedirs, path
try:
makedirs(mypath)
except OSError as exc:
if exc.errno == EEXIST and path.isdir(mypath):
pass
else:
raise
<|reserved_special_token_0|>
def get_init_hr(hour):
if int(hour) < 6:
init_hour = '00'
elif int(hour) < 11:
init_hour = '06'
elif int(hour) < 17:
init_hour = '12'
elif int(hour) < 22:
init_hour = '18'
else:
init_hour = '00'
return init_hour
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def mkdir_p(mypath):
"""Creates a directory. equivalent to using mkdir -p on the command line"""
from errno import EEXIST
from os import makedirs, path
try:
makedirs(mypath)
except OSError as exc:
if exc.errno == EEXIST and path.isdir(mypath):
pass
else:
raise
<|reserved_special_token_0|>
if startTime.month < 10:
month = '0' + str(startTime.month)
else:
month = str(startTime.month)
if startTime.day < 10:
day = '0' + str(startTime.day)
else:
day = str(startTime.day)
if startTime.hour < 10:
hour = '0' + str(startTime.hour)
else:
hour = str(startTime.hour)
<|reserved_special_token_0|>
def get_init_hr(hour):
if int(hour) < 6:
init_hour = '00'
elif int(hour) < 11:
init_hour = '06'
elif int(hour) < 17:
init_hour = '12'
elif int(hour) < 22:
init_hour = '18'
else:
init_hour = '00'
return init_hour
<|reserved_special_token_0|>
mkdir_p(output_dir)
mkdir_p(output_dir + '/GFS')
<|reserved_special_token_0|>
for i in range(1, 120):
fc_hr = init_hr + dt.timedelta(hours=1 * i)
forecast_hour = times[0].values
data = ds.metpy.parse_cf()
data = data.isel(time=i)
data = data.rename({'absvprs': 'avort', 'hgtprs': 'gph', 'rhprs': 'rh',
'tmpprs': 'temp', 'ugrdprs': 'u', 'vgrdprs': 'v'})
vertical, = data['temp'].metpy.coordinates('vertical')
time = data['temp'].metpy.time
zH5_crs = data['temp'].metpy.cartopy_crs
t5 = data['temp'].sel(lev=500.0, lat=lats, lon=lons)
u5 = data['u'].sel(lev=500.0, lat=lats, lon=lons).squeeze() * 1.94384449
v5 = data['v'].sel(lev=500.0, lat=lats, lon=lons).squeeze() * 1.94384449
av5 = data['avort'].sel(lev=500.0, lat=lats, lon=lons).squeeze() * 100000.0
rh5 = data['rh'].sel(lev=500.0, lat=lats, lon=lons).squeeze()
h5 = data['gph'].sel(lev=500.0, lat=lats, lon=lons).squeeze()
x, y = t5.metpy.coordinates('x', 'y')
lat, lon = xr.broadcast(y, x)
wind_slice = slice(5, -5, 5)
fig = plt.figure(figsize=(15, 15))
ax1 = fig.add_subplot(111, projection=zH5_crs)
ax1.coastlines(resolution='10m')
ax1.add_feature(cfeature.BORDERS.with_scale('10m'))
ax1.add_feature(cfeature.STATES.with_scale('10m'))
h5c = ax1.contour(x, y, h5, colors='dimgray', levels=range(4800, 6200,
60), linewidths=1.5)
t5c = ax1.contour(x, y, t5, colors='r', levels=range(-60, 0, 5),
linestyles='dashed', linewidths=1)
a5c = ax1.contourf(x, y, av5, cmap='autumn_r', levels=range(10, 60, 2),
alpha=0.8, extend='max')
a5cb = fig.colorbar(a5c, orientation='horizontal', aspect=80, ax=ax1,
pad=0.01, extendrect=False, ticks=range(10, 61, 5))
a5cb.set_label('500mb Absolute Vorticity ($s^{-1}$)', fontsize=12)
ax1.barbs(x[wind_slice], y[wind_slice], u5[wind_slice, wind_slice], v5[
wind_slice, wind_slice], length=7)
ax1.set_title('500mb Heights (m) and Absolute Vorticity ($s^{-1}$)',
fontsize=16)
ax1.set_title('\n Valid: ' + time.dt.strftime('%Y-%m-%d %H:%MZ').item(),
fontsize=11, loc='right')
ax1.set_title('\n GFS Init: ' + init_time.dt.strftime('%Y-%m-%d %H:%MZ'
).item(), fontsize=11, loc='left')
ax1.set_extent((265, 300, 25, 50))
plt.savefig(output_dir + '/GFS/gfs_hrly_h5vort_' + str(i) + '.png')
plt.clf()
plt.close()
wind_slice_s = slice(10, -10, 10)
fig2 = plt.figure(figsize=(15, 15))
ax2 = fig2.add_subplot(111, projection=zH5_crs)
ax2.coastlines(resolution='50m')
ax2.add_feature(cfeature.BORDERS.with_scale('50m'))
ax2.add_feature(cfeature.STATES.with_scale('50m'))
h5c2 = ax2.contour(x, y, h5, colors='dimgray', levels=range(4800, 6200,
60), linewidths=1.5)
t5c2 = ax2.contour(x, y, t5, colors='r', levels=range(-60, 0, 5),
linestyles='dashed', linewidths=1)
a5c2 = ax2.contourf(x, y, av5, cmap='autumn_r', levels=range(10, 65, 2),
alpha=0.8)
a5cb2 = fig2.colorbar(a5c2, orientation='horizontal', aspect=80, ax=ax2,
pad=0.01, extendrect=False, ticks=range(10, 60, 5))
a5cb2.set_label('500mb Absolute Vorticity ($s^{-1}$)', fontsize=12)
ax2.barbs(x[wind_slice_s], y[wind_slice_s], u5[wind_slice_s,
wind_slice_s], v5[wind_slice_s, wind_slice_s], length=7)
ax2.set_title('500mb Heights (m) and Absolute Vorticity ($s^{-1}$)',
fontsize=16)
ax2.set_title('\n Valid: ' + time.dt.strftime('%Y-%m-%d %H:%MZ').item(),
fontsize=11, loc='right')
ax2.set_title('\n GFS Init: ' + init_time.dt.strftime('%Y-%m-%d %H:%MZ'
).item(), fontsize=11, loc='left')
ax2.set_extent((225, 300, 20, 65))
plt.savefig(output_dir + '/GFS/gfs_hrly_h5vortCONUS_v2_' + str(i) + '.png')
wind_slice_s = slice(10, -10, 10)
fig3 = plt.figure(figsize=(15, 15))
ax3 = fig3.add_subplot(111, projection=zH5_crs)
ax3.coastlines(resolution='50m')
ax3.add_feature(cfeature.BORDERS.with_scale('50m'))
ax3.add_feature(cfeature.STATES.with_scale('50m'))
h5c2 = ax3.contour(x, y, h5, colors='dimgray', levels=range(4800, 6200,
60), linewidths=1.5)
t5c2 = ax3.contour(x, y, t5, colors='r', levels=range(-60, 0, 5),
linestyles='dashed', linewidths=1)
a5c2 = ax3.contourf(x, y, av5, cmap='autumn_r', levels=range(10, 65, 2),
alpha=0.8)
a5cb2 = fig3.colorbar(a5c2, orientation='horizontal', aspect=80, ax=ax3,
pad=0.01, extendrect=False, ticks=range(10, 60, 5))
a5cb2.set_label('500mb Absolute Vorticity ($s^{-1}$)', fontsize=12)
ax3.barbs(x[wind_slice_s], y[wind_slice_s], u5[wind_slice_s,
wind_slice_s], v5[wind_slice_s, wind_slice_s], length=7)
ax3.set_title('500mb Heights (m) and Absolute Vorticity ($s^{-1}$)',
fontsize=16)
ax3.set_title('\n Valid: ' + time.dt.strftime('%Y-%m-%d %H:%MZ').item(),
fontsize=11, loc='right')
ax3.set_title('\n GFS Init: ' + init_time.dt.strftime('%Y-%m-%d %H:%MZ'
).item(), fontsize=11, loc='left')
ax3.set_extent((260, 320, 20, 65))
plt.savefig(output_dir + '/GFS/gfs_hrly_h5vortC_ec_v1_' + str(i) + '.png')
fcst_hr = str(0)
print('Hour ' + str(i) + ' completed!')
plt.close()
timeelapsed = datetime.now() - startTime
print(timeelapsed)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def mkdir_p(mypath):
"""Creates a directory. equivalent to using mkdir -p on the command line"""
from errno import EEXIST
from os import makedirs, path
try:
makedirs(mypath)
except OSError as exc:
if exc.errno == EEXIST and path.isdir(mypath):
pass
else:
raise
startTime = datetime.now()
m_date = '20200903'
m_hour = '12'
year = startTime.year
if startTime.month < 10:
month = '0' + str(startTime.month)
else:
month = str(startTime.month)
if startTime.day < 10:
day = '0' + str(startTime.day)
else:
day = str(startTime.day)
if startTime.hour < 10:
hour = '0' + str(startTime.hour)
else:
hour = str(startTime.hour)
mdate = str(year) + str(month) + str(day)
def get_init_hr(hour):
if int(hour) < 6:
init_hour = '00'
elif int(hour) < 11:
init_hour = '06'
elif int(hour) < 17:
init_hour = '12'
elif int(hour) < 22:
init_hour = '18'
else:
init_hour = '00'
return init_hour
url = ('http://nomads.ncep.noaa.gov:80/dods/gfs_0p25_1hr/gfs' + mdate +
'/gfs_0p25_1hr_' + get_init_hr(hour) + 'z')
init_hour = get_init_hr(hour)
<|reserved_special_token_0|>
output_dir = str(year) + str(month) + str(day) + '_' + str(init_hour) + '00'
mkdir_p(output_dir)
mkdir_p(output_dir + '/GFS')
ds = xr.open_dataset(url)
init_hr = dt.datetime(int(year), int(month), int(day), int(init_hour))
times = ds['tmp2m'].metpy.time
init_time = ds['time'][0]
lats = np.arange(15, 70, 0.25)
lons = np.arange(220, 330, 0.25)
for i in range(1, 120):
fc_hr = init_hr + dt.timedelta(hours=1 * i)
forecast_hour = times[0].values
data = ds.metpy.parse_cf()
data = data.isel(time=i)
data = data.rename({'absvprs': 'avort', 'hgtprs': 'gph', 'rhprs': 'rh',
'tmpprs': 'temp', 'ugrdprs': 'u', 'vgrdprs': 'v'})
vertical, = data['temp'].metpy.coordinates('vertical')
time = data['temp'].metpy.time
zH5_crs = data['temp'].metpy.cartopy_crs
t5 = data['temp'].sel(lev=500.0, lat=lats, lon=lons)
u5 = data['u'].sel(lev=500.0, lat=lats, lon=lons).squeeze() * 1.94384449
v5 = data['v'].sel(lev=500.0, lat=lats, lon=lons).squeeze() * 1.94384449
av5 = data['avort'].sel(lev=500.0, lat=lats, lon=lons).squeeze() * 100000.0
rh5 = data['rh'].sel(lev=500.0, lat=lats, lon=lons).squeeze()
h5 = data['gph'].sel(lev=500.0, lat=lats, lon=lons).squeeze()
x, y = t5.metpy.coordinates('x', 'y')
lat, lon = xr.broadcast(y, x)
wind_slice = slice(5, -5, 5)
fig = plt.figure(figsize=(15, 15))
ax1 = fig.add_subplot(111, projection=zH5_crs)
ax1.coastlines(resolution='10m')
ax1.add_feature(cfeature.BORDERS.with_scale('10m'))
ax1.add_feature(cfeature.STATES.with_scale('10m'))
h5c = ax1.contour(x, y, h5, colors='dimgray', levels=range(4800, 6200,
60), linewidths=1.5)
t5c = ax1.contour(x, y, t5, colors='r', levels=range(-60, 0, 5),
linestyles='dashed', linewidths=1)
a5c = ax1.contourf(x, y, av5, cmap='autumn_r', levels=range(10, 60, 2),
alpha=0.8, extend='max')
a5cb = fig.colorbar(a5c, orientation='horizontal', aspect=80, ax=ax1,
pad=0.01, extendrect=False, ticks=range(10, 61, 5))
a5cb.set_label('500mb Absolute Vorticity ($s^{-1}$)', fontsize=12)
ax1.barbs(x[wind_slice], y[wind_slice], u5[wind_slice, wind_slice], v5[
wind_slice, wind_slice], length=7)
ax1.set_title('500mb Heights (m) and Absolute Vorticity ($s^{-1}$)',
fontsize=16)
ax1.set_title('\n Valid: ' + time.dt.strftime('%Y-%m-%d %H:%MZ').item(),
fontsize=11, loc='right')
ax1.set_title('\n GFS Init: ' + init_time.dt.strftime('%Y-%m-%d %H:%MZ'
).item(), fontsize=11, loc='left')
ax1.set_extent((265, 300, 25, 50))
plt.savefig(output_dir + '/GFS/gfs_hrly_h5vort_' + str(i) + '.png')
plt.clf()
plt.close()
wind_slice_s = slice(10, -10, 10)
fig2 = plt.figure(figsize=(15, 15))
ax2 = fig2.add_subplot(111, projection=zH5_crs)
ax2.coastlines(resolution='50m')
ax2.add_feature(cfeature.BORDERS.with_scale('50m'))
ax2.add_feature(cfeature.STATES.with_scale('50m'))
h5c2 = ax2.contour(x, y, h5, colors='dimgray', levels=range(4800, 6200,
60), linewidths=1.5)
t5c2 = ax2.contour(x, y, t5, colors='r', levels=range(-60, 0, 5),
linestyles='dashed', linewidths=1)
a5c2 = ax2.contourf(x, y, av5, cmap='autumn_r', levels=range(10, 65, 2),
alpha=0.8)
a5cb2 = fig2.colorbar(a5c2, orientation='horizontal', aspect=80, ax=ax2,
pad=0.01, extendrect=False, ticks=range(10, 60, 5))
a5cb2.set_label('500mb Absolute Vorticity ($s^{-1}$)', fontsize=12)
ax2.barbs(x[wind_slice_s], y[wind_slice_s], u5[wind_slice_s,
wind_slice_s], v5[wind_slice_s, wind_slice_s], length=7)
ax2.set_title('500mb Heights (m) and Absolute Vorticity ($s^{-1}$)',
fontsize=16)
ax2.set_title('\n Valid: ' + time.dt.strftime('%Y-%m-%d %H:%MZ').item(),
fontsize=11, loc='right')
ax2.set_title('\n GFS Init: ' + init_time.dt.strftime('%Y-%m-%d %H:%MZ'
).item(), fontsize=11, loc='left')
ax2.set_extent((225, 300, 20, 65))
plt.savefig(output_dir + '/GFS/gfs_hrly_h5vortCONUS_v2_' + str(i) + '.png')
wind_slice_s = slice(10, -10, 10)
fig3 = plt.figure(figsize=(15, 15))
ax3 = fig3.add_subplot(111, projection=zH5_crs)
ax3.coastlines(resolution='50m')
ax3.add_feature(cfeature.BORDERS.with_scale('50m'))
ax3.add_feature(cfeature.STATES.with_scale('50m'))
h5c2 = ax3.contour(x, y, h5, colors='dimgray', levels=range(4800, 6200,
60), linewidths=1.5)
t5c2 = ax3.contour(x, y, t5, colors='r', levels=range(-60, 0, 5),
linestyles='dashed', linewidths=1)
a5c2 = ax3.contourf(x, y, av5, cmap='autumn_r', levels=range(10, 65, 2),
alpha=0.8)
a5cb2 = fig3.colorbar(a5c2, orientation='horizontal', aspect=80, ax=ax3,
pad=0.01, extendrect=False, ticks=range(10, 60, 5))
a5cb2.set_label('500mb Absolute Vorticity ($s^{-1}$)', fontsize=12)
ax3.barbs(x[wind_slice_s], y[wind_slice_s], u5[wind_slice_s,
wind_slice_s], v5[wind_slice_s, wind_slice_s], length=7)
ax3.set_title('500mb Heights (m) and Absolute Vorticity ($s^{-1}$)',
fontsize=16)
ax3.set_title('\n Valid: ' + time.dt.strftime('%Y-%m-%d %H:%MZ').item(),
fontsize=11, loc='right')
ax3.set_title('\n GFS Init: ' + init_time.dt.strftime('%Y-%m-%d %H:%MZ'
).item(), fontsize=11, loc='left')
ax3.set_extent((260, 320, 20, 65))
plt.savefig(output_dir + '/GFS/gfs_hrly_h5vortC_ec_v1_' + str(i) + '.png')
fcst_hr = str(0)
print('Hour ' + str(i) + ' completed!')
plt.close()
timeelapsed = datetime.now() - startTime
print(timeelapsed)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
import cartopy.crs as ccrs
import cartopy.feature as cfeature
import numpy as np
import matplotlib.pyplot as plt
import netCDF4
import xarray as xr
import metpy
from datetime import datetime
import datetime as dt
from metpy.units import units
import scipy.ndimage as ndimage
from metpy.plots import USCOUNTIES
import cartopy
from scipy.ndimage.filters import generic_filter as gf
def mkdir_p(mypath):
"""Creates a directory. equivalent to using mkdir -p on the command line"""
from errno import EEXIST
from os import makedirs, path
try:
makedirs(mypath)
except OSError as exc:
if exc.errno == EEXIST and path.isdir(mypath):
pass
else:
raise
startTime = datetime.now()
m_date = '20200903'
m_hour = '12'
year = startTime.year
if startTime.month < 10:
month = '0' + str(startTime.month)
else:
month = str(startTime.month)
if startTime.day < 10:
day = '0' + str(startTime.day)
else:
day = str(startTime.day)
if startTime.hour < 10:
hour = '0' + str(startTime.hour)
else:
hour = str(startTime.hour)
mdate = str(year) + str(month) + str(day)
def get_init_hr(hour):
if int(hour) < 6:
init_hour = '00'
elif int(hour) < 11:
init_hour = '06'
elif int(hour) < 17:
init_hour = '12'
elif int(hour) < 22:
init_hour = '18'
else:
init_hour = '00'
return init_hour
url = ('http://nomads.ncep.noaa.gov:80/dods/gfs_0p25_1hr/gfs' + mdate +
'/gfs_0p25_1hr_' + get_init_hr(hour) + 'z')
init_hour = get_init_hr(hour)
<|reserved_special_token_0|>
output_dir = str(year) + str(month) + str(day) + '_' + str(init_hour) + '00'
mkdir_p(output_dir)
mkdir_p(output_dir + '/GFS')
ds = xr.open_dataset(url)
init_hr = dt.datetime(int(year), int(month), int(day), int(init_hour))
times = ds['tmp2m'].metpy.time
init_time = ds['time'][0]
lats = np.arange(15, 70, 0.25)
lons = np.arange(220, 330, 0.25)
for i in range(1, 120):
fc_hr = init_hr + dt.timedelta(hours=1 * i)
forecast_hour = times[0].values
data = ds.metpy.parse_cf()
data = data.isel(time=i)
data = data.rename({'absvprs': 'avort', 'hgtprs': 'gph', 'rhprs': 'rh',
'tmpprs': 'temp', 'ugrdprs': 'u', 'vgrdprs': 'v'})
vertical, = data['temp'].metpy.coordinates('vertical')
time = data['temp'].metpy.time
zH5_crs = data['temp'].metpy.cartopy_crs
t5 = data['temp'].sel(lev=500.0, lat=lats, lon=lons)
u5 = data['u'].sel(lev=500.0, lat=lats, lon=lons).squeeze() * 1.94384449
v5 = data['v'].sel(lev=500.0, lat=lats, lon=lons).squeeze() * 1.94384449
av5 = data['avort'].sel(lev=500.0, lat=lats, lon=lons).squeeze() * 100000.0
rh5 = data['rh'].sel(lev=500.0, lat=lats, lon=lons).squeeze()
h5 = data['gph'].sel(lev=500.0, lat=lats, lon=lons).squeeze()
x, y = t5.metpy.coordinates('x', 'y')
lat, lon = xr.broadcast(y, x)
wind_slice = slice(5, -5, 5)
fig = plt.figure(figsize=(15, 15))
ax1 = fig.add_subplot(111, projection=zH5_crs)
ax1.coastlines(resolution='10m')
ax1.add_feature(cfeature.BORDERS.with_scale('10m'))
ax1.add_feature(cfeature.STATES.with_scale('10m'))
h5c = ax1.contour(x, y, h5, colors='dimgray', levels=range(4800, 6200,
60), linewidths=1.5)
t5c = ax1.contour(x, y, t5, colors='r', levels=range(-60, 0, 5),
linestyles='dashed', linewidths=1)
a5c = ax1.contourf(x, y, av5, cmap='autumn_r', levels=range(10, 60, 2),
alpha=0.8, extend='max')
a5cb = fig.colorbar(a5c, orientation='horizontal', aspect=80, ax=ax1,
pad=0.01, extendrect=False, ticks=range(10, 61, 5))
a5cb.set_label('500mb Absolute Vorticity ($s^{-1}$)', fontsize=12)
ax1.barbs(x[wind_slice], y[wind_slice], u5[wind_slice, wind_slice], v5[
wind_slice, wind_slice], length=7)
ax1.set_title('500mb Heights (m) and Absolute Vorticity ($s^{-1}$)',
fontsize=16)
ax1.set_title('\n Valid: ' + time.dt.strftime('%Y-%m-%d %H:%MZ').item(),
fontsize=11, loc='right')
ax1.set_title('\n GFS Init: ' + init_time.dt.strftime('%Y-%m-%d %H:%MZ'
).item(), fontsize=11, loc='left')
ax1.set_extent((265, 300, 25, 50))
plt.savefig(output_dir + '/GFS/gfs_hrly_h5vort_' + str(i) + '.png')
plt.clf()
plt.close()
wind_slice_s = slice(10, -10, 10)
fig2 = plt.figure(figsize=(15, 15))
ax2 = fig2.add_subplot(111, projection=zH5_crs)
ax2.coastlines(resolution='50m')
ax2.add_feature(cfeature.BORDERS.with_scale('50m'))
ax2.add_feature(cfeature.STATES.with_scale('50m'))
h5c2 = ax2.contour(x, y, h5, colors='dimgray', levels=range(4800, 6200,
60), linewidths=1.5)
t5c2 = ax2.contour(x, y, t5, colors='r', levels=range(-60, 0, 5),
linestyles='dashed', linewidths=1)
a5c2 = ax2.contourf(x, y, av5, cmap='autumn_r', levels=range(10, 65, 2),
alpha=0.8)
a5cb2 = fig2.colorbar(a5c2, orientation='horizontal', aspect=80, ax=ax2,
pad=0.01, extendrect=False, ticks=range(10, 60, 5))
a5cb2.set_label('500mb Absolute Vorticity ($s^{-1}$)', fontsize=12)
ax2.barbs(x[wind_slice_s], y[wind_slice_s], u5[wind_slice_s,
wind_slice_s], v5[wind_slice_s, wind_slice_s], length=7)
ax2.set_title('500mb Heights (m) and Absolute Vorticity ($s^{-1}$)',
fontsize=16)
ax2.set_title('\n Valid: ' + time.dt.strftime('%Y-%m-%d %H:%MZ').item(),
fontsize=11, loc='right')
ax2.set_title('\n GFS Init: ' + init_time.dt.strftime('%Y-%m-%d %H:%MZ'
).item(), fontsize=11, loc='left')
ax2.set_extent((225, 300, 20, 65))
plt.savefig(output_dir + '/GFS/gfs_hrly_h5vortCONUS_v2_' + str(i) + '.png')
wind_slice_s = slice(10, -10, 10)
fig3 = plt.figure(figsize=(15, 15))
ax3 = fig3.add_subplot(111, projection=zH5_crs)
ax3.coastlines(resolution='50m')
ax3.add_feature(cfeature.BORDERS.with_scale('50m'))
ax3.add_feature(cfeature.STATES.with_scale('50m'))
h5c2 = ax3.contour(x, y, h5, colors='dimgray', levels=range(4800, 6200,
60), linewidths=1.5)
t5c2 = ax3.contour(x, y, t5, colors='r', levels=range(-60, 0, 5),
linestyles='dashed', linewidths=1)
a5c2 = ax3.contourf(x, y, av5, cmap='autumn_r', levels=range(10, 65, 2),
alpha=0.8)
a5cb2 = fig3.colorbar(a5c2, orientation='horizontal', aspect=80, ax=ax3,
pad=0.01, extendrect=False, ticks=range(10, 60, 5))
a5cb2.set_label('500mb Absolute Vorticity ($s^{-1}$)', fontsize=12)
ax3.barbs(x[wind_slice_s], y[wind_slice_s], u5[wind_slice_s,
wind_slice_s], v5[wind_slice_s, wind_slice_s], length=7)
ax3.set_title('500mb Heights (m) and Absolute Vorticity ($s^{-1}$)',
fontsize=16)
ax3.set_title('\n Valid: ' + time.dt.strftime('%Y-%m-%d %H:%MZ').item(),
fontsize=11, loc='right')
ax3.set_title('\n GFS Init: ' + init_time.dt.strftime('%Y-%m-%d %H:%MZ'
).item(), fontsize=11, loc='left')
ax3.set_extent((260, 320, 20, 65))
plt.savefig(output_dir + '/GFS/gfs_hrly_h5vortC_ec_v1_' + str(i) + '.png')
fcst_hr = str(0)
print('Hour ' + str(i) + ' completed!')
plt.close()
timeelapsed = datetime.now() - startTime
print(timeelapsed)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
import cartopy.crs as ccrs
import cartopy.feature as cfeature
import numpy as np
import matplotlib.pyplot as plt
import netCDF4
import xarray as xr
import metpy
from datetime import datetime
import datetime as dt
from metpy.units import units
import scipy.ndimage as ndimage
from metpy.plots import USCOUNTIES
import cartopy
from scipy.ndimage.filters import generic_filter as gf
def mkdir_p(mypath):
    '''Creates a directory. Equivalent to using mkdir -p on the command line'''
from errno import EEXIST
from os import makedirs,path
try:
makedirs(mypath)
except OSError as exc: # Python >2.5
if exc.errno == EEXIST and path.isdir(mypath):
pass
else: raise
startTime=datetime.now()
m_date='20200903'
m_hour='12'
year = startTime.year
if startTime.month <10:
month = '0'+str(startTime.month)
else:
month = str(startTime.month)
if startTime.day <10:
day = '0'+str(startTime.day)
else:
day = str(startTime.day)
if startTime.hour <10:
hour = '0'+str(startTime.hour)
else:
hour = str(startTime.hour)
mdate = str(year)+str(month)+str(day)
def get_init_hr(hour):
if int(hour) <6:
init_hour = '00'
elif int(hour) <11:
init_hour = '06'
elif int(hour) <17:
init_hour = '12'
elif int(hour) <22:
init_hour = '18'
else:
init_hour = '00'
return(init_hour)
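
# Maps the current hour to the most recent 6-hourly GFS cycle (00/06/12/18z).
# Caveat: a just-started cycle may not be fully published on NOMADS yet, and
# datetime.now() above is local time; datetime.utcnow() may be what is intended.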
url = 'http://nomads.ncep.noaa.gov:80/dods/gfs_0p25_1hr/gfs'+mdate+'/gfs_0p25_1hr_'+get_init_hr(hour)+'z'
init_hour = get_init_hr(hour)
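# e.g. http://nomads.ncep.noaa.gov:80/dods/gfs_0p25_1hr/gfs20200903/gfs_0p25_1hr_12z
# (0.25-degree, hourly-output GFS served over OPeNDAP; xr.open_dataset reads it directly)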
'''
for i in range(119):
fhr = i+1
'''
# Create new directory
output_dir = str(year)+str(month)+str(day)+'_'+str(init_hour)+'00'
mkdir_p(output_dir)
mkdir_p(output_dir+'/GFS')
#Parse data using MetPy
ds = xr.open_dataset(url)
init_hr = dt.datetime(int(year),int(month),int(day),int(init_hour))
times = ds['tmp2m'].metpy.time
init_time = ds['time'][0]
lats = np.arange(15,70,0.25)
lons = np.arange(220,330,0.25)
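# 0.25-degree target grid: 15-70N and 220-330E (i.e. 140W-30W), covering North
# America and adjacent ocean basins.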
for i in range(1,120):
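    # step hourly through forecast times f001 to f119 of the opened dataset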
fc_hr = init_hr+dt.timedelta(hours=1*i)
forecast_hour = times[0].values
data = ds.metpy.parse_cf()
data = data.isel(time=i)
#Rename variables to useful things
data = data.rename({
'absvprs':'avort',
'hgtprs':'gph',
'rhprs':'rh',
'tmpprs':'temp',
'ugrdprs':'u',
'vgrdprs': 'v',
})
vertical, = data['temp'].metpy.coordinates('vertical')
time = data['temp'].metpy.time
zH5_crs = data['temp'].metpy.cartopy_crs
t5 = data['temp'].sel(lev=500.0,lat=lats,lon=lons)
u5 = data['u'].sel(lev=500.0,lat=lats,lon=lons).squeeze()*1.94384449
v5 = data['v'].sel(lev=500.0,lat=lats,lon=lons).squeeze()*1.94384449
av5 = data['avort'].sel(lev=500.0,lat=lats,lon=lons).squeeze()*1e5
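    # absvprs is in s^-1; the 1e5 factor rescales values to match the 10-60
    # contour levels used for the vorticity fills below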
rh5 = data['rh'].sel(lev=500.0,lat=lats,lon=lons).squeeze()
h5 = data['gph'].sel(lev=500.0,lat=lats,lon=lons).squeeze()
x, y = t5.metpy.coordinates('x', 'y')
lat, lon = xr.broadcast(y, x)
wind_slice = slice(5,-5,5)
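    # thins the wind barbs: every 5th grid point, trimming 5 points at each edge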
########## SET UP FIGURE ##################################################
fig = plt.figure(figsize=(15,15))
ax1 = fig.add_subplot(111, projection = zH5_crs)
ax1.coastlines(resolution='10m')
ax1.add_feature(cfeature.BORDERS.with_scale('10m'))
ax1.add_feature(cfeature.STATES.with_scale('10m'))
#fig.suptitle("NAM Forecast valid at " + time[0].dt.strftime('%Y-%m-%d %H:%MZ').item(),fontsize=36)
########## PLOTTING #######################################################
h5c = ax1.contour(x,y,h5,colors='dimgray', levels = range(4800,6200,60),linewidths=1.5)
t5c = ax1.contour(x,y,t5,colors='r', levels = range(-60,0,5),linestyles='dashed',linewidths=1)
a5c = ax1.contourf(x,y,av5,cmap='autumn_r',levels=range(10,60,2),alpha=0.8,extend='max')
a5cb = fig.colorbar(a5c, orientation = 'horizontal', aspect = 80, ax = ax1, pad = 0.01,
extendrect=False, ticks = range(10,61,5))
a5cb.set_label('500mb Absolute Vorticity ($s^{-1}$)', fontsize = 12)
ax1.barbs(x[wind_slice],y[wind_slice],u5[wind_slice,wind_slice],v5[wind_slice,wind_slice], length=7)
#h_contour = ax1.contour(x, y, mslpc, colors='dimgray', levels=range(940,1040,4),linewidths=2)
#h_contour.clabel(fontsize=14, colors='dimgray', inline=1, inline_spacing=4, fmt='%i mb', rightside_up=True, use_clabeltext=True)
ax1.set_title('500mb Heights (m) and Absolute Vorticity ($s^{-1}$)',fontsize=16)
ax1.set_title('\n Valid: '+time.dt.strftime('%Y-%m-%d %H:%MZ').item(),fontsize=11,loc='right')
ax1.set_title('\n GFS Init: '+init_time.dt.strftime('%Y-%m-%d %H:%MZ').item(),fontsize=11,loc='left')
ax1.set_extent((265, 300, 25, 50))#, crs = zH5_crs) # Set a title and show the plot
plt.savefig(output_dir+'/GFS/gfs_hrly_h5vort_'+str(i)+'.png')
plt.clf()
plt.close()
########## PLOT 2 #######################################################
wind_slice_s = slice (10,-10,10)
fig2 = plt.figure(figsize=(15,15))
ax2 = fig2.add_subplot(111,projection=zH5_crs)
ax2.coastlines(resolution='50m')
ax2.add_feature(cfeature.BORDERS.with_scale('50m'))
ax2.add_feature(cfeature.STATES.with_scale('50m'))
h5c2 = ax2.contour(x,y,h5,colors='dimgray', levels = range(4800,6200,60),linewidths=1.5)
t5c2 = ax2.contour(x,y,t5,colors='r', levels = range(-60,0,5),linestyles='dashed',linewidths=1)
a5c2 = ax2.contourf(x,y,av5,cmap='autumn_r',levels=range(10,65,2),alpha=0.8)
a5cb2 = fig2.colorbar(a5c2, orientation = 'horizontal', aspect = 80, ax = ax2, pad = 0.01,
extendrect=False, ticks = range(10,60,5))
a5cb2.set_label('500mb Absolute Vorticity ($s^{-1}$)', fontsize = 12)
ax2.barbs(x[wind_slice_s],y[wind_slice_s],u5[wind_slice_s,wind_slice_s],v5[wind_slice_s,wind_slice_s], length=7)
#h_contour = ax1.contour(x, y, mslpc, colors='dimgray', levels=range(940,1040,4),linewidths=2)
#h_contour.clabel(fontsize=14, colors='dimgray', inline=1, inline_spacing=4, fmt='%i mb', rightside_up=True, use_clabeltext=True)
ax2.set_title('500mb Heights (m) and Absolute Vorticity ($s^{-1}$)',fontsize=16)
ax2.set_title('\n Valid: '+time.dt.strftime('%Y-%m-%d %H:%MZ').item(),fontsize=11,loc='right')
ax2.set_title('\n GFS Init: '+init_time.dt.strftime('%Y-%m-%d %H:%MZ').item(),fontsize=11,loc='left')
ax2.set_extent((225, 300, 20, 65))#, crs = zH5_crs) # Set a title and show the plot
plt.savefig(output_dir+'/GFS/gfs_hrly_h5vortCONUS_v2_'+str(i)+'.png')
########## PLOT 3 #######################################################
wind_slice_s = slice (10,-10,10)
fig3 = plt.figure(figsize=(15,15))
ax3 = fig3.add_subplot(111,projection=zH5_crs)
ax3.coastlines(resolution='50m')
ax3.add_feature(cfeature.BORDERS.with_scale('50m'))
ax3.add_feature(cfeature.STATES.with_scale('50m'))
h5c2 = ax3.contour(x,y,h5,colors='dimgray', levels = range(4800,6200,60),linewidths=1.5)
t5c2 = ax3.contour(x,y,t5,colors='r', levels = range(-60,0,5),linestyles='dashed',linewidths=1)
a5c2 = ax3.contourf(x,y,av5,cmap='autumn_r',levels=range(10,65,2),alpha=0.8)
a5cb2 = fig3.colorbar(a5c2, orientation = 'horizontal', aspect = 80, ax = ax3, pad = 0.01,
extendrect=False, ticks = range(10,60,5))
a5cb2.set_label('500mb Absolute Vorticity ($s^{-1}$)', fontsize = 12)
ax3.barbs(x[wind_slice_s],y[wind_slice_s],u5[wind_slice_s,wind_slice_s],v5[wind_slice_s,wind_slice_s], length=7)
#h_contour = ax1.contour(x, y, mslpc, colors='dimgray', levels=range(940,1040,4),linewidths=2)
#h_contour.clabel(fontsize=14, colors='dimgray', inline=1, inline_spacing=4, fmt='%i mb', rightside_up=True, use_clabeltext=True)
ax3.set_title('500mb Heights (m) and Absolute Vorticity ($s^{-1}$)',fontsize=16)
ax3.set_title('\n Valid: '+time.dt.strftime('%Y-%m-%d %H:%MZ').item(),fontsize=11,loc='right')
ax3.set_title('\n GFS Init: '+init_time.dt.strftime('%Y-%m-%d %H:%MZ').item(),fontsize=11,loc='left')
ax3.set_extent((260, 320, 20, 65))#, crs = zH5_crs) # Set a title and show the plot
plt.savefig(output_dir+'/GFS/gfs_hrly_h5vortC_ec_v1_'+str(i)+'.png')
fcst_hr = str(0)
print('Hour '+str(i)+' completed!')
plt.close()
timeelapsed = datetime.now()-startTime
print(timeelapsed)
'''
url= 'http://nomads.ncep.noaa.gov:80/dods/gfs_0p25_1hr/gfs20200903/gfs_0p25_1hr_12z'
ds = xr.open_dataset(url)
t2m_ds = ds['tmp2m']
init_hr = t2m_ds['time'][0].values
#fc_hr = t2m.ds['time'][i].values
lats = np.arange(20,50,0.25)
lons = np.arange(240,300,0.25)
t2m = t2m_ds.sel(time = init_hr, lat = lats, lon = lons)
print(t2m)
fig = plt.figure(figsize = (12,12))
fig.clf()
ax = plt.axes(projection=ccrs.PlateCarree())
ax.coastlines()
ax.set_extent((240,300, 20, 50), crs = ccrs.PlateCarree())
t2m_c = ax.contourf(t2m, cmap='RdPu')
plt.savefig('testingnomads6.png')
'''
|
flexible
|
{
"blob_id": "8771f71a69f3afdc5de4d38db6efe61b553ae880",
"index": 9396,
"step-1": "<mask token>\n\n\ndef mkdir_p(mypath):\n \"\"\"Creates a directory. equivalent to using mkdir -p on the command line\"\"\"\n from errno import EEXIST\n from os import makedirs, path\n try:\n makedirs(mypath)\n except OSError as exc:\n if exc.errno == EEXIST and path.isdir(mypath):\n pass\n else:\n raise\n\n\n<mask token>\n\n\ndef get_init_hr(hour):\n if int(hour) < 6:\n init_hour = '00'\n elif int(hour) < 11:\n init_hour = '06'\n elif int(hour) < 17:\n init_hour = '12'\n elif int(hour) < 22:\n init_hour = '18'\n else:\n init_hour = '00'\n return init_hour\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef mkdir_p(mypath):\n \"\"\"Creates a directory. equivalent to using mkdir -p on the command line\"\"\"\n from errno import EEXIST\n from os import makedirs, path\n try:\n makedirs(mypath)\n except OSError as exc:\n if exc.errno == EEXIST and path.isdir(mypath):\n pass\n else:\n raise\n\n\n<mask token>\nif startTime.month < 10:\n month = '0' + str(startTime.month)\nelse:\n month = str(startTime.month)\nif startTime.day < 10:\n day = '0' + str(startTime.day)\nelse:\n day = str(startTime.day)\nif startTime.hour < 10:\n hour = '0' + str(startTime.hour)\nelse:\n hour = str(startTime.hour)\n<mask token>\n\n\ndef get_init_hr(hour):\n if int(hour) < 6:\n init_hour = '00'\n elif int(hour) < 11:\n init_hour = '06'\n elif int(hour) < 17:\n init_hour = '12'\n elif int(hour) < 22:\n init_hour = '18'\n else:\n init_hour = '00'\n return init_hour\n\n\n<mask token>\nmkdir_p(output_dir)\nmkdir_p(output_dir + '/GFS')\n<mask token>\nfor i in range(1, 120):\n fc_hr = init_hr + dt.timedelta(hours=1 * i)\n forecast_hour = times[0].values\n data = ds.metpy.parse_cf()\n data = data.isel(time=i)\n data = data.rename({'absvprs': 'avort', 'hgtprs': 'gph', 'rhprs': 'rh',\n 'tmpprs': 'temp', 'ugrdprs': 'u', 'vgrdprs': 'v'})\n vertical, = data['temp'].metpy.coordinates('vertical')\n time = data['temp'].metpy.time\n zH5_crs = data['temp'].metpy.cartopy_crs\n t5 = data['temp'].sel(lev=500.0, lat=lats, lon=lons)\n u5 = data['u'].sel(lev=500.0, lat=lats, lon=lons).squeeze() * 1.94384449\n v5 = data['v'].sel(lev=500.0, lat=lats, lon=lons).squeeze() * 1.94384449\n av5 = data['avort'].sel(lev=500.0, lat=lats, lon=lons).squeeze() * 100000.0\n rh5 = data['rh'].sel(lev=500.0, lat=lats, lon=lons).squeeze()\n h5 = data['gph'].sel(lev=500.0, lat=lats, lon=lons).squeeze()\n x, y = t5.metpy.coordinates('x', 'y')\n lat, lon = xr.broadcast(y, x)\n wind_slice = slice(5, -5, 5)\n fig = plt.figure(figsize=(15, 15))\n ax1 = fig.add_subplot(111, projection=zH5_crs)\n ax1.coastlines(resolution='10m')\n ax1.add_feature(cfeature.BORDERS.with_scale('10m'))\n ax1.add_feature(cfeature.STATES.with_scale('10m'))\n h5c = ax1.contour(x, y, h5, colors='dimgray', levels=range(4800, 6200, \n 60), linewidths=1.5)\n t5c = ax1.contour(x, y, t5, colors='r', levels=range(-60, 0, 5),\n linestyles='dashed', linewidths=1)\n a5c = ax1.contourf(x, y, av5, cmap='autumn_r', levels=range(10, 60, 2),\n alpha=0.8, extend='max')\n a5cb = fig.colorbar(a5c, orientation='horizontal', aspect=80, ax=ax1,\n pad=0.01, extendrect=False, ticks=range(10, 61, 5))\n a5cb.set_label('500mb Absolute Vorticity ($s^{-1}$)', fontsize=12)\n ax1.barbs(x[wind_slice], y[wind_slice], u5[wind_slice, wind_slice], v5[\n wind_slice, wind_slice], length=7)\n ax1.set_title('500mb Heights (m) and Absolute Vorticity ($s^{-1}$)',\n fontsize=16)\n ax1.set_title('\\n Valid: ' + time.dt.strftime('%Y-%m-%d %H:%MZ').item(),\n fontsize=11, loc='right')\n ax1.set_title('\\n GFS Init: ' + init_time.dt.strftime('%Y-%m-%d %H:%MZ'\n ).item(), fontsize=11, loc='left')\n ax1.set_extent((265, 300, 25, 50))\n plt.savefig(output_dir + '/GFS/gfs_hrly_h5vort_' + str(i) + '.png')\n plt.clf()\n plt.close()\n wind_slice_s = slice(10, -10, 10)\n fig2 = plt.figure(figsize=(15, 15))\n ax2 = fig2.add_subplot(111, projection=zH5_crs)\n ax2.coastlines(resolution='50m')\n ax2.add_feature(cfeature.BORDERS.with_scale('50m'))\n ax2.add_feature(cfeature.STATES.with_scale('50m'))\n h5c2 = ax2.contour(x, y, h5, colors='dimgray', levels=range(4800, 6200,\n 60), linewidths=1.5)\n t5c2 = ax2.contour(x, y, 
t5, colors='r', levels=range(-60, 0, 5),\n linestyles='dashed', linewidths=1)\n a5c2 = ax2.contourf(x, y, av5, cmap='autumn_r', levels=range(10, 65, 2),\n alpha=0.8)\n a5cb2 = fig2.colorbar(a5c2, orientation='horizontal', aspect=80, ax=ax2,\n pad=0.01, extendrect=False, ticks=range(10, 60, 5))\n a5cb2.set_label('500mb Absolute Vorticity ($s^{-1}$)', fontsize=12)\n ax2.barbs(x[wind_slice_s], y[wind_slice_s], u5[wind_slice_s,\n wind_slice_s], v5[wind_slice_s, wind_slice_s], length=7)\n ax2.set_title('500mb Heights (m) and Absolute Vorticity ($s^{-1}$)',\n fontsize=16)\n ax2.set_title('\\n Valid: ' + time.dt.strftime('%Y-%m-%d %H:%MZ').item(),\n fontsize=11, loc='right')\n ax2.set_title('\\n GFS Init: ' + init_time.dt.strftime('%Y-%m-%d %H:%MZ'\n ).item(), fontsize=11, loc='left')\n ax2.set_extent((225, 300, 20, 65))\n plt.savefig(output_dir + '/GFS/gfs_hrly_h5vortCONUS_v2_' + str(i) + '.png')\n wind_slice_s = slice(10, -10, 10)\n fig3 = plt.figure(figsize=(15, 15))\n ax3 = fig3.add_subplot(111, projection=zH5_crs)\n ax3.coastlines(resolution='50m')\n ax3.add_feature(cfeature.BORDERS.with_scale('50m'))\n ax3.add_feature(cfeature.STATES.with_scale('50m'))\n h5c2 = ax3.contour(x, y, h5, colors='dimgray', levels=range(4800, 6200,\n 60), linewidths=1.5)\n t5c2 = ax3.contour(x, y, t5, colors='r', levels=range(-60, 0, 5),\n linestyles='dashed', linewidths=1)\n a5c2 = ax3.contourf(x, y, av5, cmap='autumn_r', levels=range(10, 65, 2),\n alpha=0.8)\n a5cb2 = fig3.colorbar(a5c2, orientation='horizontal', aspect=80, ax=ax3,\n pad=0.01, extendrect=False, ticks=range(10, 60, 5))\n a5cb2.set_label('500mb Absolute Vorticity ($s^{-1}$)', fontsize=12)\n ax3.barbs(x[wind_slice_s], y[wind_slice_s], u5[wind_slice_s,\n wind_slice_s], v5[wind_slice_s, wind_slice_s], length=7)\n ax3.set_title('500mb Heights (m) and Absolute Vorticity ($s^{-1}$)',\n fontsize=16)\n ax3.set_title('\\n Valid: ' + time.dt.strftime('%Y-%m-%d %H:%MZ').item(),\n fontsize=11, loc='right')\n ax3.set_title('\\n GFS Init: ' + init_time.dt.strftime('%Y-%m-%d %H:%MZ'\n ).item(), fontsize=11, loc='left')\n ax3.set_extent((260, 320, 20, 65))\n plt.savefig(output_dir + '/GFS/gfs_hrly_h5vortC_ec_v1_' + str(i) + '.png')\n fcst_hr = str(0)\n print('Hour ' + str(i) + ' completed!')\n plt.close()\n timeelapsed = datetime.now() - startTime\n print(timeelapsed)\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef mkdir_p(mypath):\n \"\"\"Creates a directory. equivalent to using mkdir -p on the command line\"\"\"\n from errno import EEXIST\n from os import makedirs, path\n try:\n makedirs(mypath)\n except OSError as exc:\n if exc.errno == EEXIST and path.isdir(mypath):\n pass\n else:\n raise\n\n\nstartTime = datetime.now()\nm_date = '20200903'\nm_hour = '12'\nyear = startTime.year\nif startTime.month < 10:\n month = '0' + str(startTime.month)\nelse:\n month = str(startTime.month)\nif startTime.day < 10:\n day = '0' + str(startTime.day)\nelse:\n day = str(startTime.day)\nif startTime.hour < 10:\n hour = '0' + str(startTime.hour)\nelse:\n hour = str(startTime.hour)\nmdate = str(year) + str(month) + str(day)\n\n\ndef get_init_hr(hour):\n if int(hour) < 6:\n init_hour = '00'\n elif int(hour) < 11:\n init_hour = '06'\n elif int(hour) < 17:\n init_hour = '12'\n elif int(hour) < 22:\n init_hour = '18'\n else:\n init_hour = '00'\n return init_hour\n\n\nurl = ('http://nomads.ncep.noaa.gov:80/dods/gfs_0p25_1hr/gfs' + mdate +\n '/gfs_0p25_1hr_' + get_init_hr(hour) + 'z')\ninit_hour = get_init_hr(hour)\n<mask token>\noutput_dir = str(year) + str(month) + str(day) + '_' + str(init_hour) + '00'\nmkdir_p(output_dir)\nmkdir_p(output_dir + '/GFS')\nds = xr.open_dataset(url)\ninit_hr = dt.datetime(int(year), int(month), int(day), int(init_hour))\ntimes = ds['tmp2m'].metpy.time\ninit_time = ds['time'][0]\nlats = np.arange(15, 70, 0.25)\nlons = np.arange(220, 330, 0.25)\nfor i in range(1, 120):\n fc_hr = init_hr + dt.timedelta(hours=1 * i)\n forecast_hour = times[0].values\n data = ds.metpy.parse_cf()\n data = data.isel(time=i)\n data = data.rename({'absvprs': 'avort', 'hgtprs': 'gph', 'rhprs': 'rh',\n 'tmpprs': 'temp', 'ugrdprs': 'u', 'vgrdprs': 'v'})\n vertical, = data['temp'].metpy.coordinates('vertical')\n time = data['temp'].metpy.time\n zH5_crs = data['temp'].metpy.cartopy_crs\n t5 = data['temp'].sel(lev=500.0, lat=lats, lon=lons)\n u5 = data['u'].sel(lev=500.0, lat=lats, lon=lons).squeeze() * 1.94384449\n v5 = data['v'].sel(lev=500.0, lat=lats, lon=lons).squeeze() * 1.94384449\n av5 = data['avort'].sel(lev=500.0, lat=lats, lon=lons).squeeze() * 100000.0\n rh5 = data['rh'].sel(lev=500.0, lat=lats, lon=lons).squeeze()\n h5 = data['gph'].sel(lev=500.0, lat=lats, lon=lons).squeeze()\n x, y = t5.metpy.coordinates('x', 'y')\n lat, lon = xr.broadcast(y, x)\n wind_slice = slice(5, -5, 5)\n fig = plt.figure(figsize=(15, 15))\n ax1 = fig.add_subplot(111, projection=zH5_crs)\n ax1.coastlines(resolution='10m')\n ax1.add_feature(cfeature.BORDERS.with_scale('10m'))\n ax1.add_feature(cfeature.STATES.with_scale('10m'))\n h5c = ax1.contour(x, y, h5, colors='dimgray', levels=range(4800, 6200, \n 60), linewidths=1.5)\n t5c = ax1.contour(x, y, t5, colors='r', levels=range(-60, 0, 5),\n linestyles='dashed', linewidths=1)\n a5c = ax1.contourf(x, y, av5, cmap='autumn_r', levels=range(10, 60, 2),\n alpha=0.8, extend='max')\n a5cb = fig.colorbar(a5c, orientation='horizontal', aspect=80, ax=ax1,\n pad=0.01, extendrect=False, ticks=range(10, 61, 5))\n a5cb.set_label('500mb Absolute Vorticity ($s^{-1}$)', fontsize=12)\n ax1.barbs(x[wind_slice], y[wind_slice], u5[wind_slice, wind_slice], v5[\n wind_slice, wind_slice], length=7)\n ax1.set_title('500mb Heights (m) and Absolute Vorticity ($s^{-1}$)',\n fontsize=16)\n ax1.set_title('\\n Valid: ' + time.dt.strftime('%Y-%m-%d %H:%MZ').item(),\n fontsize=11, loc='right')\n ax1.set_title('\\n GFS Init: ' + init_time.dt.strftime('%Y-%m-%d %H:%MZ'\n ).item(), 
fontsize=11, loc='left')\n ax1.set_extent((265, 300, 25, 50))\n plt.savefig(output_dir + '/GFS/gfs_hrly_h5vort_' + str(i) + '.png')\n plt.clf()\n plt.close()\n wind_slice_s = slice(10, -10, 10)\n fig2 = plt.figure(figsize=(15, 15))\n ax2 = fig2.add_subplot(111, projection=zH5_crs)\n ax2.coastlines(resolution='50m')\n ax2.add_feature(cfeature.BORDERS.with_scale('50m'))\n ax2.add_feature(cfeature.STATES.with_scale('50m'))\n h5c2 = ax2.contour(x, y, h5, colors='dimgray', levels=range(4800, 6200,\n 60), linewidths=1.5)\n t5c2 = ax2.contour(x, y, t5, colors='r', levels=range(-60, 0, 5),\n linestyles='dashed', linewidths=1)\n a5c2 = ax2.contourf(x, y, av5, cmap='autumn_r', levels=range(10, 65, 2),\n alpha=0.8)\n a5cb2 = fig2.colorbar(a5c2, orientation='horizontal', aspect=80, ax=ax2,\n pad=0.01, extendrect=False, ticks=range(10, 60, 5))\n a5cb2.set_label('500mb Absolute Vorticity ($s^{-1}$)', fontsize=12)\n ax2.barbs(x[wind_slice_s], y[wind_slice_s], u5[wind_slice_s,\n wind_slice_s], v5[wind_slice_s, wind_slice_s], length=7)\n ax2.set_title('500mb Heights (m) and Absolute Vorticity ($s^{-1}$)',\n fontsize=16)\n ax2.set_title('\\n Valid: ' + time.dt.strftime('%Y-%m-%d %H:%MZ').item(),\n fontsize=11, loc='right')\n ax2.set_title('\\n GFS Init: ' + init_time.dt.strftime('%Y-%m-%d %H:%MZ'\n ).item(), fontsize=11, loc='left')\n ax2.set_extent((225, 300, 20, 65))\n plt.savefig(output_dir + '/GFS/gfs_hrly_h5vortCONUS_v2_' + str(i) + '.png')\n wind_slice_s = slice(10, -10, 10)\n fig3 = plt.figure(figsize=(15, 15))\n ax3 = fig3.add_subplot(111, projection=zH5_crs)\n ax3.coastlines(resolution='50m')\n ax3.add_feature(cfeature.BORDERS.with_scale('50m'))\n ax3.add_feature(cfeature.STATES.with_scale('50m'))\n h5c2 = ax3.contour(x, y, h5, colors='dimgray', levels=range(4800, 6200,\n 60), linewidths=1.5)\n t5c2 = ax3.contour(x, y, t5, colors='r', levels=range(-60, 0, 5),\n linestyles='dashed', linewidths=1)\n a5c2 = ax3.contourf(x, y, av5, cmap='autumn_r', levels=range(10, 65, 2),\n alpha=0.8)\n a5cb2 = fig3.colorbar(a5c2, orientation='horizontal', aspect=80, ax=ax3,\n pad=0.01, extendrect=False, ticks=range(10, 60, 5))\n a5cb2.set_label('500mb Absolute Vorticity ($s^{-1}$)', fontsize=12)\n ax3.barbs(x[wind_slice_s], y[wind_slice_s], u5[wind_slice_s,\n wind_slice_s], v5[wind_slice_s, wind_slice_s], length=7)\n ax3.set_title('500mb Heights (m) and Absolute Vorticity ($s^{-1}$)',\n fontsize=16)\n ax3.set_title('\\n Valid: ' + time.dt.strftime('%Y-%m-%d %H:%MZ').item(),\n fontsize=11, loc='right')\n ax3.set_title('\\n GFS Init: ' + init_time.dt.strftime('%Y-%m-%d %H:%MZ'\n ).item(), fontsize=11, loc='left')\n ax3.set_extent((260, 320, 20, 65))\n plt.savefig(output_dir + '/GFS/gfs_hrly_h5vortC_ec_v1_' + str(i) + '.png')\n fcst_hr = str(0)\n print('Hour ' + str(i) + ' completed!')\n plt.close()\n timeelapsed = datetime.now() - startTime\n print(timeelapsed)\n<mask token>\n",
"step-4": "import cartopy.crs as ccrs\nimport cartopy.feature as cfeature\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport netCDF4\nimport xarray as xr\nimport metpy\nfrom datetime import datetime\nimport datetime as dt\nfrom metpy.units import units\nimport scipy.ndimage as ndimage\nfrom metpy.plots import USCOUNTIES\nimport cartopy\nfrom scipy.ndimage.filters import generic_filter as gf\n\n\ndef mkdir_p(mypath):\n \"\"\"Creates a directory. equivalent to using mkdir -p on the command line\"\"\"\n from errno import EEXIST\n from os import makedirs, path\n try:\n makedirs(mypath)\n except OSError as exc:\n if exc.errno == EEXIST and path.isdir(mypath):\n pass\n else:\n raise\n\n\nstartTime = datetime.now()\nm_date = '20200903'\nm_hour = '12'\nyear = startTime.year\nif startTime.month < 10:\n month = '0' + str(startTime.month)\nelse:\n month = str(startTime.month)\nif startTime.day < 10:\n day = '0' + str(startTime.day)\nelse:\n day = str(startTime.day)\nif startTime.hour < 10:\n hour = '0' + str(startTime.hour)\nelse:\n hour = str(startTime.hour)\nmdate = str(year) + str(month) + str(day)\n\n\ndef get_init_hr(hour):\n if int(hour) < 6:\n init_hour = '00'\n elif int(hour) < 11:\n init_hour = '06'\n elif int(hour) < 17:\n init_hour = '12'\n elif int(hour) < 22:\n init_hour = '18'\n else:\n init_hour = '00'\n return init_hour\n\n\nurl = ('http://nomads.ncep.noaa.gov:80/dods/gfs_0p25_1hr/gfs' + mdate +\n '/gfs_0p25_1hr_' + get_init_hr(hour) + 'z')\ninit_hour = get_init_hr(hour)\n<mask token>\noutput_dir = str(year) + str(month) + str(day) + '_' + str(init_hour) + '00'\nmkdir_p(output_dir)\nmkdir_p(output_dir + '/GFS')\nds = xr.open_dataset(url)\ninit_hr = dt.datetime(int(year), int(month), int(day), int(init_hour))\ntimes = ds['tmp2m'].metpy.time\ninit_time = ds['time'][0]\nlats = np.arange(15, 70, 0.25)\nlons = np.arange(220, 330, 0.25)\nfor i in range(1, 120):\n fc_hr = init_hr + dt.timedelta(hours=1 * i)\n forecast_hour = times[0].values\n data = ds.metpy.parse_cf()\n data = data.isel(time=i)\n data = data.rename({'absvprs': 'avort', 'hgtprs': 'gph', 'rhprs': 'rh',\n 'tmpprs': 'temp', 'ugrdprs': 'u', 'vgrdprs': 'v'})\n vertical, = data['temp'].metpy.coordinates('vertical')\n time = data['temp'].metpy.time\n zH5_crs = data['temp'].metpy.cartopy_crs\n t5 = data['temp'].sel(lev=500.0, lat=lats, lon=lons)\n u5 = data['u'].sel(lev=500.0, lat=lats, lon=lons).squeeze() * 1.94384449\n v5 = data['v'].sel(lev=500.0, lat=lats, lon=lons).squeeze() * 1.94384449\n av5 = data['avort'].sel(lev=500.0, lat=lats, lon=lons).squeeze() * 100000.0\n rh5 = data['rh'].sel(lev=500.0, lat=lats, lon=lons).squeeze()\n h5 = data['gph'].sel(lev=500.0, lat=lats, lon=lons).squeeze()\n x, y = t5.metpy.coordinates('x', 'y')\n lat, lon = xr.broadcast(y, x)\n wind_slice = slice(5, -5, 5)\n fig = plt.figure(figsize=(15, 15))\n ax1 = fig.add_subplot(111, projection=zH5_crs)\n ax1.coastlines(resolution='10m')\n ax1.add_feature(cfeature.BORDERS.with_scale('10m'))\n ax1.add_feature(cfeature.STATES.with_scale('10m'))\n h5c = ax1.contour(x, y, h5, colors='dimgray', levels=range(4800, 6200, \n 60), linewidths=1.5)\n t5c = ax1.contour(x, y, t5, colors='r', levels=range(-60, 0, 5),\n linestyles='dashed', linewidths=1)\n a5c = ax1.contourf(x, y, av5, cmap='autumn_r', levels=range(10, 60, 2),\n alpha=0.8, extend='max')\n a5cb = fig.colorbar(a5c, orientation='horizontal', aspect=80, ax=ax1,\n pad=0.01, extendrect=False, ticks=range(10, 61, 5))\n a5cb.set_label('500mb Absolute Vorticity ($s^{-1}$)', fontsize=12)\n 
ax1.barbs(x[wind_slice], y[wind_slice], u5[wind_slice, wind_slice], v5[\n wind_slice, wind_slice], length=7)\n ax1.set_title('500mb Heights (m) and Absolute Vorticity ($s^{-1}$)',\n fontsize=16)\n ax1.set_title('\\n Valid: ' + time.dt.strftime('%Y-%m-%d %H:%MZ').item(),\n fontsize=11, loc='right')\n ax1.set_title('\\n GFS Init: ' + init_time.dt.strftime('%Y-%m-%d %H:%MZ'\n ).item(), fontsize=11, loc='left')\n ax1.set_extent((265, 300, 25, 50))\n plt.savefig(output_dir + '/GFS/gfs_hrly_h5vort_' + str(i) + '.png')\n plt.clf()\n plt.close()\n wind_slice_s = slice(10, -10, 10)\n fig2 = plt.figure(figsize=(15, 15))\n ax2 = fig2.add_subplot(111, projection=zH5_crs)\n ax2.coastlines(resolution='50m')\n ax2.add_feature(cfeature.BORDERS.with_scale('50m'))\n ax2.add_feature(cfeature.STATES.with_scale('50m'))\n h5c2 = ax2.contour(x, y, h5, colors='dimgray', levels=range(4800, 6200,\n 60), linewidths=1.5)\n t5c2 = ax2.contour(x, y, t5, colors='r', levels=range(-60, 0, 5),\n linestyles='dashed', linewidths=1)\n a5c2 = ax2.contourf(x, y, av5, cmap='autumn_r', levels=range(10, 65, 2),\n alpha=0.8)\n a5cb2 = fig2.colorbar(a5c2, orientation='horizontal', aspect=80, ax=ax2,\n pad=0.01, extendrect=False, ticks=range(10, 60, 5))\n a5cb2.set_label('500mb Absolute Vorticity ($s^{-1}$)', fontsize=12)\n ax2.barbs(x[wind_slice_s], y[wind_slice_s], u5[wind_slice_s,\n wind_slice_s], v5[wind_slice_s, wind_slice_s], length=7)\n ax2.set_title('500mb Heights (m) and Absolute Vorticity ($s^{-1}$)',\n fontsize=16)\n ax2.set_title('\\n Valid: ' + time.dt.strftime('%Y-%m-%d %H:%MZ').item(),\n fontsize=11, loc='right')\n ax2.set_title('\\n GFS Init: ' + init_time.dt.strftime('%Y-%m-%d %H:%MZ'\n ).item(), fontsize=11, loc='left')\n ax2.set_extent((225, 300, 20, 65))\n plt.savefig(output_dir + '/GFS/gfs_hrly_h5vortCONUS_v2_' + str(i) + '.png')\n wind_slice_s = slice(10, -10, 10)\n fig3 = plt.figure(figsize=(15, 15))\n ax3 = fig3.add_subplot(111, projection=zH5_crs)\n ax3.coastlines(resolution='50m')\n ax3.add_feature(cfeature.BORDERS.with_scale('50m'))\n ax3.add_feature(cfeature.STATES.with_scale('50m'))\n h5c2 = ax3.contour(x, y, h5, colors='dimgray', levels=range(4800, 6200,\n 60), linewidths=1.5)\n t5c2 = ax3.contour(x, y, t5, colors='r', levels=range(-60, 0, 5),\n linestyles='dashed', linewidths=1)\n a5c2 = ax3.contourf(x, y, av5, cmap='autumn_r', levels=range(10, 65, 2),\n alpha=0.8)\n a5cb2 = fig3.colorbar(a5c2, orientation='horizontal', aspect=80, ax=ax3,\n pad=0.01, extendrect=False, ticks=range(10, 60, 5))\n a5cb2.set_label('500mb Absolute Vorticity ($s^{-1}$)', fontsize=12)\n ax3.barbs(x[wind_slice_s], y[wind_slice_s], u5[wind_slice_s,\n wind_slice_s], v5[wind_slice_s, wind_slice_s], length=7)\n ax3.set_title('500mb Heights (m) and Absolute Vorticity ($s^{-1}$)',\n fontsize=16)\n ax3.set_title('\\n Valid: ' + time.dt.strftime('%Y-%m-%d %H:%MZ').item(),\n fontsize=11, loc='right')\n ax3.set_title('\\n GFS Init: ' + init_time.dt.strftime('%Y-%m-%d %H:%MZ'\n ).item(), fontsize=11, loc='left')\n ax3.set_extent((260, 320, 20, 65))\n plt.savefig(output_dir + '/GFS/gfs_hrly_h5vortC_ec_v1_' + str(i) + '.png')\n fcst_hr = str(0)\n print('Hour ' + str(i) + ' completed!')\n plt.close()\n timeelapsed = datetime.now() - startTime\n print(timeelapsed)\n<mask token>\n",
"step-5": "import cartopy.crs as ccrs\r\nimport cartopy.feature as cfeature\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport netCDF4\r\nimport xarray as xr\r\nimport metpy\r\nfrom datetime import datetime\r\nimport datetime as dt\r\nfrom metpy.units import units\r\nimport scipy.ndimage as ndimage\r\nfrom metpy.plots import USCOUNTIES\r\nimport cartopy\r\nfrom scipy.ndimage.filters import generic_filter as gf\r\n\r\n\r\ndef mkdir_p(mypath):\r\n '''Creates a directory. equivalent to using mkdir -p on the command line'''\r\n\r\n from errno import EEXIST\r\n from os import makedirs,path\r\n\r\n try:\r\n makedirs(mypath)\r\n except OSError as exc: # Python >2.5\r\n if exc.errno == EEXIST and path.isdir(mypath):\r\n pass\r\n else: raise\r\n\r\nstartTime=datetime.now()\r\n\r\nm_date='20200903'\r\nm_hour='12'\r\n\r\nyear = startTime.year\r\n\r\nif startTime.month <10:\r\n month = '0'+str(startTime.month)\r\nelse:\r\n month = str(startTime.month)\r\n\r\nif startTime.day <10:\r\n day = '0'+str(startTime.day)\r\nelse:\r\n day = str(startTime.day)\r\n\r\nif startTime.hour <10:\r\n hour = '0'+str(startTime.hour)\r\nelse:\r\n hour = str(startTime.hour)\r\n\r\nmdate = str(year)+str(month)+str(day)\r\n\r\ndef get_init_hr(hour):\r\n if int(hour) <6:\r\n init_hour = '00'\r\n elif int(hour) <11:\r\n init_hour = '06'\r\n elif int(hour) <17:\r\n init_hour = '12'\r\n elif int(hour) <22:\r\n init_hour = '18'\r\n else:\r\n init_hour = '00'\r\n return(init_hour)\r\n\r\nurl = 'http://nomads.ncep.noaa.gov:80/dods/gfs_0p25_1hr/gfs'+mdate+'/gfs_0p25_1hr_'+get_init_hr(hour)+'z'\r\ninit_hour = get_init_hr(hour)\r\n'''\r\nfor i in range(119):\r\n fhr = i+1\r\n'''\r\n# Create new directory\r\noutput_dir = str(year)+str(month)+str(day)+'_'+str(init_hour)+'00'\r\nmkdir_p(output_dir)\r\nmkdir_p(output_dir+'/GFS')\r\n#Parse data using MetPy\r\nds = xr.open_dataset(url)\r\ninit_hr = dt.datetime(int(year),int(month),int(day),int(init_hour))\r\ntimes = ds['tmp2m'].metpy.time\r\ninit_time = ds['time'][0]\r\n\r\nlats = np.arange(15,70,0.25)\r\nlons = np.arange(220,330,0.25)\r\n\r\nfor i in range(1,120):\r\n fc_hr = init_hr+dt.timedelta(hours=1*i)\r\n forecast_hour = times[0].values\r\n\r\n data = ds.metpy.parse_cf()\r\n data = data.isel(time=i)\r\n #Rename variables to useful things\r\n data = data.rename({\r\n 'absvprs':'avort',\r\n 'hgtprs':'gph',\r\n 'rhprs':'rh',\r\n 'tmpprs':'temp',\r\n 'ugrdprs':'u',\r\n 'vgrdprs': 'v',\r\n })\r\n\r\n vertical, = data['temp'].metpy.coordinates('vertical')\r\n time = data['temp'].metpy.time\r\n zH5_crs = data['temp'].metpy.cartopy_crs\r\n\r\n t5 = data['temp'].sel(lev=500.0,lat=lats,lon=lons)\r\n u5 = data['u'].sel(lev=500.0,lat=lats,lon=lons).squeeze()*1.94384449\r\n v5 = data['v'].sel(lev=500.0,lat=lats,lon=lons).squeeze()*1.94384449\r\n av5 = data['avort'].sel(lev=500.0,lat=lats,lon=lons).squeeze()*1e5\r\n rh5 = data['rh'].sel(lev=500.0,lat=lats,lon=lons).squeeze()\r\n h5 = data['gph'].sel(lev=500.0,lat=lats,lon=lons).squeeze()\r\n x, y = t5.metpy.coordinates('x', 'y')\r\n lat, lon = xr.broadcast(y, x)\r\n wind_slice = slice(5,-5,5)\r\n ########## SET UP FIGURE ##################################################\r\n fig = plt.figure(figsize=(15,15))\r\n ax1 = fig.add_subplot(111, projection = zH5_crs)\r\n\r\n ax1.coastlines(resolution='10m')\r\n ax1.add_feature(cfeature.BORDERS.with_scale('10m'))\r\n ax1.add_feature(cfeature.STATES.with_scale('10m'))\r\n\r\n #fig.suptitle(\"NAM Forecast valid at \" + time[0].dt.strftime('%Y-%m-%d %H:%MZ').item(),fontsize=36)\r\n\r\n 
########## PLOTTING #######################################################\r\n h5c = ax1.contour(x,y,h5,colors='dimgray', levels = range(4800,6200,60),linewidths=1.5)\r\n t5c = ax1.contour(x,y,t5,colors='r', levels = range(-60,0,5),linestyles='dashed',linewidths=1)\r\n a5c = ax1.contourf(x,y,av5,cmap='autumn_r',levels=range(10,60,2),alpha=0.8,extend='max')\r\n a5cb = fig.colorbar(a5c, orientation = 'horizontal', aspect = 80, ax = ax1, pad = 0.01,\r\n extendrect=False, ticks = range(10,61,5))\r\n a5cb.set_label('500mb Absolute Vorticity ($s^{-1}$)', fontsize = 12)\r\n ax1.barbs(x[wind_slice],y[wind_slice],u5[wind_slice,wind_slice],v5[wind_slice,wind_slice], length=7)\r\n\r\n #h_contour = ax1.contour(x, y, mslpc, colors='dimgray', levels=range(940,1040,4),linewidths=2)\r\n #h_contour.clabel(fontsize=14, colors='dimgray', inline=1, inline_spacing=4, fmt='%i mb', rightside_up=True, use_clabeltext=True)\r\n ax1.set_title('500mb Heights (m) and Absolute Vorticity ($s^{-1}$)',fontsize=16)\r\n ax1.set_title('\\n Valid: '+time.dt.strftime('%Y-%m-%d %H:%MZ').item(),fontsize=11,loc='right')\r\n ax1.set_title('\\n GFS Init: '+init_time.dt.strftime('%Y-%m-%d %H:%MZ').item(),fontsize=11,loc='left')\r\n ax1.set_extent((265, 300, 25, 50))#, crs = zH5_crs) # Set a title and show the plot\r\n plt.savefig(output_dir+'/GFS/gfs_hrly_h5vort_'+str(i)+'.png')\r\n plt.clf()\r\n plt.close()\r\n ########## PLOT 2 #######################################################\r\n wind_slice_s = slice (10,-10,10)\r\n fig2 = plt.figure(figsize=(15,15))\r\n ax2 = fig2.add_subplot(111,projection=zH5_crs)\r\n ax2.coastlines(resolution='50m')\r\n ax2.add_feature(cfeature.BORDERS.with_scale('50m'))\r\n ax2.add_feature(cfeature.STATES.with_scale('50m'))\r\n h5c2 = ax2.contour(x,y,h5,colors='dimgray', levels = range(4800,6200,60),linewidths=1.5)\r\n t5c2 = ax2.contour(x,y,t5,colors='r', levels = range(-60,0,5),linestyles='dashed',linewidths=1)\r\n a5c2 = ax2.contourf(x,y,av5,cmap='autumn_r',levels=range(10,65,2),alpha=0.8)\r\n a5cb2 = fig2.colorbar(a5c2, orientation = 'horizontal', aspect = 80, ax = ax2, pad = 0.01,\r\n extendrect=False, ticks = range(10,60,5))\r\n a5cb2.set_label('500mb Absolute Vorticity ($s^{-1}$)', fontsize = 12)\r\n ax2.barbs(x[wind_slice_s],y[wind_slice_s],u5[wind_slice_s,wind_slice_s],v5[wind_slice_s,wind_slice_s], length=7)\r\n\r\n #h_contour = ax1.contour(x, y, mslpc, colors='dimgray', levels=range(940,1040,4),linewidths=2)\r\n #h_contour.clabel(fontsize=14, colors='dimgray', inline=1, inline_spacing=4, fmt='%i mb', rightside_up=True, use_clabeltext=True)\r\n ax2.set_title('500mb Heights (m) and Absolute Vorticity ($s^{-1}$)',fontsize=16)\r\n ax2.set_title('\\n Valid: '+time.dt.strftime('%Y-%m-%d %H:%MZ').item(),fontsize=11,loc='right')\r\n ax2.set_title('\\n GFS Init: '+init_time.dt.strftime('%Y-%m-%d %H:%MZ').item(),fontsize=11,loc='left')\r\n ax2.set_extent((225, 300, 20, 65))#, crs = zH5_crs) # Set a title and show the plot\r\n plt.savefig(output_dir+'/GFS/gfs_hrly_h5vortCONUS_v2_'+str(i)+'.png')\r\n\r\n ########## PLOT 3 #######################################################\r\n wind_slice_s = slice (10,-10,10)\r\n fig3 = plt.figure(figsize=(15,15))\r\n ax3 = fig3.add_subplot(111,projection=zH5_crs)\r\n ax3.coastlines(resolution='50m')\r\n ax3.add_feature(cfeature.BORDERS.with_scale('50m'))\r\n ax3.add_feature(cfeature.STATES.with_scale('50m'))\r\n h5c2 = ax3.contour(x,y,h5,colors='dimgray', levels = range(4800,6200,60),linewidths=1.5)\r\n t5c2 = ax3.contour(x,y,t5,colors='r', levels = 
range(-60,0,5),linestyles='dashed',linewidths=1)\r\n a5c2 = ax3.contourf(x,y,av5,cmap='autumn_r',levels=range(10,65,2),alpha=0.8)\r\n a5cb2 = fig3.colorbar(a5c2, orientation = 'horizontal', aspect = 80, ax = ax3, pad = 0.01,\r\n extendrect=False, ticks = range(10,60,5))\r\n a5cb2.set_label('500mb Absolute Vorticity ($s^{-1}$)', fontsize = 12)\r\n ax3.barbs(x[wind_slice_s],y[wind_slice_s],u5[wind_slice_s,wind_slice_s],v5[wind_slice_s,wind_slice_s], length=7)\r\n\r\n #h_contour = ax1.contour(x, y, mslpc, colors='dimgray', levels=range(940,1040,4),linewidths=2)\r\n #h_contour.clabel(fontsize=14, colors='dimgray', inline=1, inline_spacing=4, fmt='%i mb', rightside_up=True, use_clabeltext=True)\r\n ax3.set_title('500mb Heights (m) and Absolute Vorticity ($s^{-1}$)',fontsize=16)\r\n ax3.set_title('\\n Valid: '+time.dt.strftime('%Y-%m-%d %H:%MZ').item(),fontsize=11,loc='right')\r\n ax3.set_title('\\n GFS Init: '+init_time.dt.strftime('%Y-%m-%d %H:%MZ').item(),fontsize=11,loc='left')\r\n ax3.set_extent((260, 320, 20, 65))#, crs = zH5_crs) # Set a title and show the plot\r\n plt.savefig(output_dir+'/GFS/gfs_hrly_h5vortC_ec_v1_'+str(i)+'.png')\r\n\r\n fcst_hr = str(0)\r\n print('Hour '+str(i)+' completed!')\r\n plt.close()\r\n timeelapsed = datetime.now()-startTime\r\n print(timeelapsed)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n'''\r\nurl= 'http://nomads.ncep.noaa.gov:80/dods/gfs_0p25_1hr/gfs20200903/gfs_0p25_1hr_12z'\r\nds = xr.open_dataset(url)\r\nt2m_ds = ds['tmp2m']\r\ninit_hr = t2m_ds['time'][0].values\r\n#fc_hr = t2m.ds['time'][i].values\r\nlats = np.arange(20,50,0.25)\r\nlons = np.arange(240,300,0.25)\r\nt2m = t2m_ds.sel(time = init_hr, lat = lats, lon = lons)\r\nprint(t2m)\r\n\r\nfig = plt.figure(figsize = (12,12))\r\nfig.clf()\r\nax = plt.axes(projection=ccrs.PlateCarree())\r\nax.coastlines()\r\nax.set_extent((240,300, 20, 50), crs = ccrs.PlateCarree())\r\nt2m_c = ax.contourf(t2m, cmap='RdPu')\r\nplt.savefig('testingnomads6.png')\r\n'''\r\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
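A standalone sketch of the cycle-selection step the GFS script above depends on: get_init_hr mirrors the record's own function, and the demo loop under it is illustrative.

# Map a UTC wall-clock hour to the newest GFS cycle assumed to be published
# on NOMADS; mirrors get_init_hr from the record above.
def get_init_hr(hour):
    if int(hour) < 6:
        return '00'
    elif int(hour) < 11:
        return '06'
    elif int(hour) < 17:
        return '12'
    elif int(hour) < 22:
        return '18'
    return '00'

for h in (0, 7, 12, 20, 23):
    print(h, '->', get_init_hr(h))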
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Orders(models.Model):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Orders(models.Model):
customer_name = models.CharField(max_length=80)
customer_email = models.CharField(max_length=120)
customer_mobile = models.CharField(max_length=40)
status = models.CharField(max_length=20)
process_url = models.CharField(max_length=150, null=True)
session_id = models.CharField(max_length=100, null=True)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
<|reserved_special_token_1|>
from django.db import models
class Orders(models.Model):
customer_name = models.CharField(max_length=80)
customer_email = models.CharField(max_length=120)
customer_mobile = models.CharField(max_length=40)
status = models.CharField(max_length=20)
process_url = models.CharField(max_length=150, null=True)
session_id = models.CharField(max_length=100, null=True)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
<|reserved_special_token_1|>
from django.db import models
# Create your models here.
class Orders(models.Model):
customer_name = models.CharField(max_length=80)
customer_email = models.CharField(max_length=120)
customer_mobile = models.CharField(max_length=40)
status = models.CharField(max_length=20)
process_url = models.CharField(max_length=150, null=True)
session_id = models.CharField(max_length=100, null=True)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
|
flexible
|
{
"blob_id": "bc7a7b9ba4b3277c862aadb57b56661c24efc6e5",
"index": 5577,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Orders(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Orders(models.Model):\n customer_name = models.CharField(max_length=80)\n customer_email = models.CharField(max_length=120)\n customer_mobile = models.CharField(max_length=40)\n status = models.CharField(max_length=20)\n process_url = models.CharField(max_length=150, null=True)\n session_id = models.CharField(max_length=100, null=True)\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n",
"step-4": "from django.db import models\n\n\nclass Orders(models.Model):\n customer_name = models.CharField(max_length=80)\n customer_email = models.CharField(max_length=120)\n customer_mobile = models.CharField(max_length=40)\n status = models.CharField(max_length=20)\n process_url = models.CharField(max_length=150, null=True)\n session_id = models.CharField(max_length=100, null=True)\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n",
"step-5": "from django.db import models\n\n\n# Create your models here.\n\nclass Orders(models.Model):\n customer_name = models.CharField(max_length=80)\n customer_email = models.CharField(max_length=120)\n customer_mobile = models.CharField(max_length=40)\n status = models.CharField(max_length=20)\n process_url = models.CharField(max_length=150, null=True)\n session_id = models.CharField(max_length=100, null=True)\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
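A minimal usage sketch for the Orders model above, assuming a configured Django project; the app import path and the field values are illustrative, not part of the record.

# Illustrative only: requires Django settings to be configured and the model
# above to live in an importable app (the 'shop' path is hypothetical).
from shop.models import Orders

order = Orders.objects.create(
    customer_name='Ada Lovelace',
    customer_email='[email protected]',
    customer_mobile='555-0100',
    status='pending',
)
print(order.pk, Orders.objects.filter(status='pending').count())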
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if a < 97:
print('A')
else:
print('a')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
a = ord(input().rstrip())
if a < 97:
print('A')
else:
print('a')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
a = ord(input().rstrip())
if a < 97:
print('A')
else:
print('a')
'''
ord(A)=65
ord(Z)=90
ord(a)=97
ord(z)=122
'''
|
flexible
|
{
"blob_id": "e7c454b2bf6cf324e1e318e374e07a83812c978b",
"index": 2381,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif a < 97:\n print('A')\nelse:\n print('a')\n<mask token>\n",
"step-3": "a = ord(input().rstrip())\nif a < 97:\n print('A')\nelse:\n print('a')\n<mask token>\n",
"step-4": "a = ord(input().rstrip())\n\nif a < 97:\n print('A')\nelse:\n print('a')\n \n\n''' \n\nord(A)=65\nord(Z)=90\nord(a)=97\nord(z)=122\n\n'''\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
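The branch in the snippet above leans on ASCII layout (uppercase 65-90, lowercase 97-122, as its docstring notes); a quick standard-library check:

# ord() gives the code point; in A-Za-z anything below 97 is uppercase.
for ch in 'AZaz':
    print(ch, ord(ch), 'A' if ord(ch) < 97 else 'a')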
from rest_framework import serializers
from issue.models import Issue
class IssueSerializer(serializers.ModelSerializer):
"""DRF Serializer For Listing Published Issue"""
class Meta:
model = Issue
fields = ['issueName', 'website', 'issueBody', 'impact', 'published_on'
]
class IssueCreateSerializer(serializers.ModelSerializer):
"""DRF Serializer Fpr Creating Issues By The User"""
class Meta:
model = Issue
fields = ['issueName', 'website', 'issueBody', 'impact', 'project',
'email']
class IssueStatusSerializer(serializers.ModelSerializer):
"""DRF Serializer For Listing Published Issue"""
class Meta:
model = Issue
fields = ['impact', 'angle', 'name']
|
normal
|
{
"blob_id": "e4422010337eade12226d84c79532cdbcae68d67",
"index": 1495,
"step-1": "<mask token>\n\n\nclass IssueCreateSerializer(serializers.ModelSerializer):\n <mask token>\n\n\n class Meta:\n model = Issue\n fields = ['issueName', 'website', 'issueBody', 'impact', 'project',\n 'email']\n\n\nclass IssueStatusSerializer(serializers.ModelSerializer):\n \"\"\"DRF Serializer For Listing Published Issue\"\"\"\n\n\n class Meta:\n model = Issue\n fields = ['impact', 'angle', 'name']\n",
"step-2": "<mask token>\n\n\nclass IssueSerializer(serializers.ModelSerializer):\n <mask token>\n\n\n class Meta:\n model = Issue\n fields = ['issueName', 'website', 'issueBody', 'impact', 'published_on'\n ]\n\n\nclass IssueCreateSerializer(serializers.ModelSerializer):\n \"\"\"DRF Serializer Fpr Creating Issues By The User\"\"\"\n\n\n class Meta:\n model = Issue\n fields = ['issueName', 'website', 'issueBody', 'impact', 'project',\n 'email']\n\n\nclass IssueStatusSerializer(serializers.ModelSerializer):\n \"\"\"DRF Serializer For Listing Published Issue\"\"\"\n\n\n class Meta:\n model = Issue\n fields = ['impact', 'angle', 'name']\n",
"step-3": "<mask token>\n\n\nclass IssueSerializer(serializers.ModelSerializer):\n \"\"\"DRF Serializer For Listing Published Issue\"\"\"\n\n\n class Meta:\n model = Issue\n fields = ['issueName', 'website', 'issueBody', 'impact', 'published_on'\n ]\n\n\nclass IssueCreateSerializer(serializers.ModelSerializer):\n \"\"\"DRF Serializer Fpr Creating Issues By The User\"\"\"\n\n\n class Meta:\n model = Issue\n fields = ['issueName', 'website', 'issueBody', 'impact', 'project',\n 'email']\n\n\nclass IssueStatusSerializer(serializers.ModelSerializer):\n \"\"\"DRF Serializer For Listing Published Issue\"\"\"\n\n\n class Meta:\n model = Issue\n fields = ['impact', 'angle', 'name']\n",
"step-4": "from rest_framework import serializers\nfrom issue.models import Issue\n\n\nclass IssueSerializer(serializers.ModelSerializer):\n \"\"\"DRF Serializer For Listing Published Issue\"\"\"\n\n\n class Meta:\n model = Issue\n fields = ['issueName', 'website', 'issueBody', 'impact', 'published_on'\n ]\n\n\nclass IssueCreateSerializer(serializers.ModelSerializer):\n \"\"\"DRF Serializer Fpr Creating Issues By The User\"\"\"\n\n\n class Meta:\n model = Issue\n fields = ['issueName', 'website', 'issueBody', 'impact', 'project',\n 'email']\n\n\nclass IssueStatusSerializer(serializers.ModelSerializer):\n \"\"\"DRF Serializer For Listing Published Issue\"\"\"\n\n\n class Meta:\n model = Issue\n fields = ['impact', 'angle', 'name']\n",
"step-5": null,
"step-ids": [
3,
5,
6,
7
]
}
|
[
3,
5,
6,
7
] |
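A minimal sketch of serializing with IssueSerializer above, assuming a configured Django/DRF project; the serializer's module path and the field values are illustrative.

# Illustrative only: requires a configured Django + DRF project.
from issue.models import Issue
from issue.serializers import IssueSerializer  # hypothetical module path

issue = Issue(issueName='Stored XSS in comments',
              website='https://example.com',
              issueBody='Unescaped user input is rendered back', impact='High')
print(IssueSerializer(issue).data)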
# -*- coding: utf-8 -*-
# Copyright 2017 Objectif Libre
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Test SummaryModel objects."""
from oslotest import base
from cloudkitty.api.v1.datamodels import report
class TestSummary(base.BaseTestCase):
def setUp(self):
super(TestSummary, self).setUp()
def test_nulls(self):
s = report.SummaryModel(begin=None,
end=None,
tenant_id=None,
res_type=None,
rate=None)
self.assertIsNone(s.begin)
self.assertIsNone(s.end)
self.assertEqual(s.tenant_id, "ALL")
self.assertEqual(s.res_type, "ALL")
self.assertEqual(s.rate, "0")
|
normal
|
{
"blob_id": "0ea67ac97ec8e7f287a2430c67f8f7d841d8b646",
"index": 813,
"step-1": "<mask token>\n\n\nclass TestSummary(base.BaseTestCase):\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass TestSummary(base.BaseTestCase):\n\n def setUp(self):\n super(TestSummary, self).setUp()\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass TestSummary(base.BaseTestCase):\n\n def setUp(self):\n super(TestSummary, self).setUp()\n\n def test_nulls(self):\n s = report.SummaryModel(begin=None, end=None, tenant_id=None,\n res_type=None, rate=None)\n self.assertIsNone(s.begin)\n self.assertIsNone(s.end)\n self.assertEqual(s.tenant_id, 'ALL')\n self.assertEqual(s.res_type, 'ALL')\n self.assertEqual(s.rate, '0')\n",
"step-4": "<mask token>\nfrom oslotest import base\nfrom cloudkitty.api.v1.datamodels import report\n\n\nclass TestSummary(base.BaseTestCase):\n\n def setUp(self):\n super(TestSummary, self).setUp()\n\n def test_nulls(self):\n s = report.SummaryModel(begin=None, end=None, tenant_id=None,\n res_type=None, rate=None)\n self.assertIsNone(s.begin)\n self.assertIsNone(s.end)\n self.assertEqual(s.tenant_id, 'ALL')\n self.assertEqual(s.res_type, 'ALL')\n self.assertEqual(s.rate, '0')\n",
"step-5": "# -*- coding: utf-8 -*-\n# Copyright 2017 Objectif Libre\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n#\n\"\"\"Test SummaryModel objects.\"\"\"\nfrom oslotest import base\n\nfrom cloudkitty.api.v1.datamodels import report\n\n\nclass TestSummary(base.BaseTestCase):\n\n def setUp(self):\n super(TestSummary, self).setUp()\n\n def test_nulls(self):\n s = report.SummaryModel(begin=None,\n end=None,\n tenant_id=None,\n res_type=None,\n rate=None)\n self.assertIsNone(s.begin)\n self.assertIsNone(s.end)\n self.assertEqual(s.tenant_id, \"ALL\")\n self.assertEqual(s.res_type, \"ALL\")\n self.assertEqual(s.rate, \"0\")\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
# Overall code framework

import pygame  # import libraries

# Create the window


def GameStart():
    # Game background object
    Background = pygame.image.load()
    # Paddle image object
    Baddle = pygame.image.load()
    # Ball object
    Ball = pygame.image.load()
    # Paddle position info
    BaffleX
    BaffleY
    # Ball position info
    BallX
    BallY
    BallSpeed
    # Clock object for frame-rate control
    # Clock object for the on-screen timer
    # Set the timer font
    # Game result
    while True:
        # Handle incoming events
        # Draw the background
        # Display the elapsed time
        # Draw the ball
        # Check ball boundary conditions
        # Update the paddle position after movement
        # Check paddle boundary conditions
        # Refresh the display
        pass


def GameResult():
    # Surface object for the result-screen background
    # Result-screen prompt
    # Font object for the result text
    # Restart button
    # Restart button (hover state)
    # Game result
    pass


if __name__ == "__main__":
    GameStart()
|
normal
|
{
"blob_id": "9aeaab445ae9df5c27cc4375a8b6bf320d5ab873",
"index": 6378,
"step-1": "#代码整体框架\n\n#引用库\n\n#创建窗口\n\n\ndef GameStart():\n\n\n #游戏背景对象\n \n Background = pygame.image.load()\n \n #挡板背景对象\n\n Baddle = pygame.image.load()\n\n #球对象 \n\n Ball = pygame.image.load()\n\n #挡板位置信息\n\n BaffleX\n BaffleY\n\n #球位置信息\n\n BallX\n ballY\n BallSpeed\n\n #帧率控制Clock对象\n\n #显示时间Clock对象\n\n #设置时间字体\n\n #游戏结果\n\n\n while True:\n #接受信息处理\n\n #绘制背景\n\n #显示时间\n\n #绘制球\n\n #判断球边界条件\n\n #定位板移动后坐标\n\n #判断挡板边界条件\n\n #刷新显示\n \n\n\n\ndef GameResult():\n\n\n #游戏结果背景Surface对象\n\n #游戏结果引导\n\n # 游戏结果Font对象\n\n # 重新开始按钮\n\n # 重新开始Hover按钮\n\n # 游戏结果\n\n\nif __name__ == \"__main__\":\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
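The outline above corresponds to a small runnable loop; a minimal sketch that fills in the skeleton with drawn shapes instead of loaded images (window size, speeds and colours are illustrative choices).

import pygame

def GameStart():
    pygame.init()
    screen = pygame.display.set_mode((640, 480))
    clock = pygame.time.Clock()                   # frame-rate control
    paddle = pygame.Rect(280, 460, 80, 10)
    ball = pygame.Rect(320, 240, 12, 12)
    speed = [4, 4]
    while True:
        for event in pygame.event.get():          # handle incoming events
            if event.type == pygame.QUIT:
                pygame.quit()
                return
        ball.move_ip(speed[0], speed[1])
        if ball.left < 0 or ball.right > 640:     # ball boundary conditions
            speed[0] = -speed[0]
        if ball.top < 0:
            speed[1] = -speed[1]
        if ball.top > 480:                        # missed: reset the ball
            ball.center = (320, 240)
        paddle.centerx = pygame.mouse.get_pos()[0]
        paddle.clamp_ip(screen.get_rect())        # paddle boundary conditions
        if ball.colliderect(paddle):
            speed[1] = -abs(speed[1])
        screen.fill((0, 0, 0))                    # draw the background
        pygame.draw.rect(screen, (255, 255, 255), paddle)
        pygame.draw.ellipse(screen, (255, 255, 0), ball)
        pygame.display.flip()                     # refresh the display
        clock.tick(60)

if __name__ == '__main__':
    GameStart()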
<|reserved_special_token_0|>
class Location:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def update_overall_average_value(self):
value_sum = 0
for event in self.events:
value_sum += event.value
value_count = len(self.events)
if value_count > 0:
self.overall_average_value = value_sum / value_count
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Location:
def __init__(self, location_dict):
self.x = location_dict['x']
self.y = location_dict['y']
self.id = location_dict['id']
self.events = []
self.latest_average_value = 0
self.latest_event_count = 0
self.average_value_at_time_dict = {}
self.overall_average_value = 0
<|reserved_special_token_0|>
def update_overall_average_value(self):
value_sum = 0
for event in self.events:
value_sum += event.value
value_count = len(self.events)
if value_count > 0:
self.overall_average_value = value_sum / value_count
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Location:
def __init__(self, location_dict):
self.x = location_dict['x']
self.y = location_dict['y']
self.id = location_dict['id']
self.events = []
self.latest_average_value = 0
self.latest_event_count = 0
self.average_value_at_time_dict = {}
self.overall_average_value = 0
def update_average_values_at_time(self, time_to_calculate):
self.latest_event_count = 0
sum_of_values = 0
for event in self.events:
if event.time_rounded_to_minute == time_to_calculate:
sum_of_values += event.value
self.latest_event_count += 1
self.latest_average_value = 0
if self.latest_event_count > 0:
self.latest_average_value = sum_of_values / self.latest_event_count
formatted_time = datetime.strftime(datetime.utcfromtimestamp(
time_to_calculate + 3600), '%d/%m/%Y %H:%M:%S')
self.average_value_at_time_dict[formatted_time
] = self.latest_average_value
def update_overall_average_value(self):
value_sum = 0
for event in self.events:
value_sum += event.value
value_count = len(self.events)
if value_count > 0:
self.overall_average_value = value_sum / value_count
<|reserved_special_token_1|>
from datetime import datetime
class Location:
def __init__(self, location_dict):
self.x = location_dict['x']
self.y = location_dict['y']
self.id = location_dict['id']
self.events = []
self.latest_average_value = 0
self.latest_event_count = 0
self.average_value_at_time_dict = {}
self.overall_average_value = 0
def update_average_values_at_time(self, time_to_calculate):
self.latest_event_count = 0
sum_of_values = 0
for event in self.events:
if event.time_rounded_to_minute == time_to_calculate:
sum_of_values += event.value
self.latest_event_count += 1
self.latest_average_value = 0
if self.latest_event_count > 0:
self.latest_average_value = sum_of_values / self.latest_event_count
formatted_time = datetime.strftime(datetime.utcfromtimestamp(
time_to_calculate + 3600), '%d/%m/%Y %H:%M:%S')
self.average_value_at_time_dict[formatted_time
] = self.latest_average_value
def update_overall_average_value(self):
value_sum = 0
for event in self.events:
value_sum += event.value
value_count = len(self.events)
if value_count > 0:
self.overall_average_value = value_sum / value_count
<|reserved_special_token_1|>
from datetime import datetime
class Location:
def __init__(self, location_dict):
self.x = location_dict['x']
self.y = location_dict['y']
self.id = location_dict['id']
self.events = []
self.latest_average_value = 0
self.latest_event_count = 0
self.average_value_at_time_dict = {}
self.overall_average_value = 0
def update_average_values_at_time(self, time_to_calculate):
self.latest_event_count = 0
sum_of_values = 0
for event in self.events:
if event.time_rounded_to_minute == time_to_calculate:
# remove event from self.events
# remove event id from event_id_set in main
sum_of_values += event.value
self.latest_event_count += 1
self.latest_average_value = 0
if self.latest_event_count > 0:
self.latest_average_value = sum_of_values / self.latest_event_count
formatted_time = datetime.strftime(datetime.utcfromtimestamp(time_to_calculate + 3600), "%d/%m/%Y %H:%M:%S")
self.average_value_at_time_dict[formatted_time] = self.latest_average_value
def update_overall_average_value(self):
value_sum = 0
for event in self.events:
value_sum += event.value
value_count = len(self.events)
if value_count > 0:
self.overall_average_value = value_sum / value_count
|
flexible
|
{
"blob_id": "efbfe95acbe0b97e863c8788bca4a71633da36b3",
"index": 1906,
"step-1": "<mask token>\n\n\nclass Location:\n <mask token>\n <mask token>\n\n def update_overall_average_value(self):\n value_sum = 0\n for event in self.events:\n value_sum += event.value\n value_count = len(self.events)\n if value_count > 0:\n self.overall_average_value = value_sum / value_count\n",
"step-2": "<mask token>\n\n\nclass Location:\n\n def __init__(self, location_dict):\n self.x = location_dict['x']\n self.y = location_dict['y']\n self.id = location_dict['id']\n self.events = []\n self.latest_average_value = 0\n self.latest_event_count = 0\n self.average_value_at_time_dict = {}\n self.overall_average_value = 0\n <mask token>\n\n def update_overall_average_value(self):\n value_sum = 0\n for event in self.events:\n value_sum += event.value\n value_count = len(self.events)\n if value_count > 0:\n self.overall_average_value = value_sum / value_count\n",
"step-3": "<mask token>\n\n\nclass Location:\n\n def __init__(self, location_dict):\n self.x = location_dict['x']\n self.y = location_dict['y']\n self.id = location_dict['id']\n self.events = []\n self.latest_average_value = 0\n self.latest_event_count = 0\n self.average_value_at_time_dict = {}\n self.overall_average_value = 0\n\n def update_average_values_at_time(self, time_to_calculate):\n self.latest_event_count = 0\n sum_of_values = 0\n for event in self.events:\n if event.time_rounded_to_minute == time_to_calculate:\n sum_of_values += event.value\n self.latest_event_count += 1\n self.latest_average_value = 0\n if self.latest_event_count > 0:\n self.latest_average_value = sum_of_values / self.latest_event_count\n formatted_time = datetime.strftime(datetime.utcfromtimestamp(\n time_to_calculate + 3600), '%d/%m/%Y %H:%M:%S')\n self.average_value_at_time_dict[formatted_time\n ] = self.latest_average_value\n\n def update_overall_average_value(self):\n value_sum = 0\n for event in self.events:\n value_sum += event.value\n value_count = len(self.events)\n if value_count > 0:\n self.overall_average_value = value_sum / value_count\n",
"step-4": "from datetime import datetime\n\n\nclass Location:\n\n def __init__(self, location_dict):\n self.x = location_dict['x']\n self.y = location_dict['y']\n self.id = location_dict['id']\n self.events = []\n self.latest_average_value = 0\n self.latest_event_count = 0\n self.average_value_at_time_dict = {}\n self.overall_average_value = 0\n\n def update_average_values_at_time(self, time_to_calculate):\n self.latest_event_count = 0\n sum_of_values = 0\n for event in self.events:\n if event.time_rounded_to_minute == time_to_calculate:\n sum_of_values += event.value\n self.latest_event_count += 1\n self.latest_average_value = 0\n if self.latest_event_count > 0:\n self.latest_average_value = sum_of_values / self.latest_event_count\n formatted_time = datetime.strftime(datetime.utcfromtimestamp(\n time_to_calculate + 3600), '%d/%m/%Y %H:%M:%S')\n self.average_value_at_time_dict[formatted_time\n ] = self.latest_average_value\n\n def update_overall_average_value(self):\n value_sum = 0\n for event in self.events:\n value_sum += event.value\n value_count = len(self.events)\n if value_count > 0:\n self.overall_average_value = value_sum / value_count\n",
"step-5": "from datetime import datetime\n\n\nclass Location:\n\n def __init__(self, location_dict):\n self.x = location_dict['x']\n self.y = location_dict['y']\n self.id = location_dict['id']\n\n self.events = []\n\n self.latest_average_value = 0\n self.latest_event_count = 0\n self.average_value_at_time_dict = {}\n self.overall_average_value = 0\n\n def update_average_values_at_time(self, time_to_calculate):\n self.latest_event_count = 0\n sum_of_values = 0\n for event in self.events:\n if event.time_rounded_to_minute == time_to_calculate:\n # remove event from self.events\n # remove event id from event_id_set in main\n sum_of_values += event.value\n self.latest_event_count += 1\n self.latest_average_value = 0\n if self.latest_event_count > 0:\n self.latest_average_value = sum_of_values / self.latest_event_count\n\n formatted_time = datetime.strftime(datetime.utcfromtimestamp(time_to_calculate + 3600), \"%d/%m/%Y %H:%M:%S\")\n self.average_value_at_time_dict[formatted_time] = self.latest_average_value\n\n def update_overall_average_value(self):\n value_sum = 0\n for event in self.events:\n value_sum += event.value\n value_count = len(self.events)\n if value_count > 0:\n self.overall_average_value = value_sum / value_count\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
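A self-contained sketch exercising the Location class above; SimpleNamespace stands in for its event objects, since the class only reads .value and .time_rounded_to_minute from them.

# Run alongside the class definition above; the events are stand-ins.
from types import SimpleNamespace

loc = Location({'x': 1.0, 'y': 2.0, 'id': 'sensor-7'})
minute = 1600000020  # an epoch timestamp already rounded to the minute
loc.events = [
    SimpleNamespace(value=10.0, time_rounded_to_minute=minute),
    SimpleNamespace(value=14.0, time_rounded_to_minute=minute),
]
loc.update_average_values_at_time(minute)
loc.update_overall_average_value()
print(loc.latest_average_value, loc.overall_average_value)  # 12.0 12.0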
<|reserved_special_token_0|>
class ExecutionMetrics:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ExecutionMetrics:
def __init__(self, duration, succeeded: bool, timed_out: bool, lines:
int, error: List[str]=None):
if error is None:
error = list()
self.duration = duration
self.succeeded: bool = succeeded
self.timed_out: bool = timed_out
self.lines: int = lines
self.error: List[str] = error
def __str__(self):
return (
'succeeded: {succeeded} ; lines: {lines} ; duration: {duration} s ; error: {error}'
.format(succeeded=self.succeeded, lines=self.lines, duration=
self.duration, error=self.error))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ExecutionMetrics:
def __init__(self, duration, succeeded: bool, timed_out: bool, lines:
int, error: List[str]=None):
if error is None:
error = list()
self.duration = duration
self.succeeded: bool = succeeded
self.timed_out: bool = timed_out
self.lines: int = lines
self.error: List[str] = error
def __str__(self):
return (
'succeeded: {succeeded} ; lines: {lines} ; duration: {duration} s ; error: {error}'
.format(succeeded=self.succeeded, lines=self.lines, duration=
self.duration, error=self.error))
def read_stdout_until(process, terminal_startswith: str, failure_startswith:
List[str], timeout_time: float, debug: bool=False):
start = time.time()
line: str = ''
lines: int = 0
duration = None
succeeded = True
timed_out = False
errors: List[str] = list()
with timeout(timeout_time):
while True:
line = process.stdout.readline()
if debug:
print(line, end='')
for start_str in failure_startswith:
if line.startswith(start_str):
errors.append(line)
succeeded = False
if any(line.startswith(start_str) for start_str in
terminal_startswith):
duration = time.time() - start
break
else:
lines += 1
if duration is None:
succeeded = False
timed_out = True
duration = timeout_time
return ExecutionMetrics(duration, succeeded, timed_out, lines, errors)
<|reserved_special_token_1|>
import time
from typing import List
from classiclikeiguana.timeout import timeout
class ExecutionMetrics:
def __init__(self, duration, succeeded: bool, timed_out: bool, lines:
int, error: List[str]=None):
if error is None:
error = list()
self.duration = duration
self.succeeded: bool = succeeded
self.timed_out: bool = timed_out
self.lines: int = lines
self.error: List[str] = error
def __str__(self):
return (
'succeeded: {succeeded} ; lines: {lines} ; duration: {duration} s ; error: {error}'
.format(succeeded=self.succeeded, lines=self.lines, duration=
self.duration, error=self.error))
def read_stdout_until(process, terminal_startswith: str, failure_startswith:
List[str], timeout_time: float, debug: bool=False):
start = time.time()
line: str = ''
lines: int = 0
duration = None
succeeded = True
timed_out = False
errors: List[str] = list()
with timeout(timeout_time):
while True:
line = process.stdout.readline()
if debug:
print(line, end='')
for start_str in failure_startswith:
if line.startswith(start_str):
errors.append(line)
succeeded = False
if any(line.startswith(start_str) for start_str in
terminal_startswith):
duration = time.time() - start
break
else:
lines += 1
if duration is None:
succeeded = False
timed_out = True
duration = timeout_time
return ExecutionMetrics(duration, succeeded, timed_out, lines, errors)
<|reserved_special_token_1|>
import time
from typing import List
from classiclikeiguana.timeout import timeout
class ExecutionMetrics:
def __init__(self, duration, succeeded: bool, timed_out: bool, lines: int, error: List[str] = None):
if error is None:
error = list()
self.duration = duration
self.succeeded: bool = succeeded
self.timed_out: bool = timed_out
self.lines: int = lines
self.error: List[str] = error
def __str__(self):
return "succeeded: {succeeded} ; lines: {lines} ; duration: {duration} s ; error: {error}" \
.format(succeeded=self.succeeded, lines=self.lines, duration=self.duration, error=self.error)
def read_stdout_until(process, terminal_startswith: str, failure_startswith: List[str], timeout_time: float,
debug: bool = False):
start = time.time()
line: str = ""
lines: int = 0
duration = None
succeeded = True
timed_out = False
errors: List[str] = list()
with timeout(timeout_time):
while True:
line = process.stdout.readline()
if debug: print(line, end="")
for start_str in failure_startswith:
if line.startswith(start_str):
errors.append(line)
succeeded = False
if any(line.startswith(start_str) for start_str in terminal_startswith):
duration = time.time() - start
break
else:
lines += 1
if duration is None:
succeeded = False
timed_out = True
duration = timeout_time
return ExecutionMetrics(duration, succeeded, timed_out, lines, errors)
|
flexible
|
{
"blob_id": "f870c776a62f3b743356c5515cd25e588dbfca15",
"index": 8183,
"step-1": "<mask token>\n\n\nclass ExecutionMetrics:\n <mask token>\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass ExecutionMetrics:\n\n def __init__(self, duration, succeeded: bool, timed_out: bool, lines:\n int, error: List[str]=None):\n if error is None:\n error = list()\n self.duration = duration\n self.succeeded: bool = succeeded\n self.timed_out: bool = timed_out\n self.lines: int = lines\n self.error: List[str] = error\n\n def __str__(self):\n return (\n 'succeeded: {succeeded} ; lines: {lines} ; duration: {duration} s ; error: {error}'\n .format(succeeded=self.succeeded, lines=self.lines, duration=\n self.duration, error=self.error))\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass ExecutionMetrics:\n\n def __init__(self, duration, succeeded: bool, timed_out: bool, lines:\n int, error: List[str]=None):\n if error is None:\n error = list()\n self.duration = duration\n self.succeeded: bool = succeeded\n self.timed_out: bool = timed_out\n self.lines: int = lines\n self.error: List[str] = error\n\n def __str__(self):\n return (\n 'succeeded: {succeeded} ; lines: {lines} ; duration: {duration} s ; error: {error}'\n .format(succeeded=self.succeeded, lines=self.lines, duration=\n self.duration, error=self.error))\n\n\ndef read_stdout_until(process, terminal_startswith: str, failure_startswith:\n List[str], timeout_time: float, debug: bool=False):\n start = time.time()\n line: str = ''\n lines: int = 0\n duration = None\n succeeded = True\n timed_out = False\n errors: List[str] = list()\n with timeout(timeout_time):\n while True:\n line = process.stdout.readline()\n if debug:\n print(line, end='')\n for start_str in failure_startswith:\n if line.startswith(start_str):\n errors.append(line)\n succeeded = False\n if any(line.startswith(start_str) for start_str in\n terminal_startswith):\n duration = time.time() - start\n break\n else:\n lines += 1\n if duration is None:\n succeeded = False\n timed_out = True\n duration = timeout_time\n return ExecutionMetrics(duration, succeeded, timed_out, lines, errors)\n",
"step-4": "import time\nfrom typing import List\nfrom classiclikeiguana.timeout import timeout\n\n\nclass ExecutionMetrics:\n\n def __init__(self, duration, succeeded: bool, timed_out: bool, lines:\n int, error: List[str]=None):\n if error is None:\n error = list()\n self.duration = duration\n self.succeeded: bool = succeeded\n self.timed_out: bool = timed_out\n self.lines: int = lines\n self.error: List[str] = error\n\n def __str__(self):\n return (\n 'succeeded: {succeeded} ; lines: {lines} ; duration: {duration} s ; error: {error}'\n .format(succeeded=self.succeeded, lines=self.lines, duration=\n self.duration, error=self.error))\n\n\ndef read_stdout_until(process, terminal_startswith: str, failure_startswith:\n List[str], timeout_time: float, debug: bool=False):\n start = time.time()\n line: str = ''\n lines: int = 0\n duration = None\n succeeded = True\n timed_out = False\n errors: List[str] = list()\n with timeout(timeout_time):\n while True:\n line = process.stdout.readline()\n if debug:\n print(line, end='')\n for start_str in failure_startswith:\n if line.startswith(start_str):\n errors.append(line)\n succeeded = False\n if any(line.startswith(start_str) for start_str in\n terminal_startswith):\n duration = time.time() - start\n break\n else:\n lines += 1\n if duration is None:\n succeeded = False\n timed_out = True\n duration = timeout_time\n return ExecutionMetrics(duration, succeeded, timed_out, lines, errors)\n",
"step-5": "import time\nfrom typing import List\n\nfrom classiclikeiguana.timeout import timeout\n\n\nclass ExecutionMetrics:\n def __init__(self, duration, succeeded: bool, timed_out: bool, lines: int, error: List[str] = None):\n if error is None:\n error = list()\n self.duration = duration\n self.succeeded: bool = succeeded\n self.timed_out: bool = timed_out\n self.lines: int = lines\n self.error: List[str] = error\n\n def __str__(self):\n return \"succeeded: {succeeded} ; lines: {lines} ; duration: {duration} s ; error: {error}\" \\\n .format(succeeded=self.succeeded, lines=self.lines, duration=self.duration, error=self.error)\n\n\ndef read_stdout_until(process, terminal_startswith: str, failure_startswith: List[str], timeout_time: float,\n debug: bool = False):\n start = time.time()\n line: str = \"\"\n lines: int = 0\n duration = None\n succeeded = True\n timed_out = False\n errors: List[str] = list()\n with timeout(timeout_time):\n while True:\n line = process.stdout.readline()\n if debug: print(line, end=\"\")\n for start_str in failure_startswith:\n if line.startswith(start_str):\n errors.append(line)\n succeeded = False\n if any(line.startswith(start_str) for start_str in terminal_startswith):\n duration = time.time() - start\n break\n else:\n lines += 1\n\n if duration is None:\n succeeded = False\n timed_out = True\n duration = timeout_time\n return ExecutionMetrics(duration, succeeded, timed_out, lines, errors)\n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
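A sketch of driving read_stdout_until above with a real subprocess. classiclikeiguana.timeout is repo-specific, so the SIGALRM-based stand-in below is an assumption about its contract (it must swallow the alarm so the caller detects the timeout via the unset duration); Unix only.

import signal
import subprocess
from contextlib import contextmanager

@contextmanager
def timeout(seconds):  # assumed stand-in, not the repo's implementation
    def _raise(signum, frame):
        raise TimeoutError
    old = signal.signal(signal.SIGALRM, _raise)
    signal.alarm(int(seconds))
    try:
        yield
    except TimeoutError:
        pass  # swallowed: read_stdout_until then reports timed_out
    finally:
        signal.alarm(0)
        signal.signal(signal.SIGALRM, old)

proc = subprocess.Popen(['sh', '-c', 'echo booting; echo READY'],
                        stdout=subprocess.PIPE, text=True)
print(read_stdout_until(proc, ['READY'], ['ERROR'], timeout_time=5))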
<|reserved_special_token_0|>
class lfwdata:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class lfwdata:
def __init__(self):
self._pairs = []
pairs = open(os.path.join(cfg.LFW_IMAGEPATH, '../pairs.txt'))
pairs.readline()
for pair in pairs:
pair = pair.split()
if len(pair) == 3:
img1 = os.path.join(pair[0], pair[0] + '_{:04d}.jpg'.format
(int(pair[1])))
img2 = os.path.join(pair[0], pair[0] + '_{:04d}.jpg'.format
(int(pair[2])))
label = True
elif len(pair) == 4:
img1 = os.path.join(pair[0], pair[0] + '_{:04d}.jpg'.format
(int(pair[1])))
img2 = os.path.join(pair[2], pair[2] + '_{:04d}.jpg'.format
(int(pair[3])))
label = False
else:
assert False, pair
self._pairs.append({'img': [img1, img2], 'label': label})
print('Number of pairs: {}'.format(len(self._pairs)))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class lfwdata:
def __init__(self):
self._pairs = []
pairs = open(os.path.join(cfg.LFW_IMAGEPATH, '../pairs.txt'))
pairs.readline()
for pair in pairs:
pair = pair.split()
if len(pair) == 3:
img1 = os.path.join(pair[0], pair[0] + '_{:04d}.jpg'.format
(int(pair[1])))
img2 = os.path.join(pair[0], pair[0] + '_{:04d}.jpg'.format
(int(pair[2])))
label = True
elif len(pair) == 4:
img1 = os.path.join(pair[0], pair[0] + '_{:04d}.jpg'.format
(int(pair[1])))
img2 = os.path.join(pair[2], pair[2] + '_{:04d}.jpg'.format
(int(pair[3])))
label = False
else:
assert False, pair
self._pairs.append({'img': [img1, img2], 'label': label})
print('Number of pairs: {}'.format(len(self._pairs)))
if __name__ == '__main__':
pairs = lfwdata()
<|reserved_special_token_1|>
import os
import config as cfg
import numpy as np
class lfwdata:
def __init__(self):
self._pairs = []
pairs = open(os.path.join(cfg.LFW_IMAGEPATH, '../pairs.txt'))
pairs.readline()
for pair in pairs:
pair = pair.split()
if len(pair) == 3:
img1 = os.path.join(pair[0], pair[0] + '_{:04d}.jpg'.format
(int(pair[1])))
img2 = os.path.join(pair[0], pair[0] + '_{:04d}.jpg'.format
(int(pair[2])))
label = True
elif len(pair) == 4:
img1 = os.path.join(pair[0], pair[0] + '_{:04d}.jpg'.format
(int(pair[1])))
img2 = os.path.join(pair[2], pair[2] + '_{:04d}.jpg'.format
(int(pair[3])))
label = False
else:
assert False, pair
self._pairs.append({'img': [img1, img2], 'label': label})
print('Number of pairs: {}'.format(len(self._pairs)))
if __name__ == '__main__':
pairs = lfwdata()
<|reserved_special_token_1|>
import os
import config as cfg
import numpy as np
class lfwdata():
def __init__(self):
self._pairs = []
pairs = open(os.path.join(cfg.LFW_IMAGEPATH, '../pairs.txt'))
pairs.readline()
for pair in pairs:
pair = pair.split()
if len(pair) == 3:
img1 = os.path.join(
pair[0], pair[0] + '_{:04d}.jpg'.format(int(pair[1])))
img2 = os.path.join(
pair[0], pair[0] + '_{:04d}.jpg'.format(int(pair[2])))
label = True
elif len(pair) == 4:
img1 = os.path.join(
pair[0], pair[0] + '_{:04d}.jpg'.format(int(pair[1])))
img2 = os.path.join(
pair[2], pair[2] + '_{:04d}.jpg'.format(int(pair[3])))
label = False
else:
assert False, pair
self._pairs.append({'img': [img1, img2], 'label': label})
print('Number of pairs: {}'.format(len(self._pairs)))
if __name__ == '__main__':
pairs = lfwdata()
|
flexible
|
{
"blob_id": "ccdd7a5e0a1de75762530a7cadd919a2ee753d18",
"index": 1758,
"step-1": "<mask token>\n\n\nclass lfwdata:\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass lfwdata:\n\n def __init__(self):\n self._pairs = []\n pairs = open(os.path.join(cfg.LFW_IMAGEPATH, '../pairs.txt'))\n pairs.readline()\n for pair in pairs:\n pair = pair.split()\n if len(pair) == 3:\n img1 = os.path.join(pair[0], pair[0] + '_{:04d}.jpg'.format\n (int(pair[1])))\n img2 = os.path.join(pair[0], pair[0] + '_{:04d}.jpg'.format\n (int(pair[2])))\n label = True\n elif len(pair) == 4:\n img1 = os.path.join(pair[0], pair[0] + '_{:04d}.jpg'.format\n (int(pair[1])))\n img2 = os.path.join(pair[2], pair[2] + '_{:04d}.jpg'.format\n (int(pair[3])))\n label = False\n else:\n assert False, pair\n self._pairs.append({'img': [img1, img2], 'label': label})\n print('Number of pairs: {}'.format(len(self._pairs)))\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass lfwdata:\n\n def __init__(self):\n self._pairs = []\n pairs = open(os.path.join(cfg.LFW_IMAGEPATH, '../pairs.txt'))\n pairs.readline()\n for pair in pairs:\n pair = pair.split()\n if len(pair) == 3:\n img1 = os.path.join(pair[0], pair[0] + '_{:04d}.jpg'.format\n (int(pair[1])))\n img2 = os.path.join(pair[0], pair[0] + '_{:04d}.jpg'.format\n (int(pair[2])))\n label = True\n elif len(pair) == 4:\n img1 = os.path.join(pair[0], pair[0] + '_{:04d}.jpg'.format\n (int(pair[1])))\n img2 = os.path.join(pair[2], pair[2] + '_{:04d}.jpg'.format\n (int(pair[3])))\n label = False\n else:\n assert False, pair\n self._pairs.append({'img': [img1, img2], 'label': label})\n print('Number of pairs: {}'.format(len(self._pairs)))\n\n\nif __name__ == '__main__':\n pairs = lfwdata()\n",
"step-4": "import os\nimport config as cfg\nimport numpy as np\n\n\nclass lfwdata:\n\n def __init__(self):\n self._pairs = []\n pairs = open(os.path.join(cfg.LFW_IMAGEPATH, '../pairs.txt'))\n pairs.readline()\n for pair in pairs:\n pair = pair.split()\n if len(pair) == 3:\n img1 = os.path.join(pair[0], pair[0] + '_{:04d}.jpg'.format\n (int(pair[1])))\n img2 = os.path.join(pair[0], pair[0] + '_{:04d}.jpg'.format\n (int(pair[2])))\n label = True\n elif len(pair) == 4:\n img1 = os.path.join(pair[0], pair[0] + '_{:04d}.jpg'.format\n (int(pair[1])))\n img2 = os.path.join(pair[2], pair[2] + '_{:04d}.jpg'.format\n (int(pair[3])))\n label = False\n else:\n assert False, pair\n self._pairs.append({'img': [img1, img2], 'label': label})\n print('Number of pairs: {}'.format(len(self._pairs)))\n\n\nif __name__ == '__main__':\n pairs = lfwdata()\n",
"step-5": "import os\nimport config as cfg\nimport numpy as np\n\n\nclass lfwdata():\n\n def __init__(self):\n self._pairs = []\n\n pairs = open(os.path.join(cfg.LFW_IMAGEPATH, '../pairs.txt'))\n pairs.readline()\n for pair in pairs:\n pair = pair.split()\n if len(pair) == 3:\n img1 = os.path.join(\n pair[0], pair[0] + '_{:04d}.jpg'.format(int(pair[1])))\n img2 = os.path.join(\n pair[0], pair[0] + '_{:04d}.jpg'.format(int(pair[2])))\n label = True\n elif len(pair) == 4:\n img1 = os.path.join(\n pair[0], pair[0] + '_{:04d}.jpg'.format(int(pair[1])))\n img2 = os.path.join(\n pair[2], pair[2] + '_{:04d}.jpg'.format(int(pair[3])))\n label = False\n else:\n assert False, pair\n self._pairs.append({'img': [img1, img2], 'label': label})\n\n print('Number of pairs: {}'.format(len(self._pairs)))\n\nif __name__ == '__main__':\n\n pairs = lfwdata()\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
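The pairs.txt convention that lfwdata parses above, restated standalone: three fields mean one identity and two photo numbers (a match), four fields mean two identities (a non-match). The sample lines are illustrative.

import os

def parse_pair(fields):
    if len(fields) == 3:  # same person -> positive pair
        name, a, b = fields
        return (os.path.join(name, '%s_%04d.jpg' % (name, int(a))),
                os.path.join(name, '%s_%04d.jpg' % (name, int(b))), True)
    n1, a, n2, b = fields  # two people -> negative pair
    return (os.path.join(n1, '%s_%04d.jpg' % (n1, int(a))),
            os.path.join(n2, '%s_%04d.jpg' % (n2, int(b))), False)

print(parse_pair('Aaron_Peirsol 1 2'.split()))
print(parse_pair('Aaron_Peirsol 1 Zico 1'.split()))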
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
dependencies = [('accounts', '0011_auto_20201104_0936')]
operations = [migrations.AddField(model_name='users', name='isadmin',
field=models.IntegerField(default=0)), migrations.AlterField(
model_name='users', name='created_at', field=models.DateTimeField(
default='2020-11-05 16:33:16'))]
<|reserved_special_token_1|>
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [('accounts', '0011_auto_20201104_0936')]
operations = [migrations.AddField(model_name='users', name='isadmin',
field=models.IntegerField(default=0)), migrations.AlterField(
model_name='users', name='created_at', field=models.DateTimeField(
default='2020-11-05 16:33:16'))]
<|reserved_special_token_1|>
# Generated by Django 2.2 on 2020-11-05 16:33
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0011_auto_20201104_0936'),
]
operations = [
migrations.AddField(
model_name='users',
name='isadmin',
field=models.IntegerField(default=0),
),
migrations.AlterField(
model_name='users',
name='created_at',
field=models.DateTimeField(default='2020-11-05 16:33:16'),
),
]
|
flexible
|
{
"blob_id": "37f610457e51599a29168accd95eaa6699c6f777",
"index": 677,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('accounts', '0011_auto_20201104_0936')]\n operations = [migrations.AddField(model_name='users', name='isadmin',\n field=models.IntegerField(default=0)), migrations.AlterField(\n model_name='users', name='created_at', field=models.DateTimeField(\n default='2020-11-05 16:33:16'))]\n",
"step-4": "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('accounts', '0011_auto_20201104_0936')]\n operations = [migrations.AddField(model_name='users', name='isadmin',\n field=models.IntegerField(default=0)), migrations.AlterField(\n model_name='users', name='created_at', field=models.DateTimeField(\n default='2020-11-05 16:33:16'))]\n",
"step-5": "# Generated by Django 2.2 on 2020-11-05 16:33\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('accounts', '0011_auto_20201104_0936'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='users',\n name='isadmin',\n field=models.IntegerField(default=0),\n ),\n migrations.AlterField(\n model_name='users',\n name='created_at',\n field=models.DateTimeField(default='2020-11-05 16:33:16'),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# code below
#taking filename as pyscript.py
from distutils.core import setup
import py2exe
setup(console=['pyscript.py'])
# command to run
# python setup.py py2exe
|
normal
|
{
"blob_id": "9fbf994cb99369ba0c20383007ce52c99248bacf",
"index": 8820,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nsetup(console=['pyscript.py'])\n",
"step-3": "from distutils.core import setup\nimport py2exe\nsetup(console=['pyscript.py'])\n",
"step-4": "\n# code below \n#taking filename as pyscript.py \n\nfrom distutils.core import setup \n\n\nimport py2exe \n\nsetup(console=['pyscript.py'])\n\n\n\n# command to run \n# python setup.py pytoexe \n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
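py2exe's setup() also accepts an options block; a sketch of the commonly documented single-file build, worth verifying against the installed py2exe version.

# Sketch: classic py2exe single-file options; verify against your version.
from distutils.core import setup
import py2exe  # registers the 'py2exe' distutils command

setup(
    console=['pyscript.py'],
    options={'py2exe': {'bundle_files': 1, 'compressed': True}},
    zipfile=None,
)
# build with: python setup.py py2exe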
<|reserved_special_token_0|>
def gn_helper(planes):
return nn.GroupNorm(args.group_norm, planes)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
parser.add_argument('--dataroot', default='data/CIFAR-10-C/')
parser.add_argument('--shared', default=None)
parser.add_argument('--depth', default=18, type=int)
parser.add_argument('--group_norm', default=32, type=int)
parser.add_argument('--batch_size', default=32, type=int)
parser.add_argument('--workers', default=8, type=int)
parser.add_argument('--lr', default=0.001, type=float)
parser.add_argument('--niter', default=1, type=int)
parser.add_argument('--online', action='store_true')
parser.add_argument('--shuffle', action='store_true')
parser.add_argument('--threshold', default=1, type=float)
parser.add_argument('--epsilon', default=0.2, type=float)
parser.add_argument('--dset_size', default=0, type=int)
parser.add_argument('--resume', default=None)
parser.add_argument('--outf', default='.')
parser.add_argument('--epochs', default=10, type=int)
<|reserved_special_token_0|>
args.threshold += 0.001
my_makedir(args.outf)
<|reserved_special_token_0|>
def gn_helper(planes):
return nn.GroupNorm(args.group_norm, planes)
<|reserved_special_token_0|>
print('Resuming from %s...' % args.resume)
<|reserved_special_token_0|>
net.load_state_dict(ckpt['net'])
print('Starting Test Error: %.3f' % ckpt['err_cls'])
<|reserved_special_token_0|>
print('Lethean Attack')
for i in range(args.epochs):
idx = random.randint(0, len(trset) - 1)
img, lbl = trset[idx]
random_rot = random.randint(1, 3)
rot_img = rotate_single_with_label(img, random_rot)
adapt_single_tensor(net, rot_img, optimizer, criterion, args.niter,
args.batch_size)
if i % 50 == 49:
print('%d%%' % ((i + 1) * 100 / 5000))
err_cls, correct_per_cls, total_per_cls = test(teloader, net,
verbose=True, print_freq=0)
print('Epoch %d Test error: %.3f' % (i, err_cls))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
device = 'cuda' if torch.cuda.is_available() else 'cpu'
classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse',
'ship', 'truck')
parser = argparse.ArgumentParser()
parser.add_argument('--dataroot', default='data/CIFAR-10-C/')
parser.add_argument('--shared', default=None)
parser.add_argument('--depth', default=18, type=int)
parser.add_argument('--group_norm', default=32, type=int)
parser.add_argument('--batch_size', default=32, type=int)
parser.add_argument('--workers', default=8, type=int)
parser.add_argument('--lr', default=0.001, type=float)
parser.add_argument('--niter', default=1, type=int)
parser.add_argument('--online', action='store_true')
parser.add_argument('--shuffle', action='store_true')
parser.add_argument('--threshold', default=1, type=float)
parser.add_argument('--epsilon', default=0.2, type=float)
parser.add_argument('--dset_size', default=0, type=int)
parser.add_argument('--resume', default=None)
parser.add_argument('--outf', default='.')
parser.add_argument('--epochs', default=10, type=int)
args = parser.parse_args()
args.threshold += 0.001
my_makedir(args.outf)
<|reserved_special_token_0|>
cudnn.benchmark = True
def gn_helper(planes):
return nn.GroupNorm(args.group_norm, planes)
norm_layer = gn_helper
net = resnet18(num_classes=10, norm_layer=norm_layer).to(device)
net = torch.nn.DataParallel(net)
print('Resuming from %s...' % args.resume)
ckpt = torch.load('%s/best.pth' % args.resume)
net.load_state_dict(ckpt['net'])
print('Starting Test Error: %.3f' % ckpt['err_cls'])
criterion = nn.CrossEntropyLoss().to(device)
optimizer = optim.SGD(net.parameters(), lr=args.lr)
trset, trloader = prepare_train_data(args)
teset, teloader = prepare_test_data(args)
print('Lethean Attack')
for i in range(args.epochs):
idx = random.randint(0, len(trset) - 1)
img, lbl = trset[idx]
random_rot = random.randint(1, 3)
rot_img = rotate_single_with_label(img, random_rot)
adapt_single_tensor(net, rot_img, optimizer, criterion, args.niter,
args.batch_size)
if i % 50 == 49:
print('%d%%' % ((i + 1) * 100 / 5000))
err_cls, correct_per_cls, total_per_cls = test(teloader, net,
verbose=True, print_freq=0)
print('Epoch %d Test error: %.3f' % (i, err_cls))
<|reserved_special_token_1|>
from __future__ import print_function
import argparse
import torch
import torch.nn as nn
import torch.optim as optim
import random
from utils.misc import *
from utils.adapt_helpers import *
from utils.rotation import rotate_batch, rotate_single_with_label
from utils.model import resnet18
from utils.train_helpers import normalize, te_transforms
from utils.test_helpers import test
device = 'cuda' if torch.cuda.is_available() else 'cpu'
classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse',
'ship', 'truck')
parser = argparse.ArgumentParser()
parser.add_argument('--dataroot', default='data/CIFAR-10-C/')
parser.add_argument('--shared', default=None)
parser.add_argument('--depth', default=18, type=int)
parser.add_argument('--group_norm', default=32, type=int)
parser.add_argument('--batch_size', default=32, type=int)
parser.add_argument('--workers', default=8, type=int)
parser.add_argument('--lr', default=0.001, type=float)
parser.add_argument('--niter', default=1, type=int)
parser.add_argument('--online', action='store_true')
parser.add_argument('--shuffle', action='store_true')
parser.add_argument('--threshold', default=1, type=float)
parser.add_argument('--epsilon', default=0.2, type=float)
parser.add_argument('--dset_size', default=0, type=int)
parser.add_argument('--resume', default=None)
parser.add_argument('--outf', default='.')
parser.add_argument('--epochs', default=10, type=int)
args = parser.parse_args()
args.threshold += 0.001
my_makedir(args.outf)
import torch.backends.cudnn as cudnn
cudnn.benchmark = True
def gn_helper(planes):
return nn.GroupNorm(args.group_norm, planes)
norm_layer = gn_helper
net = resnet18(num_classes=10, norm_layer=norm_layer).to(device)
net = torch.nn.DataParallel(net)
print('Resuming from %s...' % args.resume)
ckpt = torch.load('%s/best.pth' % args.resume)
net.load_state_dict(ckpt['net'])
print('Starting Test Error: %.3f' % ckpt['err_cls'])
criterion = nn.CrossEntropyLoss().to(device)
optimizer = optim.SGD(net.parameters(), lr=args.lr)
trset, trloader = prepare_train_data(args)
teset, teloader = prepare_test_data(args)
print('Lethean Attack')
for i in range(args.epochs):
idx = random.randint(0, len(trset) - 1)
img, lbl = trset[idx]
random_rot = random.randint(1, 3)
rot_img = rotate_single_with_label(img, random_rot)
adapt_single_tensor(net, rot_img, optimizer, criterion, args.niter,
args.batch_size)
if i % 50 == 49:
print('%d%%' % ((i + 1) * 100 / 5000))
err_cls, correct_per_cls, total_per_cls = test(teloader, net,
verbose=True, print_freq=0)
print('Epoch %d Test error: %.3f' % (i, err_cls))
<|reserved_special_token_1|>
from __future__ import print_function
import argparse
import torch
import torch.nn as nn
import torch.optim as optim
import random
from utils.misc import *
from utils.adapt_helpers import *
from utils.rotation import rotate_batch, rotate_single_with_label
from utils.model import resnet18
from utils.train_helpers import normalize, te_transforms
from utils.test_helpers import test
device = 'cuda' if torch.cuda.is_available() else 'cpu'
classes = ('plane', 'car', 'bird', 'cat',
'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
parser = argparse.ArgumentParser()
parser.add_argument('--dataroot', default='data/CIFAR-10-C/')
parser.add_argument('--shared', default=None)
########################################################################
parser.add_argument('--depth', default=18, type=int)
parser.add_argument('--group_norm', default=32, type=int)
parser.add_argument('--batch_size', default=32, type=int)
parser.add_argument('--workers', default=8, type=int)
########################################################################
parser.add_argument('--lr', default=0.001, type=float)
parser.add_argument('--niter', default=1, type=int)
parser.add_argument('--online', action='store_true')
parser.add_argument('--shuffle', action='store_true')
parser.add_argument('--threshold', default=1, type=float)
parser.add_argument('--epsilon', default=0.2, type=float)
parser.add_argument('--dset_size', default=0, type=int)
########################################################################
parser.add_argument('--resume', default=None)
parser.add_argument('--outf', default='.')
parser.add_argument('--epochs', default=10, type=int)
args = parser.parse_args()
args.threshold += 0.001 # to correct for numeric errors
my_makedir(args.outf)
import torch.backends.cudnn as cudnn
cudnn.benchmark = True
def gn_helper(planes):
return nn.GroupNorm(args.group_norm, planes)
norm_layer = gn_helper
net = resnet18(num_classes = 10, norm_layer=norm_layer).to(device)
net = torch.nn.DataParallel(net)
print('Resuming from %s...' %(args.resume))
ckpt = torch.load('%s/best.pth' %(args.resume))
net.load_state_dict(ckpt['net'])
print("Starting Test Error: %.3f" % ckpt['err_cls'])
criterion = nn.CrossEntropyLoss().to(device)
optimizer = optim.SGD(net.parameters(), lr=args.lr)
trset, trloader = prepare_train_data(args)
teset, teloader = prepare_test_data(args)
print("Lethean Attack")
for i in range(args.epochs):
idx = random.randint(0, len(trset) - 1)
img, lbl = trset[idx]
random_rot = random.randint(1, 3)
rot_img = rotate_single_with_label(img, random_rot)
adapt_single_tensor(net, rot_img, optimizer, criterion, args.niter, args.batch_size)
if i % 50 == 49:
print("%d%%" % ((i + 1) * 100 / 5000))
err_cls, correct_per_cls, total_per_cls = test(teloader, net, verbose=True, print_freq=0)
print("Epoch %d Test error: %.3f" % (i, err_cls))
|
flexible
|
{
"blob_id": "1f345a20343eb859cb37bf406623c0fc10722357",
"index": 4826,
"step-1": "<mask token>\n\n\ndef gn_helper(planes):\n return nn.GroupNorm(args.group_norm, planes)\n\n\n<mask token>\n",
"step-2": "<mask token>\nparser.add_argument('--dataroot', default='data/CIFAR-10-C/')\nparser.add_argument('--shared', default=None)\nparser.add_argument('--depth', default=18, type=int)\nparser.add_argument('--group_norm', default=32, type=int)\nparser.add_argument('--batch_size', default=32, type=int)\nparser.add_argument('--workers', default=8, type=int)\nparser.add_argument('--lr', default=0.001, type=float)\nparser.add_argument('--niter', default=1, type=int)\nparser.add_argument('--online', action='store_true')\nparser.add_argument('--shuffle', action='store_true')\nparser.add_argument('--threshold', default=1, type=float)\nparser.add_argument('--epsilon', default=0.2, type=float)\nparser.add_argument('--dset_size', default=0, type=int)\nparser.add_argument('--resume', default=None)\nparser.add_argument('--outf', default='.')\nparser.add_argument('--epochs', default=10, type=int)\n<mask token>\nargs.threshold += 0.001\nmy_makedir(args.outf)\n<mask token>\n\n\ndef gn_helper(planes):\n return nn.GroupNorm(args.group_norm, planes)\n\n\n<mask token>\nprint('Resuming from %s...' % args.resume)\n<mask token>\nnet.load_state_dict(ckpt['net'])\nprint('Starting Test Error: %.3f' % ckpt['err_cls'])\n<mask token>\nprint('Lethean Attack')\nfor i in range(args.epochs):\n idx = random.randint(0, len(trset) - 1)\n img, lbl = trset[idx]\n random_rot = random.randint(1, 3)\n rot_img = rotate_single_with_label(img, random_rot)\n adapt_single_tensor(net, rot_img, optimizer, criterion, args.niter,\n args.batch_size)\n if i % 50 == 49:\n print('%d%%' % ((i + 1) * 100 / 5000))\n err_cls, correct_per_cls, total_per_cls = test(teloader, net,\n verbose=True, print_freq=0)\n print('Epoch %d Test error: %.3f' % (i, err_cls))\n",
"step-3": "<mask token>\ndevice = 'cuda' if torch.cuda.is_available() else 'cpu'\nclasses = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse',\n 'ship', 'truck')\nparser = argparse.ArgumentParser()\nparser.add_argument('--dataroot', default='data/CIFAR-10-C/')\nparser.add_argument('--shared', default=None)\nparser.add_argument('--depth', default=18, type=int)\nparser.add_argument('--group_norm', default=32, type=int)\nparser.add_argument('--batch_size', default=32, type=int)\nparser.add_argument('--workers', default=8, type=int)\nparser.add_argument('--lr', default=0.001, type=float)\nparser.add_argument('--niter', default=1, type=int)\nparser.add_argument('--online', action='store_true')\nparser.add_argument('--shuffle', action='store_true')\nparser.add_argument('--threshold', default=1, type=float)\nparser.add_argument('--epsilon', default=0.2, type=float)\nparser.add_argument('--dset_size', default=0, type=int)\nparser.add_argument('--resume', default=None)\nparser.add_argument('--outf', default='.')\nparser.add_argument('--epochs', default=10, type=int)\nargs = parser.parse_args()\nargs.threshold += 0.001\nmy_makedir(args.outf)\n<mask token>\ncudnn.benchmark = True\n\n\ndef gn_helper(planes):\n return nn.GroupNorm(args.group_norm, planes)\n\n\nnorm_layer = gn_helper\nnet = resnet18(num_classes=10, norm_layer=norm_layer).to(device)\nnet = torch.nn.DataParallel(net)\nprint('Resuming from %s...' % args.resume)\nckpt = torch.load('%s/best.pth' % args.resume)\nnet.load_state_dict(ckpt['net'])\nprint('Starting Test Error: %.3f' % ckpt['err_cls'])\ncriterion = nn.CrossEntropyLoss().to(device)\noptimizer = optim.SGD(net.parameters(), lr=args.lr)\ntrset, trloader = prepare_train_data(args)\nteset, teloader = prepare_test_data(args)\nprint('Lethean Attack')\nfor i in range(args.epochs):\n idx = random.randint(0, len(trset) - 1)\n img, lbl = trset[idx]\n random_rot = random.randint(1, 3)\n rot_img = rotate_single_with_label(img, random_rot)\n adapt_single_tensor(net, rot_img, optimizer, criterion, args.niter,\n args.batch_size)\n if i % 50 == 49:\n print('%d%%' % ((i + 1) * 100 / 5000))\n err_cls, correct_per_cls, total_per_cls = test(teloader, net,\n verbose=True, print_freq=0)\n print('Epoch %d Test error: %.3f' % (i, err_cls))\n",
"step-4": "from __future__ import print_function\nimport argparse\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport random\nfrom utils.misc import *\nfrom utils.adapt_helpers import *\nfrom utils.rotation import rotate_batch, rotate_single_with_label\nfrom utils.model import resnet18\nfrom utils.train_helpers import normalize, te_transforms\nfrom utils.test_helpers import test\ndevice = 'cuda' if torch.cuda.is_available() else 'cpu'\nclasses = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse',\n 'ship', 'truck')\nparser = argparse.ArgumentParser()\nparser.add_argument('--dataroot', default='data/CIFAR-10-C/')\nparser.add_argument('--shared', default=None)\nparser.add_argument('--depth', default=18, type=int)\nparser.add_argument('--group_norm', default=32, type=int)\nparser.add_argument('--batch_size', default=32, type=int)\nparser.add_argument('--workers', default=8, type=int)\nparser.add_argument('--lr', default=0.001, type=float)\nparser.add_argument('--niter', default=1, type=int)\nparser.add_argument('--online', action='store_true')\nparser.add_argument('--shuffle', action='store_true')\nparser.add_argument('--threshold', default=1, type=float)\nparser.add_argument('--epsilon', default=0.2, type=float)\nparser.add_argument('--dset_size', default=0, type=int)\nparser.add_argument('--resume', default=None)\nparser.add_argument('--outf', default='.')\nparser.add_argument('--epochs', default=10, type=int)\nargs = parser.parse_args()\nargs.threshold += 0.001\nmy_makedir(args.outf)\nimport torch.backends.cudnn as cudnn\ncudnn.benchmark = True\n\n\ndef gn_helper(planes):\n return nn.GroupNorm(args.group_norm, planes)\n\n\nnorm_layer = gn_helper\nnet = resnet18(num_classes=10, norm_layer=norm_layer).to(device)\nnet = torch.nn.DataParallel(net)\nprint('Resuming from %s...' % args.resume)\nckpt = torch.load('%s/best.pth' % args.resume)\nnet.load_state_dict(ckpt['net'])\nprint('Starting Test Error: %.3f' % ckpt['err_cls'])\ncriterion = nn.CrossEntropyLoss().to(device)\noptimizer = optim.SGD(net.parameters(), lr=args.lr)\ntrset, trloader = prepare_train_data(args)\nteset, teloader = prepare_test_data(args)\nprint('Lethean Attack')\nfor i in range(args.epochs):\n idx = random.randint(0, len(trset) - 1)\n img, lbl = trset[idx]\n random_rot = random.randint(1, 3)\n rot_img = rotate_single_with_label(img, random_rot)\n adapt_single_tensor(net, rot_img, optimizer, criterion, args.niter,\n args.batch_size)\n if i % 50 == 49:\n print('%d%%' % ((i + 1) * 100 / 5000))\n err_cls, correct_per_cls, total_per_cls = test(teloader, net,\n verbose=True, print_freq=0)\n print('Epoch %d Test error: %.3f' % (i, err_cls))\n",
"step-5": "from __future__ import print_function\nimport argparse\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport random\n\nfrom utils.misc import *\nfrom utils.adapt_helpers import *\nfrom utils.rotation import rotate_batch, rotate_single_with_label\nfrom utils.model import resnet18\nfrom utils.train_helpers import normalize, te_transforms\nfrom utils.test_helpers import test\n\ndevice = 'cuda' if torch.cuda.is_available() else 'cpu'\n\nclasses = ('plane', 'car', 'bird', 'cat',\n 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--dataroot', default='data/CIFAR-10-C/')\nparser.add_argument('--shared', default=None)\n########################################################################\nparser.add_argument('--depth', default=18, type=int)\nparser.add_argument('--group_norm', default=32, type=int)\nparser.add_argument('--batch_size', default=32, type=int)\nparser.add_argument('--workers', default=8, type=int)\n########################################################################\nparser.add_argument('--lr', default=0.001, type=float)\nparser.add_argument('--niter', default=1, type=int)\nparser.add_argument('--online', action='store_true')\nparser.add_argument('--shuffle', action='store_true')\nparser.add_argument('--threshold', default=1, type=float)\nparser.add_argument('--epsilon', default=0.2, type=float)\nparser.add_argument('--dset_size', default=0, type=int)\n########################################################################\nparser.add_argument('--resume', default=None)\nparser.add_argument('--outf', default='.')\nparser.add_argument('--epochs', default=10, type=int)\n\nargs = parser.parse_args()\nargs.threshold += 0.001\t\t# to correct for numeric errors\nmy_makedir(args.outf)\nimport torch.backends.cudnn as cudnn\ncudnn.benchmark = True\n\ndef gn_helper(planes):\n return nn.GroupNorm(args.group_norm, planes)\nnorm_layer = gn_helper\n\nnet = resnet18(num_classes = 10, norm_layer=norm_layer).to(device)\nnet = torch.nn.DataParallel(net)\n\nprint('Resuming from %s...' %(args.resume))\nckpt = torch.load('%s/best.pth' %(args.resume))\nnet.load_state_dict(ckpt['net'])\nprint(\"Starting Test Error: %.3f\" % ckpt['err_cls'])\n\ncriterion = nn.CrossEntropyLoss().to(device)\noptimizer = optim.SGD(net.parameters(), lr=args.lr)\n\ntrset, trloader = prepare_train_data(args)\nteset, teloader = prepare_test_data(args)\n\nprint(\"Lethean Attack\")\nfor i in range(args.epochs):\n idx = random.randint(0, len(trset) - 1)\n img, lbl = trset[idx]\n random_rot = random.randint(1, 3)\n rot_img = rotate_single_with_label(img, random_rot)\n adapt_single_tensor(net, rot_img, optimizer, criterion, args.niter, args.batch_size)\n\n if i % 50 == 49:\n print(\"%d%%\" % ((i + 1) * 100 / 5000))\n err_cls, correct_per_cls, total_per_cls = test(teloader, net, verbose=True, print_freq=0)\n print(\"Epoch %d Test error: %.3f\" % (i, err_cls))\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
from __future__ import absolute_import
from . import utils
from . import bert_model
from . import transformer
from .utils import *
from .bert_model import *
from .transformer import *
|
normal
|
{
"blob_id": "6415b08795975698e8e2019cafb82561b35f8e71",
"index": 2037,
"step-1": "<mask token>\n",
"step-2": "from __future__ import absolute_import\nfrom . import utils\nfrom . import bert_model\nfrom . import transformer\nfrom .utils import *\nfrom .bert_model import *\nfrom .transformer import *\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
from .standup import *
from .auth_register import *
from .channels_create import *
import pytest
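# Each test resets the DB first so registered users and channels do not leak between tests.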
# If the channel does not exist
def test_notExisting_channel():
db.reset_DB()
auth_register('testmail@gmail.com', 'pas123456', 'Bob', 'Smith')
realtoken = Token.generateToken('testmail@gmail.com')
fake_channel = 70
with pytest.raises(ValueError):
standup_start(realtoken, fake_channel, 5)
# If the channel exists
def test_existing_channel_1():
db.reset_DB()
auth_register('testmail@gmail.com', 'pas123456', 'Bob', 'Smith')
realtoken = Token.generateToken('testmail@gmail.com')
channel_id = channels_create(realtoken,'Channel', True)
assert(standup_start(realtoken, 1, 5))
# If the user is not a member of the channel
def test_message_not_member():
db.reset_DB()
admintoken = Token.generateToken('admin@gmail.com')
channel_id = channels_create(admintoken,'Channel', True)
auth_register('testmail@gmail.com', 'pas123456', 'Bob', 'Smith')
realtoken = Token.generateToken('testmail@gmail.com')
with pytest.raises(AccessError):
standup_start(realtoken, 1, 5)
# If the channel exists and the user is a member of the channel
def test_existing_channel_2():
db.reset_DB()
auth_register('testmail@gmail.com', 'pas123456', 'Bob', 'Smith')
realtoken = Token.generateToken('testmail@gmail.com')
channel_id = channels_create(realtoken,'Channel', True)
assert(standup_start(realtoken, 1, 5))
|
normal
|
{
"blob_id": "b6715ad42d59720eb021973394a0b7bfd540181b",
"index": 4338,
"step-1": "<mask token>\n\n\ndef test_notExisting_channel():\n db.reset_DB()\n auth_register('testmail@gmail.com', 'pas123456', 'Bob', 'Smith')\n realtoken = Token.generateToken('testmail@gmail.com')\n fake_channel = 70\n with pytest.raises(ValueError):\n standup_start(realtoken, fake_channel, 5)\n\n\n<mask token>\n\n\ndef test_message_not_member():\n db.reset_DB()\n admintoken = Token.generateToken('admin@gmail.com')\n channel_id = channels_create(admintoken, 'Channel', True)\n auth_register('testmail@gmail.com', 'pas123456', 'Bob', 'Smith')\n realtoken = Token.generateToken('testmail@gmail.com')\n with pytest.raises(AccessError):\n standup_start(realtoken, 1, 5)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef test_notExisting_channel():\n db.reset_DB()\n auth_register('testmail@gmail.com', 'pas123456', 'Bob', 'Smith')\n realtoken = Token.generateToken('testmail@gmail.com')\n fake_channel = 70\n with pytest.raises(ValueError):\n standup_start(realtoken, fake_channel, 5)\n\n\ndef test_existing_channel_1():\n db.reset_DB()\n auth_register('testmail@gmail.com', 'pas123456', 'Bob', 'Smith')\n realtoken = Token.generateToken('testmail@gmail.com')\n channel_id = channels_create(realtoken, 'Channel', True)\n assert standup_start(realtoken, 1, 5)\n\n\ndef test_message_not_member():\n db.reset_DB()\n admintoken = Token.generateToken('admin@gmail.com')\n channel_id = channels_create(admintoken, 'Channel', True)\n auth_register('testmail@gmail.com', 'pas123456', 'Bob', 'Smith')\n realtoken = Token.generateToken('testmail@gmail.com')\n with pytest.raises(AccessError):\n standup_start(realtoken, 1, 5)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef test_notExisting_channel():\n db.reset_DB()\n auth_register('testmail@gmail.com', 'pas123456', 'Bob', 'Smith')\n realtoken = Token.generateToken('testmail@gmail.com')\n fake_channel = 70\n with pytest.raises(ValueError):\n standup_start(realtoken, fake_channel, 5)\n\n\ndef test_existing_channel_1():\n db.reset_DB()\n auth_register('testmail@gmail.com', 'pas123456', 'Bob', 'Smith')\n realtoken = Token.generateToken('testmail@gmail.com')\n channel_id = channels_create(realtoken, 'Channel', True)\n assert standup_start(realtoken, 1, 5)\n\n\ndef test_message_not_member():\n db.reset_DB()\n admintoken = Token.generateToken('admin@gmail.com')\n channel_id = channels_create(admintoken, 'Channel', True)\n auth_register('testmail@gmail.com', 'pas123456', 'Bob', 'Smith')\n realtoken = Token.generateToken('testmail@gmail.com')\n with pytest.raises(AccessError):\n standup_start(realtoken, 1, 5)\n\n\ndef test_existing_channel_2():\n db.reset_DB()\n auth_register('testmail@gmail.com', 'pas123456', 'Bob', 'Smith')\n realtoken = Token.generateToken('testmail@gmail.com')\n channel_id = channels_create(realtoken, 'Channel', True)\n assert standup_start(realtoken, 1, 5)\n",
"step-4": "from .standup import *\nfrom .auth_register import *\nfrom .channels_create import *\nimport pytest\n\n\ndef test_notExisting_channel():\n db.reset_DB()\n auth_register('testmail@gmail.com', 'pas123456', 'Bob', 'Smith')\n realtoken = Token.generateToken('testmail@gmail.com')\n fake_channel = 70\n with pytest.raises(ValueError):\n standup_start(realtoken, fake_channel, 5)\n\n\ndef test_existing_channel_1():\n db.reset_DB()\n auth_register('testmail@gmail.com', 'pas123456', 'Bob', 'Smith')\n realtoken = Token.generateToken('testmail@gmail.com')\n channel_id = channels_create(realtoken, 'Channel', True)\n assert standup_start(realtoken, 1, 5)\n\n\ndef test_message_not_member():\n db.reset_DB()\n admintoken = Token.generateToken('admin@gmail.com')\n channel_id = channels_create(admintoken, 'Channel', True)\n auth_register('testmail@gmail.com', 'pas123456', 'Bob', 'Smith')\n realtoken = Token.generateToken('testmail@gmail.com')\n with pytest.raises(AccessError):\n standup_start(realtoken, 1, 5)\n\n\ndef test_existing_channel_2():\n db.reset_DB()\n auth_register('testmail@gmail.com', 'pas123456', 'Bob', 'Smith')\n realtoken = Token.generateToken('testmail@gmail.com')\n channel_id = channels_create(realtoken, 'Channel', True)\n assert standup_start(realtoken, 1, 5)\n",
"step-5": "from .standup import *\r\nfrom .auth_register import *\r\nfrom .channels_create import *\r\nimport pytest\r\n\r\n# If channel does not exist\r\ndef test_notExisting_channel():\r\n\tdb.reset_DB()\r\n\tauth_register('testmail@gmail.com', 'pas123456', 'Bob', 'Smith')\r\n\trealtoken = Token.generateToken('testmail@gmail.com')\r\n\tfake_channel = 70\r\n\twith pytest.raises(ValueError):\r\n\t\tstandup_start(realtoken, fake_channel, 5)\r\n\r\n# If channel does exist\r\ndef test_existing_channel_1():\r\n\tdb.reset_DB()\r\n\tauth_register('testmail@gmail.com', 'pas123456', 'Bob', 'Smith')\r\n\trealtoken = Token.generateToken('testmail@gmail.com')\r\n\tchannel_id = channels_create(realtoken,'Channel', True)\r\n\tassert(standup_start(realtoken, 1, 5))\r\n\r\n# if the user is not a member of the channel\r\ndef test_message_not_member():\r\n\tdb.reset_DB()\r\n\tadmintoken = Token.generateToken('admin@gmail.com')\r\n\tchannel_id = channels_create(admintoken,'Channel', True)\r\n\tauth_register('testmail@gmail.com', 'pas123456', 'Bob', 'Smith')\r\n\trealtoken = Token.generateToken('testmail@gmail.com')\r\n\twith pytest.raises(AccessError):\r\n\t\tstandup_start(realtoken, 1, 5)\r\n\r\n# If channel does exist and user member of channel\r\ndef test_existing_channel_2():\r\n\tdb.reset_DB()\r\n\tauth_register('testmail@gmail.com', 'pas123456', 'Bob', 'Smith')\r\n\trealtoken = Token.generateToken('testmail@gmail.com')\r\n\tchannel_id = channels_create(realtoken,'Channel', True)\r\n\tassert(standup_start(realtoken, 1, 5))\r\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def eq(df1, df2, precision=0.5) ->bool:
"""Compare two dataframes by element with precision margin."""
return ((df1 - df2).abs() < precision).all()
<|reserved_special_token_0|>
doc.add_image('res_use.png', 'png', width=1)
doc.show()
<|reserved_special_token_0|>
assert eq(resources, uses + df.desc)
<|reserved_special_token_0|>
assert eq(gdp1, gdp2)
assert eq(gdp2, df.GDP)
assert eq(gdp3, df.GDP)
<|reserved_special_token_0|>
assert eq(gni.iloc[1:,], df.GNI.iloc[1:,])
<|reserved_special_token_0|>
assert eq(gndi, df.GNDI)
<|reserved_special_token_0|>
assert eq(df.C, df.HH + df.G)
assert eq(S, df.S)
<|reserved_special_token_0|>
assert eq(I, df.I)
<|reserved_special_token_0|>
assert eq(NL, df.NL0)
<|reserved_special_token_0|>
doc.show()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
doc = handout.Handout('handout')
<|reserved_special_token_0|>
def eq(df1, df2, precision=0.5) ->bool:
"""Compare two dataframes by element with precision margin."""
return ((df1 - df2).abs() < precision).all()
<|reserved_special_token_0|>
df = pd.read_csv('data/sna.csv', index_col=0)
<|reserved_special_token_0|>
df['X'] = df.Xb + df.Tp - df.Sp
<|reserved_special_token_0|>
resources = df.X + df.IM
uses = df.AX + df.C + df.I + df.EX
doc.add_image('res_use.png', 'png', width=1)
doc.show()
<|reserved_special_token_0|>
assert eq(resources, uses + df.desc)
<|reserved_special_token_0|>
gdp1 = df.X - df.AX
gdp2 = df.C + df.I - df.IM + df.EX + df.desc
gdp3 = df.W + df.Tf - df.Sf + df.GP
assert eq(gdp1, gdp2)
assert eq(gdp2, df.GDP)
assert eq(gdp3, df.GDP)
<|reserved_special_token_0|>
gni = (df.GDP + df.ROW_property_income_recieved - df.
ROW_property_income_paid + df.ROW_wage_net)
assert eq(gni.iloc[1:,], df.GNI.iloc[1:,])
<|reserved_special_token_0|>
gndi = gni + df.CT_recieved - df.CT_paid
assert eq(gndi, df.GNDI)
<|reserved_special_token_0|>
S = gndi - (df.HH + df.G)
assert eq(df.C, df.HH + df.G)
assert eq(S, df.S)
<|reserved_special_token_0|>
I = df.GFCF + df.inv
assert eq(I, df.I)
<|reserved_special_token_0|>
NL = S + df.d9_recieved - df.d9_paid - I - df.k2
assert eq(NL, df.NL0)
<|reserved_special_token_0|>
doc.show()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import pandas as pd
import handout
doc = handout.Handout('handout')
<|reserved_special_token_0|>
def eq(df1, df2, precision=0.5) ->bool:
"""Compare two dataframes by element with precision margin."""
return ((df1 - df2).abs() < precision).all()
<|reserved_special_token_0|>
df = pd.read_csv('data/sna.csv', index_col=0)
<|reserved_special_token_0|>
df['X'] = df.Xb + df.Tp - df.Sp
<|reserved_special_token_0|>
resources = df.X + df.IM
uses = df.AX + df.C + df.I + df.EX
doc.add_image('res_use.png', 'png', width=1)
doc.show()
<|reserved_special_token_0|>
assert eq(resources, uses + df.desc)
<|reserved_special_token_0|>
gdp1 = df.X - df.AX
gdp2 = df.C + df.I - df.IM + df.EX + df.desc
gdp3 = df.W + df.Tf - df.Sf + df.GP
assert eq(gdp1, gdp2)
assert eq(gdp2, df.GDP)
assert eq(gdp3, df.GDP)
<|reserved_special_token_0|>
gni = (df.GDP + df.ROW_property_income_recieved - df.
ROW_property_income_paid + df.ROW_wage_net)
assert eq(gni.iloc[1:,], df.GNI.iloc[1:,])
<|reserved_special_token_0|>
gndi = gni + df.CT_recieved - df.CT_paid
assert eq(gndi, df.GNDI)
<|reserved_special_token_0|>
S = gndi - (df.HH + df.G)
assert eq(df.C, df.HH + df.G)
assert eq(S, df.S)
<|reserved_special_token_0|>
I = df.GFCF + df.inv
assert eq(I, df.I)
<|reserved_special_token_0|>
NL = S + df.d9_recieved - df.d9_paid - I - df.k2
assert eq(NL, df.NL0)
<|reserved_special_token_0|>
doc.show()
<|reserved_special_token_1|>
"""
# System of national accounts (SNA)
This is an end-to-end example of the national accounts sequence,
from output to net lending. It is based on Russian Federation data
for 2014-2018.
Below is a python session transcript with comments.
You can fork [a github repo](https://github.com/epogrebnyak/sna-ru)
to replicate calculations.
"""
"""
## Chart
A short mnemonic chart to accompany the calculations:
```
  [controlling for factor income and transfers]
             |      |
             V      V
X -> GDP -> GNI -> GNDI = C + S (+ net capital transfers)
      |                       |
  Ch + I + Cg + NX            S = I + Net lending
      |
   W + t' + P                 Always a mystery:
      |                       S - I = NX = Net lending
    X - AX                    (See Open Economy identities below)
```
"""
"""
## Preparations
"""
import pandas as pd
import handout
doc = handout.Handout("handout") # handout: exclude
"""
The `eq` function checks identities, allowing for some rounding error.
"""
def eq(df1, df2, precision=0.5) -> bool:
"""Compare two dataframes by element with precision margin."""
return ((df1 - df2).abs() < precision).all()
"""
Read dataset from file.
"""
df = pd.read_csv("data/sna.csv", index_col=0)
"""
## 1. Output at market prices
Output at market prices is output at basic prices
plus tax on products less subsidy on products.
"""
df["X"] = df.Xb + df.Tp - df.Sp
"""
## 2. Production of goods and services account
Output and import are resources,
consumption, investment (I) and export are uses.
Consumption is intermediate (AX) and final (C).
"""
resources = df.X + df.IM
uses = df.AX + df.C + df.I + df.EX
doc.add_image("res_use.png", "png", width=1) # handout: exclude
doc.show() # handout: exclude
"""
Resources and uses are equal, controlling for
[statistical discrepancy](https://www.stat.fi/meta/kas/tilastollinen_e_en.html).
"""
assert eq(resources, uses + df.desc)
"""
## 3. Gross domestic product (GDP)
There are three ways to calculate GDP.
With some luck they yield similar values.
"""
gdp1 = df.X - df.AX
gdp2 = (df.C + df.I - df.IM) + df.EX + df.desc
gdp3 = df.W + df.Tf - df.Sf + df.GP
assert eq(gdp1, gdp2)
assert eq(gdp2, df.GDP)
assert eq(gdp3, df.GDP)
"""```
>> gdp1.divide(10**6).round(1)
2014 79.1
2015 83.1
2016 86.0
2017 92.1
2018 103.9
```"""
"""
## 4. Controlling for income and current transfers from abroad
Gross national income (GNI) is GDP and
net property and labor ("factor") income
from the rest of the world (ROW).
"""
gni = (
df.GDP
+ df.ROW_property_income_recieved
- df.ROW_property_income_paid
+ df.ROW_wage_net
)
assert eq(gni.iloc[1:,], df.GNI.iloc[1:,])
"""
Gross national disposable income (GNDI)
is GNI plus net current transfers from abroad.
"""
gndi = gni + df.CT_recieved - df.CT_paid
assert eq(gndi, df.GNDI)
"""
## 5. Savings
Savings is gross national disposable income
less household and government consumption.
"""
S = gndi - (df.HH + df.G)
assert eq(df.C, df.HH + df.G)
assert eq(S, df.S)
"""
Investment is gross fixed capital formation
and change in inventories.
"""
I = df.GFCF + df.inv
assert eq(I, df.I)
"""
## 6. Net lending
Net lending is S-I, plus the balance of capital transfers,
less the acquisition of non-produced non-material assets (K.2).
"""
NL = S + df.d9_recieved - df.d9_paid - I - df.k2
assert eq(NL, df.NL0)
"""
Net lending is the entry value into the financial account (flow of funds).
It usually contains a statistical error, later netted out in the flow of funds.
"""
"""
## Links
- [SNA 2008 manual](https://unstats.un.org/unsd/nationalaccount/docs/SNA2008.pdf)
- [Russian national accounts data](https://www.gks.ru/folder/210/document/13221)
- [Open economy identities](https://github.com/hisamsabouni/macroLectures/blob/master/lecture_6.pdf)
"""
doc.show() # handout: exclude
|
flexible
|
{
"blob_id": "2d4187ab5d178efa4920110ccef61c608fdb14c0",
"index": 8780,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef eq(df1, df2, precision=0.5) ->bool:\n \"\"\"Compare two dataframes by element with precision margin.\"\"\"\n return ((df1 - df2).abs() < precision).all()\n\n\n<mask token>\ndoc.add_image('res_use.png', 'png', width=1)\ndoc.show()\n<mask token>\nassert eq(resources, uses + df.desc)\n<mask token>\nassert eq(gdp1, gdp2)\nassert eq(gdp2, df.GDP)\nassert eq(gdp3, df.GDP)\n<mask token>\nassert eq(gni.iloc[1:,], df.GNI.iloc[1:,])\n<mask token>\nassert eq(gndi, df.GNDI)\n<mask token>\nassert eq(df.C, df.HH + df.G)\nassert eq(S, df.S)\n<mask token>\nassert eq(I, df.I)\n<mask token>\nassert eq(NL, df.NL0)\n<mask token>\ndoc.show()\n",
"step-3": "<mask token>\ndoc = handout.Handout('handout')\n<mask token>\n\n\ndef eq(df1, df2, precision=0.5) ->bool:\n \"\"\"Compare two dataframes by element with precision margin.\"\"\"\n return ((df1 - df2).abs() < precision).all()\n\n\n<mask token>\ndf = pd.read_csv('data/sna.csv', index_col=0)\n<mask token>\ndf['X'] = df.Xb + df.Tp - df.Sp\n<mask token>\nresources = df.X + df.IM\nuses = df.AX + df.C + df.I + df.EX\ndoc.add_image('res_use.png', 'png', width=1)\ndoc.show()\n<mask token>\nassert eq(resources, uses + df.desc)\n<mask token>\ngdp1 = df.X - df.AX\ngdp2 = df.C + df.I - df.IM + df.EX + df.desc\ngdp3 = df.W + df.Tf - df.Sf + df.GP\nassert eq(gdp1, gdp2)\nassert eq(gdp2, df.GDP)\nassert eq(gdp3, df.GDP)\n<mask token>\ngni = (df.GDP + df.ROW_property_income_recieved - df.\n ROW_property_income_paid + df.ROW_wage_net)\nassert eq(gni.iloc[1:,], df.GNI.iloc[1:,])\n<mask token>\ngndi = gni + df.CT_recieved - df.CT_paid\nassert eq(gndi, df.GNDI)\n<mask token>\nS = gndi - (df.HH + df.G)\nassert eq(df.C, df.HH + df.G)\nassert eq(S, df.S)\n<mask token>\nI = df.GFCF + df.inv\nassert eq(I, df.I)\n<mask token>\nNL = S + df.d9_recieved - df.d9_paid - I - df.k2\nassert eq(NL, df.NL0)\n<mask token>\ndoc.show()\n",
"step-4": "<mask token>\nimport pandas as pd\nimport handout\ndoc = handout.Handout('handout')\n<mask token>\n\n\ndef eq(df1, df2, precision=0.5) ->bool:\n \"\"\"Compare two dataframes by element with precision margin.\"\"\"\n return ((df1 - df2).abs() < precision).all()\n\n\n<mask token>\ndf = pd.read_csv('data/sna.csv', index_col=0)\n<mask token>\ndf['X'] = df.Xb + df.Tp - df.Sp\n<mask token>\nresources = df.X + df.IM\nuses = df.AX + df.C + df.I + df.EX\ndoc.add_image('res_use.png', 'png', width=1)\ndoc.show()\n<mask token>\nassert eq(resources, uses + df.desc)\n<mask token>\ngdp1 = df.X - df.AX\ngdp2 = df.C + df.I - df.IM + df.EX + df.desc\ngdp3 = df.W + df.Tf - df.Sf + df.GP\nassert eq(gdp1, gdp2)\nassert eq(gdp2, df.GDP)\nassert eq(gdp3, df.GDP)\n<mask token>\ngni = (df.GDP + df.ROW_property_income_recieved - df.\n ROW_property_income_paid + df.ROW_wage_net)\nassert eq(gni.iloc[1:,], df.GNI.iloc[1:,])\n<mask token>\ngndi = gni + df.CT_recieved - df.CT_paid\nassert eq(gndi, df.GNDI)\n<mask token>\nS = gndi - (df.HH + df.G)\nassert eq(df.C, df.HH + df.G)\nassert eq(S, df.S)\n<mask token>\nI = df.GFCF + df.inv\nassert eq(I, df.I)\n<mask token>\nNL = S + df.d9_recieved - df.d9_paid - I - df.k2\nassert eq(NL, df.NL0)\n<mask token>\ndoc.show()\n",
"step-5": "\"\"\"\n# System of national accounts (SNA)\n\nThis is an end-to-end example of national accounts sequence, \nfrom output to net lending. It is based on Russian Federation data \nfor 2014-2018. \n\nBelow is a python session transcript with comments. \nYou can fork [a github repo](https://github.com/epogrebnyak/sna-ru) \nto replicate calculations.\n\"\"\"\n\n\"\"\"\n## Chart \n\nA short mnemonic chart to accompaign the calculations:\n \n```\n [controlling for factor income and transfers] \n | |\n V V\nX -> GDP -> GNI -> GNDI = C + S (+ net capital transfers)\n | | \n Ch + I + Cg + NX S = I + Net lending \n |\n W + t' + P Always a mystery:\n | S - I = NX = Net lending \n X - AX (See Open Economy identitites below) \n\n```\n\"\"\"\n\n\"\"\"\n## Preparations\n\"\"\"\n\nimport pandas as pd\nimport handout\n\ndoc = handout.Handout(\"handout\") # handout: exclude\n\n\"\"\"\n`eq` function will check identities considering some rounding error.\n\"\"\"\n\n\ndef eq(df1, df2, precision=0.5) -> bool:\n \"\"\"Compare two dataframes by element with precision margin.\"\"\"\n return ((df1 - df2).abs() < precision).all()\n\n\n\"\"\"\nRead dataset from file. \n\"\"\"\n\ndf = pd.read_csv(\"data/sna.csv\", index_col=0)\n\n\"\"\"\n## 1. Output at market prices\n\nOutput at market prices is output at basic prices \nplus tax on products less subsidy on products.\n\"\"\"\n\ndf[\"X\"] = df.Xb + df.Tp - df.Sp\n\n\n\"\"\"\n## 2. Production of goods and services account\n\nOutput and import are resources,\nconsumption, investment (I) and export are uses.\nConsumption is intermediate (AX) and final (C).\n\"\"\"\n\nresources = df.X + df.IM\nuses = df.AX + df.C + df.I + df.EX\n\ndoc.add_image(\"res_use.png\", \"png\", width=1) # handout: exclude\ndoc.show() # handout: exclude\n\n\"\"\"\nResources and uses are equal, controlling for \n[statistical discrepancy](https://www.stat.fi/meta/kas/tilastollinen_e_en.html).\n\"\"\"\nassert eq(resources, uses + df.desc)\n\n\n\"\"\"\n## 3. Gross domestic product (GDP)\n\nThere are three ways to calculate a GDP. \n\nWith some luck they yield to similar values.\n\n\"\"\"\n\ngdp1 = df.X - df.AX\ngdp2 = (df.C + df.I - df.IM) + df.EX + df.desc\ngdp3 = df.W + df.Tf - df.Sf + df.GP\n\nassert eq(gdp1, gdp2)\nassert eq(gdp2, df.GDP)\nassert eq(gdp3, df.GDP)\n\n\"\"\"```\n>> gdp1.divide(10**6).round(1)\n\n2014 79.1\n2015 83.1\n2016 86.0\n2017 92.1\n2018 103.9\n\n```\"\"\"\n\n\n\"\"\"\n## 4. Controlling for income and current transfers from abroad\n\nGross national income (GNI) is GDP and \nnet property and labor (\"factor\") income \nform rest of the world (ROW).\n\"\"\"\n\n\ngni = (\n df.GDP\n + df.ROW_property_income_recieved\n - df.ROW_property_income_paid\n + df.ROW_wage_net\n)\nassert eq(gni.iloc[1:,], df.GNI.iloc[1:,])\n\n\"\"\"\n\nGross national disposable income (GNDI) \nis GNI and net current transfers from abroad\n\"\"\"\n\ngndi = gni + df.CT_recieved - df.CT_paid\nassert eq(gndi, df.GNDI)\n\n\"\"\"\n## 5. Savings\n\nSavings is gross domestic income \nless household and government consumption. \n\"\"\"\n\nS = gndi - (df.HH + df.G)\nassert eq(df.C, df.HH + df.G)\nassert eq(S, df.S)\n\n\"\"\"\nInvestment is gross fixed capital formation \nand change in inventories.\n\"\"\"\n\nI = df.GFCF + df.inv\nassert eq(I, df.I)\n\n\"\"\"\n## 6. 
Net lending\n\nNet lending is S-I, and a balance of capital transfers\nand a non-produced non-material asset aquisition (K.2).\n\"\"\"\n\nNL = S + df.d9_recieved - df.d9_paid - I - df.k2\nassert eq(NL, df.NL0)\n\n\"\"\"\nNet lending is an entry value into financial account (flow of funds).\nIs usually contains a statistical error, later netted in flow of funds.\n\"\"\"\n\n\n\"\"\"\n## Links\n\n- [SNA 2008 manual](https://unstats.un.org/unsd/nationalaccount/docs/SNA2008.pdf)\n- [Russian national accounts data](https://www.gks.ru/folder/210/document/13221)\n- [Open economy identitites](https://github.com/hisamsabouni/macroLectures/blob/master/lecture_6.pdf)\n\"\"\"\n\ndoc.show() # handout: exclude\n",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
# -*- coding: utf-8 -*-
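# CPython interns small integers, so both functions print the same id for the literal 10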
def testeum():
a = 10
print(id(a))
def testedois():
a = 10
print(id(a))
|
normal
|
{
"blob_id": "a2e2528f560f6117d4ceeb9cd20d3f6f6b2a30a7",
"index": 213,
"step-1": "<mask token>\n",
"step-2": "def testeum():\n a = 10\n print(id(a))\n\n\n<mask token>\n",
"step-3": "def testeum():\n a = 10\n print(id(a))\n\n\ndef testedois():\n a = 10\n print(id(a))\n",
"step-4": "# -*- coding: utf-8 -*-\ndef testeum():\n a = 10\n print(id(a))\ndef testedois():\n a = 10\n print(id(a))",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
"""
Author : Gülşah Büyük
Date : 17.04.2021
"""
import numpy as np
A = np.array([[22, -41, 2], [61, 17, -18], [-9, 74, -13]])
# For a square matrix A, the QR decomposition factors A into the product of an orthogonal
# matrix Q (satisfying (Q.T)Q = I) and an upper triangular matrix R.
def householder_reflection(A):
# A Householder Reflection is a linear transformation that enables a
# vector to be reflected through a plane or hyperplane.
size = len(A)
    # Set R equal to A, and create Q as an identity matrix of the same size
Q = np.identity(size)
R = np.copy(A)
for i in range(size - 1):
        # x is the part of the i-th column of R from the diagonal down
        x = R[i:, i]
        # e is ||x|| times the first standard basis vector, the target of the reflection
        e = np.zeros_like(x)
        e[0] = np.linalg.norm(x)
        # u = x - e is the reflection direction; v = u / ||u|| is the unit Householder vector
        u = x - e
        v = u / np.linalg.norm(u)
        Q_count = np.identity(size)
        # apply H = I - 2*v*(v.T) to the trailing block of the identity
        Q_count[i:, i:] -= 2.0 * np.outer(v, v)
        # Q_count is now an mxm Householder matrix
        R = np.dot(Q_count, R)  # R = H(n-1)*...*H(2)*H(1)*A
        Q = np.dot(Q, Q_count)  # Q = H(1)*H(2)*...*H(n-1); each H is symmetric and self-inverse
return (Q, R)
(Q, R) = householder_reflection(A)
print("A:")
print(A)
print("Q:")
print(Q)
print("R:")
print(R)
print("A = QR control:")
print(np.dot(Q,R))
|
normal
|
{
"blob_id": "0d1fda864edc73cc6a9853727228c6fa3dfb19a1",
"index": 3039,
"step-1": "<mask token>\n\n\ndef householder_reflection(A):\n size = len(A)\n Q = np.identity(size)\n R = np.copy(A)\n for i in range(size - 1):\n x = R[i:, i]\n e = np.zeros_like(x)\n e[0] = np.linalg.norm(x)\n u = x - e\n v = u / np.linalg.norm(u)\n Q_count = np.identity(size)\n Q_count[i:, i:] -= 2.0 * np.outer(v, v)\n R = np.dot(Q_count, R)\n Q = np.dot(Q, Q_count)\n return Q, R\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef householder_reflection(A):\n size = len(A)\n Q = np.identity(size)\n R = np.copy(A)\n for i in range(size - 1):\n x = R[i:, i]\n e = np.zeros_like(x)\n e[0] = np.linalg.norm(x)\n u = x - e\n v = u / np.linalg.norm(u)\n Q_count = np.identity(size)\n Q_count[i:, i:] -= 2.0 * np.outer(v, v)\n R = np.dot(Q_count, R)\n Q = np.dot(Q, Q_count)\n return Q, R\n\n\n<mask token>\nprint('A:')\nprint(A)\nprint('Q:')\nprint(Q)\nprint('R:')\nprint(R)\nprint('A = QR control:')\nprint(np.dot(Q, R))\n",
"step-3": "<mask token>\nA = np.array([[22, -41, 2], [61, 17, -18], [-9, 74, -13]])\n\n\ndef householder_reflection(A):\n size = len(A)\n Q = np.identity(size)\n R = np.copy(A)\n for i in range(size - 1):\n x = R[i:, i]\n e = np.zeros_like(x)\n e[0] = np.linalg.norm(x)\n u = x - e\n v = u / np.linalg.norm(u)\n Q_count = np.identity(size)\n Q_count[i:, i:] -= 2.0 * np.outer(v, v)\n R = np.dot(Q_count, R)\n Q = np.dot(Q, Q_count)\n return Q, R\n\n\nQ, R = householder_reflection(A)\nprint('A:')\nprint(A)\nprint('Q:')\nprint(Q)\nprint('R:')\nprint(R)\nprint('A = QR control:')\nprint(np.dot(Q, R))\n",
"step-4": "<mask token>\nimport numpy as np\nA = np.array([[22, -41, 2], [61, 17, -18], [-9, 74, -13]])\n\n\ndef householder_reflection(A):\n size = len(A)\n Q = np.identity(size)\n R = np.copy(A)\n for i in range(size - 1):\n x = R[i:, i]\n e = np.zeros_like(x)\n e[0] = np.linalg.norm(x)\n u = x - e\n v = u / np.linalg.norm(u)\n Q_count = np.identity(size)\n Q_count[i:, i:] -= 2.0 * np.outer(v, v)\n R = np.dot(Q_count, R)\n Q = np.dot(Q, Q_count)\n return Q, R\n\n\nQ, R = householder_reflection(A)\nprint('A:')\nprint(A)\nprint('Q:')\nprint(Q)\nprint('R:')\nprint(R)\nprint('A = QR control:')\nprint(np.dot(Q, R))\n",
"step-5": "\"\"\"\r\nAuthor : Gülşah Büyük\r\nDate : 17.04.2021\r\n\"\"\"\r\nimport numpy as np\r\nA = np.array([[22, -41, 2], [61, 17, -18], [-9, 74, -13]])\r\n# For a square matrix A the QR Decomposition converts into the product of an orthogonal matrix Q\r\n# (Q.T)Q= I and an upper triangular matrix R.\r\ndef householder_reflection(A):\r\n # A Householder Reflection is a linear transformation that enables a\r\n # vector to be reflected through a plane or hyperplane.\r\n size = len(A)\r\n # Set R equal to A, and create Q as a identity matrix of the same size\r\n Q = np.identity(size)\r\n R = np.copy(A)\r\n for i in range(size - 1):\r\n # Create the vectors x, e\r\n # x is the ith column of the matrix A\r\n x = R[i:, i]\r\n # e is eigenvector\r\n e = np.zeros_like(x)\r\n e[0] = np.linalg.norm(x)\r\n # Using anonymous functions, we create u and v\r\n # u = x + (sigma)*e\r\n # sigma= -sgn(x[k])(||x||)\r\n u = x - e\r\n # v = u /||u||\r\n v = u / np.linalg.norm(u)\r\n Q_count = np.identity(size)\r\n # Q = I-2*v(v.T)\r\n Q_count[i:, i:] -= 2.0 * np.outer(v, v)\r\n # Q is now mxm householder matrix\r\n R = np.dot(Q_count, R) # R=H(n-1)*...*H(2)*H(1)*A\r\n Q = np.dot(Q, Q_count) # Q=H(n-1)*...*H(2)*H(1) H is the self-inverse matrix\r\n return (Q, R)\r\n\r\n(Q, R) = householder_reflection(A)\r\nprint(\"A:\")\r\nprint(A)\r\n\r\nprint(\"Q:\")\r\nprint(Q)\r\n\r\nprint(\"R:\")\r\nprint(R)\r\n\r\nprint(\"A = QR control:\")\r\nprint(np.dot(Q,R))",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if len(sys.argv) > 2:
n_hidden = tuple([int(x) for x in sys.argv[2:]])
<|reserved_special_token_0|>
if os.environ.has_key('nz'):
nz = int(os.environ['nz'])
if os.environ.has_key('stepsize'):
alpha = float(os.environ['stepsize'])
else:
alpha = 0.0003
if os.environ.has_key('decay1'):
decay1 = float(os.environ['decay1'])
else:
decay1 = 0.1
if os.environ.has_key('decay2'):
decay2 = float(os.environ['decay2'])
else:
decay2 = 0.001
if os.environ.has_key('random_seed'):
seed = 0
if int(os.environ['random_seed']) == 1:
seed = int(time.time())
    if int(os.environ['random_seed']) > 1:
seed = int(os.environ['random_seed'])
color.printRed('random_seed ' + str(seed))
else:
seed = int(time.time())
color.printRed('random_seed ' + str(seed))
gpulearn_mm_z_x.main(dataset=sys.argv[1], n_z=nz, n_hidden=n_hidden, seed=
seed, comment='', alpha=alpha, decay1=decay1, decay2=decay2, gfx=True)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
n_hidden = 500, 500
if len(sys.argv) > 2:
n_hidden = tuple([int(x) for x in sys.argv[2:]])
nz = 500
if os.environ.has_key('nz'):
nz = int(os.environ['nz'])
if os.environ.has_key('stepsize'):
alpha = float(os.environ['stepsize'])
else:
alpha = 0.0003
if os.environ.has_key('decay1'):
decay1 = float(os.environ['decay1'])
else:
decay1 = 0.1
if os.environ.has_key('decay2'):
decay2 = float(os.environ['decay2'])
else:
decay2 = 0.001
if os.environ.has_key('random_seed'):
seed = 0
if int(os.environ['random_seed']) == 1:
seed = int(time.time())
    if int(os.environ['random_seed']) > 1:
seed = int(os.environ['random_seed'])
color.printRed('random_seed ' + str(seed))
else:
seed = int(time.time())
color.printRed('random_seed ' + str(seed))
gpulearn_mm_z_x.main(dataset=sys.argv[1], n_z=nz, n_hidden=n_hidden, seed=
seed, comment='', alpha=alpha, decay1=decay1, decay2=decay2, gfx=True)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import gpulearn_mm_z_x
import sys, os
import time
import color
n_hidden = 500, 500
if len(sys.argv) > 2:
n_hidden = tuple([int(x) for x in sys.argv[2:]])
nz = 500
if os.environ.has_key('nz'):
nz = int(os.environ['nz'])
if os.environ.has_key('stepsize'):
alpha = float(os.environ['stepsize'])
else:
alpha = 0.0003
if os.environ.has_key('decay1'):
decay1 = float(os.environ['decay1'])
else:
decay1 = 0.1
if os.environ.has_key('decay2'):
decay2 = float(os.environ['decay2'])
else:
decay2 = 0.001
if os.environ.has_key('random_seed'):
seed = 0
if int(os.environ['random_seed']) == 1:
seed = int(time.time())
    if int(os.environ['random_seed']) > 1:
seed = int(os.environ['random_seed'])
color.printRed('random_seed ' + str(seed))
else:
seed = int(time.time())
color.printRed('random_seed ' + str(seed))
gpulearn_mm_z_x.main(dataset=sys.argv[1], n_z=nz, n_hidden=n_hidden, seed=
seed, comment='', alpha=alpha, decay1=decay1, decay2=decay2, gfx=True)
<|reserved_special_token_1|>
'''
Code for mmDGM
Author: Chongxuan Li (chongxuanli1991@gmail.com)
Version = '1.0'
'''
import gpulearn_mm_z_x
import sys, os
import time
import color
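# note: dict.has_key() exists only on Python 2; on Python 3 use "'key' in os.environ"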
n_hidden = (500,500)
if len(sys.argv) > 2:
n_hidden = tuple([int(x) for x in sys.argv[2:]])
nz=500
if os.environ.has_key('nz'):
nz = int(os.environ['nz'])
if os.environ.has_key('stepsize'):
alpha = float(os.environ['stepsize'])
else:
alpha = 3e-4
if os.environ.has_key('decay1'):
decay1 = float(os.environ['decay1'])
else:
decay1 = 0.1
if os.environ.has_key('decay2'):
decay2 = float(os.environ['decay2'])
else:
decay2 = 0.001
if os.environ.has_key('random_seed'):
seed = 0
if int(os.environ['random_seed']) == 1:
seed = int(time.time())
    if int(os.environ['random_seed']) > 1:
seed = int(os.environ['random_seed'])
color.printRed('random_seed ' + str(seed))
else:
seed = int(time.time())
color.printRed('random_seed ' + str(seed))
#print 'random_seed (bool) missing.'
#exit()
gpulearn_mm_z_x.main(dataset=sys.argv[1], n_z=nz, n_hidden=n_hidden, seed=seed, comment='', alpha=alpha, decay1=decay1, decay2=decay2, gfx=True)
#gpulearn_z_x.main(n_data=50000, dataset='svhn_pca', n_z=300, n_hidden=(500,500), seed=0)
|
flexible
|
{
"blob_id": "40158bbfd9c95a8344f34431d0b0e98c4a1bf6ed",
"index": 476,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif len(sys.argv) > 2:\n n_hidden = tuple([int(x) for x in sys.argv[2:]])\n<mask token>\nif os.environ.has_key('nz'):\n nz = int(os.environ['nz'])\nif os.environ.has_key('stepsize'):\n alpha = float(os.environ['stepsize'])\nelse:\n alpha = 0.0003\nif os.environ.has_key('decay1'):\n decay1 = float(os.environ['decay1'])\nelse:\n decay1 = 0.1\nif os.environ.has_key('decay2'):\n decay2 = float(os.environ['decay2'])\nelse:\n decay2 = 0.001\nif os.environ.has_key('random_seed'):\n seed = 0\n if int(os.environ['random_seed']) == 1:\n seed = int(time.time())\n if int(os.environ['random_seed'] > 1):\n seed = int(os.environ['random_seed'])\n color.printRed('random_seed ' + str(seed))\nelse:\n seed = int(time.time())\n color.printRed('random_seed ' + str(seed))\ngpulearn_mm_z_x.main(dataset=sys.argv[1], n_z=nz, n_hidden=n_hidden, seed=\n seed, comment='', alpha=alpha, decay1=decay1, decay2=decay2, gfx=True)\n",
"step-3": "<mask token>\nn_hidden = 500, 500\nif len(sys.argv) > 2:\n n_hidden = tuple([int(x) for x in sys.argv[2:]])\nnz = 500\nif os.environ.has_key('nz'):\n nz = int(os.environ['nz'])\nif os.environ.has_key('stepsize'):\n alpha = float(os.environ['stepsize'])\nelse:\n alpha = 0.0003\nif os.environ.has_key('decay1'):\n decay1 = float(os.environ['decay1'])\nelse:\n decay1 = 0.1\nif os.environ.has_key('decay2'):\n decay2 = float(os.environ['decay2'])\nelse:\n decay2 = 0.001\nif os.environ.has_key('random_seed'):\n seed = 0\n if int(os.environ['random_seed']) == 1:\n seed = int(time.time())\n if int(os.environ['random_seed'] > 1):\n seed = int(os.environ['random_seed'])\n color.printRed('random_seed ' + str(seed))\nelse:\n seed = int(time.time())\n color.printRed('random_seed ' + str(seed))\ngpulearn_mm_z_x.main(dataset=sys.argv[1], n_z=nz, n_hidden=n_hidden, seed=\n seed, comment='', alpha=alpha, decay1=decay1, decay2=decay2, gfx=True)\n",
"step-4": "<mask token>\nimport gpulearn_mm_z_x\nimport sys, os\nimport time\nimport color\nn_hidden = 500, 500\nif len(sys.argv) > 2:\n n_hidden = tuple([int(x) for x in sys.argv[2:]])\nnz = 500\nif os.environ.has_key('nz'):\n nz = int(os.environ['nz'])\nif os.environ.has_key('stepsize'):\n alpha = float(os.environ['stepsize'])\nelse:\n alpha = 0.0003\nif os.environ.has_key('decay1'):\n decay1 = float(os.environ['decay1'])\nelse:\n decay1 = 0.1\nif os.environ.has_key('decay2'):\n decay2 = float(os.environ['decay2'])\nelse:\n decay2 = 0.001\nif os.environ.has_key('random_seed'):\n seed = 0\n if int(os.environ['random_seed']) == 1:\n seed = int(time.time())\n if int(os.environ['random_seed'] > 1):\n seed = int(os.environ['random_seed'])\n color.printRed('random_seed ' + str(seed))\nelse:\n seed = int(time.time())\n color.printRed('random_seed ' + str(seed))\ngpulearn_mm_z_x.main(dataset=sys.argv[1], n_z=nz, n_hidden=n_hidden, seed=\n seed, comment='', alpha=alpha, decay1=decay1, decay2=decay2, gfx=True)\n",
"step-5": "'''\nCode for mmDGM\nAuthor: Chongxuan Li (chongxuanli1991@gmail.com)\nVersion = '1.0'\n'''\n\nimport gpulearn_mm_z_x\nimport sys, os\nimport time\nimport color\n\nn_hidden = (500,500)\nif len(sys.argv) > 2:\n n_hidden = tuple([int(x) for x in sys.argv[2:]])\nnz=500\nif os.environ.has_key('nz'):\n nz = int(os.environ['nz'])\nif os.environ.has_key('stepsize'):\n alpha = float(os.environ['stepsize'])\nelse:\n alpha = 3e-4\nif os.environ.has_key('decay1'):\n decay1 = float(os.environ['decay1'])\nelse:\n decay1 = 0.1\nif os.environ.has_key('decay2'):\n decay2 = float(os.environ['decay2'])\nelse:\n decay2 = 0.001\nif os.environ.has_key('random_seed'):\n seed = 0\n if int(os.environ['random_seed']) == 1:\n seed = int(time.time())\n if int(os.environ['random_seed'] > 1):\n seed = int(os.environ['random_seed'])\n color.printRed('random_seed ' + str(seed))\nelse:\n seed = int(time.time())\n color.printRed('random_seed ' + str(seed))\n #print 'random_seed (bool) missing.' \n #exit()\n \ngpulearn_mm_z_x.main(dataset=sys.argv[1], n_z=nz, n_hidden=n_hidden, seed=seed, comment='', alpha=alpha, decay1=decay1, decay2=decay2, gfx=True)\n\n\n#gpulearn_z_x.main(n_data=50000, dataset='svhn_pca', n_z=300, n_hidden=(500,500), seed=0)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class SensorValueSerializer(serializers.ModelSerializer):
<|reserved_special_token_0|>
class Meta:
model = SensorValue
fields = 'id', 'timestamp', 'sensor_type', 'value'
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class SensorValueSerializer(serializers.ModelSerializer):
timestamp = serializers.DateTimeField(required=False)
class Meta:
model = SensorValue
fields = 'id', 'timestamp', 'sensor_type', 'value'
<|reserved_special_token_1|>
from rest_framework import serializers
from .models import SensorValue
class SensorValueSerializer(serializers.ModelSerializer):
timestamp = serializers.DateTimeField(required=False)
class Meta:
model = SensorValue
fields = 'id', 'timestamp', 'sensor_type', 'value'
<|reserved_special_token_1|>
from rest_framework import serializers
from .models import SensorValue
class SensorValueSerializer(serializers.ModelSerializer):
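    # required=False lets API clients omit the timestamp when posting a reading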
timestamp = serializers.DateTimeField(required=False)
class Meta:
model = SensorValue
fields = ("id", "timestamp", "sensor_type", "value")
|
flexible
|
{
"blob_id": "39312ec60c9ef1c9c95cf4206b6d0bbdb0aedf94",
"index": 9042,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass SensorValueSerializer(serializers.ModelSerializer):\n <mask token>\n\n\n class Meta:\n model = SensorValue\n fields = 'id', 'timestamp', 'sensor_type', 'value'\n",
"step-3": "<mask token>\n\n\nclass SensorValueSerializer(serializers.ModelSerializer):\n timestamp = serializers.DateTimeField(required=False)\n\n\n class Meta:\n model = SensorValue\n fields = 'id', 'timestamp', 'sensor_type', 'value'\n",
"step-4": "from rest_framework import serializers\nfrom .models import SensorValue\n\n\nclass SensorValueSerializer(serializers.ModelSerializer):\n timestamp = serializers.DateTimeField(required=False)\n\n\n class Meta:\n model = SensorValue\n fields = 'id', 'timestamp', 'sensor_type', 'value'\n",
"step-5": "from rest_framework import serializers\nfrom .models import SensorValue\n\n\nclass SensorValueSerializer(serializers.ModelSerializer):\n timestamp = serializers.DateTimeField(required=False)\n\n class Meta:\n model = SensorValue\n fields = (\"id\", \"timestamp\", \"sensor_type\", \"value\")\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import math
def calcula_distancia_do_projetil(v, O, y0):
g = 9.8
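    # range of a projectile launched at speed v, angle O (rad), from height y0:
    # d = (v^2/(2g)) * (1 + sqrt(1 + 2*g*y0 / (v^2 * sin(O)^2))) * sin(2*O)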
    return (v ** 2 / (2 * g)) * (1 + math.sqrt(1 + 2 * g * y0 / (v ** 2 * math.sin(O) ** 2))) * math.sin(2 * O)
|
normal
|
{
"blob_id": "0a459b4aeb2a16c06c1d89dafb656028b235a31e",
"index": 9415,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef calcula_distancia_do_projetil(v, O, y0):\n g = 9.8\n return v ** 2 / 2 * g * (1 + math.sqrt(1 + 2 * g * y0 / v ** 2 * math.\n sin(O) ** 2)) * math.sin(2 * O)\n",
"step-3": "import math\n\n\ndef calcula_distancia_do_projetil(v, O, y0):\n g = 9.8\n return v ** 2 / 2 * g * (1 + math.sqrt(1 + 2 * g * y0 / v ** 2 * math.\n sin(O) ** 2)) * math.sin(2 * O)\n",
"step-4": "import math\n\ndef calcula_distancia_do_projetil(v, O, y0):\n g = 9.8\n return ((v ** 2) / 2 * g) * (1 + math.sqrt(1 + ( 2 * g * y0 / (v ** 2) * (math.sin(O) ** 2)))) * math.sin(2 * O)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
np.random.seed(1)
<|reserved_special_token_0|>
encoder.fit(Y)
<|reserved_special_token_0|>
model.add(Dense(5, input_dim=len(X[0])))
model.add(Dense(32, activation='relu'))
model.add(Dense(len(onehot_Y[0]), activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=[
'accuracy'])
model.fit(X, onehot_Y, validation_split=0.33, epochs=1000)
<|reserved_special_token_0|>
print('Accuracy:', accuracy, '%')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
np.random.seed(1)
df, meta = pyreadstat.read_sav('RESIDIV_Vimala.sav', usecols=[
'Sympt_blødning', 'Sympt_smerter', 'Sympt_ascites', 'Sympt_fatigue',
'Lengde_sympt_dager', 'Lengde_sympt_uker', 'Lengde_sympt_mnd', 'kreftform']
)
dataset = df.drop('kreftform', axis=1)
X = dataset.values
Y = df['kreftform'].values
encoder = LabelEncoder()
encoder.fit(Y)
encoded_Y = encoder.transform(Y)
onehot_Y = np_utils.to_categorical(encoded_Y)
model = Sequential()
model.add(Dense(5, input_dim=len(X[0])))
model.add(Dense(32, activation='relu'))
model.add(Dense(len(onehot_Y[0]), activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=[
'accuracy'])
model.fit(X, onehot_Y, validation_split=0.33, epochs=1000)
accuracy = '%.2f' % (model.evaluate(X, onehot_Y)[1] * 100)
print('Accuracy:', accuracy, '%')
<|reserved_special_token_1|>
import pyreadstat
import matplotlib.pyplot as plt
import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from keras.utils import np_utils
from sklearn.preprocessing import LabelEncoder
np.random.seed(1)
df, meta = pyreadstat.read_sav('RESIDIV_Vimala.sav', usecols=[
'Sympt_blødning', 'Sympt_smerter', 'Sympt_ascites', 'Sympt_fatigue',
'Lengde_sympt_dager', 'Lengde_sympt_uker', 'Lengde_sympt_mnd', 'kreftform']
)
dataset = df.drop('kreftform', axis=1)
X = dataset.values
Y = df['kreftform'].values
encoder = LabelEncoder()
encoder.fit(Y)
encoded_Y = encoder.transform(Y)
onehot_Y = np_utils.to_categorical(encoded_Y)
model = Sequential()
model.add(Dense(5, input_dim=len(X[0])))
model.add(Dense(32, activation='relu'))
model.add(Dense(len(onehot_Y[0]), activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=[
'accuracy'])
model.fit(X, onehot_Y, validation_split=0.33, epochs=1000)
accuracy = '%.2f' % (model.evaluate(X, onehot_Y)[1] * 100)
print('Accuracy:', accuracy, '%')
<|reserved_special_token_1|>
import pyreadstat
import matplotlib.pyplot as plt
import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from keras.utils import np_utils
from sklearn.preprocessing import LabelEncoder
# Set random seed for reproducible results
np.random.seed(1)
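# Note: seeding NumPy alone does not pin Keras/TensorFlow's own RNG, so results may still vary between runs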
# Read the sav file into a pandas dataframe and extract its metadata
df, meta = pyreadstat.read_sav("RESIDIV_Vimala.sav", usecols=["Sympt_blødning", "Sympt_smerter", "Sympt_ascites", "Sympt_fatigue", "Lengde_sympt_dager", "Lengde_sympt_uker", "Lengde_sympt_mnd", "kreftform"])
dataset = df.drop("kreftform", axis=1)
# X holds the seven symptom/duration feature columns; Y is the "kreftform" target
X = dataset.values
Y = df["kreftform"].values
# encode class values as integers
encoder = LabelEncoder()
encoder.fit(Y)
encoded_Y = encoder.transform(Y)
# convert integers to dummy variables (i.e. one-hot encoded)
onehot_Y = np_utils.to_categorical(encoded_Y)
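# np_utils was removed from newer Keras releases; keras.utils.to_categorical is the modern equivalent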
model = Sequential()
model.add(Dense(5, input_dim=(len(X[0]))))
model.add(Dense(32, activation="relu"))
model.add(Dense(len(onehot_Y[0]), activation="softmax"))
model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
model.fit(X, onehot_Y, validation_split=0.33, epochs=1000)
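# Caveat: this evaluates on the same data the model was fit on (validation rows included), so the accuracy is optimistic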
accuracy = "%.2f" % (model.evaluate(X, onehot_Y)[1]*100)
print("Accuracy:", accuracy, "%")
|
flexible
|
{
"blob_id": "7282af4186a976296ac50840e9169b78a66e118b",
"index": 1683,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nnp.random.seed(1)\n<mask token>\nencoder.fit(Y)\n<mask token>\nmodel.add(Dense(5, input_dim=len(X[0])))\nmodel.add(Dense(32, activation='relu'))\nmodel.add(Dense(len(onehot_Y[0]), activation='softmax'))\nmodel.compile(loss='categorical_crossentropy', optimizer='adam', metrics=[\n 'accuracy'])\nmodel.fit(X, onehot_Y, validation_split=0.33, epochs=1000)\n<mask token>\nprint('Accuracy:', accuracy, '%')\n",
"step-3": "<mask token>\nnp.random.seed(1)\ndf, meta = pyreadstat.read_sav('RESIDIV_Vimala.sav', usecols=[\n 'Sympt_blødning', 'Sympt_smerter', 'Sympt_ascites', 'Sympt_fatigue',\n 'Lengde_sympt_dager', 'Lengde_sympt_uker', 'Lengde_sympt_mnd', 'kreftform']\n )\ndataset = df.drop('kreftform', axis=1)\nX = dataset.values\nY = df['kreftform'].values\nencoder = LabelEncoder()\nencoder.fit(Y)\nencoded_Y = encoder.transform(Y)\nonehot_Y = np_utils.to_categorical(encoded_Y)\nmodel = Sequential()\nmodel.add(Dense(5, input_dim=len(X[0])))\nmodel.add(Dense(32, activation='relu'))\nmodel.add(Dense(len(onehot_Y[0]), activation='softmax'))\nmodel.compile(loss='categorical_crossentropy', optimizer='adam', metrics=[\n 'accuracy'])\nmodel.fit(X, onehot_Y, validation_split=0.33, epochs=1000)\naccuracy = '%.2f' % (model.evaluate(X, onehot_Y)[1] * 100)\nprint('Accuracy:', accuracy, '%')\n",
"step-4": "import pyreadstat\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.utils import np_utils\nfrom sklearn.preprocessing import LabelEncoder\nnp.random.seed(1)\ndf, meta = pyreadstat.read_sav('RESIDIV_Vimala.sav', usecols=[\n 'Sympt_blødning', 'Sympt_smerter', 'Sympt_ascites', 'Sympt_fatigue',\n 'Lengde_sympt_dager', 'Lengde_sympt_uker', 'Lengde_sympt_mnd', 'kreftform']\n )\ndataset = df.drop('kreftform', axis=1)\nX = dataset.values\nY = df['kreftform'].values\nencoder = LabelEncoder()\nencoder.fit(Y)\nencoded_Y = encoder.transform(Y)\nonehot_Y = np_utils.to_categorical(encoded_Y)\nmodel = Sequential()\nmodel.add(Dense(5, input_dim=len(X[0])))\nmodel.add(Dense(32, activation='relu'))\nmodel.add(Dense(len(onehot_Y[0]), activation='softmax'))\nmodel.compile(loss='categorical_crossentropy', optimizer='adam', metrics=[\n 'accuracy'])\nmodel.fit(X, onehot_Y, validation_split=0.33, epochs=1000)\naccuracy = '%.2f' % (model.evaluate(X, onehot_Y)[1] * 100)\nprint('Accuracy:', accuracy, '%')\n",
"step-5": "import pyreadstat\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.utils import np_utils\nfrom sklearn.preprocessing import LabelEncoder\n\n# Set random seed for reproducible results\nnp.random.seed(1)\n\n# Read sav file and create a pandas dataframe and extract metadata\ndf, meta = pyreadstat.read_sav(\"RESIDIV_Vimala.sav\", usecols=[\"Sympt_blødning\", \"Sympt_smerter\", \"Sympt_ascites\", \"Sympt_fatigue\", \"Lengde_sympt_dager\", \"Lengde_sympt_uker\", \"Lengde_sympt_mnd\", \"kreftform\"])\n\ndataset = df.drop(\"kreftform\", axis=1)\n# dataset[0] is Y (kreftform), dataset[1, 2, 3 and 4] is X\nX = dataset.values\nY = df[\"kreftform\"].values\n\n# encode class values as integers\nencoder = LabelEncoder()\nencoder.fit(Y)\nencoded_Y = encoder.transform(Y)\n# convert integers to dummy variables (i.e. one-hot encoded)\nonehot_Y = np_utils.to_categorical(encoded_Y)\n\nmodel = Sequential()\nmodel.add(Dense(5, input_dim=(len(X[0]))))\nmodel.add(Dense(32, activation=\"relu\"))\nmodel.add(Dense(len(onehot_Y[0]), activation=\"softmax\"))\nmodel.compile(loss=\"categorical_crossentropy\", optimizer=\"adam\", metrics=[\"accuracy\"])\nmodel.fit(X, onehot_Y, validation_split=0.33, epochs=1000)\naccuracy = \"%.2f\" % (model.evaluate(X, onehot_Y)[1]*100)\n\nprint(\"Accuracy:\", accuracy, \"%\")\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
@rule({'@context': _context, '@type': 'WebSite', '@id': {}, 'url': {}})
def html_resolver(ld):
return dict(ld, **{'html': str(resolve_html(ld['url']))})
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@promise
def resolve_html(url):
from urllib.request import urlopen
return urlopen(url).read().decode()
@rule({'@context': _context, '@type': 'WebSite', '@id': {}, 'url': {}})
def html_resolver(ld):
return dict(ld, **{'html': str(resolve_html(ld['url']))})
<|reserved_special_token_1|>
<|reserved_special_token_0|>
_context = {'@vocab': 'https://schema.org/', 'fairsharing':
'https://fairsharing.org/', 'html': 'fairsharing:bsg-s001284'}
@promise
def resolve_html(url):
from urllib.request import urlopen
return urlopen(url).read().decode()
@rule({'@context': _context, '@type': 'WebSite', '@id': {}, 'url': {}})
def html_resolver(ld):
return dict(ld, **{'html': str(resolve_html(ld['url']))})
<|reserved_special_token_1|>
from ..core import promise, rule
_context = {'@vocab': 'https://schema.org/', 'fairsharing':
'https://fairsharing.org/', 'html': 'fairsharing:bsg-s001284'}
@promise
def resolve_html(url):
from urllib.request import urlopen
return urlopen(url).read().decode()
@rule({'@context': _context, '@type': 'WebSite', '@id': {}, 'url': {}})
def html_resolver(ld):
return dict(ld, **{'html': str(resolve_html(ld['url']))})
<|reserved_special_token_1|>
from ..core import promise, rule
_context = {
'@vocab': 'https://schema.org/',
'fairsharing': 'https://fairsharing.org/',
'html': 'fairsharing:bsg-s001284',
}
@promise
def resolve_html(url):
from urllib.request import urlopen
return urlopen(url).read().decode()
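# Assumed semantics (not confirmed from ..core): @promise defers the fetch, and @rule
# applies html_resolver to JSON-LD nodes matching the frame below ('@type': 'WebSite' with a url)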
@rule({
'@context': _context,
'@type': 'WebSite',
'@id': {},
'url': {},
})
def html_resolver(ld):
return dict(ld, **{
'html': str(resolve_html(ld['url'])),
})
|
flexible
|
{
"blob_id": "3272296bca0d6343540597baebef8d882a1267c0",
"index": 3111,
"step-1": "<mask token>\n\n\n@rule({'@context': _context, '@type': 'WebSite', '@id': {}, 'url': {}})\ndef html_resolver(ld):\n return dict(ld, **{'html': str(resolve_html(ld['url']))})\n",
"step-2": "<mask token>\n\n\n@promise\ndef resolve_html(url):\n from urllib.request import urlopen\n return urlopen(url).read().decode()\n\n\n@rule({'@context': _context, '@type': 'WebSite', '@id': {}, 'url': {}})\ndef html_resolver(ld):\n return dict(ld, **{'html': str(resolve_html(ld['url']))})\n",
"step-3": "<mask token>\n_context = {'@vocab': 'https://schema.org/', 'fairsharing':\n 'https://fairsharing.org/', 'html': 'fairsharing:bsg-s001284'}\n\n\n@promise\ndef resolve_html(url):\n from urllib.request import urlopen\n return urlopen(url).read().decode()\n\n\n@rule({'@context': _context, '@type': 'WebSite', '@id': {}, 'url': {}})\ndef html_resolver(ld):\n return dict(ld, **{'html': str(resolve_html(ld['url']))})\n",
"step-4": "from ..core import promise, rule\n_context = {'@vocab': 'https://schema.org/', 'fairsharing':\n 'https://fairsharing.org/', 'html': 'fairsharing:bsg-s001284'}\n\n\n@promise\ndef resolve_html(url):\n from urllib.request import urlopen\n return urlopen(url).read().decode()\n\n\n@rule({'@context': _context, '@type': 'WebSite', '@id': {}, 'url': {}})\ndef html_resolver(ld):\n return dict(ld, **{'html': str(resolve_html(ld['url']))})\n",
"step-5": "from ..core import promise, rule\n\n_context = {\n '@vocab': 'https://schema.org/',\n 'fairsharing': 'https://fairsharing.org/',\n 'html': 'fairsharing:bsg-s001284',\n}\n\n@promise\ndef resolve_html(url):\n from urllib.request import urlopen\n return urlopen(url).read().decode()\n\n@rule({\n '@context': _context,\n '@type': 'WebSite',\n '@id': {},\n 'url': {},\n})\ndef html_resolver(ld):\n return dict(ld, **{\n 'html': str(resolve_html(ld['url'])),\n })\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if hasattr(sys, '__interactivehook__'):
del sys.__interactivehook__
print('Python3 startup file loaded from ~/.config/pystartup.py')
<|reserved_special_token_1|>
import sys
import os
import math
import random
if hasattr(sys, '__interactivehook__'):
del sys.__interactivehook__
print('Python3 startup file loaded from ~/.config/pystartup.py')
<|reserved_special_token_1|>
#!/usr/bin/env python3
import sys
import os
import math
import random
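# Deleting the hook below skips CPython's default REPL setup (readline tab-completion and ~/.python_history)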
if hasattr(sys, '__interactivehook__'):
del sys.__interactivehook__
print('Python3 startup file loaded from ~/.config/pystartup.py')
|
flexible
|
{
"blob_id": "5ddde3aa6eaa30b70743272a532874663067eed6",
"index": 3157,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif hasattr(sys, '__interactivehook__'):\n del sys.__interactivehook__\nprint('Python3 startup file loaded from ~/.config/pystartup.py')\n",
"step-3": "import sys\nimport os\nimport math\nimport random\nif hasattr(sys, '__interactivehook__'):\n del sys.__interactivehook__\nprint('Python3 startup file loaded from ~/.config/pystartup.py')\n",
"step-4": "#!/usr/bin/env python3\n\nimport sys\nimport os\nimport math\nimport random\n\nif hasattr(sys, '__interactivehook__'):\n del sys.__interactivehook__\n\nprint('Python3 startup file loaded from ~/.config/pystartup.py')\n\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from django.core.paginator import Paginator, EmptyPage
from django.shortcuts import render
from django.views import View
from django.contrib.auth.mixins import LoginRequiredMixin
from logging import getLogger
from django_redis import get_redis_connection
from decimal import Decimal
import json
from django import http
from django.utils import timezone
from django.db import transaction
from users.models import Address
from goods.models import SKU
from meiduo_mall.utils import constants
from meiduo_mall.utils.auth_backend import LoginRequiredJsonMixin
from .models import OrderInfo, OrderGoods
from meiduo_mall.utils.response_code import RETCODE, err_msg
logger = getLogger('django')
class GoodsCommentView(View):
    """Order goods review information"""
    def get(self, request, sku_id):
        # Fetch the order goods that have already been reviewed
        order_goods_list = OrderGoods.objects.filter(sku_id=sku_id, is_commented=True).order_by('-create_time')[:constants.COMMENTS_LIST_LIMIT]
        # Serialize the reviews
        comment_list = []
        for order_goods in order_goods_list:
            username = order_goods.order.user.username
            comment_list.append({
                'username': username[0] + '***' + username[-1] if order_goods.is_anonymous else username,
                'comment': order_goods.comment,
                'score': order_goods.score,
            })
        return http.JsonResponse({'code': RETCODE.OK, 'errmsg': err_msg[RETCODE.OK], 'comment_list': comment_list})
class OrderCommentView(LoginRequiredMixin, View):
    """Order goods review"""
    def get(self, request):
        """Render the goods review page"""
        # Receive parameters
        order_id = request.GET.get('order_id')
        # Validate parameters
        try:
            OrderInfo.objects.get(order_id=order_id, user=request.user)
        except OrderInfo.DoesNotExist:
            return http.HttpResponseNotFound('Order does not exist')
        # Query the goods in this order that have not been reviewed yet
        try:
            uncomment_goods = OrderGoods.objects.filter(order_id=order_id, is_commented=False)
        except Exception as e:
            logger.error(e)
            return http.HttpResponseServerError('Error reading order goods information')
        # Build the data for the goods awaiting review
        uncomment_goods_list = []
        for goods in uncomment_goods:
            uncomment_goods_list.append({
                'order_id': goods.order.order_id,
                'sku_id': goods.sku.id,
                'name': goods.sku.name,
                'price': str(goods.price),
                'default_image_url': goods.sku.default_image.url,
                'comment': goods.comment,
                'score': goods.score,
                'is_anonymous': str(goods.is_anonymous),
            })
        # Render the template
        context = {
            'uncomment_goods_list': uncomment_goods_list
        }
        return render(request, 'goods_judge.html', context)
    def post(self, request):
        """Review order goods"""
        # Receive parameters
        json_dict = json.loads(request.body.decode())
        order_id = json_dict.get('order_id')
        sku_id = json_dict.get('sku_id')
        score = json_dict.get('score')
        comment = json_dict.get('comment')
        is_anonymous = json_dict.get('is_anonymous')
        # Validate parameters
        if not all([order_id, sku_id, score, comment]):
            return http.HttpResponseForbidden('Missing required parameters')
        try:
            # get() (rather than filter()) so a missing order actually raises DoesNotExist
            OrderInfo.objects.get(order_id=order_id, user=request.user, status=OrderInfo.ORDER_STATUS_ENUM['UNCOMMENT'])
        except OrderInfo.DoesNotExist:
            return http.HttpResponseForbidden('Invalid parameter order_id')
        try:
            sku = SKU.objects.get(id=sku_id)
        except SKU.DoesNotExist:
            return http.HttpResponseForbidden('Invalid parameter sku_id')
        if is_anonymous:
            if not isinstance(is_anonymous, bool):
                return http.HttpResponseForbidden('Invalid parameter is_anonymous')
        # Run the following database operations inside one transaction
        with transaction.atomic():
            # Create a savepoint (the initial database state) before touching the database
            save_id = transaction.savepoint()
            try:
                # Save the review data for this order's goods
                OrderGoods.objects.filter(order_id=order_id, sku_id=sku_id, is_commented=False).update(
                    comment=comment,
                    score=score,
                    is_anonymous=is_anonymous,
                    is_commented=True
                )
                # Accumulate the comment counters
                sku.comments += 1
                sku.save()
                sku.spu.comments += 1
                sku.spu.save()
                # If every item in the order has been reviewed, mark the order as finished
                if OrderGoods.objects.filter(order_id=order_id, is_commented=False).count() == 0:
                    OrderInfo.objects.filter(order_id=order_id).update(status=OrderInfo.ORDER_STATUS_ENUM['FINISHED'])
            # On any unexpected database error, roll back everything
            except Exception as e:
                logger.error(e)
                transaction.savepoint_rollback(save_id)
                return http.JsonResponse({'code': RETCODE.COMMITMENTERR, 'errmsg': err_msg[RETCODE.COMMITMENTERR]})
            else:
                # Commit the transaction (release the savepoint)
                transaction.savepoint_commit(save_id)
        return http.JsonResponse({'code': RETCODE.OK, 'errmsg': err_msg[RETCODE.OK]})
class UserOrderInfoView(LoginRequiredMixin, View):
    """My orders"""
    def get(self, request, page_num):
        """Render the 'my orders' page"""
        user = request.user
        # Query the user's orders
        orders = user.orderinfo_set.all().order_by("-create_time")
        # Iterate over every order
        for order in orders:
            # Attach the human-readable order status
            order.status_name = OrderInfo.ORDER_STATUS_CHOICES[order.status-1][1]
            # Attach the human-readable payment method
            order.pay_method_name = OrderInfo.PAY_METHOD_CHOICES[order.pay_method-1][1]
            order.sku_list = []
            # Query the goods in this order
            order_goods = order.skus.all()
            # Iterate over the order goods
            for order_good in order_goods:
                sku = order_good.sku
                sku.count = order_good.count
                sku.amount = sku.price * sku.count
                order.sku_list.append(sku)
        # Paginate
        page_num = int(page_num)
        try:
            paginator = Paginator(orders, constants.ORDERS_LIST_LIMIT)
            page_orders = paginator.page(page_num)
            total_page = paginator.num_pages
        except EmptyPage:
            return http.HttpResponseNotFound('Order does not exist')
        context = {
            "page_orders": page_orders,
            'total_page': total_page,
            'page_num': page_num,
        }
        return render(request, "user_center_order.html", context)
class OrderSuccessView(LoginRequiredMixin, View):
    """Order success page"""
    def get(self, request):
        """Render the order success page"""
        # Receive parameters
        order_id = request.GET.get('order_id')
        payment_amount = request.GET.get('payment_amount')
        pay_method = request.GET.get('pay_method')
        # Build the context
        context = {
            'order_id': order_id,
            'payment_amount': payment_amount,
            'pay_method': pay_method
        }
        return render(request, 'order_success.html', context)
class OrderCommitView(LoginRequiredJsonMixin, View):
    """Submit an order"""
    def post(self, request):
        """Save the order's basic info and its order-goods records"""
        # Receive parameters
        json_dict = json.loads(request.body.decode())
        address_id = json_dict.get('address_id')
        pay_method = json_dict.get('pay_method')
        # Validate parameters
        if not all([address_id, pay_method]):
            return http.HttpResponseForbidden('Missing required parameters')
        # Check that address_id is valid
        try:
            address = Address.objects.get(id=address_id)
        except Exception as e:
            logger.error(e)
            return http.HttpResponseForbidden('Invalid parameter address_id')
        # Check that pay_method is valid
        if pay_method not in [OrderInfo.PAY_METHODS_ENUM['CASH'], OrderInfo.PAY_METHODS_ENUM['ALIPAY']]:
            return http.HttpResponseForbidden('Invalid parameter pay_method')
        # Run the following database operations inside one transaction
        with transaction.atomic():
            # Create a savepoint (the initial database state) before touching the database
            save_id = transaction.savepoint()
            # Get the logged-in user
            user = request.user
            # Build the order number: timestamp + user_id, e.g. '2020123113041200000001'
            order_id = timezone.localtime().strftime('%Y%m%d%H%M%S') + '{:0>9d}'.format(user.id)
            try:
                # Save the order's basic info (the "one" side of the relation)
                order = OrderInfo.objects.create(
                    order_id=order_id,
                    user=user,
                    address=address,
                    total_count=0,  # initial value only; updated below from the order's goods
                    total_amount=Decimal('0.00'),  # initial value only; updated below from the order's goods
                    freight=Decimal(constants.ORDERS_FREIGHT_COST),
                    pay_method=pay_method,
                    # Alipay orders start as unpaid; cash-on-delivery orders start as unshipped
                    status=OrderInfo.ORDER_STATUS_ENUM['UNPAID'] if pay_method == OrderInfo.PAY_METHODS_ENUM['ALIPAY'] else OrderInfo.ORDER_STATUS_ENUM['UNSEND']
                )
                # Save the order goods info (the "many" side)
                # Query the selected cart items from redis
                redis_conn = get_redis_connection('carts')
                # Quantities of the goods in the cart
                redis_cart = redis_conn.hgetall('carts_%s' % user.id)
                # sku_ids of the selected goods
                redis_selected = redis_conn.smembers('selected_{}'.format(user.id))
                # Build the selected-cart data new_cart_dict, e.g. {sku_id: 2, sku_id: 1}
                new_cart_dict = {}
                for sku_id in redis_selected:
                    new_cart_dict[int(sku_id)] = int(redis_cart[sku_id])
                # Get the sku_ids of the selected goods
                sku_ids = new_cart_dict.keys()
                for sku_id in sku_ids:
                    # Each item gets repeated attempts until it succeeds or stock runs out
                    while True:
                        # Read the sku record for this item
                        sku = SKU.objects.get(id=sku_id)  # stock must be read fresh each retry, so no cached filter(id__in=sku_ids) queryset
                        # Quantity of this item selected in the cart
                        sku_count = new_cart_dict[sku.id]
                        # Read the sku's original stock and sales
                        origin_stock = sku.stock
                        origin_sales = sku.sales
                        # # Simulate network latency
                        # import time
                        # time.sleep(5)
                        # If the ordered quantity exceeds the stock, respond with "insufficient stock"
                        if sku_count > origin_stock:
                            # Insufficient stock: roll back
                            transaction.savepoint_rollback(save_id)
                            print(request.user, 'insufficient stock')
                            return http.JsonResponse({'code': RETCODE.STOCKERR, 'errmsg': err_msg[RETCODE.STOCKERR]})
                        # Stock is sufficient: decrement SKU stock and increment sales
                        new_stock = origin_stock - sku_count
                        new_sales = origin_sales + sku_count
                        result = SKU.objects.filter(id=sku_id, stock=origin_stock).update(stock=new_stock, sales=new_sales)
                        # update() returns 0 if the row changed since it was read, i.e. another request raced us (optimistic lock)
                        if result == 0:
                            # Another user ordered this item first; retry from a fresh read
                            continue
                        # Increment SPU sales
                        sku.spu.sales += sku_count
                        sku.spu.save()
                        OrderGoods.objects.create(
                            order=order,
                            sku=sku,
                            count=sku_count,
                            price=sku.price,
                        )
                        # Accumulate the order's total count and total price
                        order.total_count += sku_count
                        order.total_amount += (sku_count * sku.price)
                        # This item was ordered successfully; leave the retry loop
                        break
                # Add the freight cost and save the order
                order.total_amount += order.freight
                order.save()
            # On any unexpected database error, roll back everything
            except Exception as e:
                logger.error(e)
                transaction.savepoint_rollback(save_id)
                return http.JsonResponse({'code': RETCODE.ORDEROPERATEERR, 'errmsg': err_msg[RETCODE.ORDEROPERATEERR]})
            else:
                # Commit the transaction (release the savepoint)
                transaction.savepoint_commit(save_id)
        # Remove the settled goods from the cart
        pl = redis_conn.pipeline()
        pl.hdel('carts_%s' % user.id, *redis_selected)
        pl.srem('selected_%s' % user.id, *redis_selected)
        try:
            pl.execute()
        except Exception as e:
            logger.error(e)
            return http.JsonResponse({'code': RETCODE.DUPLICATEORDERERR, 'errmsg': err_msg[RETCODE.DUPLICATEORDERERR]})
        else:
            # Return the response
            return http.JsonResponse({'code': RETCODE.OK, 'errmsg': err_msg[RETCODE.OK], 'order_id': order_id})
class OrderSettlementView(LoginRequiredMixin, View):
    """Order settlement"""
    def get(self, request):
        """Query and display the order data to be settled"""
        # Get the logged-in user
        user = request.user
        # Query the user's shipping addresses that have not been deleted
        try:
            addresses = Address.objects.filter(user=user, is_deleted=False)
        except Exception as e:
            logger.error(e)
            # No addresses found; the user can go and create one
            addresses = None
        # Query the selected cart items from redis
        redis_conn = get_redis_connection('carts')
        # Quantities of the goods in the cart
        redis_cart = redis_conn.hgetall('carts_%s' % user.id)
        # sku_ids of the selected goods
        redis_selected = redis_conn.smembers('selected_{}'.format(user.id))
        # Build the selected-cart data new_cart_dict, e.g. {sku_id: 2, sku_id: 1}
        new_cart_dict = {}
        for sku_id in redis_selected:
            new_cart_dict[int(sku_id)] = int(redis_cart[sku_id])
        # Get the sku_ids of the selected goods
        sku_ids = new_cart_dict.keys()
        # Fetch the sku records of the selected goods
        skus = SKU.objects.filter(id__in=sku_ids)
        # Total quantity and total amount of the goods
        total_count = 0
        total_amount = Decimal(0.00)  # or Decimal('0.00')
        for sku in skus:
            # Give each sku a count (quantity) and an amount (subtotal) attribute
            sku.count = new_cart_dict[sku.id]
            sku.amount = sku.price * sku.count  # Decimal value
            # Accumulate quantity and amount
            total_count += sku.count
            total_amount += sku.amount
        # Build the context
        context = {
            'addresses': addresses,
            'skus': skus,
            'total_count': total_count,
            'total_amount': total_amount,
            'freight': constants.ORDERS_FREIGHT_COST,  # freight cost
            'payment_amount': Decimal(constants.ORDERS_FREIGHT_COST) + total_amount,
        }
        return render(request, 'place_order.html', context)
|
normal
|
{
"blob_id": "0402096f215ae600318d17bc70e5e3067b0a176b",
"index": 3864,
"step-1": "<mask token>\n\n\nclass OrderSuccessView(LoginRequiredMixin, View):\n \"\"\"订单成功页面\"\"\"\n\n def get(self, request):\n \"\"\"提供订单成功页面\"\"\"\n order_id = request.GET.get('order_id')\n payment_amount = request.GET.get('payment_amount')\n pay_method = request.GET.get('pay_method')\n context = {'order_id': order_id, 'payment_amount': payment_amount,\n 'pay_method': pay_method}\n return render(request, 'order_success.html', context)\n\n\nclass OrderCommitView(LoginRequiredJsonMixin, View):\n \"\"\"提交订单\"\"\"\n\n def post(self, request):\n \"\"\"保存订单基本信息和订单商品信息\"\"\"\n json_dict = json.loads(request.body.decode())\n address_id = json_dict.get('address_id')\n pay_method = json_dict.get('pay_method')\n if not all([address_id, pay_method]):\n return http.HttpResponseForbidden('缺少必传参数')\n try:\n address = Address.objects.get(id=address_id)\n except Exception as e:\n logger.error(e)\n return http.HttpResponseForbidden('参数address_id错误')\n if pay_method not in [OrderInfo.PAY_METHODS_ENUM['CASH'], OrderInfo\n .PAY_METHODS_ENUM['ALIPAY']]:\n return http.HttpResponseForbidden('参数pay_method错误')\n with transaction.atomic():\n save_id = transaction.savepoint()\n user = request.user\n order_id = timezone.localtime().strftime('%Y%m%d%H%M%S'\n ) + '{:0>9d}'.format(user.id)\n try:\n order = OrderInfo.objects.create(order_id=order_id, user=\n user, address=address, total_count=0, total_amount=\n Decimal('0.00'), freight=Decimal(constants.\n ORDERS_FREIGHT_COST), pay_method=pay_method, status=\n OrderInfo.ORDER_STATUS_ENUM['UNPAID'] if pay_method ==\n OrderInfo.PAY_METHODS_ENUM['ALIPAY'] else OrderInfo.\n ORDER_STATUS_ENUM['UNSEND'])\n redis_conn = get_redis_connection('carts')\n redis_cart = redis_conn.hgetall('carts_%s' % user.id)\n redis_selected = redis_conn.smembers('selected_{}'.format(\n user.id))\n new_cart_dict = {}\n for sku_id in redis_selected:\n new_cart_dict[int(sku_id)] = int(redis_cart[sku_id])\n sku_ids = new_cart_dict.keys()\n for sku_id in sku_ids:\n while True:\n sku = SKU.objects.get(id=sku_id)\n sku_count = new_cart_dict[sku.id]\n origin_stock = sku.stock\n origin_sales = sku.sales\n if sku_count > origin_stock:\n transaction.savepoint_rollback(save_id)\n print(request.user, '库存不足')\n return http.JsonResponse({'code': RETCODE.\n STOCKERR, 'errmsg': err_msg[RETCODE.STOCKERR]})\n new_stock = origin_stock - sku_count\n new_sales = origin_sales + sku_count\n result = SKU.objects.filter(id=sku_id, stock=\n origin_stock).update(stock=new_stock, sales=\n new_sales)\n if result == 0:\n continue\n sku.spu.sales += sku_count\n sku.spu.save()\n OrderGoods.objects.create(order=order, sku=sku,\n count=sku_count, price=sku.price)\n order.total_count += sku_count\n order.total_amount += sku_count * sku.price\n break\n order.total_amount += order.freight\n order.save()\n except Exception as e:\n logger.error(e)\n transaction.savepoint_rollback(save_id)\n return http.JsonResponse({'code': RETCODE.ORDEROPERATEERR,\n 'errmsg': err_msg[RETCODE.ORDEROPERATEERR]})\n else:\n transaction.savepoint_commit(save_id)\n pl = redis_conn.pipeline()\n pl.hdel('carts_%s' % user.id, *redis_selected)\n pl.srem('selected_%s' % user.id, *redis_selected)\n try:\n pl.execute()\n except Exception as e:\n logger.error(e)\n return http.JsonResponse({'code': RETCODE.DUPLICATEORDERERR,\n 'errmsg': err_msg[RETCODE.DUPLICATEORDERERR]})\n else:\n return http.JsonResponse({'code': RETCODE.OK, 'errmsg': err_msg\n [RETCODE.OK], 'order_id': order_id})\n\n\nclass OrderSettlementView(LoginRequiredMixin, View):\n \"\"\"结算订单\"\"\"\n\n 
def get(self, request):\n \"\"\"查询并展示要结算的订单数据\"\"\"\n user = request.user\n try:\n addresses = Address.objects.filter(user=user, is_deleted=False)\n except Exception as e:\n logger.error(e)\n addresses = None\n redis_conn = get_redis_connection('carts')\n redis_cart = redis_conn.hgetall('carts_%s' % user.id)\n redis_selected = redis_conn.smembers('selected_{}'.format(user.id))\n new_cart_dict = {}\n for sku_id in redis_selected:\n new_cart_dict[int(sku_id)] = int(redis_cart[sku_id])\n sku_ids = new_cart_dict.keys()\n skus = SKU.objects.filter(id__in=sku_ids)\n total_count = 0\n total_amount = Decimal(0.0)\n for sku in skus:\n sku.count = new_cart_dict[sku.id]\n sku.amount = sku.price * sku.count\n total_count += sku.count\n total_amount += sku.amount\n context = {'addresses': addresses, 'skus': skus, 'total_count':\n total_count, 'total_amount': total_amount, 'freight': constants\n .ORDERS_FREIGHT_COST, 'payment_amount': Decimal(constants.\n ORDERS_FREIGHT_COST) + total_amount}\n return render(request, 'place_order.html', context)\n",
"step-2": "<mask token>\n\n\nclass OrderCommentView(LoginRequiredMixin, View):\n \"\"\"订单商品评价\"\"\"\n\n def get(self, request):\n \"\"\"展示商品评价页面\"\"\"\n order_id = request.GET.get('order_id')\n try:\n OrderInfo.objects.get(order_id=order_id, user=request.user)\n except OrderInfo.DoesNotExist:\n return http.HttpResponseNotFound('订单不存在')\n try:\n uncomment_goods = OrderGoods.objects.filter(order_id=order_id,\n is_commented=False)\n except Exception as e:\n logger.error(e)\n return http.HttpResponseServerError('订单商品信息出错')\n uncomment_goods_list = []\n for goods in uncomment_goods:\n uncomment_goods_list.append({'order_id': goods.order.order_id,\n 'sku_id': goods.sku.id, 'name': goods.sku.name, 'price':\n str(goods.price), 'default_image_url': goods.sku.\n default_image.url, 'comment': goods.comment, 'score': goods\n .score, 'is_anonymous': str(goods.is_anonymous)})\n context = {'uncomment_goods_list': uncomment_goods_list}\n return render(request, 'goods_judge.html', context)\n\n def post(self, request):\n \"\"\"评价订单商品\"\"\"\n json_dict = json.loads(request.body.decode())\n order_id = json_dict.get('order_id')\n sku_id = json_dict.get('sku_id')\n score = json_dict.get('score')\n comment = json_dict.get('comment')\n is_anonymous = json_dict.get('is_anonymous')\n if not all([order_id, sku_id, score, comment]):\n return http.HttpResponseForbidden('缺少必传参数')\n try:\n OrderInfo.objects.filter(order_id=order_id, user=request.user,\n status=OrderInfo.ORDER_STATUS_ENUM['UNCOMMENT'])\n except OrderInfo.DoesNotExist:\n return http.HttpResponseForbidden('参数order_id错误')\n try:\n sku = SKU.objects.get(id=sku_id)\n except SKU.DoesNotExist:\n return http.HttpResponseForbidden('参数sku_id错误')\n if is_anonymous:\n if not isinstance(is_anonymous, bool):\n return http.HttpResponseForbidden('参数is_anonymous错误')\n with transaction.atomic():\n save_id = transaction.savepoint()\n try:\n OrderGoods.objects.filter(order_id=order_id, sku_id=sku_id,\n is_commented=False).update(comment=comment, score=score,\n is_anonymous=is_anonymous, is_commented=True)\n sku.comments += 1\n sku.save()\n sku.spu.comments += 1\n sku.spu.save()\n if OrderGoods.objects.filter(order_id=order_id,\n is_commented=False).count() == 0:\n OrderInfo.objects.filter(order_id=order_id).update(status\n =OrderInfo.ORDER_STATUS_ENUM['FINISHED'])\n except Exception as e:\n logger.error(e)\n transaction.savepoint_rollback(save_id)\n return http.JsonResponse({'code': RETCODE.COMMITMENTERR,\n 'errmsg': err_msg[RETCODE.COMMITMENTERR]})\n else:\n transaction.savepoint_commit(save_id)\n return http.JsonResponse({'code': RETCODE.OK, 'errmsg': err_msg[\n RETCODE.OK]})\n\n\nclass UserOrderInfoView(LoginRequiredMixin, View):\n \"\"\"我的订单\"\"\"\n\n def get(self, request, page_num):\n \"\"\"提供我的订单页面\"\"\"\n user = request.user\n orders = user.orderinfo_set.all().order_by('-create_time')\n for order in orders:\n order.status_name = OrderInfo.ORDER_STATUS_CHOICES[order.status - 1\n ][1]\n order.pay_method_name = OrderInfo.PAY_METHOD_CHOICES[order.\n pay_method - 1][1]\n order.sku_list = []\n order_goods = order.skus.all()\n for order_good in order_goods:\n sku = order_good.sku\n sku.count = order_good.count\n sku.amount = sku.price * sku.count\n order.sku_list.append(sku)\n page_num = int(page_num)\n try:\n paginator = Paginator(orders, constants.ORDERS_LIST_LIMIT)\n page_orders = paginator.page(page_num)\n total_page = paginator.num_pages\n except EmptyPage:\n return http.HttpResponseNotFound('订单不存在')\n context = {'page_orders': page_orders, 'total_page': total_page,\n 
'page_num': page_num}\n return render(request, 'user_center_order.html', context)\n\n\nclass OrderSuccessView(LoginRequiredMixin, View):\n \"\"\"订单成功页面\"\"\"\n\n def get(self, request):\n \"\"\"提供订单成功页面\"\"\"\n order_id = request.GET.get('order_id')\n payment_amount = request.GET.get('payment_amount')\n pay_method = request.GET.get('pay_method')\n context = {'order_id': order_id, 'payment_amount': payment_amount,\n 'pay_method': pay_method}\n return render(request, 'order_success.html', context)\n\n\nclass OrderCommitView(LoginRequiredJsonMixin, View):\n \"\"\"提交订单\"\"\"\n\n def post(self, request):\n \"\"\"保存订单基本信息和订单商品信息\"\"\"\n json_dict = json.loads(request.body.decode())\n address_id = json_dict.get('address_id')\n pay_method = json_dict.get('pay_method')\n if not all([address_id, pay_method]):\n return http.HttpResponseForbidden('缺少必传参数')\n try:\n address = Address.objects.get(id=address_id)\n except Exception as e:\n logger.error(e)\n return http.HttpResponseForbidden('参数address_id错误')\n if pay_method not in [OrderInfo.PAY_METHODS_ENUM['CASH'], OrderInfo\n .PAY_METHODS_ENUM['ALIPAY']]:\n return http.HttpResponseForbidden('参数pay_method错误')\n with transaction.atomic():\n save_id = transaction.savepoint()\n user = request.user\n order_id = timezone.localtime().strftime('%Y%m%d%H%M%S'\n ) + '{:0>9d}'.format(user.id)\n try:\n order = OrderInfo.objects.create(order_id=order_id, user=\n user, address=address, total_count=0, total_amount=\n Decimal('0.00'), freight=Decimal(constants.\n ORDERS_FREIGHT_COST), pay_method=pay_method, status=\n OrderInfo.ORDER_STATUS_ENUM['UNPAID'] if pay_method ==\n OrderInfo.PAY_METHODS_ENUM['ALIPAY'] else OrderInfo.\n ORDER_STATUS_ENUM['UNSEND'])\n redis_conn = get_redis_connection('carts')\n redis_cart = redis_conn.hgetall('carts_%s' % user.id)\n redis_selected = redis_conn.smembers('selected_{}'.format(\n user.id))\n new_cart_dict = {}\n for sku_id in redis_selected:\n new_cart_dict[int(sku_id)] = int(redis_cart[sku_id])\n sku_ids = new_cart_dict.keys()\n for sku_id in sku_ids:\n while True:\n sku = SKU.objects.get(id=sku_id)\n sku_count = new_cart_dict[sku.id]\n origin_stock = sku.stock\n origin_sales = sku.sales\n if sku_count > origin_stock:\n transaction.savepoint_rollback(save_id)\n print(request.user, '库存不足')\n return http.JsonResponse({'code': RETCODE.\n STOCKERR, 'errmsg': err_msg[RETCODE.STOCKERR]})\n new_stock = origin_stock - sku_count\n new_sales = origin_sales + sku_count\n result = SKU.objects.filter(id=sku_id, stock=\n origin_stock).update(stock=new_stock, sales=\n new_sales)\n if result == 0:\n continue\n sku.spu.sales += sku_count\n sku.spu.save()\n OrderGoods.objects.create(order=order, sku=sku,\n count=sku_count, price=sku.price)\n order.total_count += sku_count\n order.total_amount += sku_count * sku.price\n break\n order.total_amount += order.freight\n order.save()\n except Exception as e:\n logger.error(e)\n transaction.savepoint_rollback(save_id)\n return http.JsonResponse({'code': RETCODE.ORDEROPERATEERR,\n 'errmsg': err_msg[RETCODE.ORDEROPERATEERR]})\n else:\n transaction.savepoint_commit(save_id)\n pl = redis_conn.pipeline()\n pl.hdel('carts_%s' % user.id, *redis_selected)\n pl.srem('selected_%s' % user.id, *redis_selected)\n try:\n pl.execute()\n except Exception as e:\n logger.error(e)\n return http.JsonResponse({'code': RETCODE.DUPLICATEORDERERR,\n 'errmsg': err_msg[RETCODE.DUPLICATEORDERERR]})\n else:\n return http.JsonResponse({'code': RETCODE.OK, 'errmsg': err_msg\n [RETCODE.OK], 'order_id': order_id})\n\n\nclass 
OrderSettlementView(LoginRequiredMixin, View):\n \"\"\"结算订单\"\"\"\n\n def get(self, request):\n \"\"\"查询并展示要结算的订单数据\"\"\"\n user = request.user\n try:\n addresses = Address.objects.filter(user=user, is_deleted=False)\n except Exception as e:\n logger.error(e)\n addresses = None\n redis_conn = get_redis_connection('carts')\n redis_cart = redis_conn.hgetall('carts_%s' % user.id)\n redis_selected = redis_conn.smembers('selected_{}'.format(user.id))\n new_cart_dict = {}\n for sku_id in redis_selected:\n new_cart_dict[int(sku_id)] = int(redis_cart[sku_id])\n sku_ids = new_cart_dict.keys()\n skus = SKU.objects.filter(id__in=sku_ids)\n total_count = 0\n total_amount = Decimal(0.0)\n for sku in skus:\n sku.count = new_cart_dict[sku.id]\n sku.amount = sku.price * sku.count\n total_count += sku.count\n total_amount += sku.amount\n context = {'addresses': addresses, 'skus': skus, 'total_count':\n total_count, 'total_amount': total_amount, 'freight': constants\n .ORDERS_FREIGHT_COST, 'payment_amount': Decimal(constants.\n ORDERS_FREIGHT_COST) + total_amount}\n return render(request, 'place_order.html', context)\n",
"step-3": "<mask token>\n\n\nclass GoodsCommentView(View):\n <mask token>\n <mask token>\n\n\nclass OrderCommentView(LoginRequiredMixin, View):\n \"\"\"订单商品评价\"\"\"\n\n def get(self, request):\n \"\"\"展示商品评价页面\"\"\"\n order_id = request.GET.get('order_id')\n try:\n OrderInfo.objects.get(order_id=order_id, user=request.user)\n except OrderInfo.DoesNotExist:\n return http.HttpResponseNotFound('订单不存在')\n try:\n uncomment_goods = OrderGoods.objects.filter(order_id=order_id,\n is_commented=False)\n except Exception as e:\n logger.error(e)\n return http.HttpResponseServerError('订单商品信息出错')\n uncomment_goods_list = []\n for goods in uncomment_goods:\n uncomment_goods_list.append({'order_id': goods.order.order_id,\n 'sku_id': goods.sku.id, 'name': goods.sku.name, 'price':\n str(goods.price), 'default_image_url': goods.sku.\n default_image.url, 'comment': goods.comment, 'score': goods\n .score, 'is_anonymous': str(goods.is_anonymous)})\n context = {'uncomment_goods_list': uncomment_goods_list}\n return render(request, 'goods_judge.html', context)\n\n def post(self, request):\n \"\"\"评价订单商品\"\"\"\n json_dict = json.loads(request.body.decode())\n order_id = json_dict.get('order_id')\n sku_id = json_dict.get('sku_id')\n score = json_dict.get('score')\n comment = json_dict.get('comment')\n is_anonymous = json_dict.get('is_anonymous')\n if not all([order_id, sku_id, score, comment]):\n return http.HttpResponseForbidden('缺少必传参数')\n try:\n OrderInfo.objects.filter(order_id=order_id, user=request.user,\n status=OrderInfo.ORDER_STATUS_ENUM['UNCOMMENT'])\n except OrderInfo.DoesNotExist:\n return http.HttpResponseForbidden('参数order_id错误')\n try:\n sku = SKU.objects.get(id=sku_id)\n except SKU.DoesNotExist:\n return http.HttpResponseForbidden('参数sku_id错误')\n if is_anonymous:\n if not isinstance(is_anonymous, bool):\n return http.HttpResponseForbidden('参数is_anonymous错误')\n with transaction.atomic():\n save_id = transaction.savepoint()\n try:\n OrderGoods.objects.filter(order_id=order_id, sku_id=sku_id,\n is_commented=False).update(comment=comment, score=score,\n is_anonymous=is_anonymous, is_commented=True)\n sku.comments += 1\n sku.save()\n sku.spu.comments += 1\n sku.spu.save()\n if OrderGoods.objects.filter(order_id=order_id,\n is_commented=False).count() == 0:\n OrderInfo.objects.filter(order_id=order_id).update(status\n =OrderInfo.ORDER_STATUS_ENUM['FINISHED'])\n except Exception as e:\n logger.error(e)\n transaction.savepoint_rollback(save_id)\n return http.JsonResponse({'code': RETCODE.COMMITMENTERR,\n 'errmsg': err_msg[RETCODE.COMMITMENTERR]})\n else:\n transaction.savepoint_commit(save_id)\n return http.JsonResponse({'code': RETCODE.OK, 'errmsg': err_msg[\n RETCODE.OK]})\n\n\nclass UserOrderInfoView(LoginRequiredMixin, View):\n \"\"\"我的订单\"\"\"\n\n def get(self, request, page_num):\n \"\"\"提供我的订单页面\"\"\"\n user = request.user\n orders = user.orderinfo_set.all().order_by('-create_time')\n for order in orders:\n order.status_name = OrderInfo.ORDER_STATUS_CHOICES[order.status - 1\n ][1]\n order.pay_method_name = OrderInfo.PAY_METHOD_CHOICES[order.\n pay_method - 1][1]\n order.sku_list = []\n order_goods = order.skus.all()\n for order_good in order_goods:\n sku = order_good.sku\n sku.count = order_good.count\n sku.amount = sku.price * sku.count\n order.sku_list.append(sku)\n page_num = int(page_num)\n try:\n paginator = Paginator(orders, constants.ORDERS_LIST_LIMIT)\n page_orders = paginator.page(page_num)\n total_page = paginator.num_pages\n except EmptyPage:\n return http.HttpResponseNotFound('订单不存在')\n 
context = {'page_orders': page_orders, 'total_page': total_page,\n 'page_num': page_num}\n return render(request, 'user_center_order.html', context)\n\n\nclass OrderSuccessView(LoginRequiredMixin, View):\n \"\"\"订单成功页面\"\"\"\n\n def get(self, request):\n \"\"\"提供订单成功页面\"\"\"\n order_id = request.GET.get('order_id')\n payment_amount = request.GET.get('payment_amount')\n pay_method = request.GET.get('pay_method')\n context = {'order_id': order_id, 'payment_amount': payment_amount,\n 'pay_method': pay_method}\n return render(request, 'order_success.html', context)\n\n\nclass OrderCommitView(LoginRequiredJsonMixin, View):\n \"\"\"提交订单\"\"\"\n\n def post(self, request):\n \"\"\"保存订单基本信息和订单商品信息\"\"\"\n json_dict = json.loads(request.body.decode())\n address_id = json_dict.get('address_id')\n pay_method = json_dict.get('pay_method')\n if not all([address_id, pay_method]):\n return http.HttpResponseForbidden('缺少必传参数')\n try:\n address = Address.objects.get(id=address_id)\n except Exception as e:\n logger.error(e)\n return http.HttpResponseForbidden('参数address_id错误')\n if pay_method not in [OrderInfo.PAY_METHODS_ENUM['CASH'], OrderInfo\n .PAY_METHODS_ENUM['ALIPAY']]:\n return http.HttpResponseForbidden('参数pay_method错误')\n with transaction.atomic():\n save_id = transaction.savepoint()\n user = request.user\n order_id = timezone.localtime().strftime('%Y%m%d%H%M%S'\n ) + '{:0>9d}'.format(user.id)\n try:\n order = OrderInfo.objects.create(order_id=order_id, user=\n user, address=address, total_count=0, total_amount=\n Decimal('0.00'), freight=Decimal(constants.\n ORDERS_FREIGHT_COST), pay_method=pay_method, status=\n OrderInfo.ORDER_STATUS_ENUM['UNPAID'] if pay_method ==\n OrderInfo.PAY_METHODS_ENUM['ALIPAY'] else OrderInfo.\n ORDER_STATUS_ENUM['UNSEND'])\n redis_conn = get_redis_connection('carts')\n redis_cart = redis_conn.hgetall('carts_%s' % user.id)\n redis_selected = redis_conn.smembers('selected_{}'.format(\n user.id))\n new_cart_dict = {}\n for sku_id in redis_selected:\n new_cart_dict[int(sku_id)] = int(redis_cart[sku_id])\n sku_ids = new_cart_dict.keys()\n for sku_id in sku_ids:\n while True:\n sku = SKU.objects.get(id=sku_id)\n sku_count = new_cart_dict[sku.id]\n origin_stock = sku.stock\n origin_sales = sku.sales\n if sku_count > origin_stock:\n transaction.savepoint_rollback(save_id)\n print(request.user, '库存不足')\n return http.JsonResponse({'code': RETCODE.\n STOCKERR, 'errmsg': err_msg[RETCODE.STOCKERR]})\n new_stock = origin_stock - sku_count\n new_sales = origin_sales + sku_count\n result = SKU.objects.filter(id=sku_id, stock=\n origin_stock).update(stock=new_stock, sales=\n new_sales)\n if result == 0:\n continue\n sku.spu.sales += sku_count\n sku.spu.save()\n OrderGoods.objects.create(order=order, sku=sku,\n count=sku_count, price=sku.price)\n order.total_count += sku_count\n order.total_amount += sku_count * sku.price\n break\n order.total_amount += order.freight\n order.save()\n except Exception as e:\n logger.error(e)\n transaction.savepoint_rollback(save_id)\n return http.JsonResponse({'code': RETCODE.ORDEROPERATEERR,\n 'errmsg': err_msg[RETCODE.ORDEROPERATEERR]})\n else:\n transaction.savepoint_commit(save_id)\n pl = redis_conn.pipeline()\n pl.hdel('carts_%s' % user.id, *redis_selected)\n pl.srem('selected_%s' % user.id, *redis_selected)\n try:\n pl.execute()\n except Exception as e:\n logger.error(e)\n return http.JsonResponse({'code': RETCODE.DUPLICATEORDERERR,\n 'errmsg': err_msg[RETCODE.DUPLICATEORDERERR]})\n else:\n return http.JsonResponse({'code': RETCODE.OK, 'errmsg': 
err_msg\n [RETCODE.OK], 'order_id': order_id})\n\n\nclass OrderSettlementView(LoginRequiredMixin, View):\n \"\"\"结算订单\"\"\"\n\n def get(self, request):\n \"\"\"查询并展示要结算的订单数据\"\"\"\n user = request.user\n try:\n addresses = Address.objects.filter(user=user, is_deleted=False)\n except Exception as e:\n logger.error(e)\n addresses = None\n redis_conn = get_redis_connection('carts')\n redis_cart = redis_conn.hgetall('carts_%s' % user.id)\n redis_selected = redis_conn.smembers('selected_{}'.format(user.id))\n new_cart_dict = {}\n for sku_id in redis_selected:\n new_cart_dict[int(sku_id)] = int(redis_cart[sku_id])\n sku_ids = new_cart_dict.keys()\n skus = SKU.objects.filter(id__in=sku_ids)\n total_count = 0\n total_amount = Decimal(0.0)\n for sku in skus:\n sku.count = new_cart_dict[sku.id]\n sku.amount = sku.price * sku.count\n total_count += sku.count\n total_amount += sku.amount\n context = {'addresses': addresses, 'skus': skus, 'total_count':\n total_count, 'total_amount': total_amount, 'freight': constants\n .ORDERS_FREIGHT_COST, 'payment_amount': Decimal(constants.\n ORDERS_FREIGHT_COST) + total_amount}\n return render(request, 'place_order.html', context)\n",
"step-4": "<mask token>\n\n\nclass GoodsCommentView(View):\n \"\"\"订单商品评价信息\"\"\"\n\n def get(self, request, sku_id):\n order_goods_list = OrderGoods.objects.filter(sku_id=sku_id,\n is_commented=True).order_by('-create_time')[:constants.\n COMMENTS_LIST_LIMIT]\n comment_list = []\n for order_goods in order_goods_list:\n username = order_goods.order.user.username\n comment_list.append({'username': username[0] + '***' + username\n [-1] if order_goods.is_anonymous else username, 'comment':\n order_goods.comment, 'score': order_goods.score})\n return http.JsonResponse({'code': RETCODE.OK, 'errmsg': err_msg[\n RETCODE.OK], 'comment_list': comment_list})\n\n\nclass OrderCommentView(LoginRequiredMixin, View):\n \"\"\"订单商品评价\"\"\"\n\n def get(self, request):\n \"\"\"展示商品评价页面\"\"\"\n order_id = request.GET.get('order_id')\n try:\n OrderInfo.objects.get(order_id=order_id, user=request.user)\n except OrderInfo.DoesNotExist:\n return http.HttpResponseNotFound('订单不存在')\n try:\n uncomment_goods = OrderGoods.objects.filter(order_id=order_id,\n is_commented=False)\n except Exception as e:\n logger.error(e)\n return http.HttpResponseServerError('订单商品信息出错')\n uncomment_goods_list = []\n for goods in uncomment_goods:\n uncomment_goods_list.append({'order_id': goods.order.order_id,\n 'sku_id': goods.sku.id, 'name': goods.sku.name, 'price':\n str(goods.price), 'default_image_url': goods.sku.\n default_image.url, 'comment': goods.comment, 'score': goods\n .score, 'is_anonymous': str(goods.is_anonymous)})\n context = {'uncomment_goods_list': uncomment_goods_list}\n return render(request, 'goods_judge.html', context)\n\n def post(self, request):\n \"\"\"评价订单商品\"\"\"\n json_dict = json.loads(request.body.decode())\n order_id = json_dict.get('order_id')\n sku_id = json_dict.get('sku_id')\n score = json_dict.get('score')\n comment = json_dict.get('comment')\n is_anonymous = json_dict.get('is_anonymous')\n if not all([order_id, sku_id, score, comment]):\n return http.HttpResponseForbidden('缺少必传参数')\n try:\n OrderInfo.objects.filter(order_id=order_id, user=request.user,\n status=OrderInfo.ORDER_STATUS_ENUM['UNCOMMENT'])\n except OrderInfo.DoesNotExist:\n return http.HttpResponseForbidden('参数order_id错误')\n try:\n sku = SKU.objects.get(id=sku_id)\n except SKU.DoesNotExist:\n return http.HttpResponseForbidden('参数sku_id错误')\n if is_anonymous:\n if not isinstance(is_anonymous, bool):\n return http.HttpResponseForbidden('参数is_anonymous错误')\n with transaction.atomic():\n save_id = transaction.savepoint()\n try:\n OrderGoods.objects.filter(order_id=order_id, sku_id=sku_id,\n is_commented=False).update(comment=comment, score=score,\n is_anonymous=is_anonymous, is_commented=True)\n sku.comments += 1\n sku.save()\n sku.spu.comments += 1\n sku.spu.save()\n if OrderGoods.objects.filter(order_id=order_id,\n is_commented=False).count() == 0:\n OrderInfo.objects.filter(order_id=order_id).update(status\n =OrderInfo.ORDER_STATUS_ENUM['FINISHED'])\n except Exception as e:\n logger.error(e)\n transaction.savepoint_rollback(save_id)\n return http.JsonResponse({'code': RETCODE.COMMITMENTERR,\n 'errmsg': err_msg[RETCODE.COMMITMENTERR]})\n else:\n transaction.savepoint_commit(save_id)\n return http.JsonResponse({'code': RETCODE.OK, 'errmsg': err_msg[\n RETCODE.OK]})\n\n\nclass UserOrderInfoView(LoginRequiredMixin, View):\n \"\"\"我的订单\"\"\"\n\n def get(self, request, page_num):\n \"\"\"提供我的订单页面\"\"\"\n user = request.user\n orders = user.orderinfo_set.all().order_by('-create_time')\n for order in orders:\n order.status_name = 
OrderInfo.ORDER_STATUS_CHOICES[order.status - 1\n ][1]\n order.pay_method_name = OrderInfo.PAY_METHOD_CHOICES[order.\n pay_method - 1][1]\n order.sku_list = []\n order_goods = order.skus.all()\n for order_good in order_goods:\n sku = order_good.sku\n sku.count = order_good.count\n sku.amount = sku.price * sku.count\n order.sku_list.append(sku)\n page_num = int(page_num)\n try:\n paginator = Paginator(orders, constants.ORDERS_LIST_LIMIT)\n page_orders = paginator.page(page_num)\n total_page = paginator.num_pages\n except EmptyPage:\n return http.HttpResponseNotFound('订单不存在')\n context = {'page_orders': page_orders, 'total_page': total_page,\n 'page_num': page_num}\n return render(request, 'user_center_order.html', context)\n\n\nclass OrderSuccessView(LoginRequiredMixin, View):\n \"\"\"订单成功页面\"\"\"\n\n def get(self, request):\n \"\"\"提供订单成功页面\"\"\"\n order_id = request.GET.get('order_id')\n payment_amount = request.GET.get('payment_amount')\n pay_method = request.GET.get('pay_method')\n context = {'order_id': order_id, 'payment_amount': payment_amount,\n 'pay_method': pay_method}\n return render(request, 'order_success.html', context)\n\n\nclass OrderCommitView(LoginRequiredJsonMixin, View):\n \"\"\"提交订单\"\"\"\n\n def post(self, request):\n \"\"\"保存订单基本信息和订单商品信息\"\"\"\n json_dict = json.loads(request.body.decode())\n address_id = json_dict.get('address_id')\n pay_method = json_dict.get('pay_method')\n if not all([address_id, pay_method]):\n return http.HttpResponseForbidden('缺少必传参数')\n try:\n address = Address.objects.get(id=address_id)\n except Exception as e:\n logger.error(e)\n return http.HttpResponseForbidden('参数address_id错误')\n if pay_method not in [OrderInfo.PAY_METHODS_ENUM['CASH'], OrderInfo\n .PAY_METHODS_ENUM['ALIPAY']]:\n return http.HttpResponseForbidden('参数pay_method错误')\n with transaction.atomic():\n save_id = transaction.savepoint()\n user = request.user\n order_id = timezone.localtime().strftime('%Y%m%d%H%M%S'\n ) + '{:0>9d}'.format(user.id)\n try:\n order = OrderInfo.objects.create(order_id=order_id, user=\n user, address=address, total_count=0, total_amount=\n Decimal('0.00'), freight=Decimal(constants.\n ORDERS_FREIGHT_COST), pay_method=pay_method, status=\n OrderInfo.ORDER_STATUS_ENUM['UNPAID'] if pay_method ==\n OrderInfo.PAY_METHODS_ENUM['ALIPAY'] else OrderInfo.\n ORDER_STATUS_ENUM['UNSEND'])\n redis_conn = get_redis_connection('carts')\n redis_cart = redis_conn.hgetall('carts_%s' % user.id)\n redis_selected = redis_conn.smembers('selected_{}'.format(\n user.id))\n new_cart_dict = {}\n for sku_id in redis_selected:\n new_cart_dict[int(sku_id)] = int(redis_cart[sku_id])\n sku_ids = new_cart_dict.keys()\n for sku_id in sku_ids:\n while True:\n sku = SKU.objects.get(id=sku_id)\n sku_count = new_cart_dict[sku.id]\n origin_stock = sku.stock\n origin_sales = sku.sales\n if sku_count > origin_stock:\n transaction.savepoint_rollback(save_id)\n print(request.user, '库存不足')\n return http.JsonResponse({'code': RETCODE.\n STOCKERR, 'errmsg': err_msg[RETCODE.STOCKERR]})\n new_stock = origin_stock - sku_count\n new_sales = origin_sales + sku_count\n result = SKU.objects.filter(id=sku_id, stock=\n origin_stock).update(stock=new_stock, sales=\n new_sales)\n if result == 0:\n continue\n sku.spu.sales += sku_count\n sku.spu.save()\n OrderGoods.objects.create(order=order, sku=sku,\n count=sku_count, price=sku.price)\n order.total_count += sku_count\n order.total_amount += sku_count * sku.price\n break\n order.total_amount += order.freight\n order.save()\n except Exception as e:\n 
logger.error(e)\n transaction.savepoint_rollback(save_id)\n return http.JsonResponse({'code': RETCODE.ORDEROPERATEERR,\n 'errmsg': err_msg[RETCODE.ORDEROPERATEERR]})\n else:\n transaction.savepoint_commit(save_id)\n pl = redis_conn.pipeline()\n pl.hdel('carts_%s' % user.id, *redis_selected)\n pl.srem('selected_%s' % user.id, *redis_selected)\n try:\n pl.execute()\n except Exception as e:\n logger.error(e)\n return http.JsonResponse({'code': RETCODE.DUPLICATEORDERERR,\n 'errmsg': err_msg[RETCODE.DUPLICATEORDERERR]})\n else:\n return http.JsonResponse({'code': RETCODE.OK, 'errmsg': err_msg\n [RETCODE.OK], 'order_id': order_id})\n\n\nclass OrderSettlementView(LoginRequiredMixin, View):\n \"\"\"结算订单\"\"\"\n\n def get(self, request):\n \"\"\"查询并展示要结算的订单数据\"\"\"\n user = request.user\n try:\n addresses = Address.objects.filter(user=user, is_deleted=False)\n except Exception as e:\n logger.error(e)\n addresses = None\n redis_conn = get_redis_connection('carts')\n redis_cart = redis_conn.hgetall('carts_%s' % user.id)\n redis_selected = redis_conn.smembers('selected_{}'.format(user.id))\n new_cart_dict = {}\n for sku_id in redis_selected:\n new_cart_dict[int(sku_id)] = int(redis_cart[sku_id])\n sku_ids = new_cart_dict.keys()\n skus = SKU.objects.filter(id__in=sku_ids)\n total_count = 0\n total_amount = Decimal(0.0)\n for sku in skus:\n sku.count = new_cart_dict[sku.id]\n sku.amount = sku.price * sku.count\n total_count += sku.count\n total_amount += sku.amount\n context = {'addresses': addresses, 'skus': skus, 'total_count':\n total_count, 'total_amount': total_amount, 'freight': constants\n .ORDERS_FREIGHT_COST, 'payment_amount': Decimal(constants.\n ORDERS_FREIGHT_COST) + total_amount}\n return render(request, 'place_order.html', context)\n",
"step-5": "from django.core.paginator import Paginator, EmptyPage\nfrom django.shortcuts import render\nfrom django.views import View\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom logging import getLogger\nfrom django_redis import get_redis_connection\nfrom decimal import Decimal\nimport json\nfrom django import http\nfrom django.utils import timezone\nfrom django.db import transaction\n\nfrom users.models import Address\nfrom goods.models import SKU\nfrom meiduo_mall.utils import constants\nfrom meiduo_mall.utils.auth_backend import LoginRequiredJsonMixin\nfrom .models import OrderInfo, OrderGoods\nfrom meiduo_mall.utils.response_code import RETCODE, err_msg\n\n\nlogger = getLogger('django')\n\n\nclass GoodsCommentView(View):\n \"\"\"订单商品评价信息\"\"\"\n def get(self, request, sku_id):\n # 获取被评价的订单商品信息\n order_goods_list = OrderGoods.objects.filter(sku_id=sku_id, is_commented=True).order_by('-create_time')[:constants.COMMENTS_LIST_LIMIT]\n # 序列化\n comment_list = []\n for order_goods in order_goods_list:\n username = order_goods.order.user.username\n comment_list.append({\n 'username': username[0] + '***' + username[-1] if order_goods.is_anonymous else username,\n 'comment': order_goods.comment,\n 'score': order_goods.score,\n })\n return http.JsonResponse({'code': RETCODE.OK, 'errmsg': err_msg[RETCODE.OK], 'comment_list': comment_list})\n\n\nclass OrderCommentView(LoginRequiredMixin, View):\n \"\"\"订单商品评价\"\"\"\n def get(self, request):\n \"\"\"展示商品评价页面\"\"\"\n # 接收参数\n order_id = request.GET.get('order_id')\n # 校验参数\n try:\n OrderInfo.objects.get(order_id=order_id, user=request.user)\n except OrderInfo.DoesNotExist:\n return http.HttpResponseNotFound('订单不存在')\n # 查询订单中未被评价的商品信息\n try:\n uncomment_goods = OrderGoods.objects.filter(order_id=order_id, is_commented=False)\n except Exception as e:\n logger.error(e)\n return http.HttpResponseServerError('订单商品信息出错')\n # 构造待评价商品数据\n uncomment_goods_list = []\n for goods in uncomment_goods:\n uncomment_goods_list.append({\n 'order_id': goods.order.order_id,\n 'sku_id': goods.sku.id,\n 'name': goods.sku.name,\n 'price': str(goods.price),\n 'default_image_url': goods.sku.default_image.url,\n 'comment': goods.comment,\n 'score': goods.score,\n 'is_anonymous': str(goods.is_anonymous),\n })\n # 渲染模板\n context = {\n 'uncomment_goods_list': uncomment_goods_list\n }\n return render(request, 'goods_judge.html', context)\n\n def post(self, request):\n \"\"\"评价订单商品\"\"\"\n # 接收参数\n json_dict = json.loads(request.body.decode())\n order_id = json_dict.get('order_id')\n sku_id = json_dict.get('sku_id')\n score = json_dict.get('score')\n comment = json_dict.get('comment')\n is_anonymous = json_dict.get('is_anonymous')\n # 校验参数\n if not all([order_id, sku_id, score, comment]):\n return http.HttpResponseForbidden('缺少必传参数')\n try:\n OrderInfo.objects.filter(order_id=order_id, user=request.user, status=OrderInfo.ORDER_STATUS_ENUM['UNCOMMENT'])\n except OrderInfo.DoesNotExist:\n return http.HttpResponseForbidden('参数order_id错误')\n try:\n sku = SKU.objects.get(id=sku_id)\n except SKU.DoesNotExist:\n return http.HttpResponseForbidden('参数sku_id错误')\n if is_anonymous:\n if not isinstance(is_anonymous, bool):\n return http.HttpResponseForbidden('参数is_anonymous错误')\n # 以下操作数据库的操作,开启作为一次事务\n with transaction.atomic():\n # 在数据库操作前,创建保存点(数据库最初的状态)\n save_id = transaction.savepoint()\n try:\n # 保存订单商品评价数据\n OrderGoods.objects.filter(order_id=order_id, sku_id=sku_id, is_commented=False).update(\n comment=comment,\n score=score,\n is_anonymous=is_anonymous,\n 
is_commented=True\n )\n # 累计评论数据\n sku.comments += 1\n sku.save()\n sku.spu.comments += 1\n sku.spu.save()\n # 如果所有订单商品都已评价,则修改订单状态为已完成\n if OrderGoods.objects.filter(order_id=order_id, is_commented=False).count() == 0:\n OrderInfo.objects.filter(order_id=order_id).update(status=OrderInfo.ORDER_STATUS_ENUM['FINISHED'])\n # 对于未知的数据库错误,暴力回滚\n except Exception as e:\n logger.error(e)\n transaction.savepoint_rollback(save_id)\n return http.JsonResponse({'code': RETCODE.COMMITMENTERR, 'errmsg': err_msg[RETCODE.COMMITMENTERR]})\n else:\n # 提交事务\n transaction.savepoint_commit(save_id)\n return http.JsonResponse({'code': RETCODE.OK, 'errmsg': err_msg[RETCODE.OK]})\n\n\nclass UserOrderInfoView(LoginRequiredMixin, View):\n \"\"\"我的订单\"\"\"\n\n def get(self, request, page_num):\n \"\"\"提供我的订单页面\"\"\"\n user = request.user\n # 查询订单\n orders = user.orderinfo_set.all().order_by(\"-create_time\")\n # 遍历所有订单\n for order in orders:\n # 绑定订单状态\n order.status_name = OrderInfo.ORDER_STATUS_CHOICES[order.status-1][1]\n # 绑定支付方式\n order.pay_method_name = OrderInfo.PAY_METHOD_CHOICES[order.pay_method-1][1]\n order.sku_list = []\n # 查询订单商品\n order_goods = order.skus.all()\n # 遍历订单商品\n for order_good in order_goods:\n sku = order_good.sku\n sku.count = order_good.count\n sku.amount = sku.price * sku.count\n order.sku_list.append(sku)\n # 分页\n page_num = int(page_num)\n try:\n paginator = Paginator(orders, constants.ORDERS_LIST_LIMIT)\n page_orders = paginator.page(page_num)\n total_page = paginator.num_pages\n except EmptyPage:\n return http.HttpResponseNotFound('订单不存在')\n context = {\n \"page_orders\": page_orders,\n 'total_page': total_page,\n 'page_num': page_num,\n }\n return render(request, \"user_center_order.html\", context)\n\n\nclass OrderSuccessView(LoginRequiredMixin, View):\n \"\"\"订单成功页面\"\"\"\n def get(self, request):\n \"\"\"提供订单成功页面\"\"\"\n # 接受参数\n order_id = request.GET.get('order_id')\n payment_amount = request.GET.get('payment_amount')\n pay_method = request.GET.get('pay_method')\n # 构造上下文\n context = {\n 'order_id': order_id,\n 'payment_amount': payment_amount,\n 'pay_method': pay_method\n }\n return render(request, 'order_success.html', context)\n\n\nclass OrderCommitView(LoginRequiredJsonMixin, View):\n \"\"\"提交订单\"\"\"\n def post(self, request):\n \"\"\"保存订单基本信息和订单商品信息\"\"\"\n # 接收参数\n json_dict = json.loads(request.body.decode())\n address_id = json_dict.get('address_id')\n pay_method = json_dict.get('pay_method')\n # 校验参数\n if not all([address_id, pay_method]):\n return http.HttpResponseForbidden('缺少必传参数')\n # 判断address_id是否合法\n try:\n address = Address.objects.get(id=address_id)\n except Exception as e:\n logger.error(e)\n return http.HttpResponseForbidden('参数address_id错误')\n # 判断pay_method是否合法\n if pay_method not in [OrderInfo.PAY_METHODS_ENUM['CASH'], OrderInfo.PAY_METHODS_ENUM['ALIPAY']]:\n return http.HttpResponseForbidden('参数pay_method错误')\n # 以下操作数据库的操作,开启作为一次事务\n with transaction.atomic():\n # 在数据库操作前,创建保存点(数据库最初的状态)\n save_id = transaction.savepoint()\n # 获取登录用户\n user = request.user\n # 获取订单编号:时间 + user_id == '2020123113041200000001'\n order_id = timezone.localtime().strftime('%Y%m%d%H%M%S') + '{:0>9d}'.format(user.id)\n try:\n # 保存订单基本信息(一)\n order = OrderInfo.objects.create(\n order_id=order_id,\n user=user,\n address=address,\n total_count=0, # 仅用来初始化,后面根据订单中的商品进行更新\n total_amount=Decimal('0.00'), # 仅用来初始化,后面根据订单中的商品进行更新\n freight=Decimal(constants.ORDERS_FREIGHT_COST),\n pay_method=pay_method,\n # 如果支付方式为支付宝,支付状态为未付款,如果支付方式是货到付款,支付状态为未发货\n 
status=OrderInfo.ORDER_STATUS_ENUM['UNPAID'] if pay_method == OrderInfo.PAY_METHODS_ENUM['ALIPAY'] else OrderInfo.ORDER_STATUS_ENUM['UNSEND']\n )\n\n # 保存订单商品信息(多)\n # 查询redis中购物车被勾选的商品\n redis_conn = get_redis_connection('carts')\n # 购物车中商品的数量\n redis_cart = redis_conn.hgetall('carts_%s' % user.id)\n # 被勾选的商品sku_id\n redis_selected = redis_conn.smembers('selected_{}'.format(user.id))\n # 构造购物车中被勾选商品的数据 new_cart_dict,{sku_id: 2, sku_id: 1}\n new_cart_dict = {}\n for sku_id in redis_selected:\n new_cart_dict[int(sku_id)] = int(redis_cart[sku_id])\n # 获取被勾选商品的sku_id\n sku_ids = new_cart_dict.keys()\n for sku_id in sku_ids:\n # 每个商品都有多次下单的机会,直到库存不足\n while True:\n # 读取商品的sku信息\n sku = SKU.objects.get(id=sku_id) # 查询商品和库存信息时,不能出现缓存,所有不用 filter(id__in=sku_ids)\n # 获取当前被勾选商品的库存\n sku_count = new_cart_dict[sku.id]\n # 获取sku商品原始的库存stock和销量sales\n origin_stock = sku.stock\n origin_sales = sku.sales\n # # 模型网络延迟\n # import time\n # time.sleep(5)\n # 如果订单中的商品数量大于库存,响应库存不足\n if sku_count > origin_stock:\n # 库存不足,回滚\n transaction.savepoint_rollback(save_id)\n print(request.user, '库存不足')\n return http.JsonResponse({'code': RETCODE.STOCKERR, 'errmsg': err_msg[RETCODE.STOCKERR]})\n # 如果库存满足,SKU 减库存,加销量\n new_stock = origin_stock - sku_count\n new_sales = origin_sales + sku_count\n result = SKU.objects.filter(id=sku_id, stock=origin_stock).update(stock=new_stock, sales=new_sales)\n # 如果在更新数据时,原始数据变化了,那么返回0,表示有资源抢夺\n if result == 0:\n # 由于其他用户提前对该商品完成下单,该商品此次下单失败,重新进行下单\n continue\n # SPU 加销量\n sku.spu.sales += sku_count\n sku.spu.save()\n OrderGoods.objects.create(\n order=order,\n sku=sku,\n count=sku_count,\n price=sku.price,\n )\n # 累加订单中商品的总价和总数量\n order.total_count += sku_count\n order.total_amount += (sku_count * sku.price)\n # 该件商品下单成功,退出循环\n break\n # 添加邮费和保存订单信息\n order.total_amount += order.freight\n order.save()\n # 对于未知的数据库错误,暴力回滚\n except Exception as e:\n logger.error(e)\n transaction.savepoint_rollback(save_id)\n return http.JsonResponse({'code': RETCODE.ORDEROPERATEERR, 'errmsg': err_msg[RETCODE.ORDEROPERATEERR]})\n else:\n # 提交事务\n transaction.savepoint_commit(save_id)\n # 清除购物车中已结算的商品\n pl = redis_conn.pipeline()\n pl.hdel('carts_%s' % user.id, *redis_selected)\n pl.srem('selected_%s' % user.id, *redis_selected)\n try:\n pl.execute()\n except Exception as e:\n logger.error(e)\n return http.JsonResponse({'code': RETCODE.DUPLICATEORDERERR, 'errmsg': err_msg[RETCODE.DUPLICATEORDERERR]})\n else:\n # 返回响应\n return http.JsonResponse({'code': RETCODE.OK, 'errmsg': err_msg[RETCODE.OK], 'order_id': order_id})\n\n\nclass OrderSettlementView(LoginRequiredMixin, View):\n \"\"\"结算订单\"\"\"\n def get(self, request):\n \"\"\"查询并展示要结算的订单数据\"\"\"\n # 获取登录用户\n user = request.user\n # 查询用户收货地址,没有被删除的收货地址\n try:\n addresses = Address.objects.filter(user=user, is_deleted=False)\n except Exception as e:\n logger.error(e)\n # 如果没有查询出收货地址,可以去编辑收货地址\n addresses = None\n # 查询redis中购物车被勾选的商品\n redis_conn = get_redis_connection('carts')\n # 购物车中商品的数量\n redis_cart = redis_conn.hgetall('carts_%s' % user.id)\n # 被勾选的商品sku_id\n redis_selected = redis_conn.smembers('selected_{}'.format(user.id))\n # 构造购物车中被勾选商品的数据 new_cart_dict,{sku_id: 2, sku_id: 1}\n new_cart_dict = {}\n for sku_id in redis_selected:\n new_cart_dict[int(sku_id)] = int(redis_cart[sku_id])\n # 获取被勾选商品的sku_id\n sku_ids = new_cart_dict.keys()\n # 获取被勾选商品的sku信息\n skus = SKU.objects.filter(id__in=sku_ids)\n # 商品总数量与商品总金额\n total_count = 0\n total_amount = Decimal(0.00) # 或 Decimal('0.00')\n for sku in skus:\n # 遍历skus,给每个sku补充count(数量)和amount(小计)字段\n 
sku.count = new_cart_dict[sku.id]\n sku.amount = sku.price * sku.count # Decimal类型\n # 累加商品数量和金额\n total_count += sku.count\n total_amount += sku.amount\n # 构造上下文\n context = {\n 'addresses': addresses,\n 'skus': skus,\n 'total_count': total_count,\n 'total_amount': total_amount,\n 'freight': constants.ORDERS_FREIGHT_COST, # 运费\n 'payment_amount': Decimal(constants.ORDERS_FREIGHT_COST) + total_amount,\n }\n return render(request, 'place_order.html', context)\n",
"step-ids": [
9,
16,
17,
19,
22
]
}
|
[
9,
16,
17,
19,
22
] |
<|reserved_special_token_0|>
class InflationView(TemplateView):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class InflationView(TemplateView):
<|reserved_special_token_0|>
def get(self, request, *args, **kwargs):
context = {}
file_path = os.path.join(settings.BASE_DIR, 'inflation_russia.csv')
with open(file_path, newline='', encoding='utf-8') as csvfile:
reader = csv.reader(csvfile, delimiter=';')
context['head'] = next(reader)
context['data'] = []
for row in reader:
context['data'].append(row)
return render(request, self.template_name, context)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class InflationView(TemplateView):
template_name = 'inflation.html'
def get(self, request, *args, **kwargs):
context = {}
file_path = os.path.join(settings.BASE_DIR, 'inflation_russia.csv')
with open(file_path, newline='', encoding='utf-8') as csvfile:
reader = csv.reader(csvfile, delimiter=';')
context['head'] = next(reader)
context['data'] = []
for row in reader:
context['data'].append(row)
return render(request, self.template_name, context)
<|reserved_special_token_1|>
from django.shortcuts import render
from django.views.generic import TemplateView
from django.conf import settings
import os, csv
class InflationView(TemplateView):
template_name = 'inflation.html'
def get(self, request, *args, **kwargs):
context = {}
file_path = os.path.join(settings.BASE_DIR, 'inflation_russia.csv')
with open(file_path, newline='', encoding='utf-8') as csvfile:
reader = csv.reader(csvfile, delimiter=';')
context['head'] = next(reader)
context['data'] = []
for row in reader:
context['data'].append(row)
return render(request, self.template_name, context)
<|reserved_special_token_1|>
from django.shortcuts import render
from django.views.generic import TemplateView
from django.conf import settings
import os, csv
class InflationView(TemplateView):
template_name = 'inflation.html'
def get(self, request, *args, **kwargs):
        # read the csv file and fill the context
context = {}
file_path = os.path.join(settings.BASE_DIR, 'inflation_russia.csv')
with open(file_path, newline='', encoding='utf-8') as csvfile:
reader = csv.reader(csvfile, delimiter=';')
context['head'] = next(reader)
context['data'] = []
for row in reader:
context['data'].append(row)
return render(request, self.template_name, context)
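# A minimal wiring sketch (an assumption -- urls.py is not part of this row)
# showing how the CSV-backed page is typically made reachable:
#
#     from django.urls import path
#     from .views import InflationView
#
#     urlpatterns = [
#         path('inflation/', InflationView.as_view(), name='inflation'),
#     ]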
|
flexible
|
{
"blob_id": "6645887b25d75f4657fb231b80d8ebdec2bac7c9",
"index": 8718,
"step-1": "<mask token>\n\n\nclass InflationView(TemplateView):\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass InflationView(TemplateView):\n <mask token>\n\n def get(self, request, *args, **kwargs):\n context = {}\n file_path = os.path.join(settings.BASE_DIR, 'inflation_russia.csv')\n with open(file_path, newline='', encoding='utf-8') as csvfile:\n reader = csv.reader(csvfile, delimiter=';')\n context['head'] = next(reader)\n context['data'] = []\n for row in reader:\n context['data'].append(row)\n return render(request, self.template_name, context)\n",
"step-3": "<mask token>\n\n\nclass InflationView(TemplateView):\n template_name = 'inflation.html'\n\n def get(self, request, *args, **kwargs):\n context = {}\n file_path = os.path.join(settings.BASE_DIR, 'inflation_russia.csv')\n with open(file_path, newline='', encoding='utf-8') as csvfile:\n reader = csv.reader(csvfile, delimiter=';')\n context['head'] = next(reader)\n context['data'] = []\n for row in reader:\n context['data'].append(row)\n return render(request, self.template_name, context)\n",
"step-4": "from django.shortcuts import render\nfrom django.views.generic import TemplateView\nfrom django.conf import settings\nimport os, csv\n\n\nclass InflationView(TemplateView):\n template_name = 'inflation.html'\n\n def get(self, request, *args, **kwargs):\n context = {}\n file_path = os.path.join(settings.BASE_DIR, 'inflation_russia.csv')\n with open(file_path, newline='', encoding='utf-8') as csvfile:\n reader = csv.reader(csvfile, delimiter=';')\n context['head'] = next(reader)\n context['data'] = []\n for row in reader:\n context['data'].append(row)\n return render(request, self.template_name, context)\n",
"step-5": "from django.shortcuts import render\nfrom django.views.generic import TemplateView\nfrom django.conf import settings\nimport os, csv\n\n\nclass InflationView(TemplateView):\n template_name = 'inflation.html'\n\n def get(self, request, *args, **kwargs):\n # чтение csv-файла и заполнение контекста\n context = {}\n file_path = os.path.join(settings.BASE_DIR, 'inflation_russia.csv')\n with open(file_path, newline='', encoding='utf-8') as csvfile:\n reader = csv.reader(csvfile, delimiter=';')\n context['head'] = next(reader)\n context['data'] = []\n for row in reader:\n context['data'].append(row)\n return render(request, self.template_name, context)\n\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
print('Introduce un valor par:')
<|reserved_special_token_0|>
print('Introduce un valor impar:')
<|reserved_special_token_0|>
if numpar == numimp * 2:
print(numpar, ' es el doble que ', numimp, '.')
else:
print(numpar, ' no es el doble que ', numimp, '.')
<|reserved_special_token_1|>
print('Introduce un valor par:')
numpar = int(input())
print('Introduce un valor impar:')
numimp = int(input())
if numpar == numimp * 2:
print(numpar, ' es el doble que ', numimp, '.')
else:
print(numpar, ' no es el doble que ', numimp, '.')
<|reserved_special_token_1|>
#Practice 9 Conditionals, Exercise 2:
print("Introduce un valor par:")
numpar=int(input())
print("Introduce un valor impar:")
numimp=int(input())
if numpar==numimp*2:
print(numpar," es el doble que ",numimp,".")
else:
print(numpar," no es el doble que ",numimp,".")
|
flexible
|
{
"blob_id": "8ad5f3e5f73eae191a3fe9bc20f73b4bfcfedc8c",
"index": 4884,
"step-1": "<mask token>\n",
"step-2": "print('Introduce un valor par:')\n<mask token>\nprint('Introduce un valor impar:')\n<mask token>\nif numpar == numimp * 2:\n print(numpar, ' es el doble que ', numimp, '.')\nelse:\n print(numpar, ' no es el doble que ', numimp, '.')\n",
"step-3": "print('Introduce un valor par:')\nnumpar = int(input())\nprint('Introduce un valor impar:')\nnumimp = int(input())\nif numpar == numimp * 2:\n print(numpar, ' es el doble que ', numimp, '.')\nelse:\n print(numpar, ' no es el doble que ', numimp, '.')\n",
"step-4": "#Pràctica 9 Condicionals, Exercici 2:\nprint(\"Introduce un valor par:\")\nnumpar=int(input())\nprint(\"Introduce un valor impar:\")\nnumimp=int(input())\nif numpar==numimp*2:\n print(numpar,\" es el doble que \",numimp,\".\")\nelse:\n print(numpar,\" no es el doble que \",numimp,\".\")",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def visualize_data(filename, width=72, height=48, depth=3, cnn_model=None):
"""
When cnn_model is specified it'll show what the cnn_model predicts (red)
as opposed to what inputs it actually received (green)
"""
data = pd.DataFrame.from_csv(filename)
for i in range(30):
cur_img = data['image'][i]
cur_steer = int(data['servo'][i])
cur_throttle = int(data['motor'][i])
cur_img_array = deserialize_image(cur_img)
image = cv2.cvtColor(cur_img_array, cv2.COLOR_RGB2BGR)
print(i)
cv2.imwrite('test' + str(i) + '.jpg', image)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def visualize_data(filename, width=72, height=48, depth=3, cnn_model=None):
"""
When cnn_model is specified it'll show what the cnn_model predicts (red)
as opposed to what inputs it actually received (green)
"""
data = pd.DataFrame.from_csv(filename)
for i in range(30):
cur_img = data['image'][i]
cur_steer = int(data['servo'][i])
cur_throttle = int(data['motor'][i])
cur_img_array = deserialize_image(cur_img)
image = cv2.cvtColor(cur_img_array, cv2.COLOR_RGB2BGR)
print(i)
cv2.imwrite('test' + str(i) + '.jpg', image)
<|reserved_special_token_0|>
with open('settings.json') as d:
SETTINGS = json.load(d)
<|reserved_special_token_0|>
if len(sys.argv) > 1:
filename = sys.argv[1]
visualize_data(filename, width=SETTINGS['width'], height=SETTINGS['height'],
depth=SETTINGS['depth'])
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def visualize_data(filename, width=72, height=48, depth=3, cnn_model=None):
"""
When cnn_model is specified it'll show what the cnn_model predicts (red)
as opposed to what inputs it actually received (green)
"""
data = pd.DataFrame.from_csv(filename)
for i in range(30):
cur_img = data['image'][i]
cur_steer = int(data['servo'][i])
cur_throttle = int(data['motor'][i])
cur_img_array = deserialize_image(cur_img)
image = cv2.cvtColor(cur_img_array, cv2.COLOR_RGB2BGR)
print(i)
cv2.imwrite('test' + str(i) + '.jpg', image)
<|reserved_special_token_0|>
with open('settings.json') as d:
SETTINGS = json.load(d)
filename = get_latest_filename()
if len(sys.argv) > 1:
filename = sys.argv[1]
visualize_data(filename, width=SETTINGS['width'], height=SETTINGS['height'],
depth=SETTINGS['depth'])
<|reserved_special_token_1|>
import numpy as np
import cv2
import pandas as pd
from suiron.utils.functions import raw_to_cnn, cnn_to_raw, raw_motor_to_rgb
from suiron.utils.img_serializer import deserialize_image
def visualize_data(filename, width=72, height=48, depth=3, cnn_model=None):
"""
When cnn_model is specified it'll show what the cnn_model predicts (red)
as opposed to what inputs it actually received (green)
"""
data = pd.DataFrame.from_csv(filename)
for i in range(30):
cur_img = data['image'][i]
cur_steer = int(data['servo'][i])
cur_throttle = int(data['motor'][i])
cur_img_array = deserialize_image(cur_img)
image = cv2.cvtColor(cur_img_array, cv2.COLOR_RGB2BGR)
print(i)
cv2.imwrite('test' + str(i) + '.jpg', image)
import sys
import json
from suiron.utils.file_finder import get_latest_filename
with open('settings.json') as d:
SETTINGS = json.load(d)
filename = get_latest_filename()
if len(sys.argv) > 1:
filename = sys.argv[1]
visualize_data(filename, width=SETTINGS['width'], height=SETTINGS['height'],
depth=SETTINGS['depth'])
<|reserved_special_token_1|>
# from suiron.core.SuironIO import SuironIO
# import cv2
# import os
# import time
# import json
# import numpy as np
# suironio = SuironIO(serial_location='/dev/ttyUSB0', baudrate=57600, port=5050)
# if __name__ == "__main__":
# while True:
# # suironio.record_inputs()
# print('turn90')
# suironio.servo_test(90)
# print('turn0')
# suironio.servo_test(0)
# print('turn-90')
# suironio.servo_test(-90)
# import socket
# import struct
# import pandas as pd
# sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# host = raw_input("Server hostname or ip? ")
# port = input("Server port? ")
# # sock.connect((host,port))
# sock.connect(('192.168.0.164',5051))
# while True:
# data = raw_input("message: ")
# # sock.send(data)
# raw_data = {
# 'image': [2,4,2,5,6,3,2,3],
# 'servo': [22,42,5,45,34,534,2,3],
# 'motor': [23423,324,32,324,324,2,4,2]
# }
# df = pd.DataFrame(raw_data, columns=['image', 'servo', 'motor'])
# df = df.to_csv()
# sock.sendall(struct.pack('>i', len(df))+df)
# # sock.sendall(struct.pack('>i', len(data))+data)
# print("response: ", sock.recv(1024))
import numpy as np
import cv2
import pandas as pd
from suiron.utils.functions import raw_to_cnn, cnn_to_raw, raw_motor_to_rgb
from suiron.utils.img_serializer import deserialize_image
# Visualize images
# With and without any predictions
def visualize_data(filename, width=72, height=48, depth=3, cnn_model=None):
"""
When cnn_model is specified it'll show what the cnn_model predicts (red)
as opposed to what inputs it actually received (green)
"""
data = pd.DataFrame.from_csv(filename)
for i in range(30):
cur_img = data['image'][i]
cur_steer = int(data['servo'][i])
cur_throttle = int(data['motor'][i])
# [1:-1] is used to remove '[' and ']' from string
cur_img_array = deserialize_image(cur_img)
# cur_img_array = cv2.resize(cur_img_array, (480, 320), interpolation=cv2.INTER_CUBIC)
image = cv2.cvtColor(cur_img_array, cv2.COLOR_RGB2BGR)
print(i)
cv2.imwrite('test'+str(i)+'.jpg', image)
import sys
import json
# from suiron.core.SuironVZ import visualize_data
from suiron.utils.file_finder import get_latest_filename
# Load image settings
with open('settings.json') as d:
SETTINGS = json.load(d)
# Visualize latest filename
filename = get_latest_filename()
# If we specified which file
if len(sys.argv) > 1:
filename = sys.argv[1]
visualize_data(filename, width=SETTINGS['width'], height=SETTINGS['height'], depth=SETTINGS['depth'])
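# Note: cnn_model is accepted above but unused in this trimmed version; only
# the recorded inputs are written out. deserialize_image() comes from
# suiron.utils.img_serializer, which is not shown here -- a plausible sketch
# (an assumption, not the actual library code) of such a deserializer:
#
#     import numpy as np
#
#     def deserialize_image(data, width=72, height=48, depth=3):
#         # raw bytes -> flat uint8 buffer -> (height, width, depth) RGB array
#         return np.frombuffer(data, dtype=np.uint8).reshape(height, width, depth)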
|
flexible
|
{
"blob_id": "bf8ffe603b7c1e90deed6a69500ea5b7671e7270",
"index": 879,
"step-1": "<mask token>\n\n\ndef visualize_data(filename, width=72, height=48, depth=3, cnn_model=None):\n \"\"\"\n When cnn_model is specified it'll show what the cnn_model predicts (red)\n as opposed to what inputs it actually received (green)\n \"\"\"\n data = pd.DataFrame.from_csv(filename)\n for i in range(30):\n cur_img = data['image'][i]\n cur_steer = int(data['servo'][i])\n cur_throttle = int(data['motor'][i])\n cur_img_array = deserialize_image(cur_img)\n image = cv2.cvtColor(cur_img_array, cv2.COLOR_RGB2BGR)\n print(i)\n cv2.imwrite('test' + str(i) + '.jpg', image)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef visualize_data(filename, width=72, height=48, depth=3, cnn_model=None):\n \"\"\"\n When cnn_model is specified it'll show what the cnn_model predicts (red)\n as opposed to what inputs it actually received (green)\n \"\"\"\n data = pd.DataFrame.from_csv(filename)\n for i in range(30):\n cur_img = data['image'][i]\n cur_steer = int(data['servo'][i])\n cur_throttle = int(data['motor'][i])\n cur_img_array = deserialize_image(cur_img)\n image = cv2.cvtColor(cur_img_array, cv2.COLOR_RGB2BGR)\n print(i)\n cv2.imwrite('test' + str(i) + '.jpg', image)\n\n\n<mask token>\nwith open('settings.json') as d:\n SETTINGS = json.load(d)\n<mask token>\nif len(sys.argv) > 1:\n filename = sys.argv[1]\nvisualize_data(filename, width=SETTINGS['width'], height=SETTINGS['height'],\n depth=SETTINGS['depth'])\n",
"step-3": "<mask token>\n\n\ndef visualize_data(filename, width=72, height=48, depth=3, cnn_model=None):\n \"\"\"\n When cnn_model is specified it'll show what the cnn_model predicts (red)\n as opposed to what inputs it actually received (green)\n \"\"\"\n data = pd.DataFrame.from_csv(filename)\n for i in range(30):\n cur_img = data['image'][i]\n cur_steer = int(data['servo'][i])\n cur_throttle = int(data['motor'][i])\n cur_img_array = deserialize_image(cur_img)\n image = cv2.cvtColor(cur_img_array, cv2.COLOR_RGB2BGR)\n print(i)\n cv2.imwrite('test' + str(i) + '.jpg', image)\n\n\n<mask token>\nwith open('settings.json') as d:\n SETTINGS = json.load(d)\nfilename = get_latest_filename()\nif len(sys.argv) > 1:\n filename = sys.argv[1]\nvisualize_data(filename, width=SETTINGS['width'], height=SETTINGS['height'],\n depth=SETTINGS['depth'])\n",
"step-4": "import numpy as np\nimport cv2\nimport pandas as pd\nfrom suiron.utils.functions import raw_to_cnn, cnn_to_raw, raw_motor_to_rgb\nfrom suiron.utils.img_serializer import deserialize_image\n\n\ndef visualize_data(filename, width=72, height=48, depth=3, cnn_model=None):\n \"\"\"\n When cnn_model is specified it'll show what the cnn_model predicts (red)\n as opposed to what inputs it actually received (green)\n \"\"\"\n data = pd.DataFrame.from_csv(filename)\n for i in range(30):\n cur_img = data['image'][i]\n cur_steer = int(data['servo'][i])\n cur_throttle = int(data['motor'][i])\n cur_img_array = deserialize_image(cur_img)\n image = cv2.cvtColor(cur_img_array, cv2.COLOR_RGB2BGR)\n print(i)\n cv2.imwrite('test' + str(i) + '.jpg', image)\n\n\nimport sys\nimport json\nfrom suiron.utils.file_finder import get_latest_filename\nwith open('settings.json') as d:\n SETTINGS = json.load(d)\nfilename = get_latest_filename()\nif len(sys.argv) > 1:\n filename = sys.argv[1]\nvisualize_data(filename, width=SETTINGS['width'], height=SETTINGS['height'],\n depth=SETTINGS['depth'])\n",
"step-5": "# from suiron.core.SuironIO import SuironIO\n# import cv2\n# import os\n# import time\n# import json\n# import numpy as np\n\n# suironio = SuironIO(serial_location='/dev/ttyUSB0', baudrate=57600, port=5050)\n\n# if __name__ == \"__main__\":\n# while True:\n# \t# suironio.record_inputs()\n# \tprint('turn90')\n# suironio.servo_test(90)\n# print('turn0')\n# suironio.servo_test(0)\n# print('turn-90')\n# suironio.servo_test(-90)\n\n# import socket\n# import struct\n# import pandas as pd\n\n# sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n# host = raw_input(\"Server hostname or ip? \")\n# port = input(\"Server port? \")\n# # sock.connect((host,port))\n# sock.connect(('192.168.0.164',5051))\n\n# while True:\n# data = raw_input(\"message: \")\n# # sock.send(data)\n# raw_data = {\n# \t 'image': [2,4,2,5,6,3,2,3], \n# \t 'servo': [22,42,5,45,34,534,2,3],\n# \t 'motor': [23423,324,32,324,324,2,4,2]\n# \t }\n# df = pd.DataFrame(raw_data, columns=['image', 'servo', 'motor'])\n# df = df.to_csv()\n# sock.sendall(struct.pack('>i', len(df))+df)\n# # sock.sendall(struct.pack('>i', len(data))+data)\n# print(\"response: \", sock.recv(1024))\n\nimport numpy as np\nimport cv2\nimport pandas as pd\n\nfrom suiron.utils.functions import raw_to_cnn, cnn_to_raw, raw_motor_to_rgb\nfrom suiron.utils.img_serializer import deserialize_image\n\n# Visualize images\n# With and without any predictions\ndef visualize_data(filename, width=72, height=48, depth=3, cnn_model=None):\n \"\"\"\n When cnn_model is specified it'll show what the cnn_model predicts (red)\n as opposed to what inputs it actually received (green)\n \"\"\"\n data = pd.DataFrame.from_csv(filename) \n\n for i in range(30):\n cur_img = data['image'][i]\n cur_steer = int(data['servo'][i])\n cur_throttle = int(data['motor'][i])\n \n # [1:-1] is used to remove '[' and ']' from string \n cur_img_array = deserialize_image(cur_img)\n # cur_img_array = cv2.resize(cur_img_array, (480, 320), interpolation=cv2.INTER_CUBIC)\n image = cv2.cvtColor(cur_img_array, cv2.COLOR_RGB2BGR)\n print(i)\n cv2.imwrite('test'+str(i)+'.jpg', image)\n\nimport sys\nimport json\n\n# from suiron.core.SuironVZ import visualize_data\nfrom suiron.utils.file_finder import get_latest_filename\n\n# Load image settings\nwith open('settings.json') as d:\n SETTINGS = json.load(d)\n\n# Visualize latest filename\nfilename = get_latest_filename() \n\n# If we specified which file\nif len(sys.argv) > 1:\n filename = sys.argv[1]\n\nvisualize_data(filename, width=SETTINGS['width'], height=SETTINGS['height'], depth=SETTINGS['depth'])",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
# coding: gb18030
from setuptools import setup
setup(
name="qlquery",
version="1.0",
license="MIT",
packages=['qlquery'],
install_requires=[
'my-fake-useragent',
'requests',
'beautifulsoup4'
],
zip_safe=False
)
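# Assumed developer workflow for a setuptools package like this (not stated
# in the file itself):
#
#     pip install -e .            # editable install from the repo root
#     python -c "import qlquery"  # smoke-test that the package imports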
|
normal
|
{
"blob_id": "f11ede752df7d9aff672eee4e230b109fcbf987b",
"index": 8555,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nsetup(name='qlquery', version='1.0', license='MIT', packages=['qlquery'],\n install_requires=['my-fake-useragent', 'requests', 'beautifulsoup4'],\n zip_safe=False)\n",
"step-3": "from setuptools import setup\nsetup(name='qlquery', version='1.0', license='MIT', packages=['qlquery'],\n install_requires=['my-fake-useragent', 'requests', 'beautifulsoup4'],\n zip_safe=False)\n",
"step-4": "# coding: gb18030\n\nfrom setuptools import setup\n\nsetup(\n name=\"qlquery\",\n version=\"1.0\",\n license=\"MIT\",\n packages=['qlquery'],\n install_requires=[\n 'my-fake-useragent',\n 'requests',\n 'beautifulsoup4'\n ],\n zip_safe=False\n)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class QueuedSpace(models.Model):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __unicode__(self):
return 'id: %s (marked %s on %s by %s)' % (self.space_id, self.
status, self.last_modified, self.modified_by)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class QueuedSpace(models.Model):
<|reserved_special_token_0|>
space_id = models.IntegerField(blank=True, null=True)
json = models.TextField()
q_etag = models.CharField(max_length=40, blank=True)
status = models.CharField(max_length=25, blank=True)
last_modified = models.DateTimeField(auto_now=True, auto_now_add=True)
modified_by = models.ForeignKey(User, blank=True, null=True,
related_name='modified_by')
approved_by = models.ForeignKey(User, blank=True, null=True,
related_name='approved_by')
def __unicode__(self):
return 'id: %s (marked %s on %s by %s)' % (self.space_id, self.
status, self.last_modified, self.modified_by)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class QueuedSpace(models.Model):
""" Stores space json for possible further editing before being sent to the server.
q_etag should update on every save so conflicts can be checked for in queued items.
"""
space_id = models.IntegerField(blank=True, null=True)
json = models.TextField()
q_etag = models.CharField(max_length=40, blank=True)
status = models.CharField(max_length=25, blank=True)
last_modified = models.DateTimeField(auto_now=True, auto_now_add=True)
modified_by = models.ForeignKey(User, blank=True, null=True,
related_name='modified_by')
approved_by = models.ForeignKey(User, blank=True, null=True,
related_name='approved_by')
def __unicode__(self):
return 'id: %s (marked %s on %s by %s)' % (self.space_id, self.
status, self.last_modified, self.modified_by)
<|reserved_special_token_1|>
from django.contrib.auth.models import User
from django.db import models
class QueuedSpace(models.Model):
""" Stores space json for possible further editing before being sent to the server.
q_etag should update on every save so conflicts can be checked for in queued items.
"""
space_id = models.IntegerField(blank=True, null=True)
json = models.TextField()
q_etag = models.CharField(max_length=40, blank=True)
status = models.CharField(max_length=25, blank=True)
last_modified = models.DateTimeField(auto_now=True, auto_now_add=True)
modified_by = models.ForeignKey(User, blank=True, null=True,
related_name='modified_by')
approved_by = models.ForeignKey(User, blank=True, null=True,
related_name='approved_by')
def __unicode__(self):
return 'id: %s (marked %s on %s by %s)' % (self.space_id, self.
status, self.last_modified, self.modified_by)
<|reserved_special_token_1|>
from django.contrib.auth.models import User
from django.db import models
class QueuedSpace(models.Model):
""" Stores space json for possible further editing before being sent to the server.
q_etag should update on every save so conflicts can be checked for in queued items.
"""
space_id = models.IntegerField(blank=True, null=True)
json = models.TextField()
q_etag = models.CharField(max_length=40, blank=True)
status = models.CharField(max_length=25, blank=True)
last_modified = models.DateTimeField(auto_now=True, auto_now_add=True)
modified_by = models.ForeignKey(User, blank=True, null=True, related_name='modified_by')
approved_by = models.ForeignKey(User, blank=True, null=True, related_name='approved_by')
def __unicode__(self):
return "id: %s (marked %s on %s by %s)" % (self.space_id, self.status, self.last_modified, self.modified_by)
#TODO: put in an etag generator
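    # A hedged sketch for that TODO (an assumption, not existing project
    # code): regenerate q_etag from the json payload on every save, e.g.
    #
    #     import hashlib
    #
    #     def save(self, *args, **kwargs):
    #         # 40-char hex digest fits the CharField(max_length=40) above
    #         self.q_etag = hashlib.sha1(self.json.encode('utf-8')).hexdigest()
    #         super(QueuedSpace, self).save(*args, **kwargs)
    #
    # (Newer Django versions also flag auto_now together with auto_now_add
    # on one field, so last_modified would need just auto_now=True today.)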
|
flexible
|
{
"blob_id": "ff09993a4f8fed65fa00c065eb5cfa41e7f9dcc1",
"index": 4411,
"step-1": "<mask token>\n\n\nclass QueuedSpace(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __unicode__(self):\n return 'id: %s (marked %s on %s by %s)' % (self.space_id, self.\n status, self.last_modified, self.modified_by)\n",
"step-2": "<mask token>\n\n\nclass QueuedSpace(models.Model):\n <mask token>\n space_id = models.IntegerField(blank=True, null=True)\n json = models.TextField()\n q_etag = models.CharField(max_length=40, blank=True)\n status = models.CharField(max_length=25, blank=True)\n last_modified = models.DateTimeField(auto_now=True, auto_now_add=True)\n modified_by = models.ForeignKey(User, blank=True, null=True,\n related_name='modified_by')\n approved_by = models.ForeignKey(User, blank=True, null=True,\n related_name='approved_by')\n\n def __unicode__(self):\n return 'id: %s (marked %s on %s by %s)' % (self.space_id, self.\n status, self.last_modified, self.modified_by)\n",
"step-3": "<mask token>\n\n\nclass QueuedSpace(models.Model):\n \"\"\" Stores space json for possible further editing before being sent to the server.\n q_etag should update on every save so conflicts can be checked for in queued items.\n \"\"\"\n space_id = models.IntegerField(blank=True, null=True)\n json = models.TextField()\n q_etag = models.CharField(max_length=40, blank=True)\n status = models.CharField(max_length=25, blank=True)\n last_modified = models.DateTimeField(auto_now=True, auto_now_add=True)\n modified_by = models.ForeignKey(User, blank=True, null=True,\n related_name='modified_by')\n approved_by = models.ForeignKey(User, blank=True, null=True,\n related_name='approved_by')\n\n def __unicode__(self):\n return 'id: %s (marked %s on %s by %s)' % (self.space_id, self.\n status, self.last_modified, self.modified_by)\n",
"step-4": "from django.contrib.auth.models import User\nfrom django.db import models\n\n\nclass QueuedSpace(models.Model):\n \"\"\" Stores space json for possible further editing before being sent to the server.\n q_etag should update on every save so conflicts can be checked for in queued items.\n \"\"\"\n space_id = models.IntegerField(blank=True, null=True)\n json = models.TextField()\n q_etag = models.CharField(max_length=40, blank=True)\n status = models.CharField(max_length=25, blank=True)\n last_modified = models.DateTimeField(auto_now=True, auto_now_add=True)\n modified_by = models.ForeignKey(User, blank=True, null=True,\n related_name='modified_by')\n approved_by = models.ForeignKey(User, blank=True, null=True,\n related_name='approved_by')\n\n def __unicode__(self):\n return 'id: %s (marked %s on %s by %s)' % (self.space_id, self.\n status, self.last_modified, self.modified_by)\n",
"step-5": "from django.contrib.auth.models import User\nfrom django.db import models\n\n\nclass QueuedSpace(models.Model):\n \"\"\" Stores space json for possible further editing before being sent to the server.\n q_etag should update on every save so conflicts can be checked for in queued items.\n \"\"\"\n space_id = models.IntegerField(blank=True, null=True)\n json = models.TextField()\n q_etag = models.CharField(max_length=40, blank=True)\n status = models.CharField(max_length=25, blank=True)\n last_modified = models.DateTimeField(auto_now=True, auto_now_add=True)\n modified_by = models.ForeignKey(User, blank=True, null=True, related_name='modified_by')\n approved_by = models.ForeignKey(User, blank=True, null=True, related_name='approved_by')\n\n def __unicode__(self):\n return \"id: %s (marked %s on %s by %s)\" % (self.space_id, self.status, self.last_modified, self.modified_by)\n\n #TODO: put in an etag generator\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
class Member(models.Model):
name = models.CharField(max_length=200, db_index=True)
age = models.CharField(max_length=200)
phone = models.CharField(max_length=200)
address1 = models.CharField(max_length=200)
address2 = models.CharField(max_length=200)
phone = models.CharField(max_length=200)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Group(models.Model):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class Member(models.Model):
name = models.CharField(max_length=200, db_index=True)
age = models.CharField(max_length=200)
phone = models.CharField(max_length=200)
address1 = models.CharField(max_length=200)
address2 = models.CharField(max_length=200)
phone = models.CharField(max_length=200)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Group(models.Model):
name = models.CharField(max_length=200, db_index=True)
loan_eligibility = models.CharField(max_length=200, db_index=True)
account_number = models.CharField(max_length=200, db_index=True)
incharge = models.CharField(max_length=200, db_index=True)
incharge2 = models.CharField(max_length=200, db_index=True)
class Member(models.Model):
name = models.CharField(max_length=200, db_index=True)
age = models.CharField(max_length=200)
phone = models.CharField(max_length=200)
address1 = models.CharField(max_length=200)
address2 = models.CharField(max_length=200)
phone = models.CharField(max_length=200)
<|reserved_special_token_1|>
from __future__ import unicode_literals
from django.db import models
class Group(models.Model):
name = models.CharField(max_length=200, db_index=True)
loan_eligibility = models.CharField(max_length=200, db_index=True)
account_number = models.CharField(max_length=200, db_index=True)
incharge = models.CharField(max_length=200, db_index=True)
incharge2 = models.CharField(max_length=200, db_index=True)
class Member(models.Model):
name = models.CharField(max_length=200, db_index=True)
age = models.CharField(max_length=200)
phone = models.CharField(max_length=200)
address1 = models.CharField(max_length=200)
address2 = models.CharField(max_length=200)
phone = models.CharField(max_length=200)
<|reserved_special_token_1|>
from __future__ import unicode_literals
from django.db import models
# Create your models here.
class Group(models.Model):
name = models.CharField(max_length=200, db_index=True)
loan_eligibility = models.CharField(max_length=200, db_index=True)
account_number = models.CharField(max_length=200, db_index=True)
incharge = models.CharField(max_length=200, db_index=True)
incharge2 = models.CharField(max_length=200, db_index=True)
class Member(models.Model):
name = models.CharField(max_length=200, db_index=True)
age = models.CharField(max_length=200)
phone = models.CharField(max_length=200)
address1 = models.CharField(max_length=200)
address2 = models.CharField(max_length=200)
phone = models.CharField(max_length=200)
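# Note: Member declares `phone` twice above; in a Python class body the
# second assignment silently replaces the first, so the model still ends up
# with a single `phone` column. A deduplicated sketch of the presumably
# intended fields:
#
#     class Member(models.Model):
#         name = models.CharField(max_length=200, db_index=True)
#         age = models.CharField(max_length=200)
#         phone = models.CharField(max_length=200)
#         address1 = models.CharField(max_length=200)
#         address2 = models.CharField(max_length=200)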
|
flexible
|
{
"blob_id": "0c8b58acf33bdfa95984d29a75ae01e49d0da149",
"index": 9202,
"step-1": "<mask token>\n\n\nclass Member(models.Model):\n name = models.CharField(max_length=200, db_index=True)\n age = models.CharField(max_length=200)\n phone = models.CharField(max_length=200)\n address1 = models.CharField(max_length=200)\n address2 = models.CharField(max_length=200)\n phone = models.CharField(max_length=200)\n",
"step-2": "<mask token>\n\n\nclass Group(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass Member(models.Model):\n name = models.CharField(max_length=200, db_index=True)\n age = models.CharField(max_length=200)\n phone = models.CharField(max_length=200)\n address1 = models.CharField(max_length=200)\n address2 = models.CharField(max_length=200)\n phone = models.CharField(max_length=200)\n",
"step-3": "<mask token>\n\n\nclass Group(models.Model):\n name = models.CharField(max_length=200, db_index=True)\n loan_eligibility = models.CharField(max_length=200, db_index=True)\n account_number = models.CharField(max_length=200, db_index=True)\n incharge = models.CharField(max_length=200, db_index=True)\n incharge2 = models.CharField(max_length=200, db_index=True)\n\n\nclass Member(models.Model):\n name = models.CharField(max_length=200, db_index=True)\n age = models.CharField(max_length=200)\n phone = models.CharField(max_length=200)\n address1 = models.CharField(max_length=200)\n address2 = models.CharField(max_length=200)\n phone = models.CharField(max_length=200)\n",
"step-4": "from __future__ import unicode_literals\nfrom django.db import models\n\n\nclass Group(models.Model):\n name = models.CharField(max_length=200, db_index=True)\n loan_eligibility = models.CharField(max_length=200, db_index=True)\n account_number = models.CharField(max_length=200, db_index=True)\n incharge = models.CharField(max_length=200, db_index=True)\n incharge2 = models.CharField(max_length=200, db_index=True)\n\n\nclass Member(models.Model):\n name = models.CharField(max_length=200, db_index=True)\n age = models.CharField(max_length=200)\n phone = models.CharField(max_length=200)\n address1 = models.CharField(max_length=200)\n address2 = models.CharField(max_length=200)\n phone = models.CharField(max_length=200)\n",
"step-5": "from __future__ import unicode_literals\n\nfrom django.db import models\n\n# Create your models here.\nclass Group(models.Model):\n name = models.CharField(max_length=200, db_index=True)\n loan_eligibility = models.CharField(max_length=200, db_index=True)\n account_number = models.CharField(max_length=200, db_index=True)\n incharge = models.CharField(max_length=200, db_index=True)\n incharge2 = models.CharField(max_length=200, db_index=True)\n\n\nclass Member(models.Model):\n name = models.CharField(max_length=200, db_index=True)\n age = models.CharField(max_length=200)\n phone = models.CharField(max_length=200)\n address1 = models.CharField(max_length=200)\n address2 = models.CharField(max_length=200)\n phone = models.CharField(max_length=200)\n\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if __name__ == '__main__':
running.go()
<|reserved_special_token_1|>
import running
if __name__ == '__main__':
running.go()
<|reserved_special_token_1|>
#!/usr/bin/python
# coding=utf8
# author: Sun yang
import running
if __name__ == '__main__':
running.go()
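# The __main__ guard above keeps running.go() from firing when this file is
# imported as a module instead of being executed directly.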
|
flexible
|
{
"blob_id": "12442e4debc7fbf102ab88b42464f4ca8eb91351",
"index": 8454,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n running.go()\n",
"step-3": "import running\nif __name__ == '__main__':\n running.go()\n",
"step-4": "#!/usr/bin/python\r\n# coding=utf8\r\n# author: Sun yang\r\n\r\nimport running\r\n\r\n\r\nif __name__ == '__main__':\r\n running.go()",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def login_homework():
res = requests.get('http://www.yphs.tp.edu.tw/tea/tu2.aspx')
soup = BeautifulSoup(res.text, 'lxml')
VIEWSTATE = soup.find(id='__VIEWSTATE')
VIEWSTATEGENERATOR = soup.find(id='__VIEWSTATEGENERATOR')
EVENTVALIDATION = soup.find(id='__EVENTVALIDATION')
res = requests.post('http://www.yphs.tp.edu.tw/tea/tu2.aspx',
allow_redirects=False, data={'__VIEWSTATE': VIEWSTATE.get('value'),
'__VIEWSTATEGENERATOR': VIEWSTATEGENERATOR.get('value'),
'__EVENTVALIDATION': EVENTVALIDATION.get('value'), 'chk_id':
'學生/家長', 'tbx_sno': sid, 'tbx_sid': cid, 'tbx_sbir': bir,
'but_login_stud': '登\u3000\u3000入'})
global cook
cook = res.cookies['ASP.NET_SessionId']
return
<|reserved_special_token_0|>
def crawl_tomorrow_calendar():
res = requests.get('http://www.yphs.tp.edu.tw/yphs/gr2.aspx')
soup = BeautifulSoup(res.text, 'lxml')
calendar = '明日行事曆:\n 全校:' + soup.find_all(color='#404040')[16].text
if soup.find_all(color='#404040')[16].text == '\xa0':
calendar += 'N/A'
calendar = calendar + '\n 高一:' + soup.find_all(color='#404040')[21].text
if soup.find_all(color='#404040')[21].text == '\xa0':
calendar += 'N/A'
return calendar
def fetch_tomorrow_class_table():
count = int(0)
tomorrow_class = '\n明日課表:\n 早上:\n '
for i in cls[(datetime.today().weekday() + 1) % 7]:
if count == 4:
tomorrow_class += '\n 下午:\n '
tomorrow_class += '[' + i + ']'
if count < 8 and count != 3:
tomorrow_class += '->'
count += 1
return tomorrow_class
def post(send_word):
if platform == 'line':
line_bot_api.push_message(chatid, TextSendMessage(text=send_word,
wrap=True))
if platform == 'telegram':
requests.get('https://api.telegram.org/bot' + bottoken +
'/sendMessage?chat_id=' + chatid + '&text=' + send_word)
<|reserved_special_token_0|>
def close_log():
fw.close()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def open_log():
global log
global fw
try:
fr = open(log_path, 'r')
log = fr.read().split('\n')
fr.close()
except:
fw = open(log_path, 'w+')
log = ''
return
fw = open(log_path, 'a')
return
def login_homework():
res = requests.get('http://www.yphs.tp.edu.tw/tea/tu2.aspx')
soup = BeautifulSoup(res.text, 'lxml')
VIEWSTATE = soup.find(id='__VIEWSTATE')
VIEWSTATEGENERATOR = soup.find(id='__VIEWSTATEGENERATOR')
EVENTVALIDATION = soup.find(id='__EVENTVALIDATION')
res = requests.post('http://www.yphs.tp.edu.tw/tea/tu2.aspx',
allow_redirects=False, data={'__VIEWSTATE': VIEWSTATE.get('value'),
'__VIEWSTATEGENERATOR': VIEWSTATEGENERATOR.get('value'),
'__EVENTVALIDATION': EVENTVALIDATION.get('value'), 'chk_id':
'學生/家長', 'tbx_sno': sid, 'tbx_sid': cid, 'tbx_sbir': bir,
'but_login_stud': '登\u3000\u3000入'})
global cook
cook = res.cookies['ASP.NET_SessionId']
return
def crawl_and_fetch_today_homework(tomorrow_calendar, tomorrow_class_table):
send = requests.get('http://www.yphs.tp.edu.tw/tea/tu2-1.aspx', cookies
={'ASP.NET_SessionId': cook})
soup = BeautifulSoup(send.text, 'lxml')
VIEWSTATE = soup.find(id='__VIEWSTATE')
VIEWSTATEGENERATOR = soup.find(id='__VIEWSTATEGENERATOR')
EVENTVALIDATION = soup.find(id='__EVENTVALIDATION')
for x in range(15, 1, -1):
try:
num = str('')
if x < 10:
num = '0' + str(x)
else:
num = str(x)
send = requests.post('http://www.yphs.tp.edu.tw/tea/tu2-1.aspx',
cookies={'ASP.NET_SessionId': cook}, data={'__VIEWSTATE':
VIEWSTATE.get('value'), '__VIEWSTATEGENERATOR':
VIEWSTATEGENERATOR.get('value'), '__EVENTVALIDATION':
EVENTVALIDATION.get('value'), ('GridViewS$ctl' + num +
'$but_vf1'): '詳細內容'})
soup = BeautifulSoup(send.text, 'lxml')
ok = bool(True)
for y in range(0, len(log), 1):
if soup.find(id='Lab_purport').text == log[y]:
ok = bool(False)
if ok == True:
fw.write(soup.find(id='Lab_purport').text + '\n')
post_title = str('[主旨:' + str(soup.find(id='Lab_purport').
text) + ']')
post_content = str(soup.find(id='Lab_content').text)
post_attachment = str(' ')
if soup.find(target='_blank'):
post_attachment = soup.find(target='_blank').get('href')
send_word = (post_title + '\n' + post_content + '\n' +
post_attachment)
if str(soup.find(id='Lab_purport').text).find('聯絡簿'
) >= 0 and datetime.today().weekday() < 4:
send_word = (send_word + '\n***系統訊息***\n' +
tomorrow_calendar + '\n' + tomorrow_class_table)
if str(soup.find(id='Lab_purport').text).find('聯絡簿'
) >= 0 and datetime.today().weekday() == 4:
send_word = send_word
post(send_word)
except:
pass
return
def crawl_tomorrow_calendar():
res = requests.get('http://www.yphs.tp.edu.tw/yphs/gr2.aspx')
soup = BeautifulSoup(res.text, 'lxml')
calendar = '明日行事曆:\n 全校:' + soup.find_all(color='#404040')[16].text
if soup.find_all(color='#404040')[16].text == '\xa0':
calendar += 'N/A'
calendar = calendar + '\n 高一:' + soup.find_all(color='#404040')[21].text
if soup.find_all(color='#404040')[21].text == '\xa0':
calendar += 'N/A'
return calendar
def fetch_tomorrow_class_table():
count = int(0)
tomorrow_class = '\n明日課表:\n 早上:\n '
for i in cls[(datetime.today().weekday() + 1) % 7]:
if count == 4:
tomorrow_class += '\n 下午:\n '
tomorrow_class += '[' + i + ']'
if count < 8 and count != 3:
tomorrow_class += '->'
count += 1
return tomorrow_class
def post(send_word):
if platform == 'line':
line_bot_api.push_message(chatid, TextSendMessage(text=send_word,
wrap=True))
if platform == 'telegram':
requests.get('https://api.telegram.org/bot' + bottoken +
'/sendMessage?chat_id=' + chatid + '&text=' + send_word)
<|reserved_special_token_0|>
def close_log():
fw.close()
def main():
open_log()
login_homework()
crawl_and_fetch_today_homework(crawl_tomorrow_calendar(),
fetch_tomorrow_class_table())
close_log()
if datetime.today().weekday() == 6 and datetime.today(
).hour == 21 and datetime.today().minute < 10:
send_word = '[主旨:機器人訊息]\n***系統訊息***\n' + crawl_tomorrow_calendar(
) + '\n' + fetch_tomorrow_class_table()
post(send_word)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
log_path = './log.txt'
sid = ''
cid = ''
bir = ''
platform = 'line'
if platform == 'line':
from linebot import LineBotApi
from linebot.models import TextSendMessage
bottoken = ''
chatid = ''
line_bot_api = LineBotApi(bottoken)
if platform == 'telegram':
bottoken = ''
chatid = ''
cls = [['學校活動', '英文', '化學', '國文', '地理', '生物', '公民', '歷史', '數學'], ['彈性課程',
'地科', '數學', '數學', '資訊', '西洋影視', '國文', '國文', '英文'], ['數學', '物理', '生活科技',
'體育', '國文', '化學', '音樂', '英文', '英文'], ['數學', '論孟選讀', '生物', '多元選修', '歷史',
'化學', '英文', '國防', '物理'], ['彈性課程', '英文', '數學', '地理', '公民', '國文', '體育',
'物理', '社團'], [], []]
def open_log():
global log
global fw
try:
fr = open(log_path, 'r')
log = fr.read().split('\n')
fr.close()
except:
fw = open(log_path, 'w+')
log = ''
return
fw = open(log_path, 'a')
return
def login_homework():
res = requests.get('http://www.yphs.tp.edu.tw/tea/tu2.aspx')
soup = BeautifulSoup(res.text, 'lxml')
VIEWSTATE = soup.find(id='__VIEWSTATE')
VIEWSTATEGENERATOR = soup.find(id='__VIEWSTATEGENERATOR')
EVENTVALIDATION = soup.find(id='__EVENTVALIDATION')
res = requests.post('http://www.yphs.tp.edu.tw/tea/tu2.aspx',
allow_redirects=False, data={'__VIEWSTATE': VIEWSTATE.get('value'),
'__VIEWSTATEGENERATOR': VIEWSTATEGENERATOR.get('value'),
'__EVENTVALIDATION': EVENTVALIDATION.get('value'), 'chk_id':
'學生/家長', 'tbx_sno': sid, 'tbx_sid': cid, 'tbx_sbir': bir,
'but_login_stud': '登\u3000\u3000入'})
global cook
cook = res.cookies['ASP.NET_SessionId']
return
def crawl_and_fetch_today_homework(tomorrow_calendar, tomorrow_class_table):
send = requests.get('http://www.yphs.tp.edu.tw/tea/tu2-1.aspx', cookies
={'ASP.NET_SessionId': cook})
soup = BeautifulSoup(send.text, 'lxml')
VIEWSTATE = soup.find(id='__VIEWSTATE')
VIEWSTATEGENERATOR = soup.find(id='__VIEWSTATEGENERATOR')
EVENTVALIDATION = soup.find(id='__EVENTVALIDATION')
for x in range(15, 1, -1):
try:
num = str('')
if x < 10:
num = '0' + str(x)
else:
num = str(x)
send = requests.post('http://www.yphs.tp.edu.tw/tea/tu2-1.aspx',
cookies={'ASP.NET_SessionId': cook}, data={'__VIEWSTATE':
VIEWSTATE.get('value'), '__VIEWSTATEGENERATOR':
VIEWSTATEGENERATOR.get('value'), '__EVENTVALIDATION':
EVENTVALIDATION.get('value'), ('GridViewS$ctl' + num +
'$but_vf1'): '詳細內容'})
soup = BeautifulSoup(send.text, 'lxml')
ok = bool(True)
for y in range(0, len(log), 1):
if soup.find(id='Lab_purport').text == log[y]:
ok = bool(False)
if ok == True:
fw.write(soup.find(id='Lab_purport').text + '\n')
post_title = str('[主旨:' + str(soup.find(id='Lab_purport').
text) + ']')
post_content = str(soup.find(id='Lab_content').text)
post_attachment = str(' ')
if soup.find(target='_blank'):
post_attachment = soup.find(target='_blank').get('href')
send_word = (post_title + '\n' + post_content + '\n' +
post_attachment)
if str(soup.find(id='Lab_purport').text).find('聯絡簿'
) >= 0 and datetime.today().weekday() < 4:
send_word = (send_word + '\n***系統訊息***\n' +
tomorrow_calendar + '\n' + tomorrow_class_table)
if str(soup.find(id='Lab_purport').text).find('聯絡簿'
) >= 0 and datetime.today().weekday() == 4:
send_word = send_word
post(send_word)
except:
pass
return
def crawl_tomorrow_calendar():
res = requests.get('http://www.yphs.tp.edu.tw/yphs/gr2.aspx')
soup = BeautifulSoup(res.text, 'lxml')
calendar = '明日行事曆:\n 全校:' + soup.find_all(color='#404040')[16].text
if soup.find_all(color='#404040')[16].text == '\xa0':
calendar += 'N/A'
calendar = calendar + '\n 高一:' + soup.find_all(color='#404040')[21].text
if soup.find_all(color='#404040')[21].text == '\xa0':
calendar += 'N/A'
return calendar
def fetch_tomorrow_class_table():
count = int(0)
tomorrow_class = '\n明日課表:\n 早上:\n '
for i in cls[(datetime.today().weekday() + 1) % 7]:
if count == 4:
tomorrow_class += '\n 下午:\n '
tomorrow_class += '[' + i + ']'
if count < 8 and count != 3:
tomorrow_class += '->'
count += 1
return tomorrow_class
def post(send_word):
if platform == 'line':
line_bot_api.push_message(chatid, TextSendMessage(text=send_word,
wrap=True))
if platform == 'telegram':
requests.get('https://api.telegram.org/bot' + bottoken +
'/sendMessage?chat_id=' + chatid + '&text=' + send_word)
<|reserved_special_token_0|>
def close_log():
fw.close()
def main():
open_log()
login_homework()
crawl_and_fetch_today_homework(crawl_tomorrow_calendar(),
fetch_tomorrow_class_table())
close_log()
if datetime.today().weekday() == 6 and datetime.today(
).hour == 21 and datetime.today().minute < 10:
send_word = '[主旨:機器人訊息]\n***系統訊息***\n' + crawl_tomorrow_calendar(
) + '\n' + fetch_tomorrow_class_table()
post(send_word)
main()
<|reserved_special_token_1|>
import requests
from bs4 import BeautifulSoup
import re
from datetime import datetime
log_path = './log.txt'
sid = ''
cid = ''
bir = ''
platform = 'line'
if platform == 'line':
from linebot import LineBotApi
from linebot.models import TextSendMessage
bottoken = ''
chatid = ''
line_bot_api = LineBotApi(bottoken)
if platform == 'telegram':
bottoken = ''
chatid = ''
cls = [['學校活動', '英文', '化學', '國文', '地理', '生物', '公民', '歷史', '數學'], ['彈性課程',
'地科', '數學', '數學', '資訊', '西洋影視', '國文', '國文', '英文'], ['數學', '物理', '生活科技',
'體育', '國文', '化學', '音樂', '英文', '英文'], ['數學', '論孟選讀', '生物', '多元選修', '歷史',
'化學', '英文', '國防', '物理'], ['彈性課程', '英文', '數學', '地理', '公民', '國文', '體育',
'物理', '社團'], [], []]
def open_log():
global log
global fw
try:
fr = open(log_path, 'r')
log = fr.read().split('\n')
fr.close()
except:
fw = open(log_path, 'w+')
log = ''
return
fw = open(log_path, 'a')
return
def login_homework():
res = requests.get('http://www.yphs.tp.edu.tw/tea/tu2.aspx')
soup = BeautifulSoup(res.text, 'lxml')
VIEWSTATE = soup.find(id='__VIEWSTATE')
VIEWSTATEGENERATOR = soup.find(id='__VIEWSTATEGENERATOR')
EVENTVALIDATION = soup.find(id='__EVENTVALIDATION')
res = requests.post('http://www.yphs.tp.edu.tw/tea/tu2.aspx',
allow_redirects=False, data={'__VIEWSTATE': VIEWSTATE.get('value'),
'__VIEWSTATEGENERATOR': VIEWSTATEGENERATOR.get('value'),
'__EVENTVALIDATION': EVENTVALIDATION.get('value'), 'chk_id':
'學生/家長', 'tbx_sno': sid, 'tbx_sid': cid, 'tbx_sbir': bir,
'but_login_stud': '登\u3000\u3000入'})
global cook
cook = res.cookies['ASP.NET_SessionId']
return
def crawl_and_fetch_today_homework(tomorrow_calendar, tomorrow_class_table):
send = requests.get('http://www.yphs.tp.edu.tw/tea/tu2-1.aspx', cookies
={'ASP.NET_SessionId': cook})
soup = BeautifulSoup(send.text, 'lxml')
VIEWSTATE = soup.find(id='__VIEWSTATE')
VIEWSTATEGENERATOR = soup.find(id='__VIEWSTATEGENERATOR')
EVENTVALIDATION = soup.find(id='__EVENTVALIDATION')
for x in range(15, 1, -1):
try:
num = str('')
if x < 10:
num = '0' + str(x)
else:
num = str(x)
send = requests.post('http://www.yphs.tp.edu.tw/tea/tu2-1.aspx',
cookies={'ASP.NET_SessionId': cook}, data={'__VIEWSTATE':
VIEWSTATE.get('value'), '__VIEWSTATEGENERATOR':
VIEWSTATEGENERATOR.get('value'), '__EVENTVALIDATION':
EVENTVALIDATION.get('value'), ('GridViewS$ctl' + num +
'$but_vf1'): '詳細內容'})
soup = BeautifulSoup(send.text, 'lxml')
ok = bool(True)
for y in range(0, len(log), 1):
if soup.find(id='Lab_purport').text == log[y]:
ok = bool(False)
if ok == True:
fw.write(soup.find(id='Lab_purport').text + '\n')
post_title = str('[主旨:' + str(soup.find(id='Lab_purport').
text) + ']')
post_content = str(soup.find(id='Lab_content').text)
post_attachment = str(' ')
if soup.find(target='_blank'):
post_attachment = soup.find(target='_blank').get('href')
send_word = (post_title + '\n' + post_content + '\n' +
post_attachment)
if str(soup.find(id='Lab_purport').text).find('聯絡簿'
) >= 0 and datetime.today().weekday() < 4:
send_word = (send_word + '\n***系統訊息***\n' +
tomorrow_calendar + '\n' + tomorrow_class_table)
if str(soup.find(id='Lab_purport').text).find('聯絡簿'
) >= 0 and datetime.today().weekday() == 4:
send_word = send_word
post(send_word)
except:
pass
return
def crawl_tomorrow_calendar():
res = requests.get('http://www.yphs.tp.edu.tw/yphs/gr2.aspx')
soup = BeautifulSoup(res.text, 'lxml')
calendar = '明日行事曆:\n 全校:' + soup.find_all(color='#404040')[16].text
if soup.find_all(color='#404040')[16].text == '\xa0':
calendar += 'N/A'
calendar = calendar + '\n 高一:' + soup.find_all(color='#404040')[21].text
if soup.find_all(color='#404040')[21].text == '\xa0':
calendar += 'N/A'
return calendar
def fetch_tomorrow_class_table():
count = int(0)
tomorrow_class = '\n明日課表:\n 早上:\n '
for i in cls[(datetime.today().weekday() + 1) % 7]:
if count == 4:
tomorrow_class += '\n 下午:\n '
tomorrow_class += '[' + i + ']'
if count < 8 and count != 3:
tomorrow_class += '->'
count += 1
return tomorrow_class
def post(send_word):
if platform == 'line':
line_bot_api.push_message(chatid, TextSendMessage(text=send_word,
wrap=True))
if platform == 'telegram':
requests.get('https://api.telegram.org/bot' + bottoken +
'/sendMessage?chat_id=' + chatid + '&text=' + send_word)
<|reserved_special_token_0|>
def close_log():
fw.close()
def main():
open_log()
login_homework()
crawl_and_fetch_today_homework(crawl_tomorrow_calendar(),
fetch_tomorrow_class_table())
close_log()
if datetime.today().weekday() == 6 and datetime.today(
).hour == 21 and datetime.today().minute < 10:
send_word = '[主旨:機器人訊息]\n***系統訊息***\n' + crawl_tomorrow_calendar(
) + '\n' + fetch_tomorrow_class_table()
post(send_word)
main()
<|reserved_special_token_1|>
# !/usr/bin/python
# coding:utf-8
import requests
from bs4 import BeautifulSoup
import re
from datetime import datetime
#log file PATH (absolute path recommended)
log_path='./log.txt'
#personal info used to log into the contact book
sid=''#student number (Ex. 10731187)
cid=''#national ID number (Ex. A123456789)
bir=''#birthday (Ex. 2000/1/1)
#line or telegram module
#platform='telegram'
platform='line'
if platform=='line':
from linebot import LineBotApi
from linebot.models import TextSendMessage
#line api token
bottoken=''
#line chat id
chatid=''
line_bot_api = LineBotApi(bottoken)
if platform=='telegram':
#telegram bot token
bottoken=''
#telegram group chat id
chatid=''
#weekly class timetable
cls=[['學校活動','英文','化學','國文','地理','生物','公民','歷史','數學'],
['彈性課程','地科','數學','數學','資訊','西洋影視','國文','國文','英文'],
['數學','物理','生活科技','體育','國文','化學','音樂','英文','英文'],
['數學','論孟選讀','生物','多元選修','歷史','化學','英文','國防','物理'],
['彈性課程','英文','數學','地理','公民','國文','體育','物理','社團'],[],[]]
def open_log():
global log
global fw
try:
fr = open(log_path, "r")
log=fr.read().split('\n')
fr.close()
except:
fw = open(log_path, "w+")
log=''
return
fw = open(log_path, "a")
return
def login_homework():
res = requests.get('http://www.yphs.tp.edu.tw/tea/tu2.aspx')
soup = BeautifulSoup(res.text, "lxml")
VIEWSTATE=soup.find(id="__VIEWSTATE")
VIEWSTATEGENERATOR=soup.find(id="__VIEWSTATEGENERATOR")
EVENTVALIDATION=soup.find(id="__EVENTVALIDATION")
res=requests.post('http://www.yphs.tp.edu.tw/tea/tu2.aspx', allow_redirects=False, data = {'__VIEWSTATE':VIEWSTATE.get('value'),'__VIEWSTATEGENERATOR':VIEWSTATEGENERATOR.get('value'),'__EVENTVALIDATION':EVENTVALIDATION.get('value'),'chk_id':'學生/家長','tbx_sno':sid,'tbx_sid':cid,'tbx_sbir':bir,'but_login_stud':'登 入'})
global cook
cook=res.cookies['ASP.NET_SessionId']
return
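# ASP.NET WebForms pages reject postbacks unless the hidden __VIEWSTATE /
# __EVENTVALIDATION fields scraped above are echoed back, which is why both
# this login and the per-post detail clicks below re-send them on every POST.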
def crawl_and_fetch_today_homework(tomorrow_calendar,tomorrow_class_table):
send = requests.get('http://www.yphs.tp.edu.tw/tea/tu2-1.aspx',cookies={'ASP.NET_SessionId':cook})
soup = BeautifulSoup(send.text, "lxml")
VIEWSTATE=soup.find(id="__VIEWSTATE")
VIEWSTATEGENERATOR=soup.find(id="__VIEWSTATEGENERATOR")
EVENTVALIDATION=soup.find(id="__EVENTVALIDATION")
    for x in range(15,1,-1):#posts 1~15 on the first page
        try:#try, since a page may have fewer than 15 posts
            #convert the number to a string
num=str('')
if(x<10):
num='0'+str(x)
else:
num=str(x)
            #crawl the post body
send = requests.post('http://www.yphs.tp.edu.tw/tea/tu2-1.aspx',cookies={'ASP.NET_SessionId':cook}, data = {'__VIEWSTATE':VIEWSTATE.get('value'),'__VIEWSTATEGENERATOR':VIEWSTATEGENERATOR.get('value'),'__EVENTVALIDATION':EVENTVALIDATION.get('value'),('GridViewS$ctl'+num+'$but_vf1'):'詳細內容'})
soup = BeautifulSoup(send.text, "lxml")
            #check whether it was already posted
ok=bool(True)
for y in range(0,len(log),1):
if soup.find(id='Lab_purport').text==log[y]:
ok=bool(False)
            if ok==True:#not posted yet
fw.write(soup.find(id='Lab_purport').text+'\n')
post_title=str('[主旨:'+str(soup.find(id='Lab_purport').text)+']')
post_content=str(soup.find(id='Lab_content').text)
post_attachment=str(' ')
if(soup.find(target='_blank')):
post_attachment=soup.find(target='_blank').get('href')
send_word=post_title+'\n'+post_content+'\n'+post_attachment
if(str(soup.find(id='Lab_purport').text).find('聯絡簿')>=0 and datetime.today().weekday()<4):
send_word=send_word+'\n***系統訊息***\n'+tomorrow_calendar+'\n'+tomorrow_class_table
if(str(soup.find(id='Lab_purport').text).find('聯絡簿')>=0 and datetime.today().weekday() == 4 ):
send_word=send_word
post(send_word)
except:
pass
return
def crawl_tomorrow_calendar():
res = requests.get('http://www.yphs.tp.edu.tw/yphs/gr2.aspx')
soup = BeautifulSoup(res.text, "lxml")
calendar='明日行事曆:\n 全校:'+soup.find_all(color="#404040")[16].text
if(soup.find_all(color="#404040")[16].text==' '):
calendar+='N/A'
calendar=calendar+'\n 高一:'+soup.find_all(color="#404040")[21].text
if(soup.find_all(color="#404040")[21].text==' '):
calendar+='N/A'
return calendar
def fetch_tomorrow_class_table():
    count = 0
tomorrow_class='\n明日課表:\n 早上:\n '
for i in cls[(datetime.today().weekday()+1)%7]:
if(count==4):
tomorrow_class+='\n 下午:\n '
tomorrow_class+='['+i+']'
if(count<8 and count!=3):
tomorrow_class+='->'
count+=1
return tomorrow_class
def post(send_word):
if platform=='line':
line_bot_api.push_message(chatid,TextSendMessage(text=send_word,wrap=True))
if platform=='telegram':
requests.get("https://api.telegram.org/bot"+bottoken+"/sendMessage?chat_id="+chatid+"&text="+send_word)
'''
!!!contact ab0897867564534231@gmail.com for this function!!!
def crawl_message_board():
res = requests.get('http://59.120.227.144:11300/line/api.php')
soup = BeautifulSoup(res.text, "lxml")
message_board = soup.find_all('td')
message='\n\n留言板( http://59.120.227.144:11300/line/ ) : \n'
for i in range(0,len(message_board),3):
message=message+'第'+str(int((i/3)+1))+'則:\n-'+message_board[i+1].text+"\n--來自:"+message_board[i+2].text+'\n'
return message
'''
def close_log():
fw.close()
def main():
open_log()
login_homework()
crawl_and_fetch_today_homework(crawl_tomorrow_calendar(),fetch_tomorrow_class_table())
close_log()
    # On Sunday evening, remind everyone that school resumes tomorrow
if(datetime.today().weekday()==6 and datetime.today().hour == 21 and datetime.today().minute<10):
send_word='[主旨:機器人訊息]\n***系統訊息***\n'+crawl_tomorrow_calendar()+'\n'+fetch_tomorrow_class_table()
post(send_word)
main()
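
'''
The Sunday hour/minute window above implies this script is run periodically
(roughly every ten minutes), e.g. from cron. A minimal self-contained
alternative sketch using only the standard library -- an illustrative
assumption, not part of the original deployment:

import time

while True:
    main()           # crawl, post, and log once per cycle
    time.sleep(600)  # wait ten minutes before the next poll
'''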
|
flexible
|
{
"blob_id": "77f37a80d160e42bb74017a55aa9d06b4c8d4fee",
"index": 4320,
"step-1": "<mask token>\n\n\ndef login_homework():\n res = requests.get('http://www.yphs.tp.edu.tw/tea/tu2.aspx')\n soup = BeautifulSoup(res.text, 'lxml')\n VIEWSTATE = soup.find(id='__VIEWSTATE')\n VIEWSTATEGENERATOR = soup.find(id='__VIEWSTATEGENERATOR')\n EVENTVALIDATION = soup.find(id='__EVENTVALIDATION')\n res = requests.post('http://www.yphs.tp.edu.tw/tea/tu2.aspx',\n allow_redirects=False, data={'__VIEWSTATE': VIEWSTATE.get('value'),\n '__VIEWSTATEGENERATOR': VIEWSTATEGENERATOR.get('value'),\n '__EVENTVALIDATION': EVENTVALIDATION.get('value'), 'chk_id':\n '學生/家長', 'tbx_sno': sid, 'tbx_sid': cid, 'tbx_sbir': bir,\n 'but_login_stud': '登\\u3000\\u3000入'})\n global cook\n cook = res.cookies['ASP.NET_SessionId']\n return\n\n\n<mask token>\n\n\ndef crawl_tomorrow_calendar():\n res = requests.get('http://www.yphs.tp.edu.tw/yphs/gr2.aspx')\n soup = BeautifulSoup(res.text, 'lxml')\n calendar = '明日行事曆:\\n 全校:' + soup.find_all(color='#404040')[16].text\n if soup.find_all(color='#404040')[16].text == '\\xa0':\n calendar += 'N/A'\n calendar = calendar + '\\n 高一:' + soup.find_all(color='#404040')[21].text\n if soup.find_all(color='#404040')[21].text == '\\xa0':\n calendar += 'N/A'\n return calendar\n\n\ndef fetch_tomorrow_class_table():\n count = int(0)\n tomorrow_class = '\\n明日課表:\\n 早上:\\n '\n for i in cls[(datetime.today().weekday() + 1) % 7]:\n if count == 4:\n tomorrow_class += '\\n 下午:\\n '\n tomorrow_class += '[' + i + ']'\n if count < 8 and count != 3:\n tomorrow_class += '->'\n count += 1\n return tomorrow_class\n\n\ndef post(send_word):\n if platform == 'line':\n line_bot_api.push_message(chatid, TextSendMessage(text=send_word,\n wrap=True))\n if platform == 'telegram':\n requests.get('https://api.telegram.org/bot' + bottoken +\n '/sendMessage?chat_id=' + chatid + '&text=' + send_word)\n\n\n<mask token>\n\n\ndef close_log():\n fw.close()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef open_log():\n global log\n global fw\n try:\n fr = open(log_path, 'r')\n log = fr.read().split('\\n')\n fr.close()\n except:\n fw = open(log_path, 'w+')\n log = ''\n return\n fw = open(log_path, 'a')\n return\n\n\ndef login_homework():\n res = requests.get('http://www.yphs.tp.edu.tw/tea/tu2.aspx')\n soup = BeautifulSoup(res.text, 'lxml')\n VIEWSTATE = soup.find(id='__VIEWSTATE')\n VIEWSTATEGENERATOR = soup.find(id='__VIEWSTATEGENERATOR')\n EVENTVALIDATION = soup.find(id='__EVENTVALIDATION')\n res = requests.post('http://www.yphs.tp.edu.tw/tea/tu2.aspx',\n allow_redirects=False, data={'__VIEWSTATE': VIEWSTATE.get('value'),\n '__VIEWSTATEGENERATOR': VIEWSTATEGENERATOR.get('value'),\n '__EVENTVALIDATION': EVENTVALIDATION.get('value'), 'chk_id':\n '學生/家長', 'tbx_sno': sid, 'tbx_sid': cid, 'tbx_sbir': bir,\n 'but_login_stud': '登\\u3000\\u3000入'})\n global cook\n cook = res.cookies['ASP.NET_SessionId']\n return\n\n\ndef crawl_and_fetch_today_homework(tomorrow_calendar, tomorrow_class_table):\n send = requests.get('http://www.yphs.tp.edu.tw/tea/tu2-1.aspx', cookies\n ={'ASP.NET_SessionId': cook})\n soup = BeautifulSoup(send.text, 'lxml')\n VIEWSTATE = soup.find(id='__VIEWSTATE')\n VIEWSTATEGENERATOR = soup.find(id='__VIEWSTATEGENERATOR')\n EVENTVALIDATION = soup.find(id='__EVENTVALIDATION')\n for x in range(15, 1, -1):\n try:\n num = str('')\n if x < 10:\n num = '0' + str(x)\n else:\n num = str(x)\n send = requests.post('http://www.yphs.tp.edu.tw/tea/tu2-1.aspx',\n cookies={'ASP.NET_SessionId': cook}, data={'__VIEWSTATE':\n VIEWSTATE.get('value'), '__VIEWSTATEGENERATOR':\n VIEWSTATEGENERATOR.get('value'), '__EVENTVALIDATION':\n EVENTVALIDATION.get('value'), ('GridViewS$ctl' + num +\n '$but_vf1'): '詳細內容'})\n soup = BeautifulSoup(send.text, 'lxml')\n ok = bool(True)\n for y in range(0, len(log), 1):\n if soup.find(id='Lab_purport').text == log[y]:\n ok = bool(False)\n if ok == True:\n fw.write(soup.find(id='Lab_purport').text + '\\n')\n post_title = str('[主旨:' + str(soup.find(id='Lab_purport').\n text) + ']')\n post_content = str(soup.find(id='Lab_content').text)\n post_attachment = str(' ')\n if soup.find(target='_blank'):\n post_attachment = soup.find(target='_blank').get('href')\n send_word = (post_title + '\\n' + post_content + '\\n' +\n post_attachment)\n if str(soup.find(id='Lab_purport').text).find('聯絡簿'\n ) >= 0 and datetime.today().weekday() < 4:\n send_word = (send_word + '\\n***系統訊息***\\n' +\n tomorrow_calendar + '\\n' + tomorrow_class_table)\n if str(soup.find(id='Lab_purport').text).find('聯絡簿'\n ) >= 0 and datetime.today().weekday() == 4:\n send_word = send_word\n post(send_word)\n except:\n pass\n return\n\n\ndef crawl_tomorrow_calendar():\n res = requests.get('http://www.yphs.tp.edu.tw/yphs/gr2.aspx')\n soup = BeautifulSoup(res.text, 'lxml')\n calendar = '明日行事曆:\\n 全校:' + soup.find_all(color='#404040')[16].text\n if soup.find_all(color='#404040')[16].text == '\\xa0':\n calendar += 'N/A'\n calendar = calendar + '\\n 高一:' + soup.find_all(color='#404040')[21].text\n if soup.find_all(color='#404040')[21].text == '\\xa0':\n calendar += 'N/A'\n return calendar\n\n\ndef fetch_tomorrow_class_table():\n count = int(0)\n tomorrow_class = '\\n明日課表:\\n 早上:\\n '\n for i in cls[(datetime.today().weekday() + 1) % 7]:\n if count == 4:\n tomorrow_class += '\\n 下午:\\n '\n tomorrow_class += '[' + i + ']'\n if count < 8 and count != 3:\n tomorrow_class += '->'\n count += 1\n return tomorrow_class\n\n\ndef post(send_word):\n if platform == 'line':\n 
line_bot_api.push_message(chatid, TextSendMessage(text=send_word,\n wrap=True))\n if platform == 'telegram':\n requests.get('https://api.telegram.org/bot' + bottoken +\n '/sendMessage?chat_id=' + chatid + '&text=' + send_word)\n\n\n<mask token>\n\n\ndef close_log():\n fw.close()\n\n\ndef main():\n open_log()\n login_homework()\n crawl_and_fetch_today_homework(crawl_tomorrow_calendar(),\n fetch_tomorrow_class_table())\n close_log()\n if datetime.today().weekday() == 6 and datetime.today(\n ).hour == 21 and datetime.today().minute < 10:\n send_word = '[主旨:機器人訊息]\\n***系統訊息***\\n' + crawl_tomorrow_calendar(\n ) + '\\n' + fetch_tomorrow_class_table()\n post(send_word)\n\n\n<mask token>\n",
"step-3": "<mask token>\nlog_path = './log.txt'\nsid = ''\ncid = ''\nbir = ''\nplatform = 'line'\nif platform == 'line':\n from linebot import LineBotApi\n from linebot.models import TextSendMessage\n bottoken = ''\n chatid = ''\n line_bot_api = LineBotApi(bottoken)\nif platform == 'telegram':\n bottoken = ''\n chatid = ''\ncls = [['學校活動', '英文', '化學', '國文', '地理', '生物', '公民', '歷史', '數學'], ['彈性課程',\n '地科', '數學', '數學', '資訊', '西洋影視', '國文', '國文', '英文'], ['數學', '物理', '生活科技',\n '體育', '國文', '化學', '音樂', '英文', '英文'], ['數學', '論孟選讀', '生物', '多元選修', '歷史',\n '化學', '英文', '國防', '物理'], ['彈性課程', '英文', '數學', '地理', '公民', '國文', '體育',\n '物理', '社團'], [], []]\n\n\ndef open_log():\n global log\n global fw\n try:\n fr = open(log_path, 'r')\n log = fr.read().split('\\n')\n fr.close()\n except:\n fw = open(log_path, 'w+')\n log = ''\n return\n fw = open(log_path, 'a')\n return\n\n\ndef login_homework():\n res = requests.get('http://www.yphs.tp.edu.tw/tea/tu2.aspx')\n soup = BeautifulSoup(res.text, 'lxml')\n VIEWSTATE = soup.find(id='__VIEWSTATE')\n VIEWSTATEGENERATOR = soup.find(id='__VIEWSTATEGENERATOR')\n EVENTVALIDATION = soup.find(id='__EVENTVALIDATION')\n res = requests.post('http://www.yphs.tp.edu.tw/tea/tu2.aspx',\n allow_redirects=False, data={'__VIEWSTATE': VIEWSTATE.get('value'),\n '__VIEWSTATEGENERATOR': VIEWSTATEGENERATOR.get('value'),\n '__EVENTVALIDATION': EVENTVALIDATION.get('value'), 'chk_id':\n '學生/家長', 'tbx_sno': sid, 'tbx_sid': cid, 'tbx_sbir': bir,\n 'but_login_stud': '登\\u3000\\u3000入'})\n global cook\n cook = res.cookies['ASP.NET_SessionId']\n return\n\n\ndef crawl_and_fetch_today_homework(tomorrow_calendar, tomorrow_class_table):\n send = requests.get('http://www.yphs.tp.edu.tw/tea/tu2-1.aspx', cookies\n ={'ASP.NET_SessionId': cook})\n soup = BeautifulSoup(send.text, 'lxml')\n VIEWSTATE = soup.find(id='__VIEWSTATE')\n VIEWSTATEGENERATOR = soup.find(id='__VIEWSTATEGENERATOR')\n EVENTVALIDATION = soup.find(id='__EVENTVALIDATION')\n for x in range(15, 1, -1):\n try:\n num = str('')\n if x < 10:\n num = '0' + str(x)\n else:\n num = str(x)\n send = requests.post('http://www.yphs.tp.edu.tw/tea/tu2-1.aspx',\n cookies={'ASP.NET_SessionId': cook}, data={'__VIEWSTATE':\n VIEWSTATE.get('value'), '__VIEWSTATEGENERATOR':\n VIEWSTATEGENERATOR.get('value'), '__EVENTVALIDATION':\n EVENTVALIDATION.get('value'), ('GridViewS$ctl' + num +\n '$but_vf1'): '詳細內容'})\n soup = BeautifulSoup(send.text, 'lxml')\n ok = bool(True)\n for y in range(0, len(log), 1):\n if soup.find(id='Lab_purport').text == log[y]:\n ok = bool(False)\n if ok == True:\n fw.write(soup.find(id='Lab_purport').text + '\\n')\n post_title = str('[主旨:' + str(soup.find(id='Lab_purport').\n text) + ']')\n post_content = str(soup.find(id='Lab_content').text)\n post_attachment = str(' ')\n if soup.find(target='_blank'):\n post_attachment = soup.find(target='_blank').get('href')\n send_word = (post_title + '\\n' + post_content + '\\n' +\n post_attachment)\n if str(soup.find(id='Lab_purport').text).find('聯絡簿'\n ) >= 0 and datetime.today().weekday() < 4:\n send_word = (send_word + '\\n***系統訊息***\\n' +\n tomorrow_calendar + '\\n' + tomorrow_class_table)\n if str(soup.find(id='Lab_purport').text).find('聯絡簿'\n ) >= 0 and datetime.today().weekday() == 4:\n send_word = send_word\n post(send_word)\n except:\n pass\n return\n\n\ndef crawl_tomorrow_calendar():\n res = requests.get('http://www.yphs.tp.edu.tw/yphs/gr2.aspx')\n soup = BeautifulSoup(res.text, 'lxml')\n calendar = '明日行事曆:\\n 全校:' + soup.find_all(color='#404040')[16].text\n if 
soup.find_all(color='#404040')[16].text == '\\xa0':\n calendar += 'N/A'\n calendar = calendar + '\\n 高一:' + soup.find_all(color='#404040')[21].text\n if soup.find_all(color='#404040')[21].text == '\\xa0':\n calendar += 'N/A'\n return calendar\n\n\ndef fetch_tomorrow_class_table():\n count = int(0)\n tomorrow_class = '\\n明日課表:\\n 早上:\\n '\n for i in cls[(datetime.today().weekday() + 1) % 7]:\n if count == 4:\n tomorrow_class += '\\n 下午:\\n '\n tomorrow_class += '[' + i + ']'\n if count < 8 and count != 3:\n tomorrow_class += '->'\n count += 1\n return tomorrow_class\n\n\ndef post(send_word):\n if platform == 'line':\n line_bot_api.push_message(chatid, TextSendMessage(text=send_word,\n wrap=True))\n if platform == 'telegram':\n requests.get('https://api.telegram.org/bot' + bottoken +\n '/sendMessage?chat_id=' + chatid + '&text=' + send_word)\n\n\n<mask token>\n\n\ndef close_log():\n fw.close()\n\n\ndef main():\n open_log()\n login_homework()\n crawl_and_fetch_today_homework(crawl_tomorrow_calendar(),\n fetch_tomorrow_class_table())\n close_log()\n if datetime.today().weekday() == 6 and datetime.today(\n ).hour == 21 and datetime.today().minute < 10:\n send_word = '[主旨:機器人訊息]\\n***系統訊息***\\n' + crawl_tomorrow_calendar(\n ) + '\\n' + fetch_tomorrow_class_table()\n post(send_word)\n\n\nmain()\n",
"step-4": "import requests\nfrom bs4 import BeautifulSoup\nimport re\nfrom datetime import datetime\nlog_path = './log.txt'\nsid = ''\ncid = ''\nbir = ''\nplatform = 'line'\nif platform == 'line':\n from linebot import LineBotApi\n from linebot.models import TextSendMessage\n bottoken = ''\n chatid = ''\n line_bot_api = LineBotApi(bottoken)\nif platform == 'telegram':\n bottoken = ''\n chatid = ''\ncls = [['學校活動', '英文', '化學', '國文', '地理', '生物', '公民', '歷史', '數學'], ['彈性課程',\n '地科', '數學', '數學', '資訊', '西洋影視', '國文', '國文', '英文'], ['數學', '物理', '生活科技',\n '體育', '國文', '化學', '音樂', '英文', '英文'], ['數學', '論孟選讀', '生物', '多元選修', '歷史',\n '化學', '英文', '國防', '物理'], ['彈性課程', '英文', '數學', '地理', '公民', '國文', '體育',\n '物理', '社團'], [], []]\n\n\ndef open_log():\n global log\n global fw\n try:\n fr = open(log_path, 'r')\n log = fr.read().split('\\n')\n fr.close()\n except:\n fw = open(log_path, 'w+')\n log = ''\n return\n fw = open(log_path, 'a')\n return\n\n\ndef login_homework():\n res = requests.get('http://www.yphs.tp.edu.tw/tea/tu2.aspx')\n soup = BeautifulSoup(res.text, 'lxml')\n VIEWSTATE = soup.find(id='__VIEWSTATE')\n VIEWSTATEGENERATOR = soup.find(id='__VIEWSTATEGENERATOR')\n EVENTVALIDATION = soup.find(id='__EVENTVALIDATION')\n res = requests.post('http://www.yphs.tp.edu.tw/tea/tu2.aspx',\n allow_redirects=False, data={'__VIEWSTATE': VIEWSTATE.get('value'),\n '__VIEWSTATEGENERATOR': VIEWSTATEGENERATOR.get('value'),\n '__EVENTVALIDATION': EVENTVALIDATION.get('value'), 'chk_id':\n '學生/家長', 'tbx_sno': sid, 'tbx_sid': cid, 'tbx_sbir': bir,\n 'but_login_stud': '登\\u3000\\u3000入'})\n global cook\n cook = res.cookies['ASP.NET_SessionId']\n return\n\n\ndef crawl_and_fetch_today_homework(tomorrow_calendar, tomorrow_class_table):\n send = requests.get('http://www.yphs.tp.edu.tw/tea/tu2-1.aspx', cookies\n ={'ASP.NET_SessionId': cook})\n soup = BeautifulSoup(send.text, 'lxml')\n VIEWSTATE = soup.find(id='__VIEWSTATE')\n VIEWSTATEGENERATOR = soup.find(id='__VIEWSTATEGENERATOR')\n EVENTVALIDATION = soup.find(id='__EVENTVALIDATION')\n for x in range(15, 1, -1):\n try:\n num = str('')\n if x < 10:\n num = '0' + str(x)\n else:\n num = str(x)\n send = requests.post('http://www.yphs.tp.edu.tw/tea/tu2-1.aspx',\n cookies={'ASP.NET_SessionId': cook}, data={'__VIEWSTATE':\n VIEWSTATE.get('value'), '__VIEWSTATEGENERATOR':\n VIEWSTATEGENERATOR.get('value'), '__EVENTVALIDATION':\n EVENTVALIDATION.get('value'), ('GridViewS$ctl' + num +\n '$but_vf1'): '詳細內容'})\n soup = BeautifulSoup(send.text, 'lxml')\n ok = bool(True)\n for y in range(0, len(log), 1):\n if soup.find(id='Lab_purport').text == log[y]:\n ok = bool(False)\n if ok == True:\n fw.write(soup.find(id='Lab_purport').text + '\\n')\n post_title = str('[主旨:' + str(soup.find(id='Lab_purport').\n text) + ']')\n post_content = str(soup.find(id='Lab_content').text)\n post_attachment = str(' ')\n if soup.find(target='_blank'):\n post_attachment = soup.find(target='_blank').get('href')\n send_word = (post_title + '\\n' + post_content + '\\n' +\n post_attachment)\n if str(soup.find(id='Lab_purport').text).find('聯絡簿'\n ) >= 0 and datetime.today().weekday() < 4:\n send_word = (send_word + '\\n***系統訊息***\\n' +\n tomorrow_calendar + '\\n' + tomorrow_class_table)\n if str(soup.find(id='Lab_purport').text).find('聯絡簿'\n ) >= 0 and datetime.today().weekday() == 4:\n send_word = send_word\n post(send_word)\n except:\n pass\n return\n\n\ndef crawl_tomorrow_calendar():\n res = requests.get('http://www.yphs.tp.edu.tw/yphs/gr2.aspx')\n soup = BeautifulSoup(res.text, 'lxml')\n calendar = '明日行事曆:\\n 全校:' 
+ soup.find_all(color='#404040')[16].text\n if soup.find_all(color='#404040')[16].text == '\\xa0':\n calendar += 'N/A'\n calendar = calendar + '\\n 高一:' + soup.find_all(color='#404040')[21].text\n if soup.find_all(color='#404040')[21].text == '\\xa0':\n calendar += 'N/A'\n return calendar\n\n\ndef fetch_tomorrow_class_table():\n count = int(0)\n tomorrow_class = '\\n明日課表:\\n 早上:\\n '\n for i in cls[(datetime.today().weekday() + 1) % 7]:\n if count == 4:\n tomorrow_class += '\\n 下午:\\n '\n tomorrow_class += '[' + i + ']'\n if count < 8 and count != 3:\n tomorrow_class += '->'\n count += 1\n return tomorrow_class\n\n\ndef post(send_word):\n if platform == 'line':\n line_bot_api.push_message(chatid, TextSendMessage(text=send_word,\n wrap=True))\n if platform == 'telegram':\n requests.get('https://api.telegram.org/bot' + bottoken +\n '/sendMessage?chat_id=' + chatid + '&text=' + send_word)\n\n\n<mask token>\n\n\ndef close_log():\n fw.close()\n\n\ndef main():\n open_log()\n login_homework()\n crawl_and_fetch_today_homework(crawl_tomorrow_calendar(),\n fetch_tomorrow_class_table())\n close_log()\n if datetime.today().weekday() == 6 and datetime.today(\n ).hour == 21 and datetime.today().minute < 10:\n send_word = '[主旨:機器人訊息]\\n***系統訊息***\\n' + crawl_tomorrow_calendar(\n ) + '\\n' + fetch_tomorrow_class_table()\n post(send_word)\n\n\nmain()\n",
"step-5": "# !/usr/bin/python \n# coding:utf-8 \nimport requests\nfrom bs4 import BeautifulSoup\nimport re\nfrom datetime import datetime\n\n#紀錄檔PATH(建議絕對位置)\nlog_path='./log.txt'\n\n#登入聯絡簿的個資\nsid=''#學號(Ex. 10731187)\ncid=''#生份證號(Ex. A123456789)\nbir=''#生日(Ex. 2000/1/1)\n\n#line or telegram module\n\n#platform='telegram'\nplatform='line'\n\nif platform=='line':\n from linebot import LineBotApi\n from linebot.models import TextSendMessage\n #line api token\n bottoken=''\n #line chat id\n chatid=''\n\n line_bot_api = LineBotApi(bottoken)\n\nif platform=='telegram':\n #telegram bot token\n bottoken=''\n #telegram group chat id\n chatid=''\n\n#課表\ncls=[['學校活動','英文','化學','國文','地理','生物','公民','歷史','數學'],\n ['彈性課程','地科','數學','數學','資訊','西洋影視','國文','國文','英文'],\n ['數學','物理','生活科技','體育','國文','化學','音樂','英文','英文'],\n ['數學','論孟選讀','生物','多元選修','歷史','化學','英文','國防','物理'],\n ['彈性課程','英文','數學','地理','公民','國文','體育','物理','社團'],[],[]]\n\ndef open_log():\n global log\n global fw\n try:\n fr = open(log_path, \"r\")\n log=fr.read().split('\\n')\n fr.close()\n except:\n fw = open(log_path, \"w+\")\n log=''\n return\n fw = open(log_path, \"a\")\n return\n\ndef login_homework():\n res = requests.get('http://www.yphs.tp.edu.tw/tea/tu2.aspx')\n soup = BeautifulSoup(res.text, \"lxml\")\n VIEWSTATE=soup.find(id=\"__VIEWSTATE\")\n VIEWSTATEGENERATOR=soup.find(id=\"__VIEWSTATEGENERATOR\")\n EVENTVALIDATION=soup.find(id=\"__EVENTVALIDATION\")\n res=requests.post('http://www.yphs.tp.edu.tw/tea/tu2.aspx', allow_redirects=False, data = {'__VIEWSTATE':VIEWSTATE.get('value'),'__VIEWSTATEGENERATOR':VIEWSTATEGENERATOR.get('value'),'__EVENTVALIDATION':EVENTVALIDATION.get('value'),'chk_id':'學生/家長','tbx_sno':sid,'tbx_sid':cid,'tbx_sbir':bir,'but_login_stud':'登 入'})\n global cook\n cook=res.cookies['ASP.NET_SessionId']\n return\n\ndef crawl_and_fetch_today_homework(tomorrow_calendar,tomorrow_class_table):\n send = requests.get('http://www.yphs.tp.edu.tw/tea/tu2-1.aspx',cookies={'ASP.NET_SessionId':cook})\n soup = BeautifulSoup(send.text, \"lxml\")\n VIEWSTATE=soup.find(id=\"__VIEWSTATE\")\n VIEWSTATEGENERATOR=soup.find(id=\"__VIEWSTATEGENERATOR\")\n EVENTVALIDATION=soup.find(id=\"__EVENTVALIDATION\")\n for x in range(15,1,-1):#第一頁1~15則\n try:#用try怕有頁面沒15則post\n #數字轉文字\n num=str('')\n if(x<10):\n num='0'+str(x)\n else:\n num=str(x)\n #爬內文\n send = requests.post('http://www.yphs.tp.edu.tw/tea/tu2-1.aspx',cookies={'ASP.NET_SessionId':cook}, data = {'__VIEWSTATE':VIEWSTATE.get('value'),'__VIEWSTATEGENERATOR':VIEWSTATEGENERATOR.get('value'),'__EVENTVALIDATION':EVENTVALIDATION.get('value'),('GridViewS$ctl'+num+'$but_vf1'):'詳細內容'})\n soup = BeautifulSoup(send.text, \"lxml\")\n #檢查市否已發過\n ok=bool(True)\n for y in range(0,len(log),1):\n if soup.find(id='Lab_purport').text==log[y]:\n ok=bool(False)\n if ok==True:#沒發過\n fw.write(soup.find(id='Lab_purport').text+'\\n')\n post_title=str('[主旨:'+str(soup.find(id='Lab_purport').text)+']')\n post_content=str(soup.find(id='Lab_content').text)\n post_attachment=str(' ')\n if(soup.find(target='_blank')):\n post_attachment=soup.find(target='_blank').get('href')\n send_word=post_title+'\\n'+post_content+'\\n'+post_attachment\n if(str(soup.find(id='Lab_purport').text).find('聯絡簿')>=0 and datetime.today().weekday()<4):\n send_word=send_word+'\\n***系統訊息***\\n'+tomorrow_calendar+'\\n'+tomorrow_class_table\n if(str(soup.find(id='Lab_purport').text).find('聯絡簿')>=0 and datetime.today().weekday() == 4 ):\n send_word=send_word\n post(send_word)\n except:\n pass\n return\n\ndef crawl_tomorrow_calendar():\n res = 
requests.get('http://www.yphs.tp.edu.tw/yphs/gr2.aspx')\n soup = BeautifulSoup(res.text, \"lxml\")\n calendar='明日行事曆:\\n 全校:'+soup.find_all(color=\"#404040\")[16].text\n if(soup.find_all(color=\"#404040\")[16].text==' '):\n calendar+='N/A'\n calendar=calendar+'\\n 高一:'+soup.find_all(color=\"#404040\")[21].text\n if(soup.find_all(color=\"#404040\")[21].text==' '):\n calendar+='N/A'\n return calendar\n\ndef fetch_tomorrow_class_table():\n count=int(0)\n tomorrow_class='\\n明日課表:\\n 早上:\\n '\n for i in cls[(datetime.today().weekday()+1)%7]:\n if(count==4):\n tomorrow_class+='\\n 下午:\\n '\n tomorrow_class+='['+i+']'\n if(count<8 and count!=3):\n tomorrow_class+='->'\n count+=1\n return tomorrow_class\n\ndef post(send_word):\n if platform=='line':\n line_bot_api.push_message(chatid,TextSendMessage(text=send_word,wrap=True))\n if platform=='telegram':\n requests.get(\"https://api.telegram.org/bot\"+bottoken+\"/sendMessage?chat_id=\"+chatid+\"&text=\"+send_word)\n'''\n\n!!!contact ab0897867564534231@gmail.com for this function!!!\n\ndef crawl_message_board():\n res = requests.get('http://59.120.227.144:11300/line/api.php')\n soup = BeautifulSoup(res.text, \"lxml\")\n message_board = soup.find_all('td')\n message='\\n\\n留言板( http://59.120.227.144:11300/line/ ) : \\n'\n for i in range(0,len(message_board),3):\n message=message+'第'+str(int((i/3)+1))+'則:\\n-'+message_board[i+1].text+\"\\n--來自:\"+message_board[i+2].text+'\\n'\n return message\n'''\n\ndef close_log():\n fw.close()\n\ndef main():\n open_log()\n login_homework()\n crawl_and_fetch_today_homework(crawl_tomorrow_calendar(),fetch_tomorrow_class_table())\n close_log()\n\n #星期天提醒明天要上課\n if(datetime.today().weekday()==6 and datetime.today().hour == 21 and datetime.today().minute<10):\n send_word='[主旨:機器人訊息]\\n***系統訊息***\\n'+crawl_tomorrow_calendar()+'\\n'+fetch_tomorrow_class_table()\n post(send_word)\nmain()",
"step-ids": [
5,
8,
10,
11,
12
]
}
|
[
5,
8,
10,
11,
12
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def trapezoid_integral(**kwargs):
a = kwargs.get('a', None)
b = kwargs.get('b', None)
n = kwargs.get('n', 2)
y_generator = kwargs.get('y_generator', None)
x = kwargs.get('x', None)
y = kwargs.get('y', None)
if y is None:
h = (b - a) / n
x = np.linspace(a, b, n + 1)
y = [y_generator(x[i]) for i in range(n + 1)]
vectors_length = len(x)
integral_value = y[0]
for i in range(2, vectors_length):
integral_value += 2 * y[i - 1]
integral_value += y[vectors_length - 1]
integral_value *= h / 2
return integral_value
else:
sum = 0
for i in range(len(x) - 1):
sum += (y[i] + y[i + 1]) / 2 * (x[i + 1] - x[i])
return sum
<|reserved_special_token_1|>
import numpy as np
<|reserved_special_token_0|>
def trapezoid_integral(**kwargs):
a = kwargs.get('a', None)
b = kwargs.get('b', None)
n = kwargs.get('n', 2)
y_generator = kwargs.get('y_generator', None)
x = kwargs.get('x', None)
y = kwargs.get('y', None)
if y is None:
h = (b - a) / n
x = np.linspace(a, b, n + 1)
y = [y_generator(x[i]) for i in range(n + 1)]
vectors_length = len(x)
integral_value = y[0]
for i in range(2, vectors_length):
integral_value += 2 * y[i - 1]
integral_value += y[vectors_length - 1]
integral_value *= h / 2
return integral_value
else:
sum = 0
for i in range(len(x) - 1):
sum += (y[i] + y[i + 1]) / 2 * (x[i + 1] - x[i])
return sum
<|reserved_special_token_1|>
import numpy as np
"""
function for calculating integrals using the trapezoid method
x is a vector of independent variables
y is a vector of dependent variables
a is the initial value
b is the final value
n is the number of intervals
y_generator is the function to be integrated
"""
def trapezoid_integral(**kwargs):
a = kwargs.get('a', None)
b = kwargs.get('b', None)
n = kwargs.get('n', 2)
y_generator = kwargs.get('y_generator', None)
x = kwargs.get('x', None)
y = kwargs.get('y', None)
if y is None:
h = (b-a)/n
x = np.linspace(a, b, n+1)
y = [y_generator(x[i]) for i in range(n+1)]
vectors_length = len(x)
integral_value = y[0]
for i in range(2, vectors_length):
integral_value += 2*y[i - 1]
integral_value += y[vectors_length - 1]
integral_value *= h/2
return integral_value
else:
sum = 0
for i in range(len(x) - 1):
sum += ((y[i] + y[i+1])/2 * (x[i+1] - x[i]))
return sum
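
# A minimal usage sketch (an illustrative addition, not part of the original
# module): integrate sin on [0, pi], whose exact value is 2, once from a
# generator function and once from precomputed vectors.
if __name__ == '__main__':
    approx_a = trapezoid_integral(a=0, b=np.pi, n=100, y_generator=np.sin)
    xs = np.linspace(0, np.pi, 101)
    approx_b = trapezoid_integral(x=xs, y=np.sin(xs))
    print(approx_a, approx_b)  # both close to 2.0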
|
flexible
|
{
"blob_id": "8ce468460a81c7869f3abb69035a033c58e0f699",
"index": 8828,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef trapezoid_integral(**kwargs):\n a = kwargs.get('a', None)\n b = kwargs.get('b', None)\n n = kwargs.get('n', 2)\n y_generator = kwargs.get('y_generator', None)\n x = kwargs.get('x', None)\n y = kwargs.get('y', None)\n if y is None:\n h = (b - a) / n\n x = np.linspace(a, b, n + 1)\n y = [y_generator(x[i]) for i in range(n + 1)]\n vectors_length = len(x)\n integral_value = y[0]\n for i in range(2, vectors_length):\n integral_value += 2 * y[i - 1]\n integral_value += y[vectors_length - 1]\n integral_value *= h / 2\n return integral_value\n else:\n sum = 0\n for i in range(len(x) - 1):\n sum += (y[i] + y[i + 1]) / 2 * (x[i + 1] - x[i])\n return sum\n",
"step-3": "import numpy as np\n<mask token>\n\n\ndef trapezoid_integral(**kwargs):\n a = kwargs.get('a', None)\n b = kwargs.get('b', None)\n n = kwargs.get('n', 2)\n y_generator = kwargs.get('y_generator', None)\n x = kwargs.get('x', None)\n y = kwargs.get('y', None)\n if y is None:\n h = (b - a) / n\n x = np.linspace(a, b, n + 1)\n y = [y_generator(x[i]) for i in range(n + 1)]\n vectors_length = len(x)\n integral_value = y[0]\n for i in range(2, vectors_length):\n integral_value += 2 * y[i - 1]\n integral_value += y[vectors_length - 1]\n integral_value *= h / 2\n return integral_value\n else:\n sum = 0\n for i in range(len(x) - 1):\n sum += (y[i] + y[i + 1]) / 2 * (x[i + 1] - x[i])\n return sum\n",
"step-4": "import numpy as np\n\n\"\"\"\n function for calculating integrals using the trapezoid method\n x is a vector of independent variables\n y is a vector of dependent variables\n a is the initial value\n b is the final value\n n is the number of intervals\n y_generator is the function to be integrated\n\"\"\"\n\ndef trapezoid_integral(**kwargs):\n\n a = kwargs.get('a', None)\n b = kwargs.get('b', None)\n n = kwargs.get('n', 2)\n y_generator = kwargs.get('y_generator', None)\n\n x = kwargs.get('x', None)\n y = kwargs.get('y', None)\n \n if y is None:\n h = (b-a)/n\n x = np.linspace(a, b, n+1)\n y = [y_generator(x[i]) for i in range(n+1)]\n vectors_length = len(x)\n \n integral_value = y[0]\n\n for i in range(2, vectors_length):\n integral_value += 2*y[i - 1]\n \n integral_value += y[vectors_length - 1]\n integral_value *= h/2\n return integral_value\n \n else:\n sum = 0\n for i in range(len(x) - 1):\n sum += ((y[i] + y[i+1])/2 * (x[i+1] - x[i]))\n return sum\n \n\n\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from sklearn.cluster import MeanShift
from sklearn.datasets import make_blobs  # samples_generator was removed in scikit-learn 0.24
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import style
style.use('ggplot')
# Create random data points whose centers are the following
centers = [[20, 0, 0], [0, 20, 0], [0, 0, 20], [0, 0, 0]]
X, _ = make_blobs(n_samples=200, centers=centers, cluster_std=2)
# Fit a MeanShift classifier with search bandwidth 10 to the data
clf = MeanShift(bandwidth=10)
clf.fit(X)
# Get the label assigned to each data point and the centers
# of however many clusters were formed
labels = clf.labels_
cluster_centers = clf.cluster_centers_
print(cluster_centers)
n_clusters = len(cluster_centers)
print('Number of clusters found:', n_clusters)
# Plot the data points with their clusters and centers on a 3d graph
colors = 10*['r', 'g', 'b', 'y', 'c']
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
for i in range(len(X)):
ax.scatter(X[i][0], X[i][1], X[i][2], c=colors[labels[i]], marker='o')
ax.scatter(cluster_centers[:, 0], cluster_centers[:, 1], cluster_centers[:, 2],
marker='x', s=150, linewidth=5, zorder=10, color='k')
plt.show()
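
# The bandwidth above is hard-coded to 10; scikit-learn can also estimate it
# from the data. A hedged alternative sketch (estimate_bandwidth is a real
# sklearn.cluster helper; the quantile value here is an illustrative choice):
#
# from sklearn.cluster import estimate_bandwidth
# bw = estimate_bandwidth(X, quantile=0.2)
# clf = MeanShift(bandwidth=bw)
# clf.fit(X)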
|
normal
|
{
"blob_id": "c0216dbd52be134eb417c20ed80b398b22e5d844",
"index": 6967,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nstyle.use('ggplot')\n<mask token>\nclf.fit(X)\n<mask token>\nprint(cluster_centers)\n<mask token>\nprint('Number of clusters found:', n_clusters)\n<mask token>\nfor i in range(len(X)):\n ax.scatter(X[i][0], X[i][1], X[i][2], c=colors[labels[i]], marker='o')\nax.scatter(cluster_centers[:, 0], cluster_centers[:, 1], cluster_centers[:,\n 2], marker='x', s=150, linewidth=5, zorder=10, color='k')\nplt.show()\n",
"step-3": "<mask token>\nstyle.use('ggplot')\ncenters = [[20, 0, 0], [0, 20, 0], [0, 0, 20], [0, 0, 0]]\nX, _ = make_blobs(n_samples=200, centers=centers, cluster_std=2)\nclf = MeanShift(bandwidth=10)\nclf.fit(X)\nlabels = clf.labels_\ncluster_centers = clf.cluster_centers_\nprint(cluster_centers)\nn_clusters = len(cluster_centers)\nprint('Number of clusters found:', n_clusters)\ncolors = 10 * ['r', 'g', 'b', 'y', 'c']\nfig = plt.figure()\nax = fig.add_subplot(111, projection='3d')\nfor i in range(len(X)):\n ax.scatter(X[i][0], X[i][1], X[i][2], c=colors[labels[i]], marker='o')\nax.scatter(cluster_centers[:, 0], cluster_centers[:, 1], cluster_centers[:,\n 2], marker='x', s=150, linewidth=5, zorder=10, color='k')\nplt.show()\n",
"step-4": "from sklearn.cluster import MeanShift\nfrom sklearn.datasets.samples_generator import make_blobs\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom matplotlib import style\nstyle.use('ggplot')\ncenters = [[20, 0, 0], [0, 20, 0], [0, 0, 20], [0, 0, 0]]\nX, _ = make_blobs(n_samples=200, centers=centers, cluster_std=2)\nclf = MeanShift(bandwidth=10)\nclf.fit(X)\nlabels = clf.labels_\ncluster_centers = clf.cluster_centers_\nprint(cluster_centers)\nn_clusters = len(cluster_centers)\nprint('Number of clusters found:', n_clusters)\ncolors = 10 * ['r', 'g', 'b', 'y', 'c']\nfig = plt.figure()\nax = fig.add_subplot(111, projection='3d')\nfor i in range(len(X)):\n ax.scatter(X[i][0], X[i][1], X[i][2], c=colors[labels[i]], marker='o')\nax.scatter(cluster_centers[:, 0], cluster_centers[:, 1], cluster_centers[:,\n 2], marker='x', s=150, linewidth=5, zorder=10, color='k')\nplt.show()\n",
"step-5": "from sklearn.cluster import MeanShift\nfrom sklearn.datasets.samples_generator import make_blobs\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom matplotlib import style\n\nstyle.use('ggplot')\n\n\n# Create random data points whose centers are the following\ncenters = [[20, 0, 0], [0, 20, 0], [0, 0, 20], [0, 0, 0]]\nX, _ = make_blobs(n_samples=200, centers=centers, cluster_std=2)\n\n# Fit the data into MeanShift classifier with search bandwidth = 10\nclf = MeanShift(bandwidth=10)\nclf.fit(X)\n\n# Get the labels of each data point\n# and cluster centers of the number of clusters formed\nlabels = clf.labels_\ncluster_centers = clf.cluster_centers_\nprint(cluster_centers)\nn_clusters = len(cluster_centers)\nprint('Number of clusters found:', n_clusters)\n\n# Plot the data points with their clusters and centers on a 3d graph\ncolors = 10*['r', 'g', 'b', 'y', 'c']\nfig = plt.figure()\nax = fig.add_subplot(111, projection='3d')\n\nfor i in range(len(X)):\n ax.scatter(X[i][0], X[i][1], X[i][2], c=colors[labels[i]], marker='o')\n\nax.scatter(cluster_centers[:, 0], cluster_centers[:, 1], cluster_centers[:, 2],\n marker='x', s=150, linewidth=5, zorder=10, color='k')\n\nplt.show()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
num=int(input("enter no"))
def factorial(no):
fact=1
if no <0:
print("-ve no factorial not exist")
else:
for i in range(1,no+1):
fact=fact*i
return fact
print(factorial(num))
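
# A quick sanity check against the standard library (illustrative addition,
# not part of the original exercise):
import math
assert factorial(5) == math.factorial(5) == 120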
|
normal
|
{
"blob_id": "2d3ab575b18144f714f06167f54cd069af4e5895",
"index": 7506,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef factorial(no):\n fact = 1\n if no < 0:\n print('-ve no factorial not exist')\n else:\n for i in range(1, no + 1):\n fact = fact * i\n return fact\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef factorial(no):\n fact = 1\n if no < 0:\n print('-ve no factorial not exist')\n else:\n for i in range(1, no + 1):\n fact = fact * i\n return fact\n\n\nprint(factorial(num))\n",
"step-4": "num = int(input('enter no'))\n\n\ndef factorial(no):\n fact = 1\n if no < 0:\n print('-ve no factorial not exist')\n else:\n for i in range(1, no + 1):\n fact = fact * i\n return fact\n\n\nprint(factorial(num))\n",
"step-5": "num=int(input(\"enter no\"))\ndef factorial(no):\n fact=1\n if no <0:\n print(\"-ve no factorial not exist\")\n else:\n for i in range(1,no+1):\n fact=fact*i\n return fact\nprint(factorial(num))",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def get_encoder(conf):
if conf.encoder == 'linear':
model = tf.keras.Sequential([tf.keras.layers.Dense(conf.d_model * 2
), tf.keras.layers.ReLU(), tf.keras.layers.Dense(conf.d_model)])
return model
if conf.encoder == 'rand_linear':
model = get_stochastic_linear(conf)
return model
if conf.encoder[:5] == 'cifar':
model = ResNet(BasicBlock, [3, 4, 6, 3], 4, low_dim=128, width=1, k
=conf.k, linear=conf.linear)
return model
<|reserved_special_token_0|>
class BasicBlock(tf.keras.layers.Layer):
EXPANSION = 1
def __init__(self, channels, filters, strides=1):
super().__init__()
self.conv_1 = tf.keras.layers.Conv2D(filters=filters, kernel_size=3,
strides=strides, padding='same', use_bias=False)
self.bn_1 = tf.keras.layers.BatchNormalization()
self.conv_2 = tf.keras.layers.Conv2D(filters=filters, kernel_size=3,
strides=1, padding='same', use_bias=False)
self.bn_2 = tf.keras.layers.BatchNormalization()
self.shortcut = tf.keras.Sequential()
if strides != 1 or channels != filters * self.EXPANSION:
self.shortcut.add(tf.keras.layers.Conv2D(filters=self.EXPANSION *
filters, kernel_size=1, strides=strides, use_bias=False))
self.shortcut.add(tf.keras.layers.BatchNormalization())
def call(self, inputs, training=True, mask=None):
x = tf.nn.relu(self.bn_1(self.conv_1(inputs, training=training),
training=training))
x = self.bn_2(self.conv_2(x, training=training), training=training)
x += self.shortcut(inputs, training=training)
return tf.nn.relu(x)
class ResNet(tf.keras.Model):
def __init__(self, block, num_blocks, pool_len=4, low_dim=128, width=1,
k=10, linear=True):
super().__init__()
self.channels = 64
self.pool_len = pool_len
self.k = k
self.linear = linear
self.conv_1 = tf.keras.layers.Conv2D(filters=64, kernel_size=3,
strides=1, padding='same', use_bias=False)
self.bn_1 = tf.keras.layers.BatchNormalization()
self.base = int(64 * width)
self.residual = tf.keras.Sequential([self._make_layer(block, self.
base, num_blocks[0], stride=1), self._make_layer(block, self.
base * 2, num_blocks[1], stride=2), self._make_layer(block,
self.base * 4, num_blocks[2], stride=2), self._make_layer(block,
self.base * 8, num_blocks[3], stride=2)])
if self.linear:
self.fc = tf.keras.layers.Dense(low_dim)
self.pool = tf.keras.layers.AveragePooling2D(pool_len, pool_len,
data_format='channels_last')
def _make_layer(self, block, planes, num_blocks, stride):
strides = [stride] + [1] * (num_blocks - 1)
layers = []
for stride in strides:
layers.append(block(self.channels, planes, stride))
self.channels = planes * block.EXPANSION
return tf.keras.Sequential(layers)
def call(self, inputs, training=True, mask=None):
x = tf.nn.relu(self.bn_1(self.conv_1(inputs, training=training),
training=training))
x = self.residual(x, training=training)
x = self.pool(x)
batch_size = tf.shape(x)[0]
x = tf.reshape(x, [batch_size, -1])
if self.linear:
x = self.fc(x, training=training)
return x
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_encoder(conf):
if conf.encoder == 'linear':
model = tf.keras.Sequential([tf.keras.layers.Dense(conf.d_model * 2
), tf.keras.layers.ReLU(), tf.keras.layers.Dense(conf.d_model)])
return model
if conf.encoder == 'rand_linear':
model = get_stochastic_linear(conf)
return model
if conf.encoder[:5] == 'cifar':
model = ResNet(BasicBlock, [3, 4, 6, 3], 4, low_dim=128, width=1, k
=conf.k, linear=conf.linear)
return model
<|reserved_special_token_0|>
class BasicBlock(tf.keras.layers.Layer):
EXPANSION = 1
def __init__(self, channels, filters, strides=1):
super().__init__()
self.conv_1 = tf.keras.layers.Conv2D(filters=filters, kernel_size=3,
strides=strides, padding='same', use_bias=False)
self.bn_1 = tf.keras.layers.BatchNormalization()
self.conv_2 = tf.keras.layers.Conv2D(filters=filters, kernel_size=3,
strides=1, padding='same', use_bias=False)
self.bn_2 = tf.keras.layers.BatchNormalization()
self.shortcut = tf.keras.Sequential()
if strides != 1 or channels != filters * self.EXPANSION:
self.shortcut.add(tf.keras.layers.Conv2D(filters=self.EXPANSION *
filters, kernel_size=1, strides=strides, use_bias=False))
self.shortcut.add(tf.keras.layers.BatchNormalization())
def call(self, inputs, training=True, mask=None):
x = tf.nn.relu(self.bn_1(self.conv_1(inputs, training=training),
training=training))
x = self.bn_2(self.conv_2(x, training=training), training=training)
x += self.shortcut(inputs, training=training)
return tf.nn.relu(x)
class ResNet(tf.keras.Model):
def __init__(self, block, num_blocks, pool_len=4, low_dim=128, width=1,
k=10, linear=True):
super().__init__()
self.channels = 64
self.pool_len = pool_len
self.k = k
self.linear = linear
self.conv_1 = tf.keras.layers.Conv2D(filters=64, kernel_size=3,
strides=1, padding='same', use_bias=False)
self.bn_1 = tf.keras.layers.BatchNormalization()
self.base = int(64 * width)
self.residual = tf.keras.Sequential([self._make_layer(block, self.
base, num_blocks[0], stride=1), self._make_layer(block, self.
base * 2, num_blocks[1], stride=2), self._make_layer(block,
self.base * 4, num_blocks[2], stride=2), self._make_layer(block,
self.base * 8, num_blocks[3], stride=2)])
if self.linear:
self.fc = tf.keras.layers.Dense(low_dim)
self.pool = tf.keras.layers.AveragePooling2D(pool_len, pool_len,
data_format='channels_last')
def _make_layer(self, block, planes, num_blocks, stride):
strides = [stride] + [1] * (num_blocks - 1)
layers = []
for stride in strides:
layers.append(block(self.channels, planes, stride))
self.channels = planes * block.EXPANSION
return tf.keras.Sequential(layers)
def call(self, inputs, training=True, mask=None):
x = tf.nn.relu(self.bn_1(self.conv_1(inputs, training=training),
training=training))
x = self.residual(x, training=training)
x = self.pool(x)
batch_size = tf.shape(x)[0]
x = tf.reshape(x, [batch_size, -1])
if self.linear:
x = self.fc(x, training=training)
return x
def test_resnet():
model = ResNet(BasicBlock, [3, 4, 6, 3], 4, low_dim=128, width=1)
a = tf.ones([7, 32, 32, 3])
b = model(a)
print(b)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_encoder(conf):
if conf.encoder == 'linear':
model = tf.keras.Sequential([tf.keras.layers.Dense(conf.d_model * 2
), tf.keras.layers.ReLU(), tf.keras.layers.Dense(conf.d_model)])
return model
if conf.encoder == 'rand_linear':
model = get_stochastic_linear(conf)
return model
if conf.encoder[:5] == 'cifar':
model = ResNet(BasicBlock, [3, 4, 6, 3], 4, low_dim=128, width=1, k
=conf.k, linear=conf.linear)
return model
def get_stochastic_linear(conf):
model = tf.keras.Sequential([tf.keras.layers.GaussianNoise(0.3), tf.
keras.layers.Dense(conf.d_model * 2), tf.keras.layers.ReLU(), tf.
keras.layers.GaussianNoise(0.3), tf.keras.layers.Dense(conf.d_model)])
return model
class BasicBlock(tf.keras.layers.Layer):
EXPANSION = 1
def __init__(self, channels, filters, strides=1):
super().__init__()
self.conv_1 = tf.keras.layers.Conv2D(filters=filters, kernel_size=3,
strides=strides, padding='same', use_bias=False)
self.bn_1 = tf.keras.layers.BatchNormalization()
self.conv_2 = tf.keras.layers.Conv2D(filters=filters, kernel_size=3,
strides=1, padding='same', use_bias=False)
self.bn_2 = tf.keras.layers.BatchNormalization()
self.shortcut = tf.keras.Sequential()
if strides != 1 or channels != filters * self.EXPANSION:
self.shortcut.add(tf.keras.layers.Conv2D(filters=self.EXPANSION *
filters, kernel_size=1, strides=strides, use_bias=False))
self.shortcut.add(tf.keras.layers.BatchNormalization())
def call(self, inputs, training=True, mask=None):
x = tf.nn.relu(self.bn_1(self.conv_1(inputs, training=training),
training=training))
x = self.bn_2(self.conv_2(x, training=training), training=training)
x += self.shortcut(inputs, training=training)
return tf.nn.relu(x)
class ResNet(tf.keras.Model):
def __init__(self, block, num_blocks, pool_len=4, low_dim=128, width=1,
k=10, linear=True):
super().__init__()
self.channels = 64
self.pool_len = pool_len
self.k = k
self.linear = linear
self.conv_1 = tf.keras.layers.Conv2D(filters=64, kernel_size=3,
strides=1, padding='same', use_bias=False)
self.bn_1 = tf.keras.layers.BatchNormalization()
self.base = int(64 * width)
self.residual = tf.keras.Sequential([self._make_layer(block, self.
base, num_blocks[0], stride=1), self._make_layer(block, self.
base * 2, num_blocks[1], stride=2), self._make_layer(block,
self.base * 4, num_blocks[2], stride=2), self._make_layer(block,
self.base * 8, num_blocks[3], stride=2)])
if self.linear:
self.fc = tf.keras.layers.Dense(low_dim)
self.pool = tf.keras.layers.AveragePooling2D(pool_len, pool_len,
data_format='channels_last')
def _make_layer(self, block, planes, num_blocks, stride):
strides = [stride] + [1] * (num_blocks - 1)
layers = []
for stride in strides:
layers.append(block(self.channels, planes, stride))
self.channels = planes * block.EXPANSION
return tf.keras.Sequential(layers)
def call(self, inputs, training=True, mask=None):
x = tf.nn.relu(self.bn_1(self.conv_1(inputs, training=training),
training=training))
x = self.residual(x, training=training)
x = self.pool(x)
batch_size = tf.shape(x)[0]
x = tf.reshape(x, [batch_size, -1])
if self.linear:
x = self.fc(x, training=training)
return x
def test_resnet():
model = ResNet(BasicBlock, [3, 4, 6, 3], 4, low_dim=128, width=1)
a = tf.ones([7, 32, 32, 3])
b = model(a)
print(b)
if __name__ == '__main__':
test_resnet()
<|reserved_special_token_1|>
from __future__ import absolute_import, print_function, division, unicode_literals
import tensorflow as tf
def get_encoder(conf):
if conf.encoder == 'linear':
model = tf.keras.Sequential([tf.keras.layers.Dense(conf.d_model * 2
), tf.keras.layers.ReLU(), tf.keras.layers.Dense(conf.d_model)])
return model
if conf.encoder == 'rand_linear':
model = get_stochastic_linear(conf)
return model
if conf.encoder[:5] == 'cifar':
model = ResNet(BasicBlock, [3, 4, 6, 3], 4, low_dim=128, width=1, k
=conf.k, linear=conf.linear)
return model
def get_stochastic_linear(conf):
model = tf.keras.Sequential([tf.keras.layers.GaussianNoise(0.3), tf.
keras.layers.Dense(conf.d_model * 2), tf.keras.layers.ReLU(), tf.
keras.layers.GaussianNoise(0.3), tf.keras.layers.Dense(conf.d_model)])
return model
class BasicBlock(tf.keras.layers.Layer):
EXPANSION = 1
def __init__(self, channels, filters, strides=1):
super().__init__()
self.conv_1 = tf.keras.layers.Conv2D(filters=filters, kernel_size=3,
strides=strides, padding='same', use_bias=False)
self.bn_1 = tf.keras.layers.BatchNormalization()
self.conv_2 = tf.keras.layers.Conv2D(filters=filters, kernel_size=3,
strides=1, padding='same', use_bias=False)
self.bn_2 = tf.keras.layers.BatchNormalization()
self.shortcut = tf.keras.Sequential()
if strides != 1 or channels != filters * self.EXPANSION:
self.shortcut.add(tf.keras.layers.Conv2D(filters=self.EXPANSION *
filters, kernel_size=1, strides=strides, use_bias=False))
self.shortcut.add(tf.keras.layers.BatchNormalization())
def call(self, inputs, training=True, mask=None):
x = tf.nn.relu(self.bn_1(self.conv_1(inputs, training=training),
training=training))
x = self.bn_2(self.conv_2(x, training=training), training=training)
x += self.shortcut(inputs, training=training)
return tf.nn.relu(x)
class ResNet(tf.keras.Model):
def __init__(self, block, num_blocks, pool_len=4, low_dim=128, width=1,
k=10, linear=True):
super().__init__()
self.channels = 64
self.pool_len = pool_len
self.k = k
self.linear = linear
self.conv_1 = tf.keras.layers.Conv2D(filters=64, kernel_size=3,
strides=1, padding='same', use_bias=False)
self.bn_1 = tf.keras.layers.BatchNormalization()
self.base = int(64 * width)
self.residual = tf.keras.Sequential([self._make_layer(block, self.
base, num_blocks[0], stride=1), self._make_layer(block, self.
base * 2, num_blocks[1], stride=2), self._make_layer(block,
self.base * 4, num_blocks[2], stride=2), self._make_layer(block,
self.base * 8, num_blocks[3], stride=2)])
if self.linear:
self.fc = tf.keras.layers.Dense(low_dim)
self.pool = tf.keras.layers.AveragePooling2D(pool_len, pool_len,
data_format='channels_last')
def _make_layer(self, block, planes, num_blocks, stride):
strides = [stride] + [1] * (num_blocks - 1)
layers = []
for stride in strides:
layers.append(block(self.channels, planes, stride))
self.channels = planes * block.EXPANSION
return tf.keras.Sequential(layers)
def call(self, inputs, training=True, mask=None):
x = tf.nn.relu(self.bn_1(self.conv_1(inputs, training=training),
training=training))
x = self.residual(x, training=training)
x = self.pool(x)
batch_size = tf.shape(x)[0]
x = tf.reshape(x, [batch_size, -1])
if self.linear:
x = self.fc(x, training=training)
return x
def test_resnet():
model = ResNet(BasicBlock, [3, 4, 6, 3], 4, low_dim=128, width=1)
a = tf.ones([7, 32, 32, 3])
b = model(a)
print(b)
if __name__ == '__main__':
test_resnet()
<|reserved_special_token_1|>
from __future__ import absolute_import, print_function, division, unicode_literals
import tensorflow as tf
def get_encoder(conf):
if conf.encoder == 'linear':
model = tf.keras.Sequential([tf.keras.layers.Dense(conf.d_model * 2),
tf.keras.layers.ReLU(),
tf.keras.layers.Dense(conf.d_model)])
return model
if conf.encoder == 'rand_linear':
model = get_stochastic_linear(conf)
return model
if conf.encoder[:5] == 'cifar':
model = ResNet(BasicBlock, [3, 4, 6, 3], 4, low_dim=128, width=1, k=conf.k, linear=conf.linear)
return model
def get_stochastic_linear(conf):
model = tf.keras.Sequential([tf.keras.layers.GaussianNoise(.3),
tf.keras.layers.Dense(conf.d_model * 2),
tf.keras.layers.ReLU(),
tf.keras.layers.GaussianNoise(.3),
tf.keras.layers.Dense(conf.d_model)])
return model
# noinspection PyAbstractClass
class BasicBlock(tf.keras.layers.Layer):
EXPANSION = 1
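
    # Standard two-convolution residual block: the shortcut is the identity
    # unless stride or channel count changes, in which case a 1x1 conv plus
    # batch norm projects the input to the matching shape.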
def __init__(self, channels, filters, strides=1):
super().__init__()
self.conv_1 = tf.keras.layers.Conv2D(filters=filters, kernel_size=3, strides=strides, padding='same',
use_bias=False)
self.bn_1 = tf.keras.layers.BatchNormalization()
self.conv_2 = tf.keras.layers.Conv2D(filters=filters, kernel_size=3, strides=1, padding='same',
use_bias=False)
self.bn_2 = tf.keras.layers.BatchNormalization()
self.shortcut = tf.keras.Sequential()
if strides != 1 or channels != (filters * self.EXPANSION):
self.shortcut.add(tf.keras.layers.Conv2D(filters=self.EXPANSION * filters, kernel_size=1, strides=strides,
use_bias=False))
self.shortcut.add(tf.keras.layers.BatchNormalization())
def call(self, inputs, training=True, mask=None):
x = tf.nn.relu(self.bn_1(self.conv_1(inputs, training=training), training=training))
x = self.bn_2(self.conv_2(x, training=training), training=training)
x += self.shortcut(inputs, training=training)
return tf.nn.relu(x)
# noinspection PyAbstractClass
class ResNet(tf.keras.Model):
def __init__(self, block, num_blocks, pool_len=4, low_dim=128, width=1, k=10, linear=True):
super().__init__()
self.channels = 64
self.pool_len = pool_len
self.k = k
self.linear = linear
self.conv_1 = tf.keras.layers.Conv2D(filters=64, kernel_size=3, strides=1, padding='same', use_bias=False)
self.bn_1 = tf.keras.layers.BatchNormalization()
self.base = int(64 * width)
self.residual = tf.keras.Sequential([
self._make_layer(block, self.base, num_blocks[0], stride=1),
self._make_layer(block, self.base * 2, num_blocks[1], stride=2),
self._make_layer(block, self.base * 4, num_blocks[2], stride=2),
self._make_layer(block, self.base * 8, num_blocks[3], stride=2)
])
if self.linear:
self.fc = tf.keras.layers.Dense(low_dim)
self.pool = tf.keras.layers.AveragePooling2D(pool_len, pool_len, data_format='channels_last')
def _make_layer(self, block, planes, num_blocks, stride):
strides = [stride] + [1] * (num_blocks - 1)
layers = []
for stride in strides:
layers.append(block(self.channels, planes, stride))
self.channels = planes * block.EXPANSION
return tf.keras.Sequential(layers)
def call(self, inputs, training=True, mask=None):
x = tf.nn.relu(self.bn_1(self.conv_1(inputs, training=training), training=training))
x = self.residual(x, training=training)
x = self.pool(x)
batch_size = tf.shape(x)[0]
x = tf.reshape(x, [batch_size, -1])
if self.linear:
x = self.fc(x, training=training)
return x
def test_resnet():
model = ResNet(BasicBlock, [3, 4, 6, 3], 4, low_dim=128, width=1)
a = tf.ones([7, 32, 32, 3])
b = model(a)
print(b)
if __name__ == '__main__':
test_resnet()
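

# A companion smoke test for the linear-encoder path (the conf object below is
# a stand-in built with SimpleNamespace; the real project presumably supplies a
# richer config object):
def test_linear_encoder():
    from types import SimpleNamespace
    conf = SimpleNamespace(encoder='linear', d_model=16)
    model = get_encoder(conf)
    out = model(tf.ones([7, 32]))
    print(out.shape)  # expected: (7, 16)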
|
flexible
|
{
"blob_id": "548eebb9628374df320021c714454e05d2c606c0",
"index": 5336,
"step-1": "<mask token>\n\n\ndef get_encoder(conf):\n if conf.encoder == 'linear':\n model = tf.keras.Sequential([tf.keras.layers.Dense(conf.d_model * 2\n ), tf.keras.layers.ReLU(), tf.keras.layers.Dense(conf.d_model)])\n return model\n if conf.encoder == 'rand_linear':\n model = get_stochastic_linear(conf)\n return model\n if conf.encoder[:5] == 'cifar':\n model = ResNet(BasicBlock, [3, 4, 6, 3], 4, low_dim=128, width=1, k\n =conf.k, linear=conf.linear)\n return model\n\n\n<mask token>\n\n\nclass BasicBlock(tf.keras.layers.Layer):\n EXPANSION = 1\n\n def __init__(self, channels, filters, strides=1):\n super().__init__()\n self.conv_1 = tf.keras.layers.Conv2D(filters=filters, kernel_size=3,\n strides=strides, padding='same', use_bias=False)\n self.bn_1 = tf.keras.layers.BatchNormalization()\n self.conv_2 = tf.keras.layers.Conv2D(filters=filters, kernel_size=3,\n strides=1, padding='same', use_bias=False)\n self.bn_2 = tf.keras.layers.BatchNormalization()\n self.shortcut = tf.keras.Sequential()\n if strides != 1 or channels != filters * self.EXPANSION:\n self.shortcut.add(tf.keras.layers.Conv2D(filters=self.EXPANSION *\n filters, kernel_size=1, strides=strides, use_bias=False))\n self.shortcut.add(tf.keras.layers.BatchNormalization())\n\n def call(self, inputs, training=True, mask=None):\n x = tf.nn.relu(self.bn_1(self.conv_1(inputs, training=training),\n training=training))\n x = self.bn_2(self.conv_2(x, training=training), training=training)\n x += self.shortcut(inputs, training=training)\n return tf.nn.relu(x)\n\n\nclass ResNet(tf.keras.Model):\n\n def __init__(self, block, num_blocks, pool_len=4, low_dim=128, width=1,\n k=10, linear=True):\n super().__init__()\n self.channels = 64\n self.pool_len = pool_len\n self.k = k\n self.linear = linear\n self.conv_1 = tf.keras.layers.Conv2D(filters=64, kernel_size=3,\n strides=1, padding='same', use_bias=False)\n self.bn_1 = tf.keras.layers.BatchNormalization()\n self.base = int(64 * width)\n self.residual = tf.keras.Sequential([self._make_layer(block, self.\n base, num_blocks[0], stride=1), self._make_layer(block, self.\n base * 2, num_blocks[1], stride=2), self._make_layer(block, \n self.base * 4, num_blocks[2], stride=2), self._make_layer(block,\n self.base * 8, num_blocks[3], stride=2)])\n if self.linear:\n self.fc = tf.keras.layers.Dense(low_dim)\n self.pool = tf.keras.layers.AveragePooling2D(pool_len, pool_len,\n data_format='channels_last')\n\n def _make_layer(self, block, planes, num_blocks, stride):\n strides = [stride] + [1] * (num_blocks - 1)\n layers = []\n for stride in strides:\n layers.append(block(self.channels, planes, stride))\n self.channels = planes * block.EXPANSION\n return tf.keras.Sequential(layers)\n\n def call(self, inputs, training=True, mask=None):\n x = tf.nn.relu(self.bn_1(self.conv_1(inputs, training=training),\n training=training))\n x = self.residual(x, training=training)\n x = self.pool(x)\n batch_size = tf.shape(x)[0]\n x = tf.reshape(x, [batch_size, -1])\n if self.linear:\n x = self.fc(x, training=training)\n return x\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_encoder(conf):\n if conf.encoder == 'linear':\n model = tf.keras.Sequential([tf.keras.layers.Dense(conf.d_model * 2\n ), tf.keras.layers.ReLU(), tf.keras.layers.Dense(conf.d_model)])\n return model\n if conf.encoder == 'rand_linear':\n model = get_stochastic_linear(conf)\n return model\n if conf.encoder[:5] == 'cifar':\n model = ResNet(BasicBlock, [3, 4, 6, 3], 4, low_dim=128, width=1, k\n =conf.k, linear=conf.linear)\n return model\n\n\n<mask token>\n\n\nclass BasicBlock(tf.keras.layers.Layer):\n EXPANSION = 1\n\n def __init__(self, channels, filters, strides=1):\n super().__init__()\n self.conv_1 = tf.keras.layers.Conv2D(filters=filters, kernel_size=3,\n strides=strides, padding='same', use_bias=False)\n self.bn_1 = tf.keras.layers.BatchNormalization()\n self.conv_2 = tf.keras.layers.Conv2D(filters=filters, kernel_size=3,\n strides=1, padding='same', use_bias=False)\n self.bn_2 = tf.keras.layers.BatchNormalization()\n self.shortcut = tf.keras.Sequential()\n if strides != 1 or channels != filters * self.EXPANSION:\n self.shortcut.add(tf.keras.layers.Conv2D(filters=self.EXPANSION *\n filters, kernel_size=1, strides=strides, use_bias=False))\n self.shortcut.add(tf.keras.layers.BatchNormalization())\n\n def call(self, inputs, training=True, mask=None):\n x = tf.nn.relu(self.bn_1(self.conv_1(inputs, training=training),\n training=training))\n x = self.bn_2(self.conv_2(x, training=training), training=training)\n x += self.shortcut(inputs, training=training)\n return tf.nn.relu(x)\n\n\nclass ResNet(tf.keras.Model):\n\n def __init__(self, block, num_blocks, pool_len=4, low_dim=128, width=1,\n k=10, linear=True):\n super().__init__()\n self.channels = 64\n self.pool_len = pool_len\n self.k = k\n self.linear = linear\n self.conv_1 = tf.keras.layers.Conv2D(filters=64, kernel_size=3,\n strides=1, padding='same', use_bias=False)\n self.bn_1 = tf.keras.layers.BatchNormalization()\n self.base = int(64 * width)\n self.residual = tf.keras.Sequential([self._make_layer(block, self.\n base, num_blocks[0], stride=1), self._make_layer(block, self.\n base * 2, num_blocks[1], stride=2), self._make_layer(block, \n self.base * 4, num_blocks[2], stride=2), self._make_layer(block,\n self.base * 8, num_blocks[3], stride=2)])\n if self.linear:\n self.fc = tf.keras.layers.Dense(low_dim)\n self.pool = tf.keras.layers.AveragePooling2D(pool_len, pool_len,\n data_format='channels_last')\n\n def _make_layer(self, block, planes, num_blocks, stride):\n strides = [stride] + [1] * (num_blocks - 1)\n layers = []\n for stride in strides:\n layers.append(block(self.channels, planes, stride))\n self.channels = planes * block.EXPANSION\n return tf.keras.Sequential(layers)\n\n def call(self, inputs, training=True, mask=None):\n x = tf.nn.relu(self.bn_1(self.conv_1(inputs, training=training),\n training=training))\n x = self.residual(x, training=training)\n x = self.pool(x)\n batch_size = tf.shape(x)[0]\n x = tf.reshape(x, [batch_size, -1])\n if self.linear:\n x = self.fc(x, training=training)\n return x\n\n\ndef test_resnet():\n model = ResNet(BasicBlock, [3, 4, 6, 3], 4, low_dim=128, width=1)\n a = tf.ones([7, 32, 32, 3])\n b = model(a)\n print(b)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef get_encoder(conf):\n if conf.encoder == 'linear':\n model = tf.keras.Sequential([tf.keras.layers.Dense(conf.d_model * 2\n ), tf.keras.layers.ReLU(), tf.keras.layers.Dense(conf.d_model)])\n return model\n if conf.encoder == 'rand_linear':\n model = get_stochastic_linear(conf)\n return model\n if conf.encoder[:5] == 'cifar':\n model = ResNet(BasicBlock, [3, 4, 6, 3], 4, low_dim=128, width=1, k\n =conf.k, linear=conf.linear)\n return model\n\n\ndef get_stochastic_linear(conf):\n model = tf.keras.Sequential([tf.keras.layers.GaussianNoise(0.3), tf.\n keras.layers.Dense(conf.d_model * 2), tf.keras.layers.ReLU(), tf.\n keras.layers.GaussianNoise(0.3), tf.keras.layers.Dense(conf.d_model)])\n return model\n\n\nclass BasicBlock(tf.keras.layers.Layer):\n EXPANSION = 1\n\n def __init__(self, channels, filters, strides=1):\n super().__init__()\n self.conv_1 = tf.keras.layers.Conv2D(filters=filters, kernel_size=3,\n strides=strides, padding='same', use_bias=False)\n self.bn_1 = tf.keras.layers.BatchNormalization()\n self.conv_2 = tf.keras.layers.Conv2D(filters=filters, kernel_size=3,\n strides=1, padding='same', use_bias=False)\n self.bn_2 = tf.keras.layers.BatchNormalization()\n self.shortcut = tf.keras.Sequential()\n if strides != 1 or channels != filters * self.EXPANSION:\n self.shortcut.add(tf.keras.layers.Conv2D(filters=self.EXPANSION *\n filters, kernel_size=1, strides=strides, use_bias=False))\n self.shortcut.add(tf.keras.layers.BatchNormalization())\n\n def call(self, inputs, training=True, mask=None):\n x = tf.nn.relu(self.bn_1(self.conv_1(inputs, training=training),\n training=training))\n x = self.bn_2(self.conv_2(x, training=training), training=training)\n x += self.shortcut(inputs, training=training)\n return tf.nn.relu(x)\n\n\nclass ResNet(tf.keras.Model):\n\n def __init__(self, block, num_blocks, pool_len=4, low_dim=128, width=1,\n k=10, linear=True):\n super().__init__()\n self.channels = 64\n self.pool_len = pool_len\n self.k = k\n self.linear = linear\n self.conv_1 = tf.keras.layers.Conv2D(filters=64, kernel_size=3,\n strides=1, padding='same', use_bias=False)\n self.bn_1 = tf.keras.layers.BatchNormalization()\n self.base = int(64 * width)\n self.residual = tf.keras.Sequential([self._make_layer(block, self.\n base, num_blocks[0], stride=1), self._make_layer(block, self.\n base * 2, num_blocks[1], stride=2), self._make_layer(block, \n self.base * 4, num_blocks[2], stride=2), self._make_layer(block,\n self.base * 8, num_blocks[3], stride=2)])\n if self.linear:\n self.fc = tf.keras.layers.Dense(low_dim)\n self.pool = tf.keras.layers.AveragePooling2D(pool_len, pool_len,\n data_format='channels_last')\n\n def _make_layer(self, block, planes, num_blocks, stride):\n strides = [stride] + [1] * (num_blocks - 1)\n layers = []\n for stride in strides:\n layers.append(block(self.channels, planes, stride))\n self.channels = planes * block.EXPANSION\n return tf.keras.Sequential(layers)\n\n def call(self, inputs, training=True, mask=None):\n x = tf.nn.relu(self.bn_1(self.conv_1(inputs, training=training),\n training=training))\n x = self.residual(x, training=training)\n x = self.pool(x)\n batch_size = tf.shape(x)[0]\n x = tf.reshape(x, [batch_size, -1])\n if self.linear:\n x = self.fc(x, training=training)\n return x\n\n\ndef test_resnet():\n model = ResNet(BasicBlock, [3, 4, 6, 3], 4, low_dim=128, width=1)\n a = tf.ones([7, 32, 32, 3])\n b = model(a)\n print(b)\n\n\nif __name__ == '__main__':\n test_resnet()\n",
"step-4": "from __future__ import absolute_import, print_function, division, unicode_literals\nimport tensorflow as tf\n\n\ndef get_encoder(conf):\n if conf.encoder == 'linear':\n model = tf.keras.Sequential([tf.keras.layers.Dense(conf.d_model * 2\n ), tf.keras.layers.ReLU(), tf.keras.layers.Dense(conf.d_model)])\n return model\n if conf.encoder == 'rand_linear':\n model = get_stochastic_linear(conf)\n return model\n if conf.encoder[:5] == 'cifar':\n model = ResNet(BasicBlock, [3, 4, 6, 3], 4, low_dim=128, width=1, k\n =conf.k, linear=conf.linear)\n return model\n\n\ndef get_stochastic_linear(conf):\n model = tf.keras.Sequential([tf.keras.layers.GaussianNoise(0.3), tf.\n keras.layers.Dense(conf.d_model * 2), tf.keras.layers.ReLU(), tf.\n keras.layers.GaussianNoise(0.3), tf.keras.layers.Dense(conf.d_model)])\n return model\n\n\nclass BasicBlock(tf.keras.layers.Layer):\n EXPANSION = 1\n\n def __init__(self, channels, filters, strides=1):\n super().__init__()\n self.conv_1 = tf.keras.layers.Conv2D(filters=filters, kernel_size=3,\n strides=strides, padding='same', use_bias=False)\n self.bn_1 = tf.keras.layers.BatchNormalization()\n self.conv_2 = tf.keras.layers.Conv2D(filters=filters, kernel_size=3,\n strides=1, padding='same', use_bias=False)\n self.bn_2 = tf.keras.layers.BatchNormalization()\n self.shortcut = tf.keras.Sequential()\n if strides != 1 or channels != filters * self.EXPANSION:\n self.shortcut.add(tf.keras.layers.Conv2D(filters=self.EXPANSION *\n filters, kernel_size=1, strides=strides, use_bias=False))\n self.shortcut.add(tf.keras.layers.BatchNormalization())\n\n def call(self, inputs, training=True, mask=None):\n x = tf.nn.relu(self.bn_1(self.conv_1(inputs, training=training),\n training=training))\n x = self.bn_2(self.conv_2(x, training=training), training=training)\n x += self.shortcut(inputs, training=training)\n return tf.nn.relu(x)\n\n\nclass ResNet(tf.keras.Model):\n\n def __init__(self, block, num_blocks, pool_len=4, low_dim=128, width=1,\n k=10, linear=True):\n super().__init__()\n self.channels = 64\n self.pool_len = pool_len\n self.k = k\n self.linear = linear\n self.conv_1 = tf.keras.layers.Conv2D(filters=64, kernel_size=3,\n strides=1, padding='same', use_bias=False)\n self.bn_1 = tf.keras.layers.BatchNormalization()\n self.base = int(64 * width)\n self.residual = tf.keras.Sequential([self._make_layer(block, self.\n base, num_blocks[0], stride=1), self._make_layer(block, self.\n base * 2, num_blocks[1], stride=2), self._make_layer(block, \n self.base * 4, num_blocks[2], stride=2), self._make_layer(block,\n self.base * 8, num_blocks[3], stride=2)])\n if self.linear:\n self.fc = tf.keras.layers.Dense(low_dim)\n self.pool = tf.keras.layers.AveragePooling2D(pool_len, pool_len,\n data_format='channels_last')\n\n def _make_layer(self, block, planes, num_blocks, stride):\n strides = [stride] + [1] * (num_blocks - 1)\n layers = []\n for stride in strides:\n layers.append(block(self.channels, planes, stride))\n self.channels = planes * block.EXPANSION\n return tf.keras.Sequential(layers)\n\n def call(self, inputs, training=True, mask=None):\n x = tf.nn.relu(self.bn_1(self.conv_1(inputs, training=training),\n training=training))\n x = self.residual(x, training=training)\n x = self.pool(x)\n batch_size = tf.shape(x)[0]\n x = tf.reshape(x, [batch_size, -1])\n if self.linear:\n x = self.fc(x, training=training)\n return x\n\n\ndef test_resnet():\n model = ResNet(BasicBlock, [3, 4, 6, 3], 4, low_dim=128, width=1)\n a = tf.ones([7, 32, 32, 3])\n b = model(a)\n print(b)\n\n\nif 
__name__ == '__main__':\n test_resnet()\n",
"step-5": "from __future__ import absolute_import, print_function, division, unicode_literals\nimport tensorflow as tf\n\n\ndef get_encoder(conf):\n if conf.encoder == 'linear':\n model = tf.keras.Sequential([tf.keras.layers.Dense(conf.d_model * 2),\n tf.keras.layers.ReLU(),\n tf.keras.layers.Dense(conf.d_model)])\n return model\n\n if conf.encoder == 'rand_linear':\n model = get_stochastic_linear(conf)\n return model\n if conf.encoder[:5] == 'cifar':\n model = ResNet(BasicBlock, [3, 4, 6, 3], 4, low_dim=128, width=1, k=conf.k, linear=conf.linear)\n return model\n\n\ndef get_stochastic_linear(conf):\n model = tf.keras.Sequential([tf.keras.layers.GaussianNoise(.3),\n tf.keras.layers.Dense(conf.d_model * 2),\n tf.keras.layers.ReLU(),\n tf.keras.layers.GaussianNoise(.3),\n tf.keras.layers.Dense(conf.d_model)])\n return model\n\n\n# noinspection PyAbstractClass\nclass BasicBlock(tf.keras.layers.Layer):\n EXPANSION = 1\n\n def __init__(self, channels, filters, strides=1):\n super().__init__()\n self.conv_1 = tf.keras.layers.Conv2D(filters=filters, kernel_size=3, strides=strides, padding='same',\n use_bias=False)\n self.bn_1 = tf.keras.layers.BatchNormalization()\n self.conv_2 = tf.keras.layers.Conv2D(filters=filters, kernel_size=3, strides=1, padding='same',\n use_bias=False)\n self.bn_2 = tf.keras.layers.BatchNormalization()\n self.shortcut = tf.keras.Sequential()\n if strides != 1 or channels != (filters * self.EXPANSION):\n self.shortcut.add(tf.keras.layers.Conv2D(filters=self.EXPANSION * filters, kernel_size=1, strides=strides,\n use_bias=False))\n self.shortcut.add(tf.keras.layers.BatchNormalization())\n\n def call(self, inputs, training=True, mask=None):\n x = tf.nn.relu(self.bn_1(self.conv_1(inputs, training=training), training=training))\n x = self.bn_2(self.conv_2(x, training=training), training=training)\n x += self.shortcut(inputs, training=training)\n return tf.nn.relu(x)\n\n\n# noinspection PyAbstractClass\nclass ResNet(tf.keras.Model):\n def __init__(self, block, num_blocks, pool_len=4, low_dim=128, width=1, k=10, linear=True):\n super().__init__()\n self.channels = 64\n self.pool_len = pool_len\n self.k = k\n self.linear = linear\n self.conv_1 = tf.keras.layers.Conv2D(filters=64, kernel_size=3, strides=1, padding='same', use_bias=False)\n self.bn_1 = tf.keras.layers.BatchNormalization()\n\n self.base = int(64 * width)\n self.residual = tf.keras.Sequential([\n self._make_layer(block, self.base, num_blocks[0], stride=1),\n self._make_layer(block, self.base * 2, num_blocks[1], stride=2),\n self._make_layer(block, self.base * 4, num_blocks[2], stride=2),\n self._make_layer(block, self.base * 8, num_blocks[3], stride=2)\n ])\n if self.linear:\n self.fc = tf.keras.layers.Dense(low_dim)\n self.pool = tf.keras.layers.AveragePooling2D(pool_len, pool_len, data_format='channels_last')\n\n def _make_layer(self, block, planes, num_blocks, stride):\n strides = [stride] + [1] * (num_blocks - 1)\n layers = []\n for stride in strides:\n layers.append(block(self.channels, planes, stride))\n self.channels = planes * block.EXPANSION\n return tf.keras.Sequential(layers)\n\n def call(self, inputs, training=True, mask=None):\n x = tf.nn.relu(self.bn_1(self.conv_1(inputs, training=training), training=training))\n x = self.residual(x, training=training)\n x = self.pool(x)\n\n batch_size = tf.shape(x)[0]\n x = tf.reshape(x, [batch_size, -1])\n if self.linear:\n x = self.fc(x, training=training)\n return x\n\n\ndef test_resnet():\n model = ResNet(BasicBlock, [3, 4, 6, 3], 4, low_dim=128, width=1)\n a = 
tf.ones([7, 32, 32, 3])\n b = model(a)\n print(b)\n\n\nif __name__ == '__main__':\n test_resnet()\n",
"step-ids": [
9,
10,
12,
13,
14
]
}
|
[
9,
10,
12,
13,
14
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
reload(sys)
sys.setdefaultencoding('utf-8')
<|reserved_special_token_0|>
write_schedule(cut(get_son(schedule[0], List)))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
reload(sys)
sys.setdefaultencoding('utf-8')
<|reserved_special_token_0|>
List = []
cookie = cookielib.CookieJar()
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookie))
postdata = urllib.urlencode({'zjh': user(0), 'mm': user(1)})
loginUrl = 'http://zhjw.dlut.edu.cn/loginAction.do'
result = opener.open(loginUrl, postdata)
gradeUrl = 'http://zhjw.dlut.edu.cn/xkAction.do?actionType=6'
result = opener.open(gradeUrl)
html = etree.HTML(result.read().decode('gbk'))
schedule = html.xpath('//td[@class="pageAlign"]/table[@border="1"]')
write_schedule(cut(get_son(schedule[0], List)))
<|reserved_special_token_1|>
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
import urllib
import urllib2
import cookielib
from excel import *
from user import *
List = []
cookie = cookielib.CookieJar()
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookie))
postdata = urllib.urlencode({'zjh': user(0), 'mm': user(1)})
loginUrl = 'http://zhjw.dlut.edu.cn/loginAction.do'
result = opener.open(loginUrl, postdata)
gradeUrl = 'http://zhjw.dlut.edu.cn/xkAction.do?actionType=6'
result = opener.open(gradeUrl)
html = etree.HTML(result.read().decode('gbk'))
schedule = html.xpath('//td[@class="pageAlign"]/table[@border="1"]')
write_schedule(cut(get_son(schedule[0], List)))
<|reserved_special_token_1|>
# # -*- coding:utf-8 -*-
import sys
reload(sys)
sys.setdefaultencoding( "utf-8" )
import urllib
import urllib2
import cookielib
from excel import *
from user import *
List=[]
cookie = cookielib.CookieJar()
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookie))
postdata = urllib.urlencode({'zjh':user(0),'mm':user(1)})
loginUrl = 'http://zhjw.dlut.edu.cn/loginAction.do'
result = opener.open(loginUrl,postdata)
gradeUrl = 'http://zhjw.dlut.edu.cn/xkAction.do?actionType=6'
result = opener.open(gradeUrl)
html = etree.HTML(result.read().decode('gbk'))
schedule = html.xpath('//td[@class="pageAlign"]/table[@border="1"]')
write_schedule(cut(get_son(schedule[0],List)))
|
flexible
|
{
"blob_id": "3c7280bbd23bd3472915da0760efbfd03bfe995d",
"index": 9314,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nreload(sys)\nsys.setdefaultencoding('utf-8')\n<mask token>\nwrite_schedule(cut(get_son(schedule[0], List)))\n",
"step-3": "<mask token>\nreload(sys)\nsys.setdefaultencoding('utf-8')\n<mask token>\nList = []\ncookie = cookielib.CookieJar()\nopener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookie))\npostdata = urllib.urlencode({'zjh': user(0), 'mm': user(1)})\nloginUrl = 'http://zhjw.dlut.edu.cn/loginAction.do'\nresult = opener.open(loginUrl, postdata)\ngradeUrl = 'http://zhjw.dlut.edu.cn/xkAction.do?actionType=6'\nresult = opener.open(gradeUrl)\nhtml = etree.HTML(result.read().decode('gbk'))\nschedule = html.xpath('//td[@class=\"pageAlign\"]/table[@border=\"1\"]')\nwrite_schedule(cut(get_son(schedule[0], List)))\n",
"step-4": "import sys\nreload(sys)\nsys.setdefaultencoding('utf-8')\nimport urllib\nimport urllib2\nimport cookielib\nfrom excel import *\nfrom user import *\nList = []\ncookie = cookielib.CookieJar()\nopener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookie))\npostdata = urllib.urlencode({'zjh': user(0), 'mm': user(1)})\nloginUrl = 'http://zhjw.dlut.edu.cn/loginAction.do'\nresult = opener.open(loginUrl, postdata)\ngradeUrl = 'http://zhjw.dlut.edu.cn/xkAction.do?actionType=6'\nresult = opener.open(gradeUrl)\nhtml = etree.HTML(result.read().decode('gbk'))\nschedule = html.xpath('//td[@class=\"pageAlign\"]/table[@border=\"1\"]')\nwrite_schedule(cut(get_son(schedule[0], List)))\n",
"step-5": "# # -*- coding:utf-8 -*-\nimport sys\nreload(sys)\nsys.setdefaultencoding( \"utf-8\" )\nimport urllib\nimport urllib2\nimport cookielib\nfrom excel import *\nfrom user import *\n\nList=[]\ncookie = cookielib.CookieJar()\nopener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookie))\npostdata = urllib.urlencode({'zjh':user(0),'mm':user(1)})\nloginUrl = 'http://zhjw.dlut.edu.cn/loginAction.do'\nresult = opener.open(loginUrl,postdata)\ngradeUrl = 'http://zhjw.dlut.edu.cn/xkAction.do?actionType=6'\nresult = opener.open(gradeUrl)\nhtml = etree.HTML(result.read().decode('gbk'))\nschedule = html.xpath('//td[@class=\"pageAlign\"]/table[@border=\"1\"]')\nwrite_schedule(cut(get_son(schedule[0],List)))",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# For better usage under DDP (DistributedDataParallel)
import torch
from pytorch_lightning.metrics import Metric
import cv2
import numpy as np
import skimage
import torch.tensor as Tensor
class SegMetric(Metric):
def __init__(self, iou_thr, prob_thr, img_size, dist_sync_on_step=False):
super().__init__(dist_sync_on_step=dist_sync_on_step)
        # Call `self.add_state` for every internal state that is needed for the
        # metric computations; dist_reduce_fx indicates the function used to
        # reduce that state across processes.
self.iou_thr = iou_thr
self.prob_thr = prob_thr
self.img_size = img_size
self.use_ddp = dist_sync_on_step
self.add_state("csv_files", default=[], dist_reduce_fx="cat")
def update(self, preds: torch.Tensor, target: torch.Tensor):
logit_seg, _ = preds
_, mask, mask_cls, _, img_path, _ = target
assert logit_seg.shape == mask.shape
pred_seg = torch.sigmoid(logit_seg).detach().cpu().numpy()
gt_seg = mask.detach().cpu().numpy()
gt_cls = mask_cls.detach().cpu().numpy()[:, 0].tolist()
pred_seg = pred_seg.astype("float32")
for idx, file_path in enumerate(img_path):
pred = cv2.resize(pred_seg[idx][0], (self.img_size, self.img_size))
pred = np.expand_dims(pred, 0)
gt = cv2.resize(
gt_seg[idx][0],
(self.img_size, self.img_size),
interpolation=cv2.INTER_NEAREST,
)
gt = np.expand_dims(gt, 0)
gt_c = gt_cls[idx]
is_p = int(gt_c == 1.0)
is_n = 1 - is_p
gt_nums_, pred_nums_, tp_nums_, fp_nums_ = evaluation(
pred, gt, iou_th=self.iou_thr, prob_ths=[self.prob_thr]
)
# csv = file_path.split("/")[5]
csv = file_path.split("png_1024/")[1].split("/")[0]
if not hasattr(self, f"{csv}_gt"):
self.csv_files += [csv]
self.add_state(f"{csv}_gt", default=Tensor(0), dist_reduce_fx="sum")
self.add_state(f"{csv}_pred", default=Tensor(0), dist_reduce_fx="sum")
self.add_state(f"{csv}_tp", default=Tensor(0), dist_reduce_fx="sum")
self.add_state(f"{csv}_fp", default=Tensor(0), dist_reduce_fx="sum")
self.add_state(f"{csv}_pos", default=Tensor(0), dist_reduce_fx="sum")
self.add_state(
f"{csv}_neg", default=torch.tensor(0), dist_reduce_fx="sum"
)
            # TODO: needs to change if num_class > 1
            # FIXME: ugly format..
setattr(self, f"{csv}_gt", getattr(self, f"{csv}_gt") + gt_nums_[0])
setattr(
self, f"{csv}_pred", getattr(self, f"{csv}_pred") + pred_nums_[0, 0]
)
setattr(self, f"{csv}_tp", getattr(self, f"{csv}_tp") + tp_nums_[0, 0])
setattr(self, f"{csv}_fp", getattr(self, f"{csv}_fp") + fp_nums_[0, 0])
setattr(self, f"{csv}_pos", getattr(self, f"{csv}_pos") + is_p)
setattr(self, f"{csv}_neg", getattr(self, f"{csv}_neg") + is_n)
def update_each(self, preds: torch.Tensor, target: torch.Tensor):
self.update(preds, target)
def compute(self):
gt = 0
tp = 0
fp = 0
pos = 0
neg = 0
for csv in self.csv_files:
gt += getattr(self, f"{csv}_gt").item()
tp += getattr(self, f"{csv}_tp").item()
fp += getattr(self, f"{csv}_fp").item()
pos += getattr(self, f"{csv}_pos").item()
neg += getattr(self, f"{csv}_neg").item()
pre = tp / (tp + fp * (pos / (neg + 1e-5)) + 1e-5)
rec = tp / (gt + 1e-5)
f1 = 2 * (pre * rec) / (pre + rec + 1e-5)
myf1 = (pre + rec) / 2.0
lesion_metric_dict = {
"pre": pre,
"rec": rec,
"f1": f1,
"myf1": myf1,
}
# FIXME: DDP Error: https://github.com/PyTorchLightning/pytorch-lightning/discussions/2529
# Tensors must be CUDA and dense
# if self.use_ddp:
# lesion_metric_dict = torch.FloatTensor([myf1], device=self.device)
return lesion_metric_dict
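    # Worked example of the normalized precision above (illustrative numbers,
    # not from any real run): with tp=8, fp=4, gt=10 lesions over pos=50
    # positive and neg=100 negative studies, fp is rescaled by pos/neg = 0.5,
    # so pre = 8 / (8 + 4 * 0.5) = 0.8 and rec = 8 / 10 = 0.8, giving f1 = 0.8.
    # The pos/neg factor expresses false positives relative to the prevalence
    # of positive studies rather than to the raw image count.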
def compute_each(self):
metric_dict_each_csv = {}
for csv in self.csv_files:
gt = getattr(self, f"{csv}_gt").item()
tp = getattr(self, f"{csv}_tp").item()
fp = getattr(self, f"{csv}_fp").item()
pos = getattr(self, f"{csv}_pos").item()
neg = getattr(self, f"{csv}_neg").item()
pre = tp / (tp + fp * (pos / (neg + 1e-5)) + 1e-5)
rec = tp / (gt + 1e-5)
f1 = 2 * (pre * rec) / (pre + rec + 1e-5)
fppi = fp / (pos + neg + 1e-5)
# myf1 = (pre + rec) / 2.0
lesion_metric_dict = {
"gt": gt,
"pos": pos,
"neg": neg,
"pre": pre,
"rec": rec,
"f1": f1,
"fppi": fppi
# "myf1": myf1,
}
metric_dict_each_csv[csv] = lesion_metric_dict
return metric_dict_each_csv
# Helper functions
def calc_iou(bbox_a, bbox_b):
"""
:param a: bbox list [min_y, min_x, max_y, max_x]
:param b: bbox list [min_y, min_x, max_y, max_x]
:return:
"""
size_a = (bbox_a[2] - bbox_a[0]) * (bbox_a[3] - bbox_a[1])
size_b = (bbox_b[2] - bbox_b[0]) * (bbox_b[3] - bbox_b[1])
min_ab_y = max(bbox_a[0], bbox_b[0])
min_ab_x = max(bbox_a[1], bbox_b[1])
max_ab_y = min(bbox_a[2], bbox_b[2])
max_ab_x = min(bbox_a[3], bbox_b[3])
inter_ab = max(0, max_ab_y - min_ab_y) * max(0, max_ab_x - min_ab_x)
return inter_ab / (size_a + size_b - inter_ab)
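# Quick sanity check (illustrative): for a = [0, 0, 2, 2] and b = [1, 1, 3, 3],
# both boxes have area 4 and the overlap is the 1x1 square from (1, 1) to (2, 2),
# so calc_iou(a, b) = 1 / (4 + 4 - 1) = 1/7 ~= 0.143.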
def evaluation(pred, gt, iou_th=0.15, prob_ths=[0.5]):
"""
:param pred: Prediction Seg Map, shape = (1, num_classes, height, width)
:param gt: Ground-truth Seg Map, shape = (1, num_classes, height, width)
:param iou_th: Threshold for prediction and gt matching
:return:
gt_nums: Ground-truth region numbers
pred_nums: Prediction region numbers
tp_nums: True Positive region numbers
fp_nums: False Positive region numbers
    # Required assumption: batch_size=1 (regionprops only accepts 2-D arrays)
    # Regions are excluded from consideration when (based on a 2048x2048 image, pixel spacing=0.2mm):
    # i) region bbox size < 400 pixels
    # ii) (currently unused) region bbox major axis < 4mm (20 pixels), minor axis < 2mm (10 pixels)
    # issue: #3. The image size can vary with the detector size; to be exact we would need the pixel spacing.
    #        Therefore changing the size criteria per image does not seem necessary at this stage.
"""
if len(pred.shape) > 3:
pred = pred[0]
gt = gt[0]
num_classes = pred.shape[0]
image_size = gt.shape[2]
gt_regions = [
skimage.measure.regionprops(skimage.measure.label(gt[c, :, :]))
for c in range(num_classes)
]
for c in range(num_classes):
gt_regions[c] = [
r for r in gt_regions[c] if r.area > (20 * (image_size / 2048)) ** 2
]
pred_regions = [
[
skimage.measure.regionprops(skimage.measure.label(pred[c, :, :] > th))
for c in range(num_classes)
]
for th in prob_ths
] # shape - len(prob_th), num_classes
    # Initialization
gt_nums = np.array([len(gt_regions[c]) for c in range(num_classes)])
pred_nums = np.array(
[
[len(pred_regions[thi][c]) for c in range(num_classes)]
for thi in range(len(prob_ths))
]
)
tp_nums = np.zeros((len(prob_ths), num_classes))
    fp_nums = pred_nums.copy()  # without .copy() both names would point to the same array
# Gt-Pred Bbox Iou Matrix
for c in range(num_classes):
for thi in range(len(prob_ths)):
            if (gt_nums[c] == 0) or (pred_nums[thi][c] == 0):  # explicit comparison; np array truthiness is unreliable here
continue
iou_matrix = np.zeros((gt_nums[c], pred_nums[thi][c]))
for gi, gr in enumerate(gt_regions[c]):
for pi, pr in enumerate(pred_regions[thi][c]):
iou_matrix[gi, pi] = calc_iou(gr.bbox, pr.bbox)
tp_nums[thi][c] = np.sum(np.any((iou_matrix >= iou_th), axis=1))
fp_nums[thi][c] -= np.sum(np.any((iou_matrix > iou_th), axis=0))
return gt_nums, pred_nums, tp_nums, fp_nums
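# Minimal usage sketch (illustrative, single class, hypothetical arrays):
#   pred = np.zeros((1, 64, 64), dtype="float32"); pred[0, 10:20, 10:20] = 0.9
#   gt = np.zeros((1, 64, 64), dtype="float32"); gt[0, 12:22, 12:22] = 1.0
#   evaluation(pred, gt, iou_th=0.15, prob_ths=[0.5])
# The two 10x10 regions overlap on an 8x8 patch (bbox IoU = 64/136 ~= 0.47),
# so this returns gt_nums=[1], pred_nums=[[1]], tp_nums=[[1.]], fp_nums=[[0]].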
|
normal
|
{
"blob_id": "8d3f8872a3d5c4351551dc2d46839763d28ebd70",
"index": 3586,
"step-1": "<mask token>\n\n\nclass SegMetric(Metric):\n\n def __init__(self, iou_thr, prob_thr, img_size, dist_sync_on_step=False):\n super().__init__(dist_sync_on_step=dist_sync_on_step)\n self.iou_thr = iou_thr\n self.prob_thr = prob_thr\n self.img_size = img_size\n self.use_ddp = dist_sync_on_step\n self.add_state('csv_files', default=[], dist_reduce_fx='cat')\n <mask token>\n\n def update_each(self, preds: torch.Tensor, target: torch.Tensor):\n self.update(preds, target)\n\n def compute(self):\n gt = 0\n tp = 0\n fp = 0\n pos = 0\n neg = 0\n for csv in self.csv_files:\n gt += getattr(self, f'{csv}_gt').item()\n tp += getattr(self, f'{csv}_tp').item()\n fp += getattr(self, f'{csv}_fp').item()\n pos += getattr(self, f'{csv}_pos').item()\n neg += getattr(self, f'{csv}_neg').item()\n pre = tp / (tp + fp * (pos / (neg + 1e-05)) + 1e-05)\n rec = tp / (gt + 1e-05)\n f1 = 2 * (pre * rec) / (pre + rec + 1e-05)\n myf1 = (pre + rec) / 2.0\n lesion_metric_dict = {'pre': pre, 'rec': rec, 'f1': f1, 'myf1': myf1}\n return lesion_metric_dict\n\n def compute_each(self):\n metric_dict_each_csv = {}\n for csv in self.csv_files:\n gt = getattr(self, f'{csv}_gt').item()\n tp = getattr(self, f'{csv}_tp').item()\n fp = getattr(self, f'{csv}_fp').item()\n pos = getattr(self, f'{csv}_pos').item()\n neg = getattr(self, f'{csv}_neg').item()\n pre = tp / (tp + fp * (pos / (neg + 1e-05)) + 1e-05)\n rec = tp / (gt + 1e-05)\n f1 = 2 * (pre * rec) / (pre + rec + 1e-05)\n fppi = fp / (pos + neg + 1e-05)\n lesion_metric_dict = {'gt': gt, 'pos': pos, 'neg': neg, 'pre':\n pre, 'rec': rec, 'f1': f1, 'fppi': fppi}\n metric_dict_each_csv[csv] = lesion_metric_dict\n return metric_dict_each_csv\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass SegMetric(Metric):\n\n def __init__(self, iou_thr, prob_thr, img_size, dist_sync_on_step=False):\n super().__init__(dist_sync_on_step=dist_sync_on_step)\n self.iou_thr = iou_thr\n self.prob_thr = prob_thr\n self.img_size = img_size\n self.use_ddp = dist_sync_on_step\n self.add_state('csv_files', default=[], dist_reduce_fx='cat')\n\n def update(self, preds: torch.Tensor, target: torch.Tensor):\n logit_seg, _ = preds\n _, mask, mask_cls, _, img_path, _ = target\n assert logit_seg.shape == mask.shape\n pred_seg = torch.sigmoid(logit_seg).detach().cpu().numpy()\n gt_seg = mask.detach().cpu().numpy()\n gt_cls = mask_cls.detach().cpu().numpy()[:, 0].tolist()\n pred_seg = pred_seg.astype('float32')\n for idx, file_path in enumerate(img_path):\n pred = cv2.resize(pred_seg[idx][0], (self.img_size, self.img_size))\n pred = np.expand_dims(pred, 0)\n gt = cv2.resize(gt_seg[idx][0], (self.img_size, self.img_size),\n interpolation=cv2.INTER_NEAREST)\n gt = np.expand_dims(gt, 0)\n gt_c = gt_cls[idx]\n is_p = int(gt_c == 1.0)\n is_n = 1 - is_p\n gt_nums_, pred_nums_, tp_nums_, fp_nums_ = evaluation(pred, gt,\n iou_th=self.iou_thr, prob_ths=[self.prob_thr])\n csv = file_path.split('png_1024/')[1].split('/')[0]\n if not hasattr(self, f'{csv}_gt'):\n self.csv_files += [csv]\n self.add_state(f'{csv}_gt', default=Tensor(0),\n dist_reduce_fx='sum')\n self.add_state(f'{csv}_pred', default=Tensor(0),\n dist_reduce_fx='sum')\n self.add_state(f'{csv}_tp', default=Tensor(0),\n dist_reduce_fx='sum')\n self.add_state(f'{csv}_fp', default=Tensor(0),\n dist_reduce_fx='sum')\n self.add_state(f'{csv}_pos', default=Tensor(0),\n dist_reduce_fx='sum')\n self.add_state(f'{csv}_neg', default=torch.tensor(0),\n dist_reduce_fx='sum')\n setattr(self, f'{csv}_gt', getattr(self, f'{csv}_gt') + gt_nums_[0]\n )\n setattr(self, f'{csv}_pred', getattr(self, f'{csv}_pred') +\n pred_nums_[0, 0])\n setattr(self, f'{csv}_tp', getattr(self, f'{csv}_tp') +\n tp_nums_[0, 0])\n setattr(self, f'{csv}_fp', getattr(self, f'{csv}_fp') +\n fp_nums_[0, 0])\n setattr(self, f'{csv}_pos', getattr(self, f'{csv}_pos') + is_p)\n setattr(self, f'{csv}_neg', getattr(self, f'{csv}_neg') + is_n)\n\n def update_each(self, preds: torch.Tensor, target: torch.Tensor):\n self.update(preds, target)\n\n def compute(self):\n gt = 0\n tp = 0\n fp = 0\n pos = 0\n neg = 0\n for csv in self.csv_files:\n gt += getattr(self, f'{csv}_gt').item()\n tp += getattr(self, f'{csv}_tp').item()\n fp += getattr(self, f'{csv}_fp').item()\n pos += getattr(self, f'{csv}_pos').item()\n neg += getattr(self, f'{csv}_neg').item()\n pre = tp / (tp + fp * (pos / (neg + 1e-05)) + 1e-05)\n rec = tp / (gt + 1e-05)\n f1 = 2 * (pre * rec) / (pre + rec + 1e-05)\n myf1 = (pre + rec) / 2.0\n lesion_metric_dict = {'pre': pre, 'rec': rec, 'f1': f1, 'myf1': myf1}\n return lesion_metric_dict\n\n def compute_each(self):\n metric_dict_each_csv = {}\n for csv in self.csv_files:\n gt = getattr(self, f'{csv}_gt').item()\n tp = getattr(self, f'{csv}_tp').item()\n fp = getattr(self, f'{csv}_fp').item()\n pos = getattr(self, f'{csv}_pos').item()\n neg = getattr(self, f'{csv}_neg').item()\n pre = tp / (tp + fp * (pos / (neg + 1e-05)) + 1e-05)\n rec = tp / (gt + 1e-05)\n f1 = 2 * (pre * rec) / (pre + rec + 1e-05)\n fppi = fp / (pos + neg + 1e-05)\n lesion_metric_dict = {'gt': gt, 'pos': pos, 'neg': neg, 'pre':\n pre, 'rec': rec, 'f1': f1, 'fppi': fppi}\n metric_dict_each_csv[csv] = lesion_metric_dict\n return metric_dict_each_csv\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass SegMetric(Metric):\n\n def __init__(self, iou_thr, prob_thr, img_size, dist_sync_on_step=False):\n super().__init__(dist_sync_on_step=dist_sync_on_step)\n self.iou_thr = iou_thr\n self.prob_thr = prob_thr\n self.img_size = img_size\n self.use_ddp = dist_sync_on_step\n self.add_state('csv_files', default=[], dist_reduce_fx='cat')\n\n def update(self, preds: torch.Tensor, target: torch.Tensor):\n logit_seg, _ = preds\n _, mask, mask_cls, _, img_path, _ = target\n assert logit_seg.shape == mask.shape\n pred_seg = torch.sigmoid(logit_seg).detach().cpu().numpy()\n gt_seg = mask.detach().cpu().numpy()\n gt_cls = mask_cls.detach().cpu().numpy()[:, 0].tolist()\n pred_seg = pred_seg.astype('float32')\n for idx, file_path in enumerate(img_path):\n pred = cv2.resize(pred_seg[idx][0], (self.img_size, self.img_size))\n pred = np.expand_dims(pred, 0)\n gt = cv2.resize(gt_seg[idx][0], (self.img_size, self.img_size),\n interpolation=cv2.INTER_NEAREST)\n gt = np.expand_dims(gt, 0)\n gt_c = gt_cls[idx]\n is_p = int(gt_c == 1.0)\n is_n = 1 - is_p\n gt_nums_, pred_nums_, tp_nums_, fp_nums_ = evaluation(pred, gt,\n iou_th=self.iou_thr, prob_ths=[self.prob_thr])\n csv = file_path.split('png_1024/')[1].split('/')[0]\n if not hasattr(self, f'{csv}_gt'):\n self.csv_files += [csv]\n self.add_state(f'{csv}_gt', default=Tensor(0),\n dist_reduce_fx='sum')\n self.add_state(f'{csv}_pred', default=Tensor(0),\n dist_reduce_fx='sum')\n self.add_state(f'{csv}_tp', default=Tensor(0),\n dist_reduce_fx='sum')\n self.add_state(f'{csv}_fp', default=Tensor(0),\n dist_reduce_fx='sum')\n self.add_state(f'{csv}_pos', default=Tensor(0),\n dist_reduce_fx='sum')\n self.add_state(f'{csv}_neg', default=torch.tensor(0),\n dist_reduce_fx='sum')\n setattr(self, f'{csv}_gt', getattr(self, f'{csv}_gt') + gt_nums_[0]\n )\n setattr(self, f'{csv}_pred', getattr(self, f'{csv}_pred') +\n pred_nums_[0, 0])\n setattr(self, f'{csv}_tp', getattr(self, f'{csv}_tp') +\n tp_nums_[0, 0])\n setattr(self, f'{csv}_fp', getattr(self, f'{csv}_fp') +\n fp_nums_[0, 0])\n setattr(self, f'{csv}_pos', getattr(self, f'{csv}_pos') + is_p)\n setattr(self, f'{csv}_neg', getattr(self, f'{csv}_neg') + is_n)\n\n def update_each(self, preds: torch.Tensor, target: torch.Tensor):\n self.update(preds, target)\n\n def compute(self):\n gt = 0\n tp = 0\n fp = 0\n pos = 0\n neg = 0\n for csv in self.csv_files:\n gt += getattr(self, f'{csv}_gt').item()\n tp += getattr(self, f'{csv}_tp').item()\n fp += getattr(self, f'{csv}_fp').item()\n pos += getattr(self, f'{csv}_pos').item()\n neg += getattr(self, f'{csv}_neg').item()\n pre = tp / (tp + fp * (pos / (neg + 1e-05)) + 1e-05)\n rec = tp / (gt + 1e-05)\n f1 = 2 * (pre * rec) / (pre + rec + 1e-05)\n myf1 = (pre + rec) / 2.0\n lesion_metric_dict = {'pre': pre, 'rec': rec, 'f1': f1, 'myf1': myf1}\n return lesion_metric_dict\n\n def compute_each(self):\n metric_dict_each_csv = {}\n for csv in self.csv_files:\n gt = getattr(self, f'{csv}_gt').item()\n tp = getattr(self, f'{csv}_tp').item()\n fp = getattr(self, f'{csv}_fp').item()\n pos = getattr(self, f'{csv}_pos').item()\n neg = getattr(self, f'{csv}_neg').item()\n pre = tp / (tp + fp * (pos / (neg + 1e-05)) + 1e-05)\n rec = tp / (gt + 1e-05)\n f1 = 2 * (pre * rec) / (pre + rec + 1e-05)\n fppi = fp / (pos + neg + 1e-05)\n lesion_metric_dict = {'gt': gt, 'pos': pos, 'neg': neg, 'pre':\n pre, 'rec': rec, 'f1': f1, 'fppi': fppi}\n metric_dict_each_csv[csv] = lesion_metric_dict\n return metric_dict_each_csv\n\n\ndef calc_iou(bbox_a, bbox_b):\n \"\"\"\n 
:param a: bbox list [min_y, min_x, max_y, max_x]\n :param b: bbox list [min_y, min_x, max_y, max_x]\n :return:\n \"\"\"\n size_a = (bbox_a[2] - bbox_a[0]) * (bbox_a[3] - bbox_a[1])\n size_b = (bbox_b[2] - bbox_b[0]) * (bbox_b[3] - bbox_b[1])\n min_ab_y = max(bbox_a[0], bbox_b[0])\n min_ab_x = max(bbox_a[1], bbox_b[1])\n max_ab_y = min(bbox_a[2], bbox_b[2])\n max_ab_x = min(bbox_a[3], bbox_b[3])\n inter_ab = max(0, max_ab_y - min_ab_y) * max(0, max_ab_x - min_ab_x)\n return inter_ab / (size_a + size_b - inter_ab)\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass SegMetric(Metric):\n\n def __init__(self, iou_thr, prob_thr, img_size, dist_sync_on_step=False):\n super().__init__(dist_sync_on_step=dist_sync_on_step)\n self.iou_thr = iou_thr\n self.prob_thr = prob_thr\n self.img_size = img_size\n self.use_ddp = dist_sync_on_step\n self.add_state('csv_files', default=[], dist_reduce_fx='cat')\n\n def update(self, preds: torch.Tensor, target: torch.Tensor):\n logit_seg, _ = preds\n _, mask, mask_cls, _, img_path, _ = target\n assert logit_seg.shape == mask.shape\n pred_seg = torch.sigmoid(logit_seg).detach().cpu().numpy()\n gt_seg = mask.detach().cpu().numpy()\n gt_cls = mask_cls.detach().cpu().numpy()[:, 0].tolist()\n pred_seg = pred_seg.astype('float32')\n for idx, file_path in enumerate(img_path):\n pred = cv2.resize(pred_seg[idx][0], (self.img_size, self.img_size))\n pred = np.expand_dims(pred, 0)\n gt = cv2.resize(gt_seg[idx][0], (self.img_size, self.img_size),\n interpolation=cv2.INTER_NEAREST)\n gt = np.expand_dims(gt, 0)\n gt_c = gt_cls[idx]\n is_p = int(gt_c == 1.0)\n is_n = 1 - is_p\n gt_nums_, pred_nums_, tp_nums_, fp_nums_ = evaluation(pred, gt,\n iou_th=self.iou_thr, prob_ths=[self.prob_thr])\n csv = file_path.split('png_1024/')[1].split('/')[0]\n if not hasattr(self, f'{csv}_gt'):\n self.csv_files += [csv]\n self.add_state(f'{csv}_gt', default=Tensor(0),\n dist_reduce_fx='sum')\n self.add_state(f'{csv}_pred', default=Tensor(0),\n dist_reduce_fx='sum')\n self.add_state(f'{csv}_tp', default=Tensor(0),\n dist_reduce_fx='sum')\n self.add_state(f'{csv}_fp', default=Tensor(0),\n dist_reduce_fx='sum')\n self.add_state(f'{csv}_pos', default=Tensor(0),\n dist_reduce_fx='sum')\n self.add_state(f'{csv}_neg', default=torch.tensor(0),\n dist_reduce_fx='sum')\n setattr(self, f'{csv}_gt', getattr(self, f'{csv}_gt') + gt_nums_[0]\n )\n setattr(self, f'{csv}_pred', getattr(self, f'{csv}_pred') +\n pred_nums_[0, 0])\n setattr(self, f'{csv}_tp', getattr(self, f'{csv}_tp') +\n tp_nums_[0, 0])\n setattr(self, f'{csv}_fp', getattr(self, f'{csv}_fp') +\n fp_nums_[0, 0])\n setattr(self, f'{csv}_pos', getattr(self, f'{csv}_pos') + is_p)\n setattr(self, f'{csv}_neg', getattr(self, f'{csv}_neg') + is_n)\n\n def update_each(self, preds: torch.Tensor, target: torch.Tensor):\n self.update(preds, target)\n\n def compute(self):\n gt = 0\n tp = 0\n fp = 0\n pos = 0\n neg = 0\n for csv in self.csv_files:\n gt += getattr(self, f'{csv}_gt').item()\n tp += getattr(self, f'{csv}_tp').item()\n fp += getattr(self, f'{csv}_fp').item()\n pos += getattr(self, f'{csv}_pos').item()\n neg += getattr(self, f'{csv}_neg').item()\n pre = tp / (tp + fp * (pos / (neg + 1e-05)) + 1e-05)\n rec = tp / (gt + 1e-05)\n f1 = 2 * (pre * rec) / (pre + rec + 1e-05)\n myf1 = (pre + rec) / 2.0\n lesion_metric_dict = {'pre': pre, 'rec': rec, 'f1': f1, 'myf1': myf1}\n return lesion_metric_dict\n\n def compute_each(self):\n metric_dict_each_csv = {}\n for csv in self.csv_files:\n gt = getattr(self, f'{csv}_gt').item()\n tp = getattr(self, f'{csv}_tp').item()\n fp = getattr(self, f'{csv}_fp').item()\n pos = getattr(self, f'{csv}_pos').item()\n neg = getattr(self, f'{csv}_neg').item()\n pre = tp / (tp + fp * (pos / (neg + 1e-05)) + 1e-05)\n rec = tp / (gt + 1e-05)\n f1 = 2 * (pre * rec) / (pre + rec + 1e-05)\n fppi = fp / (pos + neg + 1e-05)\n lesion_metric_dict = {'gt': gt, 'pos': pos, 'neg': neg, 'pre':\n pre, 'rec': rec, 'f1': f1, 'fppi': fppi}\n metric_dict_each_csv[csv] = lesion_metric_dict\n return metric_dict_each_csv\n\n\ndef calc_iou(bbox_a, bbox_b):\n \"\"\"\n 
:param a: bbox list [min_y, min_x, max_y, max_x]\n :param b: bbox list [min_y, min_x, max_y, max_x]\n :return:\n \"\"\"\n size_a = (bbox_a[2] - bbox_a[0]) * (bbox_a[3] - bbox_a[1])\n size_b = (bbox_b[2] - bbox_b[0]) * (bbox_b[3] - bbox_b[1])\n min_ab_y = max(bbox_a[0], bbox_b[0])\n min_ab_x = max(bbox_a[1], bbox_b[1])\n max_ab_y = min(bbox_a[2], bbox_b[2])\n max_ab_x = min(bbox_a[3], bbox_b[3])\n inter_ab = max(0, max_ab_y - min_ab_y) * max(0, max_ab_x - min_ab_x)\n return inter_ab / (size_a + size_b - inter_ab)\n\n\ndef evaluation(pred, gt, iou_th=0.15, prob_ths=[0.5]):\n \"\"\"\n :param pred: Prediction Seg Map, shape = (1, num_classes, height, width)\n :param gt: Ground-truth Seg Map, shape = (1, num_classes, height, width)\n :param iou_th: Threshold for prediction and gt matching\n :return:\n gt_nums: Ground-truth region numbers\n pred_nums: Prediction region numbers\n tp_nums: True Positive region numbers\n fp_nums: False Positive region numbers\n # 필수 가정: batch_size=1 (regionprops 함수가 2차원 행렬에만 적용 가능함)\n # Region을 고려에서 제외하는 경우(2048x2048 이미지 기반, pixel spacing=0.2mm)\n # i) Region bbox 크기 < 400 pixels\n # ii) (현재 사용x) Region bbox 장축<4mm(20pixels), 단축<2mm(10 pixels)\n # issue: # 3. 영상사이즈는 디텍터 크기에 따라 달라질 수 있습니다. 완벽히 하기 위해선 pixel spacing 정보를 받아야 합니다.\n # # 따라서 영상 크기에 대해 기준이 변경되는 것은 현단계에서는 적용할 필요가 없어 보입니다.\n \"\"\"\n if len(pred.shape) > 3:\n pred = pred[0]\n gt = gt[0]\n num_classes = pred.shape[0]\n image_size = gt.shape[2]\n gt_regions = [skimage.measure.regionprops(skimage.measure.label(gt[c, :,\n :])) for c in range(num_classes)]\n for c in range(num_classes):\n gt_regions[c] = [r for r in gt_regions[c] if r.area > (20 * (\n image_size / 2048)) ** 2]\n pred_regions = [[skimage.measure.regionprops(skimage.measure.label(pred\n [c, :, :] > th)) for c in range(num_classes)] for th in prob_ths]\n gt_nums = np.array([len(gt_regions[c]) for c in range(num_classes)])\n pred_nums = np.array([[len(pred_regions[thi][c]) for c in range(\n num_classes)] for thi in range(len(prob_ths))])\n tp_nums = np.zeros((len(prob_ths), num_classes))\n fp_nums = pred_nums.copy()\n for c in range(num_classes):\n for thi in range(len(prob_ths)):\n if gt_nums[c] == 0 or pred_nums[thi][c] == 0:\n continue\n iou_matrix = np.zeros((gt_nums[c], pred_nums[thi][c]))\n for gi, gr in enumerate(gt_regions[c]):\n for pi, pr in enumerate(pred_regions[thi][c]):\n iou_matrix[gi, pi] = calc_iou(gr.bbox, pr.bbox)\n tp_nums[thi][c] = np.sum(np.any(iou_matrix >= iou_th, axis=1))\n fp_nums[thi][c] -= np.sum(np.any(iou_matrix > iou_th, axis=0))\n return gt_nums, pred_nums, tp_nums, fp_nums\n",
"step-5": "# For better usage on ddp\n\nimport torch\nfrom pytorch_lightning.metrics import Metric\nimport cv2\nimport numpy as np\nimport skimage\nimport torch.tensor as Tensor\n\n\nclass SegMetric(Metric):\n def __init__(self, iou_thr, prob_thr, img_size, dist_sync_on_step=False):\n super().__init__(dist_sync_on_step=dist_sync_on_step)\n # call `self.add_state`for every internal state that is needed for the metrics computations\n # dist_reduce_fx indicates the function that should be used to reduce\n # state from multiple processes\n self.iou_thr = iou_thr\n self.prob_thr = prob_thr\n self.img_size = img_size\n self.use_ddp = dist_sync_on_step\n self.add_state(\"csv_files\", default=[], dist_reduce_fx=\"cat\")\n\n def update(self, preds: torch.Tensor, target: torch.Tensor):\n logit_seg, _ = preds\n _, mask, mask_cls, _, img_path, _ = target\n\n assert logit_seg.shape == mask.shape\n\n pred_seg = torch.sigmoid(logit_seg).detach().cpu().numpy()\n gt_seg = mask.detach().cpu().numpy()\n gt_cls = mask_cls.detach().cpu().numpy()[:, 0].tolist()\n\n pred_seg = pred_seg.astype(\"float32\")\n for idx, file_path in enumerate(img_path):\n pred = cv2.resize(pred_seg[idx][0], (self.img_size, self.img_size))\n pred = np.expand_dims(pred, 0)\n gt = cv2.resize(\n gt_seg[idx][0],\n (self.img_size, self.img_size),\n interpolation=cv2.INTER_NEAREST,\n )\n gt = np.expand_dims(gt, 0)\n\n gt_c = gt_cls[idx]\n is_p = int(gt_c == 1.0)\n is_n = 1 - is_p\n\n gt_nums_, pred_nums_, tp_nums_, fp_nums_ = evaluation(\n pred, gt, iou_th=self.iou_thr, prob_ths=[self.prob_thr]\n )\n\n # csv = file_path.split(\"/\")[5]\n csv = file_path.split(\"png_1024/\")[1].split(\"/\")[0]\n if not hasattr(self, f\"{csv}_gt\"):\n self.csv_files += [csv]\n self.add_state(f\"{csv}_gt\", default=Tensor(0), dist_reduce_fx=\"sum\")\n self.add_state(f\"{csv}_pred\", default=Tensor(0), dist_reduce_fx=\"sum\")\n self.add_state(f\"{csv}_tp\", default=Tensor(0), dist_reduce_fx=\"sum\")\n self.add_state(f\"{csv}_fp\", default=Tensor(0), dist_reduce_fx=\"sum\")\n self.add_state(f\"{csv}_pos\", default=Tensor(0), dist_reduce_fx=\"sum\")\n self.add_state(\n f\"{csv}_neg\", default=torch.tensor(0), dist_reduce_fx=\"sum\"\n )\n\n # TODO: Need to be change if num_class > 1\n # FIXME: 몬 생긴 포맷..\n setattr(self, f\"{csv}_gt\", getattr(self, f\"{csv}_gt\") + gt_nums_[0])\n setattr(\n self, f\"{csv}_pred\", getattr(self, f\"{csv}_pred\") + pred_nums_[0, 0]\n )\n setattr(self, f\"{csv}_tp\", getattr(self, f\"{csv}_tp\") + tp_nums_[0, 0])\n setattr(self, f\"{csv}_fp\", getattr(self, f\"{csv}_fp\") + fp_nums_[0, 0])\n setattr(self, f\"{csv}_pos\", getattr(self, f\"{csv}_pos\") + is_p)\n setattr(self, f\"{csv}_neg\", getattr(self, f\"{csv}_neg\") + is_n)\n\n def update_each(self, preds: torch.Tensor, target: torch.Tensor):\n self.update(preds, target)\n\n def compute(self):\n gt = 0\n tp = 0\n fp = 0\n pos = 0\n neg = 0\n for csv in self.csv_files:\n gt += getattr(self, f\"{csv}_gt\").item()\n tp += getattr(self, f\"{csv}_tp\").item()\n fp += getattr(self, f\"{csv}_fp\").item()\n pos += getattr(self, f\"{csv}_pos\").item()\n neg += getattr(self, f\"{csv}_neg\").item()\n\n pre = tp / (tp + fp * (pos / (neg + 1e-5)) + 1e-5)\n rec = tp / (gt + 1e-5)\n f1 = 2 * (pre * rec) / (pre + rec + 1e-5)\n myf1 = (pre + rec) / 2.0\n\n lesion_metric_dict = {\n \"pre\": pre,\n \"rec\": rec,\n \"f1\": f1,\n \"myf1\": myf1,\n }\n\n # FIXME: DDP Error: https://github.com/PyTorchLightning/pytorch-lightning/discussions/2529\n # Tensors must be CUDA and dense\n # if self.use_ddp:\n # 
lesion_metric_dict = torch.FloatTensor([myf1], device=self.device)\n\n return lesion_metric_dict\n\n def compute_each(self):\n metric_dict_each_csv = {}\n for csv in self.csv_files:\n gt = getattr(self, f\"{csv}_gt\").item()\n tp = getattr(self, f\"{csv}_tp\").item()\n fp = getattr(self, f\"{csv}_fp\").item()\n pos = getattr(self, f\"{csv}_pos\").item()\n neg = getattr(self, f\"{csv}_neg\").item()\n\n pre = tp / (tp + fp * (pos / (neg + 1e-5)) + 1e-5)\n rec = tp / (gt + 1e-5)\n f1 = 2 * (pre * rec) / (pre + rec + 1e-5)\n fppi = fp / (pos + neg + 1e-5)\n # myf1 = (pre + rec) / 2.0\n\n lesion_metric_dict = {\n \"gt\": gt,\n \"pos\": pos,\n \"neg\": neg,\n \"pre\": pre,\n \"rec\": rec,\n \"f1\": f1,\n \"fppi\": fppi\n # \"myf1\": myf1,\n }\n\n metric_dict_each_csv[csv] = lesion_metric_dict\n\n return metric_dict_each_csv\n\n\n# Helper functions\ndef calc_iou(bbox_a, bbox_b):\n \"\"\"\n :param a: bbox list [min_y, min_x, max_y, max_x]\n :param b: bbox list [min_y, min_x, max_y, max_x]\n :return:\n \"\"\"\n size_a = (bbox_a[2] - bbox_a[0]) * (bbox_a[3] - bbox_a[1])\n size_b = (bbox_b[2] - bbox_b[0]) * (bbox_b[3] - bbox_b[1])\n\n min_ab_y = max(bbox_a[0], bbox_b[0])\n min_ab_x = max(bbox_a[1], bbox_b[1])\n max_ab_y = min(bbox_a[2], bbox_b[2])\n max_ab_x = min(bbox_a[3], bbox_b[3])\n\n inter_ab = max(0, max_ab_y - min_ab_y) * max(0, max_ab_x - min_ab_x)\n\n return inter_ab / (size_a + size_b - inter_ab)\n\n\ndef evaluation(pred, gt, iou_th=0.15, prob_ths=[0.5]):\n \"\"\"\n :param pred: Prediction Seg Map, shape = (1, num_classes, height, width)\n :param gt: Ground-truth Seg Map, shape = (1, num_classes, height, width)\n :param iou_th: Threshold for prediction and gt matching\n :return:\n gt_nums: Ground-truth region numbers\n pred_nums: Prediction region numbers\n tp_nums: True Positive region numbers\n fp_nums: False Positive region numbers\n # 필수 가정: batch_size=1 (regionprops 함수가 2차원 행렬에만 적용 가능함)\n # Region을 고려에서 제외하는 경우(2048x2048 이미지 기반, pixel spacing=0.2mm)\n # i) Region bbox 크기 < 400 pixels\n # ii) (현재 사용x) Region bbox 장축<4mm(20pixels), 단축<2mm(10 pixels)\n # issue: # 3. 영상사이즈는 디텍터 크기에 따라 달라질 수 있습니다. 
완벽히 하기 위해선 pixel spacing 정보를 받아야 합니다.\n # # 따라서 영상 크기에 대해 기준이 변경되는 것은 현단계에서는 적용할 필요가 없어 보입니다.\n \"\"\"\n\n if len(pred.shape) > 3:\n pred = pred[0]\n gt = gt[0]\n\n num_classes = pred.shape[0]\n image_size = gt.shape[2]\n\n gt_regions = [\n skimage.measure.regionprops(skimage.measure.label(gt[c, :, :]))\n for c in range(num_classes)\n ]\n for c in range(num_classes):\n gt_regions[c] = [\n r for r in gt_regions[c] if r.area > (20 * (image_size / 2048)) ** 2\n ]\n\n pred_regions = [\n [\n skimage.measure.regionprops(skimage.measure.label(pred[c, :, :] > th))\n for c in range(num_classes)\n ]\n for th in prob_ths\n ] # shape - len(prob_th), num_classes\n\n # 초기화\n gt_nums = np.array([len(gt_regions[c]) for c in range(num_classes)])\n pred_nums = np.array(\n [\n [len(pred_regions[thi][c]) for c in range(num_classes)]\n for thi in range(len(prob_ths))\n ]\n )\n tp_nums = np.zeros((len(prob_ths), num_classes))\n fp_nums = pred_nums.copy() # .copy() 없으면 포인터가 같아짐\n\n # Gt-Pred Bbox Iou Matrix\n for c in range(num_classes):\n for thi in range(len(prob_ths)):\n if (gt_nums[c] == 0) or (pred_nums[thi][c] == 0): # np array 이상함;\n continue\n\n iou_matrix = np.zeros((gt_nums[c], pred_nums[thi][c]))\n for gi, gr in enumerate(gt_regions[c]):\n for pi, pr in enumerate(pred_regions[thi][c]):\n iou_matrix[gi, pi] = calc_iou(gr.bbox, pr.bbox)\n\n tp_nums[thi][c] = np.sum(np.any((iou_matrix >= iou_th), axis=1))\n fp_nums[thi][c] -= np.sum(np.any((iou_matrix > iou_th), axis=0))\n\n return gt_nums, pred_nums, tp_nums, fp_nums",
"step-ids": [
5,
6,
7,
8,
10
]
}
|
[
5,
6,
7,
8,
10
] |
"""config URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
    1. Add an import: from my_app import views
    2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
    1. Add an import: from other_app.views import Home
    2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
    1. Import the include() function: from django.urls import include, path
    2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.conf import settings
from django.urls import include, path
from rest_framework import routers
from BugBytes import views
from django.conf.urls.static import static
router = routers.DefaultRouter()
router.register(r'species', views.SpeciesViewSet)
router.register(r'com_names', views.Com_NamesViewSet)
router.register(r'photos', views.PhotosViewSet)
urlpatterns = [
path('admin/', admin.site.urls),
path('api/', include(router.urls)),
path('api-auth/', include('rest_framework.urls', namespace='rest_framework')),
path('bugbytes/<int:tensorflow_id>/view_species',
views.view_species, name='view_species'),
path('', views.landing, name='landing'),
path('model_json/', views.model_json, name='model_json'),
]
if settings.DEBUG: # new
urlpatterns += static(settings.MEDIA_URL,
document_root=settings.MEDIA_ROOT)
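# For reference, this configuration resolves (route values illustrative):
#   /api/species/, /api/com_names/, /api/photos/ -> DRF router viewsets
#   /bugbytes/42/view_species -> views.view_species(request, tensorflow_id=42)
#   /model_json/ -> views.model_json
#   / -> views.landing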
|
normal
|
{
"blob_id": "786bc5d44115b46bd246e85e85c8f8c1f20737b9",
"index": 7921,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nrouter.register('species', views.SpeciesViewSet)\nrouter.register('com_names', views.Com_NamesViewSet)\nrouter.register('photos', views.PhotosViewSet)\n<mask token>\nif settings.DEBUG:\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT\n )\n",
"step-3": "<mask token>\nrouter = routers.DefaultRouter()\nrouter.register('species', views.SpeciesViewSet)\nrouter.register('com_names', views.Com_NamesViewSet)\nrouter.register('photos', views.PhotosViewSet)\nurlpatterns = [path('admin/', admin.site.urls), path('api/', include(router\n .urls)), path('api-auth/', include('rest_framework.urls', namespace=\n 'rest_framework')), path('bugbytes/<int:tensorflow_id>/view_species',\n views.view_species, name='view_species'), path('', views.landing, name=\n 'landing'), path('model_json/', views.model_json, name='model_json')]\nif settings.DEBUG:\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT\n )\n",
"step-4": "<mask token>\nfrom django.contrib import admin\nfrom django.conf import settings\nfrom django.urls import include, path\nfrom rest_framework import routers\nfrom BugBytes import views\nfrom django.conf.urls.static import static\nrouter = routers.DefaultRouter()\nrouter.register('species', views.SpeciesViewSet)\nrouter.register('com_names', views.Com_NamesViewSet)\nrouter.register('photos', views.PhotosViewSet)\nurlpatterns = [path('admin/', admin.site.urls), path('api/', include(router\n .urls)), path('api-auth/', include('rest_framework.urls', namespace=\n 'rest_framework')), path('bugbytes/<int:tensorflow_id>/view_species',\n views.view_species, name='view_species'), path('', views.landing, name=\n 'landing'), path('model_json/', views.model_json, name='model_json')]\nif settings.DEBUG:\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT\n )\n",
"step-5": "\"\"\"config URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.2/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.conf import settings\nfrom django.urls import include, path\nfrom rest_framework import routers\nfrom BugBytes import views\nfrom django.conf.urls.static import static\n\nrouter = routers.DefaultRouter()\nrouter.register(r'species', views.SpeciesViewSet)\nrouter.register(r'com_names', views.Com_NamesViewSet)\nrouter.register(r'photos', views.PhotosViewSet)\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('api/', include(router.urls)),\n path('api-auth/', include('rest_framework.urls', namespace='rest_framework')),\n path('bugbytes/<int:tensorflow_id>/view_species',\n views.view_species, name='view_species'),\n path('', views.landing, name='landing'),\n path('model_json/', views.model_json, name='model_json'),\n]\n\nif settings.DEBUG: # new\n urlpatterns += static(settings.MEDIA_URL,\n document_root=settings.MEDIA_ROOT)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import sys
sys.path.append("./")
from torchtext.datasets import Multi30k
from torchtext.data import Field
from torchtext import data
import pickle
import models.transformer as h
import torch
from datasets import load_dataset
from torch.utils.data import DataLoader
from metrics.metrics import bleu
import numpy as np
from torch.autograd import Variable
from utils import plot_training_curve, plot_loss_curves
from torch import nn
import time
import matplotlib.pyplot as plt
import seaborn
global max_src_in_batch, max_tgt_in_batch
def batch_size_fn(new, count, sofar):
"Keep augmenting batch and calculate total number of tokens + padding."
global max_src_in_batch, max_tgt_in_batch
if count == 1:
max_src_in_batch = 0
max_tgt_in_batch = 0
max_src_in_batch = max(max_src_in_batch, len(vars(new)["src"]))
max_tgt_in_batch = max(max_tgt_in_batch, len(vars(new)["trg"]) + 2)
src_elements = count * max_src_in_batch
tgt_elements = count * max_tgt_in_batch
return max(src_elements, tgt_elements)
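# Example of the token budget above (illustrative lengths): after two examples
# with src lengths 8 and 12 and trg lengths 9 and 7, max_src_in_batch = 12 and
# max_tgt_in_batch = max(9 + 2, 7 + 2) = 11, so the function returns
# max(2 * 12, 2 * 11) = 24 -- the padded token count the batch would occupy.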
class Batch:
"Object for holding a batch of data with mask during training."
def __init__(self, src, trg=None, pad=0):
self.src = src
self.src_mask = (src != pad).unsqueeze(-2)
if trg is not None:
self.trg = trg[:, :-1]
self.trg_y = trg[:, 1:]
self.trg_mask = \
self.make_std_mask(self.trg, pad)
self.ntokens = (self.trg_y != pad).data.sum()
@staticmethod
def make_std_mask(tgt, pad):
"Create a mask to hide padding and future words."
tgt_mask = (tgt != pad).unsqueeze(-2)
tgt_mask = tgt_mask & Variable(
subsequent_mask(tgt.size(-1)).type_as(tgt_mask.data))
return tgt_mask
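# Teacher forcing in Batch, on a hypothetical target row
# [<sos>, ich, bin, <eos>, <pad>]: trg (the decoder input) drops the last
# token, trg_y (the prediction target) drops the first, and ntokens counts the
# 3 non-pad positions of trg_y, so the loss is normalized over real tokens only.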
class MyIterator(data.Iterator):
def create_batches(self):
if self.train:
def pool(d, random_shuffler):
for p in data.batch(d, self.batch_size * 100):
p_batch = data.batch(
sorted(p, key=self.sort_key),
self.batch_size, self.batch_size_fn)
for b in random_shuffler(list(p_batch)):
yield b
self.batches = pool(self.data(), self.random_shuffler)
else:
self.batches = []
for b in data.batch(self.data(), self.batch_size,
self.batch_size_fn):
self.batches.append(sorted(b, key=self.sort_key))
def subsequent_mask(size):
"Mask out subsequent positions."
attn_shape = (1, size, size)
subsequent_mask = np.triu(np.ones(attn_shape), k=1).astype('uint8')
return torch.from_numpy(subsequent_mask) == 0
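# For example, subsequent_mask(3)[0] is (dtype aside):
#   [[ True, False, False],
#    [ True,  True, False],
#    [ True,  True,  True]]
# i.e. position i may attend only to positions <= i, hiding future tokens.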
def greedy_decode(model, src, src_mask, max_len, start_symbol):
memory = model.encode(src, src_mask)
ys = torch.ones(1, 1).fill_(start_symbol).type_as(src.data)
for i in range(max_len-1):
out = model.decode(memory, src_mask,
Variable(ys),
Variable(subsequent_mask(ys.size(1))
.type_as(src.data)))
prob = model.generator(out[:, -1])
# vals, idxs = torch.topk(torch.softmax(prob, dim=1).flatten(), 10, largest=True)
# print((vals*100).tolist())
# print([TRG.vocab.itos[idx] for idx in idxs])
_, next_word = torch.max(prob, dim = 1)
next_word = next_word.data[0]
ys = torch.cat([ys,
torch.ones(1, 1).type_as(src.data).fill_(next_word)], dim=1)
return ys
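# Note: greedy_decode always runs for max_len - 1 steps and does not break on
# <eos> itself; the callers below scan the returned (1, max_len) tensor and
# stop at the first <eos>. Beam search would be a drop-in alternative here.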
def visualise_attention(tgt_sent, sent):
def draw(data, x, y, ax):
seaborn.heatmap(data,
xticklabels=x, square=True, yticklabels=y, vmin=0.0, vmax=1.0,
cbar=False, ax=ax)
# bottom, top = ax.get_ylim()
# ax.set_ylim(bottom + 0.5, top - 0.5)
for layer in range(1, 6, 2):
fig, axs = plt.subplots(1,4, figsize=(16, 5))
print("Encoder Layer", layer+1)
for h in range(4):
vals = model.encoder.layers[layer].self_attn.attn[0, h].data.cpu()
draw(vals, sent, sent if h ==0 else [], ax=axs[h])
plt.show()
for layer in range(1, 6, 2):
fig, axs = plt.subplots(1,4, figsize=(16, 5))
print("Decoder Self Layer", layer+1)
for h in range(4):
vals = model.decoder.layers[layer].self_attn.attn[0, h].data[:len(tgt_sent), :len(tgt_sent)].cpu()
draw(vals, tgt_sent, tgt_sent if h ==0 else [], ax=axs[h])
plt.show()
print("Decoder Src Layer", layer+1)
fig, axs = plt.subplots(1,4, figsize=(16, 5))
for h in range(4):
vals = model.decoder.layers[layer].self_attn.attn[0, h].data[:len(tgt_sent), :len(sent)].cpu()
draw(vals, sent, tgt_sent if h ==0 else [], ax=axs[h])
plt.show()
class SimpleLossCompute:
"A simple loss compute and train function."
def __init__(self, generator, criterion, opt=None):
self.generator = generator
self.criterion = criterion
self.opt = opt
def __call__(self, x, y, norm):
x = self.generator(x)
loss = self.criterion(x.contiguous().view(-1, x.size(-1)),
y.contiguous().view(-1)) / norm
if self.opt is not None:
loss.backward()
self.opt.step()
self.opt.optimizer.zero_grad()
return loss.data.item() * norm
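# Bookkeeping sketch: with a sum-reduced criterion (as in the original
# annotated-transformer setup), dividing by norm backpropagates a per-token
# loss while multiplying by norm restores the batch total for logging; e.g. a
# summed loss of 120.0 over norm = 60 tokens optimizes 2.0 and reports 120.0.
# With the mean-reduced nn.CrossEntropyLoss used below, the reported value is
# simply the criterion's mean again.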
def rebatch(pad_idx, batch):
"Fix order in torchtext to match ours"
src, trg = batch.src.transpose(0, 1), batch.trg.transpose(0, 1)
return Batch(src, trg, pad_idx)
def evaluate(data_iter, model, criterion):
model.eval()
with torch.no_grad():
eval_loss = run_epoch((rebatch(pad_idx, b) for b in data_iter), model,
SimpleLossCompute(model.generator, criterion, opt=None))
return eval_loss
def run_epoch(data_iter, model, loss_compute):
"Standard Training and Logging Function"
start = time.time()
total_tokens = 0
total_loss = []
tokens = 0
for i, batch in enumerate(data_iter):
out = model.forward(batch.src, batch.trg,
batch.src_mask, batch.trg_mask)
loss = loss_compute(out, batch.trg_y, batch.ntokens) #/ batch.ntokens
total_loss.append(loss.item())
total_tokens += batch.ntokens
tokens += batch.ntokens
if i % 50 == 1:
elapsed = time.time() - start
print("Epoch Step: %d Loss: %f Tokens per Sec: %f" %
(i, loss, tokens / elapsed))
start = time.time()
tokens = 0
return total_loss
SRC = Field(tokenize = "spacy",
tokenizer_language="de_core_news_sm",
init_token = '<sos>',
eos_token = '<eos>',
lower = True)
TRG = Field(tokenize = "spacy",
tokenizer_language="en_core_web_sm",
init_token = '<sos>',
eos_token = '<eos>',
lower = True)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
MAX_LEN = 100
train_data, valid_data, test_data = Multi30k.splits(
    exts=('.de', '.en'), fields=(SRC, TRG),
    filter_pred=lambda x: len(vars(x)['src']) <= MAX_LEN and len(vars(x)['trg']) <= MAX_LEN)
SRC.build_vocab(train_data.src, min_freq=2)
TRG.build_vocab(train_data.trg, min_freq=2)
INPUT_DIM = len(SRC.vocab)
OUTPUT_DIM = len(TRG.vocab)
BATCH_SIZE = 64
train_iter = MyIterator(train_data, batch_size=BATCH_SIZE, device=device,
repeat=False, sort_key=lambda x: (len(x.src), len(x.trg)),
batch_size_fn=batch_size_fn, train=True)
valid_iter = MyIterator(valid_data, batch_size=BATCH_SIZE, device=device,
repeat=False, sort_key=lambda x: (len(x.src), len(x.trg)),
batch_size_fn=batch_size_fn, train=False)
test_iter = MyIterator(test_data, batch_size=BATCH_SIZE, device=device,
repeat=False, sort_key=lambda x: (len(x.src), len(x.trg)),
batch_size_fn=batch_size_fn, train=False)
model_name = "harvard_transformer2_state"
args = (INPUT_DIM, OUTPUT_DIM)
kwargs = {"N" : 6}
model = h.make_model(*args, **kwargs).to(device)
state = torch.load(model_name + ".pt", map_location=device)
model.load_state_dict(state["state_dict"])
losses = state["loss"]
pad_idx = TRG.vocab.stoi["<pad>"]
criterion_test = nn.CrossEntropyLoss(ignore_index=pad_idx)
test_losses = evaluate(test_iter, model, criterion_test)
losses["test"].append(test_losses)
test_loss = torch.tensor(sum(test_losses) / len(test_losses))
print(test_loss)
print('Perplexity:', torch.exp(test_loss))
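# Perplexity is exp(mean cross-entropy): e.g. a test loss of 2.0 would give
# exp(2.0) ~= 7.39, i.e. the model is on average about as uncertain as a
# uniform choice over 7.4 target tokens.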
# sentence = [SRC.preprocess("eine gruppe von menschen steht vor einem iglu .")]
# real_translation = TRG.preprocess("a man in a blue shirt is standing on a ladder and cleaning a window")
# sentence = [SRC.preprocess("eine gruppe von menschen steht vor einem iglu .")]
# real_translation = TRG.preprocess("a group of people stands in front of an igloo.")
sentence = [SRC.preprocess("ein mann mit kariertem hut in einer schwarzen jacke und einer schwarz-weiß gestreiften hose spielt auf einer bühne mit einem sänger und einem weiteren gitarristen im hintergrund auf einer e-gitarre .")]
real_translation = TRG.preprocess("a man in a black jacket and checkered hat wearing black and white striped pants plays an electric guitar on a stage with a singer and another guitar player in the background .")
src = SRC.process(sentence).to(device).T
src_mask = (src != SRC.vocab.stoi["<pad>"]).unsqueeze(-2)
model.eval()
out = greedy_decode(model, src, src_mask, max_len=60, start_symbol=TRG.vocab.stoi["<sos>"])
translation = ["<sos>"]
for i in range(1, out.size(1)):
sym = TRG.vocab.itos[out[0, i]]
translation.append(sym)
if sym == "<eos>":
break
print(' '.join(translation))
print(' '.join(real_translation))
# plot_loss_curves(losses["train"], losses["val"])
visualise_attention(translation, ["<sos>"] + sentence[0] + ["<eos>"])
# candidate = []
# reference = []
# for i, batch in enumerate(test_iter):
# src = batch.src.transpose(0, 1)[:1]
# src_mask = (src != SRC.vocab.stoi["<pad>"]).unsqueeze(-2)
# model.eval()
# out = greedy_decode(model, src, src_mask, max_len=60, start_symbol=TRG.vocab.stoi["<sos>"])
# translation = []
# for i in range(1, out.size(1)):
# sym = TRG.vocab.itos[out[0, i]]
# if sym == "<eos>": break
# translation.append(sym)
# print("Translation: \t", ' '.join(translation))
# target = []
# for i in range(1, batch.trg.size(0)):
# sym = TRG.vocab.itos[batch.trg.data[i, 0]]
# if sym == "<eos>": break
# target.append(sym)
# print("Target: \t", ' '.join(target))
# print()
# candidate.append(translation)
# reference.append([target])
# score = bleu(candidate, reference)
# print(score)
# # state["bleu"] = bleu
# # save_model_state("harvard_transformer2_state.pt", model, {"args" : args, "kwargs" : kwargs}, epoch+1, state["loss"], state["bleu"])
# dataset = load_dataset('wmt14', 'de-en', 'test')['test']['translation']
# trainloader = DataLoader(dataset, batch_size=1, shuffle=True)
# model.eval()
# candidate = []
# reference = []
# for val in trainloader:
# de=val['de']
# en=val['en']
# de_tokens = [SRC.preprocess(sentence) for sentence in de]
# en_tokens = [TRG.preprocess(sentence) for sentence in en]
# src = SRC.process(de_tokens).to(device).T[:1]
# trg = TRG.process(en_tokens).to(device).T[:1]
# src_mask = (src != SRC.vocab.stoi["<pad>"]).unsqueeze(-2)
# out = greedy_decode(model, src, src_mask, max_len=60, start_symbol=TRG.vocab.stoi["<sos>"])
# translation = []
# for i in range(1, out.size(1)):
# sym = TRG.vocab.itos[out[0, i]]
# if sym == "<eos>": break
# translation.append(sym)
# target = []
# for i in range(1, trg.size(1)):
# sym = TRG.vocab.itos[trg[0, i]]
# if sym == "<eos>": break
# target.append(sym)
# candidate.append(translation)
# reference.append([target])
# print(bleu(candidate, reference))
|
normal
|
{
"blob_id": "57bc34c6a23c98fd031ea6634441d4d135c06590",
"index": 8694,
"step-1": "<mask token>\n\n\nclass Batch:\n <mask token>\n <mask token>\n <mask token>\n\n\nclass MyIterator(data.Iterator):\n\n def create_batches(self):\n if self.train:\n\n def pool(d, random_shuffler):\n for p in data.batch(d, self.batch_size * 100):\n p_batch = data.batch(sorted(p, key=self.sort_key), self\n .batch_size, self.batch_size_fn)\n for b in random_shuffler(list(p_batch)):\n yield b\n self.batches = pool(self.data(), self.random_shuffler)\n else:\n self.batches = []\n for b in data.batch(self.data(), self.batch_size, self.\n batch_size_fn):\n self.batches.append(sorted(b, key=self.sort_key))\n\n\n<mask token>\n\n\nclass SimpleLossCompute:\n \"\"\"A simple loss compute and train function.\"\"\"\n\n def __init__(self, generator, criterion, opt=None):\n self.generator = generator\n self.criterion = criterion\n self.opt = opt\n\n def __call__(self, x, y, norm):\n x = self.generator(x)\n loss = self.criterion(x.contiguous().view(-1, x.size(-1)), y.\n contiguous().view(-1)) / norm\n if self.opt is not None:\n loss.backward()\n self.opt.step()\n self.opt.optimizer.zero_grad()\n return loss.data.item() * norm\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef batch_size_fn(new, count, sofar):\n \"\"\"Keep augmenting batch and calculate total number of tokens + padding.\"\"\"\n global max_src_in_batch, max_tgt_in_batch\n if count == 1:\n max_src_in_batch = 0\n max_tgt_in_batch = 0\n max_src_in_batch = max(max_src_in_batch, len(vars(new)['src']))\n max_tgt_in_batch = max(max_tgt_in_batch, len(vars(new)['trg']) + 2)\n src_elements = count * max_src_in_batch\n tgt_elements = count * max_tgt_in_batch\n return max(src_elements, tgt_elements)\n\n\nclass Batch:\n \"\"\"Object for holding a batch of data with mask during training.\"\"\"\n\n def __init__(self, src, trg=None, pad=0):\n self.src = src\n self.src_mask = (src != pad).unsqueeze(-2)\n if trg is not None:\n self.trg = trg[:, :-1]\n self.trg_y = trg[:, 1:]\n self.trg_mask = self.make_std_mask(self.trg, pad)\n self.ntokens = (self.trg_y != pad).data.sum()\n\n @staticmethod\n def make_std_mask(tgt, pad):\n \"\"\"Create a mask to hide padding and future words.\"\"\"\n tgt_mask = (tgt != pad).unsqueeze(-2)\n tgt_mask = tgt_mask & Variable(subsequent_mask(tgt.size(-1)).\n type_as(tgt_mask.data))\n return tgt_mask\n\n\nclass MyIterator(data.Iterator):\n\n def create_batches(self):\n if self.train:\n\n def pool(d, random_shuffler):\n for p in data.batch(d, self.batch_size * 100):\n p_batch = data.batch(sorted(p, key=self.sort_key), self\n .batch_size, self.batch_size_fn)\n for b in random_shuffler(list(p_batch)):\n yield b\n self.batches = pool(self.data(), self.random_shuffler)\n else:\n self.batches = []\n for b in data.batch(self.data(), self.batch_size, self.\n batch_size_fn):\n self.batches.append(sorted(b, key=self.sort_key))\n\n\ndef subsequent_mask(size):\n \"\"\"Mask out subsequent positions.\"\"\"\n attn_shape = 1, size, size\n subsequent_mask = np.triu(np.ones(attn_shape), k=1).astype('uint8')\n return torch.from_numpy(subsequent_mask) == 0\n\n\ndef greedy_decode(model, src, src_mask, max_len, start_symbol):\n memory = model.encode(src, src_mask)\n ys = torch.ones(1, 1).fill_(start_symbol).type_as(src.data)\n for i in range(max_len - 1):\n out = model.decode(memory, src_mask, Variable(ys), Variable(\n subsequent_mask(ys.size(1)).type_as(src.data)))\n prob = model.generator(out[:, -1])\n _, next_word = torch.max(prob, dim=1)\n next_word = next_word.data[0]\n ys = torch.cat([ys, torch.ones(1, 1).type_as(src.data).fill_(\n next_word)], dim=1)\n return ys\n\n\ndef visualise_attention(tgt_sent, sent):\n\n def draw(data, x, y, ax):\n seaborn.heatmap(data, xticklabels=x, square=True, yticklabels=y,\n vmin=0.0, vmax=1.0, cbar=False, ax=ax)\n for layer in range(1, 6, 2):\n fig, axs = plt.subplots(1, 4, figsize=(16, 5))\n print('Encoder Layer', layer + 1)\n for h in range(4):\n vals = model.encoder.layers[layer].self_attn.attn[0, h].data.cpu()\n draw(vals, sent, sent if h == 0 else [], ax=axs[h])\n plt.show()\n for layer in range(1, 6, 2):\n fig, axs = plt.subplots(1, 4, figsize=(16, 5))\n print('Decoder Self Layer', layer + 1)\n for h in range(4):\n vals = model.decoder.layers[layer].self_attn.attn[0, h].data[:\n len(tgt_sent), :len(tgt_sent)].cpu()\n draw(vals, tgt_sent, tgt_sent if h == 0 else [], ax=axs[h])\n plt.show()\n print('Decoder Src Layer', layer + 1)\n fig, axs = plt.subplots(1, 4, figsize=(16, 5))\n for h in range(4):\n vals = model.decoder.layers[layer].self_attn.attn[0, h].data[:\n len(tgt_sent), :len(sent)].cpu()\n draw(vals, sent, tgt_sent if h == 0 else [], ax=axs[h])\n plt.show()\n\n\nclass SimpleLossCompute:\n \"\"\"A simple loss compute and train 
function.\"\"\"\n\n def __init__(self, generator, criterion, opt=None):\n self.generator = generator\n self.criterion = criterion\n self.opt = opt\n\n def __call__(self, x, y, norm):\n x = self.generator(x)\n loss = self.criterion(x.contiguous().view(-1, x.size(-1)), y.\n contiguous().view(-1)) / norm\n if self.opt is not None:\n loss.backward()\n self.opt.step()\n self.opt.optimizer.zero_grad()\n return loss.data.item() * norm\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef batch_size_fn(new, count, sofar):\n \"\"\"Keep augmenting batch and calculate total number of tokens + padding.\"\"\"\n global max_src_in_batch, max_tgt_in_batch\n if count == 1:\n max_src_in_batch = 0\n max_tgt_in_batch = 0\n max_src_in_batch = max(max_src_in_batch, len(vars(new)['src']))\n max_tgt_in_batch = max(max_tgt_in_batch, len(vars(new)['trg']) + 2)\n src_elements = count * max_src_in_batch\n tgt_elements = count * max_tgt_in_batch\n return max(src_elements, tgt_elements)\n\n\nclass Batch:\n \"\"\"Object for holding a batch of data with mask during training.\"\"\"\n\n def __init__(self, src, trg=None, pad=0):\n self.src = src\n self.src_mask = (src != pad).unsqueeze(-2)\n if trg is not None:\n self.trg = trg[:, :-1]\n self.trg_y = trg[:, 1:]\n self.trg_mask = self.make_std_mask(self.trg, pad)\n self.ntokens = (self.trg_y != pad).data.sum()\n\n @staticmethod\n def make_std_mask(tgt, pad):\n \"\"\"Create a mask to hide padding and future words.\"\"\"\n tgt_mask = (tgt != pad).unsqueeze(-2)\n tgt_mask = tgt_mask & Variable(subsequent_mask(tgt.size(-1)).\n type_as(tgt_mask.data))\n return tgt_mask\n\n\nclass MyIterator(data.Iterator):\n\n def create_batches(self):\n if self.train:\n\n def pool(d, random_shuffler):\n for p in data.batch(d, self.batch_size * 100):\n p_batch = data.batch(sorted(p, key=self.sort_key), self\n .batch_size, self.batch_size_fn)\n for b in random_shuffler(list(p_batch)):\n yield b\n self.batches = pool(self.data(), self.random_shuffler)\n else:\n self.batches = []\n for b in data.batch(self.data(), self.batch_size, self.\n batch_size_fn):\n self.batches.append(sorted(b, key=self.sort_key))\n\n\ndef subsequent_mask(size):\n \"\"\"Mask out subsequent positions.\"\"\"\n attn_shape = 1, size, size\n subsequent_mask = np.triu(np.ones(attn_shape), k=1).astype('uint8')\n return torch.from_numpy(subsequent_mask) == 0\n\n\ndef greedy_decode(model, src, src_mask, max_len, start_symbol):\n memory = model.encode(src, src_mask)\n ys = torch.ones(1, 1).fill_(start_symbol).type_as(src.data)\n for i in range(max_len - 1):\n out = model.decode(memory, src_mask, Variable(ys), Variable(\n subsequent_mask(ys.size(1)).type_as(src.data)))\n prob = model.generator(out[:, -1])\n _, next_word = torch.max(prob, dim=1)\n next_word = next_word.data[0]\n ys = torch.cat([ys, torch.ones(1, 1).type_as(src.data).fill_(\n next_word)], dim=1)\n return ys\n\n\ndef visualise_attention(tgt_sent, sent):\n\n def draw(data, x, y, ax):\n seaborn.heatmap(data, xticklabels=x, square=True, yticklabels=y,\n vmin=0.0, vmax=1.0, cbar=False, ax=ax)\n for layer in range(1, 6, 2):\n fig, axs = plt.subplots(1, 4, figsize=(16, 5))\n print('Encoder Layer', layer + 1)\n for h in range(4):\n vals = model.encoder.layers[layer].self_attn.attn[0, h].data.cpu()\n draw(vals, sent, sent if h == 0 else [], ax=axs[h])\n plt.show()\n for layer in range(1, 6, 2):\n fig, axs = plt.subplots(1, 4, figsize=(16, 5))\n print('Decoder Self Layer', layer + 1)\n for h in range(4):\n vals = model.decoder.layers[layer].self_attn.attn[0, h].data[:\n len(tgt_sent), :len(tgt_sent)].cpu()\n draw(vals, tgt_sent, tgt_sent if h == 0 else [], ax=axs[h])\n plt.show()\n print('Decoder Src Layer', layer + 1)\n fig, axs = plt.subplots(1, 4, figsize=(16, 5))\n for h in range(4):\n vals = model.decoder.layers[layer].self_attn.attn[0, h].data[:\n len(tgt_sent), :len(sent)].cpu()\n draw(vals, sent, tgt_sent if h == 0 else [], ax=axs[h])\n plt.show()\n\n\nclass SimpleLossCompute:\n \"\"\"A simple loss compute and train 
function.\"\"\"\n\n def __init__(self, generator, criterion, opt=None):\n self.generator = generator\n self.criterion = criterion\n self.opt = opt\n\n def __call__(self, x, y, norm):\n x = self.generator(x)\n loss = self.criterion(x.contiguous().view(-1, x.size(-1)), y.\n contiguous().view(-1)) / norm\n if self.opt is not None:\n loss.backward()\n self.opt.step()\n self.opt.optimizer.zero_grad()\n return loss.data.item() * norm\n\n\ndef rebatch(pad_idx, batch):\n \"\"\"Fix order in torchtext to match ours\"\"\"\n src, trg = batch.src.transpose(0, 1), batch.trg.transpose(0, 1)\n return Batch(src, trg, pad_idx)\n\n\ndef evaluate(data_iter, model, criterion):\n model.eval()\n with torch.no_grad():\n eval_loss = run_epoch((rebatch(pad_idx, b) for b in data_iter),\n model, SimpleLossCompute(model.generator, criterion, opt=None))\n return eval_loss\n\n\ndef run_epoch(data_iter, model, loss_compute):\n \"\"\"Standard Training and Logging Function\"\"\"\n start = time.time()\n total_tokens = 0\n total_loss = []\n tokens = 0\n for i, batch in enumerate(data_iter):\n out = model.forward(batch.src, batch.trg, batch.src_mask, batch.\n trg_mask)\n loss = loss_compute(out, batch.trg_y, batch.ntokens)\n total_loss.append(loss.item())\n total_tokens += batch.ntokens\n tokens += batch.ntokens\n if i % 50 == 1:\n elapsed = time.time() - start\n print('Epoch Step: %d Loss: %f Tokens per Sec: %f' % (i, loss, \n tokens / elapsed))\n start = time.time()\n tokens = 0\n return total_loss\n\n\n<mask token>\n",
"step-4": "<mask token>\nsys.path.append('./')\n<mask token>\nglobal max_src_in_batch, max_tgt_in_batch\n\n\ndef batch_size_fn(new, count, sofar):\n \"\"\"Keep augmenting batch and calculate total number of tokens + padding.\"\"\"\n global max_src_in_batch, max_tgt_in_batch\n if count == 1:\n max_src_in_batch = 0\n max_tgt_in_batch = 0\n max_src_in_batch = max(max_src_in_batch, len(vars(new)['src']))\n max_tgt_in_batch = max(max_tgt_in_batch, len(vars(new)['trg']) + 2)\n src_elements = count * max_src_in_batch\n tgt_elements = count * max_tgt_in_batch\n return max(src_elements, tgt_elements)\n\n\nclass Batch:\n \"\"\"Object for holding a batch of data with mask during training.\"\"\"\n\n def __init__(self, src, trg=None, pad=0):\n self.src = src\n self.src_mask = (src != pad).unsqueeze(-2)\n if trg is not None:\n self.trg = trg[:, :-1]\n self.trg_y = trg[:, 1:]\n self.trg_mask = self.make_std_mask(self.trg, pad)\n self.ntokens = (self.trg_y != pad).data.sum()\n\n @staticmethod\n def make_std_mask(tgt, pad):\n \"\"\"Create a mask to hide padding and future words.\"\"\"\n tgt_mask = (tgt != pad).unsqueeze(-2)\n tgt_mask = tgt_mask & Variable(subsequent_mask(tgt.size(-1)).\n type_as(tgt_mask.data))\n return tgt_mask\n\n\nclass MyIterator(data.Iterator):\n\n def create_batches(self):\n if self.train:\n\n def pool(d, random_shuffler):\n for p in data.batch(d, self.batch_size * 100):\n p_batch = data.batch(sorted(p, key=self.sort_key), self\n .batch_size, self.batch_size_fn)\n for b in random_shuffler(list(p_batch)):\n yield b\n self.batches = pool(self.data(), self.random_shuffler)\n else:\n self.batches = []\n for b in data.batch(self.data(), self.batch_size, self.\n batch_size_fn):\n self.batches.append(sorted(b, key=self.sort_key))\n\n\ndef subsequent_mask(size):\n \"\"\"Mask out subsequent positions.\"\"\"\n attn_shape = 1, size, size\n subsequent_mask = np.triu(np.ones(attn_shape), k=1).astype('uint8')\n return torch.from_numpy(subsequent_mask) == 0\n\n\ndef greedy_decode(model, src, src_mask, max_len, start_symbol):\n memory = model.encode(src, src_mask)\n ys = torch.ones(1, 1).fill_(start_symbol).type_as(src.data)\n for i in range(max_len - 1):\n out = model.decode(memory, src_mask, Variable(ys), Variable(\n subsequent_mask(ys.size(1)).type_as(src.data)))\n prob = model.generator(out[:, -1])\n _, next_word = torch.max(prob, dim=1)\n next_word = next_word.data[0]\n ys = torch.cat([ys, torch.ones(1, 1).type_as(src.data).fill_(\n next_word)], dim=1)\n return ys\n\n\ndef visualise_attention(tgt_sent, sent):\n\n def draw(data, x, y, ax):\n seaborn.heatmap(data, xticklabels=x, square=True, yticklabels=y,\n vmin=0.0, vmax=1.0, cbar=False, ax=ax)\n for layer in range(1, 6, 2):\n fig, axs = plt.subplots(1, 4, figsize=(16, 5))\n print('Encoder Layer', layer + 1)\n for h in range(4):\n vals = model.encoder.layers[layer].self_attn.attn[0, h].data.cpu()\n draw(vals, sent, sent if h == 0 else [], ax=axs[h])\n plt.show()\n for layer in range(1, 6, 2):\n fig, axs = plt.subplots(1, 4, figsize=(16, 5))\n print('Decoder Self Layer', layer + 1)\n for h in range(4):\n vals = model.decoder.layers[layer].self_attn.attn[0, h].data[:\n len(tgt_sent), :len(tgt_sent)].cpu()\n draw(vals, tgt_sent, tgt_sent if h == 0 else [], ax=axs[h])\n plt.show()\n print('Decoder Src Layer', layer + 1)\n fig, axs = plt.subplots(1, 4, figsize=(16, 5))\n for h in range(4):\n vals = model.decoder.layers[layer].self_attn.attn[0, h].data[:\n len(tgt_sent), :len(sent)].cpu()\n draw(vals, sent, tgt_sent if h == 0 else [], ax=axs[h])\n 
plt.show()\n\n\nclass SimpleLossCompute:\n \"\"\"A simple loss compute and train function.\"\"\"\n\n def __init__(self, generator, criterion, opt=None):\n self.generator = generator\n self.criterion = criterion\n self.opt = opt\n\n def __call__(self, x, y, norm):\n x = self.generator(x)\n loss = self.criterion(x.contiguous().view(-1, x.size(-1)), y.\n contiguous().view(-1)) / norm\n if self.opt is not None:\n loss.backward()\n self.opt.step()\n self.opt.optimizer.zero_grad()\n return loss.data.item() * norm\n\n\ndef rebatch(pad_idx, batch):\n \"\"\"Fix order in torchtext to match ours\"\"\"\n src, trg = batch.src.transpose(0, 1), batch.trg.transpose(0, 1)\n return Batch(src, trg, pad_idx)\n\n\ndef evaluate(data_iter, model, criterion):\n model.eval()\n with torch.no_grad():\n eval_loss = run_epoch((rebatch(pad_idx, b) for b in data_iter),\n model, SimpleLossCompute(model.generator, criterion, opt=None))\n return eval_loss\n\n\ndef run_epoch(data_iter, model, loss_compute):\n \"\"\"Standard Training and Logging Function\"\"\"\n start = time.time()\n total_tokens = 0\n total_loss = []\n tokens = 0\n for i, batch in enumerate(data_iter):\n out = model.forward(batch.src, batch.trg, batch.src_mask, batch.\n trg_mask)\n loss = loss_compute(out, batch.trg_y, batch.ntokens)\n total_loss.append(loss.item())\n total_tokens += batch.ntokens\n tokens += batch.ntokens\n if i % 50 == 1:\n elapsed = time.time() - start\n print('Epoch Step: %d Loss: %f Tokens per Sec: %f' % (i, loss, \n tokens / elapsed))\n start = time.time()\n tokens = 0\n return total_loss\n\n\n<mask token>\nSRC.build_vocab(train_data.src, min_freq=2)\nTRG.build_vocab(train_data.trg, min_freq=2)\n<mask token>\nmodel.load_state_dict(state['state_dict'])\n<mask token>\nlosses['test'].append(test_losses)\n<mask token>\nprint(test_loss)\nprint('Perplexity:', torch.exp(test_loss))\n<mask token>\nmodel.eval()\n<mask token>\nfor i in range(1, out.size(1)):\n sym = TRG.vocab.itos[out[0, i]]\n translation.append(sym)\n if sym == '<eos>':\n break\nprint(' '.join(translation))\nprint(' '.join(real_translation))\nvisualise_attention(translation, ['<sos>'] + sentence[0] + ['<eos>'])\n",
"step-5": "import sys\nsys.path.append(\"./\")\nfrom torchtext.datasets import Multi30k\nfrom torchtext.data import Field\nfrom torchtext import data\nimport pickle\nimport models.transformer as h\nimport torch\nfrom datasets import load_dataset\nfrom torch.utils.data import DataLoader\nfrom metrics.metrics import bleu\nimport numpy as np\nfrom torch.autograd import Variable\nfrom utils import plot_training_curve,plot_loss_curves\nfrom torch import nn\nimport torch\nimport time\nimport matplotlib.pyplot as plt\nimport seaborn\n\nglobal max_src_in_batch, max_tgt_in_batch\ndef batch_size_fn(new, count, sofar):\n \"Keep augmenting batch and calculate total number of tokens + padding.\"\n global max_src_in_batch, max_tgt_in_batch\n if count == 1:\n max_src_in_batch = 0\n max_tgt_in_batch = 0\n max_src_in_batch = max(max_src_in_batch, len(vars(new)[\"src\"]))\n max_tgt_in_batch = max(max_tgt_in_batch, len(vars(new)[\"trg\"]) + 2)\n src_elements = count * max_src_in_batch\n tgt_elements = count * max_tgt_in_batch\n return max(src_elements, tgt_elements)\nclass Batch:\n \"Object for holding a batch of data with mask during training.\"\n def __init__(self, src, trg=None, pad=0):\n self.src = src\n self.src_mask = (src != pad).unsqueeze(-2)\n if trg is not None:\n self.trg = trg[:, :-1]\n self.trg_y = trg[:, 1:]\n self.trg_mask = \\\n self.make_std_mask(self.trg, pad)\n self.ntokens = (self.trg_y != pad).data.sum()\n \n @staticmethod\n def make_std_mask(tgt, pad):\n \"Create a mask to hide padding and future words.\"\n tgt_mask = (tgt != pad).unsqueeze(-2)\n tgt_mask = tgt_mask & Variable(\n subsequent_mask(tgt.size(-1)).type_as(tgt_mask.data))\n return tgt_mask\nclass MyIterator(data.Iterator):\n def create_batches(self):\n if self.train:\n def pool(d, random_shuffler):\n for p in data.batch(d, self.batch_size * 100):\n p_batch = data.batch(\n sorted(p, key=self.sort_key),\n self.batch_size, self.batch_size_fn)\n for b in random_shuffler(list(p_batch)):\n yield b\n self.batches = pool(self.data(), self.random_shuffler)\n \n else:\n self.batches = []\n for b in data.batch(self.data(), self.batch_size,\n self.batch_size_fn):\n self.batches.append(sorted(b, key=self.sort_key))\ndef subsequent_mask(size):\n \"Mask out subsequent positions.\"\n attn_shape = (1, size, size)\n subsequent_mask = np.triu(np.ones(attn_shape), k=1).astype('uint8')\n return torch.from_numpy(subsequent_mask) == 0\ndef greedy_decode(model, src, src_mask, max_len, start_symbol):\n memory = model.encode(src, src_mask)\n ys = torch.ones(1, 1).fill_(start_symbol).type_as(src.data)\n for i in range(max_len-1):\n out = model.decode(memory, src_mask, \n Variable(ys), \n Variable(subsequent_mask(ys.size(1))\n .type_as(src.data)))\n prob = model.generator(out[:, -1])\n # vals, idxs = torch.topk(torch.softmax(prob, dim=1).flatten(), 10, largest=True)\n # print((vals*100).tolist())\n # print([TRG.vocab.itos[idx] for idx in idxs])\n _, next_word = torch.max(prob, dim = 1)\n next_word = next_word.data[0]\n ys = torch.cat([ys, \n torch.ones(1, 1).type_as(src.data).fill_(next_word)], dim=1)\n return ys\ndef visualise_attention(tgt_sent, sent):\n def draw(data, x, y, ax):\n seaborn.heatmap(data, \n xticklabels=x, square=True, yticklabels=y, vmin=0.0, vmax=1.0, \n cbar=False, ax=ax)\n # bottom, top = ax.get_ylim()\n # ax.set_ylim(bottom + 0.5, top - 0.5)\n for layer in range(1, 6, 2):\n fig, axs = plt.subplots(1,4, figsize=(16, 5))\n print(\"Encoder Layer\", layer+1)\n for h in range(4):\n vals = model.encoder.layers[layer].self_attn.attn[0, 
h].data.cpu()\n draw(vals, sent, sent if h ==0 else [], ax=axs[h])\n plt.show()\n \n for layer in range(1, 6, 2):\n fig, axs = plt.subplots(1,4, figsize=(16, 5))\n print(\"Decoder Self Layer\", layer+1)\n for h in range(4):\n vals = model.decoder.layers[layer].self_attn.attn[0, h].data[:len(tgt_sent), :len(tgt_sent)].cpu()\n draw(vals, tgt_sent, tgt_sent if h ==0 else [], ax=axs[h])\n plt.show()\n print(\"Decoder Src Layer\", layer+1)\n fig, axs = plt.subplots(1,4, figsize=(16, 5))\n for h in range(4):\n vals = model.decoder.layers[layer].self_attn.attn[0, h].data[:len(tgt_sent), :len(sent)].cpu()\n draw(vals, sent, tgt_sent if h ==0 else [], ax=axs[h])\n plt.show()\nclass SimpleLossCompute:\n \"A simple loss compute and train function.\"\n def __init__(self, generator, criterion, opt=None):\n self.generator = generator\n self.criterion = criterion\n self.opt = opt\n \n def __call__(self, x, y, norm):\n x = self.generator(x)\n loss = self.criterion(x.contiguous().view(-1, x.size(-1)), \n y.contiguous().view(-1)) / norm\n if self.opt is not None:\n loss.backward()\n self.opt.step()\n self.opt.optimizer.zero_grad()\n return loss.data.item() * norm\ndef rebatch(pad_idx, batch):\n \"Fix order in torchtext to match ours\"\n src, trg = batch.src.transpose(0, 1), batch.trg.transpose(0, 1)\n return Batch(src, trg, pad_idx)\ndef evaluate(data_iter, model, criterion):\n model.eval()\n with torch.no_grad():\n eval_loss = run_epoch((rebatch(pad_idx, b) for b in data_iter), model, \n SimpleLossCompute(model.generator, criterion, opt=None))\n return eval_loss\ndef run_epoch(data_iter, model, loss_compute):\n \"Standard Training and Logging Function\"\n start = time.time()\n total_tokens = 0\n total_loss = []\n tokens = 0\n for i, batch in enumerate(data_iter):\n out = model.forward(batch.src, batch.trg, \n batch.src_mask, batch.trg_mask)\n loss = loss_compute(out, batch.trg_y, batch.ntokens) #/ batch.ntokens\n total_loss.append(loss.item())\n total_tokens += batch.ntokens\n tokens += batch.ntokens\n if i % 50 == 1:\n elapsed = time.time() - start\n print(\"Epoch Step: %d Loss: %f Tokens per Sec: %f\" %\n (i, loss, tokens / elapsed))\n start = time.time()\n tokens = 0\n return total_loss\n\n\nSRC = Field(tokenize = \"spacy\",\n tokenizer_language=\"de_core_news_sm\",\n init_token = '<sos>',\n eos_token = '<eos>',\n lower = True)\n\nTRG = Field(tokenize = \"spacy\",\n tokenizer_language=\"en_core_web_sm\",\n init_token = '<sos>',\n eos_token = '<eos>',\n lower = True)\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n\nMAX_LEN = 100\ntrain_data, valid_data, test_data = Multi30k.splits(exts = ('.de', '.en'),fields = (SRC, TRG)\n ,filter_pred=lambda x: len(vars(x)['src']) <= MAX_LEN and len(vars(x)['trg']) <= MAX_LEN)\nSRC.build_vocab(train_data.src, min_freq=2)\nTRG.build_vocab(train_data.trg, min_freq=2)\nINPUT_DIM = len(SRC.vocab)\nOUTPUT_DIM = len(TRG.vocab)\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\nBATCH_SIZE = 64\ntrain_iter = MyIterator(train_data, batch_size=BATCH_SIZE, device=device,\n repeat=False, sort_key=lambda x: (len(x.src), len(x.trg)),\n batch_size_fn=batch_size_fn, train=True)\nvalid_iter = MyIterator(valid_data, batch_size=BATCH_SIZE, device=device,\n repeat=False, sort_key=lambda x: (len(x.src), len(x.trg)),\n batch_size_fn=batch_size_fn, train=False)\ntest_iter = MyIterator(test_data, batch_size=BATCH_SIZE, device=device,\n repeat=False, sort_key=lambda x: (len(x.src), len(x.trg)),\n batch_size_fn=batch_size_fn, 
train=False)\n\nmodel_name = \"harvard_transformer2_state\"\nargs = (INPUT_DIM, OUTPUT_DIM)\nkwargs = {\"N\" : 6}\nmodel = h.make_model(*args, **kwargs).to(device)\n\nstate = torch.load(model_name + \".pt\", map_location=device)\nmodel.load_state_dict(state[\"state_dict\"])\nlosses = state[\"loss\"]\n\npad_idx = TRG.vocab.stoi[\"<pad>\"]\ncriterion_test = nn.CrossEntropyLoss(ignore_index=pad_idx)\n\ntest_losses = evaluate(test_iter, model, criterion_test)\nlosses[\"test\"].append(test_losses)\ntest_loss = torch.tensor(sum(test_losses) / len(test_losses))\nprint(test_loss)\nprint('Perplexity:', torch.exp(test_loss))\n\n# sentence = [SRC.preprocess(\"eine gruppe von menschen steht vor einem iglu .\")]\n# real_translation = TRG.preprocess(\"a man in a blue shirt is standing on a ladder and cleaning a window\")\n# sentence = [SRC.preprocess(\"eine gruppe von menschen steht vor einem iglu .\")]\n# real_translation = TRG.preprocess(\"a group of people stands in front of an igloo.\")\nsentence = [SRC.preprocess(\"ein mann mit kariertem hut in einer schwarzen jacke und einer schwarz-weiß gestreiften hose spielt auf einer bühne mit einem sänger und einem weiteren gitarristen im hintergrund auf einer e-gitarre .\")]\nreal_translation = TRG.preprocess(\"a man in a black jacket and checkered hat wearing black and white striped pants plays an electric guitar on a stage with a singer and another guitar player in the background .\")\n\nsrc = SRC.process(sentence).to(device).T\nsrc_mask = (src != SRC.vocab.stoi[\"<pad>\"]).unsqueeze(-2)\nmodel.eval()\nout = greedy_decode(model, src, src_mask, max_len=60, start_symbol=TRG.vocab.stoi[\"<sos>\"])\ntranslation = [\"<sos>\"]\nfor i in range(1, out.size(1)):\n sym = TRG.vocab.itos[out[0, i]]\n translation.append(sym)\n if sym == \"<eos>\":\n break\nprint(' '.join(translation))\nprint(' '.join(real_translation))\n\n# plot_loss_curves(losses[\"train\"], losses[\"val\"])\n\nvisualise_attention(translation, [\"<sos>\"] + sentence[0] + [\"<eos>\"])\n\n# candidate = []\n# reference = []\n# for i, batch in enumerate(test_iter):\n# src = batch.src.transpose(0, 1)[:1]\n# src_mask = (src != SRC.vocab.stoi[\"<pad>\"]).unsqueeze(-2)\n# model.eval()\n# out = greedy_decode(model, src, src_mask, max_len=60, start_symbol=TRG.vocab.stoi[\"<sos>\"])\n\n# translation = []\n# for i in range(1, out.size(1)):\n# sym = TRG.vocab.itos[out[0, i]]\n# if sym == \"<eos>\": break\n# translation.append(sym)\n# print(\"Translation: \\t\", ' '.join(translation))\n# target = []\n# for i in range(1, batch.trg.size(0)):\n# sym = TRG.vocab.itos[batch.trg.data[i, 0]]\n# if sym == \"<eos>\": break\n# target.append(sym)\n# print(\"Target: \\t\", ' '.join(target))\n# print()\n\n# candidate.append(translation)\n# reference.append([target])\n\n# score = bleu(candidate, reference)\n# print(score)\n# # state[\"bleu\"] = bleu\n# # save_model_state(\"harvard_transformer2_state.pt\", model, {\"args\" : args, \"kwargs\" : kwargs}, epoch+1, state[\"loss\"], state[\"bleu\"])\n\n\n# dataset = load_dataset('wmt14', 'de-en', 'test')['test']['translation']\n# trainloader = DataLoader(dataset, batch_size=1, shuffle=True)\n\n# model.eval()\n\n# candidate = []\n# reference = []\n# for val in trainloader:\n# de=val['de']\n# en=val['en']\n\n# de_tokens = [SRC.preprocess(sentence) for sentence in de]\n# en_tokens = [TRG.preprocess(sentence) for sentence in en]\n# src = SRC.process(de_tokens).to(device).T[:1]\n# trg = TRG.process(en_tokens).to(device).T[:1]\n# src_mask = (src != 
SRC.vocab.stoi[\"<pad>\"]).unsqueeze(-2)\n# out = greedy_decode(model, src, src_mask, max_len=60, start_symbol=TRG.vocab.stoi[\"<sos>\"])\n\n# translation = []\n# for i in range(1, out.size(1)):\n# sym = TRG.vocab.itos[out[0, i]]\n# if sym == \"<eos>\": break\n# translation.append(sym)\n# target = []\n# for i in range(1, trg.size(1)):\n# sym = TRG.vocab.itos[trg[0, i]]\n# if sym == \"<eos>\": break\n# target.append(sym)\n# candidate.append(translation)\n# reference.append([target])\n\n# print(bleu(candidate, reference))\n",
"step-ids": [
7,
14,
17,
18,
21
]
}
|
[
7,
14,
17,
18,
21
] |
#!python
import pdb
import argparse
import os
import re
import sys
import string
from utilpack import path
from subprocess import Popen
from subprocess import PIPE
def popen(cmd):
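    # Run a command and return its stdout. Note: naive whitespace split,
    # so arguments containing spaces are not supported.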
spl = cmd.split()
return Popen(spl, stdout=PIPE).communicate()[0]
def debug (s):
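    # Intentional no-op; change the body to `print s` to enable debug tracing.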
s
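
# Module-level cache of the parsed setup.ini; 0 means "not parsed yet".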
dists = 0
def get_setup_ini (setup_ini_filename):
global dists
if dists:
return
    dists = {'test': {}, 'curr': {}, 'prev': {}}
chunks = string.split (open (setup_ini_filename).read (), '\n\n@ ')
for i in chunks[1:]:
lines = string.split (i, '\n')
name = string.strip (lines[0])
debug ('package: ' + name)
packages = dists['curr']
records = {'sdesc': name}
j = 1
while j < len (lines) and string.strip (lines[j]):
debug ('raw: ' + lines[j])
if lines[j][0] == '#':
j = j + 1
continue
elif lines[j][0] == '[':
debug ('dist: ' + lines[j][1:5])
packages[name] = records.copy ()
packages = dists[lines[j][1:5]]
j = j + 1
continue
try:
key, value = map (string.strip,
string.split (lines[j], ': ', 1))
except:
print lines[j]
                raise ValueError('malformed setup.ini line: ' + lines[j])
if value[0] == '"' and value.find ('"', 1) == -1:
while 1:
j = j + 1
value += '\n' + lines[j]
if lines[j].find ('"') != -1:
break
records[key] = value
j = j + 1
packages[name] = records
def error (msg):
print sys.argv[0] + ": " + msg
def find_line(inifile, target_package, section, filename):
    ini = open(inifile).readlines()
tpmarkerlen= len(target_package) + 2
ln = 0
found = False
for l in ini:
if l[0:tpmarkerlen] == "@ " + target_package:
found = True
break
ln = ln + 1
if not found:
error("urg")
return None
endln = len(ini)
while ln < endln:
#print ini[ln]
if section in ini[ln]:
return ln, ini[ln]
ln += 1
raise("urg")
def gen_diff(diff_filename, packagename, linenum, oldline,\
filename, basename, section):
# Generate the md5
md5 = popen("md5sum " + filename).split()[0]
    # Generate the file length in bytes (named 'length' to avoid shadowing
    # the len() builtin)
    length = str(os.stat(filename).st_size)
    
    # Generate the new line, e.g.:
    # install: release-2/testpkg/testpkg-0.0.1-0.tar.bz 3140 fbbe05f50b9273be640c312857f70619
    newline = section + ": " + "release-2/" + packagename + "/" + basename + " " + length + " " + md5 + "\n"
# Use the old and new lines to create a diff
#19916c19916
#< install: release-2/testpkg/testpkg-0.0.1-0.tar.bz 3140 fbbe05f50b9273be640c312857f70619
#---
#> install: release-2/testpkg/testpkg-0.0.1-0.tar.bz 3140 69906b3bc3a249056201c398cb928bef
# Add one: we're zerobase internally but diff is 1 based linenumbers
diff = [0,0,0,0]
diff[0] = str(linenum + 1) + "c" + str(linenum + 1) + "\n"
diff[1] = "< " + oldline
diff[2] = "---\n"
diff[3] = "> " + newline
# Return the diff
return diff
def main():
global dists
    parser = argparse.ArgumentParser(description = "Fixes md5sums in setup-2.ini to match a newly built package. It is an error for given files not to exist in the .ini under that package. "\
        "Example usage: " + sys.argv[0] + " testpkg test-pkg-0.0.1-0-src.tar.bz test-pkg-0.0.1-0.tar.bz"
        )
parser.add_argument("inifile",\
help="The setup.ini to patch.", metavar="INI")
parser.add_argument("package",\
help="The package name to fix the md5sums for.", metavar="PKG")
parser.add_argument("files",\
help="The package files to fix.", nargs = "*", metavar="FILES")
options = parser.parse_args()
target_package = options.package
target_files = []
for f in options.files:
target_files.append(f)
# Yeah I know this looks wrong but that's globals for you
get_setup_ini(options.inifile)
inifile = options.inifile
pkgs = dists["curr"]
namekeys = pkgs.keys()
if target_package not in namekeys:
error(target_package + " is not in " + inifile)
return 1
sections = ["install", "source"]
for f in target_files:
basename = path(f).basename()
found_section = 0
for s in sections:
if basename in pkgs[target_package][s]:
found_section = s
break
if not found_section:
error(basename + " is not in install: or source: of " +\
target_package + "in " + inifile )
return 1
#def gen_diff(diff_filename, packagename, linenum, oldline,\
# filename, basename, section):
for f in target_files:
basename = path(f).basename()
#def find_line(inifile, target_package, section, filename):
(linenum, line) = find_line(inifile, target_package,\
found_section, basename)
diff_filename = basename + ".diff"
diff = gen_diff(diff_filename, target_package, linenum, line,\
f, basename, found_section)
        df = open(diff_filename, "w")
df.writelines(diff)
df.close()
#selected = []
#for package in dists["curr"].keys():
# if "Base" in dists["curr"][package]["category"]:
# selected.append(package)
#selected.sort()
#for i in selected:
# print i
if __name__ == "__main__":
main()
|
normal
|
{
"blob_id": "e3b8bec0cc7df217052a3182f9a862f0e3622afd",
"index": 5318,
"step-1": "#!python\nimport pdb\nimport argparse\nimport os\nimport re\nimport sys\nimport string\nfrom utilpack import path\nfrom subprocess import Popen\nfrom subprocess import PIPE\n\n\ndef popen(cmd):\n spl = cmd.split()\n return Popen(spl, stdout=PIPE).communicate()[0]\n \ndef debug (s):\n s\n\ndists = 0\ndef get_setup_ini (setup_ini_filename):\n global dists\n if dists:\n return\n dists = {'test': {}, 'curr': {}, 'prev' : {}}\n chunks = string.split (open (setup_ini_filename).read (), '\\n\\n@ ')\n for i in chunks[1:]:\n lines = string.split (i, '\\n')\n name = string.strip (lines[0])\n debug ('package: ' + name)\n packages = dists['curr']\n records = {'sdesc': name}\n j = 1\n while j < len (lines) and string.strip (lines[j]):\n debug ('raw: ' + lines[j])\n if lines[j][0] == '#':\n j = j + 1\n continue\n elif lines[j][0] == '[':\n debug ('dist: ' + lines[j][1:5])\n packages[name] = records.copy ()\n packages = dists[lines[j][1:5]]\n j = j + 1\n continue\n\n try:\n key, value = map (string.strip,\n string.split (lines[j], ': ', 1))\n except:\n print lines[j]\n raise 'URG'\n if value[0] == '\"' and value.find ('\"', 1) == -1:\n while 1:\n j = j + 1\n value += '\\n' + lines[j]\n if lines[j].find ('\"') != -1:\n break\n records[key] = value\n j = j + 1\n packages[name] = records\n\ndef error (msg):\n print sys.argv[0] + \": \" + msg\n \n\n\ndef find_line(inifile, target_package, section, filename):\n ini = file(inifile).readlines()\n tpmarkerlen= len(target_package) + 2\n ln = 0\n found = False\n for l in ini: \n if l[0:tpmarkerlen] == \"@ \" + target_package:\n found = True\n break\n ln = ln + 1\n \n if not found:\n error(\"urg\")\n return None\n \n endln = len(ini)\n while ln < endln:\n #print ini[ln]\n if section in ini[ln]:\n return ln, ini[ln]\n ln += 1\n raise(\"urg\")\n \n \n\n \ndef gen_diff(diff_filename, packagename, linenum, oldline,\\\n filename, basename, section):\n # Generate the md5\n md5 = popen(\"md5sum \" + filename).split()[0]\n \n # Generate the length\n len = str(os.stat(filename).st_size)\n \n # Generate the new line\n #install: release-2/testpkg/testpkg-0.0.1-0.tar.bz 3140 fbbe05f50b9273be640c312857f70619\n newline = section + \": \" + \"release-2/\" + packagename + \"/\" + basename + \" \" + len + \" \" + md5 + \"\\n\"\n \n # Use the old and new lines to create a diff\n#19916c19916\n#< install: release-2/testpkg/testpkg-0.0.1-0.tar.bz 3140 fbbe05f50b9273be640c312857f70619\n#---\n#> install: release-2/testpkg/testpkg-0.0.1-0.tar.bz 3140 69906b3bc3a249056201c398cb928bef\n\n\n # Add one: we're zerobase internally but diff is 1 based linenumbers\n diff = [0,0,0,0]\n diff[0] = str(linenum + 1) + \"c\" + str(linenum + 1) + \"\\n\"\n diff[1] = \"< \" + oldline\n diff[2] = \"---\\n\"\n diff[3] = \"> \" + newline\n # Return the diff\n return diff\n \ndef main():\n global dists\n parser = argparse.ArgumentParser(description = \" Fixes md5sum in setup-2.ini to match newly built package. 
It is an error for given files not to exist in the .ini under that package.\"\\\n \"Example usage: \" + sys.argv[0] + \" testpkg test-pkg-0.0.1-0-src.tar.bz test-pkg-0.0.1-0.tar.bz\"\n \n )\n \n parser.add_argument(\"inifile\",\\\n help=\"The setup.ini to patch.\", metavar=\"INI\")\n\n parser.add_argument(\"package\",\\\n help=\"The package name to fix the md5sums for.\", metavar=\"PKG\")\n\n parser.add_argument(\"files\",\\\n help=\"The package files to fix.\", nargs = \"*\", metavar=\"FILES\")\n\n options = parser.parse_args()\n target_package = options.package\n target_files = []\n for f in options.files:\n target_files.append(f)\n\n \n # Yeah I know this looks wrong but that's globals for you\n get_setup_ini(options.inifile)\n inifile = options.inifile\n \n pkgs = dists[\"curr\"]\n namekeys = pkgs.keys()\n \n if target_package not in namekeys:\n error(target_package + \" is not in \" + inifile)\n return 1\n \n sections = [\"install\", \"source\"]\n \n for f in target_files:\n basename = path(f).basename()\n found_section = 0\n for s in sections:\n if basename in pkgs[target_package][s]:\n found_section = s\n break\n if not found_section:\n error(basename + \" is not in install: or source: of \" +\\\n target_package + \"in \" + inifile )\n return 1\n \n#def gen_diff(diff_filename, packagename, linenum, oldline,\\\n# filename, basename, section):\n \n for f in target_files:\n basename = path(f).basename()\n#def find_line(inifile, target_package, section, filename): \n (linenum, line) = find_line(inifile, target_package,\\\n found_section, basename)\n diff_filename = basename + \".diff\"\n diff = gen_diff(diff_filename, target_package, linenum, line,\\\n f, basename, found_section)\n df = file(diff_filename, \"w\")\n df.writelines(diff)\n df.close()\n \n \n \n #selected = []\n #for package in dists[\"curr\"].keys():\n # if \"Base\" in dists[\"curr\"][package][\"category\"]:\n # selected.append(package)\n #selected.sort()\n #for i in selected:\n # print i\n \n \n\n\n\nif __name__ == \"__main__\":\n main()\n \n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if __name__ == '__main__':
create_data_lists(ICDAR_path=
'../ICDAR_Dataset/0325updated.task1train(626p)', output_folder=
'../ICDAR_Dataset/0325updated.task1train(626p)')
<|reserved_special_token_1|>
from utils import create_data_lists
if __name__ == '__main__':
create_data_lists(ICDAR_path=
'../ICDAR_Dataset/0325updated.task1train(626p)', output_folder=
'../ICDAR_Dataset/0325updated.task1train(626p)')
|
flexible
|
{
"blob_id": "6334a8a052d72b0f13395b301bd5a766acf4399b",
"index": 3437,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n create_data_lists(ICDAR_path=\n '../ICDAR_Dataset/0325updated.task1train(626p)', output_folder=\n '../ICDAR_Dataset/0325updated.task1train(626p)')\n",
"step-3": "from utils import create_data_lists\nif __name__ == '__main__':\n create_data_lists(ICDAR_path=\n '../ICDAR_Dataset/0325updated.task1train(626p)', output_folder=\n '../ICDAR_Dataset/0325updated.task1train(626p)')\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import numpy as np
import cv2
import time
from itertools import chain, compress
from collections import defaultdict, namedtuple
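
# The classes below implement a stereo feature-tracking front end in the
# spirit of MSCKF-VIO: grid-based FAST detection, gyro-aided KLT tracking,
# and left/right stereo matching.
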
class FeatureMetaData(object):
"""
Contain necessary information of a feature for easy access.
"""
def __init__(self):
self.id = None # int
self.response = None # float
self.lifetime = None # int
self.cam0_point = None # vec2
self.cam1_point = None # vec2
class FeatureMeasurement(object):
"""
Stereo measurement of a feature.
"""
def __init__(self):
self.id = None
self.u0 = None
self.v0 = None
self.u1 = None
self.v1 = None
class ImageProcessor(object):
"""
Detect and track features in image sequences.
"""
def __init__(self, config):
self.config = config
# Indicate if this is the first image message.
self.is_first_img = True
# ID for the next new feature.
self.next_feature_id = 0
# Feature detector
self.detector = cv2.FastFeatureDetector_create(self.config.fast_threshold)
# IMU message buffer.
self.imu_msg_buffer = []
# Previous and current images
self.cam0_prev_img_msg = None
self.cam0_curr_img_msg = None
self.cam1_curr_img_msg = None
# Pyramids for previous and current image
self.prev_cam0_pyramid = None
self.curr_cam0_pyramid = None
self.curr_cam1_pyramid = None
# Features in the previous and current image.
# list of lists of FeatureMetaData
self.prev_features = [[] for _ in range(self.config.grid_num)] # Don't use [[]] * N
self.curr_features = [[] for _ in range(self.config.grid_num)]
# Number of features after each outlier removal step.
# keys: before_tracking, after_tracking, after_matching, after_ransac
self.num_features = defaultdict(int)
# load config
# Camera calibration parameters
self.cam0_resolution = config.cam0_resolution # vec2
self.cam0_intrinsics = config.cam0_intrinsics # vec4
self.cam0_distortion_model = config.cam0_distortion_model # string
self.cam0_distortion_coeffs = config.cam0_distortion_coeffs # vec4
self.cam1_resolution = config.cam1_resolution # vec2
self.cam1_intrinsics = config.cam1_intrinsics # vec4
self.cam1_distortion_model = config.cam1_distortion_model # string
self.cam1_distortion_coeffs = config.cam1_distortion_coeffs # vec4
# Take a vector from cam0 frame to the IMU frame.
self.T_cam0_imu = np.linalg.inv(config.T_imu_cam0)
self.R_cam0_imu = self.T_cam0_imu[:3, :3]
self.t_cam0_imu = self.T_cam0_imu[:3, 3]
# Take a vector from cam1 frame to the IMU frame.
self.T_cam1_imu = np.linalg.inv(config.T_imu_cam1)
self.R_cam1_imu = self.T_cam1_imu[:3, :3]
self.t_cam1_imu = self.T_cam1_imu[:3, 3]
self.image_id = 0
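        # Monotonically increasing frame counter, used to name the per-frame
        # feature files written by save_features() / read by load_features().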
def stereo_callback(self, stereo_msg):
"""
Callback function for the stereo images.
"""
start = time.time()
self.cam0_curr_img_msg = stereo_msg.cam0_msg
self.cam1_curr_img_msg = stereo_msg.cam1_msg
# Build the image pyramids once since they're used at multiple places.
self.create_image_pyramids()
# Detect features in the first frame.
if self.is_first_img:
if not self.config.load_features_flag:
self.initialize_first_frame()
self.is_first_img = False
# Draw results.
# self.draw_features_stereo()
else:
if not self.config.load_features_flag:
# Track the feature in the previous image.
t = time.time()
self.track_features()
print('___track_features:', time.time() - t)
t = time.time()
# Add new features into the current image.
self.add_new_features()
print('___add_new_features:', time.time() - t)
t = time.time()
self.prune_features()
print('___prune_features:', time.time() - t)
t = time.time()
# Draw results.
# self.draw_features_stereo()
print('___draw_features_stereo:', time.time() - t)
t = time.time()
print('===image process elapsed:', time.time() - start, f'({stereo_msg.timestamp})')
if not self.config.load_features_flag:
try:
self.save_features()
return self.publish()
finally:
self.cam0_prev_img_msg = self.cam0_curr_img_msg
self.prev_features = self.curr_features
self.prev_cam0_pyramid = self.curr_cam0_pyramid
# Initialize the current features to empty vectors.
self.curr_features = [[] for _ in range(self.config.grid_num)]
else:
self.load_features()
return self.publish()
def imu_callback(self, msg):
"""
Callback function for the imu message.
"""
self.imu_msg_buffer.append(msg)
def create_image_pyramids(self):
"""
Create image pyramids used for KLT tracking.
(Seems doesn't work in python)
"""
curr_cam0_img = self.cam0_curr_img_msg.image
# self.curr_cam0_pyramid = cv2.buildOpticalFlowPyramid(
# curr_cam0_img, self.config.win_size, self.config.pyramid_levels,
# None, cv2.BORDER_REFLECT_101, cv2.BORDER_CONSTANT, False)[1]
self.curr_cam0_pyramid = curr_cam0_img
curr_cam1_img = self.cam1_curr_img_msg.image
# self.curr_cam1_pyramid = cv2.buildOpticalFlowPyramid(
# curr_cam1_img, self.config.win_size, self.config.pyramid_levels,
# None, cv2.BORDER_REFLECT_101, cv2.BORDER_CONSTANT, False)[1]
self.curr_cam1_pyramid = curr_cam1_img
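        # With the buildOpticalFlowPyramid calls commented out, the "pyramids"
        # are just the raw images; calcOpticalFlowPyrLK builds its own pyramid.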
def initialize_first_frame(self):
"""
Initialize the image processing sequence, which is basically detect
new features on the first set of stereo images.
"""
img = self.cam0_curr_img_msg.image
grid_height, grid_width = self.get_grid_size(img)
# Detect new features on the frist image.
new_features = self.detector.detect(img)
# Find the stereo matched points for the newly detected features.
cam0_points = [kp.pt for kp in new_features]
cam1_points, inlier_markers = self.stereo_match(cam0_points)
cam0_inliers, cam1_inliers = [], []
response_inliers = []
for i, inlier in enumerate(inlier_markers):
if not inlier:
continue
cam0_inliers.append(cam0_points[i])
cam1_inliers.append(cam1_points[i])
response_inliers.append(new_features[i].response)
        # A sanity check could warn here when
        # len(cam0_inliers) < max(5, 0.1 * len(new_features)),
        # i.e. when stereo matching rejects most of the detections.
# Group the features into grids
grid_new_features = [[] for _ in range(self.config.grid_num)]
for i in range(len(cam0_inliers)):
cam0_point = cam0_inliers[i]
cam1_point = cam1_inliers[i]
response = response_inliers[i]
row = int(cam0_point[1] / grid_height)
col = int(cam0_point[0] / grid_width)
code = row*self.config.grid_col + col
new_feature = FeatureMetaData()
new_feature.response = response
new_feature.cam0_point = cam0_point
new_feature.cam1_point = cam1_point
grid_new_features[code].append(new_feature)
# Sort the new features in each grid based on its response.
# And collect new features within each grid with high response.
for i, new_features in enumerate(grid_new_features):
for feature in sorted(new_features, key=lambda x:x.response,
reverse=True)[:self.config.grid_min_feature_num]:
self.curr_features[i].append(feature)
self.curr_features[i][-1].id = self.next_feature_id
self.curr_features[i][-1].lifetime = 1
self.next_feature_id += 1
def track_features(self):
"""
Tracker features on the newly received stereo images.
"""
img = self.cam0_curr_img_msg.image
grid_height, grid_width = self.get_grid_size(img)
# Compute a rough relative rotation which takes a vector
# from the previous frame to the current frame.
cam0_R_p_c, cam1_R_p_c = self.integrate_imu_data()
# Organize the features in the previous image.
prev_ids = []
prev_lifetime = []
prev_cam0_points = []
prev_cam1_points = []
for feature in chain.from_iterable(self.prev_features):
prev_ids.append(feature.id)
prev_lifetime.append(feature.lifetime)
prev_cam0_points.append(feature.cam0_point)
prev_cam1_points.append(feature.cam1_point)
prev_cam0_points = np.array(prev_cam0_points, dtype=np.float32)
# Number of the features before tracking.
self.num_features['before_tracking'] = len(prev_cam0_points)
# Abort tracking if there is no features in the previous frame.
if len(prev_cam0_points) == 0:
return
# Track features using LK optical flow method.
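        # The gyro-predicted rotation warps the previous points to a better
        # initial guess, which shortens the KLT search.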
curr_cam0_points = self.predict_feature_tracking(
prev_cam0_points, cam0_R_p_c, self.cam0_intrinsics)
curr_cam0_points, track_inliers, _ = cv2.calcOpticalFlowPyrLK(
self.prev_cam0_pyramid, self.curr_cam0_pyramid,
prev_cam0_points.astype(np.float32),
curr_cam0_points.astype(np.float32),
**self.config.lk_params)
# Mark those tracked points out of the image region as untracked.
for i, point in enumerate(curr_cam0_points):
if not track_inliers[i]:
continue
if (point[0] < 0 or point[0] > img.shape[1]-1 or
point[1] < 0 or point[1] > img.shape[0]-1):
track_inliers[i] = 0
# Collect the tracked points.
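        # select() filters a sequence by a boolean mask (in the style of
        # itertools.compress, imported above); it is assumed to be defined
        # elsewhere in this module.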
prev_tracked_ids = select(prev_ids, track_inliers)
prev_tracked_lifetime = select(prev_lifetime, track_inliers)
prev_tracked_cam0_points = select(prev_cam0_points, track_inliers)
prev_tracked_cam1_points = select(prev_cam1_points, track_inliers)
curr_tracked_cam0_points = select(curr_cam0_points, track_inliers)
# Number of features left after tracking.
self.num_features['after_tracking'] = len(curr_tracked_cam0_points)
# Outlier removal involves three steps, which forms a close
# loop between the previous and current frames of cam0 (left)
# and cam1 (right). Assuming the stereo matching between the
# previous cam0 and cam1 images are correct, the three steps are:
#
# prev frames cam0 ----------> cam1
# | |
# |ransac |ransac
# | stereo match |
# curr frames cam0 ----------> cam1
#
# 1) Stereo matching between current images of cam0 and cam1.
# 2) RANSAC between previous and current images of cam0.
# 3) RANSAC between previous and current images of cam1.
#
# For Step 3, tracking between the images is no longer needed.
# The stereo matching results are directly used in the RANSAC.
# Step 1: stereo matching.
curr_cam1_points, match_inliers = self.stereo_match(
curr_tracked_cam0_points)
prev_matched_ids = select(prev_tracked_ids, match_inliers)
prev_matched_lifetime = select(prev_tracked_lifetime, match_inliers)
prev_matched_cam0_points = select(prev_tracked_cam0_points, match_inliers)
prev_matched_cam1_points = select(prev_tracked_cam1_points, match_inliers)
curr_matched_cam0_points = select(curr_tracked_cam0_points, match_inliers)
curr_matched_cam1_points = select(curr_cam1_points, match_inliers)
# Number of features left after stereo matching.
self.num_features['after_matching'] = len(curr_matched_cam0_points)
# Step 2 and 3: RANSAC on temporal image pairs of cam0 and cam1.
# cam0_ransac_inliers = self.two_point_ransac(
# prev_matched_cam0_points, curr_matched_cam0_points,
# cam0_R_p_c, self.cam0_intrinsics,
# self.cam0_distortion_model, self.cam0_distortion_coeffs,
# self.config.ransac_threshold, 0.99)
# cam1_ransac_inliers = self.two_point_ransac(
# prev_matched_cam1_points, curr_matched_cam1_points,
# cam1_R_p_c, self.cam1_intrinsics,
# self.cam1_distortion_model, self.cam1_distortion_coeffs,
# self.config.ransac_threshold, 0.99)
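        # RANSAC is effectively disabled here: every stereo-matched feature is
        # provisionally marked as an inlier.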
cam0_ransac_inliers = [1] * len(prev_matched_cam0_points)
cam1_ransac_inliers = [1] * len(prev_matched_cam1_points)
# Number of features after ransac.
after_ransac = 0
for i in range(len(cam0_ransac_inliers)):
if not (cam0_ransac_inliers[i] and cam1_ransac_inliers[i]):
continue
row = int(curr_matched_cam0_points[i][1] / grid_height)
col = int(curr_matched_cam0_points[i][0] / grid_width)
code = row * self.config.grid_col + col
grid_new_feature = FeatureMetaData()
grid_new_feature.id = prev_matched_ids[i]
grid_new_feature.lifetime = prev_matched_lifetime[i] + 1
grid_new_feature.cam0_point = curr_matched_cam0_points[i]
grid_new_feature.cam1_point = curr_matched_cam1_points[i]
prev_matched_lifetime[i] += 1
self.curr_features[code].append(grid_new_feature)
after_ransac += 1
self.num_features['after_ransac'] = after_ransac
        # A tracking rate (features surviving tracking / previous features)
        # could be computed here for diagnostics:
        # prev_feature_num = sum([len(x) for x in self.prev_features])
        # curr_feature_num = sum([len(x) for x in self.curr_features])
def add_new_features(self):
"""
Detect new features on the image to ensure that the features are
uniformly distributed on the image.
"""
curr_img = self.cam0_curr_img_msg.image
grid_height, grid_width = self.get_grid_size(curr_img)
# Create a mask to avoid redetecting existing features.
mask = np.ones(curr_img.shape[:2], dtype='uint8')
for feature in chain.from_iterable(self.curr_features):
            x, y = map(int, feature.cam0_point)
            # Clamp the window start so negative indices don't wrap around.
            mask[max(y-3, 0):y+4, max(x-3, 0):x+4] = 0
# Detect new features.
new_features = self.detector.detect(curr_img, mask=mask)
# Collect the new detected features based on the grid.
# Select the ones with top response within each grid afterwards.
new_feature_sieve = [[] for _ in range(self.config.grid_num)]
for feature in new_features:
row = int(feature.pt[1] / grid_height)
col = int(feature.pt[0] / grid_width)
code = row * self.config.grid_col + col
new_feature_sieve[code].append(feature)
new_features = []
for features in new_feature_sieve:
if len(features) > self.config.grid_max_feature_num:
features = sorted(features, key=lambda x:x.response,
reverse=True)[:self.config.grid_max_feature_num]
new_features.append(features)
new_features = list(chain.from_iterable(new_features))
# Find the stereo matched points for the newly detected features.
cam0_points = [kp.pt for kp in new_features]
cam1_points, inlier_markers = self.stereo_match(cam0_points)
cam0_inliers, cam1_inliers, response_inliers = [], [], []
for i, inlier in enumerate(inlier_markers):
if not inlier:
continue
cam0_inliers.append(cam0_points[i])
cam1_inliers.append(cam1_points[i])
response_inliers.append(new_features[i].response)
        # As above, one could warn if
        # len(cam0_inliers) < max(5, len(new_features) * 0.1).
# Group the features into grids
grid_new_features = [[] for _ in range(self.config.grid_num)]
for i in range(len(cam0_inliers)):
cam0_point = cam0_inliers[i]
cam1_point = cam1_inliers[i]
response = response_inliers[i]
row = int(cam0_point[1] / grid_height)
col = int(cam0_point[0] / grid_width)
code = row*self.config.grid_col + col
new_feature = FeatureMetaData()
new_feature.response = response
new_feature.cam0_point = cam0_point
new_feature.cam1_point = cam1_point
grid_new_features[code].append(new_feature)
# Sort the new features in each grid based on its response.
# And collect new features within each grid with high response.
for i, new_features in enumerate(grid_new_features):
for feature in sorted(new_features, key=lambda x:x.response,
reverse=True)[:self.config.grid_min_feature_num]:
self.curr_features[i].append(feature)
self.curr_features[i][-1].id = self.next_feature_id
self.curr_features[i][-1].lifetime = 1
self.next_feature_id += 1
def prune_features(self):
"""
Remove some of the features of a grid in case there are too many
features inside of that grid, which ensures the number of features
within each grid is bounded.
"""
for i, features in enumerate(self.curr_features):
# Continue if the number of features in this grid does
# not exceed the upper bound.
if len(features) <= self.config.grid_max_feature_num:
continue
self.curr_features[i] = sorted(features, key=lambda x:x.lifetime,
reverse=True)[:self.config.grid_max_feature_num]
def load_features(self):
# load features
filename = self.config.result_dir + str(self.image_id) + ".npz"
self.curr_features = np.load(filename, allow_pickle=True)['arr_0']
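        # 'arr_0' is numpy's default key for the first positional array
        # passed to np.savez.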
self.image_id += 1
def save_features(self):
# save features
filename = self.config.result_dir + str(self.image_id) + ".npz"
np.savez(filename, self.curr_features)
self.image_id += 1
def publish(self):
"""
Publish the features on the current image including both the
tracked and newly detected ones.
"""
curr_ids = []
curr_cam0_points = []
curr_cam1_points = []
for feature in chain.from_iterable(self.curr_features):
curr_ids.append(feature.id)
curr_cam0_points.append(feature.cam0_point)
curr_cam1_points.append(feature.cam1_point)
curr_cam0_points_undistorted = self.undistort_points(
curr_cam0_points, self.cam0_intrinsics,
self.cam0_distortion_model, self.cam0_distortion_coeffs)
curr_cam1_points_undistorted = self.undistort_points(
curr_cam1_points, self.cam1_intrinsics,
self.cam1_distortion_model, self.cam1_distortion_coeffs)
features = []
for i in range(len(curr_ids)):
fm = FeatureMeasurement()
fm.id = curr_ids[i]
fm.u0 = curr_cam0_points_undistorted[i][0]
fm.v0 = curr_cam0_points_undistorted[i][1]
fm.u1 = curr_cam1_points_undistorted[i][0]
fm.v1 = curr_cam1_points_undistorted[i][1]
features.append(fm)
feature_msg = namedtuple('feature_msg', ['timestamp', 'features'])(
self.cam0_curr_img_msg.timestamp, features)
return feature_msg
def integrate_imu_data(self):
"""
Integrates the IMU gyro readings between the two consecutive images,
which is used for both tracking prediction and 2-point RANSAC.
Returns:
cam0_R_p_c: a rotation matrix which takes a vector from previous
cam0 frame to current cam0 frame.
cam1_R_p_c: a rotation matrix which takes a vector from previous
cam1 frame to current cam1 frame.
"""
# Find the start and the end limit within the imu msg buffer.
idx_begin = None
for i, msg in enumerate(self.imu_msg_buffer):
if msg.timestamp >= self.cam0_prev_img_msg.timestamp - 0.01:
idx_begin = i
break
idx_end = None
for i, msg in enumerate(self.imu_msg_buffer):
if msg.timestamp >= self.cam0_curr_img_msg.timestamp - 0.004:
idx_end = i
break
if idx_begin is None or idx_end is None:
return np.identity(3), np.identity(3)
# Compute the mean angular velocity in the IMU frame.
mean_ang_vel = np.zeros(3)
for i in range(idx_begin, idx_end):
mean_ang_vel += self.imu_msg_buffer[i].angular_velocity
if idx_end > idx_begin:
mean_ang_vel /= (idx_end - idx_begin)
# Transform the mean angular velocity from the IMU frame to the
# cam0 and cam1 frames.
cam0_mean_ang_vel = self.R_cam0_imu.T @ mean_ang_vel
cam1_mean_ang_vel = self.R_cam1_imu.T @ mean_ang_vel
# Compute the relative rotation.
dt = self.cam0_curr_img_msg.timestamp - self.cam0_prev_img_msg.timestamp
cam0_R_p_c = cv2.Rodrigues(cam0_mean_ang_vel * dt)[0].T
cam1_R_p_c = cv2.Rodrigues(cam1_mean_ang_vel * dt)[0].T
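        # cv2.Rodrigues(omega * dt) gives the incremental rotation over dt; the
        # transpose maps vectors from the previous frame into the current one.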
# Delete the useless and used imu messages.
self.imu_msg_buffer = self.imu_msg_buffer[idx_end:]
return cam0_R_p_c, cam1_R_p_c
def rescale_points(self, pts1, pts2):
"""
Arguments:
pts1: first set of points.
pts2: second set of points.
Returns:
pts1: scaled first set of points.
pts2: scaled second set of points.
scaling_factor: scaling factor
"""
scaling_factor = 0
for pt1, pt2 in zip(pts1, pts2):
scaling_factor += np.linalg.norm(pt1)
scaling_factor += np.linalg.norm(pt2)
scaling_factor = (len(pts1) + len(pts2)) / scaling_factor * np.sqrt(2)
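        # After scaling, the mean point norm across both sets equals sqrt(2).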
for i in range(len(pts1)):
pts1[i] *= scaling_factor
pts2[i] *= scaling_factor
return pts1, pts2, scaling_factor
# def two_point_ransac(self, pts1, pts2, R_p_c, intrinsics,
# distortion_model, distortion_coeffs,
# inlier_error, success_probability):
# """
# Applies two point ransac algorithm to mark the inliers in the input set.
# Arguments:
# pts1: first set of points.
# pts2: second set of points.
# R_p_c: a rotation matrix takes a vector in the previous camera frame
# to the current camera frame.
# intrinsics: intrinsics of the camera.
# distortion_model: distortion model of the camera.
# distortion_coeffs: distortion coefficients.
# inlier_error: acceptable error to be considered as an inlier.
# success_probability: the required probability of success.
# Returns:
# inlier_flag: 1 for inliers and 0 for outliers.
# """
# # Check the size of input point size.
# assert len(pts1) == len(pts2), 'Sets of different size are used...'
# norm_pixel_unit = 2.0 / (intrinsics[0] + intrinsics[1])
# iter_num = int(np.ceil(np.log(1-success_probability) / np.log(1-0.7*0.7)))
# # Initially, mark all points as inliers.
# inlier_markers = [1] * len(pts1)
# # Undistort all the points.
# pts1_undistorted = self.undistort_points(pts1, intrinsics,
# distortion_model, distortion_coeffs)
# pts2_undistorted = self.undistort_points(pts2, intrinsics,
# distortion_model, distortion_coeffs)
    #     # Compensate the points in the previous image with
    #     # the relative rotation.
# for i, pt in enumerate(pts1_undistorted):
# pt_h = np.array([*pt, 1.0])
# pt_hc = R_p_c @ pt_h
# pts1_undistorted[i] = pt_hc[:2]
# # Normalize the points to gain numerical stability.
# pts1_undistorted, pts2_undistorted, scaling_factor = self.rescale_points(
# pts1_undistorted, pts2_undistorted)
# # Compute the difference between previous and current points,
# # which will be used frequently later.
# pts_diff = []
# for pt1, pt2 in zip(pts1_undistorted, pts2_undistorted):
# pts_diff.append(pt1 - pt2)
# # Mark the point pairs with large difference directly.
# # BTW, the mean distance of the rest of the point pairs are computed.
# mean_pt_distance = 0.0
# raw_inlier_count = 0
# for i, pt_diff in enumerate(pts_diff):
# distance = np.linalg.norm(pt_diff)
    #         # 50 pixel distance is a pretty large tolerance for normal
    #         # motion. However, to be used with aggressive motion, this
    #         # tolerance should be increased significantly to match the
    #         # usage.
    #         if distance > 50.0 * norm_pixel_unit:
# inlier_markers[i] = 0
# else:
# mean_pt_distance += distance
# raw_inlier_count += 1
    #     # If the current number of inliers is less than 3, just mark
    #     # all input as outliers. This case can happen with fast
    #     # rotation where very few features are tracked. Checking this
    #     # before dividing also avoids a division by zero.
    #     if raw_inlier_count < 3:
    #         return [0] * len(inlier_markers)
    #     mean_pt_distance /= raw_inlier_count
# # Before doing 2-point RANSAC, we have to check if the motion
# # is degenerated, meaning that there is no translation between
# # the frames, in which case, the model of the RANSAC does not work.
# # If so, the distance between the matched points will be almost 0.
# if mean_pt_distance < norm_pixel_unit:
# for i, pt_diff in enumerate(pts_diff):
# if inlier_markers[i] == 0:
# continue
# if np.linalg.norm(pt_diff) > inlier_error * norm_pixel_unit:
# inlier_markers[i] = 0
# return inlier_markers
# # In the case of general motion, the RANSAC model can be applied.
# # The three column corresponds to tx, ty, and tz respectively.
# coeff_t = []
# for i, pt_diff in enumerate(pts_diff):
# coeff_t.append(np.array([
# pt_diff[1],
# -pt_diff[0],
    #             pts1_undistorted[i][0] * pts2_undistorted[i][1] -
    #             pts1_undistorted[i][1] * pts2_undistorted[i][0]]))
# coeff_t = np.array(coeff_t)
# raw_inlier_idx = np.where(inlier_markers)[0]
# best_inlier_set = []
# best_error = 1e10
# for i in range(iter_num):
    #         # Randomly select two point pairs. Drawing a position and a
    #         # nonzero offset efficiently avoids selecting the same pair
    #         # twice, while keeping both indices inside the raw inliers.
    #         pos1 = np.random.randint(len(raw_inlier_idx))
    #         offset = np.random.randint(1, len(raw_inlier_idx))
    #         pos2 = (pos1 + offset) % len(raw_inlier_idx)
    #         pair_idx1 = raw_inlier_idx[pos1]
    #         pair_idx2 = raw_inlier_idx[pos2]
# # Construct the model.
# coeff_t_ = np.array([coeff_t[pair_idx1], coeff_t[pair_idx2]])
# coeff_tx = coeff_t_[:, 0]
# coeff_ty = coeff_t_[:, 1]
# coeff_tz = coeff_t_[:, 2]
# coeff_l1_norm = np.linalg.norm(coeff_t_, 1, axis=0)
# base_indicator = np.argmin(coeff_l1_norm)
# if base_indicator == 0:
# A = np.array([coeff_ty, coeff_tz]).T
# solution = np.linalg.inv(A) @ (-coeff_tx)
# model = [1.0, *solution]
# elif base_indicator == 1:
# A = np.array([coeff_tx, coeff_tz]).T
# solution = np.linalg.inv(A) @ (-coeff_ty)
# model = [solution[0], 1.0, solution[1]]
# else:
# A = np.array([coeff_tx, coeff_ty]).T
# solution = np.linalg.inv(A) @ (-coeff_tz)
# model = [*solution, 1.0]
# # Find all the inliers among point pairs.
# error = coeff_t @ model
    #         inlier_set = []
    #         # Use a fresh loop variable so the RANSAC iteration counter
    #         # `i` above is not shadowed.
    #         for j, e in enumerate(error):
    #             if inlier_markers[j] == 0:
    #                 continue
    #             if np.abs(e) < inlier_error * norm_pixel_unit:
    #                 inlier_set.append(j)
# # If the number of inliers is small, the current model is
# # probably wrong.
# if len(inlier_set) < 0.2 * len(pts1_undistorted):
# continue
# # Refit the model using all of the possible inliers.
# coeff_t_ = coeff_t[inlier_set]
# coeff_tx_better = coeff_t_[:, 0]
# coeff_ty_better = coeff_t_[:, 1]
# coeff_tz_better = coeff_t_[:, 2]
# if base_indicator == 0:
# A = np.array([coeff_ty_better, coeff_tz_better]).T
# solution = np.linalg.inv(A.T @ A) @ A.T @ (-coeff_tx_better)
# model_better = [1.0, *solution]
# elif base_indicator == 1:
# A = np.array([coeff_tx_better, coeff_tz_better]).T
# solution = np.linalg.inv(A.T @ A) @ A.T @ (-coeff_ty_better)
# model_better = [solution[0], 1.0, solution[1]]
# else:
# A = np.array([coeff_tx_better, coeff_ty_better]).T
# solution = np.linalg.inv(A.T @ A) @ A.T @ (-coeff_tz_better)
# model_better = [*solution, 1.0]
    #         # Compute the error and update the best model if possible.
    #         new_error = coeff_t @ model_better
    #         this_error = np.mean([np.abs(new_error[j]) for j in inlier_set])
    #         if len(inlier_set) > len(best_inlier_set):
# best_error = this_error
# best_inlier_set = inlier_set
# # Fill in the markers.
# inlier_markers = [0] * len(pts1)
# for i in best_inlier_set:
# inlier_markers[i] = 1
# return inlier_markers
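    # The commented model above comes from the two-point epipolar
    # constraint under a known rotation: after compensating the previous
    # points with R_p_c, p2^T [t]_x p1 = 0 for the (scale-free) translation
    # t, so every match contributes one linear row coeff_t[i] @ t = 0.
    # A hedged sketch of scoring a hypothesized translation:
    #
    #   t_hyp = np.array([1.0, 0.0, 0.0])    # assumed unit translation
    #   residuals = coeff_t @ t_hyp          # near-zero rows are inliers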
def get_grid_size(self, img):
"""
# Size of each grid.
"""
grid_height = int(np.ceil(img.shape[0] / self.config.grid_row))
grid_width = int(np.ceil(img.shape[1] / self.config.grid_col))
return grid_height, grid_width
def predict_feature_tracking(self, input_pts, R_p_c, intrinsics):
"""
predictFeatureTracking Compensates the rotation between consecutive
camera frames so that feature tracking would be more robust and fast.
Arguments:
input_pts: features in the previous image to be tracked.
R_p_c: a rotation matrix takes a vector in the previous camera
frame to the current camera frame. (matrix33)
intrinsics: intrinsic matrix of the camera. (vec3)
Returns:
compensated_pts: predicted locations of the features in the
current image based on the provided rotation.
"""
# Return directly if there are no input features.
if len(input_pts) == 0:
return []
# Intrinsic matrix.
K = np.array([
[intrinsics[0], 0.0, intrinsics[2]],
[0.0, intrinsics[1], intrinsics[3]],
[0.0, 0.0, 1.0]])
H = K @ R_p_c @ np.linalg.inv(K)
compensated_pts = []
for i in range(len(input_pts)):
p1 = np.array([*input_pts[i], 1.0])
p2 = H @ p1
compensated_pts.append(p2[:2] / p2[2])
return np.array(compensated_pts, dtype=np.float32)
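    # Sanity sketch (assumed values): with an identity rotation the
    # homography H = K @ I @ inv(K) is the identity, so the predicted
    # locations equal the inputs:
    #
    #   pts = np.array([[100.0, 120.0]], dtype=np.float32)
    #   pred = self.predict_feature_tracking(
    #       pts, np.identity(3), self.cam0_intrinsics)
    #   # np.allclose(pred, pts)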
def stereo_match(self, cam0_points):
"""
Matches features with stereo image pairs.
Arguments:
cam0_points: points in the primary image.
Returns:
cam1_points: points in the secondary image.
inlier_markers: 1 if the match is valid, 0 otherwise.
"""
cam0_points = np.array(cam0_points)
        if len(cam0_points) == 0:
            # Return a matching (points, markers) pair so callers can
            # always unpack the result.
            return [], []
R_cam0_cam1 = self.R_cam1_imu.T @ self.R_cam0_imu
cam0_points_undistorted = self.undistort_points(
cam0_points, self.cam0_intrinsics,
self.cam0_distortion_model, self.cam0_distortion_coeffs, R_cam0_cam1)
cam1_points = self.distort_points(
cam0_points_undistorted, self.cam1_intrinsics,
self.cam1_distortion_model, self.cam1_distortion_coeffs)
cam1_points_copy = cam1_points.copy()
# Track features using LK optical flow method.
cam0_points = cam0_points.astype(np.float32)
cam1_points = cam1_points.astype(np.float32)
cam1_points, inlier_markers, _ = cv2.calcOpticalFlowPyrLK(
self.curr_cam0_pyramid, self.curr_cam1_pyramid,
cam0_points, cam1_points, **self.config.lk_params)
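        # Forward-backward consistency check: track the cam1 points back
        # into cam0 and reject matches whose round-trip error is large.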
cam0_points_, _, _ = cv2.calcOpticalFlowPyrLK(
self.curr_cam1_pyramid, self.curr_cam0_pyramid,
cam1_points, cam0_points.copy(), **self.config.lk_params)
err = np.linalg.norm(cam0_points - cam0_points_, axis=1)
# cam1_points_undistorted = self.undistort_points(
# cam1_points, self.cam1_intrinsics,
# self.cam1_distortion_model, self.cam1_distortion_coeffs, R_cam0_cam1)
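        # Gate on vertical disparity against the extrinsics-based
        # prediction: for a well-calibrated rig the tracked cam1 point
        # should stay close to the predicted row.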
disparity = np.abs(cam1_points_copy[:, 1] - cam1_points[:, 1])
inlier_markers = np.logical_and.reduce(
[inlier_markers.reshape(-1), err < 3, disparity < 20])
# Mark those tracked points out of the image region as untracked.
img = self.cam1_curr_img_msg.image
for i, point in enumerate(cam1_points):
if not inlier_markers[i]:
continue
if (point[0] < 0 or point[0] > img.shape[1]-1 or
point[1] < 0 or point[1] > img.shape[0]-1):
inlier_markers[i] = 0
# Compute the relative rotation between the cam0 frame and cam1 frame.
t_cam0_cam1 = self.R_cam1_imu.T @ (self.t_cam0_imu - self.t_cam1_imu)
# Compute the essential matrix.
E = skew(t_cam0_cam1) @ R_cam0_cam1
# Further remove outliers based on the known essential matrix.
cam0_points_undistorted = self.undistort_points(
cam0_points, self.cam0_intrinsics,
self.cam0_distortion_model, self.cam0_distortion_coeffs)
cam1_points_undistorted = self.undistort_points(
cam1_points, self.cam1_intrinsics,
self.cam1_distortion_model, self.cam1_distortion_coeffs)
norm_pixel_unit = 4.0 / (
self.cam0_intrinsics[0] + self.cam0_intrinsics[1] +
self.cam1_intrinsics[0] + self.cam1_intrinsics[1])
for i in range(len(cam0_points_undistorted)):
if not inlier_markers[i]:
continue
pt0 = np.array([*cam0_points_undistorted[i], 1.0])
pt1 = np.array([*cam1_points_undistorted[i], 1.0])
            epipolar_line = E @ pt0
            # Point-to-epipolar-line distance: |pt1 . l| / ||(l0, l1)||.
            error = np.abs(pt1 @ epipolar_line) / np.linalg.norm(
                epipolar_line[:2])
if error > self.config.stereo_threshold * norm_pixel_unit:
inlier_markers[i] = 0
return cam1_points, inlier_markers
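    # The epipolar gate above is the point-to-line distance
    # |pt1 . (E @ pt0)| / ||l[:2]|| in normalized coordinates; a hedged
    # standalone version of the same test:
    #
    #   l = E @ np.array([x0, y0, 1.0])      # epipolar line in cam1
    #   d = np.abs(np.array([x1, y1, 1.0]) @ l) / np.linalg.norm(l[:2])
    #   inlier = d <= stereo_threshold * norm_pixel_unit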
def undistort_points(self, pts_in, intrinsics, distortion_model,
distortion_coeffs, rectification_matrix=np.identity(3),
new_intrinsics=np.array([1, 1, 0, 0])):
"""
Arguments:
pts_in: points to be undistorted.
intrinsics: intrinsics of the camera.
distortion_model: distortion model of the camera.
distortion_coeffs: distortion coefficients.
rectification_matrix:
new_intrinsics:
Returns:
pts_out: undistorted points.
"""
if len(pts_in) == 0:
return []
pts_in = np.reshape(pts_in, (-1, 1, 2))
K = np.array([
[intrinsics[0], 0.0, intrinsics[2]],
[0.0, intrinsics[1], intrinsics[3]],
[0.0, 0.0, 1.0]])
K_new = np.array([
[new_intrinsics[0], 0.0, new_intrinsics[2]],
[0.0, new_intrinsics[1], new_intrinsics[3]],
[0.0, 0.0, 1.0]])
if distortion_model == 'equidistant':
pts_out = cv2.fisheye.undistortPoints(pts_in, K, distortion_coeffs,
rectification_matrix, K_new)
else: # default: 'radtan'
pts_out = cv2.undistortPoints(pts_in, K, distortion_coeffs, None,
rectification_matrix, K_new)
return pts_out.reshape((-1, 2))
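    # Usage note (a sketch, not extra functionality): with the default
    # new_intrinsics [1, 1, 0, 0] the output is in normalized image
    # coordinates, and passing a rectification matrix such as R_cam0_cam1
    # (as stereo_match does) rotates the rays into the other camera's
    # frame before reprojection:
    #
    #   pts_norm = self.undistort_points(
    #       pts_px, self.cam0_intrinsics,
    #       self.cam0_distortion_model, self.cam0_distortion_coeffs)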
def distort_points(self, pts_in, intrinsics, distortion_model,
distortion_coeffs):
"""
Arguments:
pts_in: points to be distorted.
intrinsics: intrinsics of the camera.
distortion_model: distortion model of the camera.
distortion_coeffs: distortion coefficients.
Returns:
pts_out: distorted points. (N, 2)
"""
if len(pts_in) == 0:
return []
K = np.array([
[intrinsics[0], 0.0, intrinsics[2]],
[0.0, intrinsics[1], intrinsics[3]],
[0.0, 0.0, 1.0]])
if distortion_model == 'equidistant':
pts_out = cv2.fisheye.distortPoints(pts_in, K, distortion_coeffs)
else: # default: 'radtan'
homogenous_pts = cv2.convertPointsToHomogeneous(pts_in)
pts_out, _ = cv2.projectPoints(homogenous_pts,
np.zeros(3), np.zeros(3), K, distortion_coeffs)
return pts_out.reshape((-1, 2))
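    # Hedged round-trip sketch: undistorting to normalized coordinates and
    # distorting with the same camera model should recover the original
    # pixels up to numerical error:
    #
    #   und = self.undistort_points(
    #       pts_px, self.cam0_intrinsics,
    #       self.cam0_distortion_model, self.cam0_distortion_coeffs)
    #   back = self.distort_points(
    #       und, self.cam0_intrinsics,
    #       self.cam0_distortion_model, self.cam0_distortion_coeffs)
    #   # np.allclose(back, pts_px, atol=1e-3)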
def draw_features_stereo(self):
img0 = self.cam0_curr_img_msg.image
img1 = self.cam1_curr_img_msg.image
kps0 = []
kps1 = []
matches = []
for feature in chain.from_iterable(self.curr_features):
matches.append(cv2.DMatch(len(kps0), len(kps0), 0))
kps0.append(cv2.KeyPoint(*feature.cam0_point, 1))
kps1.append(cv2.KeyPoint(*feature.cam1_point, 1))
img = cv2.drawMatches(img0, kps0, img1, kps1, matches, None, flags=2)
cv2.imshow('stereo features', img)
cv2.waitKey(1)
def skew(vec):
x, y, z = vec
return np.array([
[0, -z, y],
[z, 0, -x],
[-y, x, 0]])
def select(data, selectors):
return [d for d, s in zip(data, selectors) if s]
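

# Minimal self-checks for the module-level helpers (a sketch; run this file
# directly to execute them). Only pure-numpy utilities are exercised, so no
# camera data is required.
if __name__ == '__main__':
    v = np.array([1.0, 2.0, 3.0])
    S = skew(v)
    # A skew-symmetric matrix satisfies S.T == -S and S @ w == cross(v, w).
    assert np.allclose(S.T, -S)
    w = np.array([0.5, -1.0, 2.0])
    assert np.allclose(S @ w, np.cross(v, w))
    # select() keeps the entries whose marker is truthy.
    assert select(['a', 'b', 'c'], [1, 0, 1]) == ['a', 'c']
    print('helper self-checks passed')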
int(np.ceil(img.shape[1] / self.config.grid_col))\n return grid_height, grid_width\n\n def predict_feature_tracking(self, input_pts, R_p_c, intrinsics):\n \"\"\"\n predictFeatureTracking Compensates the rotation between consecutive \n camera frames so that feature tracking would be more robust and fast.\n\n Arguments:\n input_pts: features in the previous image to be tracked.\n R_p_c: a rotation matrix takes a vector in the previous camera \n frame to the current camera frame. (matrix33)\n intrinsics: intrinsic matrix of the camera. (vec3)\n\n Returns:\n compensated_pts: predicted locations of the features in the \n current image based on the provided rotation.\n \"\"\"\n if len(input_pts) == 0:\n return []\n K = np.array([[intrinsics[0], 0.0, intrinsics[2]], [0.0, intrinsics\n [1], intrinsics[3]], [0.0, 0.0, 1.0]])\n H = K @ R_p_c @ np.linalg.inv(K)\n compensated_pts = []\n for i in range(len(input_pts)):\n p1 = np.array([*input_pts[i], 1.0])\n p2 = H @ p1\n compensated_pts.append(p2[:2] / p2[2])\n return np.array(compensated_pts, dtype=np.float32)\n\n def stereo_match(self, cam0_points):\n \"\"\"\n Matches features with stereo image pairs.\n\n Arguments:\n cam0_points: points in the primary image.\n\n Returns:\n cam1_points: points in the secondary image.\n inlier_markers: 1 if the match is valid, 0 otherwise.\n \"\"\"\n cam0_points = np.array(cam0_points)\n if len(cam0_points) == 0:\n return []\n R_cam0_cam1 = self.R_cam1_imu.T @ self.R_cam0_imu\n cam0_points_undistorted = self.undistort_points(cam0_points, self.\n cam0_intrinsics, self.cam0_distortion_model, self.\n cam0_distortion_coeffs, R_cam0_cam1)\n cam1_points = self.distort_points(cam0_points_undistorted, self.\n cam1_intrinsics, self.cam1_distortion_model, self.\n cam1_distortion_coeffs)\n cam1_points_copy = cam1_points.copy()\n cam0_points = cam0_points.astype(np.float32)\n cam1_points = cam1_points.astype(np.float32)\n cam1_points, inlier_markers, _ = cv2.calcOpticalFlowPyrLK(self.\n curr_cam0_pyramid, self.curr_cam1_pyramid, cam0_points,\n cam1_points, **self.config.lk_params)\n cam0_points_, _, _ = cv2.calcOpticalFlowPyrLK(self.\n curr_cam1_pyramid, self.curr_cam0_pyramid, cam1_points,\n cam0_points.copy(), **self.config.lk_params)\n err = np.linalg.norm(cam0_points - cam0_points_, axis=1)\n disparity = np.abs(cam1_points_copy[:, 1] - cam1_points[:, 1])\n inlier_markers = np.logical_and.reduce([inlier_markers.reshape(-1),\n err < 3, disparity < 20])\n img = self.cam1_curr_img_msg.image\n for i, point in enumerate(cam1_points):\n if not inlier_markers[i]:\n continue\n if point[0] < 0 or point[0] > img.shape[1] - 1 or point[1\n ] < 0 or point[1] > img.shape[0] - 1:\n inlier_markers[i] = 0\n t_cam0_cam1 = self.R_cam1_imu.T @ (self.t_cam0_imu - self.t_cam1_imu)\n E = skew(t_cam0_cam1) @ R_cam0_cam1\n cam0_points_undistorted = self.undistort_points(cam0_points, self.\n cam0_intrinsics, self.cam0_distortion_model, self.\n cam0_distortion_coeffs)\n cam1_points_undistorted = self.undistort_points(cam1_points, self.\n cam1_intrinsics, self.cam1_distortion_model, self.\n cam1_distortion_coeffs)\n norm_pixel_unit = 4.0 / (self.cam0_intrinsics[0] + self.\n cam0_intrinsics[1] + self.cam1_intrinsics[0] + self.\n cam1_intrinsics[1])\n for i in range(len(cam0_points_undistorted)):\n if not inlier_markers[i]:\n continue\n pt0 = np.array([*cam0_points_undistorted[i], 1.0])\n pt1 = np.array([*cam1_points_undistorted[i], 1.0])\n epipolar_line = E @ pt0\n error = np.abs((pt1 * epipolar_line)[0]) / np.linalg.norm(\n epipolar_line[:2])\n if error 
> self.config.stereo_threshold * norm_pixel_unit:\n inlier_markers[i] = 0\n return cam1_points, inlier_markers\n\n def undistort_points(self, pts_in, intrinsics, distortion_model,\n distortion_coeffs, rectification_matrix=np.identity(3),\n new_intrinsics=np.array([1, 1, 0, 0])):\n \"\"\"\n Arguments:\n pts_in: points to be undistorted.\n intrinsics: intrinsics of the camera.\n distortion_model: distortion model of the camera.\n distortion_coeffs: distortion coefficients.\n rectification_matrix:\n new_intrinsics:\n\n Returns:\n pts_out: undistorted points.\n \"\"\"\n if len(pts_in) == 0:\n return []\n pts_in = np.reshape(pts_in, (-1, 1, 2))\n K = np.array([[intrinsics[0], 0.0, intrinsics[2]], [0.0, intrinsics\n [1], intrinsics[3]], [0.0, 0.0, 1.0]])\n K_new = np.array([[new_intrinsics[0], 0.0, new_intrinsics[2]], [0.0,\n new_intrinsics[1], new_intrinsics[3]], [0.0, 0.0, 1.0]])\n if distortion_model == 'equidistant':\n pts_out = cv2.fisheye.undistortPoints(pts_in, K,\n distortion_coeffs, rectification_matrix, K_new)\n else:\n pts_out = cv2.undistortPoints(pts_in, K, distortion_coeffs,\n None, rectification_matrix, K_new)\n return pts_out.reshape((-1, 2))\n\n def distort_points(self, pts_in, intrinsics, distortion_model,\n distortion_coeffs):\n \"\"\"\n Arguments:\n pts_in: points to be distorted.\n intrinsics: intrinsics of the camera.\n distortion_model: distortion model of the camera.\n distortion_coeffs: distortion coefficients.\n\n Returns:\n pts_out: distorted points. (N, 2)\n \"\"\"\n if len(pts_in) == 0:\n return []\n K = np.array([[intrinsics[0], 0.0, intrinsics[2]], [0.0, intrinsics\n [1], intrinsics[3]], [0.0, 0.0, 1.0]])\n if distortion_model == 'equidistant':\n pts_out = cv2.fisheye.distortPoints(pts_in, K, distortion_coeffs)\n else:\n homogenous_pts = cv2.convertPointsToHomogeneous(pts_in)\n pts_out, _ = cv2.projectPoints(homogenous_pts, np.zeros(3), np.\n zeros(3), K, distortion_coeffs)\n return pts_out.reshape((-1, 2))\n\n def draw_features_stereo(self):\n img0 = self.cam0_curr_img_msg.image\n img1 = self.cam1_curr_img_msg.image\n kps0 = []\n kps1 = []\n matches = []\n for feature in chain.from_iterable(self.curr_features):\n matches.append(cv2.DMatch(len(kps0), len(kps0), 0))\n kps0.append(cv2.KeyPoint(*feature.cam0_point, 1))\n kps1.append(cv2.KeyPoint(*feature.cam1_point, 1))\n img = cv2.drawMatches(img0, kps0, img1, kps1, matches, None, flags=2)\n cv2.imshow('stereo features', img)\n cv2.waitKey(1)\n\n\n<mask token>\n",
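Illustrative aside (not part of the dataset row above): the `stereo_match` body in these step strings rejects candidate matches by point-to-epipolar-line distance, with the essential matrix built as `E = skew(t_cam0_cam1) @ R_cam0_cam1`. A minimal standalone sketch of that test, assuming normalized image coordinates and made-up extrinsics `R`/`t` (the full 3-term dot product is used for the numerator):

```python
import numpy as np

def skew(v):
    """3x3 skew-symmetric matrix: skew(v) @ u == np.cross(v, u)."""
    return np.array([[0.0, -v[2], v[1]],
                     [v[2], 0.0, -v[0]],
                     [-v[1], v[0], 0.0]])

def epipolar_error(p0, p1, R, t):
    """Distance of the homogeneous cam1 point to the epipolar line E @ p0."""
    E = skew(t) @ R                            # essential matrix from extrinsics
    line = E @ np.array([p0[0], p0[1], 1.0])   # epipolar line in cam1
    pt1 = np.array([p1[0], p1[1], 1.0])
    return abs(pt1 @ line) / np.linalg.norm(line[:2])

# Identity rotation plus pure x-translation gives horizontal epipolar lines,
# so a pair with equal y coordinates should score ~0:
print(epipolar_error((0.1, 0.2), (0.05, 0.2),
                     np.eye(3), np.array([1.0, 0.0, 0.0])))  # -> 0.0
```

In the step strings this error is compared against `stereo_threshold * norm_pixel_unit`, where `norm_pixel_unit` rescales the pixel tolerance into normalized coordinates using the four focal lengths.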
"step-4": "<mask token>\n\n\nclass FeatureMetaData(object):\n <mask token>\n <mask token>\n\n\nclass FeatureMeasurement(object):\n \"\"\"\n Stereo measurement of a feature.\n \"\"\"\n\n def __init__(self):\n self.id = None\n self.u0 = None\n self.v0 = None\n self.u1 = None\n self.v1 = None\n\n\nclass ImageProcessor(object):\n \"\"\"\n Detect and track features in image sequences.\n \"\"\"\n\n def __init__(self, config):\n self.config = config\n self.is_first_img = True\n self.next_feature_id = 0\n self.detector = cv2.FastFeatureDetector_create(self.config.\n fast_threshold)\n self.imu_msg_buffer = []\n self.cam0_prev_img_msg = None\n self.cam0_curr_img_msg = None\n self.cam1_curr_img_msg = None\n self.prev_cam0_pyramid = None\n self.curr_cam0_pyramid = None\n self.curr_cam1_pyramid = None\n self.prev_features = [[] for _ in range(self.config.grid_num)]\n self.curr_features = [[] for _ in range(self.config.grid_num)]\n self.num_features = defaultdict(int)\n self.cam0_resolution = config.cam0_resolution\n self.cam0_intrinsics = config.cam0_intrinsics\n self.cam0_distortion_model = config.cam0_distortion_model\n self.cam0_distortion_coeffs = config.cam0_distortion_coeffs\n self.cam1_resolution = config.cam1_resolution\n self.cam1_intrinsics = config.cam1_intrinsics\n self.cam1_distortion_model = config.cam1_distortion_model\n self.cam1_distortion_coeffs = config.cam1_distortion_coeffs\n self.T_cam0_imu = np.linalg.inv(config.T_imu_cam0)\n self.R_cam0_imu = self.T_cam0_imu[:3, :3]\n self.t_cam0_imu = self.T_cam0_imu[:3, 3]\n self.T_cam1_imu = np.linalg.inv(config.T_imu_cam1)\n self.R_cam1_imu = self.T_cam1_imu[:3, :3]\n self.t_cam1_imu = self.T_cam1_imu[:3, 3]\n self.image_id = 0\n\n def stereo_callback(self, stereo_msg):\n \"\"\"\n Callback function for the stereo images.\n \"\"\"\n start = time.time()\n self.cam0_curr_img_msg = stereo_msg.cam0_msg\n self.cam1_curr_img_msg = stereo_msg.cam1_msg\n self.create_image_pyramids()\n if self.is_first_img:\n if not self.config.load_features_flag:\n self.initialize_first_frame()\n self.is_first_img = False\n elif not self.config.load_features_flag:\n t = time.time()\n self.track_features()\n print('___track_features:', time.time() - t)\n t = time.time()\n self.add_new_features()\n print('___add_new_features:', time.time() - t)\n t = time.time()\n self.prune_features()\n print('___prune_features:', time.time() - t)\n t = time.time()\n print('___draw_features_stereo:', time.time() - t)\n t = time.time()\n print('===image process elapsed:', time.time() - start,\n f'({stereo_msg.timestamp})')\n if not self.config.load_features_flag:\n try:\n self.save_features()\n return self.publish()\n finally:\n self.cam0_prev_img_msg = self.cam0_curr_img_msg\n self.prev_features = self.curr_features\n self.prev_cam0_pyramid = self.curr_cam0_pyramid\n self.curr_features = [[] for _ in range(self.config.grid_num)]\n else:\n self.load_features()\n return self.publish()\n\n def imu_callback(self, msg):\n \"\"\"\n Callback function for the imu message.\n \"\"\"\n self.imu_msg_buffer.append(msg)\n\n def create_image_pyramids(self):\n \"\"\"\n Create image pyramids used for KLT tracking.\n (Seems doesn't work in python)\n \"\"\"\n curr_cam0_img = self.cam0_curr_img_msg.image\n self.curr_cam0_pyramid = curr_cam0_img\n curr_cam1_img = self.cam1_curr_img_msg.image\n self.curr_cam1_pyramid = curr_cam1_img\n\n def initialize_first_frame(self):\n \"\"\"\n Initialize the image processing sequence, which is basically detect \n new features on the first set of stereo images.\n 
\"\"\"\n img = self.cam0_curr_img_msg.image\n grid_height, grid_width = self.get_grid_size(img)\n new_features = self.detector.detect(img)\n cam0_points = [kp.pt for kp in new_features]\n cam1_points, inlier_markers = self.stereo_match(cam0_points)\n cam0_inliers, cam1_inliers = [], []\n response_inliers = []\n for i, inlier in enumerate(inlier_markers):\n if not inlier:\n continue\n cam0_inliers.append(cam0_points[i])\n cam1_inliers.append(cam1_points[i])\n response_inliers.append(new_features[i].response)\n grid_new_features = [[] for _ in range(self.config.grid_num)]\n for i in range(len(cam0_inliers)):\n cam0_point = cam0_inliers[i]\n cam1_point = cam1_inliers[i]\n response = response_inliers[i]\n row = int(cam0_point[1] / grid_height)\n col = int(cam0_point[0] / grid_width)\n code = row * self.config.grid_col + col\n new_feature = FeatureMetaData()\n new_feature.response = response\n new_feature.cam0_point = cam0_point\n new_feature.cam1_point = cam1_point\n grid_new_features[code].append(new_feature)\n for i, new_features in enumerate(grid_new_features):\n for feature in sorted(new_features, key=lambda x: x.response,\n reverse=True)[:self.config.grid_min_feature_num]:\n self.curr_features[i].append(feature)\n self.curr_features[i][-1].id = self.next_feature_id\n self.curr_features[i][-1].lifetime = 1\n self.next_feature_id += 1\n\n def track_features(self):\n \"\"\"\n Tracker features on the newly received stereo images.\n \"\"\"\n img = self.cam0_curr_img_msg.image\n grid_height, grid_width = self.get_grid_size(img)\n cam0_R_p_c, cam1_R_p_c = self.integrate_imu_data()\n prev_ids = []\n prev_lifetime = []\n prev_cam0_points = []\n prev_cam1_points = []\n for feature in chain.from_iterable(self.prev_features):\n prev_ids.append(feature.id)\n prev_lifetime.append(feature.lifetime)\n prev_cam0_points.append(feature.cam0_point)\n prev_cam1_points.append(feature.cam1_point)\n prev_cam0_points = np.array(prev_cam0_points, dtype=np.float32)\n self.num_features['before_tracking'] = len(prev_cam0_points)\n if len(prev_cam0_points) == 0:\n return\n curr_cam0_points = self.predict_feature_tracking(prev_cam0_points,\n cam0_R_p_c, self.cam0_intrinsics)\n curr_cam0_points, track_inliers, _ = cv2.calcOpticalFlowPyrLK(self.\n prev_cam0_pyramid, self.curr_cam0_pyramid, prev_cam0_points.\n astype(np.float32), curr_cam0_points.astype(np.float32), **self\n .config.lk_params)\n for i, point in enumerate(curr_cam0_points):\n if not track_inliers[i]:\n continue\n if point[0] < 0 or point[0] > img.shape[1] - 1 or point[1\n ] < 0 or point[1] > img.shape[0] - 1:\n track_inliers[i] = 0\n prev_tracked_ids = select(prev_ids, track_inliers)\n prev_tracked_lifetime = select(prev_lifetime, track_inliers)\n prev_tracked_cam0_points = select(prev_cam0_points, track_inliers)\n prev_tracked_cam1_points = select(prev_cam1_points, track_inliers)\n curr_tracked_cam0_points = select(curr_cam0_points, track_inliers)\n self.num_features['after_tracking'] = len(curr_tracked_cam0_points)\n curr_cam1_points, match_inliers = self.stereo_match(\n curr_tracked_cam0_points)\n prev_matched_ids = select(prev_tracked_ids, match_inliers)\n prev_matched_lifetime = select(prev_tracked_lifetime, match_inliers)\n prev_matched_cam0_points = select(prev_tracked_cam0_points,\n match_inliers)\n prev_matched_cam1_points = select(prev_tracked_cam1_points,\n match_inliers)\n curr_matched_cam0_points = select(curr_tracked_cam0_points,\n match_inliers)\n curr_matched_cam1_points = select(curr_cam1_points, match_inliers)\n 
self.num_features['after_matching'] = len(curr_matched_cam0_points)\n cam0_ransac_inliers = [1] * len(prev_matched_cam0_points)\n cam1_ransac_inliers = [1] * len(prev_matched_cam1_points)\n after_ransac = 0\n for i in range(len(cam0_ransac_inliers)):\n if not (cam0_ransac_inliers[i] and cam1_ransac_inliers[i]):\n continue\n row = int(curr_matched_cam0_points[i][1] / grid_height)\n col = int(curr_matched_cam0_points[i][0] / grid_width)\n code = row * self.config.grid_col + col\n grid_new_feature = FeatureMetaData()\n grid_new_feature.id = prev_matched_ids[i]\n grid_new_feature.lifetime = prev_matched_lifetime[i] + 1\n grid_new_feature.cam0_point = curr_matched_cam0_points[i]\n grid_new_feature.cam1_point = curr_matched_cam1_points[i]\n prev_matched_lifetime[i] += 1\n self.curr_features[code].append(grid_new_feature)\n after_ransac += 1\n self.num_features['after_ransac'] = after_ransac\n\n def add_new_features(self):\n \"\"\"\n Detect new features on the image to ensure that the features are \n uniformly distributed on the image.\n \"\"\"\n curr_img = self.cam0_curr_img_msg.image\n grid_height, grid_width = self.get_grid_size(curr_img)\n mask = np.ones(curr_img.shape[:2], dtype='uint8')\n for feature in chain.from_iterable(self.curr_features):\n x, y = map(int, feature.cam0_point)\n mask[y - 3:y + 4, x - 3:x + 4] = 0\n new_features = self.detector.detect(curr_img, mask=mask)\n new_feature_sieve = [[] for _ in range(self.config.grid_num)]\n for feature in new_features:\n row = int(feature.pt[1] / grid_height)\n col = int(feature.pt[0] / grid_width)\n code = row * self.config.grid_col + col\n new_feature_sieve[code].append(feature)\n new_features = []\n for features in new_feature_sieve:\n if len(features) > self.config.grid_max_feature_num:\n features = sorted(features, key=lambda x: x.response,\n reverse=True)[:self.config.grid_max_feature_num]\n new_features.append(features)\n new_features = list(chain.from_iterable(new_features))\n cam0_points = [kp.pt for kp in new_features]\n cam1_points, inlier_markers = self.stereo_match(cam0_points)\n cam0_inliers, cam1_inliers, response_inliers = [], [], []\n for i, inlier in enumerate(inlier_markers):\n if not inlier:\n continue\n cam0_inliers.append(cam0_points[i])\n cam1_inliers.append(cam1_points[i])\n response_inliers.append(new_features[i].response)\n grid_new_features = [[] for _ in range(self.config.grid_num)]\n for i in range(len(cam0_inliers)):\n cam0_point = cam0_inliers[i]\n cam1_point = cam1_inliers[i]\n response = response_inliers[i]\n row = int(cam0_point[1] / grid_height)\n col = int(cam0_point[0] / grid_width)\n code = row * self.config.grid_col + col\n new_feature = FeatureMetaData()\n new_feature.response = response\n new_feature.cam0_point = cam0_point\n new_feature.cam1_point = cam1_point\n grid_new_features[code].append(new_feature)\n for i, new_features in enumerate(grid_new_features):\n for feature in sorted(new_features, key=lambda x: x.response,\n reverse=True)[:self.config.grid_min_feature_num]:\n self.curr_features[i].append(feature)\n self.curr_features[i][-1].id = self.next_feature_id\n self.curr_features[i][-1].lifetime = 1\n self.next_feature_id += 1\n\n def prune_features(self):\n \"\"\"\n Remove some of the features of a grid in case there are too many \n features inside of that grid, which ensures the number of features \n within each grid is bounded.\n \"\"\"\n for i, features in enumerate(self.curr_features):\n if len(features) <= self.config.grid_max_feature_num:\n continue\n self.curr_features[i] = 
sorted(features, key=lambda x: x.\n lifetime, reverse=True)[:self.config.grid_max_feature_num]\n\n def load_features(self):\n filename = self.config.result_dir + str(self.image_id) + '.npz'\n self.curr_features = np.load(filename, allow_pickle=True)['arr_0']\n self.image_id += 1\n\n def save_features(self):\n filename = self.config.result_dir + str(self.image_id) + '.npz'\n np.savez(filename, self.curr_features)\n self.image_id += 1\n\n def publish(self):\n \"\"\"\n Publish the features on the current image including both the \n tracked and newly detected ones.\n \"\"\"\n curr_ids = []\n curr_cam0_points = []\n curr_cam1_points = []\n for feature in chain.from_iterable(self.curr_features):\n curr_ids.append(feature.id)\n curr_cam0_points.append(feature.cam0_point)\n curr_cam1_points.append(feature.cam1_point)\n curr_cam0_points_undistorted = self.undistort_points(curr_cam0_points,\n self.cam0_intrinsics, self.cam0_distortion_model, self.\n cam0_distortion_coeffs)\n curr_cam1_points_undistorted = self.undistort_points(curr_cam1_points,\n self.cam1_intrinsics, self.cam1_distortion_model, self.\n cam1_distortion_coeffs)\n features = []\n for i in range(len(curr_ids)):\n fm = FeatureMeasurement()\n fm.id = curr_ids[i]\n fm.u0 = curr_cam0_points_undistorted[i][0]\n fm.v0 = curr_cam0_points_undistorted[i][1]\n fm.u1 = curr_cam1_points_undistorted[i][0]\n fm.v1 = curr_cam1_points_undistorted[i][1]\n features.append(fm)\n feature_msg = namedtuple('feature_msg', ['timestamp', 'features'])(self\n .cam0_curr_img_msg.timestamp, features)\n return feature_msg\n\n def integrate_imu_data(self):\n \"\"\"\n Integrates the IMU gyro readings between the two consecutive images, \n which is used for both tracking prediction and 2-point RANSAC.\n\n Returns:\n cam0_R_p_c: a rotation matrix which takes a vector from previous \n cam0 frame to current cam0 frame.\n cam1_R_p_c: a rotation matrix which takes a vector from previous \n cam1 frame to current cam1 frame.\n \"\"\"\n idx_begin = None\n for i, msg in enumerate(self.imu_msg_buffer):\n if msg.timestamp >= self.cam0_prev_img_msg.timestamp - 0.01:\n idx_begin = i\n break\n idx_end = None\n for i, msg in enumerate(self.imu_msg_buffer):\n if msg.timestamp >= self.cam0_curr_img_msg.timestamp - 0.004:\n idx_end = i\n break\n if idx_begin is None or idx_end is None:\n return np.identity(3), np.identity(3)\n mean_ang_vel = np.zeros(3)\n for i in range(idx_begin, idx_end):\n mean_ang_vel += self.imu_msg_buffer[i].angular_velocity\n if idx_end > idx_begin:\n mean_ang_vel /= idx_end - idx_begin\n cam0_mean_ang_vel = self.R_cam0_imu.T @ mean_ang_vel\n cam1_mean_ang_vel = self.R_cam1_imu.T @ mean_ang_vel\n dt = (self.cam0_curr_img_msg.timestamp - self.cam0_prev_img_msg.\n timestamp)\n cam0_R_p_c = cv2.Rodrigues(cam0_mean_ang_vel * dt)[0].T\n cam1_R_p_c = cv2.Rodrigues(cam1_mean_ang_vel * dt)[0].T\n self.imu_msg_buffer = self.imu_msg_buffer[idx_end:]\n return cam0_R_p_c, cam1_R_p_c\n\n def rescale_points(self, pts1, pts2):\n \"\"\"\n Arguments:\n pts1: first set of points.\n pts2: second set of points.\n\n Returns:\n pts1: scaled first set of points.\n pts2: scaled second set of points.\n scaling_factor: scaling factor\n \"\"\"\n scaling_factor = 0\n for pt1, pt2 in zip(pts1, pts2):\n scaling_factor += np.linalg.norm(pt1)\n scaling_factor += np.linalg.norm(pt2)\n scaling_factor = (len(pts1) + len(pts2)) / scaling_factor * np.sqrt(2)\n for i in range(len(pts1)):\n pts1[i] *= scaling_factor\n pts2[i] *= scaling_factor\n return pts1, pts2, scaling_factor\n\n def 
get_grid_size(self, img):\n \"\"\"\n # Size of each grid.\n \"\"\"\n grid_height = int(np.ceil(img.shape[0] / self.config.grid_row))\n grid_width = int(np.ceil(img.shape[1] / self.config.grid_col))\n return grid_height, grid_width\n\n def predict_feature_tracking(self, input_pts, R_p_c, intrinsics):\n \"\"\"\n predictFeatureTracking Compensates the rotation between consecutive \n camera frames so that feature tracking would be more robust and fast.\n\n Arguments:\n input_pts: features in the previous image to be tracked.\n R_p_c: a rotation matrix takes a vector in the previous camera \n frame to the current camera frame. (matrix33)\n intrinsics: intrinsic matrix of the camera. (vec3)\n\n Returns:\n compensated_pts: predicted locations of the features in the \n current image based on the provided rotation.\n \"\"\"\n if len(input_pts) == 0:\n return []\n K = np.array([[intrinsics[0], 0.0, intrinsics[2]], [0.0, intrinsics\n [1], intrinsics[3]], [0.0, 0.0, 1.0]])\n H = K @ R_p_c @ np.linalg.inv(K)\n compensated_pts = []\n for i in range(len(input_pts)):\n p1 = np.array([*input_pts[i], 1.0])\n p2 = H @ p1\n compensated_pts.append(p2[:2] / p2[2])\n return np.array(compensated_pts, dtype=np.float32)\n\n def stereo_match(self, cam0_points):\n \"\"\"\n Matches features with stereo image pairs.\n\n Arguments:\n cam0_points: points in the primary image.\n\n Returns:\n cam1_points: points in the secondary image.\n inlier_markers: 1 if the match is valid, 0 otherwise.\n \"\"\"\n cam0_points = np.array(cam0_points)\n if len(cam0_points) == 0:\n return []\n R_cam0_cam1 = self.R_cam1_imu.T @ self.R_cam0_imu\n cam0_points_undistorted = self.undistort_points(cam0_points, self.\n cam0_intrinsics, self.cam0_distortion_model, self.\n cam0_distortion_coeffs, R_cam0_cam1)\n cam1_points = self.distort_points(cam0_points_undistorted, self.\n cam1_intrinsics, self.cam1_distortion_model, self.\n cam1_distortion_coeffs)\n cam1_points_copy = cam1_points.copy()\n cam0_points = cam0_points.astype(np.float32)\n cam1_points = cam1_points.astype(np.float32)\n cam1_points, inlier_markers, _ = cv2.calcOpticalFlowPyrLK(self.\n curr_cam0_pyramid, self.curr_cam1_pyramid, cam0_points,\n cam1_points, **self.config.lk_params)\n cam0_points_, _, _ = cv2.calcOpticalFlowPyrLK(self.\n curr_cam1_pyramid, self.curr_cam0_pyramid, cam1_points,\n cam0_points.copy(), **self.config.lk_params)\n err = np.linalg.norm(cam0_points - cam0_points_, axis=1)\n disparity = np.abs(cam1_points_copy[:, 1] - cam1_points[:, 1])\n inlier_markers = np.logical_and.reduce([inlier_markers.reshape(-1),\n err < 3, disparity < 20])\n img = self.cam1_curr_img_msg.image\n for i, point in enumerate(cam1_points):\n if not inlier_markers[i]:\n continue\n if point[0] < 0 or point[0] > img.shape[1] - 1 or point[1\n ] < 0 or point[1] > img.shape[0] - 1:\n inlier_markers[i] = 0\n t_cam0_cam1 = self.R_cam1_imu.T @ (self.t_cam0_imu - self.t_cam1_imu)\n E = skew(t_cam0_cam1) @ R_cam0_cam1\n cam0_points_undistorted = self.undistort_points(cam0_points, self.\n cam0_intrinsics, self.cam0_distortion_model, self.\n cam0_distortion_coeffs)\n cam1_points_undistorted = self.undistort_points(cam1_points, self.\n cam1_intrinsics, self.cam1_distortion_model, self.\n cam1_distortion_coeffs)\n norm_pixel_unit = 4.0 / (self.cam0_intrinsics[0] + self.\n cam0_intrinsics[1] + self.cam1_intrinsics[0] + self.\n cam1_intrinsics[1])\n for i in range(len(cam0_points_undistorted)):\n if not inlier_markers[i]:\n continue\n pt0 = np.array([*cam0_points_undistorted[i], 1.0])\n pt1 = 
np.array([*cam1_points_undistorted[i], 1.0])\n epipolar_line = E @ pt0\n error = np.abs((pt1 * epipolar_line)[0]) / np.linalg.norm(\n epipolar_line[:2])\n if error > self.config.stereo_threshold * norm_pixel_unit:\n inlier_markers[i] = 0\n return cam1_points, inlier_markers\n\n def undistort_points(self, pts_in, intrinsics, distortion_model,\n distortion_coeffs, rectification_matrix=np.identity(3),\n new_intrinsics=np.array([1, 1, 0, 0])):\n \"\"\"\n Arguments:\n pts_in: points to be undistorted.\n intrinsics: intrinsics of the camera.\n distortion_model: distortion model of the camera.\n distortion_coeffs: distortion coefficients.\n rectification_matrix:\n new_intrinsics:\n\n Returns:\n pts_out: undistorted points.\n \"\"\"\n if len(pts_in) == 0:\n return []\n pts_in = np.reshape(pts_in, (-1, 1, 2))\n K = np.array([[intrinsics[0], 0.0, intrinsics[2]], [0.0, intrinsics\n [1], intrinsics[3]], [0.0, 0.0, 1.0]])\n K_new = np.array([[new_intrinsics[0], 0.0, new_intrinsics[2]], [0.0,\n new_intrinsics[1], new_intrinsics[3]], [0.0, 0.0, 1.0]])\n if distortion_model == 'equidistant':\n pts_out = cv2.fisheye.undistortPoints(pts_in, K,\n distortion_coeffs, rectification_matrix, K_new)\n else:\n pts_out = cv2.undistortPoints(pts_in, K, distortion_coeffs,\n None, rectification_matrix, K_new)\n return pts_out.reshape((-1, 2))\n\n def distort_points(self, pts_in, intrinsics, distortion_model,\n distortion_coeffs):\n \"\"\"\n Arguments:\n pts_in: points to be distorted.\n intrinsics: intrinsics of the camera.\n distortion_model: distortion model of the camera.\n distortion_coeffs: distortion coefficients.\n\n Returns:\n pts_out: distorted points. (N, 2)\n \"\"\"\n if len(pts_in) == 0:\n return []\n K = np.array([[intrinsics[0], 0.0, intrinsics[2]], [0.0, intrinsics\n [1], intrinsics[3]], [0.0, 0.0, 1.0]])\n if distortion_model == 'equidistant':\n pts_out = cv2.fisheye.distortPoints(pts_in, K, distortion_coeffs)\n else:\n homogenous_pts = cv2.convertPointsToHomogeneous(pts_in)\n pts_out, _ = cv2.projectPoints(homogenous_pts, np.zeros(3), np.\n zeros(3), K, distortion_coeffs)\n return pts_out.reshape((-1, 2))\n\n def draw_features_stereo(self):\n img0 = self.cam0_curr_img_msg.image\n img1 = self.cam1_curr_img_msg.image\n kps0 = []\n kps1 = []\n matches = []\n for feature in chain.from_iterable(self.curr_features):\n matches.append(cv2.DMatch(len(kps0), len(kps0), 0))\n kps0.append(cv2.KeyPoint(*feature.cam0_point, 1))\n kps1.append(cv2.KeyPoint(*feature.cam1_point, 1))\n img = cv2.drawMatches(img0, kps0, img1, kps1, matches, None, flags=2)\n cv2.imshow('stereo features', img)\n cv2.waitKey(1)\n\n\n<mask token>\n",
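Illustrative aside for the `integrate_imu_data` method in the step strings above: the buffered gyro messages between the two image timestamps are averaged, rotated into each camera frame via `R_cam*_imu.T`, and the mean rate times the inter-frame interval is converted to a rotation matrix with `cv2.Rodrigues`; the transpose gives the previous-to-current rotation used for tracking prediction. A hedged sketch with made-up numbers:

```python
import cv2
import numpy as np

# Assumed inputs: mean gyro reading already expressed in the camera frame
# (rad/s) and the interval between the two image timestamps (s).
cam_mean_ang_vel = np.array([0.0, 0.0, 0.2])   # slow yaw, made-up value
dt = 0.05

# cv2.Rodrigues maps the rotation vector w*dt to a rotation matrix; the
# transpose yields the previous->current rotation that the tracker applies
# to predict feature locations (and that 2-point RANSAC would use).
cam_R_p_c = cv2.Rodrigues(cam_mean_ang_vel * dt)[0].T
print(cam_R_p_c)
```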
"step-5": "import numpy as np\r\nimport cv2\r\nimport time\r\n\r\nfrom itertools import chain, compress\r\nfrom collections import defaultdict, namedtuple\r\n\r\n\r\n\r\nclass FeatureMetaData(object):\r\n \"\"\"\r\n Contain necessary information of a feature for easy access.\r\n \"\"\"\r\n def __init__(self):\r\n self.id = None # int\r\n self.response = None # float\r\n self.lifetime = None # int\r\n self.cam0_point = None # vec2\r\n self.cam1_point = None # vec2\r\n\r\n\r\nclass FeatureMeasurement(object):\r\n \"\"\"\r\n Stereo measurement of a feature.\r\n \"\"\"\r\n def __init__(self):\r\n self.id = None\r\n self.u0 = None\r\n self.v0 = None\r\n self.u1 = None\r\n self.v1 = None\r\n\r\n\r\n\r\nclass ImageProcessor(object):\r\n \"\"\"\r\n Detect and track features in image sequences.\r\n \"\"\"\r\n def __init__(self, config):\r\n self.config = config\r\n\r\n # Indicate if this is the first image message.\r\n self.is_first_img = True\r\n\r\n # ID for the next new feature.\r\n self.next_feature_id = 0\r\n\r\n # Feature detector\r\n self.detector = cv2.FastFeatureDetector_create(self.config.fast_threshold)\r\n\r\n # IMU message buffer.\r\n self.imu_msg_buffer = []\r\n\r\n # Previous and current images\r\n self.cam0_prev_img_msg = None\r\n self.cam0_curr_img_msg = None\r\n self.cam1_curr_img_msg = None\r\n\r\n # Pyramids for previous and current image\r\n self.prev_cam0_pyramid = None\r\n self.curr_cam0_pyramid = None\r\n self.curr_cam1_pyramid = None\r\n\r\n # Features in the previous and current image.\r\n # list of lists of FeatureMetaData\r\n self.prev_features = [[] for _ in range(self.config.grid_num)] # Don't use [[]] * N\r\n self.curr_features = [[] for _ in range(self.config.grid_num)]\r\n\r\n # Number of features after each outlier removal step.\r\n # keys: before_tracking, after_tracking, after_matching, after_ransac\r\n self.num_features = defaultdict(int)\r\n\r\n # load config\r\n # Camera calibration parameters\r\n self.cam0_resolution = config.cam0_resolution # vec2\r\n self.cam0_intrinsics = config.cam0_intrinsics # vec4\r\n self.cam0_distortion_model = config.cam0_distortion_model # string\r\n self.cam0_distortion_coeffs = config.cam0_distortion_coeffs # vec4\r\n\r\n self.cam1_resolution = config.cam1_resolution # vec2\r\n self.cam1_intrinsics = config.cam1_intrinsics # vec4\r\n self.cam1_distortion_model = config.cam1_distortion_model # string\r\n self.cam1_distortion_coeffs = config.cam1_distortion_coeffs # vec4\r\n\r\n # Take a vector from cam0 frame to the IMU frame.\r\n self.T_cam0_imu = np.linalg.inv(config.T_imu_cam0)\r\n self.R_cam0_imu = self.T_cam0_imu[:3, :3]\r\n self.t_cam0_imu = self.T_cam0_imu[:3, 3]\r\n # Take a vector from cam1 frame to the IMU frame.\r\n self.T_cam1_imu = np.linalg.inv(config.T_imu_cam1)\r\n self.R_cam1_imu = self.T_cam1_imu[:3, :3]\r\n self.t_cam1_imu = self.T_cam1_imu[:3, 3]\r\n\r\n self.image_id = 0\r\n\r\n def stereo_callback(self, stereo_msg):\r\n \"\"\"\r\n Callback function for the stereo images.\r\n \"\"\"\r\n start = time.time()\r\n self.cam0_curr_img_msg = stereo_msg.cam0_msg\r\n self.cam1_curr_img_msg = stereo_msg.cam1_msg\r\n\r\n # Build the image pyramids once since they're used at multiple places.\r\n self.create_image_pyramids()\r\n\r\n # Detect features in the first frame.\r\n if self.is_first_img:\r\n if not self.config.load_features_flag:\r\n self.initialize_first_frame()\r\n self.is_first_img = False\r\n # Draw results.\r\n # self.draw_features_stereo()\r\n else:\r\n if not self.config.load_features_flag:\r\n # Track the 
feature in the previous image.\r\n t = time.time()\r\n self.track_features()\r\n print('___track_features:', time.time() - t)\r\n t = time.time()\r\n\r\n # Add new features into the current image.\r\n self.add_new_features()\r\n print('___add_new_features:', time.time() - t)\r\n t = time.time()\r\n self.prune_features()\r\n print('___prune_features:', time.time() - t)\r\n t = time.time()\r\n # Draw results.\r\n # self.draw_features_stereo()\r\n print('___draw_features_stereo:', time.time() - t)\r\n t = time.time()\r\n\r\n print('===image process elapsed:', time.time() - start, f'({stereo_msg.timestamp})')\r\n\r\n if not self.config.load_features_flag:\r\n try:\r\n self.save_features() \r\n return self.publish()\r\n finally:\r\n self.cam0_prev_img_msg = self.cam0_curr_img_msg\r\n self.prev_features = self.curr_features\r\n self.prev_cam0_pyramid = self.curr_cam0_pyramid\r\n\r\n # Initialize the current features to empty vectors.\r\n self.curr_features = [[] for _ in range(self.config.grid_num)]\r\n else:\r\n self.load_features()\r\n return self.publish()\r\n\r\n def imu_callback(self, msg):\r\n \"\"\"\r\n Callback function for the imu message.\r\n \"\"\"\r\n self.imu_msg_buffer.append(msg)\r\n\r\n def create_image_pyramids(self):\r\n \"\"\"\r\n Create image pyramids used for KLT tracking.\r\n (Seems doesn't work in python)\r\n \"\"\"\r\n curr_cam0_img = self.cam0_curr_img_msg.image\r\n # self.curr_cam0_pyramid = cv2.buildOpticalFlowPyramid(\r\n # curr_cam0_img, self.config.win_size, self.config.pyramid_levels, \r\n # None, cv2.BORDER_REFLECT_101, cv2.BORDER_CONSTANT, False)[1]\r\n self.curr_cam0_pyramid = curr_cam0_img\r\n\r\n curr_cam1_img = self.cam1_curr_img_msg.image\r\n # self.curr_cam1_pyramid = cv2.buildOpticalFlowPyramid(\r\n # curr_cam1_img, self.config.win_size, self.config.pyramid_levels, \r\n # None, cv2.BORDER_REFLECT_101, cv2.BORDER_CONSTANT, False)[1]\r\n self.curr_cam1_pyramid = curr_cam1_img\r\n\r\n def initialize_first_frame(self):\r\n \"\"\"\r\n Initialize the image processing sequence, which is basically detect \r\n new features on the first set of stereo images.\r\n \"\"\"\r\n img = self.cam0_curr_img_msg.image\r\n grid_height, grid_width = self.get_grid_size(img)\r\n\r\n # Detect new features on the frist image.\r\n new_features = self.detector.detect(img)\r\n\r\n # Find the stereo matched points for the newly detected features.\r\n cam0_points = [kp.pt for kp in new_features]\r\n cam1_points, inlier_markers = self.stereo_match(cam0_points)\r\n\r\n cam0_inliers, cam1_inliers = [], []\r\n response_inliers = []\r\n for i, inlier in enumerate(inlier_markers):\r\n if not inlier:\r\n continue\r\n cam0_inliers.append(cam0_points[i])\r\n cam1_inliers.append(cam1_points[i])\r\n response_inliers.append(new_features[i].response)\r\n # len(cam0_inliers) < max(5, 0.1 * len(new_features))\r\n\r\n # Group the features into grids\r\n grid_new_features = [[] for _ in range(self.config.grid_num)]\r\n\r\n for i in range(len(cam0_inliers)):\r\n cam0_point = cam0_inliers[i]\r\n cam1_point = cam1_inliers[i]\r\n response = response_inliers[i]\r\n\r\n row = int(cam0_point[1] / grid_height)\r\n col = int(cam0_point[0] / grid_width)\r\n code = row*self.config.grid_col + col\r\n\r\n new_feature = FeatureMetaData()\r\n new_feature.response = response\r\n new_feature.cam0_point = cam0_point\r\n new_feature.cam1_point = cam1_point\r\n grid_new_features[code].append(new_feature)\r\n\r\n # Sort the new features in each grid based on its response.\r\n # And collect new features within each grid with 
high response.\r\n for i, new_features in enumerate(grid_new_features):\r\n for feature in sorted(new_features, key=lambda x:x.response, \r\n reverse=True)[:self.config.grid_min_feature_num]:\r\n self.curr_features[i].append(feature)\r\n self.curr_features[i][-1].id = self.next_feature_id\r\n self.curr_features[i][-1].lifetime = 1\r\n self.next_feature_id += 1\r\n\r\n def track_features(self):\r\n \"\"\"\r\n Tracker features on the newly received stereo images.\r\n \"\"\"\r\n img = self.cam0_curr_img_msg.image\r\n grid_height, grid_width = self.get_grid_size(img)\r\n\r\n # Compute a rough relative rotation which takes a vector \r\n # from the previous frame to the current frame.\r\n cam0_R_p_c, cam1_R_p_c = self.integrate_imu_data()\r\n\r\n # Organize the features in the previous image.\r\n prev_ids = []\r\n prev_lifetime = []\r\n prev_cam0_points = []\r\n prev_cam1_points = []\r\n\r\n for feature in chain.from_iterable(self.prev_features):\r\n prev_ids.append(feature.id)\r\n prev_lifetime.append(feature.lifetime)\r\n prev_cam0_points.append(feature.cam0_point)\r\n prev_cam1_points.append(feature.cam1_point)\r\n prev_cam0_points = np.array(prev_cam0_points, dtype=np.float32)\r\n\r\n # Number of the features before tracking.\r\n self.num_features['before_tracking'] = len(prev_cam0_points)\r\n\r\n # Abort tracking if there is no features in the previous frame.\r\n if len(prev_cam0_points) == 0:\r\n return\r\n\r\n # Track features using LK optical flow method.\r\n curr_cam0_points = self.predict_feature_tracking(\r\n prev_cam0_points, cam0_R_p_c, self.cam0_intrinsics)\r\n\r\n curr_cam0_points, track_inliers, _ = cv2.calcOpticalFlowPyrLK(\r\n self.prev_cam0_pyramid, self.curr_cam0_pyramid,\r\n prev_cam0_points.astype(np.float32), \r\n curr_cam0_points.astype(np.float32), \r\n **self.config.lk_params)\r\n \r\n # Mark those tracked points out of the image region as untracked.\r\n for i, point in enumerate(curr_cam0_points):\r\n if not track_inliers[i]:\r\n continue\r\n if (point[0] < 0 or point[0] > img.shape[1]-1 or \r\n point[1] < 0 or point[1] > img.shape[0]-1):\r\n track_inliers[i] = 0\r\n\r\n # Collect the tracked points.\r\n prev_tracked_ids = select(prev_ids, track_inliers)\r\n prev_tracked_lifetime = select(prev_lifetime, track_inliers)\r\n prev_tracked_cam0_points = select(prev_cam0_points, track_inliers)\r\n prev_tracked_cam1_points = select(prev_cam1_points, track_inliers)\r\n curr_tracked_cam0_points = select(curr_cam0_points, track_inliers)\r\n\r\n # Number of features left after tracking.\r\n self.num_features['after_tracking'] = len(curr_tracked_cam0_points)\r\n\r\n # Outlier removal involves three steps, which forms a close\r\n # loop between the previous and current frames of cam0 (left)\r\n # and cam1 (right). 
Assuming the stereo matching between the\r\n # previous cam0 and cam1 images are correct, the three steps are:\r\n #\r\n # prev frames cam0 ----------> cam1\r\n # | |\r\n # |ransac |ransac\r\n # | stereo match |\r\n # curr frames cam0 ----------> cam1\r\n #\r\n # 1) Stereo matching between current images of cam0 and cam1.\r\n # 2) RANSAC between previous and current images of cam0.\r\n # 3) RANSAC between previous and current images of cam1.\r\n #\r\n # For Step 3, tracking between the images is no longer needed.\r\n # The stereo matching results are directly used in the RANSAC.\r\n\r\n # Step 1: stereo matching.\r\n curr_cam1_points, match_inliers = self.stereo_match(\r\n curr_tracked_cam0_points)\r\n\r\n prev_matched_ids = select(prev_tracked_ids, match_inliers)\r\n prev_matched_lifetime = select(prev_tracked_lifetime, match_inliers)\r\n prev_matched_cam0_points = select(prev_tracked_cam0_points, match_inliers)\r\n prev_matched_cam1_points = select(prev_tracked_cam1_points, match_inliers)\r\n curr_matched_cam0_points = select(curr_tracked_cam0_points, match_inliers)\r\n curr_matched_cam1_points = select(curr_cam1_points, match_inliers)\r\n\r\n # Number of features left after stereo matching.\r\n self.num_features['after_matching'] = len(curr_matched_cam0_points)\r\n\r\n # Step 2 and 3: RANSAC on temporal image pairs of cam0 and cam1.\r\n # cam0_ransac_inliers = self.two_point_ransac(\r\n # prev_matched_cam0_points, curr_matched_cam0_points,\r\n # cam0_R_p_c, self.cam0_intrinsics, \r\n # self.cam0_distortion_model, self.cam0_distortion_coeffs, \r\n # self.config.ransac_threshold, 0.99)\r\n\r\n # cam1_ransac_inliers = self.two_point_ransac(\r\n # prev_matched_cam1_points, curr_matched_cam1_points,\r\n # cam1_R_p_c, self.cam1_intrinsics, \r\n # self.cam1_distortion_model, self.cam1_distortion_coeffs, \r\n # self.config.ransac_threshold, 0.99)\r\n cam0_ransac_inliers = [1] * len(prev_matched_cam0_points)\r\n cam1_ransac_inliers = [1] * len(prev_matched_cam1_points)\r\n\r\n # Number of features after ransac.\r\n after_ransac = 0\r\n for i in range(len(cam0_ransac_inliers)):\r\n if not (cam0_ransac_inliers[i] and cam1_ransac_inliers[i]):\r\n continue \r\n row = int(curr_matched_cam0_points[i][1] / grid_height)\r\n col = int(curr_matched_cam0_points[i][0] / grid_width)\r\n code = row * self.config.grid_col + col\r\n\r\n grid_new_feature = FeatureMetaData()\r\n grid_new_feature.id = prev_matched_ids[i]\r\n grid_new_feature.lifetime = prev_matched_lifetime[i] + 1\r\n grid_new_feature.cam0_point = curr_matched_cam0_points[i]\r\n grid_new_feature.cam1_point = curr_matched_cam1_points[i]\r\n prev_matched_lifetime[i] += 1\r\n\r\n self.curr_features[code].append(grid_new_feature)\r\n after_ransac += 1\r\n self.num_features['after_ransac'] = after_ransac\r\n\r\n # Compute the tracking rate.\r\n # prev_feature_num = sum([len(x) for x in self.prev_features])\r\n # curr_feature_num = sum([len(x) for x in self.curr_features])\r\n \r\n\r\n def add_new_features(self):\r\n \"\"\"\r\n Detect new features on the image to ensure that the features are \r\n uniformly distributed on the image.\r\n \"\"\"\r\n curr_img = self.cam0_curr_img_msg.image\r\n grid_height, grid_width = self.get_grid_size(curr_img)\r\n\r\n # Create a mask to avoid redetecting existing features.\r\n mask = np.ones(curr_img.shape[:2], dtype='uint8')\r\n\r\n for feature in chain.from_iterable(self.curr_features):\r\n x, y = map(int, feature.cam0_point)\r\n mask[y-3:y+4, x-3:x+4] = 0\r\n\r\n # Detect new features.\r\n new_features = 
self.detector.detect(curr_img, mask=mask)\r\n\r\n # Collect the new detected features based on the grid.\r\n # Select the ones with top response within each grid afterwards.\r\n new_feature_sieve = [[] for _ in range(self.config.grid_num)]\r\n for feature in new_features:\r\n row = int(feature.pt[1] / grid_height)\r\n col = int(feature.pt[0] / grid_width)\r\n code = row * self.config.grid_col + col\r\n new_feature_sieve[code].append(feature)\r\n\r\n new_features = []\r\n for features in new_feature_sieve:\r\n if len(features) > self.config.grid_max_feature_num:\r\n features = sorted(features, key=lambda x:x.response, \r\n reverse=True)[:self.config.grid_max_feature_num]\r\n new_features.append(features)\r\n new_features = list(chain.from_iterable(new_features))\r\n\r\n # Find the stereo matched points for the newly detected features.\r\n cam0_points = [kp.pt for kp in new_features]\r\n cam1_points, inlier_markers = self.stereo_match(cam0_points)\r\n\r\n cam0_inliers, cam1_inliers, response_inliers = [], [], []\r\n for i, inlier in enumerate(inlier_markers):\r\n if not inlier:\r\n continue\r\n cam0_inliers.append(cam0_points[i])\r\n cam1_inliers.append(cam1_points[i])\r\n response_inliers.append(new_features[i].response)\r\n # if len(cam0_inliers) < max(5, len(new_features) * 0.1):\r\n\r\n # Group the features into grids\r\n grid_new_features = [[] for _ in range(self.config.grid_num)]\r\n for i in range(len(cam0_inliers)):\r\n cam0_point = cam0_inliers[i]\r\n cam1_point = cam1_inliers[i]\r\n response = response_inliers[i]\r\n\r\n row = int(cam0_point[1] / grid_height)\r\n col = int(cam0_point[0] / grid_width)\r\n code = row*self.config.grid_col + col\r\n\r\n new_feature = FeatureMetaData()\r\n new_feature.response = response\r\n new_feature.cam0_point = cam0_point\r\n new_feature.cam1_point = cam1_point\r\n grid_new_features[code].append(new_feature)\r\n\r\n # Sort the new features in each grid based on its response.\r\n # And collect new features within each grid with high response.\r\n for i, new_features in enumerate(grid_new_features):\r\n for feature in sorted(new_features, key=lambda x:x.response, \r\n reverse=True)[:self.config.grid_min_feature_num]:\r\n self.curr_features[i].append(feature)\r\n self.curr_features[i][-1].id = self.next_feature_id\r\n self.curr_features[i][-1].lifetime = 1\r\n self.next_feature_id += 1\r\n\r\n def prune_features(self):\r\n \"\"\"\r\n Remove some of the features of a grid in case there are too many \r\n features inside of that grid, which ensures the number of features \r\n within each grid is bounded.\r\n \"\"\"\r\n for i, features in enumerate(self.curr_features):\r\n # Continue if the number of features in this grid does\r\n # not exceed the upper bound.\r\n if len(features) <= self.config.grid_max_feature_num:\r\n continue\r\n self.curr_features[i] = sorted(features, key=lambda x:x.lifetime, \r\n reverse=True)[:self.config.grid_max_feature_num]\r\n\r\n def load_features(self):\r\n\r\n # load features \r\n filename = self.config.result_dir + str(self.image_id) + \".npz\"\r\n self.curr_features = np.load(filename, allow_pickle=True)['arr_0']\r\n self.image_id += 1 \r\n\r\n def save_features(self):\r\n \r\n # save features \r\n filename = self.config.result_dir + str(self.image_id) + \".npz\"\r\n np.savez(filename, self.curr_features)\r\n self.image_id += 1 \r\n\r\n def publish(self):\r\n \"\"\"\r\n Publish the features on the current image including both the \r\n tracked and newly detected ones.\r\n \"\"\"\r\n\r\n curr_ids = []\r\n curr_cam0_points 
= []\r\n curr_cam1_points = []\r\n for feature in chain.from_iterable(self.curr_features):\r\n curr_ids.append(feature.id)\r\n curr_cam0_points.append(feature.cam0_point)\r\n curr_cam1_points.append(feature.cam1_point)\r\n\r\n curr_cam0_points_undistorted = self.undistort_points(\r\n curr_cam0_points, self.cam0_intrinsics,\r\n self.cam0_distortion_model, self.cam0_distortion_coeffs)\r\n curr_cam1_points_undistorted = self.undistort_points(\r\n curr_cam1_points, self.cam1_intrinsics,\r\n self.cam1_distortion_model, self.cam1_distortion_coeffs)\r\n\r\n features = []\r\n for i in range(len(curr_ids)):\r\n fm = FeatureMeasurement()\r\n fm.id = curr_ids[i]\r\n fm.u0 = curr_cam0_points_undistorted[i][0]\r\n fm.v0 = curr_cam0_points_undistorted[i][1]\r\n fm.u1 = curr_cam1_points_undistorted[i][0]\r\n fm.v1 = curr_cam1_points_undistorted[i][1]\r\n features.append(fm)\r\n\r\n feature_msg = namedtuple('feature_msg', ['timestamp', 'features'])(\r\n self.cam0_curr_img_msg.timestamp, features)\r\n return feature_msg\r\n\r\n def integrate_imu_data(self):\r\n \"\"\"\r\n Integrates the IMU gyro readings between the two consecutive images, \r\n which is used for both tracking prediction and 2-point RANSAC.\r\n\r\n Returns:\r\n cam0_R_p_c: a rotation matrix which takes a vector from previous \r\n cam0 frame to current cam0 frame.\r\n cam1_R_p_c: a rotation matrix which takes a vector from previous \r\n cam1 frame to current cam1 frame.\r\n \"\"\"\r\n # Find the start and the end limit within the imu msg buffer.\r\n idx_begin = None\r\n for i, msg in enumerate(self.imu_msg_buffer):\r\n if msg.timestamp >= self.cam0_prev_img_msg.timestamp - 0.01:\r\n idx_begin = i\r\n break\r\n\r\n idx_end = None\r\n for i, msg in enumerate(self.imu_msg_buffer):\r\n if msg.timestamp >= self.cam0_curr_img_msg.timestamp - 0.004:\r\n idx_end = i\r\n break\r\n\r\n if idx_begin is None or idx_end is None:\r\n return np.identity(3), np.identity(3)\r\n\r\n # Compute the mean angular velocity in the IMU frame.\r\n mean_ang_vel = np.zeros(3)\r\n for i in range(idx_begin, idx_end):\r\n mean_ang_vel += self.imu_msg_buffer[i].angular_velocity\r\n\r\n if idx_end > idx_begin:\r\n mean_ang_vel /= (idx_end - idx_begin)\r\n\r\n # Transform the mean angular velocity from the IMU frame to the \r\n # cam0 and cam1 frames.\r\n cam0_mean_ang_vel = self.R_cam0_imu.T @ mean_ang_vel\r\n cam1_mean_ang_vel = self.R_cam1_imu.T @ mean_ang_vel\r\n\r\n # Compute the relative rotation.\r\n dt = self.cam0_curr_img_msg.timestamp - self.cam0_prev_img_msg.timestamp\r\n cam0_R_p_c = cv2.Rodrigues(cam0_mean_ang_vel * dt)[0].T\r\n cam1_R_p_c = cv2.Rodrigues(cam1_mean_ang_vel * dt)[0].T\r\n\r\n # Delete the useless and used imu messages.\r\n self.imu_msg_buffer = self.imu_msg_buffer[idx_end:]\r\n return cam0_R_p_c, cam1_R_p_c\r\n\r\n def rescale_points(self, pts1, pts2):\r\n \"\"\"\r\n Arguments:\r\n pts1: first set of points.\r\n pts2: second set of points.\r\n\r\n Returns:\r\n pts1: scaled first set of points.\r\n pts2: scaled second set of points.\r\n scaling_factor: scaling factor\r\n \"\"\"\r\n scaling_factor = 0\r\n for pt1, pt2 in zip(pts1, pts2):\r\n scaling_factor += np.linalg.norm(pt1)\r\n scaling_factor += np.linalg.norm(pt2)\r\n\r\n scaling_factor = (len(pts1) + len(pts2)) / scaling_factor * np.sqrt(2)\r\n\r\n for i in range(len(pts1)):\r\n pts1[i] *= scaling_factor\r\n pts2[i] *= scaling_factor\r\n\r\n return pts1, pts2, scaling_factor\r\n\r\n # def two_point_ransac(self, pts1, pts2, R_p_c, intrinsics, \r\n # distortion_model, distortion_coeffs,\r\n # 
inlier_error, success_probability):\r\n # \"\"\"\r\n # Applies two point ransac algorithm to mark the inliers in the input set.\r\n\r\n # Arguments:\r\n # pts1: first set of points.\r\n # pts2: second set of points.\r\n # R_p_c: a rotation matrix takes a vector in the previous camera frame \r\n # to the current camera frame.\r\n # intrinsics: intrinsics of the camera.\r\n # distortion_model: distortion model of the camera.\r\n # distortion_coeffs: distortion coefficients.\r\n # inlier_error: acceptable error to be considered as an inlier.\r\n # success_probability: the required probability of success.\r\n\r\n # Returns:\r\n # inlier_flag: 1 for inliers and 0 for outliers.\r\n # \"\"\"\r\n # # Check the size of input point size.\r\n # assert len(pts1) == len(pts2), 'Sets of different size are used...'\r\n\r\n # norm_pixel_unit = 2.0 / (intrinsics[0] + intrinsics[1])\r\n # iter_num = int(np.ceil(np.log(1-success_probability) / np.log(1-0.7*0.7)))\r\n\r\n # # Initially, mark all points as inliers.\r\n # inlier_markers = [1] * len(pts1)\r\n\r\n # # Undistort all the points.\r\n # pts1_undistorted = self.undistort_points(pts1, intrinsics, \r\n # distortion_model, distortion_coeffs)\r\n # pts2_undistorted = self.undistort_points(pts2, intrinsics, \r\n # distortion_model, distortion_coeffs)\r\n\r\n # # Compenstate the points in the previous image with\r\n # # the relative rotation.\r\n # for i, pt in enumerate(pts1_undistorted):\r\n # pt_h = np.array([*pt, 1.0])\r\n # pt_hc = R_p_c @ pt_h\r\n # pts1_undistorted[i] = pt_hc[:2]\r\n\r\n # # Normalize the points to gain numerical stability.\r\n # pts1_undistorted, pts2_undistorted, scaling_factor = self.rescale_points(\r\n # pts1_undistorted, pts2_undistorted)\r\n\r\n # # Compute the difference between previous and current points,\r\n # # which will be used frequently later.\r\n # pts_diff = []\r\n # for pt1, pt2 in zip(pts1_undistorted, pts2_undistorted):\r\n # pts_diff.append(pt1 - pt2)\r\n\r\n # # Mark the point pairs with large difference directly.\r\n # # BTW, the mean distance of the rest of the point pairs are computed.\r\n # mean_pt_distance = 0.0\r\n # raw_inlier_count = 0\r\n # for i, pt_diff in enumerate(pts_diff):\r\n # distance = np.linalg.norm(pt_diff)\r\n # # 25 pixel distance is a pretty large tolerance for normal motion.\r\n # # However, to be used with aggressive motion, this tolerance should\r\n # # be increased significantly to match the usage.\r\n # if distance > 50.0 * norm_pixel_unit:\r\n # inlier_markers[i] = 0\r\n # else:\r\n # mean_pt_distance += distance\r\n # raw_inlier_count += 1\r\n\r\n # mean_pt_distance /= raw_inlier_count\r\n\r\n # # If the current number of inliers is less than 3, just mark\r\n # # all input as outliers. This case can happen with fast\r\n # # rotation where very few features are tracked.\r\n # if raw_inlier_count < 3:\r\n # return [0] * len(inlier_markers)\r\n\r\n # # Before doing 2-point RANSAC, we have to check if the motion\r\n # # is degenerated, meaning that there is no translation between\r\n # # the frames, in which case, the model of the RANSAC does not work. 
\r\n # # If so, the distance between the matched points will be almost 0.\r\n # if mean_pt_distance < norm_pixel_unit:\r\n # for i, pt_diff in enumerate(pts_diff):\r\n # if inlier_markers[i] == 0:\r\n # continue\r\n # if np.linalg.norm(pt_diff) > inlier_error * norm_pixel_unit:\r\n # inlier_markers[i] = 0\r\n # return inlier_markers\r\n\r\n # # In the case of general motion, the RANSAC model can be applied.\r\n # # The three column corresponds to tx, ty, and tz respectively.\r\n # coeff_t = []\r\n # for i, pt_diff in enumerate(pts_diff):\r\n # coeff_t.append(np.array([\r\n # pt_diff[1],\r\n # -pt_diff[0],\r\n # pts1_undistorted[0] * pts2_undistorted[1] - \r\n # pts1_undistorted[1] * pts2_undistorted[0]]))\r\n # coeff_t = np.array(coeff_t)\r\n\r\n # raw_inlier_idx = np.where(inlier_markers)[0]\r\n # best_inlier_set = []\r\n # best_error = 1e10\r\n\r\n # for i in range(iter_num):\r\n # # Randomly select two point pairs.\r\n # # Although this is a weird way of selecting two pairs, but it\r\n # # is able to efficiently avoid selecting repetitive pairs.\r\n # pair_idx1 = np.random.choice(raw_inlier_idx)\r\n # idx_diff = np.random.randint(1, len(raw_inlier_idx))\r\n # pair_idx2 = (pair_idx1+idx_diff) % len(raw_inlier_idx)\r\n\r\n # # Construct the model.\r\n # coeff_t_ = np.array([coeff_t[pair_idx1], coeff_t[pair_idx2]])\r\n # coeff_tx = coeff_t_[:, 0]\r\n # coeff_ty = coeff_t_[:, 1]\r\n # coeff_tz = coeff_t_[:, 2]\r\n # coeff_l1_norm = np.linalg.norm(coeff_t_, 1, axis=0)\r\n # base_indicator = np.argmin(coeff_l1_norm)\r\n\r\n # if base_indicator == 0:\r\n # A = np.array([coeff_ty, coeff_tz]).T\r\n # solution = np.linalg.inv(A) @ (-coeff_tx)\r\n # model = [1.0, *solution]\r\n # elif base_indicator == 1:\r\n # A = np.array([coeff_tx, coeff_tz]).T\r\n # solution = np.linalg.inv(A) @ (-coeff_ty)\r\n # model = [solution[0], 1.0, solution[1]]\r\n # else:\r\n # A = np.array([coeff_tx, coeff_ty]).T\r\n # solution = np.linalg.inv(A) @ (-coeff_tz)\r\n # model = [*solution, 1.0]\r\n\r\n # # Find all the inliers among point pairs.\r\n # error = coeff_t @ model\r\n\r\n # inlier_set = []\r\n # for i, e in enumerate(error):\r\n # if inlier_markers[i] == 0:\r\n # continue\r\n # if np.abs(e) < inlier_error * norm_pixel_unit:\r\n # inlier_set.append(i)\r\n\r\n # # If the number of inliers is small, the current model is \r\n # # probably wrong.\r\n # if len(inlier_set) < 0.2 * len(pts1_undistorted):\r\n # continue\r\n\r\n # # Refit the model using all of the possible inliers.\r\n # coeff_t_ = coeff_t[inlier_set]\r\n # coeff_tx_better = coeff_t_[:, 0]\r\n # coeff_ty_better = coeff_t_[:, 1]\r\n # coeff_tz_better = coeff_t_[:, 2]\r\n\r\n # if base_indicator == 0:\r\n # A = np.array([coeff_ty_better, coeff_tz_better]).T\r\n # solution = np.linalg.inv(A.T @ A) @ A.T @ (-coeff_tx_better)\r\n # model_better = [1.0, *solution]\r\n # elif base_indicator == 1:\r\n # A = np.array([coeff_tx_better, coeff_tz_better]).T\r\n # solution = np.linalg.inv(A.T @ A) @ A.T @ (-coeff_ty_better)\r\n # model_better = [solution[0], 1.0, solution[1]]\r\n # else:\r\n # A = np.array([coeff_tx_better, coeff_ty_better]).T\r\n # solution = np.linalg.inv(A.T @ A) @ A.T @ (-coeff_tz_better)\r\n # model_better = [*solution, 1.0]\r\n\r\n # # Compute the error and upate the best model if possible.\r\n # new_error = coeff_t @ model_better\r\n # this_error = np.mean([np.abs(new_error[i]) for i in inlier_set])\r\n\r\n # if len(inlier_set) > best_inlier_set:\r\n # best_error = this_error\r\n # best_inlier_set = inlier_set\r\n\r\n # # Fill in the 
markers.\r\n # inlier_markers = [0] * len(pts1)\r\n # for i in best_inlier_set:\r\n # inlier_markers[i] = 1\r\n\r\n # return inlier_markers\r\n\r\n def get_grid_size(self, img):\r\n \"\"\"\r\n # Size of each grid.\r\n \"\"\"\r\n grid_height = int(np.ceil(img.shape[0] / self.config.grid_row))\r\n grid_width = int(np.ceil(img.shape[1] / self.config.grid_col))\r\n return grid_height, grid_width\r\n\r\n def predict_feature_tracking(self, input_pts, R_p_c, intrinsics):\r\n \"\"\"\r\n predictFeatureTracking Compensates the rotation between consecutive \r\n camera frames so that feature tracking would be more robust and fast.\r\n\r\n Arguments:\r\n input_pts: features in the previous image to be tracked.\r\n R_p_c: a rotation matrix takes a vector in the previous camera \r\n frame to the current camera frame. (matrix33)\r\n intrinsics: intrinsic matrix of the camera. (vec3)\r\n\r\n Returns:\r\n compensated_pts: predicted locations of the features in the \r\n current image based on the provided rotation.\r\n \"\"\"\r\n # Return directly if there are no input features.\r\n if len(input_pts) == 0:\r\n return []\r\n\r\n # Intrinsic matrix.\r\n K = np.array([\r\n [intrinsics[0], 0.0, intrinsics[2]],\r\n [0.0, intrinsics[1], intrinsics[3]],\r\n [0.0, 0.0, 1.0]])\r\n H = K @ R_p_c @ np.linalg.inv(K)\r\n\r\n compensated_pts = []\r\n for i in range(len(input_pts)):\r\n p1 = np.array([*input_pts[i], 1.0])\r\n p2 = H @ p1\r\n compensated_pts.append(p2[:2] / p2[2])\r\n return np.array(compensated_pts, dtype=np.float32)\r\n\r\n def stereo_match(self, cam0_points):\r\n \"\"\"\r\n Matches features with stereo image pairs.\r\n\r\n Arguments:\r\n cam0_points: points in the primary image.\r\n\r\n Returns:\r\n cam1_points: points in the secondary image.\r\n inlier_markers: 1 if the match is valid, 0 otherwise.\r\n \"\"\"\r\n cam0_points = np.array(cam0_points)\r\n if len(cam0_points) == 0:\r\n return []\r\n\r\n R_cam0_cam1 = self.R_cam1_imu.T @ self.R_cam0_imu\r\n cam0_points_undistorted = self.undistort_points(\r\n cam0_points, self.cam0_intrinsics,\r\n self.cam0_distortion_model, self.cam0_distortion_coeffs, R_cam0_cam1)\r\n cam1_points = self.distort_points(\r\n cam0_points_undistorted, self.cam1_intrinsics,\r\n self.cam1_distortion_model, self.cam1_distortion_coeffs)\r\n cam1_points_copy = cam1_points.copy()\r\n\r\n # Track features using LK optical flow method.\r\n cam0_points = cam0_points.astype(np.float32)\r\n cam1_points = cam1_points.astype(np.float32)\r\n cam1_points, inlier_markers, _ = cv2.calcOpticalFlowPyrLK(\r\n self.curr_cam0_pyramid, self.curr_cam1_pyramid,\r\n cam0_points, cam1_points, **self.config.lk_params)\r\n\r\n cam0_points_, _, _ = cv2.calcOpticalFlowPyrLK(\r\n self.curr_cam1_pyramid, self.curr_cam0_pyramid, \r\n cam1_points, cam0_points.copy(), **self.config.lk_params)\r\n err = np.linalg.norm(cam0_points - cam0_points_, axis=1)\r\n\r\n # cam1_points_undistorted = self.undistort_points(\r\n # cam1_points, self.cam1_intrinsics,\r\n # self.cam1_distortion_model, self.cam1_distortion_coeffs, R_cam0_cam1)\r\n disparity = np.abs(cam1_points_copy[:, 1] - cam1_points[:, 1])\r\n \r\n\r\n \r\n inlier_markers = np.logical_and.reduce(\r\n [inlier_markers.reshape(-1), err < 3, disparity < 20])\r\n\r\n # Mark those tracked points out of the image region as untracked.\r\n img = self.cam1_curr_img_msg.image\r\n for i, point in enumerate(cam1_points):\r\n if not inlier_markers[i]:\r\n continue\r\n if (point[0] < 0 or point[0] > img.shape[1]-1 or \r\n point[1] < 0 or point[1] > img.shape[0]-1):\r\n 
inlier_markers[i] = 0\r\n\r\n # Compute the relative rotation between the cam0 frame and cam1 frame.\r\n t_cam0_cam1 = self.R_cam1_imu.T @ (self.t_cam0_imu - self.t_cam1_imu)\r\n # Compute the essential matrix.\r\n E = skew(t_cam0_cam1) @ R_cam0_cam1\r\n\r\n # Further remove outliers based on the known essential matrix.\r\n cam0_points_undistorted = self.undistort_points(\r\n cam0_points, self.cam0_intrinsics,\r\n self.cam0_distortion_model, self.cam0_distortion_coeffs)\r\n cam1_points_undistorted = self.undistort_points(\r\n cam1_points, self.cam1_intrinsics,\r\n self.cam1_distortion_model, self.cam1_distortion_coeffs)\r\n\r\n norm_pixel_unit = 4.0 / (\r\n self.cam0_intrinsics[0] + self.cam0_intrinsics[1] +\r\n self.cam1_intrinsics[0] + self.cam1_intrinsics[1])\r\n\r\n for i in range(len(cam0_points_undistorted)):\r\n if not inlier_markers[i]:\r\n continue\r\n pt0 = np.array([*cam0_points_undistorted[i], 1.0])\r\n pt1 = np.array([*cam1_points_undistorted[i], 1.0])\r\n epipolar_line = E @ pt0\r\n error = np.abs((pt1 * epipolar_line)[0]) / np.linalg.norm(\r\n epipolar_line[:2])\r\n\r\n if error > self.config.stereo_threshold * norm_pixel_unit:\r\n inlier_markers[i] = 0\r\n\r\n return cam1_points, inlier_markers\r\n\r\n def undistort_points(self, pts_in, intrinsics, distortion_model, \r\n distortion_coeffs, rectification_matrix=np.identity(3),\r\n new_intrinsics=np.array([1, 1, 0, 0])):\r\n \"\"\"\r\n Arguments:\r\n pts_in: points to be undistorted.\r\n intrinsics: intrinsics of the camera.\r\n distortion_model: distortion model of the camera.\r\n distortion_coeffs: distortion coefficients.\r\n rectification_matrix:\r\n new_intrinsics:\r\n\r\n Returns:\r\n pts_out: undistorted points.\r\n \"\"\"\r\n if len(pts_in) == 0:\r\n return []\r\n \r\n pts_in = np.reshape(pts_in, (-1, 1, 2))\r\n K = np.array([\r\n [intrinsics[0], 0.0, intrinsics[2]],\r\n [0.0, intrinsics[1], intrinsics[3]],\r\n [0.0, 0.0, 1.0]])\r\n K_new = np.array([\r\n [new_intrinsics[0], 0.0, new_intrinsics[2]],\r\n [0.0, new_intrinsics[1], new_intrinsics[3]],\r\n [0.0, 0.0, 1.0]])\r\n\r\n if distortion_model == 'equidistant':\r\n pts_out = cv2.fisheye.undistortPoints(pts_in, K, distortion_coeffs,\r\n rectification_matrix, K_new)\r\n else: # default: 'radtan'\r\n pts_out = cv2.undistortPoints(pts_in, K, distortion_coeffs, None,\r\n rectification_matrix, K_new)\r\n return pts_out.reshape((-1, 2))\r\n\r\n def distort_points(self, pts_in, intrinsics, distortion_model, \r\n distortion_coeffs):\r\n \"\"\"\r\n Arguments:\r\n pts_in: points to be distorted.\r\n intrinsics: intrinsics of the camera.\r\n distortion_model: distortion model of the camera.\r\n distortion_coeffs: distortion coefficients.\r\n\r\n Returns:\r\n pts_out: distorted points. 
(N, 2)\r\n \"\"\"\r\n if len(pts_in) == 0:\r\n return []\r\n\r\n K = np.array([\r\n [intrinsics[0], 0.0, intrinsics[2]],\r\n [0.0, intrinsics[1], intrinsics[3]],\r\n [0.0, 0.0, 1.0]])\r\n\r\n if distortion_model == 'equidistant':\r\n pts_out = cv2.fisheye.distortPoints(pts_in, K, distortion_coeffs)\r\n else: # default: 'radtan'\r\n homogenous_pts = cv2.convertPointsToHomogeneous(pts_in)\r\n pts_out, _ = cv2.projectPoints(homogenous_pts, \r\n np.zeros(3), np.zeros(3), K, distortion_coeffs)\r\n return pts_out.reshape((-1, 2))\r\n\r\n def draw_features_stereo(self):\r\n img0 = self.cam0_curr_img_msg.image\r\n img1 = self.cam1_curr_img_msg.image\r\n\r\n kps0 = []\r\n kps1 = []\r\n matches = []\r\n for feature in chain.from_iterable(self.curr_features):\r\n matches.append(cv2.DMatch(len(kps0), len(kps0), 0))\r\n kps0.append(cv2.KeyPoint(*feature.cam0_point, 1))\r\n kps1.append(cv2.KeyPoint(*feature.cam1_point, 1))\r\n\r\n img = cv2.drawMatches(img0, kps0, img1, kps1, matches, None, flags=2)\r\n cv2.imshow('stereo features', img)\r\n cv2.waitKey(1)\r\n\r\n\r\ndef skew(vec):\r\n x, y, z = vec\r\n return np.array([\r\n [0, -z, y],\r\n [z, 0, -x],\r\n [-y, x, 0]])\r\n\r\ndef select(data, selectors):\r\n return [d for d, s in zip(data, selectors) if s]\r\n\r\n\r\n",
"step-ids": [
18,
20,
23,
25,
31
]
}
|
[
18,
20,
23,
25,
31
] |
# from django.test import TestCase, LiveServerTestCase, Client
# from MeetUps.models import *
# from django.shortcuts import reverse
# from .forms import RegistrationForm
# class MeetUpViewTest(TestCase):
#     @classmethod
#     def setUpTestData(cls):
#         # create or get all meetups shared across the test methods
#     def test_index(self, meetup_slug):
|
normal
|
{
"blob_id": "9156ee034ceb8a39fc1eb3a18c1597c737814c72",
"index": 692,
"step-1": "# from django.test import TestCase ,LiveServerTestCase,Client\n\n# from MeetUps.models import*\n# from django.shortcuts import reverse\n# from .forms import RegistrationForm\n\n# class MeetUpViewTest(TestCase):\n\n# @classmethod\n# def setupTestDat(cls):\n# #create or get all meetups\n \n\n \n\n\n# def test_index(request,meetup_slug):",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
1
]
}
|
[
1
] |
<|reserved_special_token_0|>
def replchars(word: str, reptable: List[aff.RepPattern]) ->Iterator[Union[
str, List[str]]]:
"""
Uses :attr:`aff.REP <spylls.hunspell.data.aff.Aff.REP>` table (typical misspellings) to replace
    in the word provided. If a pattern's replacement contains "_", it stands for a space, and
    _two_ different hypotheses are yielded: it was one (dictionary) word "foo bar" (and should be
checked as such) or it was words ["foo", "bar"] and should be checked separately.
"""
if len(word) < 2 or not reptable:
return
for pattern in reptable:
for match in pattern.regexp.finditer(word):
suggestion = word[:match.start()] + pattern.replacement.replace('_'
, ' ') + word[match.end():]
yield suggestion
if ' ' in suggestion:
yield suggestion.split(' ', 2)
def mapchars(word: str, maptable: List[Set[str]]) ->Iterator[str]:
"""
    Uses :attr:`aff.MAP <spylls.hunspell.data.aff.Aff.MAP>` table (sets of potentially similar chars)
and tries to replace them recursively. E.g., assuming ``MAP`` has entry ``aáã``, and we have
a misspelling "anarchia", ``mapchars`` will do this:
>>> [*pmt.mapchars("anarchia", ['aáã'])]
['ánarchia',
'ánárchia',
'ánárchiá',
'ánárchiã',
'ánãrchia',
'ánãrchiá',
'ánãrchiã',
'ãnarchia',
'ãnárchia',
'ãnárchiá',
'ãnárchiã',
'ãnãrchia',
'ãnãrchiá',
'ãnãrchiã']
"""
if len(word) < 2 or not maptable:
return
def mapchars_internal(word, start=0):
if start >= len(word):
return
for options in maptable:
for option in options:
pos = word.find(option, start)
if pos != -1:
for other in options:
if other == option:
continue
replaced = word[:pos] + other + word[pos + len(option):
]
yield replaced
for variant in mapchars_internal(replaced, pos + 1):
yield variant
for variant in mapchars_internal(word):
yield variant
<|reserved_special_token_0|>
def longswapchar(word: str) ->Iterator[str]:
"""
Produces permutations with non-adjacent chars swapped (up to 4 chars distance)
"""
for first in range(0, len(word) - 2):
for second in range(first + 2, min(first + MAX_CHAR_DISTANCE, len(
word))):
yield word[:first] + word[second] + word[first + 1:second] + word[
first] + word[second + 1:]
def badcharkey(word: str, layout: str) ->Iterator[str]:
"""
Produces permutations with chars replaced by adjacent chars on keyboard layout ("vat -> cat")
or downcased (if it was accidental uppercase).
Uses :attr:`aff.KEY <spylls.hunspell.data.aff.Aff.KEY>`
"""
for i, c in enumerate(word):
before = word[:i]
after = word[i + 1:]
if c != c.upper():
yield before + c.upper() + after
if not layout:
continue
pos = layout.find(c)
while pos != -1:
if pos > 0 and layout[pos - 1] != '|':
yield before + layout[pos - 1] + after
if pos + 1 < len(layout) and layout[pos + 1] != '|':
yield before + layout[pos + 1] + after
pos = layout.find(c, pos + 1)
<|reserved_special_token_0|>
def badchar(word: str, trystring: str) ->Iterator[str]:
"""
Produces permutations with chars replaced by chars in :attr:`aff.TRY <spylls.hunspell.data.aff.Aff.TRY>`
set.
"""
if not trystring:
return
for c in trystring:
for i in reversed(range(0, len(word))):
if word[i] == c:
continue
yield word[:i] + c + word[i + 1:]
<|reserved_special_token_0|>
def twowords(word: str) ->Iterator[List[str]]:
"""
    Produces permutations of splitting the word into two words at all possible positions.
"""
for i in range(1, len(word)):
yield [word[:i], word[i:]]
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def replchars(word: str, reptable: List[aff.RepPattern]) ->Iterator[Union[
str, List[str]]]:
"""
Uses :attr:`aff.REP <spylls.hunspell.data.aff.Aff.REP>` table (typical misspellings) to replace
    in the word provided. If a pattern's replacement contains "_", it stands for a space, and
    _two_ different hypotheses are yielded: it was one (dictionary) word "foo bar" (and should be
checked as such) or it was words ["foo", "bar"] and should be checked separately.
"""
if len(word) < 2 or not reptable:
return
for pattern in reptable:
for match in pattern.regexp.finditer(word):
suggestion = word[:match.start()] + pattern.replacement.replace('_'
, ' ') + word[match.end():]
yield suggestion
if ' ' in suggestion:
yield suggestion.split(' ', 2)
def mapchars(word: str, maptable: List[Set[str]]) ->Iterator[str]:
"""
    Uses :attr:`aff.MAP <spylls.hunspell.data.aff.Aff.MAP>` table (sets of potentially similar chars)
and tries to replace them recursively. E.g., assuming ``MAP`` has entry ``aáã``, and we have
a misspelling "anarchia", ``mapchars`` will do this:
>>> [*pmt.mapchars("anarchia", ['aáã'])]
['ánarchia',
'ánárchia',
'ánárchiá',
'ánárchiã',
'ánãrchia',
'ánãrchiá',
'ánãrchiã',
'ãnarchia',
'ãnárchia',
'ãnárchiá',
'ãnárchiã',
'ãnãrchia',
'ãnãrchiá',
'ãnãrchiã']
"""
if len(word) < 2 or not maptable:
return
def mapchars_internal(word, start=0):
if start >= len(word):
return
for options in maptable:
for option in options:
pos = word.find(option, start)
if pos != -1:
for other in options:
if other == option:
continue
replaced = word[:pos] + other + word[pos + len(option):
]
yield replaced
for variant in mapchars_internal(replaced, pos + 1):
yield variant
for variant in mapchars_internal(word):
yield variant
<|reserved_special_token_0|>
def longswapchar(word: str) ->Iterator[str]:
"""
Produces permutations with non-adjacent chars swapped (up to 4 chars distance)
"""
for first in range(0, len(word) - 2):
for second in range(first + 2, min(first + MAX_CHAR_DISTANCE, len(
word))):
yield word[:first] + word[second] + word[first + 1:second] + word[
first] + word[second + 1:]
def badcharkey(word: str, layout: str) ->Iterator[str]:
"""
Produces permutations with chars replaced by adjacent chars on keyboard layout ("vat -> cat")
or downcased (if it was accidental uppercase).
Uses :attr:`aff.KEY <spylls.hunspell.data.aff.Aff.KEY>`
"""
for i, c in enumerate(word):
before = word[:i]
after = word[i + 1:]
if c != c.upper():
yield before + c.upper() + after
if not layout:
continue
pos = layout.find(c)
while pos != -1:
if pos > 0 and layout[pos - 1] != '|':
yield before + layout[pos - 1] + after
if pos + 1 < len(layout) and layout[pos + 1] != '|':
yield before + layout[pos + 1] + after
pos = layout.find(c, pos + 1)
<|reserved_special_token_0|>
def forgotchar(word: str, trystring: str) ->Iterator[str]:
"""
    Produces permutations with one char inserted in all possible positions.
List of chars is taken from :attr:`aff.TRY <spylls.hunspell.data.aff.Aff.TRY>` -- if it is absent,
doesn't try anything! Chars there are expected to be sorted in order of chars usage in language
(most used characters first).
"""
if not trystring:
return
for c in trystring:
for i in range(0, len(word)):
yield word[:i] + c + word[i:]
def movechar(word: str) ->Iterator[str]:
"""
Produces permutations with one character moved by 2, 3 or 4 places forward or backward (not 1,
because it is already handled by :meth:`swapchar`)
"""
if len(word) < 2:
return
for frompos, char in enumerate(word):
for topos in range(frompos + 3, min(len(word), frompos +
MAX_CHAR_DISTANCE + 1)):
yield word[:frompos] + word[frompos + 1:topos] + char + word[topos:
]
for frompos in reversed(range(0, len(word))):
for topos in reversed(range(max(0, frompos - MAX_CHAR_DISTANCE + 1),
frompos - 1)):
yield word[:topos] + word[frompos] + word[topos:frompos] + word[
frompos + 1:]
def badchar(word: str, trystring: str) ->Iterator[str]:
"""
Produces permutations with chars replaced by chars in :attr:`aff.TRY <spylls.hunspell.data.aff.Aff.TRY>`
set.
"""
if not trystring:
return
for c in trystring:
for i in reversed(range(0, len(word))):
if word[i] == c:
continue
yield word[:i] + c + word[i + 1:]
def doubletwochars(word: str) ->Iterator[str]:
"""
    Produces permutations with accidental two-letter doubling fixed (vacacation -> vacation)
"""
if len(word) < 5:
return
for i in range(2, len(word)):
if word[i - 2] == word[i] and word[i - 3] == word[i - 1]:
yield word[:i - 1] + word[i + 1:]
def twowords(word: str) ->Iterator[List[str]]:
"""
    Produces permutations of splitting the word into two words at all possible positions.
"""
for i in range(1, len(word)):
yield [word[:i], word[i:]]
<|reserved_special_token_1|>
<|reserved_special_token_0|>
MAX_CHAR_DISTANCE = 4
def replchars(word: str, reptable: List[aff.RepPattern]) ->Iterator[Union[
str, List[str]]]:
"""
Uses :attr:`aff.REP <spylls.hunspell.data.aff.Aff.REP>` table (typical misspellings) to replace
    in the word provided. If a pattern's replacement contains "_", it stands for a space, and
    _two_ different hypotheses are yielded: it was one (dictionary) word "foo bar" (and should be
checked as such) or it was words ["foo", "bar"] and should be checked separately.
"""
if len(word) < 2 or not reptable:
return
for pattern in reptable:
for match in pattern.regexp.finditer(word):
suggestion = word[:match.start()] + pattern.replacement.replace('_'
, ' ') + word[match.end():]
yield suggestion
if ' ' in suggestion:
yield suggestion.split(' ', 2)
def mapchars(word: str, maptable: List[Set[str]]) ->Iterator[str]:
"""
    Uses :attr:`aff.MAP <spylls.hunspell.data.aff.Aff.MAP>` table (sets of potentially similar chars)
and tries to replace them recursively. E.g., assuming ``MAP`` has entry ``aáã``, and we have
a misspelling "anarchia", ``mapchars`` will do this:
>>> [*pmt.mapchars("anarchia", ['aáã'])]
['ánarchia',
'ánárchia',
'ánárchiá',
'ánárchiã',
'ánãrchia',
'ánãrchiá',
'ánãrchiã',
'ãnarchia',
'ãnárchia',
'ãnárchiá',
'ãnárchiã',
'ãnãrchia',
'ãnãrchiá',
'ãnãrchiã']
"""
if len(word) < 2 or not maptable:
return
def mapchars_internal(word, start=0):
if start >= len(word):
return
for options in maptable:
for option in options:
pos = word.find(option, start)
if pos != -1:
for other in options:
if other == option:
continue
replaced = word[:pos] + other + word[pos + len(option):
]
yield replaced
for variant in mapchars_internal(replaced, pos + 1):
yield variant
for variant in mapchars_internal(word):
yield variant
def swapchar(word: str) ->Iterator[str]:
"""
Produces permutations with adjacent chars swapped. For short (4 or 5 letters) words produces
also doubleswaps: ahev -> have.
"""
if len(word) < 2:
return
for i in range(0, len(word) - 1):
        yield word[:i] + word[i + 1] + word[i] + word[i + 2:]
if len(word) in [4, 5]:
yield word[1] + word[0] + (word[2] if len(word) == 5 else '') + word[-1
] + word[-2]
if len(word) == 5:
yield word[0] + word[2] + word[1] + word[-1] + word[-2]
def longswapchar(word: str) ->Iterator[str]:
"""
Produces permutations with non-adjacent chars swapped (up to 4 chars distance)
"""
for first in range(0, len(word) - 2):
for second in range(first + 2, min(first + MAX_CHAR_DISTANCE, len(
word))):
yield word[:first] + word[second] + word[first + 1:second] + word[
first] + word[second + 1:]
def badcharkey(word: str, layout: str) ->Iterator[str]:
"""
Produces permutations with chars replaced by adjacent chars on keyboard layout ("vat -> cat")
or downcased (if it was accidental uppercase).
Uses :attr:`aff.KEY <spylls.hunspell.data.aff.Aff.KEY>`
"""
for i, c in enumerate(word):
before = word[:i]
after = word[i + 1:]
if c != c.upper():
yield before + c.upper() + after
if not layout:
continue
pos = layout.find(c)
while pos != -1:
if pos > 0 and layout[pos - 1] != '|':
yield before + layout[pos - 1] + after
if pos + 1 < len(layout) and layout[pos + 1] != '|':
yield before + layout[pos + 1] + after
pos = layout.find(c, pos + 1)
def extrachar(word: str) ->Iterator[str]:
"""
Produces permutations with one char removed in all possible positions
"""
if len(word) < 2:
return
for i in range(0, len(word)):
yield word[:i] + word[i + 1:]
def forgotchar(word: str, trystring: str) ->Iterator[str]:
"""
    Produces permutations with one char inserted in all possible positions.
List of chars is taken from :attr:`aff.TRY <spylls.hunspell.data.aff.Aff.TRY>` -- if it is absent,
doesn't try anything! Chars there are expected to be sorted in order of chars usage in language
(most used characters first).
"""
if not trystring:
return
for c in trystring:
for i in range(0, len(word)):
yield word[:i] + c + word[i:]
def movechar(word: str) ->Iterator[str]:
"""
Produces permutations with one character moved by 2, 3 or 4 places forward or backward (not 1,
because it is already handled by :meth:`swapchar`)
"""
if len(word) < 2:
return
for frompos, char in enumerate(word):
for topos in range(frompos + 3, min(len(word), frompos +
MAX_CHAR_DISTANCE + 1)):
yield word[:frompos] + word[frompos + 1:topos] + char + word[topos:
]
for frompos in reversed(range(0, len(word))):
for topos in reversed(range(max(0, frompos - MAX_CHAR_DISTANCE + 1),
frompos - 1)):
yield word[:topos] + word[frompos] + word[topos:frompos] + word[
frompos + 1:]
def badchar(word: str, trystring: str) ->Iterator[str]:
"""
Produces permutations with chars replaced by chars in :attr:`aff.TRY <spylls.hunspell.data.aff.Aff.TRY>`
set.
"""
if not trystring:
return
for c in trystring:
for i in reversed(range(0, len(word))):
if word[i] == c:
continue
yield word[:i] + c + word[i + 1:]
def doubletwochars(word: str) ->Iterator[str]:
"""
    Produces permutations with accidental two-letter doubling fixed (vacacation -> vacation)
"""
if len(word) < 5:
return
for i in range(2, len(word)):
if word[i - 2] == word[i] and word[i - 3] == word[i - 1]:
yield word[:i - 1] + word[i + 1:]
def twowords(word: str) ->Iterator[List[str]]:
"""
    Produces permutations of splitting the word into two words at all possible positions.
"""
for i in range(1, len(word)):
yield [word[:i], word[i:]]
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from typing import Iterator, Union, List, Set
from spylls.hunspell.data import aff
MAX_CHAR_DISTANCE = 4
def replchars(word: str, reptable: List[aff.RepPattern]) ->Iterator[Union[
str, List[str]]]:
"""
Uses :attr:`aff.REP <spylls.hunspell.data.aff.Aff.REP>` table (typical misspellings) to replace
    in the word provided. If a pattern's replacement contains "_", it stands for a space, and
    _two_ different hypotheses are yielded: it was one (dictionary) word "foo bar" (and should be
checked as such) or it was words ["foo", "bar"] and should be checked separately.
"""
if len(word) < 2 or not reptable:
return
for pattern in reptable:
for match in pattern.regexp.finditer(word):
suggestion = word[:match.start()] + pattern.replacement.replace('_'
, ' ') + word[match.end():]
yield suggestion
if ' ' in suggestion:
yield suggestion.split(' ', 2)
def mapchars(word: str, maptable: List[Set[str]]) ->Iterator[str]:
"""
    Uses :attr:`aff.MAP <spylls.hunspell.data.aff.Aff.MAP>` table (sets of potentially similar chars)
and tries to replace them recursively. E.g., assuming ``MAP`` has entry ``aáã``, and we have
a misspelling "anarchia", ``mapchars`` will do this:
>>> [*pmt.mapchars("anarchia", ['aáã'])]
['ánarchia',
'ánárchia',
'ánárchiá',
'ánárchiã',
'ánãrchia',
'ánãrchiá',
'ánãrchiã',
'ãnarchia',
'ãnárchia',
'ãnárchiá',
'ãnárchiã',
'ãnãrchia',
'ãnãrchiá',
'ãnãrchiã']
"""
if len(word) < 2 or not maptable:
return
def mapchars_internal(word, start=0):
if start >= len(word):
return
for options in maptable:
for option in options:
pos = word.find(option, start)
if pos != -1:
for other in options:
if other == option:
continue
replaced = word[:pos] + other + word[pos + len(option):
]
yield replaced
for variant in mapchars_internal(replaced, pos + 1):
yield variant
for variant in mapchars_internal(word):
yield variant
def swapchar(word: str) ->Iterator[str]:
"""
Produces permutations with adjacent chars swapped. For short (4 or 5 letters) words produces
also doubleswaps: ahev -> have.
"""
if len(word) < 2:
return
for i in range(0, len(word) - 1):
        yield word[:i] + word[i + 1] + word[i] + word[i + 2:]
if len(word) in [4, 5]:
yield word[1] + word[0] + (word[2] if len(word) == 5 else '') + word[-1
] + word[-2]
if len(word) == 5:
yield word[0] + word[2] + word[1] + word[-1] + word[-2]
def longswapchar(word: str) ->Iterator[str]:
"""
Produces permutations with non-adjacent chars swapped (up to 4 chars distance)
"""
for first in range(0, len(word) - 2):
for second in range(first + 2, min(first + MAX_CHAR_DISTANCE, len(
word))):
yield word[:first] + word[second] + word[first + 1:second] + word[
first] + word[second + 1:]
def badcharkey(word: str, layout: str) ->Iterator[str]:
"""
Produces permutations with chars replaced by adjacent chars on keyboard layout ("vat -> cat")
or downcased (if it was accidental uppercase).
Uses :attr:`aff.KEY <spylls.hunspell.data.aff.Aff.KEY>`
"""
for i, c in enumerate(word):
before = word[:i]
after = word[i + 1:]
if c != c.upper():
yield before + c.upper() + after
if not layout:
continue
pos = layout.find(c)
while pos != -1:
if pos > 0 and layout[pos - 1] != '|':
yield before + layout[pos - 1] + after
if pos + 1 < len(layout) and layout[pos + 1] != '|':
yield before + layout[pos + 1] + after
pos = layout.find(c, pos + 1)
def extrachar(word: str) ->Iterator[str]:
"""
Produces permutations with one char removed in all possible positions
"""
if len(word) < 2:
return
for i in range(0, len(word)):
yield word[:i] + word[i + 1:]
def forgotchar(word: str, trystring: str) ->Iterator[str]:
"""
    Produces permutations with one char inserted in all possible positions.
List of chars is taken from :attr:`aff.TRY <spylls.hunspell.data.aff.Aff.TRY>` -- if it is absent,
doesn't try anything! Chars there are expected to be sorted in order of chars usage in language
(most used characters first).
"""
if not trystring:
return
for c in trystring:
for i in range(0, len(word)):
yield word[:i] + c + word[i:]
def movechar(word: str) ->Iterator[str]:
"""
Produces permutations with one character moved by 2, 3 or 4 places forward or backward (not 1,
because it is already handled by :meth:`swapchar`)
"""
if len(word) < 2:
return
for frompos, char in enumerate(word):
for topos in range(frompos + 3, min(len(word), frompos +
MAX_CHAR_DISTANCE + 1)):
yield word[:frompos] + word[frompos + 1:topos] + char + word[topos:
]
for frompos in reversed(range(0, len(word))):
for topos in reversed(range(max(0, frompos - MAX_CHAR_DISTANCE + 1),
frompos - 1)):
yield word[:topos] + word[frompos] + word[topos:frompos] + word[
frompos + 1:]
def badchar(word: str, trystring: str) ->Iterator[str]:
"""
Produces permutations with chars replaced by chars in :attr:`aff.TRY <spylls.hunspell.data.aff.Aff.TRY>`
set.
"""
if not trystring:
return
for c in trystring:
for i in reversed(range(0, len(word))):
if word[i] == c:
continue
yield word[:i] + c + word[i + 1:]
def doubletwochars(word: str) ->Iterator[str]:
"""
    Produces permutations with accidental two-letter doubling fixed (vacacation -> vacation)
"""
if len(word) < 5:
return
for i in range(2, len(word)):
if word[i - 2] == word[i] and word[i - 3] == word[i - 1]:
yield word[:i - 1] + word[i + 1:]
def twowords(word: str) ->Iterator[List[str]]:
"""
    Produces permutations of splitting the word into two words at all possible positions.
"""
for i in range(1, len(word)):
yield [word[:i], word[i:]]
<|reserved_special_token_1|>
"""
Note: names of methods in this module, if seem weird, are the same as in Hunspell's ``suggest.cxx``
to keep track of them.
"""
from typing import Iterator, Union, List, Set
from spylls.hunspell.data import aff
MAX_CHAR_DISTANCE = 4
def replchars(word: str, reptable: List[aff.RepPattern]) -> Iterator[Union[str, List[str]]]:
"""
Uses :attr:`aff.REP <spylls.hunspell.data.aff.Aff.REP>` table (typical misspellings) to replace
    in the word provided. If a pattern's replacement contains "_", it stands for a space, and
    _two_ different hypotheses are yielded: it was one (dictionary) word "foo bar" (and should be
checked as such) or it was words ["foo", "bar"] and should be checked separately.
"""
if len(word) < 2 or not reptable:
return
for pattern in reptable:
# TODO: compiled at aff loading
for match in pattern.regexp.finditer(word):
suggestion = word[:match.start()] + pattern.replacement.replace('_', ' ') + word[match.end():]
yield suggestion
if ' ' in suggestion:
yield suggestion.split(' ', 2)
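# A minimal sketch of how replchars might be exercised. aff.RepPattern's exact
# constructor is not relied on here; any stand-in object exposing ``regexp``
# (a compiled pattern) and ``replacement`` behaves the same:
#
#   >>> import re; from types import SimpleNamespace
#   >>> rep = SimpleNamespace(regexp=re.compile('shun'), replacement='tion')
#   >>> [*replchars('funkshun', [rep])]
#   ['funktion']
#
# A replacement containing "_" (e.g. 'alot' -> 'a_lot') would yield both
# 'a lot' and the split hypothesis ['a', 'lot'].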
def mapchars(word: str, maptable: List[Set[str]]) -> Iterator[str]:
"""
    Uses :attr:`aff.MAP <spylls.hunspell.data.aff.Aff.MAP>` table (sets of potentially similar chars)
and tries to replace them recursively. E.g., assuming ``MAP`` has entry ``aáã``, and we have
a misspelling "anarchia", ``mapchars`` will do this:
>>> [*pmt.mapchars("anarchia", ['aáã'])]
['ánarchia',
'ánárchia',
'ánárchiá',
'ánárchiã',
'ánãrchia',
'ánãrchiá',
'ánãrchiã',
'ãnarchia',
'ãnárchia',
'ãnárchiá',
'ãnárchiã',
'ãnãrchia',
'ãnãrchiá',
'ãnãrchiã']
"""
if len(word) < 2 or not maptable:
return
def mapchars_internal(word, start=0):
if start >= len(word):
return
for options in maptable:
for option in options:
pos = word.find(option, start)
if pos != -1:
for other in options:
if other == option:
continue
replaced = word[:pos] + other + word[pos+len(option):]
yield replaced
for variant in mapchars_internal(replaced, pos + 1):
yield variant
for variant in mapchars_internal(word):
yield variant
def swapchar(word: str) -> Iterator[str]:
"""
Produces permutations with adjacent chars swapped. For short (4 or 5 letters) words produces
also doubleswaps: ahev -> have.
"""
if len(word) < 2:
return
for i in range(0, len(word) - 1):
        yield word[:i] + word[i+1] + word[i] + word[i+2:]
# try double swaps for short words
# ahev -> have, owudl -> would
if len(word) in [4, 5]:
yield word[1] + word[0] + (word[2] if len(word) == 5 else '') + word[-1] + word[-2]
if len(word) == 5:
yield word[0] + word[2] + word[1] + word[-1] + word[-2]
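# Quick sanity check of the swap logic above (illustrative):
#
#   >>> [*swapchar('ahev')]
#   ['haev', 'aehv', 'ahve', 'have']
#
# The last candidate comes from the double-swap branch for 4-letter words.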
def longswapchar(word: str) -> Iterator[str]:
"""
Produces permutations with non-adjacent chars swapped (up to 4 chars distance)
"""
for first in range(0, len(word) - 2):
for second in range(first + 2, min(first + MAX_CHAR_DISTANCE, len(word))):
yield word[:first] + word[second] + word[first+1:second] + word[first] + word[second+1:]
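# Illustrative: on a 4-letter word, the distance-2 and distance-3 swaps are
#
#   >>> [*longswapchar('abcd')]
#   ['cbad', 'dbca', 'adcb']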
def badcharkey(word: str, layout: str) -> Iterator[str]:
"""
Produces permutations with chars replaced by adjacent chars on keyboard layout ("vat -> cat")
or downcased (if it was accidental uppercase).
Uses :attr:`aff.KEY <spylls.hunspell.data.aff.Aff.KEY>`
"""
for i, c in enumerate(word):
before = word[:i]
after = word[i+1:]
if c != c.upper():
yield before + c.upper() + after
if not layout:
continue
pos = layout.find(c)
while pos != -1:
if pos > 0 and layout[pos-1] != '|':
yield before + layout[pos-1] + after
if pos + 1 < len(layout) and layout[pos+1] != '|':
yield before + layout[pos+1] + after
pos = layout.find(c, pos+1)
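# Illustrative sketch; the QWERTY string below is an assumed example of an
# aff KEY value (rows separated by '|'), not taken from any particular .aff:
#
#   >>> qwerty = 'qwertyuiop|asdfghjkl|zxcvbnm'
#   >>> [*badcharkey('vat', qwerty)]
#   ['Vat', 'cat', 'bat', 'vAt', 'vst', 'vaT', 'var', 'vay']
#
# 'cat' and 'bat' come from the keys adjacent to 'v'; the capitalized
# variants handle accidental uppercase.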
def extrachar(word: str) -> Iterator[str]:
"""
Produces permutations with one char removed in all possible positions
"""
if len(word) < 2:
return
for i in range(0, len(word)):
yield word[:i] + word[i+1:]
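# Illustrative: one deletion per position.
#
#   >>> [*extrachar('cart')]
#   ['art', 'crt', 'cat', 'car']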
def forgotchar(word: str, trystring: str) -> Iterator[str]:
"""
    Produces permutations with one char inserted in all possible positions.
List of chars is taken from :attr:`aff.TRY <spylls.hunspell.data.aff.Aff.TRY>` -- if it is absent,
doesn't try anything! Chars there are expected to be sorted in order of chars usage in language
(most used characters first).
"""
if not trystring:
return
for c in trystring:
for i in range(0, len(word)):
yield word[:i] + c + word[i:]
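# Illustrative, assuming TRY contains just 'a':
#
#   >>> [*forgotchar('ct', 'a')]
#   ['act', 'cat']
#
# Note that the inner range, as written, never inserts after the final
# character of the word.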
def movechar(word: str) -> Iterator[str]:
"""
Produces permutations with one character moved by 2, 3 or 4 places forward or backward (not 1,
because it is already handled by :meth:`swapchar`)
"""
if len(word) < 2:
return
for frompos, char in enumerate(word):
for topos in range(frompos + 3, min(len(word), frompos + MAX_CHAR_DISTANCE + 1)):
yield word[:frompos] + word[frompos+1:topos] + char + word[topos:]
for frompos in reversed(range(0, len(word))):
for topos in reversed(range(max(0, frompos - MAX_CHAR_DISTANCE + 1), frompos - 1)):
yield word[:topos] + word[frompos] + word[topos:frompos] + word[frompos+1:]
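# Illustrative: a few of the candidates for a 5-letter word.
#
#   >>> out = [*movechar('abcde')]
#   >>> 'bcade' in out   # 'a' moved two places forward
#   True
#   >>> 'abecd' in out   # 'e' moved two places backward
#   True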
def badchar(word: str, trystring: str) -> Iterator[str]:
"""
Produces permutations with chars replaced by chars in :attr:`aff.TRY <spylls.hunspell.data.aff.Aff.TRY>`
set.
"""
if not trystring:
return
for c in trystring:
for i in reversed(range(0, len(word))):
if word[i] == c:
continue
yield word[:i] + c + word[i+1:]
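# Illustrative, assuming TRY contains just 'a'; positions are tried from the
# end of the word backwards:
#
#   >>> [*badchar('cet', 'a')]
#   ['cea', 'cat', 'aet']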
def doubletwochars(word: str) -> Iterator[str]:
"""
    Produces permutations with accidental two-letter doubling fixed (vacacation -> vacation)
"""
if len(word) < 5:
return
# TODO: 1) for vacacation yields "vacation" twice, hunspell's algo kinda wiser
# 2) maybe just use regexp?..
for i in range(2, len(word)):
if word[i-2] == word[i] and word[i-3] == word[i-1]:
yield word[:i-1] + word[i+1:]
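# Illustrative; as the TODO above notes, the corrected form is currently
# yielded once per overlapping match:
#
#   >>> [*doubletwochars('vacacation')]
#   ['vacation', 'vacation']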
def twowords(word: str) -> Iterator[List[str]]:
"""
    Produces permutations of splitting the word into two words at all possible positions.
"""
for i in range(1, len(word)):
yield [word[:i], word[i:]]
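# Illustrative: every split point is proposed as a word pair.
#
#   >>> [*twowords('icecream')][2]
#   ['ice', 'cream']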
|
flexible
|
{
"blob_id": "cfba55505f3290a14b98d594bc871a74812c7c57",
"index": 5594,
"step-1": "<mask token>\n\n\ndef replchars(word: str, reptable: List[aff.RepPattern]) ->Iterator[Union[\n str, List[str]]]:\n \"\"\"\n Uses :attr:`aff.REP <spylls.hunspell.data.aff.Aff.REP>` table (typical misspellings) to replace\n in the word provided. If the pattern's replacement contains \"_\", it means replacing to \" \" and\n yielding _two_ different hypotheses: it was one (dictionary) word \"foo bar\" (and should be\n checked as such) or it was words [\"foo\", \"bar\"] and should be checked separately.\n \"\"\"\n if len(word) < 2 or not reptable:\n return\n for pattern in reptable:\n for match in pattern.regexp.finditer(word):\n suggestion = word[:match.start()] + pattern.replacement.replace('_'\n , ' ') + word[match.end():]\n yield suggestion\n if ' ' in suggestion:\n yield suggestion.split(' ', 2)\n\n\ndef mapchars(word: str, maptable: List[Set[str]]) ->Iterator[str]:\n \"\"\"\n Uses :attr:`aff.MAP <spylls.hunspell.data.aff.Aff.MAP>` table ( sets of potentially similar chars)\n and tries to replace them recursively. E.g., assuming ``MAP`` has entry ``aáã``, and we have\n a misspelling \"anarchia\", ``mapchars`` will do this:\n\n >>> [*pmt.mapchars(\"anarchia\", ['aáã'])]\n ['ánarchia',\n 'ánárchia',\n 'ánárchiá',\n 'ánárchiã',\n 'ánãrchia',\n 'ánãrchiá',\n 'ánãrchiã',\n 'ãnarchia',\n 'ãnárchia',\n 'ãnárchiá',\n 'ãnárchiã',\n 'ãnãrchia',\n 'ãnãrchiá',\n 'ãnãrchiã']\n \"\"\"\n if len(word) < 2 or not maptable:\n return\n\n def mapchars_internal(word, start=0):\n if start >= len(word):\n return\n for options in maptable:\n for option in options:\n pos = word.find(option, start)\n if pos != -1:\n for other in options:\n if other == option:\n continue\n replaced = word[:pos] + other + word[pos + len(option):\n ]\n yield replaced\n for variant in mapchars_internal(replaced, pos + 1):\n yield variant\n for variant in mapchars_internal(word):\n yield variant\n\n\n<mask token>\n\n\ndef longswapchar(word: str) ->Iterator[str]:\n \"\"\"\n Produces permutations with non-adjacent chars swapped (up to 4 chars distance)\n \"\"\"\n for first in range(0, len(word) - 2):\n for second in range(first + 2, min(first + MAX_CHAR_DISTANCE, len(\n word))):\n yield word[:first] + word[second] + word[first + 1:second] + word[\n first] + word[second + 1:]\n\n\ndef badcharkey(word: str, layout: str) ->Iterator[str]:\n \"\"\"\n Produces permutations with chars replaced by adjacent chars on keyboard layout (\"vat -> cat\")\n or downcased (if it was accidental uppercase).\n\n Uses :attr:`aff.KEY <spylls.hunspell.data.aff.Aff.KEY>`\n \"\"\"\n for i, c in enumerate(word):\n before = word[:i]\n after = word[i + 1:]\n if c != c.upper():\n yield before + c.upper() + after\n if not layout:\n continue\n pos = layout.find(c)\n while pos != -1:\n if pos > 0 and layout[pos - 1] != '|':\n yield before + layout[pos - 1] + after\n if pos + 1 < len(layout) and layout[pos + 1] != '|':\n yield before + layout[pos + 1] + after\n pos = layout.find(c, pos + 1)\n\n\n<mask token>\n\n\ndef badchar(word: str, trystring: str) ->Iterator[str]:\n \"\"\"\n Produces permutations with chars replaced by chars in :attr:`aff.TRY <spylls.hunspell.data.aff.Aff.TRY>`\n set.\n \"\"\"\n if not trystring:\n return\n for c in trystring:\n for i in reversed(range(0, len(word))):\n if word[i] == c:\n continue\n yield word[:i] + c + word[i + 1:]\n\n\n<mask token>\n\n\ndef twowords(word: str) ->Iterator[List[str]]:\n \"\"\"\n Produces permutation of splitting in two words in all possible positions.\n \"\"\"\n for i in range(1, len(word)):\n yield 
[word[:i], word[i:]]\n",
"step-2": "<mask token>\n\n\ndef replchars(word: str, reptable: List[aff.RepPattern]) ->Iterator[Union[\n str, List[str]]]:\n \"\"\"\n Uses :attr:`aff.REP <spylls.hunspell.data.aff.Aff.REP>` table (typical misspellings) to replace\n in the word provided. If the pattern's replacement contains \"_\", it means replacing to \" \" and\n yielding _two_ different hypotheses: it was one (dictionary) word \"foo bar\" (and should be\n checked as such) or it was words [\"foo\", \"bar\"] and should be checked separately.\n \"\"\"\n if len(word) < 2 or not reptable:\n return\n for pattern in reptable:\n for match in pattern.regexp.finditer(word):\n suggestion = word[:match.start()] + pattern.replacement.replace('_'\n , ' ') + word[match.end():]\n yield suggestion\n if ' ' in suggestion:\n yield suggestion.split(' ', 2)\n\n\ndef mapchars(word: str, maptable: List[Set[str]]) ->Iterator[str]:\n \"\"\"\n Uses :attr:`aff.MAP <spylls.hunspell.data.aff.Aff.MAP>` table ( sets of potentially similar chars)\n and tries to replace them recursively. E.g., assuming ``MAP`` has entry ``aáã``, and we have\n a misspelling \"anarchia\", ``mapchars`` will do this:\n\n >>> [*pmt.mapchars(\"anarchia\", ['aáã'])]\n ['ánarchia',\n 'ánárchia',\n 'ánárchiá',\n 'ánárchiã',\n 'ánãrchia',\n 'ánãrchiá',\n 'ánãrchiã',\n 'ãnarchia',\n 'ãnárchia',\n 'ãnárchiá',\n 'ãnárchiã',\n 'ãnãrchia',\n 'ãnãrchiá',\n 'ãnãrchiã']\n \"\"\"\n if len(word) < 2 or not maptable:\n return\n\n def mapchars_internal(word, start=0):\n if start >= len(word):\n return\n for options in maptable:\n for option in options:\n pos = word.find(option, start)\n if pos != -1:\n for other in options:\n if other == option:\n continue\n replaced = word[:pos] + other + word[pos + len(option):\n ]\n yield replaced\n for variant in mapchars_internal(replaced, pos + 1):\n yield variant\n for variant in mapchars_internal(word):\n yield variant\n\n\n<mask token>\n\n\ndef longswapchar(word: str) ->Iterator[str]:\n \"\"\"\n Produces permutations with non-adjacent chars swapped (up to 4 chars distance)\n \"\"\"\n for first in range(0, len(word) - 2):\n for second in range(first + 2, min(first + MAX_CHAR_DISTANCE, len(\n word))):\n yield word[:first] + word[second] + word[first + 1:second] + word[\n first] + word[second + 1:]\n\n\ndef badcharkey(word: str, layout: str) ->Iterator[str]:\n \"\"\"\n Produces permutations with chars replaced by adjacent chars on keyboard layout (\"vat -> cat\")\n or downcased (if it was accidental uppercase).\n\n Uses :attr:`aff.KEY <spylls.hunspell.data.aff.Aff.KEY>`\n \"\"\"\n for i, c in enumerate(word):\n before = word[:i]\n after = word[i + 1:]\n if c != c.upper():\n yield before + c.upper() + after\n if not layout:\n continue\n pos = layout.find(c)\n while pos != -1:\n if pos > 0 and layout[pos - 1] != '|':\n yield before + layout[pos - 1] + after\n if pos + 1 < len(layout) and layout[pos + 1] != '|':\n yield before + layout[pos + 1] + after\n pos = layout.find(c, pos + 1)\n\n\n<mask token>\n\n\ndef forgotchar(word: str, trystring: str) ->Iterator[str]:\n \"\"\"\n Produces permutations with one char inserted in all possible possitions.\n\n List of chars is taken from :attr:`aff.TRY <spylls.hunspell.data.aff.Aff.TRY>` -- if it is absent,\n doesn't try anything! 
Chars there are expected to be sorted in order of chars usage in language\n (most used characters first).\n \"\"\"\n if not trystring:\n return\n for c in trystring:\n for i in range(0, len(word)):\n yield word[:i] + c + word[i:]\n\n\ndef movechar(word: str) ->Iterator[str]:\n \"\"\"\n Produces permutations with one character moved by 2, 3 or 4 places forward or backward (not 1,\n because it is already handled by :meth:`swapchar`)\n \"\"\"\n if len(word) < 2:\n return\n for frompos, char in enumerate(word):\n for topos in range(frompos + 3, min(len(word), frompos +\n MAX_CHAR_DISTANCE + 1)):\n yield word[:frompos] + word[frompos + 1:topos] + char + word[topos:\n ]\n for frompos in reversed(range(0, len(word))):\n for topos in reversed(range(max(0, frompos - MAX_CHAR_DISTANCE + 1),\n frompos - 1)):\n yield word[:topos] + word[frompos] + word[topos:frompos] + word[\n frompos + 1:]\n\n\ndef badchar(word: str, trystring: str) ->Iterator[str]:\n \"\"\"\n Produces permutations with chars replaced by chars in :attr:`aff.TRY <spylls.hunspell.data.aff.Aff.TRY>`\n set.\n \"\"\"\n if not trystring:\n return\n for c in trystring:\n for i in reversed(range(0, len(word))):\n if word[i] == c:\n continue\n yield word[:i] + c + word[i + 1:]\n\n\ndef doubletwochars(word: str) ->Iterator[str]:\n \"\"\"\n Produces permutations with accidental two-letter-doubling fixed (vacation -> vacacation)\n \"\"\"\n if len(word) < 5:\n return\n for i in range(2, len(word)):\n if word[i - 2] == word[i] and word[i - 3] == word[i - 1]:\n yield word[:i - 1] + word[i + 1:]\n\n\ndef twowords(word: str) ->Iterator[List[str]]:\n \"\"\"\n Produces permutation of splitting in two words in all possible positions.\n \"\"\"\n for i in range(1, len(word)):\n yield [word[:i], word[i:]]\n",
"step-3": "<mask token>\nMAX_CHAR_DISTANCE = 4\n\n\ndef replchars(word: str, reptable: List[aff.RepPattern]) ->Iterator[Union[\n str, List[str]]]:\n \"\"\"\n Uses :attr:`aff.REP <spylls.hunspell.data.aff.Aff.REP>` table (typical misspellings) to replace\n in the word provided. If the pattern's replacement contains \"_\", it means replacing to \" \" and\n yielding _two_ different hypotheses: it was one (dictionary) word \"foo bar\" (and should be\n checked as such) or it was words [\"foo\", \"bar\"] and should be checked separately.\n \"\"\"\n if len(word) < 2 or not reptable:\n return\n for pattern in reptable:\n for match in pattern.regexp.finditer(word):\n suggestion = word[:match.start()] + pattern.replacement.replace('_'\n , ' ') + word[match.end():]\n yield suggestion\n if ' ' in suggestion:\n yield suggestion.split(' ', 2)\n\n\ndef mapchars(word: str, maptable: List[Set[str]]) ->Iterator[str]:\n \"\"\"\n Uses :attr:`aff.MAP <spylls.hunspell.data.aff.Aff.MAP>` table ( sets of potentially similar chars)\n and tries to replace them recursively. E.g., assuming ``MAP`` has entry ``aáã``, and we have\n a misspelling \"anarchia\", ``mapchars`` will do this:\n\n >>> [*pmt.mapchars(\"anarchia\", ['aáã'])]\n ['ánarchia',\n 'ánárchia',\n 'ánárchiá',\n 'ánárchiã',\n 'ánãrchia',\n 'ánãrchiá',\n 'ánãrchiã',\n 'ãnarchia',\n 'ãnárchia',\n 'ãnárchiá',\n 'ãnárchiã',\n 'ãnãrchia',\n 'ãnãrchiá',\n 'ãnãrchiã']\n \"\"\"\n if len(word) < 2 or not maptable:\n return\n\n def mapchars_internal(word, start=0):\n if start >= len(word):\n return\n for options in maptable:\n for option in options:\n pos = word.find(option, start)\n if pos != -1:\n for other in options:\n if other == option:\n continue\n replaced = word[:pos] + other + word[pos + len(option):\n ]\n yield replaced\n for variant in mapchars_internal(replaced, pos + 1):\n yield variant\n for variant in mapchars_internal(word):\n yield variant\n\n\ndef swapchar(word: str) ->Iterator[str]:\n \"\"\"\n Produces permutations with adjacent chars swapped. 
For short (4 or 5 letters) words produces\n also doubleswaps: ahev -> have.\n \"\"\"\n if len(word) < 2:\n return\n for i in range(0, len(word) - 1):\n yield word[:i] + word[i + 1] + word[i + 1] + word[i + 2:]\n if len(word) in [4, 5]:\n yield word[1] + word[0] + (word[2] if len(word) == 5 else '') + word[-1\n ] + word[-2]\n if len(word) == 5:\n yield word[0] + word[2] + word[1] + word[-1] + word[-2]\n\n\ndef longswapchar(word: str) ->Iterator[str]:\n \"\"\"\n Produces permutations with non-adjacent chars swapped (up to 4 chars distance)\n \"\"\"\n for first in range(0, len(word) - 2):\n for second in range(first + 2, min(first + MAX_CHAR_DISTANCE, len(\n word))):\n yield word[:first] + word[second] + word[first + 1:second] + word[\n first] + word[second + 1:]\n\n\ndef badcharkey(word: str, layout: str) ->Iterator[str]:\n \"\"\"\n Produces permutations with chars replaced by adjacent chars on keyboard layout (\"vat -> cat\")\n or downcased (if it was accidental uppercase).\n\n Uses :attr:`aff.KEY <spylls.hunspell.data.aff.Aff.KEY>`\n \"\"\"\n for i, c in enumerate(word):\n before = word[:i]\n after = word[i + 1:]\n if c != c.upper():\n yield before + c.upper() + after\n if not layout:\n continue\n pos = layout.find(c)\n while pos != -1:\n if pos > 0 and layout[pos - 1] != '|':\n yield before + layout[pos - 1] + after\n if pos + 1 < len(layout) and layout[pos + 1] != '|':\n yield before + layout[pos + 1] + after\n pos = layout.find(c, pos + 1)\n\n\ndef extrachar(word: str) ->Iterator[str]:\n \"\"\"\n Produces permutations with one char removed in all possible positions\n \"\"\"\n if len(word) < 2:\n return\n for i in range(0, len(word)):\n yield word[:i] + word[i + 1:]\n\n\ndef forgotchar(word: str, trystring: str) ->Iterator[str]:\n \"\"\"\n Produces permutations with one char inserted in all possible possitions.\n\n List of chars is taken from :attr:`aff.TRY <spylls.hunspell.data.aff.Aff.TRY>` -- if it is absent,\n doesn't try anything! 
Chars there are expected to be sorted in order of chars usage in language\n (most used characters first).\n \"\"\"\n if not trystring:\n return\n for c in trystring:\n for i in range(0, len(word)):\n yield word[:i] + c + word[i:]\n\n\ndef movechar(word: str) ->Iterator[str]:\n \"\"\"\n Produces permutations with one character moved by 2, 3 or 4 places forward or backward (not 1,\n because it is already handled by :meth:`swapchar`)\n \"\"\"\n if len(word) < 2:\n return\n for frompos, char in enumerate(word):\n for topos in range(frompos + 3, min(len(word), frompos +\n MAX_CHAR_DISTANCE + 1)):\n yield word[:frompos] + word[frompos + 1:topos] + char + word[topos:\n ]\n for frompos in reversed(range(0, len(word))):\n for topos in reversed(range(max(0, frompos - MAX_CHAR_DISTANCE + 1),\n frompos - 1)):\n yield word[:topos] + word[frompos] + word[topos:frompos] + word[\n frompos + 1:]\n\n\ndef badchar(word: str, trystring: str) ->Iterator[str]:\n \"\"\"\n Produces permutations with chars replaced by chars in :attr:`aff.TRY <spylls.hunspell.data.aff.Aff.TRY>`\n set.\n \"\"\"\n if not trystring:\n return\n for c in trystring:\n for i in reversed(range(0, len(word))):\n if word[i] == c:\n continue\n yield word[:i] + c + word[i + 1:]\n\n\ndef doubletwochars(word: str) ->Iterator[str]:\n \"\"\"\n Produces permutations with accidental two-letter-doubling fixed (vacation -> vacacation)\n \"\"\"\n if len(word) < 5:\n return\n for i in range(2, len(word)):\n if word[i - 2] == word[i] and word[i - 3] == word[i - 1]:\n yield word[:i - 1] + word[i + 1:]\n\n\ndef twowords(word: str) ->Iterator[List[str]]:\n \"\"\"\n Produces permutation of splitting in two words in all possible positions.\n \"\"\"\n for i in range(1, len(word)):\n yield [word[:i], word[i:]]\n",
"step-4": "<mask token>\nfrom typing import Iterator, Union, List, Set\nfrom spylls.hunspell.data import aff\nMAX_CHAR_DISTANCE = 4\n\n\ndef replchars(word: str, reptable: List[aff.RepPattern]) ->Iterator[Union[\n str, List[str]]]:\n \"\"\"\n Uses :attr:`aff.REP <spylls.hunspell.data.aff.Aff.REP>` table (typical misspellings) to replace\n in the word provided. If the pattern's replacement contains \"_\", it means replacing to \" \" and\n yielding _two_ different hypotheses: it was one (dictionary) word \"foo bar\" (and should be\n checked as such) or it was words [\"foo\", \"bar\"] and should be checked separately.\n \"\"\"\n if len(word) < 2 or not reptable:\n return\n for pattern in reptable:\n for match in pattern.regexp.finditer(word):\n suggestion = word[:match.start()] + pattern.replacement.replace('_'\n , ' ') + word[match.end():]\n yield suggestion\n if ' ' in suggestion:\n yield suggestion.split(' ', 2)\n\n\ndef mapchars(word: str, maptable: List[Set[str]]) ->Iterator[str]:\n \"\"\"\n Uses :attr:`aff.MAP <spylls.hunspell.data.aff.Aff.MAP>` table ( sets of potentially similar chars)\n and tries to replace them recursively. E.g., assuming ``MAP`` has entry ``aáã``, and we have\n a misspelling \"anarchia\", ``mapchars`` will do this:\n\n >>> [*pmt.mapchars(\"anarchia\", ['aáã'])]\n ['ánarchia',\n 'ánárchia',\n 'ánárchiá',\n 'ánárchiã',\n 'ánãrchia',\n 'ánãrchiá',\n 'ánãrchiã',\n 'ãnarchia',\n 'ãnárchia',\n 'ãnárchiá',\n 'ãnárchiã',\n 'ãnãrchia',\n 'ãnãrchiá',\n 'ãnãrchiã']\n \"\"\"\n if len(word) < 2 or not maptable:\n return\n\n def mapchars_internal(word, start=0):\n if start >= len(word):\n return\n for options in maptable:\n for option in options:\n pos = word.find(option, start)\n if pos != -1:\n for other in options:\n if other == option:\n continue\n replaced = word[:pos] + other + word[pos + len(option):\n ]\n yield replaced\n for variant in mapchars_internal(replaced, pos + 1):\n yield variant\n for variant in mapchars_internal(word):\n yield variant\n\n\ndef swapchar(word: str) ->Iterator[str]:\n \"\"\"\n Produces permutations with adjacent chars swapped. 
For short (4 or 5 letters) words produces\n also doubleswaps: ahev -> have.\n \"\"\"\n if len(word) < 2:\n return\n for i in range(0, len(word) - 1):\n yield word[:i] + word[i + 1] + word[i + 1] + word[i + 2:]\n if len(word) in [4, 5]:\n yield word[1] + word[0] + (word[2] if len(word) == 5 else '') + word[-1\n ] + word[-2]\n if len(word) == 5:\n yield word[0] + word[2] + word[1] + word[-1] + word[-2]\n\n\ndef longswapchar(word: str) ->Iterator[str]:\n \"\"\"\n Produces permutations with non-adjacent chars swapped (up to 4 chars distance)\n \"\"\"\n for first in range(0, len(word) - 2):\n for second in range(first + 2, min(first + MAX_CHAR_DISTANCE, len(\n word))):\n yield word[:first] + word[second] + word[first + 1:second] + word[\n first] + word[second + 1:]\n\n\ndef badcharkey(word: str, layout: str) ->Iterator[str]:\n \"\"\"\n Produces permutations with chars replaced by adjacent chars on keyboard layout (\"vat -> cat\")\n or downcased (if it was accidental uppercase).\n\n Uses :attr:`aff.KEY <spylls.hunspell.data.aff.Aff.KEY>`\n \"\"\"\n for i, c in enumerate(word):\n before = word[:i]\n after = word[i + 1:]\n if c != c.upper():\n yield before + c.upper() + after\n if not layout:\n continue\n pos = layout.find(c)\n while pos != -1:\n if pos > 0 and layout[pos - 1] != '|':\n yield before + layout[pos - 1] + after\n if pos + 1 < len(layout) and layout[pos + 1] != '|':\n yield before + layout[pos + 1] + after\n pos = layout.find(c, pos + 1)\n\n\ndef extrachar(word: str) ->Iterator[str]:\n \"\"\"\n Produces permutations with one char removed in all possible positions\n \"\"\"\n if len(word) < 2:\n return\n for i in range(0, len(word)):\n yield word[:i] + word[i + 1:]\n\n\ndef forgotchar(word: str, trystring: str) ->Iterator[str]:\n \"\"\"\n Produces permutations with one char inserted in all possible possitions.\n\n List of chars is taken from :attr:`aff.TRY <spylls.hunspell.data.aff.Aff.TRY>` -- if it is absent,\n doesn't try anything! 
Chars there are expected to be sorted in order of chars usage in language\n (most used characters first).\n \"\"\"\n if not trystring:\n return\n for c in trystring:\n for i in range(0, len(word)):\n yield word[:i] + c + word[i:]\n\n\ndef movechar(word: str) ->Iterator[str]:\n \"\"\"\n Produces permutations with one character moved by 2, 3 or 4 places forward or backward (not 1,\n because it is already handled by :meth:`swapchar`)\n \"\"\"\n if len(word) < 2:\n return\n for frompos, char in enumerate(word):\n for topos in range(frompos + 3, min(len(word), frompos +\n MAX_CHAR_DISTANCE + 1)):\n yield word[:frompos] + word[frompos + 1:topos] + char + word[topos:\n ]\n for frompos in reversed(range(0, len(word))):\n for topos in reversed(range(max(0, frompos - MAX_CHAR_DISTANCE + 1),\n frompos - 1)):\n yield word[:topos] + word[frompos] + word[topos:frompos] + word[\n frompos + 1:]\n\n\ndef badchar(word: str, trystring: str) ->Iterator[str]:\n \"\"\"\n Produces permutations with chars replaced by chars in :attr:`aff.TRY <spylls.hunspell.data.aff.Aff.TRY>`\n set.\n \"\"\"\n if not trystring:\n return\n for c in trystring:\n for i in reversed(range(0, len(word))):\n if word[i] == c:\n continue\n yield word[:i] + c + word[i + 1:]\n\n\ndef doubletwochars(word: str) ->Iterator[str]:\n \"\"\"\n Produces permutations with accidental two-letter-doubling fixed (vacation -> vacacation)\n \"\"\"\n if len(word) < 5:\n return\n for i in range(2, len(word)):\n if word[i - 2] == word[i] and word[i - 3] == word[i - 1]:\n yield word[:i - 1] + word[i + 1:]\n\n\ndef twowords(word: str) ->Iterator[List[str]]:\n \"\"\"\n Produces permutation of splitting in two words in all possible positions.\n \"\"\"\n for i in range(1, len(word)):\n yield [word[:i], word[i:]]\n",
"step-5": "\"\"\"\nNote: names of methods in this module, if seem weird, are the same as in Hunspell's ``suggest.cxx``\nto keep track of them.\n\"\"\"\n\nfrom typing import Iterator, Union, List, Set\n\nfrom spylls.hunspell.data import aff\n\n\nMAX_CHAR_DISTANCE = 4\n\n\ndef replchars(word: str, reptable: List[aff.RepPattern]) -> Iterator[Union[str, List[str]]]:\n \"\"\"\n Uses :attr:`aff.REP <spylls.hunspell.data.aff.Aff.REP>` table (typical misspellings) to replace\n in the word provided. If the pattern's replacement contains \"_\", it means replacing to \" \" and\n yielding _two_ different hypotheses: it was one (dictionary) word \"foo bar\" (and should be\n checked as such) or it was words [\"foo\", \"bar\"] and should be checked separately.\n \"\"\"\n\n if len(word) < 2 or not reptable:\n return\n\n for pattern in reptable:\n # TODO: compiled at aff loading\n for match in pattern.regexp.finditer(word):\n suggestion = word[:match.start()] + pattern.replacement.replace('_', ' ') + word[match.end():]\n yield suggestion\n if ' ' in suggestion:\n yield suggestion.split(' ', 2)\n\n\ndef mapchars(word: str, maptable: List[Set[str]]) -> Iterator[str]:\n \"\"\"\n Uses :attr:`aff.MAP <spylls.hunspell.data.aff.Aff.MAP>` table ( sets of potentially similar chars)\n and tries to replace them recursively. E.g., assuming ``MAP`` has entry ``aáã``, and we have\n a misspelling \"anarchia\", ``mapchars`` will do this:\n\n >>> [*pmt.mapchars(\"anarchia\", ['aáã'])]\n ['ánarchia',\n 'ánárchia',\n 'ánárchiá',\n 'ánárchiã',\n 'ánãrchia',\n 'ánãrchiá',\n 'ánãrchiã',\n 'ãnarchia',\n 'ãnárchia',\n 'ãnárchiá',\n 'ãnárchiã',\n 'ãnãrchia',\n 'ãnãrchiá',\n 'ãnãrchiã']\n \"\"\"\n\n if len(word) < 2 or not maptable:\n return\n\n def mapchars_internal(word, start=0):\n if start >= len(word):\n return\n\n for options in maptable:\n for option in options:\n pos = word.find(option, start)\n if pos != -1:\n for other in options:\n if other == option:\n continue\n replaced = word[:pos] + other + word[pos+len(option):]\n yield replaced\n for variant in mapchars_internal(replaced, pos + 1):\n yield variant\n\n for variant in mapchars_internal(word):\n yield variant\n\n\ndef swapchar(word: str) -> Iterator[str]:\n \"\"\"\n Produces permutations with adjacent chars swapped. 
For short (4 or 5 letters) words produces\n also doubleswaps: ahev -> have.\n \"\"\"\n\n if len(word) < 2:\n return\n\n for i in range(0, len(word) - 1):\n yield word[:i] + word[i+1] + word[i+1] + word[i+2:]\n\n # try double swaps for short words\n # ahev -> have, owudl -> would\n if len(word) in [4, 5]:\n yield word[1] + word[0] + (word[2] if len(word) == 5 else '') + word[-1] + word[-2]\n if len(word) == 5:\n yield word[0] + word[2] + word[1] + word[-1] + word[-2]\n\n\ndef longswapchar(word: str) -> Iterator[str]:\n \"\"\"\n Produces permutations with non-adjacent chars swapped (up to 4 chars distance)\n \"\"\"\n\n for first in range(0, len(word) - 2):\n for second in range(first + 2, min(first + MAX_CHAR_DISTANCE, len(word))):\n yield word[:first] + word[second] + word[first+1:second] + word[first] + word[second+1:]\n\n\ndef badcharkey(word: str, layout: str) -> Iterator[str]:\n \"\"\"\n Produces permutations with chars replaced by adjacent chars on keyboard layout (\"vat -> cat\")\n or downcased (if it was accidental uppercase).\n\n Uses :attr:`aff.KEY <spylls.hunspell.data.aff.Aff.KEY>`\n \"\"\"\n\n for i, c in enumerate(word):\n before = word[:i]\n after = word[i+1:]\n if c != c.upper():\n yield before + c.upper() + after\n\n if not layout:\n continue\n\n pos = layout.find(c)\n while pos != -1:\n if pos > 0 and layout[pos-1] != '|':\n yield before + layout[pos-1] + after\n if pos + 1 < len(layout) and layout[pos+1] != '|':\n yield before + layout[pos+1] + after\n pos = layout.find(c, pos+1)\n\n\ndef extrachar(word: str) -> Iterator[str]:\n \"\"\"\n Produces permutations with one char removed in all possible positions\n \"\"\"\n if len(word) < 2:\n return\n\n for i in range(0, len(word)):\n yield word[:i] + word[i+1:]\n\n\ndef forgotchar(word: str, trystring: str) -> Iterator[str]:\n \"\"\"\n Produces permutations with one char inserted in all possible possitions.\n\n List of chars is taken from :attr:`aff.TRY <spylls.hunspell.data.aff.Aff.TRY>` -- if it is absent,\n doesn't try anything! 
Chars there are expected to be sorted in order of chars usage in language\n (most used characters first).\n \"\"\"\n\n if not trystring:\n return\n\n for c in trystring:\n for i in range(0, len(word)):\n yield word[:i] + c + word[i:]\n\n\ndef movechar(word: str) -> Iterator[str]:\n \"\"\"\n Produces permutations with one character moved by 2, 3 or 4 places forward or backward (not 1,\n because it is already handled by :meth:`swapchar`)\n \"\"\"\n\n if len(word) < 2:\n return\n\n for frompos, char in enumerate(word):\n for topos in range(frompos + 3, min(len(word), frompos + MAX_CHAR_DISTANCE + 1)):\n yield word[:frompos] + word[frompos+1:topos] + char + word[topos:]\n\n for frompos in reversed(range(0, len(word))):\n for topos in reversed(range(max(0, frompos - MAX_CHAR_DISTANCE + 1), frompos - 1)):\n yield word[:topos] + word[frompos] + word[topos:frompos] + word[frompos+1:]\n\n\ndef badchar(word: str, trystring: str) -> Iterator[str]:\n \"\"\"\n Produces permutations with chars replaced by chars in :attr:`aff.TRY <spylls.hunspell.data.aff.Aff.TRY>`\n set.\n \"\"\"\n\n if not trystring:\n return\n\n for c in trystring:\n for i in reversed(range(0, len(word))):\n if word[i] == c:\n continue\n yield word[:i] + c + word[i+1:]\n\n\ndef doubletwochars(word: str) -> Iterator[str]:\n \"\"\"\n Produces permutations with accidental two-letter-doubling fixed (vacation -> vacacation)\n \"\"\"\n\n if len(word) < 5:\n return\n\n # TODO: 1) for vacacation yields \"vacation\" twice, hunspell's algo kinda wiser\n # 2) maybe just use regexp?..\n for i in range(2, len(word)):\n if word[i-2] == word[i] and word[i-3] == word[i-1]:\n yield word[:i-1] + word[i+1:]\n\n\ndef twowords(word: str) -> Iterator[List[str]]:\n \"\"\"\n Produces permutation of splitting in two words in all possible positions.\n \"\"\"\n\n for i in range(1, len(word)):\n yield [word[:i], word[i:]]\n",
"step-ids": [
6,
9,
12,
13,
14
]
}
|
[
6,
9,
12,
13,
14
] |
import sys
import pygame
import os
import random
import subprocess
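# Global game state: frame rate, first-spawn delay in ms, pause flag and player stats.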
FPS, fst_spawn, not_paused, coins, enemies_count, killed, score = 50, 2000, True, 0, 0, 0, 0
NEWENEMYSPAWN = pygame.USEREVENT + 1  # custom event type for the enemy spawn timer
MiniG_rate, EnemyG_rate, MetalM_rate = 1, 5, 15
WEAPONS_LIST = ['Green laser gun', 'Purple laser gun', 'Plasma gun']
def load_image(name, colorkey=None):
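    # Load an image from the ./data folder; colorkey=-1 keys out the top-left pixel's color.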
fullname = os.path.join('data', name)
image = pygame.image.load(fullname).convert()
if colorkey is not None:
if colorkey == -1:
colorkey = image.get_at((0, 0))
image.set_colorkey(colorkey)
else:
image = image.convert_alpha()
return image
def info_print():
global score, killed, coins
font = pygame.font.Font(None, 30)
text_coord = 2
pygame.draw.rect(screen, (100, 100, 100), (0, 0, 200, 100), 3)
pygame.draw.rect(screen, (150, 150, 150), (3, 3, 194, 94), 3)
pygame.draw.rect(screen, (250, 250, 250), (5, 5, 190, 90))
text = [f'Счёт: {score}',
f'Убито: {killed}',
f'Монеты: {coins}']
for line in text:
string_rendered = font.render(line, 1, (50, 50, 50))
intro_rect = string_rendered.get_rect()
text_coord += 10
intro_rect.top = text_coord
intro_rect.x = 10
text_coord += intro_rect.height
screen.blit(string_rendered, intro_rect)
class Board:
def __init__(self, screen, width, height):
self.width = width
self.height = height
self.board = [[0] * width for _ in range(height)]
self.left = 0
self.top = 0
self.cell_size = 70
self.screen = screen
def set_view(self, left, top, cell_size):
self.left = left
self.top = top
self.cell_size = cell_size
def render(self):
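        # tp/pp hold triangle vertices for the decorative zig-zag strips drawn above the grid.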
tp, pp = [[0, 140], [17, 105], [35, 140]], [[17, 105], [35, 140], [52, 105]]
for y in range(self.height):
for x in range(self.width):
if y >= 2:
pygame.draw.rect(self.screen, (100, 100, 100), (
x * self.cell_size, y * self.cell_size, self.cell_size, self.cell_size),
1)
pygame.draw.rect(self.screen, (150, 150, 150), (
x * self.cell_size + 1, y * self.cell_size + 1, self.cell_size - 2,
self.cell_size - 2), 2)
pygame.draw.rect(self.screen, (250, 250, 250), (
x * self.cell_size + 3, y * self.cell_size + 3, self.cell_size - 4,
self.cell_size - 4))
for i in range(self.width * 2 - 1):
pygame.draw.polygon(screen, (0, 230, 200), pp)
pp[0][1] += 2
pp[0][0] += 4
pp[1][1] -= 3
pp[2][1] += 2
pp[2][0] -= 4
pygame.draw.polygon(screen, (0, 125, 200), pp)
pp[0][1] += 4
pp[0][0] += 6
pp[1][1] -= 7
pp[2][1] += 4
pp[2][0] -= 6
pygame.draw.polygon(screen, (0, 230, 200), pp)
pp[0][1] -= 6
pp[0][0] -= 10
pp[1][1] += 10
pp[2][1] -= 6
pp[2][0] += 10
for point in pp:
point[0] += 35
for i in range(self.width * 2):
pygame.draw.polygon(screen, (100, 100, 100), tp)
tp[0][1] -= 2
tp[0][0] += 4
tp[1][1] += 4
tp[2][1] -= 2
tp[2][0] -= 4
pygame.draw.polygon(screen, (150, 150, 150), tp)
tp[0][1] -= 2
tp[0][0] += 4
tp[1][1] += 4
tp[2][1] -= 2
tp[2][0] -= 4
pygame.draw.polygon(screen, (250, 250, 250), tp)
tp[0][1] += 4
tp[0][0] -= 8
tp[1][1] -= 8
tp[2][1] += 4
tp[2][0] += 8
for point in tp:
point[0] += 35
class Bullet(pygame.sprite.Sprite):
def __init__(self, enemy_sprites, x, damage, kind):
super().__init__(bullet_sprites)
self.damage = damage
if kind == 'Green laser gun':
self.image = load_image("green.png", -1)
elif kind == 'Purple laser gun':
self.image = load_image("purple.png", -1)
elif kind == 'Plasma gun':
self.image = pygame.transform.scale(load_image("plasma.png", -1), (25, 25))
self.rect = self.image.get_rect()
self.coords = self.rect.x, self.rect.y = x + 30, 665
self.mask = pygame.mask.from_surface(self.image)
self.fly(enemy_sprites)
def fly(self, enemy_sprites):
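        # Move one pixel up per call (called once per frame) until the top of the field, then despawn.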
if self.rect.y >= 140:
self.rect.y -= 1
for enemy in enemy_sprites:
if pygame.sprite.collide_mask(enemy, self):
self.hit(enemy)
else:
self.kill()
def hit(self, enemy):
enemy.hp -= self.damage
self.kill()
class Weapon:
def __init__(self, player, kind):
self.kind = kind
self.ability = None
self.player = player
if self.kind == 'Green laser gun':
self.damage = 2
self.price = 0
elif self.kind == 'Purple laser gun':
self.damage = 4
self.price = 50
elif self.kind == 'Plasma gun':
self.damage = 8
self.price = 150
self.ability = 'Rage'
def shoot(self, enemy_sprites):
        Bullet(enemy_sprites, self.player.rect.x, self.damage, self.kind)
class Player(pygame.sprite.Sprite):
def __init__(self, group):
super().__init__(group)
self.weapon = Weapon(self, 'Green laser gun')
self.image = load_image("player.jpg", -1)
self.rect = self.image.get_rect()
self.coords = self.rect.x, self.rect.y = 75, 635
self.mask = pygame.mask.from_surface(self.image)
def shoot(self, enemy_sprites):
self.weapon.shoot(enemy_sprites)
def move(self, side):
x = self.rect.x
if x < 630 and side == 'right':
x += 70
if x > 35 and side == 'left':
x -= 70
self.rect.x = x
class Enemy(pygame.sprite.Sprite):
global enemies_count, MiniG_rate, EnemyG_rate, MetalM_rate
def __init__(self, group):
super().__init__(group)
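        # Enemy type depends on the global spawn counter: Metal Men and enemy gliders
        # appear at fixed intervals once enough enemies have spawned.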
if enemies_count >= 30 and enemies_count % MetalM_rate == 0:
self.type = 'MM'
self.hp = 24
self.image = pygame.transform.scale(load_image("Metal_Man.png", -1), (120, 140))
self.rect = self.image.get_rect()
self.coords = self.rect.x, self.rect.y = random.randrange(10, 560, 70), 140
self.mask = pygame.mask.from_surface(self.image)
elif enemies_count >= 15 and enemies_count % EnemyG_rate == 0:
self.type = 'EG'
self.hp = 6
self.image = pygame.transform.scale(load_image('Enemy_glider.png', -1), (70, 70))
self.rect = self.image.get_rect()
self.coords = self.rect.x, self.rect.y = random.randrange(0, 700, 70), 140
self.mask = pygame.mask.from_surface(self.image)
else:
self.type = 'MG'
self.hp = 4
self.image = pygame.transform.scale(load_image('Mini_glider.png', -1), (70, 70))
self.rect = self.image.get_rect()
self.coords = self.rect.x, self.rect.y = random.randrange(0, 700, 70), 140
self.mask = pygame.mask.from_surface(self.image)
def death_check(self):
global killed, score, coins, FPS
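        # Rewards scale with enemy type; a Metal Man kill also raises FPS, speeding up the game.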
if self.hp <= 0:
killed += 1
if self.type == 'MM':
score += 30
coins += 15
FPS += 10
elif self.type == 'EG':
score += 15
coins += 5
elif self.type == 'MG':
score += 10
coins += 2
self.kill()
def move(self):
self.rect.y += 1
def game_over():
global FPS, not_paused, score, killed, coins
def text_print():
        title = ' GAME OVER'
intro_text = ["",
"Нажми клавишу A",
"чтобы сыграть еще раз",
'',
'Нажми на кнопку "Esc", ',
'чтобы выйти из игры',
f'Счёт: {score}',
f'Убито: {killed}',
f'Монеты: {coins}']
fon = pygame.transform.scale(load_image('fon.jpg'), (width, height))
screen.blit(fon, (0, 0))
font = pygame.font.Font(None, 50)
text_coord = 40
        string_rendered = font.render(title, 1, pygame.Color('white'))
intro_rect = string_rendered.get_rect()
text_coord += 10
intro_rect.top = text_coord
intro_rect.x = 10
text_coord += intro_rect.height
screen.blit(string_rendered, intro_rect)
font = pygame.font.Font(None, 30)
for line in intro_text:
string_rendered = font.render(line, 1, pygame.Color('white'))
intro_rect = string_rendered.get_rect()
text_coord += 10
intro_rect.top = text_coord
intro_rect.x = 10
text_coord += intro_rect.height
intro_rect.x += 10
screen.blit(string_rendered, intro_rect)
FPS = 30
pygame.mouse.set_visible(True)
text_print()
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
terminate()
elif event.type == pygame.KEYDOWN or event.type == pygame.MOUSEBUTTONDOWN:
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE:
terminate()
                    if event.key == pygame.K_a:
                        pygame.quit()
                        subprocess.call("python проект.py", shell=True)
if not_paused:
pygame.display.flip()
clock.tick(FPS)
terminate()
def terminate():
pygame.quit()
sys.exit()
def start_screen(screen, width, height):
global FPS, not_paused
def text_print():
intro_text = [" SPACE SOLDIER", "",
" Нажми любую клавишу,",
" чтобы начать игру",
' Нажимай на кнопки стрелок, чтобы перемещать персонажа',
' Не дай врагу пролететь мимо тебя!',
' Нажми на кнопку "Esc", ',
' чтобы открыть меню паузы',
' или попасть в магазин']
fon = pygame.transform.scale(load_image('fon.jpg'), (width, height))
font = pygame.font.Font(None, 30)
text_coord = 50
screen.blit(fon, (0, 0))
for line in intro_text:
string_rendered = font.render(line, 1, pygame.Color('black'))
intro_rect = string_rendered.get_rect()
text_coord += 10
intro_rect.top = text_coord
intro_rect.x = 10
text_coord += intro_rect.height
screen.blit(string_rendered, intro_rect)
pygame.mouse.set_visible(True)
text_print()
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
terminate()
elif event.type == pygame.KEYDOWN or event.type == pygame.MOUSEBUTTONDOWN:
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE:
pause_menu(screen, width, height)
text_print()
else:
pygame.mouse.set_visible(False)
return
if not_paused:
pygame.display.flip()
clock.tick(FPS)
terminate()
def pause_menu(screen, width, height):
global FPS, not_paused
def text_print():
intro_text = ["Нажми на кнопку 'S',",
"чтобы открыть магазин",
'',
"Нажми на кнопку 'C',",
"чтобы продолжжить игру",
'',
"УПРАВЛЕНИЕ",
'',
'Нажимай на кнопки стрелок, чтобы перемещать персонажа',
'',
'Не дай врагу пролететь мимо тебя!',
'',
'Нажми на кнопку "Esc", ',
'чтобы закрыть меню паузы']
fon = pygame.transform.scale(load_image('fon.jpg'), (width, height))
font = pygame.font.Font(None, 30)
text_coord = 50
screen.blit(fon, (0, 0))
for line in intro_text:
string_rendered = font.render(line, 1, pygame.Color('black'))
intro_rect = string_rendered.get_rect()
text_coord += 10
intro_rect.top = text_coord
intro_rect.x = 10
text_coord += intro_rect.height
screen.blit(string_rendered, intro_rect)
pygame.mouse.set_visible(True)
fon = pygame.transform.scale(load_image('fon.jpg'), (width, height))
screen.blit(fon, (0, 0))
text_print()
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
terminate()
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE:
not_paused = True
pygame.mouse.set_visible(False)
return
                if event.key == pygame.K_s:
                    shop(screen, width, height)
                if event.key == pygame.K_c:
                    return
pygame.display.flip()
clock.tick(FPS)
terminate()
def shop(screen, width, height):
global FPS, not_paused, WEAPONS_LIST, coins
def text_print():
intro_text = [" Нажми на кнопку 'U',",
"чтобы улучшить свое оружие",
'Нажми на кнопку "Esc", ',
'чтобы выйти из магазина', '',
'Текущее оружие:',
f'{player.weapon.kind}',
'Наносимый урон:',
f'{player.weapon.damage}',
'Следующее улучшение:',
f'{next_weapon}',
'Урон:',
f'{next_damage}',
'Стоимость:',
f'{next_price}',
'Ваши монеты:',
f'{coins}']
fon = pygame.transform.scale(load_image('fon.jpg'), (width, height))
font = pygame.font.Font(None, 30)
text_coord = 50
screen.blit(fon, (0, 0))
for line in intro_text:
string_rendered = font.render(line, 1, pygame.Color('black'))
intro_rect = string_rendered.get_rect()
text_coord += 10
intro_rect.top = text_coord
intro_rect.x = 10
text_coord += intro_rect.height
screen.blit(string_rendered, intro_rect)
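    # Determine the next upgrade and its stats before rendering the shop text.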
if player.weapon.kind != 'Plasma gun':
next_weapon = WEAPONS_LIST[WEAPONS_LIST.index(player.weapon.kind) + 1]
if next_weapon == 'Purple laser gun':
next_damage = 4
next_price = 50
else:
            next_damage = 8
next_price = 150
else:
next_weapon = 'Вы имеете лучшее оружие'
next_damage = 'Наносимый урон максимальный'
next_price = 'Покупать больше нечего'
pygame.mouse.set_visible(True)
fon = pygame.transform.scale(load_image('fon.jpg'), (width, height))
screen.blit(fon, (0, 0))
text_print()
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
terminate()
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE:
pygame.mouse.set_visible(False)
screen.blit(fon, (0, 0))
return
                if event.key == pygame.K_u and player.weapon.kind != 'Plasma gun' and coins >= next_price:
coins -= next_price
player.weapon = Weapon(player, WEAPONS_LIST[WEAPONS_LIST.index(player.weapon.kind) + 1])
pygame.display.flip()
clock.tick(FPS)
terminate()
pygame.init()
size = width, height = 700, 700
screen = pygame.display.set_mode(size)
pygame.display.set_caption('SPACE SOLDIER')
pygame.display.set_icon(load_image("icon.png", -1))
fon1 = pygame.transform.scale(load_image('fon1.png'), (700, 400))
board = Board(screen, 10, 10)
pygame.mouse.set_visible(True)
enemy_sprites = pygame.sprite.Group()
player_sprites = pygame.sprite.Group()
bullet_sprites = pygame.sprite.Group()
player = Player(player_sprites)
enemy_li = [Enemy(enemy_sprites)]
clock = pygame.time.Clock()
start_screen(screen, width, height)
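# Fire a NEWENEMYSPAWN event every fst_spawn milliseconds to drive enemy spawning.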
pygame.time.set_timer(NEWENEMYSPAWN, fst_spawn)
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
terminate()
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE:
not_paused = False
pause_menu(screen, width, height)
if not_paused:
                if event.key == pygame.K_RIGHT:
                    player.move('right')
                elif event.key == pygame.K_LEFT:
                    player.move('left')
                if event.key == pygame.K_s:
                    player.shoot(enemy_sprites)
if not_paused and event.type == NEWENEMYSPAWN:
enemy_li.append(Enemy(enemy_sprites))
enemies_count += 1
if not_paused:
screen.blit(fon1, (0, 0))
board.render()
player_sprites.draw(screen)
enemy_sprites.draw(screen)
bullet_sprites.draw(screen)
        for enemy in enemy_sprites:
            if enemy.type != 'MM':
                lim = 630
            else:
                lim = 560
            if enemy.rect.y <= lim:
                enemy.move()
            else:
                game_over()
        # Advance each bullet once per frame, then apply any deaths it caused.
        for bullet in bullet_sprites:
            bullet.fly(enemy_sprites)
        for enemy in enemy_sprites:
            enemy.death_check()
info_print()
pygame.display.flip()
clock.tick(FPS)
terminate()
|
normal
|
{
"blob_id": "244191087fcab2a6f03bf024708484b9838731ed",
"index": 9301,
"step-1": "<mask token>\n\n\nclass Player(pygame.sprite.Sprite):\n\n def __init__(self, group):\n super().__init__(group)\n self.weapon = Weapon(self, 'Green laser gun')\n self.image = load_image('player.jpg', -1)\n self.rect = self.image.get_rect()\n self.coords = self.rect.x, self.rect.y = 75, 635\n self.mask = pygame.mask.from_surface(self.image)\n <mask token>\n\n def move(self, side):\n x = self.rect.x\n if x < 630 and side == 'right':\n x += 70\n if x > 35 and side == 'left':\n x -= 70\n self.rect.x = x\n\n\nclass Enemy(pygame.sprite.Sprite):\n global enemies_count, MiniG_rate, EnemyG_rate, MetalM_rate\n\n def __init__(self, group):\n super().__init__(group)\n if enemies_count >= 30 and enemies_count % MetalM_rate == 0:\n self.type = 'MM'\n self.hp = 24\n self.image = pygame.transform.scale(load_image('Metal_Man.png',\n -1), (120, 140))\n self.rect = self.image.get_rect()\n self.coords = self.rect.x, self.rect.y = random.randrange(10, \n 560, 70), 140\n self.mask = pygame.mask.from_surface(self.image)\n elif enemies_count >= 15 and enemies_count % EnemyG_rate == 0:\n self.type = 'EG'\n self.hp = 6\n self.image = pygame.transform.scale(load_image(\n 'Enemy_glider.png', -1), (70, 70))\n self.rect = self.image.get_rect()\n self.coords = self.rect.x, self.rect.y = random.randrange(0, \n 700, 70), 140\n self.mask = pygame.mask.from_surface(self.image)\n else:\n self.type = 'MG'\n self.hp = 4\n self.image = pygame.transform.scale(load_image(\n 'Mini_glider.png', -1), (70, 70))\n self.rect = self.image.get_rect()\n self.coords = self.rect.x, self.rect.y = random.randrange(0, \n 700, 70), 140\n self.mask = pygame.mask.from_surface(self.image)\n\n def death_check(self):\n global killed, score, coins, FPS\n if self.hp <= 0:\n killed += 1\n if self.type == 'MM':\n score += 30\n coins += 15\n FPS += 10\n elif self.type == 'EG':\n score += 15\n coins += 5\n elif self.type == 'MG':\n score += 10\n coins += 2\n self.kill()\n\n def move(self):\n self.rect.y += 1\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Bullet(pygame.sprite.Sprite):\n <mask token>\n <mask token>\n\n def hit(self, enemy):\n enemy.hp -= self.damage\n self.kill()\n\n\nclass Weapon:\n\n def __init__(self, player, kind):\n self.kind = kind\n self.ability = None\n self.player = player\n if self.kind == 'Green laser gun':\n self.damage = 2\n self.price = 0\n elif self.kind == 'Purple laser gun':\n self.damage = 4\n self.price = 50\n elif self.kind == 'Plasma gun':\n self.damage = 8\n self.price = 150\n self.ability = 'Rage'\n\n def shoot(self, enemy_sprites):\n bullet = Bullet(enemy_sprites, self.player.rect.x, self.damage,\n self.kind)\n\n\nclass Player(pygame.sprite.Sprite):\n\n def __init__(self, group):\n super().__init__(group)\n self.weapon = Weapon(self, 'Green laser gun')\n self.image = load_image('player.jpg', -1)\n self.rect = self.image.get_rect()\n self.coords = self.rect.x, self.rect.y = 75, 635\n self.mask = pygame.mask.from_surface(self.image)\n\n def shoot(self, enemy_sprites):\n self.weapon.shoot(enemy_sprites)\n\n def move(self, side):\n x = self.rect.x\n if x < 630 and side == 'right':\n x += 70\n if x > 35 and side == 'left':\n x -= 70\n self.rect.x = x\n\n\nclass Enemy(pygame.sprite.Sprite):\n global enemies_count, MiniG_rate, EnemyG_rate, MetalM_rate\n\n def __init__(self, group):\n super().__init__(group)\n if enemies_count >= 30 and enemies_count % MetalM_rate == 0:\n self.type = 'MM'\n self.hp = 24\n self.image = pygame.transform.scale(load_image('Metal_Man.png',\n -1), (120, 140))\n self.rect = self.image.get_rect()\n self.coords = self.rect.x, self.rect.y = random.randrange(10, \n 560, 70), 140\n self.mask = pygame.mask.from_surface(self.image)\n elif enemies_count >= 15 and enemies_count % EnemyG_rate == 0:\n self.type = 'EG'\n self.hp = 6\n self.image = pygame.transform.scale(load_image(\n 'Enemy_glider.png', -1), (70, 70))\n self.rect = self.image.get_rect()\n self.coords = self.rect.x, self.rect.y = random.randrange(0, \n 700, 70), 140\n self.mask = pygame.mask.from_surface(self.image)\n else:\n self.type = 'MG'\n self.hp = 4\n self.image = pygame.transform.scale(load_image(\n 'Mini_glider.png', -1), (70, 70))\n self.rect = self.image.get_rect()\n self.coords = self.rect.x, self.rect.y = random.randrange(0, \n 700, 70), 140\n self.mask = pygame.mask.from_surface(self.image)\n\n def death_check(self):\n global killed, score, coins, FPS\n if self.hp <= 0:\n killed += 1\n if self.type == 'MM':\n score += 30\n coins += 15\n FPS += 10\n elif self.type == 'EG':\n score += 15\n coins += 5\n elif self.type == 'MG':\n score += 10\n coins += 2\n self.kill()\n\n def move(self):\n self.rect.y += 1\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Board:\n <mask token>\n <mask token>\n <mask token>\n\n\nclass Bullet(pygame.sprite.Sprite):\n\n def __init__(self, enemy_sprites, x, damage, kind):\n super().__init__(bullet_sprites)\n self.damage = damage\n if kind == 'Green laser gun':\n self.image = load_image('green.png', -1)\n elif kind == 'Purple laser gun':\n self.image = load_image('purple.png', -1)\n elif kind == 'Plasma gun':\n self.image = pygame.transform.scale(load_image('plasma.png', -1\n ), (25, 25))\n self.rect = self.image.get_rect()\n self.coords = self.rect.x, self.rect.y = x + 30, 665\n self.mask = pygame.mask.from_surface(self.image)\n self.fly(enemy_sprites)\n\n def fly(self, enemy_sprites):\n if self.rect.y >= 140:\n self.rect.y -= 1\n for enemy in enemy_sprites:\n if pygame.sprite.collide_mask(enemy, self):\n self.hit(enemy)\n else:\n self.kill()\n\n def hit(self, enemy):\n enemy.hp -= self.damage\n self.kill()\n\n\nclass Weapon:\n\n def __init__(self, player, kind):\n self.kind = kind\n self.ability = None\n self.player = player\n if self.kind == 'Green laser gun':\n self.damage = 2\n self.price = 0\n elif self.kind == 'Purple laser gun':\n self.damage = 4\n self.price = 50\n elif self.kind == 'Plasma gun':\n self.damage = 8\n self.price = 150\n self.ability = 'Rage'\n\n def shoot(self, enemy_sprites):\n bullet = Bullet(enemy_sprites, self.player.rect.x, self.damage,\n self.kind)\n\n\nclass Player(pygame.sprite.Sprite):\n\n def __init__(self, group):\n super().__init__(group)\n self.weapon = Weapon(self, 'Green laser gun')\n self.image = load_image('player.jpg', -1)\n self.rect = self.image.get_rect()\n self.coords = self.rect.x, self.rect.y = 75, 635\n self.mask = pygame.mask.from_surface(self.image)\n\n def shoot(self, enemy_sprites):\n self.weapon.shoot(enemy_sprites)\n\n def move(self, side):\n x = self.rect.x\n if x < 630 and side == 'right':\n x += 70\n if x > 35 and side == 'left':\n x -= 70\n self.rect.x = x\n\n\nclass Enemy(pygame.sprite.Sprite):\n global enemies_count, MiniG_rate, EnemyG_rate, MetalM_rate\n\n def __init__(self, group):\n super().__init__(group)\n if enemies_count >= 30 and enemies_count % MetalM_rate == 0:\n self.type = 'MM'\n self.hp = 24\n self.image = pygame.transform.scale(load_image('Metal_Man.png',\n -1), (120, 140))\n self.rect = self.image.get_rect()\n self.coords = self.rect.x, self.rect.y = random.randrange(10, \n 560, 70), 140\n self.mask = pygame.mask.from_surface(self.image)\n elif enemies_count >= 15 and enemies_count % EnemyG_rate == 0:\n self.type = 'EG'\n self.hp = 6\n self.image = pygame.transform.scale(load_image(\n 'Enemy_glider.png', -1), (70, 70))\n self.rect = self.image.get_rect()\n self.coords = self.rect.x, self.rect.y = random.randrange(0, \n 700, 70), 140\n self.mask = pygame.mask.from_surface(self.image)\n else:\n self.type = 'MG'\n self.hp = 4\n self.image = pygame.transform.scale(load_image(\n 'Mini_glider.png', -1), (70, 70))\n self.rect = self.image.get_rect()\n self.coords = self.rect.x, self.rect.y = random.randrange(0, \n 700, 70), 140\n self.mask = pygame.mask.from_surface(self.image)\n\n def death_check(self):\n global killed, score, coins, FPS\n if self.hp <= 0:\n killed += 1\n if self.type == 'MM':\n score += 30\n coins += 15\n FPS += 10\n elif self.type == 'EG':\n score += 15\n coins += 5\n elif self.type == 'MG':\n score += 10\n coins += 2\n self.kill()\n\n def move(self):\n self.rect.y += 1\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef info_print():\n global score, killed, coins\n font = pygame.font.Font(None, 30)\n text_coord = 2\n pygame.draw.rect(screen, (100, 100, 100), (0, 0, 200, 100), 3)\n pygame.draw.rect(screen, (150, 150, 150), (3, 3, 194, 94), 3)\n pygame.draw.rect(screen, (250, 250, 250), (5, 5, 190, 90))\n text = [f'Счёт: {score}', f'Убито: {killed}', f'Монеты: {coins}']\n for line in text:\n string_rendered = font.render(line, 1, (50, 50, 50))\n intro_rect = string_rendered.get_rect()\n text_coord += 10\n intro_rect.top = text_coord\n intro_rect.x = 10\n text_coord += intro_rect.height\n screen.blit(string_rendered, intro_rect)\n\n\nclass Board:\n\n def __init__(self, screen, width, height):\n self.width = width\n self.height = height\n self.board = [([0] * width) for _ in range(height)]\n self.left = 0\n self.top = 0\n self.cell_size = 70\n self.screen = screen\n\n def set_view(self, left, top, cell_size):\n self.left = left\n self.top = top\n self.cell_size = cell_size\n\n def render(self):\n tp, pp = [[0, 140], [17, 105], [35, 140]], [[17, 105], [35, 140], [\n 52, 105]]\n for y in range(self.height):\n for x in range(self.width):\n if y >= 2:\n pygame.draw.rect(self.screen, (100, 100, 100), (x *\n self.cell_size, y * self.cell_size, self.cell_size,\n self.cell_size), 1)\n pygame.draw.rect(self.screen, (150, 150, 150), (x *\n self.cell_size + 1, y * self.cell_size + 1, self.\n cell_size - 2, self.cell_size - 2), 2)\n pygame.draw.rect(self.screen, (250, 250, 250), (x *\n self.cell_size + 3, y * self.cell_size + 3, self.\n cell_size - 4, self.cell_size - 4))\n for i in range(self.width * 2 - 1):\n pygame.draw.polygon(screen, (0, 230, 200), pp)\n pp[0][1] += 2\n pp[0][0] += 4\n pp[1][1] -= 3\n pp[2][1] += 2\n pp[2][0] -= 4\n pygame.draw.polygon(screen, (0, 125, 200), pp)\n pp[0][1] += 4\n pp[0][0] += 6\n pp[1][1] -= 7\n pp[2][1] += 4\n pp[2][0] -= 6\n pygame.draw.polygon(screen, (0, 230, 200), pp)\n pp[0][1] -= 6\n pp[0][0] -= 10\n pp[1][1] += 10\n pp[2][1] -= 6\n pp[2][0] += 10\n for point in pp:\n point[0] += 35\n for i in range(self.width * 2):\n pygame.draw.polygon(screen, (100, 100, 100), tp)\n tp[0][1] -= 2\n tp[0][0] += 4\n tp[1][1] += 4\n tp[2][1] -= 2\n tp[2][0] -= 4\n pygame.draw.polygon(screen, (150, 150, 150), tp)\n tp[0][1] -= 2\n tp[0][0] += 4\n tp[1][1] += 4\n tp[2][1] -= 2\n tp[2][0] -= 4\n pygame.draw.polygon(screen, (250, 250, 250), tp)\n tp[0][1] += 4\n tp[0][0] -= 8\n tp[1][1] -= 8\n tp[2][1] += 4\n tp[2][0] += 8\n for point in tp:\n point[0] += 35\n\n\nclass Bullet(pygame.sprite.Sprite):\n\n def __init__(self, enemy_sprites, x, damage, kind):\n super().__init__(bullet_sprites)\n self.damage = damage\n if kind == 'Green laser gun':\n self.image = load_image('green.png', -1)\n elif kind == 'Purple laser gun':\n self.image = load_image('purple.png', -1)\n elif kind == 'Plasma gun':\n self.image = pygame.transform.scale(load_image('plasma.png', -1\n ), (25, 25))\n self.rect = self.image.get_rect()\n self.coords = self.rect.x, self.rect.y = x + 30, 665\n self.mask = pygame.mask.from_surface(self.image)\n self.fly(enemy_sprites)\n\n def fly(self, enemy_sprites):\n if self.rect.y >= 140:\n self.rect.y -= 1\n for enemy in enemy_sprites:\n if pygame.sprite.collide_mask(enemy, self):\n self.hit(enemy)\n else:\n self.kill()\n\n def hit(self, enemy):\n enemy.hp -= self.damage\n self.kill()\n\n\nclass Weapon:\n\n def __init__(self, player, kind):\n self.kind = kind\n self.ability = None\n self.player = player\n if self.kind == 'Green laser gun':\n self.damage = 2\n 
self.price = 0\n elif self.kind == 'Purple laser gun':\n self.damage = 4\n self.price = 50\n elif self.kind == 'Plasma gun':\n self.damage = 8\n self.price = 150\n self.ability = 'Rage'\n\n def shoot(self, enemy_sprites):\n bullet = Bullet(enemy_sprites, self.player.rect.x, self.damage,\n self.kind)\n\n\nclass Player(pygame.sprite.Sprite):\n\n def __init__(self, group):\n super().__init__(group)\n self.weapon = Weapon(self, 'Green laser gun')\n self.image = load_image('player.jpg', -1)\n self.rect = self.image.get_rect()\n self.coords = self.rect.x, self.rect.y = 75, 635\n self.mask = pygame.mask.from_surface(self.image)\n\n def shoot(self, enemy_sprites):\n self.weapon.shoot(enemy_sprites)\n\n def move(self, side):\n x = self.rect.x\n if x < 630 and side == 'right':\n x += 70\n if x > 35 and side == 'left':\n x -= 70\n self.rect.x = x\n\n\nclass Enemy(pygame.sprite.Sprite):\n global enemies_count, MiniG_rate, EnemyG_rate, MetalM_rate\n\n def __init__(self, group):\n super().__init__(group)\n if enemies_count >= 30 and enemies_count % MetalM_rate == 0:\n self.type = 'MM'\n self.hp = 24\n self.image = pygame.transform.scale(load_image('Metal_Man.png',\n -1), (120, 140))\n self.rect = self.image.get_rect()\n self.coords = self.rect.x, self.rect.y = random.randrange(10, \n 560, 70), 140\n self.mask = pygame.mask.from_surface(self.image)\n elif enemies_count >= 15 and enemies_count % EnemyG_rate == 0:\n self.type = 'EG'\n self.hp = 6\n self.image = pygame.transform.scale(load_image(\n 'Enemy_glider.png', -1), (70, 70))\n self.rect = self.image.get_rect()\n self.coords = self.rect.x, self.rect.y = random.randrange(0, \n 700, 70), 140\n self.mask = pygame.mask.from_surface(self.image)\n else:\n self.type = 'MG'\n self.hp = 4\n self.image = pygame.transform.scale(load_image(\n 'Mini_glider.png', -1), (70, 70))\n self.rect = self.image.get_rect()\n self.coords = self.rect.x, self.rect.y = random.randrange(0, \n 700, 70), 140\n self.mask = pygame.mask.from_surface(self.image)\n\n def death_check(self):\n global killed, score, coins, FPS\n if self.hp <= 0:\n killed += 1\n if self.type == 'MM':\n score += 30\n coins += 15\n FPS += 10\n elif self.type == 'EG':\n score += 15\n coins += 5\n elif self.type == 'MG':\n score += 10\n coins += 2\n self.kill()\n\n def move(self):\n self.rect.y += 1\n\n\ndef game_over():\n global FPS, not_paused, score, killed, coins\n\n def text_print():\n game_over = ' GAME OVER'\n intro_text = ['', 'Нажми клавишу A', 'чтобы сыграть еще раз', '',\n 'Нажми на кнопку \"Esc\", ', 'чтобы выйти из игры',\n f'Счёт: {score}', f'Убито: {killed}', f'Монеты: {coins}']\n fon = pygame.transform.scale(load_image('fon.jpg'), (width, height))\n screen.blit(fon, (0, 0))\n font = pygame.font.Font(None, 50)\n text_coord = 40\n string_rendered = font.render(game_over, 1, pygame.Color('white'))\n intro_rect = string_rendered.get_rect()\n text_coord += 10\n intro_rect.top = text_coord\n intro_rect.x = 10\n text_coord += intro_rect.height\n screen.blit(string_rendered, intro_rect)\n font = pygame.font.Font(None, 30)\n for line in intro_text:\n string_rendered = font.render(line, 1, pygame.Color('white'))\n intro_rect = string_rendered.get_rect()\n text_coord += 10\n intro_rect.top = text_coord\n intro_rect.x = 10\n text_coord += intro_rect.height\n intro_rect.x += 10\n screen.blit(string_rendered, intro_rect)\n FPS = 30\n pygame.mouse.set_visible(True)\n text_print()\n while True:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n terminate()\n elif event.type == pygame.KEYDOWN 
or event.type == pygame.MOUSEBUTTONDOWN:\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n terminate()\n if event.key == 97:\n pygame.quit()\n subprocess.call('python' + ' проект.py', shell=True)\n if not_paused:\n pygame.display.flip()\n clock.tick(FPS)\n terminate()\n\n\ndef terminate():\n pygame.quit()\n sys.exit()\n\n\n<mask token>\n",
"step-5": "import sys\r\nimport pygame\r\nimport os\r\nimport random\r\nimport subprocess\r\n\r\nFPS, NEWENEMYSPAWN, fst_spawn, not_paused, coins, enemies_count, killed, score = 50, 30, 2000, True, 0, 0, 0, 0\r\nMiniG_rate, EnemyG_rate, MetalM_rate = 1, 5, 15\r\nWEAPONS_LIST = ['Green laser gun', 'Purple laser gun', 'Plasma gun']\r\n\r\n\r\ndef load_image(name, colorkey=None):\r\n fullname = os.path.join('data', name)\r\n image = pygame.image.load(fullname).convert()\r\n if colorkey is not None:\r\n if colorkey == -1:\r\n colorkey = image.get_at((0, 0))\r\n image.set_colorkey(colorkey)\r\n else:\r\n image = image.convert_alpha()\r\n return image\r\n\r\n\r\ndef info_print():\r\n global score, killed, coins\r\n\r\n font = pygame.font.Font(None, 30)\r\n text_coord = 2\r\n pygame.draw.rect(screen, (100, 100, 100), (0, 0, 200, 100), 3)\r\n pygame.draw.rect(screen, (150, 150, 150), (3, 3, 194, 94), 3)\r\n pygame.draw.rect(screen, (250, 250, 250), (5, 5, 190, 90))\r\n text = [f'Счёт: {score}',\r\n f'Убито: {killed}',\r\n f'Монеты: {coins}']\r\n for line in text:\r\n string_rendered = font.render(line, 1, (50, 50, 50))\r\n intro_rect = string_rendered.get_rect()\r\n text_coord += 10\r\n intro_rect.top = text_coord\r\n intro_rect.x = 10\r\n text_coord += intro_rect.height\r\n screen.blit(string_rendered, intro_rect)\r\n\r\n\r\nclass Board:\r\n\r\n def __init__(self, screen, width, height):\r\n self.width = width\r\n self.height = height\r\n self.board = [[0] * width for _ in range(height)]\r\n self.left = 0\r\n self.top = 0\r\n self.cell_size = 70\r\n self.screen = screen\r\n\r\n def set_view(self, left, top, cell_size):\r\n self.left = left\r\n self.top = top\r\n self.cell_size = cell_size\r\n\r\n def render(self):\r\n tp, pp = [[0, 140], [17, 105], [35, 140]], [[17, 105], [35, 140], [52, 105]]\r\n for y in range(self.height):\r\n for x in range(self.width):\r\n if y >= 2:\r\n pygame.draw.rect(self.screen, (100, 100, 100), (\r\n x * self.cell_size, y * self.cell_size, self.cell_size, self.cell_size),\r\n 1)\r\n pygame.draw.rect(self.screen, (150, 150, 150), (\r\n x * self.cell_size + 1, y * self.cell_size + 1, self.cell_size - 2,\r\n self.cell_size - 2), 2)\r\n pygame.draw.rect(self.screen, (250, 250, 250), (\r\n x * self.cell_size + 3, y * self.cell_size + 3, self.cell_size - 4,\r\n self.cell_size - 4))\r\n for i in range(self.width * 2 - 1):\r\n pygame.draw.polygon(screen, (0, 230, 200), pp)\r\n pp[0][1] += 2\r\n pp[0][0] += 4\r\n pp[1][1] -= 3\r\n pp[2][1] += 2\r\n pp[2][0] -= 4\r\n pygame.draw.polygon(screen, (0, 125, 200), pp)\r\n pp[0][1] += 4\r\n pp[0][0] += 6\r\n pp[1][1] -= 7\r\n pp[2][1] += 4\r\n pp[2][0] -= 6\r\n pygame.draw.polygon(screen, (0, 230, 200), pp)\r\n pp[0][1] -= 6\r\n pp[0][0] -= 10\r\n pp[1][1] += 10\r\n pp[2][1] -= 6\r\n pp[2][0] += 10\r\n for point in pp:\r\n point[0] += 35\r\n for i in range(self.width * 2):\r\n pygame.draw.polygon(screen, (100, 100, 100), tp)\r\n tp[0][1] -= 2\r\n tp[0][0] += 4\r\n tp[1][1] += 4\r\n tp[2][1] -= 2\r\n tp[2][0] -= 4\r\n pygame.draw.polygon(screen, (150, 150, 150), tp)\r\n tp[0][1] -= 2\r\n tp[0][0] += 4\r\n tp[1][1] += 4\r\n tp[2][1] -= 2\r\n tp[2][0] -= 4\r\n pygame.draw.polygon(screen, (250, 250, 250), tp)\r\n tp[0][1] += 4\r\n tp[0][0] -= 8\r\n tp[1][1] -= 8\r\n tp[2][1] += 4\r\n tp[2][0] += 8\r\n for point in tp:\r\n point[0] += 35\r\n\r\n\r\nclass Bullet(pygame.sprite.Sprite):\r\n\r\n def __init__(self, enemy_sprites, x, damage, kind):\r\n super().__init__(bullet_sprites)\r\n self.damage = damage\r\n if kind == 'Green laser 
gun':\r\n self.image = load_image(\"green.png\", -1)\r\n elif kind == 'Purple laser gun':\r\n self.image = load_image(\"purple.png\", -1)\r\n elif kind == 'Plasma gun':\r\n self.image = pygame.transform.scale(load_image(\"plasma.png\", -1), (25, 25))\r\n self.rect = self.image.get_rect()\r\n self.coords = self.rect.x, self.rect.y = x + 30, 665\r\n self.mask = pygame.mask.from_surface(self.image)\r\n self.fly(enemy_sprites)\r\n\r\n def fly(self, enemy_sprites):\r\n if self.rect.y >= 140:\r\n self.rect.y -= 1\r\n for enemy in enemy_sprites:\r\n if pygame.sprite.collide_mask(enemy, self):\r\n self.hit(enemy)\r\n else:\r\n self.kill()\r\n\r\n def hit(self, enemy):\r\n enemy.hp -= self.damage\r\n self.kill()\r\n\r\n\r\nclass Weapon:\r\n\r\n def __init__(self, player, kind):\r\n self.kind = kind\r\n self.ability = None\r\n self.player = player\r\n if self.kind == 'Green laser gun':\r\n self.damage = 2\r\n self.price = 0\r\n elif self.kind == 'Purple laser gun':\r\n self.damage = 4\r\n self.price = 50\r\n elif self.kind == 'Plasma gun':\r\n self.damage = 8\r\n self.price = 150\r\n self.ability = 'Rage'\r\n\r\n def shoot(self, enemy_sprites):\r\n bullet = Bullet(enemy_sprites, self.player.rect.x, self.damage, self.kind)\r\n\r\n\r\nclass Player(pygame.sprite.Sprite):\r\n\r\n def __init__(self, group):\r\n super().__init__(group)\r\n self.weapon = Weapon(self, 'Green laser gun')\r\n self.image = load_image(\"player.jpg\", -1)\r\n self.rect = self.image.get_rect()\r\n self.coords = self.rect.x, self.rect.y = 75, 635\r\n self.mask = pygame.mask.from_surface(self.image)\r\n\r\n def shoot(self, enemy_sprites):\r\n self.weapon.shoot(enemy_sprites)\r\n\r\n def move(self, side):\r\n x = self.rect.x\r\n if x < 630 and side == 'right':\r\n x += 70\r\n if x > 35 and side == 'left':\r\n x -= 70\r\n self.rect.x = x\r\n\r\n\r\nclass Enemy(pygame.sprite.Sprite):\r\n global enemies_count, MiniG_rate, EnemyG_rate, MetalM_rate\r\n\r\n def __init__(self, group):\r\n super().__init__(group)\r\n if enemies_count >= 30 and enemies_count % MetalM_rate == 0:\r\n self.type = 'MM'\r\n self.hp = 24\r\n self.image = pygame.transform.scale(load_image(\"Metal_Man.png\", -1), (120, 140))\r\n self.rect = self.image.get_rect()\r\n self.coords = self.rect.x, self.rect.y = random.randrange(10, 560, 70), 140\r\n self.mask = pygame.mask.from_surface(self.image)\r\n elif enemies_count >= 15 and enemies_count % EnemyG_rate == 0:\r\n self.type = 'EG'\r\n self.hp = 6\r\n self.image = pygame.transform.scale(load_image('Enemy_glider.png', -1), (70, 70))\r\n self.rect = self.image.get_rect()\r\n self.coords = self.rect.x, self.rect.y = random.randrange(0, 700, 70), 140\r\n self.mask = pygame.mask.from_surface(self.image)\r\n else:\r\n self.type = 'MG'\r\n self.hp = 4\r\n self.image = pygame.transform.scale(load_image('Mini_glider.png', -1), (70, 70))\r\n self.rect = self.image.get_rect()\r\n self.coords = self.rect.x, self.rect.y = random.randrange(0, 700, 70), 140\r\n self.mask = pygame.mask.from_surface(self.image)\r\n\r\n def death_check(self):\r\n global killed, score, coins, FPS\r\n\r\n if self.hp <= 0:\r\n killed += 1\r\n if self.type == 'MM':\r\n score += 30\r\n coins += 15\r\n FPS += 10\r\n elif self.type == 'EG':\r\n score += 15\r\n coins += 5\r\n elif self.type == 'MG':\r\n score += 10\r\n coins += 2\r\n self.kill()\r\n\r\n def move(self):\r\n self.rect.y += 1\r\n\r\n\r\ndef game_over():\r\n global FPS, not_paused, score, killed, coins\r\n\r\n def text_print():\r\n game_over = ' GAME OVER'\r\n intro_text = [\"\",\r\n \"Нажми 
клавишу A\",\r\n \"чтобы сыграть еще раз\",\r\n '',\r\n 'Нажми на кнопку \"Esc\", ',\r\n 'чтобы выйти из игры',\r\n f'Счёт: {score}',\r\n f'Убито: {killed}',\r\n f'Монеты: {coins}']\r\n\r\n fon = pygame.transform.scale(load_image('fon.jpg'), (width, height))\r\n screen.blit(fon, (0, 0))\r\n font = pygame.font.Font(None, 50)\r\n text_coord = 40\r\n string_rendered = font.render(game_over, 1, pygame.Color('white'))\r\n intro_rect = string_rendered.get_rect()\r\n text_coord += 10\r\n intro_rect.top = text_coord\r\n intro_rect.x = 10\r\n text_coord += intro_rect.height\r\n screen.blit(string_rendered, intro_rect)\r\n font = pygame.font.Font(None, 30)\r\n for line in intro_text:\r\n string_rendered = font.render(line, 1, pygame.Color('white'))\r\n intro_rect = string_rendered.get_rect()\r\n text_coord += 10\r\n intro_rect.top = text_coord\r\n intro_rect.x = 10\r\n text_coord += intro_rect.height\r\n intro_rect.x += 10\r\n screen.blit(string_rendered, intro_rect)\r\n\r\n FPS = 30\r\n pygame.mouse.set_visible(True)\r\n text_print()\r\n while True:\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n terminate()\r\n elif event.type == pygame.KEYDOWN or event.type == pygame.MOUSEBUTTONDOWN:\r\n if event.type == pygame.KEYDOWN:\r\n if event.key == pygame.K_ESCAPE:\r\n terminate()\r\n if event.key == 97:\r\n pygame.quit()\r\n subprocess.call(\"python\" + \" проект.py\", shell=True)\r\n if not_paused:\r\n pygame.display.flip()\r\n clock.tick(FPS)\r\n terminate()\r\n\r\n\r\ndef terminate():\r\n pygame.quit()\r\n sys.exit()\r\n\r\n\r\ndef start_screen(screen, width, height):\r\n global FPS, not_paused\r\n\r\n def text_print():\r\n intro_text = [\" SPACE SOLDIER\", \"\",\r\n \" Нажми любую клавишу,\",\r\n \" чтобы начать игру\",\r\n ' Нажимай на кнопки стрелок, чтобы перемещать персонажа',\r\n ' Не дай врагу пролететь мимо тебя!',\r\n ' Нажми на кнопку \"Esc\", ',\r\n ' чтобы открыть меню паузы',\r\n ' или попасть в магазин']\r\n\r\n fon = pygame.transform.scale(load_image('fon.jpg'), (width, height))\r\n font = pygame.font.Font(None, 30)\r\n text_coord = 50\r\n screen.blit(fon, (0, 0))\r\n for line in intro_text:\r\n string_rendered = font.render(line, 1, pygame.Color('black'))\r\n intro_rect = string_rendered.get_rect()\r\n text_coord += 10\r\n intro_rect.top = text_coord\r\n intro_rect.x = 10\r\n text_coord += intro_rect.height\r\n screen.blit(string_rendered, intro_rect)\r\n\r\n pygame.mouse.set_visible(True)\r\n text_print()\r\n while True:\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n terminate()\r\n elif event.type == pygame.KEYDOWN or event.type == pygame.MOUSEBUTTONDOWN:\r\n if event.type == pygame.KEYDOWN:\r\n if event.key == pygame.K_ESCAPE:\r\n pause_menu(screen, width, height)\r\n text_print()\r\n else:\r\n pygame.mouse.set_visible(False)\r\n return\r\n if not_paused:\r\n pygame.display.flip()\r\n clock.tick(FPS)\r\n terminate()\r\n\r\n\r\ndef pause_menu(screen, width, height):\r\n global FPS, not_paused\r\n\r\n def text_print():\r\n intro_text = [\"Нажми на кнопку 'S',\",\r\n \"чтобы открыть магазин\",\r\n '',\r\n \"Нажми на кнопку 'C',\",\r\n \"чтобы продолжжить игру\",\r\n '',\r\n \"УПРАВЛЕНИЕ\",\r\n '',\r\n 'Нажимай на кнопки стрелок, чтобы перемещать персонажа',\r\n '',\r\n 'Не дай врагу пролететь мимо тебя!',\r\n '',\r\n 'Нажми на кнопку \"Esc\", ',\r\n 'чтобы закрыть меню паузы']\r\n\r\n fon = pygame.transform.scale(load_image('fon.jpg'), (width, height))\r\n font = pygame.font.Font(None, 30)\r\n text_coord = 50\r\n screen.blit(fon, 
(0, 0))\r\n for line in intro_text:\r\n string_rendered = font.render(line, 1, pygame.Color('black'))\r\n intro_rect = string_rendered.get_rect()\r\n text_coord += 10\r\n intro_rect.top = text_coord\r\n intro_rect.x = 10\r\n text_coord += intro_rect.height\r\n screen.blit(string_rendered, intro_rect)\r\n\r\n pygame.mouse.set_visible(True)\r\n fon = pygame.transform.scale(load_image('fon.jpg'), (width, height))\r\n screen.blit(fon, (0, 0))\r\n text_print()\r\n while True:\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n terminate()\r\n if event.type == pygame.KEYDOWN:\r\n if event.key == pygame.K_ESCAPE:\r\n not_paused = True\r\n pygame.mouse.set_visible(False)\r\n return\r\n if event.key == 115:\r\n shop(screen, width, height)\r\n if event.key == 99:\r\n return\r\n pygame.display.flip()\r\n clock.tick(FPS)\r\n terminate()\r\n\r\n\r\ndef shop(screen, width, height):\r\n global FPS, not_paused, WEAPONS_LIST, coins\r\n\r\n def text_print():\r\n intro_text = [\" Нажми на кнопку 'U',\",\r\n \"чтобы улучшить свое оружие\",\r\n 'Нажми на кнопку \"Esc\", ',\r\n 'чтобы выйти из магазина', '',\r\n 'Текущее оружие:',\r\n f'{player.weapon.kind}',\r\n 'Наносимый урон:',\r\n f'{player.weapon.damage}',\r\n 'Следующее улучшение:',\r\n f'{next_weapon}',\r\n 'Урон:',\r\n f'{next_damage}',\r\n 'Стоимость:',\r\n f'{next_price}',\r\n 'Ваши монеты:',\r\n f'{coins}']\r\n\r\n fon = pygame.transform.scale(load_image('fon.jpg'), (width, height))\r\n font = pygame.font.Font(None, 30)\r\n text_coord = 50\r\n screen.blit(fon, (0, 0))\r\n for line in intro_text:\r\n string_rendered = font.render(line, 1, pygame.Color('black'))\r\n intro_rect = string_rendered.get_rect()\r\n text_coord += 10\r\n intro_rect.top = text_coord\r\n intro_rect.x = 10\r\n text_coord += intro_rect.height\r\n screen.blit(string_rendered, intro_rect)\r\n\r\n if player.weapon.kind != 'Plasma gun':\r\n next_weapon = WEAPONS_LIST[WEAPONS_LIST.index(player.weapon.kind) + 1]\r\n if next_weapon == 'Purple laser gun':\r\n next_damage = 4\r\n next_price = 50\r\n else:\r\n next_damage = 6\r\n next_price = 150\r\n else:\r\n next_weapon = 'Вы имеете лучшее оружие'\r\n next_damage = 'Наносимый урон максимальный'\r\n next_price = 'Покупать больше нечего'\r\n\r\n pygame.mouse.set_visible(True)\r\n fon = pygame.transform.scale(load_image('fon.jpg'), (width, height))\r\n screen.blit(fon, (0, 0))\r\n text_print()\r\n while True:\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n terminate()\r\n if event.type == pygame.KEYDOWN:\r\n if event.key == pygame.K_ESCAPE:\r\n pygame.mouse.set_visible(False)\r\n screen.blit(fon, (0, 0))\r\n return\r\n if event.key == 117 and player.weapon.kind != 'Plasma gun' and coins >= next_price:\r\n coins -= next_price\r\n player.weapon = Weapon(player, WEAPONS_LIST[WEAPONS_LIST.index(player.weapon.kind) + 1])\r\n pygame.display.flip()\r\n clock.tick(FPS)\r\n terminate()\r\n\r\n\r\npygame.init()\r\nsize = width, height = 700, 700\r\nscreen = pygame.display.set_mode(size)\r\npygame.display.set_caption('SPACE SOLDIER')\r\npygame.display.set_icon(load_image(\"icon.png\", -1))\r\nfon1 = pygame.transform.scale(load_image('fon1.png'), (700, 400))\r\nboard = Board(screen, 10, 10)\r\npygame.mouse.set_visible(True)\r\nenemy_sprites = pygame.sprite.Group()\r\nplayer_sprites = pygame.sprite.Group()\r\nbullet_sprites = pygame.sprite.Group()\r\nplayer = Player(player_sprites)\r\nenemy_li = [Enemy(enemy_sprites)]\r\nclock = pygame.time.Clock()\r\nstart_screen(screen, width, 
height)\r\npygame.time.set_timer(NEWENEMYSPAWN, fst_spawn)\r\nwhile True:\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n terminate()\r\n if event.type == pygame.KEYDOWN:\r\n if event.key == pygame.K_ESCAPE:\r\n not_paused = False\r\n pause_menu(screen, width, height)\r\n if not_paused:\r\n if event.key == 275:\r\n player.move('right')\r\n elif event.key == 276:\r\n player.move('left')\r\n if event.key == 115:\r\n player.shoot(enemy_sprites)\r\n if not_paused and event.type == NEWENEMYSPAWN:\r\n enemy_li.append(Enemy(enemy_sprites))\r\n enemies_count += 1\r\n\r\n if not_paused:\r\n screen.blit(fon1, (0, 0))\r\n board.render()\r\n player_sprites.draw(screen)\r\n enemy_sprites.draw(screen)\r\n bullet_sprites.draw(screen)\r\n for enemy in enemy_sprites:\r\n if enemy.type != 'MM':\r\n lim = 630\r\n else:\r\n lim = 560\r\n if enemy.rect.y <= lim:\r\n enemy.move()\r\n else:\r\n game_over()\r\n for bullet in bullet_sprites:\r\n bullet.fly(enemy_sprites)\r\n enemy.death_check()\r\n info_print()\r\n pygame.display.flip()\r\n clock.tick(FPS)\r\nterminate()\r\n",
"step-ids": [
7,
13,
16,
22,
30
]
}
|
[
7,
13,
16,
22,
30
] |
<|reserved_special_token_0|>
class Version(object):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __init__(self, soft):
"""
Constructor that takes software name
"""
self.soft = soft
self.app_dir = os.environ.get('APP_DIR')
if self.app_dir is None:
self.app_dir = '/opt'
self.sudo = True
if os.access(self.app_dir, os.W_OK):
self.sudo = False
self.soft_root = os.path.join(self.app_dir, self.soft)
self.soft_paths = sorted(glob.glob(self.soft_root + '/[0-9]*'))
self.versions = [v[len(self.soft_root) + 1:] for v in self.soft_paths]
path = os.path.realpath('%s/current' % self.soft_root)
self.current_version = path[path.rindex(os.path.sep) + 1:]
def set_version(self, index):
"""
Set software version by index
"""
sudo = 'sudo ' if self.sudo else ''
old_dir = 'current'
if index == -1:
print('Selecting system version')
if os.path.exists(os.path.join(self.soft_root, old_dir)):
os.system('cd %s && %srm %s' % (self.soft_root, sudo, old_dir))
else:
print("Selecting %s version '%s'" % (self.soft, self.versions[
index]))
directory = self.versions[index]
if os.path.exists(os.path.join(self.soft_root, old_dir)):
os.system('cd %s && %srm %s' % (self.soft_root, sudo, old_dir))
os.system('cd %s && %sln -s %s %s' % (self.soft_root, sudo,
directory, old_dir))
def ask_version(self):
"""
Prompt user for software version in the list of installed versions
"""
print('Please choose a version:')
index = 1
if self.current_version == 'current':
selected = self.SELECTED
else:
selected = ''
print('0: System' + selected)
for version in self.soft_paths:
number = version[len(self.soft_root) + 1:]
if number == self.current_version:
selected = self.SELECTED
else:
selected = ''
print(str(index) + ': ' + str(number) + selected)
index += 1
chosen = None
maximum = len(self.soft_paths)
while not chosen:
try:
choice = input()
except KeyboardInterrupt:
print('\nUser abort!')
sys.exit(0)
if re.match('\\d+', choice) and int(choice) <= maximum and int(
choice) >= 0:
index = int(choice) - 1
chosen = True
elif choice == '':
print('Keeping current')
sys.exit(0)
else:
print(
'Bad version, please choose a number between 0 and %s' %
str(maximum))
return index
@staticmethod
def run():
"""
Read software name on command line and run version selection
"""
try:
opts, args = getopt.getopt(sys.argv[1:], 'h', ['help'])
except getopt.GetoptError as exception:
print('Error parsing command line: %s' % exception)
print(Version.HELP)
sys.exit(1)
for option, _ in opts:
if option in ('-h', '--help'):
print(Version.HELP)
sys.exit(0)
else:
print("Error parsing command line: Unhandled option '%s'" %
option)
print(Version.HELP)
sys.exit(2)
if len(args) != 1:
print('Error parsing command line: You must pass software')
print(Version.HELP)
sys.exit(1)
soft = args[0]
version = Version(soft)
version.set_version(version.ask_version())
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Version(object):
<|reserved_special_token_0|>
HELP = """version [-h] software
Select software version in a menu:
-h To print this help screen.
software Software version to choose."""
SELECTED = ' *'
def __init__(self, soft):
"""
Constructor that takes software name
"""
self.soft = soft
self.app_dir = os.environ.get('APP_DIR')
if self.app_dir is None:
self.app_dir = '/opt'
self.sudo = True
if os.access(self.app_dir, os.W_OK):
self.sudo = False
self.soft_root = os.path.join(self.app_dir, self.soft)
self.soft_paths = sorted(glob.glob(self.soft_root + '/[0-9]*'))
self.versions = [v[len(self.soft_root) + 1:] for v in self.soft_paths]
path = os.path.realpath('%s/current' % self.soft_root)
self.current_version = path[path.rindex(os.path.sep) + 1:]
def set_version(self, index):
"""
Set software version by index
"""
sudo = 'sudo ' if self.sudo else ''
old_dir = 'current'
if index == -1:
print('Selecting system version')
if os.path.exists(os.path.join(self.soft_root, old_dir)):
os.system('cd %s && %srm %s' % (self.soft_root, sudo, old_dir))
else:
print("Selecting %s version '%s'" % (self.soft, self.versions[
index]))
directory = self.versions[index]
if os.path.exists(os.path.join(self.soft_root, old_dir)):
os.system('cd %s && %srm %s' % (self.soft_root, sudo, old_dir))
os.system('cd %s && %sln -s %s %s' % (self.soft_root, sudo,
directory, old_dir))
def ask_version(self):
"""
Prompt user for software version in the list of installed versions
"""
print('Please choose a version:')
index = 1
if self.current_version == 'current':
selected = self.SELECTED
else:
selected = ''
print('0: System' + selected)
for version in self.soft_paths:
number = version[len(self.soft_root) + 1:]
if number == self.current_version:
selected = self.SELECTED
else:
selected = ''
print(str(index) + ': ' + str(number) + selected)
index += 1
chosen = None
maximum = len(self.soft_paths)
while not chosen:
try:
choice = input()
except KeyboardInterrupt:
print('\nUser abort!')
sys.exit(0)
if re.match('\\d+', choice) and int(choice) <= maximum and int(
choice) >= 0:
index = int(choice) - 1
chosen = True
elif choice == '':
print('Keeping current')
sys.exit(0)
else:
print(
'Bad version, please choose a number between 0 and %s' %
str(maximum))
return index
@staticmethod
def run():
"""
Read software name on command line and run version selection
"""
try:
opts, args = getopt.getopt(sys.argv[1:], 'h', ['help'])
except getopt.GetoptError as exception:
print('Error parsing command line: %s' % exception)
print(Version.HELP)
sys.exit(1)
for option, _ in opts:
if option in ('-h', '--help'):
print(Version.HELP)
sys.exit(0)
else:
print("Error parsing command line: Unhandled option '%s'" %
option)
print(Version.HELP)
sys.exit(2)
if len(args) != 1:
print('Error parsing command line: You must pass software')
print(Version.HELP)
sys.exit(1)
soft = args[0]
version = Version(soft)
version.set_version(version.ask_version())
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Version(object):
"""
Software management class
"""
HELP = """version [-h] software
Select software version in a menu:
-h To print this help screen.
software Software version to choose."""
SELECTED = ' *'
def __init__(self, soft):
"""
Constructor that takes software name
"""
self.soft = soft
self.app_dir = os.environ.get('APP_DIR')
if self.app_dir is None:
self.app_dir = '/opt'
self.sudo = True
if os.access(self.app_dir, os.W_OK):
self.sudo = False
self.soft_root = os.path.join(self.app_dir, self.soft)
self.soft_paths = sorted(glob.glob(self.soft_root + '/[0-9]*'))
self.versions = [v[len(self.soft_root) + 1:] for v in self.soft_paths]
path = os.path.realpath('%s/current' % self.soft_root)
self.current_version = path[path.rindex(os.path.sep) + 1:]
def set_version(self, index):
"""
Set software version by index
"""
sudo = 'sudo ' if self.sudo else ''
old_dir = 'current'
if index == -1:
print('Selecting system version')
if os.path.exists(os.path.join(self.soft_root, old_dir)):
os.system('cd %s && %srm %s' % (self.soft_root, sudo, old_dir))
else:
print("Selecting %s version '%s'" % (self.soft, self.versions[
index]))
directory = self.versions[index]
if os.path.exists(os.path.join(self.soft_root, old_dir)):
os.system('cd %s && %srm %s' % (self.soft_root, sudo, old_dir))
os.system('cd %s && %sln -s %s %s' % (self.soft_root, sudo,
directory, old_dir))
def ask_version(self):
"""
Prompt user for software version in the list of installed versions
"""
print('Please choose a version:')
index = 1
if self.current_version == 'current':
selected = self.SELECTED
else:
selected = ''
print('0: System' + selected)
for version in self.soft_paths:
number = version[len(self.soft_root) + 1:]
if number == self.current_version:
selected = self.SELECTED
else:
selected = ''
print(str(index) + ': ' + str(number) + selected)
index += 1
chosen = None
maximum = len(self.soft_paths)
while not chosen:
try:
choice = input()
except KeyboardInterrupt:
print('\nUser abort!')
sys.exit(0)
if re.match('\\d+', choice) and int(choice) <= maximum and int(
choice) >= 0:
index = int(choice) - 1
chosen = True
elif choice == '':
print('Keeping current')
sys.exit(0)
else:
print(
'Bad version, please choose a number between 0 and %s' %
str(maximum))
return index
@staticmethod
def run():
"""
Read software name on command line and run version selection
"""
try:
opts, args = getopt.getopt(sys.argv[1:], 'h', ['help'])
except getopt.GetoptError as exception:
print('Error parsing command line: %s' % exception)
print(Version.HELP)
sys.exit(1)
for option, _ in opts:
if option in ('-h', '--help'):
print(Version.HELP)
sys.exit(0)
else:
print("Error parsing command line: Unhandled option '%s'" %
option)
print(Version.HELP)
sys.exit(2)
if len(args) != 1:
print('Error parsing command line: You must pass software')
print(Version.HELP)
sys.exit(1)
soft = args[0]
version = Version(soft)
version.set_version(version.ask_version())
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
try:
input = raw_input
except NameError:
pass
class Version(object):
"""
Software management class
"""
HELP = """version [-h] software
Select software version in a menu:
-h To print this help screen.
software Software version to choose."""
SELECTED = ' *'
def __init__(self, soft):
"""
Constructor that takes software name
"""
self.soft = soft
self.app_dir = os.environ.get('APP_DIR')
if self.app_dir is None:
self.app_dir = '/opt'
self.sudo = True
if os.access(self.app_dir, os.W_OK):
self.sudo = False
self.soft_root = os.path.join(self.app_dir, self.soft)
self.soft_paths = sorted(glob.glob(self.soft_root + '/[0-9]*'))
self.versions = [v[len(self.soft_root) + 1:] for v in self.soft_paths]
path = os.path.realpath('%s/current' % self.soft_root)
self.current_version = path[path.rindex(os.path.sep) + 1:]
def set_version(self, index):
"""
Set software version by index
"""
sudo = 'sudo ' if self.sudo else ''
old_dir = 'current'
if index == -1:
print('Selecting system version')
if os.path.exists(os.path.join(self.soft_root, old_dir)):
os.system('cd %s && %srm %s' % (self.soft_root, sudo, old_dir))
else:
print("Selecting %s version '%s'" % (self.soft, self.versions[
index]))
directory = self.versions[index]
if os.path.exists(os.path.join(self.soft_root, old_dir)):
os.system('cd %s && %srm %s' % (self.soft_root, sudo, old_dir))
os.system('cd %s && %sln -s %s %s' % (self.soft_root, sudo,
directory, old_dir))
def ask_version(self):
"""
Prompt user for software version in the list of installed versions
"""
print('Please choose a version:')
index = 1
if self.current_version == 'current':
selected = self.SELECTED
else:
selected = ''
print('0: System' + selected)
for version in self.soft_paths:
number = version[len(self.soft_root) + 1:]
if number == self.current_version:
selected = self.SELECTED
else:
selected = ''
print(str(index) + ': ' + str(number) + selected)
index += 1
chosen = None
maximum = len(self.soft_paths)
while not chosen:
try:
choice = input()
except KeyboardInterrupt:
print('\nUser abort!')
sys.exit(0)
if re.match('\\d+', choice) and int(choice) <= maximum and int(
choice) >= 0:
index = int(choice) - 1
chosen = True
elif choice == '':
print('Keeping current')
sys.exit(0)
else:
print(
'Bad version, please choose a number between 0 and %s' %
str(maximum))
return index
@staticmethod
def run():
"""
Read software name on command line and run version selection
"""
try:
opts, args = getopt.getopt(sys.argv[1:], 'h', ['help'])
except getopt.GetoptError as exception:
print('Error parsing command line: %s' % exception)
print(Version.HELP)
sys.exit(1)
for option, _ in opts:
if option in ('-h', '--help'):
print(Version.HELP)
sys.exit(0)
else:
print("Error parsing command line: Unhandled option '%s'" %
option)
print(Version.HELP)
sys.exit(2)
if len(args) != 1:
print('Error parsing command line: You must pass software')
print(Version.HELP)
sys.exit(1)
soft = args[0]
version = Version(soft)
version.set_version(version.ask_version())
if __name__ == '__main__':
Version.run()
<|reserved_special_token_1|>
#!/usr/bin/env python
# encoding: UTF-8
'''
Script to select current version for a given soft (python, ruby or java).
'''
import os
import re
import sys
import glob
import getopt
# fix input in Python 2 and 3
try:
input = raw_input # pylint: disable=redefined-builtin,invalid-name
except NameError:
pass
class Version(object): # pylint: disable=useless-object-inheritance
'''
Software management class
'''
HELP = '''version [-h] software
Select software version in a menu:
-h To print this help screen.
software Software version to choose.'''
SELECTED = ' *'
def __init__(self, soft):
'''
Constructor that takes software name
'''
self.soft = soft
self.app_dir = os.environ.get('APP_DIR')
if self.app_dir is None:
self.app_dir = '/opt'
self.sudo = True
if os.access(self.app_dir, os.W_OK):
self.sudo = False
self.soft_root = os.path.join(self.app_dir, self.soft)
self.soft_paths = sorted(glob.glob(self.soft_root+'/[0-9]*'))
self.versions = [v[len(self.soft_root)+1:] for v in self.soft_paths]
path = os.path.realpath("%s/current" % self.soft_root)
self.current_version = path[path.rindex(os.path.sep)+1:]
def set_version(self, index):
'''
Set software version by index
'''
sudo = 'sudo ' if self.sudo else ''
old_dir = "current"
if index == -1:
print("Selecting system version")
if os.path.exists(os.path.join(self.soft_root, old_dir)):
os.system("cd %s && %srm %s" % (self.soft_root, sudo, old_dir))
else:
print("Selecting %s version '%s'" %
(self.soft, self.versions[index]))
directory = self.versions[index]
if os.path.exists(os.path.join(self.soft_root, old_dir)):
os.system("cd %s && %srm %s" % (self.soft_root, sudo, old_dir))
os.system("cd %s && %sln -s %s %s" % (self.soft_root, sudo, directory, old_dir))
def ask_version(self):
'''
Prompt user for software version in the list of installed versions
'''
# print version list
print('Please choose a version:')
index = 1
if self.current_version == 'current':
selected = self.SELECTED
else:
selected = ''
print("0: System"+selected)
for version in self.soft_paths:
number = version[len(self.soft_root)+1:]
if number == self.current_version:
selected = self.SELECTED
else:
selected = ''
print(str(index)+': '+str(number)+selected)
index += 1
# ask for the version
chosen = None
maximum = len(self.soft_paths)
while not chosen:
try:
choice = input()
except KeyboardInterrupt:
print("\nUser abort!")
sys.exit(0)
if re.match('\\d+', choice) and int(choice) <= maximum and \
int(choice) >= 0:
index = int(choice) - 1
chosen = True
elif choice == '':
print("Keeping current")
sys.exit(0)
else:
print("Bad version, please choose a number between 0 and %s" %
str(maximum))
# return index in version table
return index
@staticmethod
def run():
'''
Read software name on command line and run version selection
'''
try:
opts, args = getopt.getopt(sys.argv[1:], 'h', ['help'])
except getopt.GetoptError as exception:
print('Error parsing command line: %s' % exception)
print(Version.HELP)
sys.exit(1)
for option, _ in opts:
if option in ('-h', '--help'):
print(Version.HELP)
sys.exit(0)
else:
print("Error parsing command line: Unhandled option '%s'" % option)
print(Version.HELP)
sys.exit(2)
if len(args) != 1:
print("Error parsing command line: You must pass software")
print(Version.HELP)
sys.exit(1)
soft = args[0]
version = Version(soft)
version.set_version(version.ask_version())
if __name__ == '__main__':
Version.run()
|
flexible
|
{
"blob_id": "93e8e9fc4f0503dfc3243bef5ab8261a4cdfc296",
"index": 1009,
"step-1": "<mask token>\n\n\nclass Version(object):\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, soft):\n \"\"\"\n Constructor that takes software name\n \"\"\"\n self.soft = soft\n self.app_dir = os.environ.get('APP_DIR')\n if self.app_dir is None:\n self.app_dir = '/opt'\n self.sudo = True\n if os.access(self.app_dir, os.W_OK):\n self.sudo = False\n self.soft_root = os.path.join(self.app_dir, self.soft)\n self.soft_paths = sorted(glob.glob(self.soft_root + '/[0-9]*'))\n self.versions = [v[len(self.soft_root) + 1:] for v in self.soft_paths]\n path = os.path.realpath('%s/current' % self.soft_root)\n self.current_version = path[path.rindex(os.path.sep) + 1:]\n\n def set_version(self, index):\n \"\"\"\n Set software version by index\n \"\"\"\n sudo = 'sudo ' if self.sudo else ''\n old_dir = 'current'\n if index == -1:\n print('Selecting system version')\n if os.path.exists(os.path.join(self.soft_root, old_dir)):\n os.system('cd %s && %srm %s' % (self.soft_root, sudo, old_dir))\n else:\n print(\"Selecting %s version '%s'\" % (self.soft, self.versions[\n index]))\n directory = self.versions[index]\n if os.path.exists(os.path.join(self.soft_root, old_dir)):\n os.system('cd %s && %srm %s' % (self.soft_root, sudo, old_dir))\n os.system('cd %s && %sln -s %s %s' % (self.soft_root, sudo,\n directory, old_dir))\n\n def ask_version(self):\n \"\"\"\n Prompt user for software version in the list of installed versions\n \"\"\"\n print('Please choose a version:')\n index = 1\n if self.current_version == 'current':\n selected = self.SELECTED\n else:\n selected = ''\n print('0: System' + selected)\n for version in self.soft_paths:\n number = version[len(self.soft_root) + 1:]\n if number == self.current_version:\n selected = self.SELECTED\n else:\n selected = ''\n print(str(index) + ': ' + str(number) + selected)\n index += 1\n chosen = None\n maximum = len(self.soft_paths)\n while not chosen:\n try:\n choice = input()\n except KeyboardInterrupt:\n print('\\nUser abort!')\n sys.exit(0)\n if re.match('\\\\d+', choice) and int(choice) <= maximum and int(\n choice) >= 0:\n index = int(choice) - 1\n chosen = True\n elif choice == '':\n print('Keeping current')\n sys.exit(0)\n else:\n print(\n 'Bad version, please choose a number between 0 and %s' %\n str(maximum))\n return index\n\n @staticmethod\n def run():\n \"\"\"\n Read software name on command line and run version selection\n \"\"\"\n try:\n opts, args = getopt.getopt(sys.argv[1:], 'h', ['help'])\n except getopt.GetoptError as exception:\n print('Error parsing command line: %s' % exception)\n print(Version.HELP)\n sys.exit(1)\n for option, _ in opts:\n if option in ('-h', '--help'):\n print(Version.HELP)\n sys.exit(0)\n else:\n print(\"Error parsing command line: Unhandled option '%s'\" %\n option)\n print(Version.HELP)\n sys.exit(2)\n if len(args) != 1:\n print('Error parsing command line: You must pass software')\n print(Version.HELP)\n sys.exit(1)\n soft = args[0]\n version = Version(soft)\n version.set_version(version.ask_version())\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Version(object):\n <mask token>\n HELP = \"\"\"version [-h] software\nSelect software version in a menu:\n-h To print this help screen.\nsoftware Software version to choose.\"\"\"\n SELECTED = ' *'\n\n def __init__(self, soft):\n \"\"\"\n Constructor that takes software name\n \"\"\"\n self.soft = soft\n self.app_dir = os.environ.get('APP_DIR')\n if self.app_dir is None:\n self.app_dir = '/opt'\n self.sudo = True\n if os.access(self.app_dir, os.W_OK):\n self.sudo = False\n self.soft_root = os.path.join(self.app_dir, self.soft)\n self.soft_paths = sorted(glob.glob(self.soft_root + '/[0-9]*'))\n self.versions = [v[len(self.soft_root) + 1:] for v in self.soft_paths]\n path = os.path.realpath('%s/current' % self.soft_root)\n self.current_version = path[path.rindex(os.path.sep) + 1:]\n\n def set_version(self, index):\n \"\"\"\n Set software version by index\n \"\"\"\n sudo = 'sudo ' if self.sudo else ''\n old_dir = 'current'\n if index == -1:\n print('Selecting system version')\n if os.path.exists(os.path.join(self.soft_root, old_dir)):\n os.system('cd %s && %srm %s' % (self.soft_root, sudo, old_dir))\n else:\n print(\"Selecting %s version '%s'\" % (self.soft, self.versions[\n index]))\n directory = self.versions[index]\n if os.path.exists(os.path.join(self.soft_root, old_dir)):\n os.system('cd %s && %srm %s' % (self.soft_root, sudo, old_dir))\n os.system('cd %s && %sln -s %s %s' % (self.soft_root, sudo,\n directory, old_dir))\n\n def ask_version(self):\n \"\"\"\n Prompt user for software version in the list of installed versions\n \"\"\"\n print('Please choose a version:')\n index = 1\n if self.current_version == 'current':\n selected = self.SELECTED\n else:\n selected = ''\n print('0: System' + selected)\n for version in self.soft_paths:\n number = version[len(self.soft_root) + 1:]\n if number == self.current_version:\n selected = self.SELECTED\n else:\n selected = ''\n print(str(index) + ': ' + str(number) + selected)\n index += 1\n chosen = None\n maximum = len(self.soft_paths)\n while not chosen:\n try:\n choice = input()\n except KeyboardInterrupt:\n print('\\nUser abort!')\n sys.exit(0)\n if re.match('\\\\d+', choice) and int(choice) <= maximum and int(\n choice) >= 0:\n index = int(choice) - 1\n chosen = True\n elif choice == '':\n print('Keeping current')\n sys.exit(0)\n else:\n print(\n 'Bad version, please choose a number between 0 and %s' %\n str(maximum))\n return index\n\n @staticmethod\n def run():\n \"\"\"\n Read software name on command line and run version selection\n \"\"\"\n try:\n opts, args = getopt.getopt(sys.argv[1:], 'h', ['help'])\n except getopt.GetoptError as exception:\n print('Error parsing command line: %s' % exception)\n print(Version.HELP)\n sys.exit(1)\n for option, _ in opts:\n if option in ('-h', '--help'):\n print(Version.HELP)\n sys.exit(0)\n else:\n print(\"Error parsing command line: Unhandled option '%s'\" %\n option)\n print(Version.HELP)\n sys.exit(2)\n if len(args) != 1:\n print('Error parsing command line: You must pass software')\n print(Version.HELP)\n sys.exit(1)\n soft = args[0]\n version = Version(soft)\n version.set_version(version.ask_version())\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Version(object):\n \"\"\"\n Software management class\n \"\"\"\n HELP = \"\"\"version [-h] software\nSelect software version in a menu:\n-h To print this help screen.\nsoftware Software version to choose.\"\"\"\n SELECTED = ' *'\n\n def __init__(self, soft):\n \"\"\"\n Constructor that takes software name\n \"\"\"\n self.soft = soft\n self.app_dir = os.environ.get('APP_DIR')\n if self.app_dir is None:\n self.app_dir = '/opt'\n self.sudo = True\n if os.access(self.app_dir, os.W_OK):\n self.sudo = False\n self.soft_root = os.path.join(self.app_dir, self.soft)\n self.soft_paths = sorted(glob.glob(self.soft_root + '/[0-9]*'))\n self.versions = [v[len(self.soft_root) + 1:] for v in self.soft_paths]\n path = os.path.realpath('%s/current' % self.soft_root)\n self.current_version = path[path.rindex(os.path.sep) + 1:]\n\n def set_version(self, index):\n \"\"\"\n Set software version by index\n \"\"\"\n sudo = 'sudo ' if self.sudo else ''\n old_dir = 'current'\n if index == -1:\n print('Selecting system version')\n if os.path.exists(os.path.join(self.soft_root, old_dir)):\n os.system('cd %s && %srm %s' % (self.soft_root, sudo, old_dir))\n else:\n print(\"Selecting %s version '%s'\" % (self.soft, self.versions[\n index]))\n directory = self.versions[index]\n if os.path.exists(os.path.join(self.soft_root, old_dir)):\n os.system('cd %s && %srm %s' % (self.soft_root, sudo, old_dir))\n os.system('cd %s && %sln -s %s %s' % (self.soft_root, sudo,\n directory, old_dir))\n\n def ask_version(self):\n \"\"\"\n Prompt user for software version in the list of installed versions\n \"\"\"\n print('Please choose a version:')\n index = 1\n if self.current_version == 'current':\n selected = self.SELECTED\n else:\n selected = ''\n print('0: System' + selected)\n for version in self.soft_paths:\n number = version[len(self.soft_root) + 1:]\n if number == self.current_version:\n selected = self.SELECTED\n else:\n selected = ''\n print(str(index) + ': ' + str(number) + selected)\n index += 1\n chosen = None\n maximum = len(self.soft_paths)\n while not chosen:\n try:\n choice = input()\n except KeyboardInterrupt:\n print('\\nUser abort!')\n sys.exit(0)\n if re.match('\\\\d+', choice) and int(choice) <= maximum and int(\n choice) >= 0:\n index = int(choice) - 1\n chosen = True\n elif choice == '':\n print('Keeping current')\n sys.exit(0)\n else:\n print(\n 'Bad version, please choose a number between 0 and %s' %\n str(maximum))\n return index\n\n @staticmethod\n def run():\n \"\"\"\n Read software name on command line and run version selection\n \"\"\"\n try:\n opts, args = getopt.getopt(sys.argv[1:], 'h', ['help'])\n except getopt.GetoptError as exception:\n print('Error parsing command line: %s' % exception)\n print(Version.HELP)\n sys.exit(1)\n for option, _ in opts:\n if option in ('-h', '--help'):\n print(Version.HELP)\n sys.exit(0)\n else:\n print(\"Error parsing command line: Unhandled option '%s'\" %\n option)\n print(Version.HELP)\n sys.exit(2)\n if len(args) != 1:\n print('Error parsing command line: You must pass software')\n print(Version.HELP)\n sys.exit(1)\n soft = args[0]\n version = Version(soft)\n version.set_version(version.ask_version())\n\n\n<mask token>\n",
"step-4": "<mask token>\ntry:\n input = raw_input\nexcept NameError:\n pass\n\n\nclass Version(object):\n \"\"\"\n Software management class\n \"\"\"\n HELP = \"\"\"version [-h] software\nSelect software version in a menu:\n-h To print this help screen.\nsoftware Software version to choose.\"\"\"\n SELECTED = ' *'\n\n def __init__(self, soft):\n \"\"\"\n Constructor that takes software name\n \"\"\"\n self.soft = soft\n self.app_dir = os.environ.get('APP_DIR')\n if self.app_dir is None:\n self.app_dir = '/opt'\n self.sudo = True\n if os.access(self.app_dir, os.W_OK):\n self.sudo = False\n self.soft_root = os.path.join(self.app_dir, self.soft)\n self.soft_paths = sorted(glob.glob(self.soft_root + '/[0-9]*'))\n self.versions = [v[len(self.soft_root) + 1:] for v in self.soft_paths]\n path = os.path.realpath('%s/current' % self.soft_root)\n self.current_version = path[path.rindex(os.path.sep) + 1:]\n\n def set_version(self, index):\n \"\"\"\n Set software version by index\n \"\"\"\n sudo = 'sudo ' if self.sudo else ''\n old_dir = 'current'\n if index == -1:\n print('Selecting system version')\n if os.path.exists(os.path.join(self.soft_root, old_dir)):\n os.system('cd %s && %srm %s' % (self.soft_root, sudo, old_dir))\n else:\n print(\"Selecting %s version '%s'\" % (self.soft, self.versions[\n index]))\n directory = self.versions[index]\n if os.path.exists(os.path.join(self.soft_root, old_dir)):\n os.system('cd %s && %srm %s' % (self.soft_root, sudo, old_dir))\n os.system('cd %s && %sln -s %s %s' % (self.soft_root, sudo,\n directory, old_dir))\n\n def ask_version(self):\n \"\"\"\n Prompt user for software version in the list of installed versions\n \"\"\"\n print('Please choose a version:')\n index = 1\n if self.current_version == 'current':\n selected = self.SELECTED\n else:\n selected = ''\n print('0: System' + selected)\n for version in self.soft_paths:\n number = version[len(self.soft_root) + 1:]\n if number == self.current_version:\n selected = self.SELECTED\n else:\n selected = ''\n print(str(index) + ': ' + str(number) + selected)\n index += 1\n chosen = None\n maximum = len(self.soft_paths)\n while not chosen:\n try:\n choice = input()\n except KeyboardInterrupt:\n print('\\nUser abort!')\n sys.exit(0)\n if re.match('\\\\d+', choice) and int(choice) <= maximum and int(\n choice) >= 0:\n index = int(choice) - 1\n chosen = True\n elif choice == '':\n print('Keeping current')\n sys.exit(0)\n else:\n print(\n 'Bad version, please choose a number between 0 and %s' %\n str(maximum))\n return index\n\n @staticmethod\n def run():\n \"\"\"\n Read software name on command line and run version selection\n \"\"\"\n try:\n opts, args = getopt.getopt(sys.argv[1:], 'h', ['help'])\n except getopt.GetoptError as exception:\n print('Error parsing command line: %s' % exception)\n print(Version.HELP)\n sys.exit(1)\n for option, _ in opts:\n if option in ('-h', '--help'):\n print(Version.HELP)\n sys.exit(0)\n else:\n print(\"Error parsing command line: Unhandled option '%s'\" %\n option)\n print(Version.HELP)\n sys.exit(2)\n if len(args) != 1:\n print('Error parsing command line: You must pass software')\n print(Version.HELP)\n sys.exit(1)\n soft = args[0]\n version = Version(soft)\n version.set_version(version.ask_version())\n\n\nif __name__ == '__main__':\n Version.run()\n",
"step-5": "#!/usr/bin/env python\n# encoding: UTF-8\n\n'''\nScript to select current version for a given soft (python, ruby or java).\n'''\n\nimport os\nimport re\nimport sys\nimport glob\nimport getopt\n\n\n# fix input in Python 2 and 3\ntry:\n input = raw_input # pylint: disable=redefined-builtin,invalid-name\nexcept NameError:\n pass\n\n\nclass Version(object): # pylint: disable=useless-object-inheritance\n '''\n Software management class\n '''\n\n HELP = '''version [-h] software\nSelect software version in a menu:\n-h To print this help screen.\nsoftware Software version to choose.'''\n SELECTED = ' *'\n\n def __init__(self, soft):\n '''\n Constructor that takes software name\n '''\n self.soft = soft\n self.app_dir = os.environ.get('APP_DIR')\n if self.app_dir is None:\n self.app_dir = '/opt'\n self.sudo = True\n if os.access(self.app_dir, os.W_OK):\n self.sudo = False\n self.soft_root = os.path.join(self.app_dir, self.soft)\n self.soft_paths = sorted(glob.glob(self.soft_root+'/[0-9]*'))\n self.versions = [v[len(self.soft_root)+1:] for v in self.soft_paths]\n path = os.path.realpath(\"%s/current\" % self.soft_root)\n self.current_version = path[path.rindex(os.path.sep)+1:]\n\n def set_version(self, index):\n '''\n Set software version by index\n '''\n sudo = 'sudo ' if self.sudo else ''\n old_dir = \"current\"\n if index == -1:\n print(\"Selecting system version\")\n if os.path.exists(os.path.join(self.soft_root, old_dir)):\n os.system(\"cd %s && %srm %s\" % (self.soft_root, sudo, old_dir))\n else:\n print(\"Selecting %s version '%s'\" %\n (self.soft, self.versions[index]))\n directory = self.versions[index]\n if os.path.exists(os.path.join(self.soft_root, old_dir)):\n os.system(\"cd %s && %srm %s\" % (self.soft_root, sudo, old_dir))\n os.system(\"cd %s && %sln -s %s %s\" % (self.soft_root, sudo, directory, old_dir))\n\n def ask_version(self):\n '''\n Prompt user for software version in the list of installed versions\n '''\n # print version list\n print('Please choose a version:')\n index = 1\n if self.current_version == 'current':\n selected = self.SELECTED\n else:\n selected = ''\n print(\"0: System\"+selected)\n for version in self.soft_paths:\n number = version[len(self.soft_root)+1:]\n if number == self.current_version:\n selected = self.SELECTED\n else:\n selected = ''\n print(str(index)+': '+str(number)+selected)\n index += 1\n # ask for the version\n chosen = None\n maximum = len(self.soft_paths)\n while not chosen:\n try:\n choice = input()\n except KeyboardInterrupt:\n print(\"\\nUser abort!\")\n sys.exit(0)\n if re.match('\\\\d+', choice) and int(choice) <= maximum and \\\n int(choice) >= 0:\n index = int(choice) - 1\n chosen = True\n elif choice == '':\n print(\"Keeping current\")\n sys.exit(0)\n else:\n print(\"Bad version, please choose a number between 0 and %s\" %\n str(maximum))\n # return index in version table\n return index\n\n @staticmethod\n def run():\n '''\n Read software name on command line and run version selection\n '''\n try:\n opts, args = getopt.getopt(sys.argv[1:], 'h', ['help'])\n except getopt.GetoptError as exception:\n print('Error parsing command line: %s' % exception)\n print(Version.HELP)\n sys.exit(1)\n for option, _ in opts:\n if option in ('-h', '--help'):\n print(Version.HELP)\n sys.exit(0)\n else:\n print(\"Error parsing command line: Unhandled option '%s'\" % option)\n print(Version.HELP)\n sys.exit(2)\n if len(args) != 1:\n print(\"Error parsing command line: You must pass software\")\n print(Version.HELP)\n sys.exit(1)\n soft = args[0]\n 
version = Version(soft)\n version.set_version(version.ask_version())\n\n\nif __name__ == '__main__':\n Version.run()\n",
"step-ids": [
5,
6,
7,
8,
10
]
}
|
[
5,
6,
7,
8,
10
] |
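The finished script above resolves installed versions from $APP_DIR/<soft>/<n> and flips a "current" symlink. A minimal sketch of driving it programmatically on a Unix-like system, assuming the Version class is importable; the scratch layout and version numbers are illustrative, not part of the original:

import os
import tempfile

root = tempfile.mkdtemp()
os.environ['APP_DIR'] = root  # must be set before Version() reads it
for number in ('2.7.18', '3.9.1'):
    os.makedirs(os.path.join(root, 'python', number))

v = Version('python')
print(v.versions)        # ['2.7.18', '3.9.1'] discovered via the glob
v.set_version(1)         # runs ln -s 3.9.1 current (no sudo: root is writable)
print(os.path.realpath(os.path.join(root, 'python', 'current')))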
#encoding:utf-8
class Employee():
    def __init__(self, name, sex, salary):
self.name = name
self.sex = sex
self.salary = salary
def give_raise(self):
222
|
normal
|
{
"blob_id": "014509170b98a38838859d3ca48c74ca6be0bd46",
"index": 7190,
"step-1": "#encoding:utf-8\nclass Employee():\n def __int__(self,name,sex,salary):\n self.name = name\n self.sex = sex\n self.salary = salary\n def give_raise(self):\n 222",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
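give_raise above is a bare 222 expression with no effect. A working sketch of the likely intent; the 5% default raise is an assumption for illustration:

class Employee:
    def __init__(self, name, sex, salary):
        self.name = name
        self.sex = sex
        self.salary = salary

    def give_raise(self, amount=None):
        # 5% default is illustrative, not from the original example
        self.salary += amount if amount is not None else self.salary * 0.05

worker = Employee('Ada', 'F', 1000)
worker.give_raise()
print(worker.salary)  # 1050.0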
# dates.py
"""Date/time parsing and manipulation functions
"""
# Some people, when confronted with a problem, think
# "I know, I'll use regular expressions."
# Now they have two problems.
# -- Jamie Zawinski
import datetime as dt
import time
import re
_months = [
'january',
'february',
'march',
'april',
'may',
'june',
'july',
'august',
'september',
'october',
'november',
'december',
]
# Formatting directives and corresponding regular expression
_regexps = {
'B': r'(?P<b>' + '|'.join(_months) + ')',
'b': r'(?P<b>' + '|'.join(m[0:3] for m in _months) + ')',
'm': r'(?P<m>\d\d?)',
'd': r'(?P<d>\d\d?)',
'Y': r'(?P<Y>\d\d\d\d)',
'y': r'(?P<y>\d\d)',
'I': r'(?P<H>0?[1-9]|1[012])',
'H': r'(?P<H>[01]?[0-9]|2[0-3])',
'M': r'(?P<M>[0-5]\d)',
'S': r'(?P<S>[0-5]\d)',
'f': r'(?P<f>\d+)',
'p': r'(?P<p>am|pm)',
}
# Support date formats and examples
_date_formats = [
'B d, Y', # October 15, 2006
'b d, Y', # Oct 15, 2006
'B d Y', # October 15 2006
'b d Y', # Oct 15 2006
'B d', # October 15
'b d', # Oct 15
'Y/m/d', # 2006/10/15
'Y-m-d', # 2006-10-15
'm/d/Y', # 10/15/2006
'm-d-Y', # 10-15-2006
'm/d/y', # 10/15/06
'm-d-y', # 10-15-06
'y/m/d', # 06/10/15
'y-m-d', # 06-10-15
]
# Supported time formats and examples
_time_formats = [
'I:M:S.f p', # 3:05:29.108 PM
'H:M:S.f', # 15:05:29.108
'I:M:S p', # 3:05:29 PM
'H:M:S', # 15:05:29
'I:M p', # 3:05 PM
'H:M', # 15:05
]
class CannotParse (Exception):
"""Failure to parse a date or time.
"""
pass
def parse(string, format):
"""Attempt to parse the given string as a date in the given format.
This is similar to `datetime.strptime`, but this can handle date strings
with trailing characters. If it still fails to parse, raise a
`CannotParse` exception.
Examples::
>>> parse('2010/08/28', '%Y/%m/%d')
datetime.datetime(2010, 8, 28, 0, 0)
>>> parse('2010/08/28 extra stuff', '%Y/%m/%d')
datetime.datetime(2010, 8, 28, 0, 0)
>>> parse('2010/08/28', '%m/%d/%y')
Traceback (most recent call last):
CannotParse: time data '2010/08/28' does not match format '%m/%d/%y'
"""
# Count the number of spaces in the format string (N), and
# truncate everything after the (N+1)th space
spaces = format.count(' ') + 1
string = ' '.join(string.split()[:spaces])
try:
result = dt.datetime.strptime(string, format)
    except ValueError as err:
raise CannotParse(str(err))
else:
return result
def format_regexp(simple_format):
r"""Given a simplified date or time format string, return ``(format,
regexp)``, where ``format`` is a `strptime`-compatible format string, and
``regexp`` is a regular expression that matches dates or times in that
format.
The ``simple_format`` string supports a subset of `strptime` formatting
directives, with the leading ``%`` characters removed.
Examples::
>>> format_regexp('Y/m/d')
('%Y/%m/%d', '(?P<Y>\\d\\d\\d\\d)/(?P<m>\\d\\d?)/(?P<d>\\d\\d?)')
>>> format_regexp('H:M:S')
('%H:%M:%S', '(?P<H>[01]?[0-9]|2[0-3]):(?P<M>[0-5]\\d):(?P<S>[0-5]\\d)')
"""
format, regexp = ('', '')
for char in simple_format:
if char in _regexps:
format += '%' + char
regexp += _regexps[char]
else:
format += char
regexp += char
return (format, regexp)
def _compiled_format_regexps(date_formats, time_formats):
"""Return a list of ``(format, compiled_regexp)`` for all combinations
of ``date_formats`` and ``time_formats``.
"""
# List of all combinations of date_formats and time_formats
date_time_formats = []
for df in date_formats:
for tf in time_formats:
date_time_formats.append(df + ' ' + tf)
# Add date-only formats
for df in date_formats:
date_time_formats.append(df)
# Add time-only formats
for tf in time_formats:
date_time_formats.append(tf)
# (format, compiled_regexp) for each supported format
format_regexps = []
for dt_format in date_time_formats:
format, regexp = format_regexp(dt_format)
# Compile the regexp
format_regexps.append(
(format, re.compile(regexp, re.IGNORECASE))
)
return format_regexps
def guess_format(string):
"""Try to guess the date/time format of ``string``, or raise a
`CannotParse` exception.
Examples::
>>> guess_format('2010/01/28 13:25:49')
'%Y/%m/%d %H:%M:%S'
>>> guess_format('01/28/10 1:25:49 PM')
'%m/%d/%y %I:%M:%S %p'
>>> guess_format('01/28/2010 13:25:49.123')
'%m/%d/%Y %H:%M:%S.%f'
>>> guess_format('Aug 15 2009 15:24')
'%b %d %Y %H:%M'
>>> guess_format('3-14-15 9:26:53.589')
'%m-%d-%y %H:%M:%S.%f'
Leading and trailing text may be present::
>>> guess_format('FOO April 1, 2007 3:45 PM BAR')
'%B %d, %Y %I:%M %p'
>>> guess_format('[[2010-09-25 14:19:24]]')
'%Y-%m-%d %H:%M:%S'
"""
format_regexps = _compiled_format_regexps(_date_formats, _time_formats)
for format, regexp in format_regexps:
if regexp.search(string):
return format
# Nothing matched
raise CannotParse("Could not guess date/time format in: %s" % string)
def guess_file_date_format(filename):
"""Open the given file and use `guess_format` to look for a
date/time at the beginning of each line. Return the format string for
the first one that's found. Raise `CannotParse` if none is found.
"""
for line in open(filename):
try:
format = guess_format(line)
except CannotParse:
pass
else:
return format
raise CannotParse("No date/time strings found in '%s'" % filename)
def date_chop(line, dateformat='%m/%d/%y %I:%M:%S %p', resolution=60):
"""Given a ``line`` of text, get a date/time formatted as ``dateformat``,
and return a `datetime` object rounded to the nearest ``resolution``
seconds. If ``line`` fails to match ``dateformat``, a `CannotParse`
exception is raised.
Examples::
>>> date_chop('1976/05/19 12:05:17', '%Y/%m/%d %H:%M:%S', 60)
datetime.datetime(1976, 5, 19, 12, 5)
>>> date_chop('1976/05/19 12:05:17', '%Y/%m/%d %H:%M:%S', 3600)
datetime.datetime(1976, 5, 19, 12, 0)
"""
timestamp = parse(line, dateformat)
# Round the timestamp to the given resolution
# First convert to seconds-since-epoch
epoch_seconds = int(time.mktime(timestamp.timetuple()))
# Then do integer division to truncate
    rounded_seconds = (epoch_seconds // resolution) * resolution
# Convert back to a datetime
return dt.datetime.fromtimestamp(rounded_seconds)
|
normal
|
{
"blob_id": "458bc2b5f843e4c5bb3f9180ab2cbec7409b8d3e",
"index": 4946,
"step-1": "# dates.py\n\n\"\"\"Date/time parsing and manipulation functions\n\"\"\"\n\n# Some people, when confronted with a problem, think\n# \"I know, I'll use regular expressions.\"\n# Now they have two problems.\n# -- Jamie Zawinski\n\nimport datetime as dt\nimport time\nimport re\n\n_months = [\n 'january',\n 'february',\n 'march',\n 'april',\n 'may',\n 'june',\n 'july',\n 'august',\n 'september',\n 'october',\n 'november',\n 'december',\n]\n\n# Formatting directives and corresponding regular expression\n_regexps = {\n 'B': r'(?P<b>' + '|'.join(_months) + ')',\n 'b': r'(?P<b>' + '|'.join(m[0:3] for m in _months) + ')',\n 'm': r'(?P<m>\\d\\d?)',\n 'd': r'(?P<d>\\d\\d?)',\n 'Y': r'(?P<Y>\\d\\d\\d\\d)',\n 'y': r'(?P<y>\\d\\d)',\n 'I': r'(?P<H>0?[1-9]|1[012])',\n 'H': r'(?P<H>[01]?[0-9]|2[0-3])',\n 'M': r'(?P<M>[0-5]\\d)',\n 'S': r'(?P<S>[0-5]\\d)',\n 'f': r'(?P<f>\\d+)',\n 'p': r'(?P<p>am|pm)',\n}\n\n# Support date formats and examples\n_date_formats = [\n 'B d, Y', # October 15, 2006\n 'b d, Y', # Oct 15, 2006\n 'B d Y', # October 15 2006\n 'b d Y', # Oct 15 2006\n 'B d', # October 15\n 'b d', # Oct 15\n 'Y/m/d', # 2006/10/15\n 'Y-m-d', # 2006-10-15\n 'm/d/Y', # 10/15/2006\n 'm-d-Y', # 10-15-2006\n 'm/d/y', # 10/15/06\n 'm-d-y', # 10-15-06\n 'y/m/d', # 06/10/15\n 'y-m-d', # 06-10-15\n]\n\n# Supported time formats and examples\n_time_formats = [\n 'I:M:S.f p', # 3:05:29.108 PM\n 'H:M:S.f', # 15:05:29.108\n 'I:M:S p', # 3:05:29 PM\n 'H:M:S', # 15:05:29\n 'I:M p', # 3:05 PM\n 'H:M', # 15:05\n]\n\n\nclass CannotParse (Exception):\n \"\"\"Failure to parse a date or time.\n \"\"\"\n pass\n\n\ndef parse(string, format):\n \"\"\"Attempt to parse the given string as a date in the given format.\n This is similar to `datetime.strptime`, but this can handle date strings\n with trailing characters. 
If it still fails to parse, raise a\n `CannotParse` exception.\n\n Examples::\n\n >>> parse('2010/08/28', '%Y/%m/%d')\n datetime.datetime(2010, 8, 28, 0, 0)\n\n >>> parse('2010/08/28 extra stuff', '%Y/%m/%d')\n datetime.datetime(2010, 8, 28, 0, 0)\n\n >>> parse('2010/08/28', '%m/%d/%y')\n Traceback (most recent call last):\n CannotParse: time data '2010/08/28' does not match format '%m/%d/%y'\n\n \"\"\"\n # Count the number of spaces in the format string (N), and\n # truncate everything after the (N+1)th space\n spaces = format.count(' ') + 1\n string = ' '.join(string.split()[:spaces])\n\n try:\n result = dt.datetime.strptime(string, format)\n except ValueError, err:\n raise CannotParse(str(err))\n else:\n return result\n\n\ndef format_regexp(simple_format):\n r\"\"\"Given a simplified date or time format string, return ``(format,\n regexp)``, where ``format`` is a `strptime`-compatible format string, and\n ``regexp`` is a regular expression that matches dates or times in that\n format.\n\n The ``simple_format`` string supports a subset of `strptime` formatting\n directives, with the leading ``%`` characters removed.\n\n Examples::\n\n >>> format_regexp('Y/m/d')\n ('%Y/%m/%d', '(?P<Y>\\\\d\\\\d\\\\d\\\\d)/(?P<m>\\\\d\\\\d?)/(?P<d>\\\\d\\\\d?)')\n\n >>> format_regexp('H:M:S')\n ('%H:%M:%S', '(?P<H>[01]?[0-9]|2[0-3]):(?P<M>[0-5]\\\\d):(?P<S>[0-5]\\\\d)')\n\n \"\"\"\n format, regexp = ('', '')\n for char in simple_format:\n if char in _regexps:\n format += '%' + char\n regexp += _regexps[char]\n else:\n format += char\n regexp += char\n return (format, regexp)\n\n\ndef _compiled_format_regexps(date_formats, time_formats):\n \"\"\"Return a list of ``(format, compiled_regexp)`` for all combinations\n of ``date_formats`` and ``time_formats``.\n \"\"\"\n # List of all combinations of date_formats and time_formats\n date_time_formats = []\n for df in date_formats:\n for tf in time_formats:\n date_time_formats.append(df + ' ' + tf)\n\n # Add date-only formats\n for df in date_formats:\n date_time_formats.append(df)\n\n # Add time-only formats\n for tf in time_formats:\n date_time_formats.append(tf)\n\n # (format, compiled_regexp) for each supported format\n format_regexps = []\n for dt_format in date_time_formats:\n format, regexp = format_regexp(dt_format)\n # Compile the regexp\n format_regexps.append(\n (format, re.compile(regexp, re.IGNORECASE))\n )\n\n return format_regexps\n\n\ndef guess_format(string):\n \"\"\"Try to guess the date/time format of ``string``, or raise a\n `CannotParse` exception.\n\n Examples::\n\n >>> guess_format('2010/01/28 13:25:49')\n '%Y/%m/%d %H:%M:%S'\n\n >>> guess_format('01/28/10 1:25:49 PM')\n '%m/%d/%y %I:%M:%S %p'\n\n >>> guess_format('01/28/2010 13:25:49.123')\n '%m/%d/%Y %H:%M:%S.%f'\n\n >>> guess_format('Aug 15 2009 15:24')\n '%b %d %Y %H:%M'\n\n >>> guess_format('3-14-15 9:26:53.589')\n '%m-%d-%y %H:%M:%S.%f'\n\n Leading and trailing text may be present::\n\n >>> guess_format('FOO April 1, 2007 3:45 PM BAR')\n '%B %d, %Y %I:%M %p'\n\n >>> guess_format('[[2010-09-25 14:19:24]]')\n '%Y-%m-%d %H:%M:%S'\n\n \"\"\"\n format_regexps = _compiled_format_regexps(_date_formats, _time_formats)\n for format, regexp in format_regexps:\n if regexp.search(string):\n return format\n # Nothing matched\n raise CannotParse(\"Could not guess date/time format in: %s\" % string)\n\n\ndef guess_file_date_format(filename):\n \"\"\"Open the given file and use `guess_format` to look for a\n date/time at the beginning of each line. 
Return the format string for\n the first one that's found. Raise `CannotParse` if none is found.\n \"\"\"\n for line in open(filename):\n try:\n format = guess_format(line)\n except CannotParse:\n pass\n else:\n return format\n\n raise CannotParse(\"No date/time strings found in '%s'\" % filename)\n\n\ndef date_chop(line, dateformat='%m/%d/%y %I:%M:%S %p', resolution=60):\n \"\"\"Given a ``line`` of text, get a date/time formatted as ``dateformat``,\n and return a `datetime` object rounded to the nearest ``resolution``\n seconds. If ``line`` fails to match ``dateformat``, a `CannotParse`\n exception is raised.\n\n Examples::\n\n >>> date_chop('1976/05/19 12:05:17', '%Y/%m/%d %H:%M:%S', 60)\n datetime.datetime(1976, 5, 19, 12, 5)\n\n >>> date_chop('1976/05/19 12:05:17', '%Y/%m/%d %H:%M:%S', 3600)\n datetime.datetime(1976, 5, 19, 12, 0)\n\n \"\"\"\n timestamp = parse(line, dateformat)\n # Round the timestamp to the given resolution\n # First convert to seconds-since-epoch\n epoch_seconds = int(time.mktime(timestamp.timetuple()))\n # Then do integer division to truncate\n rounded_seconds = (epoch_seconds / resolution) * resolution\n # Convert back to a datetime\n return dt.datetime.fromtimestamp(rounded_seconds)\n\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
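A short usage sketch for the module above, assuming it is saved as dates.py on the import path; the inputs mirror its doctests:

import dates

fmt = dates.guess_format('FOO April 1, 2007 3:45 PM BAR')
print(fmt)  # '%B %d, %Y %I:%M %p'
# parse() tolerates trailing text past the format's token count
print(dates.parse('April 1, 2007 3:45 PM log line junk', fmt))
# default dateformat matches, then the timestamp is truncated to the hour
print(dates.date_chop('04/01/07 3:45:17 PM', resolution=3600))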
# file with function to randomly select user from all of the data, all of the games
import ast
import csv
import numpy as np
import pandas as pd
import sys
from nba_api.stats.static import players
# some fun little work to get a random player
def get_random_player(file_name):
def need_s(num):
        return 's' if num != 1 else ''
csv.field_size_limit(sys.maxsize)
# the rows are really long!
res = pd.read_csv(file_name, header=None)
r = np.random.randint(0, len(res.values))
arr = ast.literal_eval(res.values[r][1])
player = players.find_player_by_id(res.values[r][0])['full_name']
print(f'{player} selected.')
r_idx = np.random.randint(0, len(arr))
game = arr[r_idx]
x = f'On {game[0]}, {player} scored {game[-1]} point{need_s(game[-1])}, dished out '\
f'{game[16]} assist{need_s(game[16])}, grabbed {game[15]} rebound{need_s(game[15])}, '\
f'had {game[17]} steal{need_s(game[17])}, and had {game[18]} block{need_s(game[18])}.'
print(x)
return player, arr
|
normal
|
{
"blob_id": "ac178d4e009a40bde5d76e854edc6f6ae8422610",
"index": 1106,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_random_player(file_name):\n\n def need_s(num):\n return 's' if num != 1 else ''\n csv.field_size_limit(sys.maxsize)\n res = pd.read_csv(file_name, header=None)\n r = np.random.randint(0, len(res.values))\n arr = ast.literal_eval(res.values[r][1])\n player = players.find_player_by_id(res.values[r][0])['full_name']\n print(f'{player} selected.')\n r_idx = np.random.randint(0, len(arr))\n game = arr[r_idx]\n x = (\n f'On {game[0]}, {player} scored {game[-1]} point{need_s(game[-1])}, dished out {game[16]} assist{need_s(game[16])}, grabbed {game[15]} rebound{need_s(game[15])}, had {game[17]} steal{need_s(game[17])}, and had {game[18]} block{need_s(game[18])}.'\n )\n print(x)\n return player, arr\n",
"step-3": "import ast\nimport csv\nimport numpy as np\nimport pandas as pd\nimport sys\nfrom nba_api.stats.static import players\n\n\ndef get_random_player(file_name):\n\n def need_s(num):\n return 's' if num != 1 else ''\n csv.field_size_limit(sys.maxsize)\n res = pd.read_csv(file_name, header=None)\n r = np.random.randint(0, len(res.values))\n arr = ast.literal_eval(res.values[r][1])\n player = players.find_player_by_id(res.values[r][0])['full_name']\n print(f'{player} selected.')\n r_idx = np.random.randint(0, len(arr))\n game = arr[r_idx]\n x = (\n f'On {game[0]}, {player} scored {game[-1]} point{need_s(game[-1])}, dished out {game[16]} assist{need_s(game[16])}, grabbed {game[15]} rebound{need_s(game[15])}, had {game[17]} steal{need_s(game[17])}, and had {game[18]} block{need_s(game[18])}.'\n )\n print(x)\n return player, arr\n",
"step-4": "# file with function to randomly select user from all of the data, all of the games\nimport ast\nimport csv\nimport numpy as np\nimport pandas as pd\nimport sys\n\nfrom nba_api.stats.static import players\n\n# some fun little work to get a random player\ndef get_random_player(file_name):\n def need_s(num):\n return 's' if num!=1 else ''\n csv.field_size_limit(sys.maxsize)\n # the rows are really long!\n res = pd.read_csv(file_name, header=None)\n r = np.random.randint(0, len(res.values))\n arr = ast.literal_eval(res.values[r][1])\n player = players.find_player_by_id(res.values[r][0])['full_name']\n print(f'{player} selected.')\n r_idx = np.random.randint(0, len(arr))\n game = arr[r_idx]\n x = f'On {game[0]}, {player} scored {game[-1]} point{need_s(game[-1])}, dished out '\\\n f'{game[16]} assist{need_s(game[16])}, grabbed {game[15]} rebound{need_s(game[15])}, '\\\n f'had {game[17]} steal{need_s(game[17])}, and had {game[18]} block{need_s(game[18])}.'\n print(x)\n return player, arr",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
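get_random_player assumes each headerless CSV row holds a player id followed by a stringified list of game rows, where index 0 is the date, indices 15-18 are rebounds/assists/steals/blocks, and the last element is points (inferred from the f-string above). A minimal sketch building such a file; the stat layout and the id are illustrative (2544 is LeBron James in nba_api's static player list, an assumption worth verifying):

import csv

# one game row: date, 14 filler stats, then reb/ast/stl/blk and points last
game = ['2020-01-15'] + [0] * 14 + [8, 11, 1, 0, 25]
with open('players.csv', 'w', newline='') as handle:
    csv.writer(handle).writerow([2544, str([game])])

player, games = get_random_player('players.csv')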
<|reserved_special_token_0|>
class TestCommands(commands.Cog, description='Unstable test commands',
command_attrs=dict(hidden=True, description='Can only be used by an Owner')
):
<|reserved_special_token_0|>
async def cog_check(self, ctx):
return await self.bot.is_owner(ctx.author)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestCommands(commands.Cog, description='Unstable test commands',
command_attrs=dict(hidden=True, description='Can only be used by an Owner')
):
def __init__(self, bot):
self.bot = bot
self.hidden = True
print('Loaded', __name__)
async def cog_check(self, ctx):
return await self.bot.is_owner(ctx.author)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestCommands(commands.Cog, description='Unstable test commands',
command_attrs=dict(hidden=True, description='Can only be used by an Owner')
):
def __init__(self, bot):
self.bot = bot
self.hidden = True
print('Loaded', __name__)
async def cog_check(self, ctx):
return await self.bot.is_owner(ctx.author)
def setup(bot):
if getattr(bot, 'debug', False):
bot.add_cog(TestCommands(bot))
<|reserved_special_token_1|>
import discord
from discord.ext import commands
class TestCommands(commands.Cog, description='Unstable test commands',
command_attrs=dict(hidden=True, description='Can only be used by an Owner')
):
def __init__(self, bot):
self.bot = bot
self.hidden = True
print('Loaded', __name__)
async def cog_check(self, ctx):
return await self.bot.is_owner(ctx.author)
def setup(bot):
if getattr(bot, 'debug', False):
bot.add_cog(TestCommands(bot))
<|reserved_special_token_1|>
import discord
from discord.ext import commands
class TestCommands(commands.Cog, description="Unstable test commands", command_attrs=dict(hidden=True, description="Can only be used by an Owner")):
def __init__(self, bot):
self.bot = bot
self.hidden = True
print("Loaded", __name__)
async def cog_check(self, ctx):
return await self.bot.is_owner(ctx.author)
def setup(bot):
if getattr(bot, "debug", False):
bot.add_cog(TestCommands(bot))
|
flexible
|
{
"blob_id": "d5a5c6f9d483b2998cd0d9e47b37ab4499fa1c2a",
"index": 6279,
"step-1": "<mask token>\n\n\nclass TestCommands(commands.Cog, description='Unstable test commands',\n command_attrs=dict(hidden=True, description='Can only be used by an Owner')\n ):\n <mask token>\n\n async def cog_check(self, ctx):\n return await self.bot.is_owner(ctx.author)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass TestCommands(commands.Cog, description='Unstable test commands',\n command_attrs=dict(hidden=True, description='Can only be used by an Owner')\n ):\n\n def __init__(self, bot):\n self.bot = bot\n self.hidden = True\n print('Loaded', __name__)\n\n async def cog_check(self, ctx):\n return await self.bot.is_owner(ctx.author)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass TestCommands(commands.Cog, description='Unstable test commands',\n command_attrs=dict(hidden=True, description='Can only be used by an Owner')\n ):\n\n def __init__(self, bot):\n self.bot = bot\n self.hidden = True\n print('Loaded', __name__)\n\n async def cog_check(self, ctx):\n return await self.bot.is_owner(ctx.author)\n\n\ndef setup(bot):\n if getattr(bot, 'debug', False):\n bot.add_cog(TestCommands(bot))\n",
"step-4": "import discord\nfrom discord.ext import commands\n\n\nclass TestCommands(commands.Cog, description='Unstable test commands',\n command_attrs=dict(hidden=True, description='Can only be used by an Owner')\n ):\n\n def __init__(self, bot):\n self.bot = bot\n self.hidden = True\n print('Loaded', __name__)\n\n async def cog_check(self, ctx):\n return await self.bot.is_owner(ctx.author)\n\n\ndef setup(bot):\n if getattr(bot, 'debug', False):\n bot.add_cog(TestCommands(bot))\n",
"step-5": "import discord\nfrom discord.ext import commands\n\n\nclass TestCommands(commands.Cog, description=\"Unstable test commands\", command_attrs=dict(hidden=True, description=\"Can only be used by an Owner\")):\n def __init__(self, bot):\n self.bot = bot\n self.hidden = True\n print(\"Loaded\", __name__)\n\n\n async def cog_check(self, ctx):\n return await self.bot.is_owner(ctx.author)\n\n\ndef setup(bot):\n if getattr(bot, \"debug\", False):\n bot.add_cog(TestCommands(bot))\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
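setup() registers the cog only when the bot object carries a truthy debug attribute, keeping the unstable commands out of production bots. A minimal opt-in sketch, assuming discord.py 1.x (2.x makes load_extension a coroutine) and that the file above lives at cogs/test_commands.py — both assumptions, not stated in the original:

import discord
from discord.ext import commands

bot = commands.Bot(command_prefix='!', intents=discord.Intents.default())
bot.debug = True  # without this attribute, setup() silently skips the cog
bot.load_extension('cogs.test_commands')  # module path is an assumption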
'''
Classes
'''
class Person:
alive = True
'''
Possible Attributes for a Person:
1. Name
2. Age
3. Gender
'''
def __init__(self, name, age, gender):
self.name = name
self.age = age
self.gender = gender
self.salary = 0
def greet(self):
print("Hello ", self.name)
def greetByTime(self, time="Morning"):
print("Hello", self.name, " . ", time)
print("Accessing Static Variable", Person.alive)
p = Person("John", 30, "Male")
print("\n\nAccessing Functions \n\n")
p.greet()
p.greetByTime()
p.greetByTime("Goodnight")
print("\n\nAccessing Variables \n\n")
print(p.name, p.age, p.gender)
|
normal
|
{
"blob_id": "11feb13f38f2484c867a8b3fa525ffecf419dfe5",
"index": 9957,
"step-1": "<mask token>\n\n\nclass Person:\n alive = True\n <mask token>\n\n def __init__(self, name, age, gender):\n self.name = name\n self.age = age\n self.gender = gender\n self.salary = 0\n\n def greet(self):\n print('Hello ', self.name)\n\n def greetByTime(self, time='Morning'):\n print('Hello', self.name, ' . ', time)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Person:\n alive = True\n \"\"\"\n\n Possible Attributes for a Person:\n\n 1. Name\n 2. Age\n 3. Gender\n\n \"\"\"\n\n def __init__(self, name, age, gender):\n self.name = name\n self.age = age\n self.gender = gender\n self.salary = 0\n\n def greet(self):\n print('Hello ', self.name)\n\n def greetByTime(self, time='Morning'):\n print('Hello', self.name, ' . ', time)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Person:\n alive = True\n \"\"\"\n\n Possible Attributes for a Person:\n\n 1. Name\n 2. Age\n 3. Gender\n\n \"\"\"\n\n def __init__(self, name, age, gender):\n self.name = name\n self.age = age\n self.gender = gender\n self.salary = 0\n\n def greet(self):\n print('Hello ', self.name)\n\n def greetByTime(self, time='Morning'):\n print('Hello', self.name, ' . ', time)\n\n\nprint('Accessing Static Variable', Person.alive)\n<mask token>\nprint(\"\"\"\n\nAccessing Functions \n\n\"\"\")\np.greet()\np.greetByTime()\np.greetByTime('Goodnight')\nprint(\"\"\"\n\nAccessing Variables \n\n\"\"\")\nprint(p.name, p.age, p.gender)\n",
"step-4": "<mask token>\n\n\nclass Person:\n alive = True\n \"\"\"\n\n Possible Attributes for a Person:\n\n 1. Name\n 2. Age\n 3. Gender\n\n \"\"\"\n\n def __init__(self, name, age, gender):\n self.name = name\n self.age = age\n self.gender = gender\n self.salary = 0\n\n def greet(self):\n print('Hello ', self.name)\n\n def greetByTime(self, time='Morning'):\n print('Hello', self.name, ' . ', time)\n\n\nprint('Accessing Static Variable', Person.alive)\np = Person('John', 30, 'Male')\nprint(\"\"\"\n\nAccessing Functions \n\n\"\"\")\np.greet()\np.greetByTime()\np.greetByTime('Goodnight')\nprint(\"\"\"\n\nAccessing Variables \n\n\"\"\")\nprint(p.name, p.age, p.gender)\n",
"step-5": "'''\n\nClasses\n\n'''\n\n\nclass Person:\n alive = True\n\n '''\n\n Possible Attributes for a Person:\n\n 1. Name\n 2. Age\n 3. Gender\n\n '''\n\n def __init__(self, name, age, gender):\n self.name = name\n self.age = age\n self.gender = gender\n self.salary = 0\n\n def greet(self):\n print(\"Hello \", self.name)\n\n def greetByTime(self, time=\"Morning\"):\n print(\"Hello\", self.name, \" . \", time)\n\n\nprint(\"Accessing Static Variable\", Person.alive)\np = Person(\"John\", 30, \"Male\")\n\nprint(\"\\n\\nAccessing Functions \\n\\n\")\np.greet()\np.greetByTime()\np.greetByTime(\"Goodnight\")\n\nprint(\"\\n\\nAccessing Variables \\n\\n\")\nprint(p.name, p.age, p.gender)\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
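One nuance the example prints but does not explain: Person.alive is a class attribute that every instance sees through attribute lookup, while assigning to it on an instance creates a shadowing instance attribute. A quick check:

p1 = Person("Ann", 25, "Female")
p2 = Person("Bob", 40, "Male")
p1.alive = False  # shadows the class attribute on p1 only
print(p1.alive, p2.alive, Person.alive)  # False True True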
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
try:
number = int(input('Enter number: '))
if number < 31:
for num in range(1, number + 1):
print('2 ^', num, '=', 2 ** num)
else:
print('Enter number in valid range')
except Exception:
print('Exception occured')
<|reserved_special_token_1|>
"""
* author - kajol
* date - 12/24/2020
* time - 1:24 PM
* package - com.bridgelabz.basicprograms
* Title - Print a table of the powers of 2 that are less than or equal to 2^N
"""
try:
number = int(input("Enter number: "))
    # print each power of 2 within the given range
if number < 31:
for num in range(1, number+1):
print("2 ^", num, "=", 2**num)
else:
print("Enter number in valid range")
except Exception:
print("Exception occured")
|
flexible
|
{
"blob_id": "b0f0bcfb5739d46de54cbe46614e82bf5a2d13fb",
"index": 9038,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ntry:\n number = int(input('Enter number: '))\n if number < 31:\n for num in range(1, number + 1):\n print('2 ^', num, '=', 2 ** num)\n else:\n print('Enter number in valid range')\nexcept Exception:\n print('Exception occured')\n",
"step-3": "\"\"\"\n * author - kajol\n * date - 12/24/2020\n * time - 1:24 PM\n * package - com.bridgelabz.basicprograms\n * Title - Print a table of the powers of 2 that are less than or equal to 2^N\n\"\"\"\n\ntry:\n number = int(input(\"Enter number: \"))\n #print power of 2 within given range\n if number < 31:\n for num in range(1, number+1):\n print(\"2 ^\", num, \"=\", 2**num)\n else:\n print(\"Enter number in valid range\")\nexcept Exception:\n print(\"Exception occured\")\n\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
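Since each table row just doubles the previous one, the same table can be produced with a left shift, which states the powers-of-two intent directly in integer arithmetic; a minimal equivalent with a sample bound in place of the input() call:

n = 10  # sample bound; the script reads it from input()
for num in range(1, n + 1):
    print("2 ^", num, "=", 1 << num)  # 1 << num == 2 ** num for num >= 0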
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def ex7(*siruri, x=1, flag=True):
res = ()
for sir in siruri:
chars = []
for char in sir:
if ord(char) % x == (not flag):
chars.append(char)
res += chars,
return res
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def ex7(*siruri, x=1, flag=True):
res = ()
for sir in siruri:
chars = []
for char in sir:
if ord(char) % x == (not flag):
chars.append(char)
res += chars,
return res
print(ex7('test', 'hello', 'lab002', x=2, flag=False))
<|reserved_special_token_1|>
def ex7(*siruri, x=1, flag=True):
res = ()
for sir in siruri:
chars = []
for char in sir:
if ord(char) % x == (not flag):
chars.append(char)
res += (chars,)
return res
print(ex7("test", "hello", "lab002", x=2, flag=False))
|
flexible
|
{
"blob_id": "90a402cccf383ed6a12b70ecdc3de623e6e223f9",
"index": 8365,
"step-1": "<mask token>\n",
"step-2": "def ex7(*siruri, x=1, flag=True):\n res = ()\n for sir in siruri:\n chars = []\n for char in sir:\n if ord(char) % x == (not flag):\n chars.append(char)\n res += chars,\n return res\n\n\n<mask token>\n",
"step-3": "def ex7(*siruri, x=1, flag=True):\n res = ()\n for sir in siruri:\n chars = []\n for char in sir:\n if ord(char) % x == (not flag):\n chars.append(char)\n res += chars,\n return res\n\n\nprint(ex7('test', 'hello', 'lab002', x=2, flag=False))\n",
"step-4": "def ex7(*siruri, x=1, flag=True):\n res = ()\n for sir in siruri:\n chars = []\n for char in sir:\n if ord(char) % x == (not flag):\n chars.append(char)\n res += (chars,)\n\n return res\n\n\nprint(ex7(\"test\", \"hello\", \"lab002\", x=2, flag=False))\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
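The filter condition leans on bool/int coercion: not flag evaluates to 0 when flag is True and 1 when flag is False, so the printed call with x=2, flag=False keeps characters whose code point is odd. Tracing the first argument:

# 't' -> 116 (even, dropped), 'e' -> 101 (odd, kept), 's' -> 115 (odd, kept)
print([ord(c) % 2 for c in "test"])  # [0, 1, 1, 0]
print(ex7("test", x=2, flag=False))  # (['e', 's'],)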
import datetime
import json
from dateutil import parser
import mock
from python_http_client.exceptions import ForbiddenError
from rdr_service import clock, config
from rdr_service.api_util import open_cloud_file
from rdr_service.clock import FakeClock
from rdr_service.dao.database_utils import format_datetime
from rdr_service.dao.genomics_dao import GenomicGcDataFileDao, GenomicGCValidationMetricsDao, GenomicIncidentDao, \
GenomicSetMemberDao, UserEventMetricsDao, GenomicJobRunDao, GenomicResultWithdrawalsDao, \
GenomicMemberReportStateDao, GenomicAppointmentEventMetricsDao, GenomicAppointmentEventDao, GenomicResultViewedDao, \
GenomicInformingLoopDao, GenomicAppointmentEventNotifiedDao, GenomicDefaultBaseDao
from rdr_service.dao.message_broker_dao import MessageBrokenEventDataDao
from rdr_service.genomic_enums import GenomicIncidentCode, GenomicJob, GenomicWorkflowState, GenomicSubProcessResult, \
GenomicSubProcessStatus, GenomicManifestTypes, GenomicQcStatus, GenomicReportState
from rdr_service.genomic.genomic_job_components import GenomicFileIngester
from rdr_service.genomic.genomic_job_controller import GenomicJobController
from rdr_service.model.genomics import GenomicGcDataFile, GenomicIncident, GenomicSetMember, GenomicGCValidationMetrics,\
GenomicGCROutreachEscalationNotified
from rdr_service.offline.genomics import genomic_pipeline, genomic_cvl_pipeline
from rdr_service.participant_enums import WithdrawalStatus
from tests import test_data
from tests.genomics_tests.test_genomic_utils import create_ingestion_test_file
from tests.helpers.unittest_base import BaseTestCase
class GenomicJobControllerTest(BaseTestCase):
def setUp(self):
super(GenomicJobControllerTest, self).setUp()
self.data_file_dao = GenomicGcDataFileDao()
self.event_data_dao = MessageBrokenEventDataDao()
self.incident_dao = GenomicIncidentDao()
self.member_dao = GenomicSetMemberDao()
self.metrics_dao = GenomicGCValidationMetricsDao()
self.user_event_metrics_dao = UserEventMetricsDao()
self.job_run_dao = GenomicJobRunDao()
self.report_state_dao = GenomicMemberReportStateDao()
self.appointment_event_dao = GenomicAppointmentEventDao()
self.appointment_metrics_dao = GenomicAppointmentEventMetricsDao()
def test_incident_with_long_message(self):
"""Make sure the length of incident messages doesn't cause issues when recording them"""
incident_message = "1" * (GenomicIncident.message.type.length + 20)
mock_slack_handler = mock.MagicMock()
job_controller = GenomicJobController(job_id=1)
job_controller.genomic_alert_slack = mock_slack_handler
job_controller.create_incident(message=incident_message, slack=True)
# Double check that the incident was saved successfully, with part of the message
incident: GenomicIncident = self.session.query(GenomicIncident).one()
self.assertTrue(incident_message.startswith(incident.message))
# Make sure Slack received the full message
mock_slack_handler.send_message_to_webhook.assert_called_with(
message_data={
'text': incident_message
}
)
def test_gvcf_files_ingestion(self):
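        """gvcf and gvcf md5sum paths should be ingested onto the member's GC validation metrics"""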
job_controller = GenomicJobController(job_id=38)
bucket_name = "test_bucket"
file_path = "Wgs_sample_raw_data/SS_VCF_research/BCM_A100153482_21042005280_SIA0013441__1.hard-filtered.gvcf.gz"
file_path_md5 = "Wgs_sample_raw_data/SS_VCF_research/" \
"BCM_A100153482_21042005280_SIA0013441__1.hard-filtered.gvcf.gz.md5sum"
full_path = f'{bucket_name}/{file_path}'
full_path_md5 = f'{bucket_name}/{file_path_md5}'
gen_set = self.data_generator.create_database_genomic_set(
genomicSetName=".",
genomicSetCriteria=".",
genomicSetVersion=1
)
gen_member = self.data_generator.create_database_genomic_set_member(
genomicSetId=gen_set.id,
biobankId="100153482",
sampleId="21042005280",
genomeType="aou_wgs",
genomicWorkflowState=GenomicWorkflowState.AW1
)
gen_job_run = self.data_generator.create_database_genomic_job_run(
jobId=GenomicJob.AW1_MANIFEST,
startTime=clock.CLOCK.now(),
runResult=GenomicSubProcessResult.SUCCESS
)
gen_processed_file = self.data_generator.create_database_genomic_file_processed(
runId=gen_job_run.id,
startTime=clock.CLOCK.now(),
filePath='/test_file_path',
bucketName='test_bucket',
fileName='test_file_name',
)
self.data_generator.create_database_genomic_gc_validation_metrics(
genomicSetMemberId=gen_member.id,
genomicFileProcessedId=gen_processed_file.id
)
job_controller.ingest_data_files_into_gc_metrics(file_path_md5, bucket_name)
metrics = self.metrics_dao.get_metrics_by_member_id(gen_member.id)
self.assertIsNotNone(metrics.gvcfMd5Path)
self.assertEqual(metrics.gvcfMd5Path, full_path_md5)
job_controller.ingest_data_files_into_gc_metrics(file_path, bucket_name)
metrics = self.metrics_dao.get_metrics_by_member_id(gen_member.id)
self.assertIsNotNone(metrics.gvcfPath)
self.assertEqual(metrics.gvcfPath, full_path)
def test_gvcf_files_ingestion_create_incident(self):
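        """A gvcf path with no matching metrics record should open an UNABLE_TO_FIND_METRIC incident"""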
bucket_name = "test_bucket"
file_path = "Wgs_sample_raw_data/SS_VCF_research/BCM_A100153482_21042005280_SIA0013441__1.hard-filtered.gvcf.gz"
gen_set = self.data_generator.create_database_genomic_set(
genomicSetName=".",
genomicSetCriteria=".",
genomicSetVersion=1
)
gen_member = self.data_generator.create_database_genomic_set_member(
genomicSetId=gen_set.id,
biobankId="111111111",
sampleId="222222222222",
genomeType="aou_wgs",
genomicWorkflowState=GenomicWorkflowState.AW1
)
gen_job_run = self.data_generator.create_database_genomic_job_run(
jobId=GenomicJob.AW1_MANIFEST,
startTime=clock.CLOCK.now(),
runResult=GenomicSubProcessResult.SUCCESS
)
gen_processed_file = self.data_generator.create_database_genomic_file_processed(
runId=gen_job_run.id,
startTime=clock.CLOCK.now(),
filePath='/test_file_path',
bucketName=bucket_name,
fileName='test_file_name',
)
self.data_generator.create_database_genomic_gc_validation_metrics(
genomicSetMemberId=gen_member.id,
genomicFileProcessedId=gen_processed_file.id
)
with GenomicJobController(GenomicJob.INGEST_DATA_FILES) as controller:
controller.ingest_data_files_into_gc_metrics(file_path, bucket_name)
incident = self.incident_dao.get(1)
self.assertIsNotNone(incident)
self.assertEqual(incident.code, GenomicIncidentCode.UNABLE_TO_FIND_METRIC.name)
self.assertEqual(incident.data_file_path, file_path)
self.assertEqual(incident.message, 'INGEST_DATA_FILES: Cannot find '
'genomics metric record for sample id: '
'21042005280')
def test_accession_data_files(self):
test_bucket_baylor = "fake-data-bucket-baylor"
test_idat_file = "fake-data-bucket-baylor/Genotyping_sample_raw_data/204027270091_R02C01_Grn.idat"
test_vcf_file = "fake-data-bucket-baylor/Genotyping_sample_raw_data/204027270091_R02C01.vcf.gz"
test_cram_file = "fake-data-bucket-baylor/Wgs_sample_raw_data/" \
"CRAMs_CRAIs/BCM_A100134256_21063006771_SIA0017196_1.cram"
test_files = [test_idat_file, test_vcf_file, test_cram_file]
test_time = datetime.datetime(2021, 7, 9, 14, 1, 1)
# run job controller method on each file
with clock.FakeClock(test_time):
for file_path in test_files:
with GenomicJobController(GenomicJob.ACCESSION_DATA_FILES) as controller:
controller.accession_data_files(file_path, test_bucket_baylor)
inserted_files = self.data_file_dao.get_all()
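        # Each accessioned path should be parsed into site, prefix, file type and
        # identifier: idat/vcf files key off the chipwellbarcode, cram files off
        # the sample id (see the expected objects below).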
# idat
expected_idat = GenomicGcDataFile(
id=1,
created=test_time,
modified=test_time,
file_path=test_idat_file,
gc_site_id='jh',
bucket_name='fake-data-bucket-baylor',
file_prefix='Genotyping_sample_raw_data',
file_name='204027270091_R02C01_Grn.idat',
file_type='Grn.idat',
identifier_type='chipwellbarcode',
identifier_value='204027270091_R02C01',
ignore_flag=0,
)
# vcf
expected_vcf = GenomicGcDataFile(
id=2,
created=test_time,
modified=test_time,
file_path=test_vcf_file,
gc_site_id='jh',
bucket_name='fake-data-bucket-baylor',
file_prefix='Genotyping_sample_raw_data',
file_name='204027270091_R02C01.vcf.gz',
file_type='vcf.gz',
identifier_type='chipwellbarcode',
identifier_value='204027270091_R02C01',
ignore_flag=0,
)
# cram
expected_cram = GenomicGcDataFile(
id=3,
created=test_time,
modified=test_time,
file_path=test_cram_file,
gc_site_id='bcm',
bucket_name='fake-data-bucket-baylor',
file_prefix='Wgs_sample_raw_data/CRAMs_CRAIs',
file_name='BCM_A100134256_21063006771_SIA0017196_1.cram',
file_type='cram',
identifier_type='sample_id',
identifier_value='21063006771',
ignore_flag=0,
)
# obj mapping
expected_objs = {
0: expected_idat,
1: expected_vcf,
2: expected_cram
}
# verify test objects match expectations
for i in range(3):
self.assertEqual(expected_objs[i].bucket_name, inserted_files[i].bucket_name)
self.assertEqual(expected_objs[i].created, inserted_files[i].created)
self.assertEqual(expected_objs[i].file_name, inserted_files[i].file_name)
self.assertEqual(expected_objs[i].file_path, inserted_files[i].file_path)
self.assertEqual(expected_objs[i].file_prefix, inserted_files[i].file_prefix)
self.assertEqual(expected_objs[i].file_type, inserted_files[i].file_type)
self.assertEqual(expected_objs[i].gc_site_id, inserted_files[i].gc_site_id)
self.assertEqual(expected_objs[i].id, inserted_files[i].id)
self.assertEqual(expected_objs[i].identifier_type, inserted_files[i].identifier_type)
self.assertEqual(expected_objs[i].identifier_value, inserted_files[i].identifier_value)
self.assertEqual(expected_objs[i].ignore_flag, inserted_files[i].ignore_flag)
self.assertEqual(expected_objs[i].metadata, inserted_files[i].metadata)
self.assertEqual(expected_objs[i].modified, inserted_files[i].modified)
def test_updating_members_blocklists(self):
gen_set = self.data_generator.create_database_genomic_set(
genomicSetName=".",
genomicSetCriteria=".",
genomicSetVersion=1
)
ids_should_be_updated = []
        # members picked up by the created-date/workflow-state query that MATCH the blocklist criteria
for i in range(4):
ids_should_be_updated.append(
self.data_generator.create_database_genomic_set_member(
genomicSetId=gen_set.id,
biobankId="100153482",
sampleId="21042005280",
genomeType='test_investigation_one' if i & 2 != 0 else 'aou_wgs',
genomicWorkflowState=GenomicWorkflowState.AW0,
ai_an='Y' if i & 2 == 0 else 'N'
).id
)
        # members picked up by the same query whose attributes DO NOT MATCH the criteria
for i in range(2):
self.data_generator.create_database_genomic_set_member(
genomicSetId=gen_set.id,
biobankId="100153482",
sampleId="21042005280",
genomeType='aou_array',
genomicWorkflowState=GenomicWorkflowState.AW0,
ai_an='N'
)
with GenomicJobController(GenomicJob.UPDATE_MEMBERS_BLOCKLISTS) as controller:
controller.update_members_blocklists()
        # blocklist criteria are loaded from the config JSON in base_config.json
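        # For illustration only, the blocklist config is assumed to look roughly like:
        #   "genomic_member_blocklists": {
        #       "block_research": [
        #           {"attribute": "ai_an", "value": "Y", "reason_string": "aian"},
        #           {"attribute": "genome_type", "value": "test_investigation_one",
        #            "reason_string": "test_sample_swap"}],
        #       "block_results": [...]
        #   }
        # (shape assumed here; base_config.json is the authoritative version)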
created_members = self.member_dao.get_all()
        blocklisted = list(filter(lambda x: x.blockResults == 1 or x.blockResearch == 1, created_members))
        # list.sort() returns None, so compare sorted copies instead
        self.assertEqual(sorted(ids_should_be_updated), sorted(obj.id for obj in blocklisted))
# should be RESEARCH blocked
self.assertTrue(all(
obj.blockResearch == 1 and obj.blockResearchReason is not None and obj.blockResearchReason == 'aian'
for obj in created_members if obj.ai_an == 'Y' and obj.genomicWorkflowState == GenomicWorkflowState.AW0)
)
# should NOT be RESULTS blocked
self.assertTrue(all(
obj.blockResults == 0 and obj.blockResultsReason is None
for obj in created_members if obj.ai_an == 'Y' and obj.genomicWorkflowState == GenomicWorkflowState.AW0)
)
# should be RESEARCH blocked
self.assertTrue(all(
obj.blockResearch == 1 and obj.blockResearchReason is not None and obj.blockResearchReason == 'test_sample_swap'
for obj in created_members if obj.genomeType == 'test_investigation_one' and obj.genomicWorkflowState ==
GenomicWorkflowState.AW0)
)
# should be RESULTS blocked
self.assertTrue(all(
obj.blockResults == 1 and obj.blockResultsReason is not None and obj.blockResultsReason == 'test_sample_swap'
for obj in created_members if obj.genomeType == 'test_investigation_one' and obj.genomicWorkflowState ==
GenomicWorkflowState.AW0)
)
# should NOT be RESEARCH/RESULTS blocked
self.assertTrue(all(
obj.blockResearch == 0 and obj.blockResearchReason is None
for obj in created_members if obj.genomeType == 'aou_array' and obj.genomicWorkflowState ==
GenomicWorkflowState.AW0)
)
self.assertTrue(all(
obj.blockResults == 0 and obj.blockResultsReason is None
for obj in created_members if obj.genomeType == 'aou_array' and obj.genomicWorkflowState ==
GenomicWorkflowState.AW0)
)
# clear current set member records
with self.member_dao.session() as session:
session.query(GenomicSetMember).delete()
run_result = self.job_run_dao.get(1)
self.assertEqual(run_result.runStatus, GenomicSubProcessStatus.COMPLETED)
self.assertEqual(run_result.runResult, GenomicSubProcessResult.SUCCESS)
        # members picked up by the modified-date query that MATCH the criteria
for i in range(4):
self.data_generator.create_database_genomic_set_member(
genomicSetId=gen_set.id,
biobankId="100153482",
sampleId="21042005280",
genomeType='test_investigation_one' if i & 2 != 0 else 'aou_wgs',
genomicWorkflowState=GenomicWorkflowState.AW1,
ai_an='Y' if i & 2 == 0 else 'N'
)
with GenomicJobController(GenomicJob.UPDATE_MEMBERS_BLOCKLISTS) as controller:
controller.update_members_blocklists()
modified_members = self.member_dao.get_all()
# should be RESEARCH blocked
self.assertTrue(all(
obj.blockResearch == 1 and obj.blockResearchReason is not None and obj.blockResearchReason == 'aian'
for obj in modified_members if obj.ai_an == 'Y' and obj.genomicWorkflowState == GenomicWorkflowState.AW1)
)
# should NOT be RESULTS blocked
self.assertTrue(all(
obj.blockResults == 0 and obj.blockResultsReason is None
for obj in modified_members if obj.ai_an == 'Y' and obj.genomicWorkflowState == GenomicWorkflowState.AW1)
)
# should be RESEARCH blocked
self.assertTrue(all(
obj.blockResearch == 1 and obj.blockResearchReason is not None and obj.blockResearchReason == 'test_sample_swap'
for obj in modified_members if obj.genomeType == 'test_investigation_one' and obj.genomicWorkflowState ==
GenomicWorkflowState.AW1)
)
# should be RESULTS blocked
self.assertTrue(all(
obj.blockResults == 1 and obj.blockResultsReason is not None and obj.blockResultsReason == 'test_sample_swap'
for obj in modified_members if obj.genomeType == 'test_investigation_one' and obj.genomicWorkflowState ==
GenomicWorkflowState.AW1)
)
run_result = self.job_run_dao.get(2)
self.assertEqual(run_result.runStatus, GenomicSubProcessStatus.COMPLETED)
self.assertEqual(run_result.runResult, GenomicSubProcessResult.SUCCESS)
def test_ingest_user_metrics_file(self):
test_file = 'Genomic-Metrics-File-User-Events-Test.csv'
bucket_name = 'test_bucket'
sub_folder = 'user_events'
pids = []
file_ingester = GenomicFileIngester()
for _ in range(2):
pid = self.data_generator.create_database_participant()
pids.append(pid.participantId)
test_metrics_file = create_ingestion_test_file(
test_file,
bucket_name,
sub_folder)
test_file_path = f'{bucket_name}/{sub_folder}/{test_metrics_file}'
with open_cloud_file(test_file_path) as csv_file:
metrics_to_ingest = file_ingester._read_data_to_ingest(csv_file)
with GenomicJobController(GenomicJob.METRICS_FILE_INGEST) as controller:
controller.ingest_metrics_file(
metric_type='user_events',
file_path=test_file_path,
)
job_run_id = controller.job_run.id
metrics = self.user_event_metrics_dao.get_all()
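        # participant ids in the file carry a 'P' prefix (e.g. 'P123'), hence the
        # split('P') below when matching file rows to database pids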
for pid in pids:
            file_metrics = list(filter(
                lambda x: int(x['participant_id'].split('P')[-1]) == pid,
                metrics_to_ingest['rows']))
participant_ingested_metrics = list(filter(lambda x: x.participant_id == pid, metrics))
self.assertEqual(len(file_metrics), len(participant_ingested_metrics))
self.assertTrue(all(obj.run_id == job_run_id for obj in participant_ingested_metrics))
@mock.patch('rdr_service.genomic.genomic_job_controller.GenomicJobController.execute_cloud_task')
def test_reconcile_pdr_data(self, mock_cloud_task):
# init new job run in __enter__
with GenomicJobController(GenomicJob.RECONCILE_PDR_DATA) as controller:
controller.reconcile_pdr_data()
cloud_task_endpoint = 'rebuild_genomic_table_records_task'
first_run = self.job_run_dao.get_all()
self.assertEqual(mock_cloud_task.call_count, 1)
call_args = mock_cloud_task.call_args_list
self.assertEqual(len(call_args), 1)
self.assertEqual(call_args[0].args[0]['table'], self.job_run_dao.model_type.__tablename__)
self.assertTrue(type(call_args[0].args[0]['ids']) is list)
self.assertEqual(call_args[0].args[0]['ids'], [obj.id for obj in first_run])
self.assertEqual(call_args[0].args[1], cloud_task_endpoint)
participant = self.data_generator.create_database_participant()
gen_set = self.data_generator.create_database_genomic_set(
genomicSetName=".",
genomicSetCriteria=".",
genomicSetVersion=1
)
plus_ten = clock.CLOCK.now() + datetime.timedelta(minutes=10)
plus_ten = plus_ten.replace(microsecond=0)
with FakeClock(plus_ten):
for i in range(2):
gen_member = self.data_generator.create_database_genomic_set_member(
genomicSetId=gen_set.id,
biobankId="100153482",
sampleId="21042005280",
genomeType="aou_wgs",
genomicWorkflowState=GenomicWorkflowState.AW1
)
gen_processed_file = self.data_generator.create_database_genomic_file_processed(
runId=first_run[0].id,
startTime=clock.CLOCK.now(),
filePath=f'test_file_path_{i}',
bucketName='test_bucket',
fileName='test_file_name',
)
self.data_generator.create_database_genomic_gc_validation_metrics(
genomicSetMemberId=gen_member.id,
genomicFileProcessedId=gen_processed_file.id
)
manifest = self.data_generator.create_database_genomic_manifest_file(
manifestTypeId=2,
filePath=f'test_file_path_{i}'
)
self.data_generator.create_database_genomic_manifest_feedback(
inputManifestFileId=manifest.id,
feedbackRecordCount=2
)
self.data_generator.create_database_genomic_user_event_metrics(
participant_id=participant.participantId,
event_name='test_event',
run_id=1,
)
self.data_generator.create_database_genomic_informing_loop(
message_record_id=1,
event_type='informing_loop_decision',
module_type='gem',
participant_id=participant.participantId,
decision_value='maybe_later',
event_authored_time=clock.CLOCK.now()
)
self.data_generator.create_database_genomic_cvl_past_due(
cvl_site_id='co',
email_notification_sent=0,
sample_id='sample_test',
results_type='hdr',
genomic_set_member_id=gen_member.id
)
self.data_generator.create_database_genomic_appointment(
message_record_id=i,
appointment_id=i,
event_type='appointment_scheduled',
module_type='hdr',
participant_id=participant.participantId,
event_authored_time=clock.CLOCK.now(),
source='Color',
appointment_timestamp=format_datetime(clock.CLOCK.now()),
appointment_timezone='America/Los_Angeles',
location='123 address st',
contact_number='17348675309',
language='en'
)
self.data_generator.create_database_genomic_member_report_state(
genomic_set_member_id=gen_member.id,
participant_id=participant.participantId,
module='gem',
genomic_report_state=GenomicReportState.GEM_RPT_READY,
event_authored_time=clock.CLOCK.now()
)
self.data_generator.create_genomic_result_viewed(
participant_id=participant.participantId,
event_type='result_viewed',
event_authored_time=clock.CLOCK.now(),
module_type='gem',
sample_id=gen_member.sampleId
)
        # picks up the new records created since the last reconcile run above
with GenomicJobController(GenomicJob.RECONCILE_PDR_DATA) as controller:
controller.reconcile_pdr_data()
affected_tables = [
'genomic_set',
'genomic_set_member',
'genomic_job_run',
'genomic_file_processed',
'genomic_gc_validation_metrics',
'genomic_manifest_file',
'genomic_manifest_feedback',
'genomic_informing_loop',
'genomic_cvl_results_past_due',
'user_event_metrics',
'genomic_member_report_state',
'genomic_result_viewed',
'genomic_appointment_event'
]
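        # 14 total calls expected: 1 from the first reconcile run plus one per
        # affected table from the second run (the mock is not reset between runs)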
num_calls = len(affected_tables) + 1
self.assertEqual(mock_cloud_task.call_count, num_calls)
call_args = mock_cloud_task.call_args_list
self.assertEqual(len(call_args), num_calls)
mock_tables = set([obj[0][0]['table'] for obj in call_args])
mock_endpoint = [obj[0][1] for obj in call_args]
        # list.sort() returns None, so compare sorted copies instead
        self.assertEqual(sorted(mock_tables), sorted(affected_tables))
        self.assertTrue(all(obj == cloud_task_endpoint for obj in mock_endpoint))
@mock.patch('rdr_service.genomic.genomic_job_controller.GenomicJobController.execute_cloud_task')
def test_retry_manifest_ingestions_if_deltas(self, mock_cloud_task):
bucket_name = "test-bucket"
aw1_file_name = "AW1_wgs_sample_manifests/RDR_AoU_SEQ_PKG-2104-026571.csv"
aw1_manifest_path = f"{bucket_name}/{aw1_file_name}"
aw2_file_name = "AW2_wgs_data_manifests/RDR_AoU_SEQ_DataManifest_04092021.csv"
aw2_manifest_path = f"{bucket_name}/{aw2_file_name}"
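        # This test runs the retry job in three phases: with no raw/ingested data
        # (NO_FILES), with raw and ingested records in sync (NO_FILES), and with
        # raw records but emptied ingestion tables (deltas -> cloud tasks fired).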
gen_set = self.data_generator.create_database_genomic_set(
genomicSetName=".",
genomicSetCriteria=".",
genomicSetVersion=1
)
# Create AW1 job_run
aw1_job_run = self.data_generator.create_database_genomic_job_run(
jobId=GenomicJob.AW1_MANIFEST,
startTime=clock.CLOCK.now(),
endTime=clock.CLOCK.now(),
runResult=GenomicSubProcessResult.SUCCESS
)
# Create AW2 job_run
aw2_job_run = self.data_generator.create_database_genomic_job_run(
jobId=GenomicJob.METRICS_INGESTION,
startTime=clock.CLOCK.now(),
endTime=clock.CLOCK.now(),
runResult=GenomicSubProcessResult.SUCCESS
)
# should have no data
with GenomicJobController(GenomicJob.RETRY_MANIFEST_INGESTIONS) as controller:
controller.retry_manifest_ingestions()
job_run = self.job_run_dao.get(3)
self.assertEqual(job_run.jobId, GenomicJob.RETRY_MANIFEST_INGESTIONS)
self.assertEqual(job_run.runStatus, GenomicSubProcessStatus.COMPLETED)
self.assertEqual(job_run.runResult, GenomicSubProcessResult.NO_FILES)
self.assertEqual(mock_cloud_task.call_count, 0)
self.assertFalse(mock_cloud_task.call_count)
# Create genomic_aw1_raw record
self.data_generator.create_database_genomic_aw1_raw(
file_path=aw1_manifest_path,
package_id="PKG-2104-026571",
biobank_id="A10001",
)
# Create genomic_aw2_raw record
self.data_generator.create_database_genomic_aw2_raw(
file_path=aw2_manifest_path,
biobank_id="A10001",
sample_id="100001",
biobankidsampleid="A10001_100001",
)
# Create AW1 genomic_manifest_file record
aw1_manifest_file = self.data_generator.create_database_genomic_manifest_file(
created=clock.CLOCK.now(),
modified=clock.CLOCK.now(),
uploadDate=clock.CLOCK.now(),
manifestTypeId=GenomicManifestTypes.AW1,
filePath=aw1_manifest_path,
fileName=aw1_file_name,
bucketName=bucket_name,
recordCount=1,
rdrProcessingComplete=1,
rdrProcessingCompleteDate=clock.CLOCK.now(),
)
# Create AW2 genomic_manifest_file record
aw2_manifest_file = self.data_generator.create_database_genomic_manifest_file(
created=clock.CLOCK.now(),
modified=clock.CLOCK.now(),
uploadDate=clock.CLOCK.now(),
manifestTypeId=GenomicManifestTypes.AW2,
filePath=aw2_manifest_path,
fileName=aw2_file_name,
bucketName=bucket_name,
recordCount=1,
rdrProcessingComplete=1,
rdrProcessingCompleteDate=clock.CLOCK.now(),
)
# Create AW1 file_processed
aw1_file_processed = self.data_generator.create_database_genomic_file_processed(
runId=aw1_job_run.id,
startTime=clock.CLOCK.now(),
genomicManifestFileId=aw1_manifest_file.id,
filePath=f"/{aw1_manifest_path}",
bucketName=bucket_name,
fileName=aw1_file_name,
)
# Create AW2 file_processed
aw2_file_processed = self.data_generator.create_database_genomic_file_processed(
runId=aw2_job_run.id,
startTime=clock.CLOCK.now(),
genomicManifestFileId=aw2_manifest_file.id,
filePath=f"/{aw2_manifest_path}",
bucketName=bucket_name,
fileName=aw2_file_name,
)
# genomic_set_member for AW1
gen_member = self.data_generator.create_database_genomic_set_member(
genomicSetId=gen_set.id,
biobankId="100153482",
sampleId="21042005280",
genomeType="aou_wgs",
genomicWorkflowState=GenomicWorkflowState.AW1,
aw1FileProcessedId=aw1_file_processed.id
)
# genomic_gc_validation_metrics for AW1
self.data_generator.create_database_genomic_gc_validation_metrics(
genomicSetMemberId=gen_member.id,
genomicFileProcessedId=aw2_file_processed.id
)
# one AW1/AW2 with no deltas
with GenomicJobController(GenomicJob.RETRY_MANIFEST_INGESTIONS) as controller:
controller.retry_manifest_ingestions()
job_run = self.job_run_dao.get(4)
self.assertEqual(job_run.jobId, GenomicJob.RETRY_MANIFEST_INGESTIONS)
self.assertEqual(job_run.runStatus, GenomicSubProcessStatus.COMPLETED)
self.assertEqual(job_run.runResult, GenomicSubProcessResult.NO_FILES)
self.assertEqual(mock_cloud_task.call_count, 0)
self.assertFalse(mock_cloud_task.call_count)
# empty tables resulting in deltas and cloud task calls
with self.member_dao.session() as session:
session.query(GenomicGCValidationMetrics).delete()
session.query(GenomicSetMember).delete()
with GenomicJobController(GenomicJob.RETRY_MANIFEST_INGESTIONS) as controller:
controller.retry_manifest_ingestions()
job_run = self.job_run_dao.get(5)
self.assertEqual(job_run.jobId, GenomicJob.RETRY_MANIFEST_INGESTIONS)
self.assertEqual(job_run.runStatus, GenomicSubProcessStatus.COMPLETED)
self.assertEqual(job_run.runResult, GenomicSubProcessResult.SUCCESS)
# one AW1/AW2 with deltas
self.assertEqual(mock_cloud_task.call_count, 2)
self.assertTrue(mock_cloud_task.call_count)
call_args = mock_cloud_task.call_args_list
self.assertEqual(len(call_args), 2)
cloud_task_endpoint = ['ingest_aw1_manifest_task', 'ingest_aw2_manifest_task']
mock_endpoint = [obj[0][1] for obj in call_args]
        self.assertTrue(all(obj in cloud_task_endpoint for obj in mock_endpoint))
        mock_buckets = set([obj[0][0]['bucket_name'] for obj in call_args])
        self.assertEqual(len(mock_buckets), 1)
self.assertTrue(list(mock_buckets)[0] == bucket_name)
def test_calculate_informing_loop_ready_flags(self):
num_participants = 4
gen_set = self.data_generator.create_database_genomic_set(
genomicSetName=".",
genomicSetCriteria=".",
genomicSetVersion=1
)
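        # Each participant below gets ROR consent, a stored sample tied to a
        # clinic collection order (two order identifiers), and a WGS member with
        # passing QC metrics -- the criteria the ready-flag calculation is
        # expected to check.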
for num in range(num_participants):
plus_num = clock.CLOCK.now() + datetime.timedelta(minutes=num)
plus_num = plus_num.replace(microsecond=0)
with FakeClock(plus_num):
summary = self.data_generator.create_database_participant_summary(
consentForStudyEnrollment=1,
consentForGenomicsROR=1
)
stored_sample = self.data_generator.create_database_biobank_stored_sample(
biobankId=summary.biobankId,
biobankOrderIdentifier=self.fake.pyint()
)
collection_site = self.data_generator.create_database_site(
siteType='Clinic'
)
order = self.data_generator.create_database_biobank_order(
collectedSiteId=collection_site.siteId,
participantId=summary.participantId,
finalizedTime=plus_num
)
self.data_generator.create_database_biobank_order_identifier(
value=stored_sample.biobankOrderIdentifier,
biobankOrderId=order.biobankOrderId,
system="1",
)
self.data_generator.create_database_biobank_order_identifier(
value=stored_sample.biobankOrderIdentifier,
biobankOrderId=order.biobankOrderId,
system="2",
)
member = self.data_generator.create_database_genomic_set_member(
genomicSetId=gen_set.id,
participantId=summary.participantId,
genomeType=config.GENOME_TYPE_WGS,
qcStatus=GenomicQcStatus.PASS,
gcManifestSampleSource='Whole Blood',
collectionTubeId=stored_sample.biobankStoredSampleId
)
self.data_generator.create_database_genomic_gc_validation_metrics(
genomicSetMemberId=member.id,
sexConcordance='True',
drcFpConcordance='Pass',
drcSexConcordance='Pass',
processingStatus='Pass'
)
members_for_ready_loop = self.member_dao.get_members_for_informing_loop_ready()
self.assertEqual(len(members_for_ready_loop), num_participants)
current_set_members = self.member_dao.get_all()
self.assertTrue(all(obj.informingLoopReadyFlag == 0 for obj in current_set_members))
self.assertTrue(all(obj.informingLoopReadyFlagModified is None for obj in current_set_members))
with GenomicJobController(GenomicJob.CALCULATE_INFORMING_LOOP_READY) as controller:
controller.calculate_informing_loop_ready_flags()
# no config object, controller method should return
members_for_ready_loop = self.member_dao.get_members_for_informing_loop_ready()
self.assertEqual(len(members_for_ready_loop), num_participants)
calculation_limit = 2
config.override_setting(config.CALCULATE_READY_FLAG_LIMIT, [calculation_limit])
with GenomicJobController(GenomicJob.CALCULATE_INFORMING_LOOP_READY) as controller:
controller.calculate_informing_loop_ready_flags()
current_set_members = self.member_dao.get_all()
self.assertTrue(any(obj.informingLoopReadyFlag == 1 for obj in current_set_members))
self.assertTrue(any(obj.informingLoopReadyFlagModified is not None for obj in current_set_members))
current_loops_set = [obj for obj in current_set_members if obj.informingLoopReadyFlag == 1
and obj.informingLoopReadyFlagModified is not None]
self.assertEqual(len(current_loops_set), calculation_limit)
members_for_ready_loop = self.member_dao.get_members_for_informing_loop_ready()
self.assertEqual(len(members_for_ready_loop), num_participants // 2)
with GenomicJobController(GenomicJob.CALCULATE_INFORMING_LOOP_READY) as controller:
controller.calculate_informing_loop_ready_flags()
current_set_members = self.member_dao.get_all()
self.assertTrue(all(obj.informingLoopReadyFlag == 1 for obj in current_set_members))
self.assertTrue(all(obj.informingLoopReadyFlagModified is not None for obj in current_set_members))
members_for_ready_loop = self.member_dao.get_members_for_informing_loop_ready()
self.assertEqual(len(members_for_ready_loop), 0)
@mock.patch('rdr_service.services.email_service.EmailService.send_email')
def test_getting_results_withdrawn(self, email_mock):
num_participants = 4
result_withdrawal_dao = GenomicResultWithdrawalsDao()
gen_set = self.data_generator.create_database_genomic_set(
genomicSetName=".",
genomicSetCriteria=".",
genomicSetVersion=1
)
gen_job_run = self.data_generator.create_database_genomic_job_run(
jobId=GenomicJob.AW1_MANIFEST,
startTime=clock.CLOCK.now(),
runResult=GenomicSubProcessResult.SUCCESS
)
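        # each withdrawn (EARLY_OUT) participant gets an ARRAY member (GEM A1 run
        # on even indexes only) and a WGS member with a CVL W1IL HDR run, so the
        # withdrawal check should flag GEM results for 2 pids and CVL for all 4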
pids = []
for num in range(num_participants):
summary = self.data_generator.create_database_participant_summary(
consentForStudyEnrollment=1,
consentForGenomicsROR=1,
withdrawalStatus=WithdrawalStatus.EARLY_OUT
)
self.data_generator.create_database_genomic_set_member(
genomicSetId=gen_set.id,
participantId=summary.participantId,
genomeType=config.GENOME_TYPE_ARRAY,
gemA1ManifestJobRunId=gen_job_run.id if num % 2 == 0 else None
)
self.data_generator.create_database_genomic_set_member(
genomicSetId=gen_set.id,
participantId=summary.participantId,
genomeType=config.GENOME_TYPE_WGS,
cvlW1ilHdrJobRunId=gen_job_run.id
)
pids.append(summary.participantId)
config.override_setting(config.RDR_GENOMICS_NOTIFICATION_EMAIL, 'email@test.com')
with GenomicJobController(GenomicJob.RESULTS_PIPELINE_WITHDRAWALS) as controller:
controller.check_results_withdrawals()
        # the email mock should have been called twice: once for GEM, once for HEALTH
self.assertEqual(email_mock.call_count, 2)
call_args = email_mock.call_args_list
self.assertTrue(any('GEM' in call.args[0].subject for call in call_args))
self.assertTrue(any('HEALTH' in call.args[0].subject for call in call_args))
job_runs = self.job_run_dao.get_all()
current_job_run = list(filter(lambda x: x.jobId == GenomicJob.RESULTS_PIPELINE_WITHDRAWALS, job_runs))[0]
self.assertTrue(current_job_run.runResult == GenomicSubProcessResult.SUCCESS)
all_withdrawal_records = result_withdrawal_dao.get_all()
self.assertTrue(len(all_withdrawal_records) == len(pids))
self.assertTrue(all(obj.participant_id in pids for obj in all_withdrawal_records))
        array_results = list(filter(lambda x: x.array_results == 1, all_withdrawal_records))
        # only the 2 participants with a GEM A1 manifest run should have array results
        self.assertEqual(len(array_results), 2)
        cvl_results = list(filter(lambda x: x.cvl_results == 1, all_withdrawal_records))
        # every participant has a CVL W1IL run, so all 4 should have cvl results
        self.assertEqual(len(cvl_results), num_participants)
with GenomicJobController(GenomicJob.RESULTS_PIPELINE_WITHDRAWALS) as controller:
controller.check_results_withdrawals()
        # the call count is unchanged: the second run finds no new withdrawal records
self.assertEqual(email_mock.call_count, 2)
job_runs = self.job_run_dao.get_all()
current_job_run = list(filter(lambda x: x.jobId == GenomicJob.RESULTS_PIPELINE_WITHDRAWALS, job_runs))[1]
self.assertTrue(current_job_run.runResult == GenomicSubProcessResult.NO_RESULTS)
def test_gem_results_to_report_state(self):
num_participants = 8
gen_set = self.data_generator.create_database_genomic_set(
genomicSetName=".",
genomicSetCriteria=".",
genomicSetVersion=1
)
gem_a2_job_run = self.data_generator.create_database_genomic_job_run(
jobId=GenomicJob.GEM_A2_MANIFEST,
startTime=clock.CLOCK.now(),
runResult=GenomicSubProcessResult.SUCCESS
)
pids_to_update, member_ids = [], []
for num in range(num_participants):
summary = self.data_generator.create_database_participant_summary(
consentForStudyEnrollment=1,
consentForGenomicsROR=1,
withdrawalStatus=WithdrawalStatus.EARLY_OUT
)
member = self.data_generator.create_database_genomic_set_member(
genomicSetId=gen_set.id,
participantId=summary.participantId,
genomeType=config.GENOME_TYPE_ARRAY
)
if num % 2 == 0:
member_ids.append(member.id)
pids_to_update.append(summary.participantId)
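        # only the even-indexed half of the members will later get an A2 run id
        # and the GEM_RPT_READY state, so exactly len(pids_to_update) report
        # states are expected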
with GenomicJobController(GenomicJob.GEM_RESULT_REPORTS) as controller:
controller.gem_results_to_report_state()
current_job_runs = self.job_run_dao.get_all()
self.assertEqual(len(current_job_runs), 2)
current_job_run = list(filter(lambda x: x.jobId == GenomicJob.GEM_RESULT_REPORTS, current_job_runs))[0]
self.assertTrue(current_job_run.runResult == GenomicSubProcessResult.NO_RESULTS)
current_members = self.member_dao.get_all()
        # after updating 4 members with an A2 run id and the GEM_RPT_READY state,
        # those members should be returned on the next run
for member in current_members:
if member.participantId in pids_to_update:
member.gemA2ManifestJobRunId = gem_a2_job_run.id
member.genomicWorkflowState = GenomicWorkflowState.GEM_RPT_READY
self.member_dao.update(member)
with GenomicJobController(GenomicJob.GEM_RESULT_REPORTS) as controller:
controller.gem_results_to_report_state()
current_job_runs = self.job_run_dao.get_all()
self.assertEqual(len(current_job_runs), 3)
current_job_run = list(filter(lambda x: x.jobId == GenomicJob.GEM_RESULT_REPORTS, current_job_runs))[1]
self.assertTrue(current_job_run.runResult == GenomicSubProcessResult.SUCCESS)
current_gem_report_states = self.report_state_dao.get_all()
self.assertEqual(len(current_gem_report_states), len(pids_to_update))
self.assertTrue(all(obj.event_type == 'result_ready' for obj in current_gem_report_states))
self.assertTrue(all(obj.event_authored_time is not None for obj in current_gem_report_states))
self.assertTrue(all(obj.module == 'gem' for obj in current_gem_report_states))
self.assertTrue(
all(obj.genomic_report_state == GenomicReportState.GEM_RPT_READY for obj in current_gem_report_states)
)
self.assertTrue(
all(obj.genomic_report_state_str == GenomicReportState.GEM_RPT_READY.name for obj in
current_gem_report_states)
)
self.assertTrue(
all(obj.genomic_set_member_id in member_ids for obj in
current_gem_report_states)
)
        # the 4 members already have report states, so the rerun should return no results
with GenomicJobController(GenomicJob.GEM_RESULT_REPORTS) as controller:
controller.gem_results_to_report_state()
current_job_runs = self.job_run_dao.get_all()
self.assertEqual(len(current_job_runs), 4)
current_job_run = list(filter(lambda x: x.jobId == GenomicJob.GEM_RESULT_REPORTS, current_job_runs))[2]
self.assertTrue(current_job_run.runResult == GenomicSubProcessResult.NO_RESULTS)
self.clear_table_after_test('genomic_member_report_state')
def test_reconcile_informing_loop(self):
event_dao = UserEventMetricsDao()
event_dao.truncate() # for test suite
il_dao = GenomicInformingLoopDao()
for pid in range(8):
self.data_generator.create_database_participant(participantId=1 + pid, biobankId=1 + pid)
# Set up initial job run ID
self.data_generator.create_database_genomic_job_run(
jobId=GenomicJob.METRICS_FILE_INGEST,
startTime=clock.CLOCK.now()
)
# create genomic set
self.data_generator.create_database_genomic_set(
genomicSetName='test',
genomicSetCriteria='.',
genomicSetVersion=1
)
# insert set members
for b in ["aou_array", "aou_wgs"]:
for i in range(1, 9):
self.data_generator.create_database_genomic_set_member(
participantId=i,
genomicSetId=1,
biobankId=i,
collectionTubeId=100 + i,
sampleId=10 + i,
genomeType=b,
)
# Set up ingested metrics data
events = ['gem.informing_loop.started',
'gem.informing_loop.screen8_no',
'gem.informing_loop.screen8_yes',
'hdr.informing_loop.started',
'gem.informing_loop.screen3',
'pgx.informing_loop.screen8_no',
'hdr.informing_loop.screen10_no']
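        # these raw user-event names are what the reconcile job is expected to
        # translate into informing-loop decision records (the latest event per
        # participant/module wins)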
for p in range(4):
for i in range(len(events)):
self.data_generator.create_database_genomic_user_event_metrics(
created=clock.CLOCK.now(),
modified=clock.CLOCK.now(),
participant_id=p + 1,
created_at=datetime.datetime(2021, 12, 29, 00) + datetime.timedelta(hours=i),
event_name=events[i],
run_id=1,
ignore_flag=0,
)
# Set up informing loop from message broker records
decisions = [None, 'no', 'yes']
for p in range(3):
for i in range(2):
self.data_generator.create_database_genomic_informing_loop(
message_record_id=i,
event_type='informing_loop_started' if i == 0 else 'informing_loop_decision',
module_type='gem',
participant_id=p + 1,
decision_value=decisions[i],
sample_id=100 + p,
event_authored_time=datetime.datetime(2021, 12, 29, 00) + datetime.timedelta(hours=i)
)
# Test for no message but yes user event
self.data_generator.create_database_genomic_user_event_metrics(
created=clock.CLOCK.now(),
modified=clock.CLOCK.now(),
participant_id=6,
created_at=datetime.datetime(2021, 12, 29, 00),
event_name='gem.informing_loop.screen8_yes',
run_id=1,
ignore_flag=0,
)
# Run reconcile job
genomic_pipeline.reconcile_informing_loop_responses()
# Test mismatched GEM data ingested correctly
pid_list = [1, 2, 3, 6]
new_il_values = il_dao.get_latest_il_for_pids(
pid_list=pid_list,
module="gem"
)
for value in new_il_values:
self.assertEqual("yes", value.decision_value)
pid_list = [1, 2, 3, 4]
for module in ["hdr", "pgx"]:
new_il_values = il_dao.get_latest_il_for_pids(
pid_list=pid_list,
module=module
)
for value in new_il_values:
self.assertEqual("no", value.decision_value)
self.assertIsNotNone(value.created_from_metric_id)
def test_reconcile_message_broker_results_ready(self):
# Create Test Participants' data
# create genomic set
self.data_generator.create_database_genomic_set(
genomicSetName='test',
genomicSetCriteria='.',
genomicSetVersion=1
)
# Set up initial job run ID
self.data_generator.create_database_genomic_job_run(
jobId=GenomicJob.METRICS_FILE_INGEST,
startTime=clock.CLOCK.now()
)
for pid in range(7):
self.data_generator.create_database_participant(participantId=1 + pid, biobankId=1 + pid)
# insert set members and event metrics records
for i in range(1, 6):
self.data_generator.create_database_genomic_set_member(
participantId=i,
genomicSetId=1,
biobankId=i,
collectionTubeId=100 + i,
sampleId=10 + i,
genomeType="aou_wgs",
)
# 3 PGX records
if i < 4:
self.data_generator.create_database_genomic_user_event_metrics(
participant_id=i,
created_at=datetime.datetime(2022, 10, 6, 00),
event_name="pgx.result_ready",
run_id=1,
)
# 1 HDR Positive
if i == 4:
self.data_generator.create_database_genomic_user_event_metrics(
participant_id=i,
created_at=datetime.datetime(2022, 10, 6, 00),
event_name="hdr.result_ready.informative",
run_id=1,
)
# 1 HDR uninformative
if i == 5:
self.data_generator.create_database_genomic_user_event_metrics(
participant_id=i,
created_at=datetime.datetime(2022, 10, 6, 00),
event_name="hdr.result_ready.uninformative",
run_id=1,
)
# Run job
genomic_cvl_pipeline.reconcile_message_broker_results_ready()
# Test correct data inserted
report_state_dao = GenomicMemberReportStateDao()
states = report_state_dao.get_all()
self.assertEqual(5, len(states))
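        # 5 report states expected: 3 PGX ready, 1 HDR informative (positive),
        # 1 HDR uninformative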
pgx_records = [rec for rec in states if rec.module == "pgx_v1"]
hdr_record_uninf = [rec for rec in states
if rec.genomic_report_state == GenomicReportState.HDR_RPT_UNINFORMATIVE][0]
hdr_record_pos = [rec for rec in states
if rec.genomic_report_state == GenomicReportState.HDR_RPT_POSITIVE][0]
for pgx_record in pgx_records:
self.assertEqual(GenomicReportState.PGX_RPT_READY, pgx_record.genomic_report_state)
self.assertEqual("PGX_RPT_READY", pgx_record.genomic_report_state_str)
self.assertEqual(int(pgx_record.sample_id), pgx_record.participant_id + 10)
self.assertEqual("result_ready", pgx_record.event_type)
self.assertEqual(datetime.datetime(2022, 10, 6, 00), pgx_record.event_authored_time)
self.assertIsNotNone(pgx_record.created_from_metric_id)
self.assertEqual("HDR_RPT_UNINFORMATIVE", hdr_record_uninf.genomic_report_state_str)
self.assertEqual(int(hdr_record_uninf.sample_id), hdr_record_uninf.participant_id + 10)
self.assertEqual("result_ready", hdr_record_uninf.event_type)
self.assertEqual(datetime.datetime(2022, 10, 6, 00), hdr_record_uninf.event_authored_time)
self.assertIsNotNone(hdr_record_uninf.created_from_metric_id)
self.assertEqual("HDR_RPT_POSITIVE", hdr_record_pos.genomic_report_state_str)
self.assertEqual(int(hdr_record_pos.sample_id), hdr_record_pos.participant_id + 10)
self.assertEqual("result_ready", hdr_record_pos.event_type)
self.assertEqual(datetime.datetime(2022, 10, 6, 00), hdr_record_pos.event_authored_time)
self.assertIsNotNone(hdr_record_pos.created_from_metric_id)
def test_reconcile_message_broker_results_viewed(self):
# Create Test Participants' data
# create genomic set
self.data_generator.create_database_genomic_set(
genomicSetName='test',
genomicSetCriteria='.',
genomicSetVersion=1
)
# Set up initial job run ID
self.data_generator.create_database_genomic_job_run(
jobId=GenomicJob.METRICS_FILE_INGEST,
startTime=clock.CLOCK.now()
)
for pid in range(3):
self.data_generator.create_database_participant(participantId=1 + pid, biobankId=1 + pid)
# insert set members and event metrics records
for i in range(1, 3):
self.data_generator.create_database_genomic_set_member(
participantId=i,
genomicSetId=1,
biobankId=i,
collectionTubeId=100 + i,
sampleId=10 + i,
genomeType="aou_wgs",
)
# 1 PGX Viewed
if i == 1:
self.data_generator.create_database_genomic_user_event_metrics(
participant_id=i,
created_at=datetime.datetime(2022, 10, 6, 00),
event_name="pgx.opened_at",
run_id=1,
)
# 1 HDR Viewed
if i == 2:
self.data_generator.create_database_genomic_user_event_metrics(
participant_id=i,
created_at=datetime.datetime(2022, 10, 6, 00),
event_name="hdr.opened_at",
run_id=1,
)
genomic_cvl_pipeline.reconcile_message_broker_results_viewed()
# Test correct data inserted
result_viewed_dao = GenomicResultViewedDao()
results = result_viewed_dao.get_all()
self.assertEqual(2, len(results))
for record in results:
if record.participant_id == 1:
self.assertEqual("pgx_v1", record.module_type)
else:
self.assertEqual("hdr_v1", record.module_type)
self.assertEqual(int(record.sample_id), record.participant_id + 10)
self.assertEqual("result_viewed", record.event_type)
self.assertEqual(datetime.datetime(2022, 10, 6, 00), record.first_viewed)
self.assertIsNotNone(record.created_from_metric_id)
def test_ingest_appointment_metrics_file(self):
test_file = 'Genomic-Metrics-File-Appointment-Events-Test.json'
bucket_name = 'test_bucket'
sub_folder = 'appointment_events'
pids = []
for _ in range(4):
summary = self.data_generator.create_database_participant_summary()
pids.append(summary.participantId)
test_file_path = f'{bucket_name}/{sub_folder}/{test_file}'
appointment_data = test_data.load_test_data_json(
"Genomic-Metrics-File-Appointment-Events-Test.json")
appointment_data_str = json.dumps(appointment_data, indent=4)
with open_cloud_file(test_file_path, mode='wb') as cloud_file:
cloud_file.write(appointment_data_str.encode("utf-8"))
with GenomicJobController(GenomicJob.APPOINTMENT_METRICS_FILE_INGEST) as controller:
controller.ingest_appointment_metrics_file(
file_path=test_file_path,
)
all_metrics = self.appointment_metrics_dao.get_all()
        # should be 5 metric records, matching what's in the json file
self.assertEqual(len(all_metrics), 5)
self.assertTrue(all((obj.participant_id in pids for obj in all_metrics)))
self.assertTrue(all((obj.file_path == test_file_path for obj in all_metrics)))
self.assertTrue(all((obj.appointment_event is not None for obj in all_metrics)))
self.assertTrue(all((obj.created is not None for obj in all_metrics)))
self.assertTrue(all((obj.modified is not None for obj in all_metrics)))
self.assertTrue(all((obj.module_type is not None for obj in all_metrics)))
self.assertTrue(all((obj.event_authored_time is not None for obj in all_metrics)))
self.assertTrue(all((obj.event_type is not None for obj in all_metrics)))
current_job_runs = self.job_run_dao.get_all()
self.assertEqual(len(current_job_runs), 1)
current_job_run = current_job_runs[0]
self.assertTrue(current_job_run.jobId == GenomicJob.APPOINTMENT_METRICS_FILE_INGEST)
self.assertTrue(current_job_run.runResult == GenomicSubProcessResult.SUCCESS)
self.clear_table_after_test('genomic_appointment_event_metrics')
def test_reconcile_appointments_with_metrics(self):
fake_date = parser.parse('2020-05-29T08:00:01-05:00')
for num in range(4):
summary = self.data_generator.create_database_participant_summary()
missing_json = {
"event": "appointment_updated",
"eventAuthoredTime": "2022-09-16T17:18:38Z",
"participantId": f'P{summary.participantId}',
"messageBody": {
"module_type": "hdr",
"appointment_timestamp": "2022-09-19T19:30:00+00:00",
"id": 55,
"appointment_timezone": "America/Los_Angeles",
"location": "CA",
"contact_number": "18043704252",
"language": "en",
"source": "Color"
}
}
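            # even-indexed rows get an unparseable 'foo' payload and a matching
            # pre-existing appointment event; odd-indexed rows carry the full
            # JSON above and should be reconciled into new appointment events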
if num % 2 == 0:
self.data_generator.create_database_genomic_appointment(
message_record_id=num,
appointment_id=num,
event_type='appointment_scheduled',
module_type='hdr',
participant_id=summary.participantId,
event_authored_time=fake_date,
source='Color',
appointment_timestamp=format_datetime(clock.CLOCK.now()),
appointment_timezone='America/Los_Angeles',
location='123 address st',
contact_number='17348675309',
language='en'
)
self.data_generator.create_database_genomic_appointment_metric(
participant_id=summary.participantId,
appointment_event=json.dumps(missing_json, indent=4) if num % 2 != 0 else 'foo',
file_path='test_file_path',
module_type='hdr',
event_authored_time=fake_date,
event_type='appointment_updated' if num % 2 != 0 else 'appointment_scheduled'
)
current_events = self.appointment_event_dao.get_all()
# should be 2 initial appointment events
self.assertEqual(len(current_events), 2)
current_metrics = self.appointment_metrics_dao.get_all()
        # should be 4 initial appointment metric records
self.assertEqual(len(current_metrics), 4)
self.assertTrue(all(obj.reconcile_job_run_id is None for obj in current_metrics))
with GenomicJobController(GenomicJob.APPOINTMENT_METRICS_RECONCILE) as controller:
controller.reconcile_appointment_events_from_metrics()
job_run = self.job_run_dao.get_all()
self.assertEqual(len(job_run), 1)
self.assertTrue(job_run[0].jobId == GenomicJob.APPOINTMENT_METRICS_RECONCILE)
current_events = self.appointment_event_dao.get_all()
# should be 4 appointment events 2 initial + 2 added
self.assertEqual(len(current_events), 4)
scheduled = list(filter(lambda x: x.event_type == 'appointment_scheduled', current_events))
self.assertEqual(len(scheduled), 2)
self.assertTrue(all(obj.created_from_metric_id is None for obj in scheduled))
updated = list(filter(lambda x: x.event_type == 'appointment_updated', current_events))
self.assertEqual(len(updated), 2)
self.assertTrue(all(obj.created_from_metric_id is not None for obj in updated))
current_metrics = self.appointment_metrics_dao.get_all()
        # should STILL be 4 appointment metric records
self.assertEqual(len(current_metrics), 4)
self.assertTrue(all(obj.reconcile_job_run_id is not None for obj in current_metrics))
self.assertTrue(all(obj.reconcile_job_run_id == job_run[0].id for obj in current_metrics))
self.clear_table_after_test('genomic_appointment_event_metrics')
@mock.patch('rdr_service.services.email_service.EmailService.send_email')
def test_check_appointments_gror_changed(self, email_mock):
fake_date = parser.parse("2022-09-01T13:43:23")
notified_dao = GenomicAppointmentEventNotifiedDao()
config.override_setting(config.GENOMIC_COLOR_PM_EMAIL, ['test@example.com'])
num_participants = 4
for num in range(num_participants):
gror = num if num > 1 else 1
summary = self.data_generator.create_database_participant_summary(
consentForStudyEnrollment=1,
consentForGenomicsROR=gror
)
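            # gror values resolve to 1, 1, 2, 3, so the two participants who no
            # longer have GROR consent (values 2 and 3) should be flagged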
self.data_generator.create_database_genomic_appointment(
message_record_id=num,
appointment_id=num,
event_type='appointment_scheduled',
module_type='hdr',
participant_id=summary.participantId,
event_authored_time=fake_date,
source='Color',
appointment_timestamp=format_datetime(clock.CLOCK.now()),
appointment_timezone='America/Los_Angeles',
location='123 address st',
contact_number='17348675309',
language='en'
)
changed_ppts = self.appointment_event_dao.get_appointments_gror_changed()
self.assertEqual(2, len(changed_ppts))
with GenomicJobController(GenomicJob.CHECK_APPOINTMENT_GROR_CHANGED) as controller:
controller.check_appointments_gror_changed()
self.assertEqual(email_mock.call_count, 1)
notified_appointments = notified_dao.get_all()
self.assertEqual(2, len(notified_appointments))
        # already-notified participants should not be returned by the query
summary = self.data_generator.create_database_participant_summary(
consentForStudyEnrollment=1,
consentForGenomicsROR=2
)
self.data_generator.create_database_genomic_appointment(
message_record_id=5,
appointment_id=5,
event_type='appointment_scheduled',
module_type='hdr',
participant_id=summary.participantId,
event_authored_time=fake_date,
source='Color',
appointment_timestamp=format_datetime(clock.CLOCK.now()),
appointment_timezone='America/Los_Angeles',
location='123 address st',
contact_number='17348675309',
language='en'
)
changed_ppts = self.appointment_event_dao.get_appointments_gror_changed()
self.assertEqual(1, len(changed_ppts))
@mock.patch('rdr_service.services.email_service.EmailService.send_email')
def test_check_gcr_14day_escalation(self, email_mock):
fake_date = parser.parse("2022-09-01T13:43:23")
fake_date2 = parser.parse("2022-09-02T14:14:00")
fake_date3 = parser.parse("2022-09-03T15:15:00")
config.override_setting(config.GENOMIC_GCR_ESCALATION_EMAILS, ['test@example.com'])
self.data_generator.create_database_genomic_set(
genomicSetName='test',
genomicSetCriteria='.',
genomicSetVersion=1
)
pids = []
for _ in range(6):
summary = self.data_generator.create_database_participant_summary(
consentForStudyEnrollment=1,
consentForGenomicsROR=1
)
set_member = self.data_generator.create_database_genomic_set_member(
participantId=summary.participantId,
genomicSetId=1,
biobankId=1001,
collectionTubeId=100,
sampleId=10,
genomeType="aou_wgs",
)
self.data_generator.create_database_genomic_member_report_state(
participant_id=summary.participantId,
genomic_report_state=GenomicReportState.HDR_RPT_POSITIVE,
genomic_set_member_id=set_member.id,
module='hdr_v1',
event_authored_time=fake_date
)
pids.append(summary.participantId)
# Appointment scheduled in future: don't notify
self.data_generator.create_database_genomic_appointment(
message_record_id=101,
appointment_id=102,
event_type='appointment_scheduled',
module_type='hdr',
participant_id=pids[0],
event_authored_time=fake_date,
source='Color',
appointment_timestamp=format_datetime(clock.CLOCK.now()),
appointment_timezone='America/Los_Angeles',
location='123 address st',
contact_number='17348675309',
language='en'
)
# Appointment completed: don't notify
self.data_generator.create_database_genomic_appointment(
message_record_id=102,
appointment_id=103,
event_type='appointment_completed',
module_type='hdr',
participant_id=pids[1],
event_authored_time=fake_date,
source='Color',
appointment_timestamp=fake_date,
appointment_timezone='America/Los_Angeles',
location='123 address st',
contact_number='17348675309',
language='en'
)
# Appointment scheduled then canceled: notify
self.data_generator.create_database_genomic_appointment(
message_record_id=103,
appointment_id=104,
event_type='appointment_scheduled',
module_type='hdr',
participant_id=pids[2],
event_authored_time=fake_date2,
source='Color',
appointment_timestamp=format_datetime(clock.CLOCK.now()),
appointment_timezone='America/Los_Angeles',
location='123 address st',
contact_number='17348675309',
language='en'
)
self.data_generator.create_database_genomic_appointment(
message_record_id=104,
appointment_id=104,
event_type='appointment_cancelled',
module_type='hdr',
participant_id=pids[2],
event_authored_time=fake_date3,
source='Color',
appointment_timestamp=format_datetime(clock.CLOCK.now()),
appointment_timezone='America/Los_Angeles',
location='123 address st',
contact_number='17348675309',
language='en'
)
notified_dao = GenomicDefaultBaseDao(model_type=GenomicGCROutreachEscalationNotified)
notified_dao.insert_bulk([{
'participant_id': pids[4],
'created': clock.CLOCK.now(),
'modified': clock.CLOCK.now(),
'message_sent': True
},{
'participant_id': pids[5],
'created': clock.CLOCK.now(),
'modified': clock.CLOCK.now(),
'message_sent': False
}])
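        # pids[4] was already notified successfully (message_sent=True) and is
        # excluded; pids[5]'s earlier send failed (message_sent=False) so it is
        # picked up again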
with clock.FakeClock(parser.parse('2022-11-1T05:15:00')):
escalated_participants = self.report_state_dao.get_hdr_result_positive_no_appointment(num_days=14)
results = [pid[0] for pid in escalated_participants]
self.assertIn(pids[2], results)
self.assertIn(pids[3], results)
self.assertIn(pids[5], results)
self.assertNotIn(pids[0], results)
self.assertNotIn(pids[1], results)
self.assertNotIn(pids[4], results)
with GenomicJobController(GenomicJob.CHECK_GCR_OUTREACH_ESCALATION) as controller:
controller.check_gcr_escalation(controller.job_id)
self.assertEqual(email_mock.call_count, 3)
self.assertEqual(email_mock.call_args.args[0].subject, 'GCR Outreach 14 Day Escalation')
self.clear_table_after_test('genomic_gcr_outreach_escalation_notified')
@mock.patch('rdr_service.services.email_service.EmailService.send_email')
def test_check_gcr_14day_escalation_error(self, email_mock):
email_mock.side_effect = ForbiddenError(mock.Mock(code=403))
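        # simulate the email service raising a 403; the controller is expected to
        # record message_sent=False and post a Slack alert instead of crashing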
mock_slack_handler = mock.MagicMock()
fake_date = parser.parse("2023-06-01T13:43:23")
config.override_setting(config.GENOMIC_GCR_ESCALATION_EMAILS, ['test@example.com'])
self.data_generator.create_database_genomic_set(
genomicSetName='test',
genomicSetCriteria='.',
genomicSetVersion=1
)
pids = []
for _ in range(2):
summary = self.data_generator.create_database_participant_summary(
consentForStudyEnrollment=1,
consentForGenomicsROR=1
)
set_member = self.data_generator.create_database_genomic_set_member(
participantId=summary.participantId,
genomicSetId=1,
biobankId=1001,
collectionTubeId=100,
sampleId=10,
genomeType="aou_wgs",
)
self.data_generator.create_database_genomic_member_report_state(
participant_id=summary.participantId,
genomic_report_state=GenomicReportState.HDR_RPT_POSITIVE,
genomic_set_member_id=set_member.id,
module='hdr_v1',
event_authored_time=fake_date
)
pids.append(summary.participantId)
self.data_generator.create_database_genomic_appointment(
message_record_id=102,
appointment_id=103,
event_type='appointment_completed',
module_type='hdr',
participant_id=pids[1],
event_authored_time=fake_date,
source='Color',
appointment_timestamp=fake_date,
appointment_timezone='America/Los_Angeles',
location='123 address st',
contact_number='17348675309',
language='en'
)
with GenomicJobController(GenomicJob.CHECK_GCR_OUTREACH_ESCALATION) as controller:
controller.genomic_alert_slack = mock_slack_handler
controller.check_gcr_escalation(controller.job_id)
notified_dao = GenomicDefaultBaseDao(model_type=GenomicGCROutreachEscalationNotified)
with notified_dao.session() as session:
notification = session.query(
GenomicGCROutreachEscalationNotified
).filter(
GenomicGCROutreachEscalationNotified.participant_id == pids[0]
).one()
self.assertEqual(email_mock.call_count, 1)
self.assertEqual(mock_slack_handler.send_message_to_webhook.call_count, 1)
self.assertEqual(False, notification.message_sent)
self.clear_table_after_test('genomic_gcr_outreach_escalation_notified')
@mock.patch('rdr_service.services.email_service.EmailService.send_email')
def test_check_gcr_ce_escalation(self, email_mock):
fake_date = parser.parse("2022-09-01T13:43:23")
fake_date2 = parser.parse("2022-09-02T14:14:00")
fake_date3 = parser.parse("2022-09-03T15:15:00")
config.override_setting(config.GENOMIC_GCR_ESCALATION_EMAILS, ['test@example.com'])
self.data_generator.create_database_genomic_set(
genomicSetName='test',
genomicSetCriteria='.',
genomicSetVersion=1
)
pids = []
for _ in range(6):
summary = self.data_generator.create_database_participant_summary(
consentForStudyEnrollment=1,
consentForGenomicsROR=1
)
set_member = self.data_generator.create_database_genomic_set_member(
participantId=summary.participantId,
genomicSetId=1,
biobankId=1001,
collectionTubeId=100,
sampleId=10,
genomeType="aou_wgs",
participantOrigin='careevolution'
)
self.data_generator.create_database_genomic_member_report_state(
participant_id=summary.participantId,
genomic_report_state=GenomicReportState.HDR_RPT_POSITIVE,
genomic_set_member_id=set_member.id,
module='hdr_v1',
event_authored_time=fake_date
)
pids.append(summary.participantId)
# Appointment scheduled in future: don't notify
self.data_generator.create_database_genomic_appointment(
message_record_id=101,
appointment_id=102,
event_type='appointment_scheduled',
module_type='hdr',
participant_id=pids[0],
event_authored_time=fake_date,
source='Color',
appointment_timestamp=format_datetime(clock.CLOCK.now()),
appointment_timezone='America/Los_Angeles',
location='123 address st',
contact_number='17348675309',
language='en'
)
# Appointment completed: don't notify
self.data_generator.create_database_genomic_appointment(
message_record_id=102,
appointment_id=103,
event_type='appointment_completed',
module_type='hdr',
participant_id=pids[1],
event_authored_time=fake_date,
source='Color',
appointment_timestamp=fake_date,
appointment_timezone='America/Los_Angeles',
location='123 address st',
contact_number='17348675309',
language='en'
)
# Appointment scheduled then canceled: notify
self.data_generator.create_database_genomic_appointment(
message_record_id=103,
appointment_id=104,
event_type='appointment_scheduled',
module_type='hdr',
participant_id=pids[2],
event_authored_time=fake_date2,
source='Color',
appointment_timestamp=format_datetime(clock.CLOCK.now()),
appointment_timezone='America/Los_Angeles',
location='123 address st',
contact_number='17348675309',
language='en'
)
self.data_generator.create_database_genomic_appointment(
message_record_id=104,
appointment_id=104,
event_type='appointment_cancelled',
module_type='hdr',
participant_id=pids[2],
event_authored_time=fake_date3,
source='Color',
appointment_timestamp=format_datetime(clock.CLOCK.now()),
appointment_timezone='America/Los_Angeles',
location='123 address st',
contact_number='17348675309',
language='en'
)
notified_dao = GenomicDefaultBaseDao(model_type=GenomicGCROutreachEscalationNotified)
notified_dao.insert_bulk([{
'participant_id': pids[4],
'created': clock.CLOCK.now(),
'modified': clock.CLOCK.now(),
'message_sent': True
},{
'participant_id': pids[5],
'created': clock.CLOCK.now(),
'modified': clock.CLOCK.now(),
'message_sent': False
}])
with clock.FakeClock(parser.parse('2022-11-1T05:15:00')):
escalated_participants = self.report_state_dao.get_hdr_result_positive_no_appointment(
num_days=30,
participant_origin='careevolution'
)
results = [pid[0] for pid in escalated_participants]
self.assertIn(pids[2], results)
self.assertIn(pids[3], results)
self.assertIn(pids[5], results)
self.assertNotIn(pids[0], results)
self.assertNotIn(pids[1], results)
self.assertNotIn(pids[4], results)
with GenomicJobController(GenomicJob.CHECK_GCR_CE_OUTREACH_ESCALATION) as controller:
controller.check_gcr_escalation(controller.job_id)
self.assertEqual(email_mock.call_count, 3)
self.assertEqual(email_mock.call_args.args[0].subject, 'GCR Outreach 30 Day Escalation')
self.clear_table_after_test('genomic_gcr_outreach_escalation_notified')
@mock.patch('rdr_service.genomic.genomic_job_controller.GenomicJobController.execute_cloud_task')
def test_execute_auto_generation_from_last_run(self, cloud_task_mock):
with GenomicJobController(
GenomicJob.PR_PR_WORKFLOW
) as controller:
controller.job_result = GenomicSubProcessResult.ERROR
controller._end_run()
controller.execute_auto_generation_from_cloud_task()
last_job_run_status = self.job_run_dao.get_last_run_status_for_job_id(job_id=GenomicJob.PR_PR_WORKFLOW)
self.assertTrue(last_job_run_status is not None)
self.assertTrue(last_job_run_status[0] == GenomicSubProcessResult.ERROR)
# task SHOULD NOT be called
self.assertEqual(cloud_task_mock.called, False)
self.assertEqual(cloud_task_mock.call_count, 0)
with GenomicJobController(
GenomicJob.PR_PR_WORKFLOW
) as controller:
controller.job_result = GenomicSubProcessResult.SUCCESS
controller._end_run()
controller.execute_auto_generation_from_cloud_task()
last_job_run_status = self.job_run_dao.get_last_run_status_for_job_id(job_id=GenomicJob.PR_PR_WORKFLOW)
self.assertTrue(last_job_run_status is not None)
self.assertTrue(last_job_run_status[0] == GenomicSubProcessResult.SUCCESS)
# task SHOULD be called
self.assertEqual(cloud_task_mock.called, True)
self.assertTrue(cloud_task_mock.call_args[1].get('payload').get('manifest_type') == 'p0')
self.assertTrue(cloud_task_mock.call_args[1].get('task_queue') == 'genomic-generate-manifest')
all_job_runs = self.job_run_dao.get_all()
self.assertEqual(len(all_job_runs), 2)
self.assertTrue(all(obj.runResult in [GenomicSubProcessResult.SUCCESS, GenomicSubProcessResult.ERROR] for obj
in all_job_runs))
self.assertTrue(all(obj.jobId == GenomicJob.PR_PR_WORKFLOW for obj in all_job_runs))
self.data_generator.create_database_genomic_user_event_metrics(\n participant_id=i, created_at=datetime.datetime(2022, 10,\n 6, 0), event_name='hdr.result_ready.informative', run_id=1)\n if i == 5:\n self.data_generator.create_database_genomic_user_event_metrics(\n participant_id=i, created_at=datetime.datetime(2022, 10,\n 6, 0), event_name='hdr.result_ready.uninformative',\n run_id=1)\n genomic_cvl_pipeline.reconcile_message_broker_results_ready()\n report_state_dao = GenomicMemberReportStateDao()\n states = report_state_dao.get_all()\n self.assertEqual(5, len(states))\n pgx_records = [rec for rec in states if rec.module == 'pgx_v1']\n hdr_record_uninf = [rec for rec in states if rec.\n genomic_report_state == GenomicReportState.HDR_RPT_UNINFORMATIVE][0\n ]\n hdr_record_pos = [rec for rec in states if rec.genomic_report_state ==\n GenomicReportState.HDR_RPT_POSITIVE][0]\n for pgx_record in pgx_records:\n self.assertEqual(GenomicReportState.PGX_RPT_READY, pgx_record.\n genomic_report_state)\n self.assertEqual('PGX_RPT_READY', pgx_record.\n genomic_report_state_str)\n self.assertEqual(int(pgx_record.sample_id), pgx_record.\n participant_id + 10)\n self.assertEqual('result_ready', pgx_record.event_type)\n self.assertEqual(datetime.datetime(2022, 10, 6, 0), pgx_record.\n event_authored_time)\n self.assertIsNotNone(pgx_record.created_from_metric_id)\n self.assertEqual('HDR_RPT_UNINFORMATIVE', hdr_record_uninf.\n genomic_report_state_str)\n self.assertEqual(int(hdr_record_uninf.sample_id), hdr_record_uninf.\n participant_id + 10)\n self.assertEqual('result_ready', hdr_record_uninf.event_type)\n self.assertEqual(datetime.datetime(2022, 10, 6, 0),\n hdr_record_uninf.event_authored_time)\n self.assertIsNotNone(hdr_record_uninf.created_from_metric_id)\n self.assertEqual('HDR_RPT_POSITIVE', hdr_record_pos.\n genomic_report_state_str)\n self.assertEqual(int(hdr_record_pos.sample_id), hdr_record_pos.\n participant_id + 10)\n self.assertEqual('result_ready', hdr_record_pos.event_type)\n self.assertEqual(datetime.datetime(2022, 10, 6, 0), hdr_record_pos.\n event_authored_time)\n self.assertIsNotNone(hdr_record_pos.created_from_metric_id)\n <mask token>\n <mask token>\n\n def test_reconcile_appointments_with_metrics(self):\n fake_date = parser.parse('2020-05-29T08:00:01-05:00')\n for num in range(4):\n summary = self.data_generator.create_database_participant_summary()\n missing_json = {'event': 'appointment_updated',\n 'eventAuthoredTime': '2022-09-16T17:18:38Z',\n 'participantId': f'P{summary.participantId}', 'messageBody':\n {'module_type': 'hdr', 'appointment_timestamp':\n '2022-09-19T19:30:00+00:00', 'id': 55,\n 'appointment_timezone': 'America/Los_Angeles', 'location':\n 'CA', 'contact_number': '18043704252', 'language': 'en',\n 'source': 'Color'}}\n if num % 2 == 0:\n self.data_generator.create_database_genomic_appointment(\n message_record_id=num, appointment_id=num, event_type=\n 'appointment_scheduled', module_type='hdr',\n participant_id=summary.participantId,\n event_authored_time=fake_date, source='Color',\n appointment_timestamp=format_datetime(clock.CLOCK.now()\n ), appointment_timezone='America/Los_Angeles', location\n ='123 address st', contact_number='17348675309',\n language='en')\n self.data_generator.create_database_genomic_appointment_metric(\n participant_id=summary.participantId, appointment_event=\n json.dumps(missing_json, indent=4) if num % 2 != 0 else\n 'foo', file_path='test_file_path', module_type='hdr',\n event_authored_time=fake_date, event_type=\n 'appointment_updated' if 
num % 2 != 0 else\n 'appointment_scheduled')\n current_events = self.appointment_event_dao.get_all()\n self.assertEqual(len(current_events), 2)\n current_metrics = self.appointment_metrics_dao.get_all()\n self.assertEqual(len(current_metrics), 4)\n self.assertTrue(all(obj.reconcile_job_run_id is None for obj in\n current_metrics))\n with GenomicJobController(GenomicJob.APPOINTMENT_METRICS_RECONCILE\n ) as controller:\n controller.reconcile_appointment_events_from_metrics()\n job_run = self.job_run_dao.get_all()\n self.assertEqual(len(job_run), 1)\n self.assertTrue(job_run[0].jobId == GenomicJob.\n APPOINTMENT_METRICS_RECONCILE)\n current_events = self.appointment_event_dao.get_all()\n self.assertEqual(len(current_events), 4)\n scheduled = list(filter(lambda x: x.event_type ==\n 'appointment_scheduled', current_events))\n self.assertEqual(len(scheduled), 2)\n self.assertTrue(all(obj.created_from_metric_id is None for obj in\n scheduled))\n updated = list(filter(lambda x: x.event_type ==\n 'appointment_updated', current_events))\n self.assertEqual(len(updated), 2)\n self.assertTrue(all(obj.created_from_metric_id is not None for obj in\n updated))\n current_metrics = self.appointment_metrics_dao.get_all()\n self.assertEqual(len(current_metrics), 4)\n self.assertTrue(all(obj.reconcile_job_run_id is not None for obj in\n current_metrics))\n self.assertTrue(all(obj.reconcile_job_run_id == job_run[0].id for\n obj in current_metrics))\n self.clear_table_after_test('genomic_appointment_event_metrics')\n\n @mock.patch('rdr_service.services.email_service.EmailService.send_email')\n def test_check_appointments_gror_changed(self, email_mock):\n fake_date = parser.parse('2022-09-01T13:43:23')\n notified_dao = GenomicAppointmentEventNotifiedDao()\n config.override_setting(config.GENOMIC_COLOR_PM_EMAIL, [\n 'test@example.com'])\n num_participants = 4\n for num in range(num_participants):\n gror = num if num > 1 else 1\n summary = self.data_generator.create_database_participant_summary(\n consentForStudyEnrollment=1, consentForGenomicsROR=gror)\n self.data_generator.create_database_genomic_appointment(\n message_record_id=num, appointment_id=num, event_type=\n 'appointment_scheduled', module_type='hdr', participant_id=\n summary.participantId, event_authored_time=fake_date,\n source='Color', appointment_timestamp=format_datetime(clock\n .CLOCK.now()), appointment_timezone='America/Los_Angeles',\n location='123 address st', contact_number='17348675309',\n language='en')\n changed_ppts = (self.appointment_event_dao.\n get_appointments_gror_changed())\n self.assertEqual(2, len(changed_ppts))\n with GenomicJobController(GenomicJob.CHECK_APPOINTMENT_GROR_CHANGED\n ) as controller:\n controller.check_appointments_gror_changed()\n self.assertEqual(email_mock.call_count, 1)\n notified_appointments = notified_dao.get_all()\n self.assertEqual(2, len(notified_appointments))\n summary = self.data_generator.create_database_participant_summary(\n consentForStudyEnrollment=1, consentForGenomicsROR=2)\n self.data_generator.create_database_genomic_appointment(\n message_record_id=5, appointment_id=5, event_type=\n 'appointment_scheduled', module_type='hdr', participant_id=\n summary.participantId, event_authored_time=fake_date, source=\n 'Color', appointment_timestamp=format_datetime(clock.CLOCK.now(\n )), appointment_timezone='America/Los_Angeles', location=\n '123 address st', contact_number='17348675309', language='en')\n changed_ppts = (self.appointment_event_dao.\n get_appointments_gror_changed())\n 
self.assertEqual(1, len(changed_ppts))\n\n @mock.patch('rdr_service.services.email_service.EmailService.send_email')\n def test_check_gcr_14day_escalation(self, email_mock):\n fake_date = parser.parse('2022-09-01T13:43:23')\n fake_date2 = parser.parse('2022-09-02T14:14:00')\n fake_date3 = parser.parse('2022-09-03T15:15:00')\n config.override_setting(config.GENOMIC_GCR_ESCALATION_EMAILS, [\n 'test@example.com'])\n self.data_generator.create_database_genomic_set(genomicSetName=\n 'test', genomicSetCriteria='.', genomicSetVersion=1)\n pids = []\n for _ in range(6):\n summary = self.data_generator.create_database_participant_summary(\n consentForStudyEnrollment=1, consentForGenomicsROR=1)\n set_member = (self.data_generator.\n create_database_genomic_set_member(participantId=summary.\n participantId, genomicSetId=1, biobankId=1001,\n collectionTubeId=100, sampleId=10, genomeType='aou_wgs'))\n self.data_generator.create_database_genomic_member_report_state(\n participant_id=summary.participantId, genomic_report_state=\n GenomicReportState.HDR_RPT_POSITIVE, genomic_set_member_id=\n set_member.id, module='hdr_v1', event_authored_time=fake_date)\n pids.append(summary.participantId)\n self.data_generator.create_database_genomic_appointment(\n message_record_id=101, appointment_id=102, event_type=\n 'appointment_scheduled', module_type='hdr', participant_id=pids\n [0], event_authored_time=fake_date, source='Color',\n appointment_timestamp=format_datetime(clock.CLOCK.now()),\n appointment_timezone='America/Los_Angeles', location=\n '123 address st', contact_number='17348675309', language='en')\n self.data_generator.create_database_genomic_appointment(\n message_record_id=102, appointment_id=103, event_type=\n 'appointment_completed', module_type='hdr', participant_id=pids\n [1], event_authored_time=fake_date, source='Color',\n appointment_timestamp=fake_date, appointment_timezone=\n 'America/Los_Angeles', location='123 address st',\n contact_number='17348675309', language='en')\n self.data_generator.create_database_genomic_appointment(\n message_record_id=103, appointment_id=104, event_type=\n 'appointment_scheduled', module_type='hdr', participant_id=pids\n [2], event_authored_time=fake_date2, source='Color',\n appointment_timestamp=format_datetime(clock.CLOCK.now()),\n appointment_timezone='America/Los_Angeles', location=\n '123 address st', contact_number='17348675309', language='en')\n self.data_generator.create_database_genomic_appointment(\n message_record_id=104, appointment_id=104, event_type=\n 'appointment_cancelled', module_type='hdr', participant_id=pids\n [2], event_authored_time=fake_date3, source='Color',\n appointment_timestamp=format_datetime(clock.CLOCK.now()),\n appointment_timezone='America/Los_Angeles', location=\n '123 address st', contact_number='17348675309', language='en')\n notified_dao = GenomicDefaultBaseDao(model_type=\n GenomicGCROutreachEscalationNotified)\n notified_dao.insert_bulk([{'participant_id': pids[4], 'created':\n clock.CLOCK.now(), 'modified': clock.CLOCK.now(),\n 'message_sent': True}, {'participant_id': pids[5], 'created':\n clock.CLOCK.now(), 'modified': clock.CLOCK.now(),\n 'message_sent': False}])\n with clock.FakeClock(parser.parse('2022-11-1T05:15:00')):\n escalated_participants = (self.report_state_dao.\n get_hdr_result_positive_no_appointment(num_days=14))\n results = [pid[0] for pid in escalated_participants]\n self.assertIn(pids[2], results)\n self.assertIn(pids[3], results)\n self.assertIn(pids[5], results)\n self.assertNotIn(pids[0], results)\n 
self.assertNotIn(pids[1], results)\n self.assertNotIn(pids[4], results)\n with GenomicJobController(GenomicJob.CHECK_GCR_OUTREACH_ESCALATION\n ) as controller:\n controller.check_gcr_escalation(controller.job_id)\n self.assertEqual(email_mock.call_count, 3)\n self.assertEqual(email_mock.call_args.args[0].subject,\n 'GCR Outreach 14 Day Escalation')\n self.clear_table_after_test('genomic_gcr_outreach_escalation_notified')\n <mask token>\n <mask token>\n\n @mock.patch(\n 'rdr_service.genomic.genomic_job_controller.GenomicJobController.execute_cloud_task'\n )\n def test_execute_auto_generation_from_last_run(self, cloud_task_mock):\n with GenomicJobController(GenomicJob.PR_PR_WORKFLOW) as controller:\n controller.job_result = GenomicSubProcessResult.ERROR\n controller._end_run()\n controller.execute_auto_generation_from_cloud_task()\n last_job_run_status = self.job_run_dao.get_last_run_status_for_job_id(\n job_id=GenomicJob.PR_PR_WORKFLOW)\n self.assertTrue(last_job_run_status is not None)\n self.assertTrue(last_job_run_status[0] == GenomicSubProcessResult.ERROR\n )\n self.assertEqual(cloud_task_mock.called, False)\n self.assertEqual(cloud_task_mock.call_count, 0)\n with GenomicJobController(GenomicJob.PR_PR_WORKFLOW) as controller:\n controller.job_result = GenomicSubProcessResult.SUCCESS\n controller._end_run()\n controller.execute_auto_generation_from_cloud_task()\n last_job_run_status = self.job_run_dao.get_last_run_status_for_job_id(\n job_id=GenomicJob.PR_PR_WORKFLOW)\n self.assertTrue(last_job_run_status is not None)\n self.assertTrue(last_job_run_status[0] == GenomicSubProcessResult.\n SUCCESS)\n self.assertEqual(cloud_task_mock.called, True)\n self.assertTrue(cloud_task_mock.call_args[1].get('payload').get(\n 'manifest_type') == 'p0')\n self.assertTrue(cloud_task_mock.call_args[1].get('task_queue') ==\n 'genomic-generate-manifest')\n all_job_runs = self.job_run_dao.get_all()\n self.assertEqual(len(all_job_runs), 2)\n self.assertTrue(all(obj.runResult in [GenomicSubProcessResult.\n SUCCESS, GenomicSubProcessResult.ERROR] for obj in all_job_runs))\n self.assertTrue(all(obj.jobId == GenomicJob.PR_PR_WORKFLOW for obj in\n all_job_runs))\n",
"step-2": "<mask token>\n\n\nclass GenomicJobControllerTest(BaseTestCase):\n\n def setUp(self):\n super(GenomicJobControllerTest, self).setUp()\n self.data_file_dao = GenomicGcDataFileDao()\n self.event_data_dao = MessageBrokenEventDataDao()\n self.incident_dao = GenomicIncidentDao()\n self.member_dao = GenomicSetMemberDao()\n self.metrics_dao = GenomicGCValidationMetricsDao()\n self.user_event_metrics_dao = UserEventMetricsDao()\n self.job_run_dao = GenomicJobRunDao()\n self.report_state_dao = GenomicMemberReportStateDao()\n self.appointment_event_dao = GenomicAppointmentEventDao()\n self.appointment_metrics_dao = GenomicAppointmentEventMetricsDao()\n\n def test_incident_with_long_message(self):\n \"\"\"Make sure the length of incident messages doesn't cause issues when recording them\"\"\"\n incident_message = '1' * (GenomicIncident.message.type.length + 20)\n mock_slack_handler = mock.MagicMock()\n job_controller = GenomicJobController(job_id=1)\n job_controller.genomic_alert_slack = mock_slack_handler\n job_controller.create_incident(message=incident_message, slack=True)\n incident: GenomicIncident = self.session.query(GenomicIncident).one()\n self.assertTrue(incident_message.startswith(incident.message))\n mock_slack_handler.send_message_to_webhook.assert_called_with(\n message_data={'text': incident_message})\n <mask token>\n\n def test_gvcf_files_ingestion_create_incident(self):\n bucket_name = 'test_bucket'\n file_path = (\n 'Wgs_sample_raw_data/SS_VCF_research/BCM_A100153482_21042005280_SIA0013441__1.hard-filtered.gvcf.gz'\n )\n gen_set = self.data_generator.create_database_genomic_set(\n genomicSetName='.', genomicSetCriteria='.', genomicSetVersion=1)\n gen_member = self.data_generator.create_database_genomic_set_member(\n genomicSetId=gen_set.id, biobankId='111111111', sampleId=\n '222222222222', genomeType='aou_wgs', genomicWorkflowState=\n GenomicWorkflowState.AW1)\n gen_job_run = self.data_generator.create_database_genomic_job_run(jobId\n =GenomicJob.AW1_MANIFEST, startTime=clock.CLOCK.now(),\n runResult=GenomicSubProcessResult.SUCCESS)\n gen_processed_file = (self.data_generator.\n create_database_genomic_file_processed(runId=gen_job_run.id,\n startTime=clock.CLOCK.now(), filePath='/test_file_path',\n bucketName=bucket_name, fileName='test_file_name'))\n self.data_generator.create_database_genomic_gc_validation_metrics(\n genomicSetMemberId=gen_member.id, genomicFileProcessedId=\n gen_processed_file.id)\n with GenomicJobController(GenomicJob.INGEST_DATA_FILES) as controller:\n controller.ingest_data_files_into_gc_metrics(file_path, bucket_name\n )\n incident = self.incident_dao.get(1)\n self.assertIsNotNone(incident)\n self.assertEqual(incident.code, GenomicIncidentCode.\n UNABLE_TO_FIND_METRIC.name)\n self.assertEqual(incident.data_file_path, file_path)\n self.assertEqual(incident.message,\n 'INGEST_DATA_FILES: Cannot find genomics metric record for sample id: 21042005280'\n )\n <mask token>\n\n def test_updating_members_blocklists(self):\n gen_set = self.data_generator.create_database_genomic_set(\n genomicSetName='.', genomicSetCriteria='.', genomicSetVersion=1)\n ids_should_be_updated = []\n for i in range(4):\n ids_should_be_updated.append(self.data_generator.\n create_database_genomic_set_member(genomicSetId=gen_set.id,\n biobankId='100153482', sampleId='21042005280', genomeType=\n 'test_investigation_one' if i & 2 != 0 else 'aou_wgs',\n genomicWorkflowState=GenomicWorkflowState.AW0, ai_an='Y' if\n i & 2 == 0 else 'N').id)\n for i in range(2):\n 
self.data_generator.create_database_genomic_set_member(genomicSetId\n =gen_set.id, biobankId='100153482', sampleId='21042005280',\n genomeType='aou_array', genomicWorkflowState=\n GenomicWorkflowState.AW0, ai_an='N')\n with GenomicJobController(GenomicJob.UPDATE_MEMBERS_BLOCKLISTS\n ) as controller:\n controller.update_members_blocklists()\n created_members = self.member_dao.get_all()\n blocklisted = list(filter(lambda x: x.blockResults == 1 or x.\n blockResearch == 1, created_members))\n self.assertTrue(ids_should_be_updated.sort() == [obj.id for obj in\n blocklisted].sort())\n self.assertTrue(all(obj.blockResearch == 1 and obj.\n blockResearchReason is not None and obj.blockResearchReason ==\n 'aian' for obj in created_members if obj.ai_an == 'Y' and obj.\n genomicWorkflowState == GenomicWorkflowState.AW0))\n self.assertTrue(all(obj.blockResults == 0 and obj.\n blockResultsReason is None for obj in created_members if obj.\n ai_an == 'Y' and obj.genomicWorkflowState ==\n GenomicWorkflowState.AW0))\n self.assertTrue(all(obj.blockResearch == 1 and obj.\n blockResearchReason is not None and obj.blockResearchReason ==\n 'test_sample_swap' for obj in created_members if obj.genomeType ==\n 'test_investigation_one' and obj.genomicWorkflowState ==\n GenomicWorkflowState.AW0))\n self.assertTrue(all(obj.blockResults == 1 and obj.\n blockResultsReason is not None and obj.blockResultsReason ==\n 'test_sample_swap' for obj in created_members if obj.genomeType ==\n 'test_investigation_one' and obj.genomicWorkflowState ==\n GenomicWorkflowState.AW0))\n self.assertTrue(all(obj.blockResearch == 0 and obj.\n blockResearchReason is None for obj in created_members if obj.\n genomeType == 'aou_array' and obj.genomicWorkflowState ==\n GenomicWorkflowState.AW0))\n self.assertTrue(all(obj.blockResults == 0 and obj.\n blockResultsReason is None for obj in created_members if obj.\n genomeType == 'aou_array' and obj.genomicWorkflowState ==\n GenomicWorkflowState.AW0))\n with self.member_dao.session() as session:\n session.query(GenomicSetMember).delete()\n run_result = self.job_run_dao.get(1)\n self.assertEqual(run_result.runStatus, GenomicSubProcessStatus.\n COMPLETED)\n self.assertEqual(run_result.runResult, GenomicSubProcessResult.SUCCESS)\n for i in range(4):\n self.data_generator.create_database_genomic_set_member(genomicSetId\n =gen_set.id, biobankId='100153482', sampleId='21042005280',\n genomeType='test_investigation_one' if i & 2 != 0 else\n 'aou_wgs', genomicWorkflowState=GenomicWorkflowState.AW1,\n ai_an='Y' if i & 2 == 0 else 'N')\n with GenomicJobController(GenomicJob.UPDATE_MEMBERS_BLOCKLISTS\n ) as controller:\n controller.update_members_blocklists()\n modified_members = self.member_dao.get_all()\n self.assertTrue(all(obj.blockResearch == 1 and obj.\n blockResearchReason is not None and obj.blockResearchReason ==\n 'aian' for obj in modified_members if obj.ai_an == 'Y' and obj.\n genomicWorkflowState == GenomicWorkflowState.AW1))\n self.assertTrue(all(obj.blockResults == 0 and obj.\n blockResultsReason is None for obj in modified_members if obj.\n ai_an == 'Y' and obj.genomicWorkflowState ==\n GenomicWorkflowState.AW1))\n self.assertTrue(all(obj.blockResearch == 1 and obj.\n blockResearchReason is not None and obj.blockResearchReason ==\n 'test_sample_swap' for obj in modified_members if obj.\n genomeType == 'test_investigation_one' and obj.\n genomicWorkflowState == GenomicWorkflowState.AW1))\n self.assertTrue(all(obj.blockResults == 1 and obj.\n blockResultsReason is not None and 
obj.blockResultsReason ==\n 'test_sample_swap' for obj in modified_members if obj.\n genomeType == 'test_investigation_one' and obj.\n genomicWorkflowState == GenomicWorkflowState.AW1))\n run_result = self.job_run_dao.get(2)\n self.assertEqual(run_result.runStatus, GenomicSubProcessStatus.\n COMPLETED)\n self.assertEqual(run_result.runResult, GenomicSubProcessResult.SUCCESS)\n\n def test_ingest_user_metrics_file(self):\n test_file = 'Genomic-Metrics-File-User-Events-Test.csv'\n bucket_name = 'test_bucket'\n sub_folder = 'user_events'\n pids = []\n file_ingester = GenomicFileIngester()\n for _ in range(2):\n pid = self.data_generator.create_database_participant()\n pids.append(pid.participantId)\n test_metrics_file = create_ingestion_test_file(test_file,\n bucket_name, sub_folder)\n test_file_path = f'{bucket_name}/{sub_folder}/{test_metrics_file}'\n with open_cloud_file(test_file_path) as csv_file:\n metrics_to_ingest = file_ingester._read_data_to_ingest(csv_file)\n with GenomicJobController(GenomicJob.METRICS_FILE_INGEST\n ) as controller:\n controller.ingest_metrics_file(metric_type='user_events',\n file_path=test_file_path)\n job_run_id = controller.job_run.id\n metrics = self.user_event_metrics_dao.get_all()\n for pid in pids:\n file_metrics = list(filter(lambda x: int(x['participant_id'].\n split('P')[-1]) == pid, metrics_to_ingest['rows']))\n participant_ingested_metrics = list(filter(lambda x: x.\n participant_id == pid, metrics))\n self.assertEqual(len(file_metrics), len(\n participant_ingested_metrics))\n self.assertTrue(all(obj.run_id == job_run_id for obj in\n participant_ingested_metrics))\n\n @mock.patch(\n 'rdr_service.genomic.genomic_job_controller.GenomicJobController.execute_cloud_task'\n )\n def test_reconcile_pdr_data(self, mock_cloud_task):\n with GenomicJobController(GenomicJob.RECONCILE_PDR_DATA) as controller:\n controller.reconcile_pdr_data()\n cloud_task_endpoint = 'rebuild_genomic_table_records_task'\n first_run = self.job_run_dao.get_all()\n self.assertEqual(mock_cloud_task.call_count, 1)\n call_args = mock_cloud_task.call_args_list\n self.assertEqual(len(call_args), 1)\n self.assertEqual(call_args[0].args[0]['table'], self.job_run_dao.\n model_type.__tablename__)\n self.assertTrue(type(call_args[0].args[0]['ids']) is list)\n self.assertEqual(call_args[0].args[0]['ids'], [obj.id for obj in\n first_run])\n self.assertEqual(call_args[0].args[1], cloud_task_endpoint)\n participant = self.data_generator.create_database_participant()\n gen_set = self.data_generator.create_database_genomic_set(\n genomicSetName='.', genomicSetCriteria='.', genomicSetVersion=1)\n plus_ten = clock.CLOCK.now() + datetime.timedelta(minutes=10)\n plus_ten = plus_ten.replace(microsecond=0)\n with FakeClock(plus_ten):\n for i in range(2):\n gen_member = (self.data_generator.\n create_database_genomic_set_member(genomicSetId=gen_set\n .id, biobankId='100153482', sampleId='21042005280',\n genomeType='aou_wgs', genomicWorkflowState=\n GenomicWorkflowState.AW1))\n gen_processed_file = (self.data_generator.\n create_database_genomic_file_processed(runId=first_run[\n 0].id, startTime=clock.CLOCK.now(), filePath=\n f'test_file_path_{i}', bucketName='test_bucket',\n fileName='test_file_name'))\n self.data_generator.create_database_genomic_gc_validation_metrics(\n genomicSetMemberId=gen_member.id,\n genomicFileProcessedId=gen_processed_file.id)\n manifest = (self.data_generator.\n create_database_genomic_manifest_file(manifestTypeId=2,\n filePath=f'test_file_path_{i}'))\n 
self.data_generator.create_database_genomic_manifest_feedback(\n inputManifestFileId=manifest.id, feedbackRecordCount=2)\n self.data_generator.create_database_genomic_user_event_metrics(\n participant_id=participant.participantId, event_name=\n 'test_event', run_id=1)\n self.data_generator.create_database_genomic_informing_loop(\n message_record_id=1, event_type=\n 'informing_loop_decision', module_type='gem',\n participant_id=participant.participantId,\n decision_value='maybe_later', event_authored_time=clock\n .CLOCK.now())\n self.data_generator.create_database_genomic_cvl_past_due(\n cvl_site_id='co', email_notification_sent=0, sample_id=\n 'sample_test', results_type='hdr',\n genomic_set_member_id=gen_member.id)\n self.data_generator.create_database_genomic_appointment(\n message_record_id=i, appointment_id=i, event_type=\n 'appointment_scheduled', module_type='hdr',\n participant_id=participant.participantId,\n event_authored_time=clock.CLOCK.now(), source='Color',\n appointment_timestamp=format_datetime(clock.CLOCK.now()\n ), appointment_timezone='America/Los_Angeles', location\n ='123 address st', contact_number='17348675309',\n language='en')\n self.data_generator.create_database_genomic_member_report_state(\n genomic_set_member_id=gen_member.id, participant_id=\n participant.participantId, module='gem',\n genomic_report_state=GenomicReportState.GEM_RPT_READY,\n event_authored_time=clock.CLOCK.now())\n self.data_generator.create_genomic_result_viewed(participant_id\n =participant.participantId, event_type='result_viewed',\n event_authored_time=clock.CLOCK.now(), module_type=\n 'gem', sample_id=gen_member.sampleId)\n with GenomicJobController(GenomicJob.RECONCILE_PDR_DATA) as controller:\n controller.reconcile_pdr_data()\n affected_tables = ['genomic_set', 'genomic_set_member',\n 'genomic_job_run', 'genomic_file_processed',\n 'genomic_gc_validation_metrics', 'genomic_manifest_file',\n 'genomic_manifest_feedback', 'genomic_informing_loop',\n 'genomic_cvl_results_past_due', 'user_event_metrics',\n 'genomic_member_report_state', 'genomic_result_viewed',\n 'genomic_appointment_event']\n num_calls = len(affected_tables) + 1\n self.assertEqual(mock_cloud_task.call_count, num_calls)\n call_args = mock_cloud_task.call_args_list\n self.assertEqual(len(call_args), num_calls)\n mock_tables = set([obj[0][0]['table'] for obj in call_args])\n mock_endpoint = [obj[0][1] for obj in call_args]\n self.assertTrue([mock_tables].sort() == affected_tables.sort())\n self.assertTrue(all(obj for obj in mock_endpoint if obj ==\n cloud_task_endpoint))\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def test_reconcile_message_broker_results_ready(self):\n self.data_generator.create_database_genomic_set(genomicSetName=\n 'test', genomicSetCriteria='.', genomicSetVersion=1)\n self.data_generator.create_database_genomic_job_run(jobId=\n GenomicJob.METRICS_FILE_INGEST, startTime=clock.CLOCK.now())\n for pid in range(7):\n self.data_generator.create_database_participant(participantId=1 +\n pid, biobankId=1 + pid)\n for i in range(1, 6):\n self.data_generator.create_database_genomic_set_member(\n participantId=i, genomicSetId=1, biobankId=i,\n collectionTubeId=100 + i, sampleId=10 + i, genomeType='aou_wgs'\n )\n if i < 4:\n self.data_generator.create_database_genomic_user_event_metrics(\n participant_id=i, created_at=datetime.datetime(2022, 10,\n 6, 0), event_name='pgx.result_ready', run_id=1)\n if i == 4:\n self.data_generator.create_database_genomic_user_event_metrics(\n 
participant_id=i, created_at=datetime.datetime(2022, 10,\n 6, 0), event_name='hdr.result_ready.informative', run_id=1)\n if i == 5:\n self.data_generator.create_database_genomic_user_event_metrics(\n participant_id=i, created_at=datetime.datetime(2022, 10,\n 6, 0), event_name='hdr.result_ready.uninformative',\n run_id=1)\n genomic_cvl_pipeline.reconcile_message_broker_results_ready()\n report_state_dao = GenomicMemberReportStateDao()\n states = report_state_dao.get_all()\n self.assertEqual(5, len(states))\n pgx_records = [rec for rec in states if rec.module == 'pgx_v1']\n hdr_record_uninf = [rec for rec in states if rec.\n genomic_report_state == GenomicReportState.HDR_RPT_UNINFORMATIVE][0\n ]\n hdr_record_pos = [rec for rec in states if rec.genomic_report_state ==\n GenomicReportState.HDR_RPT_POSITIVE][0]\n for pgx_record in pgx_records:\n self.assertEqual(GenomicReportState.PGX_RPT_READY, pgx_record.\n genomic_report_state)\n self.assertEqual('PGX_RPT_READY', pgx_record.\n genomic_report_state_str)\n self.assertEqual(int(pgx_record.sample_id), pgx_record.\n participant_id + 10)\n self.assertEqual('result_ready', pgx_record.event_type)\n self.assertEqual(datetime.datetime(2022, 10, 6, 0), pgx_record.\n event_authored_time)\n self.assertIsNotNone(pgx_record.created_from_metric_id)\n self.assertEqual('HDR_RPT_UNINFORMATIVE', hdr_record_uninf.\n genomic_report_state_str)\n self.assertEqual(int(hdr_record_uninf.sample_id), hdr_record_uninf.\n participant_id + 10)\n self.assertEqual('result_ready', hdr_record_uninf.event_type)\n self.assertEqual(datetime.datetime(2022, 10, 6, 0),\n hdr_record_uninf.event_authored_time)\n self.assertIsNotNone(hdr_record_uninf.created_from_metric_id)\n self.assertEqual('HDR_RPT_POSITIVE', hdr_record_pos.\n genomic_report_state_str)\n self.assertEqual(int(hdr_record_pos.sample_id), hdr_record_pos.\n participant_id + 10)\n self.assertEqual('result_ready', hdr_record_pos.event_type)\n self.assertEqual(datetime.datetime(2022, 10, 6, 0), hdr_record_pos.\n event_authored_time)\n self.assertIsNotNone(hdr_record_pos.created_from_metric_id)\n <mask token>\n\n def test_ingest_appointment_metrics_file(self):\n test_file = 'Genomic-Metrics-File-Appointment-Events-Test.json'\n bucket_name = 'test_bucket'\n sub_folder = 'appointment_events'\n pids = []\n for _ in range(4):\n summary = self.data_generator.create_database_participant_summary()\n pids.append(summary.participantId)\n test_file_path = f'{bucket_name}/{sub_folder}/{test_file}'\n appointment_data = test_data.load_test_data_json(\n 'Genomic-Metrics-File-Appointment-Events-Test.json')\n appointment_data_str = json.dumps(appointment_data, indent=4)\n with open_cloud_file(test_file_path, mode='wb') as cloud_file:\n cloud_file.write(appointment_data_str.encode('utf-8'))\n with GenomicJobController(GenomicJob.APPOINTMENT_METRICS_FILE_INGEST\n ) as controller:\n controller.ingest_appointment_metrics_file(file_path=test_file_path\n )\n all_metrics = self.appointment_metrics_dao.get_all()\n self.assertEqual(len(all_metrics), 5)\n self.assertTrue(all(obj.participant_id in pids for obj in all_metrics))\n self.assertTrue(all(obj.file_path == test_file_path for obj in\n all_metrics))\n self.assertTrue(all(obj.appointment_event is not None for obj in\n all_metrics))\n self.assertTrue(all(obj.created is not None for obj in all_metrics))\n self.assertTrue(all(obj.modified is not None for obj in all_metrics))\n self.assertTrue(all(obj.module_type is not None for obj in all_metrics)\n )\n self.assertTrue(all(obj.event_authored_time 
is not None for obj in\n all_metrics))\n self.assertTrue(all(obj.event_type is not None for obj in all_metrics))\n current_job_runs = self.job_run_dao.get_all()\n self.assertEqual(len(current_job_runs), 1)\n current_job_run = current_job_runs[0]\n self.assertTrue(current_job_run.jobId == GenomicJob.\n APPOINTMENT_METRICS_FILE_INGEST)\n self.assertTrue(current_job_run.runResult ==\n GenomicSubProcessResult.SUCCESS)\n self.clear_table_after_test('genomic_appointment_event_metrics')\n\n def test_reconcile_appointments_with_metrics(self):\n fake_date = parser.parse('2020-05-29T08:00:01-05:00')\n for num in range(4):\n summary = self.data_generator.create_database_participant_summary()\n missing_json = {'event': 'appointment_updated',\n 'eventAuthoredTime': '2022-09-16T17:18:38Z',\n 'participantId': f'P{summary.participantId}', 'messageBody':\n {'module_type': 'hdr', 'appointment_timestamp':\n '2022-09-19T19:30:00+00:00', 'id': 55,\n 'appointment_timezone': 'America/Los_Angeles', 'location':\n 'CA', 'contact_number': '18043704252', 'language': 'en',\n 'source': 'Color'}}\n if num % 2 == 0:\n self.data_generator.create_database_genomic_appointment(\n message_record_id=num, appointment_id=num, event_type=\n 'appointment_scheduled', module_type='hdr',\n participant_id=summary.participantId,\n event_authored_time=fake_date, source='Color',\n appointment_timestamp=format_datetime(clock.CLOCK.now()\n ), appointment_timezone='America/Los_Angeles', location\n ='123 address st', contact_number='17348675309',\n language='en')\n self.data_generator.create_database_genomic_appointment_metric(\n participant_id=summary.participantId, appointment_event=\n json.dumps(missing_json, indent=4) if num % 2 != 0 else\n 'foo', file_path='test_file_path', module_type='hdr',\n event_authored_time=fake_date, event_type=\n 'appointment_updated' if num % 2 != 0 else\n 'appointment_scheduled')\n current_events = self.appointment_event_dao.get_all()\n self.assertEqual(len(current_events), 2)\n current_metrics = self.appointment_metrics_dao.get_all()\n self.assertEqual(len(current_metrics), 4)\n self.assertTrue(all(obj.reconcile_job_run_id is None for obj in\n current_metrics))\n with GenomicJobController(GenomicJob.APPOINTMENT_METRICS_RECONCILE\n ) as controller:\n controller.reconcile_appointment_events_from_metrics()\n job_run = self.job_run_dao.get_all()\n self.assertEqual(len(job_run), 1)\n self.assertTrue(job_run[0].jobId == GenomicJob.\n APPOINTMENT_METRICS_RECONCILE)\n current_events = self.appointment_event_dao.get_all()\n self.assertEqual(len(current_events), 4)\n scheduled = list(filter(lambda x: x.event_type ==\n 'appointment_scheduled', current_events))\n self.assertEqual(len(scheduled), 2)\n self.assertTrue(all(obj.created_from_metric_id is None for obj in\n scheduled))\n updated = list(filter(lambda x: x.event_type ==\n 'appointment_updated', current_events))\n self.assertEqual(len(updated), 2)\n self.assertTrue(all(obj.created_from_metric_id is not None for obj in\n updated))\n current_metrics = self.appointment_metrics_dao.get_all()\n self.assertEqual(len(current_metrics), 4)\n self.assertTrue(all(obj.reconcile_job_run_id is not None for obj in\n current_metrics))\n self.assertTrue(all(obj.reconcile_job_run_id == job_run[0].id for\n obj in current_metrics))\n self.clear_table_after_test('genomic_appointment_event_metrics')\n\n @mock.patch('rdr_service.services.email_service.EmailService.send_email')\n def test_check_appointments_gror_changed(self, email_mock):\n fake_date = 
parser.parse('2022-09-01T13:43:23')\n notified_dao = GenomicAppointmentEventNotifiedDao()\n config.override_setting(config.GENOMIC_COLOR_PM_EMAIL, [\n 'test@example.com'])\n num_participants = 4\n for num in range(num_participants):\n gror = num if num > 1 else 1\n summary = self.data_generator.create_database_participant_summary(\n consentForStudyEnrollment=1, consentForGenomicsROR=gror)\n self.data_generator.create_database_genomic_appointment(\n message_record_id=num, appointment_id=num, event_type=\n 'appointment_scheduled', module_type='hdr', participant_id=\n summary.participantId, event_authored_time=fake_date,\n source='Color', appointment_timestamp=format_datetime(clock\n .CLOCK.now()), appointment_timezone='America/Los_Angeles',\n location='123 address st', contact_number='17348675309',\n language='en')\n changed_ppts = (self.appointment_event_dao.\n get_appointments_gror_changed())\n self.assertEqual(2, len(changed_ppts))\n with GenomicJobController(GenomicJob.CHECK_APPOINTMENT_GROR_CHANGED\n ) as controller:\n controller.check_appointments_gror_changed()\n self.assertEqual(email_mock.call_count, 1)\n notified_appointments = notified_dao.get_all()\n self.assertEqual(2, len(notified_appointments))\n summary = self.data_generator.create_database_participant_summary(\n consentForStudyEnrollment=1, consentForGenomicsROR=2)\n self.data_generator.create_database_genomic_appointment(\n message_record_id=5, appointment_id=5, event_type=\n 'appointment_scheduled', module_type='hdr', participant_id=\n summary.participantId, event_authored_time=fake_date, source=\n 'Color', appointment_timestamp=format_datetime(clock.CLOCK.now(\n )), appointment_timezone='America/Los_Angeles', location=\n '123 address st', contact_number='17348675309', language='en')\n changed_ppts = (self.appointment_event_dao.\n get_appointments_gror_changed())\n self.assertEqual(1, len(changed_ppts))\n\n @mock.patch('rdr_service.services.email_service.EmailService.send_email')\n def test_check_gcr_14day_escalation(self, email_mock):\n fake_date = parser.parse('2022-09-01T13:43:23')\n fake_date2 = parser.parse('2022-09-02T14:14:00')\n fake_date3 = parser.parse('2022-09-03T15:15:00')\n config.override_setting(config.GENOMIC_GCR_ESCALATION_EMAILS, [\n 'test@example.com'])\n self.data_generator.create_database_genomic_set(genomicSetName=\n 'test', genomicSetCriteria='.', genomicSetVersion=1)\n pids = []\n for _ in range(6):\n summary = self.data_generator.create_database_participant_summary(\n consentForStudyEnrollment=1, consentForGenomicsROR=1)\n set_member = (self.data_generator.\n create_database_genomic_set_member(participantId=summary.\n participantId, genomicSetId=1, biobankId=1001,\n collectionTubeId=100, sampleId=10, genomeType='aou_wgs'))\n self.data_generator.create_database_genomic_member_report_state(\n participant_id=summary.participantId, genomic_report_state=\n GenomicReportState.HDR_RPT_POSITIVE, genomic_set_member_id=\n set_member.id, module='hdr_v1', event_authored_time=fake_date)\n pids.append(summary.participantId)\n self.data_generator.create_database_genomic_appointment(\n message_record_id=101, appointment_id=102, event_type=\n 'appointment_scheduled', module_type='hdr', participant_id=pids\n [0], event_authored_time=fake_date, source='Color',\n appointment_timestamp=format_datetime(clock.CLOCK.now()),\n appointment_timezone='America/Los_Angeles', location=\n '123 address st', contact_number='17348675309', language='en')\n self.data_generator.create_database_genomic_appointment(\n 
message_record_id=102, appointment_id=103, event_type=\n 'appointment_completed', module_type='hdr', participant_id=pids\n [1], event_authored_time=fake_date, source='Color',\n appointment_timestamp=fake_date, appointment_timezone=\n 'America/Los_Angeles', location='123 address st',\n contact_number='17348675309', language='en')\n self.data_generator.create_database_genomic_appointment(\n message_record_id=103, appointment_id=104, event_type=\n 'appointment_scheduled', module_type='hdr', participant_id=pids\n [2], event_authored_time=fake_date2, source='Color',\n appointment_timestamp=format_datetime(clock.CLOCK.now()),\n appointment_timezone='America/Los_Angeles', location=\n '123 address st', contact_number='17348675309', language='en')\n self.data_generator.create_database_genomic_appointment(\n message_record_id=104, appointment_id=104, event_type=\n 'appointment_cancelled', module_type='hdr', participant_id=pids\n [2], event_authored_time=fake_date3, source='Color',\n appointment_timestamp=format_datetime(clock.CLOCK.now()),\n appointment_timezone='America/Los_Angeles', location=\n '123 address st', contact_number='17348675309', language='en')\n notified_dao = GenomicDefaultBaseDao(model_type=\n GenomicGCROutreachEscalationNotified)\n notified_dao.insert_bulk([{'participant_id': pids[4], 'created':\n clock.CLOCK.now(), 'modified': clock.CLOCK.now(),\n 'message_sent': True}, {'participant_id': pids[5], 'created':\n clock.CLOCK.now(), 'modified': clock.CLOCK.now(),\n 'message_sent': False}])\n with clock.FakeClock(parser.parse('2022-11-1T05:15:00')):\n escalated_participants = (self.report_state_dao.\n get_hdr_result_positive_no_appointment(num_days=14))\n results = [pid[0] for pid in escalated_participants]\n self.assertIn(pids[2], results)\n self.assertIn(pids[3], results)\n self.assertIn(pids[5], results)\n self.assertNotIn(pids[0], results)\n self.assertNotIn(pids[1], results)\n self.assertNotIn(pids[4], results)\n with GenomicJobController(GenomicJob.CHECK_GCR_OUTREACH_ESCALATION\n ) as controller:\n controller.check_gcr_escalation(controller.job_id)\n self.assertEqual(email_mock.call_count, 3)\n self.assertEqual(email_mock.call_args.args[0].subject,\n 'GCR Outreach 14 Day Escalation')\n self.clear_table_after_test('genomic_gcr_outreach_escalation_notified')\n <mask token>\n <mask token>\n\n @mock.patch(\n 'rdr_service.genomic.genomic_job_controller.GenomicJobController.execute_cloud_task'\n )\n def test_execute_auto_generation_from_last_run(self, cloud_task_mock):\n with GenomicJobController(GenomicJob.PR_PR_WORKFLOW) as controller:\n controller.job_result = GenomicSubProcessResult.ERROR\n controller._end_run()\n controller.execute_auto_generation_from_cloud_task()\n last_job_run_status = self.job_run_dao.get_last_run_status_for_job_id(\n job_id=GenomicJob.PR_PR_WORKFLOW)\n self.assertTrue(last_job_run_status is not None)\n self.assertTrue(last_job_run_status[0] == GenomicSubProcessResult.ERROR\n )\n self.assertEqual(cloud_task_mock.called, False)\n self.assertEqual(cloud_task_mock.call_count, 0)\n with GenomicJobController(GenomicJob.PR_PR_WORKFLOW) as controller:\n controller.job_result = GenomicSubProcessResult.SUCCESS\n controller._end_run()\n controller.execute_auto_generation_from_cloud_task()\n last_job_run_status = self.job_run_dao.get_last_run_status_for_job_id(\n job_id=GenomicJob.PR_PR_WORKFLOW)\n self.assertTrue(last_job_run_status is not None)\n self.assertTrue(last_job_run_status[0] == GenomicSubProcessResult.\n SUCCESS)\n self.assertEqual(cloud_task_mock.called, 
True)\n self.assertTrue(cloud_task_mock.call_args[1].get('payload').get(\n 'manifest_type') == 'p0')\n self.assertTrue(cloud_task_mock.call_args[1].get('task_queue') ==\n 'genomic-generate-manifest')\n all_job_runs = self.job_run_dao.get_all()\n self.assertEqual(len(all_job_runs), 2)\n self.assertTrue(all(obj.runResult in [GenomicSubProcessResult.\n SUCCESS, GenomicSubProcessResult.ERROR] for obj in all_job_runs))\n self.assertTrue(all(obj.jobId == GenomicJob.PR_PR_WORKFLOW for obj in\n all_job_runs))\n",
"step-3": "<mask token>\n\n\nclass GenomicJobControllerTest(BaseTestCase):\n\n def setUp(self):\n super(GenomicJobControllerTest, self).setUp()\n self.data_file_dao = GenomicGcDataFileDao()\n self.event_data_dao = MessageBrokenEventDataDao()\n self.incident_dao = GenomicIncidentDao()\n self.member_dao = GenomicSetMemberDao()\n self.metrics_dao = GenomicGCValidationMetricsDao()\n self.user_event_metrics_dao = UserEventMetricsDao()\n self.job_run_dao = GenomicJobRunDao()\n self.report_state_dao = GenomicMemberReportStateDao()\n self.appointment_event_dao = GenomicAppointmentEventDao()\n self.appointment_metrics_dao = GenomicAppointmentEventMetricsDao()\n\n def test_incident_with_long_message(self):\n \"\"\"Make sure the length of incident messages doesn't cause issues when recording them\"\"\"\n incident_message = '1' * (GenomicIncident.message.type.length + 20)\n mock_slack_handler = mock.MagicMock()\n job_controller = GenomicJobController(job_id=1)\n job_controller.genomic_alert_slack = mock_slack_handler\n job_controller.create_incident(message=incident_message, slack=True)\n incident: GenomicIncident = self.session.query(GenomicIncident).one()\n self.assertTrue(incident_message.startswith(incident.message))\n mock_slack_handler.send_message_to_webhook.assert_called_with(\n message_data={'text': incident_message})\n <mask token>\n\n def test_gvcf_files_ingestion_create_incident(self):\n bucket_name = 'test_bucket'\n file_path = (\n 'Wgs_sample_raw_data/SS_VCF_research/BCM_A100153482_21042005280_SIA0013441__1.hard-filtered.gvcf.gz'\n )\n gen_set = self.data_generator.create_database_genomic_set(\n genomicSetName='.', genomicSetCriteria='.', genomicSetVersion=1)\n gen_member = self.data_generator.create_database_genomic_set_member(\n genomicSetId=gen_set.id, biobankId='111111111', sampleId=\n '222222222222', genomeType='aou_wgs', genomicWorkflowState=\n GenomicWorkflowState.AW1)\n gen_job_run = self.data_generator.create_database_genomic_job_run(jobId\n =GenomicJob.AW1_MANIFEST, startTime=clock.CLOCK.now(),\n runResult=GenomicSubProcessResult.SUCCESS)\n gen_processed_file = (self.data_generator.\n create_database_genomic_file_processed(runId=gen_job_run.id,\n startTime=clock.CLOCK.now(), filePath='/test_file_path',\n bucketName=bucket_name, fileName='test_file_name'))\n self.data_generator.create_database_genomic_gc_validation_metrics(\n genomicSetMemberId=gen_member.id, genomicFileProcessedId=\n gen_processed_file.id)\n with GenomicJobController(GenomicJob.INGEST_DATA_FILES) as controller:\n controller.ingest_data_files_into_gc_metrics(file_path, bucket_name\n )\n incident = self.incident_dao.get(1)\n self.assertIsNotNone(incident)\n self.assertEqual(incident.code, GenomicIncidentCode.\n UNABLE_TO_FIND_METRIC.name)\n self.assertEqual(incident.data_file_path, file_path)\n self.assertEqual(incident.message,\n 'INGEST_DATA_FILES: Cannot find genomics metric record for sample id: 21042005280'\n )\n <mask token>\n\n def test_updating_members_blocklists(self):\n gen_set = self.data_generator.create_database_genomic_set(\n genomicSetName='.', genomicSetCriteria='.', genomicSetVersion=1)\n ids_should_be_updated = []\n for i in range(4):\n ids_should_be_updated.append(self.data_generator.\n create_database_genomic_set_member(genomicSetId=gen_set.id,\n biobankId='100153482', sampleId='21042005280', genomeType=\n 'test_investigation_one' if i & 2 != 0 else 'aou_wgs',\n genomicWorkflowState=GenomicWorkflowState.AW0, ai_an='Y' if\n i & 2 == 0 else 'N').id)\n for i in range(2):\n 
self.data_generator.create_database_genomic_set_member(genomicSetId\n =gen_set.id, biobankId='100153482', sampleId='21042005280',\n genomeType='aou_array', genomicWorkflowState=\n GenomicWorkflowState.AW0, ai_an='N')\n with GenomicJobController(GenomicJob.UPDATE_MEMBERS_BLOCKLISTS\n ) as controller:\n controller.update_members_blocklists()\n created_members = self.member_dao.get_all()\n blocklisted = list(filter(lambda x: x.blockResults == 1 or x.\n blockResearch == 1, created_members))\n self.assertTrue(ids_should_be_updated.sort() == [obj.id for obj in\n blocklisted].sort())\n self.assertTrue(all(obj.blockResearch == 1 and obj.\n blockResearchReason is not None and obj.blockResearchReason ==\n 'aian' for obj in created_members if obj.ai_an == 'Y' and obj.\n genomicWorkflowState == GenomicWorkflowState.AW0))\n self.assertTrue(all(obj.blockResults == 0 and obj.\n blockResultsReason is None for obj in created_members if obj.\n ai_an == 'Y' and obj.genomicWorkflowState ==\n GenomicWorkflowState.AW0))\n self.assertTrue(all(obj.blockResearch == 1 and obj.\n blockResearchReason is not None and obj.blockResearchReason ==\n 'test_sample_swap' for obj in created_members if obj.genomeType ==\n 'test_investigation_one' and obj.genomicWorkflowState ==\n GenomicWorkflowState.AW0))\n self.assertTrue(all(obj.blockResults == 1 and obj.\n blockResultsReason is not None and obj.blockResultsReason ==\n 'test_sample_swap' for obj in created_members if obj.genomeType ==\n 'test_investigation_one' and obj.genomicWorkflowState ==\n GenomicWorkflowState.AW0))\n self.assertTrue(all(obj.blockResearch == 0 and obj.\n blockResearchReason is None for obj in created_members if obj.\n genomeType == 'aou_array' and obj.genomicWorkflowState ==\n GenomicWorkflowState.AW0))\n self.assertTrue(all(obj.blockResults == 0 and obj.\n blockResultsReason is None for obj in created_members if obj.\n genomeType == 'aou_array' and obj.genomicWorkflowState ==\n GenomicWorkflowState.AW0))\n with self.member_dao.session() as session:\n session.query(GenomicSetMember).delete()\n run_result = self.job_run_dao.get(1)\n self.assertEqual(run_result.runStatus, GenomicSubProcessStatus.\n COMPLETED)\n self.assertEqual(run_result.runResult, GenomicSubProcessResult.SUCCESS)\n for i in range(4):\n self.data_generator.create_database_genomic_set_member(genomicSetId\n =gen_set.id, biobankId='100153482', sampleId='21042005280',\n genomeType='test_investigation_one' if i & 2 != 0 else\n 'aou_wgs', genomicWorkflowState=GenomicWorkflowState.AW1,\n ai_an='Y' if i & 2 == 0 else 'N')\n with GenomicJobController(GenomicJob.UPDATE_MEMBERS_BLOCKLISTS\n ) as controller:\n controller.update_members_blocklists()\n modified_members = self.member_dao.get_all()\n self.assertTrue(all(obj.blockResearch == 1 and obj.\n blockResearchReason is not None and obj.blockResearchReason ==\n 'aian' for obj in modified_members if obj.ai_an == 'Y' and obj.\n genomicWorkflowState == GenomicWorkflowState.AW1))\n self.assertTrue(all(obj.blockResults == 0 and obj.\n blockResultsReason is None for obj in modified_members if obj.\n ai_an == 'Y' and obj.genomicWorkflowState ==\n GenomicWorkflowState.AW1))\n self.assertTrue(all(obj.blockResearch == 1 and obj.\n blockResearchReason is not None and obj.blockResearchReason ==\n 'test_sample_swap' for obj in modified_members if obj.\n genomeType == 'test_investigation_one' and obj.\n genomicWorkflowState == GenomicWorkflowState.AW1))\n self.assertTrue(all(obj.blockResults == 1 and obj.\n blockResultsReason is not None and 
obj.blockResultsReason ==\n 'test_sample_swap' for obj in modified_members if obj.\n genomeType == 'test_investigation_one' and obj.\n genomicWorkflowState == GenomicWorkflowState.AW1))\n run_result = self.job_run_dao.get(2)\n self.assertEqual(run_result.runStatus, GenomicSubProcessStatus.\n COMPLETED)\n self.assertEqual(run_result.runResult, GenomicSubProcessResult.SUCCESS)\n\n def test_ingest_user_metrics_file(self):\n test_file = 'Genomic-Metrics-File-User-Events-Test.csv'\n bucket_name = 'test_bucket'\n sub_folder = 'user_events'\n pids = []\n file_ingester = GenomicFileIngester()\n for _ in range(2):\n pid = self.data_generator.create_database_participant()\n pids.append(pid.participantId)\n test_metrics_file = create_ingestion_test_file(test_file,\n bucket_name, sub_folder)\n test_file_path = f'{bucket_name}/{sub_folder}/{test_metrics_file}'\n with open_cloud_file(test_file_path) as csv_file:\n metrics_to_ingest = file_ingester._read_data_to_ingest(csv_file)\n with GenomicJobController(GenomicJob.METRICS_FILE_INGEST\n ) as controller:\n controller.ingest_metrics_file(metric_type='user_events',\n file_path=test_file_path)\n job_run_id = controller.job_run.id\n metrics = self.user_event_metrics_dao.get_all()\n for pid in pids:\n file_metrics = list(filter(lambda x: int(x['participant_id'].\n split('P')[-1]) == pid, metrics_to_ingest['rows']))\n participant_ingested_metrics = list(filter(lambda x: x.\n participant_id == pid, metrics))\n self.assertEqual(len(file_metrics), len(\n participant_ingested_metrics))\n self.assertTrue(all(obj.run_id == job_run_id for obj in\n participant_ingested_metrics))\n\n @mock.patch(\n 'rdr_service.genomic.genomic_job_controller.GenomicJobController.execute_cloud_task'\n )\n def test_reconcile_pdr_data(self, mock_cloud_task):\n with GenomicJobController(GenomicJob.RECONCILE_PDR_DATA) as controller:\n controller.reconcile_pdr_data()\n cloud_task_endpoint = 'rebuild_genomic_table_records_task'\n first_run = self.job_run_dao.get_all()\n self.assertEqual(mock_cloud_task.call_count, 1)\n call_args = mock_cloud_task.call_args_list\n self.assertEqual(len(call_args), 1)\n self.assertEqual(call_args[0].args[0]['table'], self.job_run_dao.\n model_type.__tablename__)\n self.assertTrue(type(call_args[0].args[0]['ids']) is list)\n self.assertEqual(call_args[0].args[0]['ids'], [obj.id for obj in\n first_run])\n self.assertEqual(call_args[0].args[1], cloud_task_endpoint)\n participant = self.data_generator.create_database_participant()\n gen_set = self.data_generator.create_database_genomic_set(\n genomicSetName='.', genomicSetCriteria='.', genomicSetVersion=1)\n plus_ten = clock.CLOCK.now() + datetime.timedelta(minutes=10)\n plus_ten = plus_ten.replace(microsecond=0)\n with FakeClock(plus_ten):\n for i in range(2):\n gen_member = (self.data_generator.\n create_database_genomic_set_member(genomicSetId=gen_set\n .id, biobankId='100153482', sampleId='21042005280',\n genomeType='aou_wgs', genomicWorkflowState=\n GenomicWorkflowState.AW1))\n gen_processed_file = (self.data_generator.\n create_database_genomic_file_processed(runId=first_run[\n 0].id, startTime=clock.CLOCK.now(), filePath=\n f'test_file_path_{i}', bucketName='test_bucket',\n fileName='test_file_name'))\n self.data_generator.create_database_genomic_gc_validation_metrics(\n genomicSetMemberId=gen_member.id,\n genomicFileProcessedId=gen_processed_file.id)\n manifest = (self.data_generator.\n create_database_genomic_manifest_file(manifestTypeId=2,\n filePath=f'test_file_path_{i}'))\n 
self.data_generator.create_database_genomic_manifest_feedback(\n inputManifestFileId=manifest.id, feedbackRecordCount=2)\n self.data_generator.create_database_genomic_user_event_metrics(\n participant_id=participant.participantId, event_name=\n 'test_event', run_id=1)\n self.data_generator.create_database_genomic_informing_loop(\n message_record_id=1, event_type=\n 'informing_loop_decision', module_type='gem',\n participant_id=participant.participantId,\n decision_value='maybe_later', event_authored_time=clock\n .CLOCK.now())\n self.data_generator.create_database_genomic_cvl_past_due(\n cvl_site_id='co', email_notification_sent=0, sample_id=\n 'sample_test', results_type='hdr',\n genomic_set_member_id=gen_member.id)\n self.data_generator.create_database_genomic_appointment(\n message_record_id=i, appointment_id=i, event_type=\n 'appointment_scheduled', module_type='hdr',\n participant_id=participant.participantId,\n event_authored_time=clock.CLOCK.now(), source='Color',\n appointment_timestamp=format_datetime(clock.CLOCK.now()\n ), appointment_timezone='America/Los_Angeles', location\n ='123 address st', contact_number='17348675309',\n language='en')\n self.data_generator.create_database_genomic_member_report_state(\n genomic_set_member_id=gen_member.id, participant_id=\n participant.participantId, module='gem',\n genomic_report_state=GenomicReportState.GEM_RPT_READY,\n event_authored_time=clock.CLOCK.now())\n self.data_generator.create_genomic_result_viewed(participant_id\n =participant.participantId, event_type='result_viewed',\n event_authored_time=clock.CLOCK.now(), module_type=\n 'gem', sample_id=gen_member.sampleId)\n with GenomicJobController(GenomicJob.RECONCILE_PDR_DATA) as controller:\n controller.reconcile_pdr_data()\n affected_tables = ['genomic_set', 'genomic_set_member',\n 'genomic_job_run', 'genomic_file_processed',\n 'genomic_gc_validation_metrics', 'genomic_manifest_file',\n 'genomic_manifest_feedback', 'genomic_informing_loop',\n 'genomic_cvl_results_past_due', 'user_event_metrics',\n 'genomic_member_report_state', 'genomic_result_viewed',\n 'genomic_appointment_event']\n num_calls = len(affected_tables) + 1\n self.assertEqual(mock_cloud_task.call_count, num_calls)\n call_args = mock_cloud_task.call_args_list\n self.assertEqual(len(call_args), num_calls)\n mock_tables = set([obj[0][0]['table'] for obj in call_args])\n mock_endpoint = [obj[0][1] for obj in call_args]\n self.assertTrue([mock_tables].sort() == affected_tables.sort())\n self.assertTrue(all(obj for obj in mock_endpoint if obj ==\n cloud_task_endpoint))\n\n @mock.patch(\n 'rdr_service.genomic.genomic_job_controller.GenomicJobController.execute_cloud_task'\n )\n def test_retry_manifest_ingestions_if_deltas(self, mock_cloud_task):\n bucket_name = 'test-bucket'\n aw1_file_name = (\n 'AW1_wgs_sample_manifests/RDR_AoU_SEQ_PKG-2104-026571.csv')\n aw1_manifest_path = f'{bucket_name}/{aw1_file_name}'\n aw2_file_name = (\n 'AW2_wgs_data_manifests/RDR_AoU_SEQ_DataManifest_04092021.csv')\n aw2_manifest_path = f'{bucket_name}/{aw2_file_name}'\n gen_set = self.data_generator.create_database_genomic_set(\n genomicSetName='.', genomicSetCriteria='.', genomicSetVersion=1)\n aw1_job_run = self.data_generator.create_database_genomic_job_run(jobId\n =GenomicJob.AW1_MANIFEST, startTime=clock.CLOCK.now(), endTime=\n clock.CLOCK.now(), runResult=GenomicSubProcessResult.SUCCESS)\n aw2_job_run = self.data_generator.create_database_genomic_job_run(jobId\n =GenomicJob.METRICS_INGESTION, startTime=clock.CLOCK.now(),\n 
endTime=clock.CLOCK.now(), runResult=GenomicSubProcessResult.\n SUCCESS)\n with GenomicJobController(GenomicJob.RETRY_MANIFEST_INGESTIONS\n ) as controller:\n controller.retry_manifest_ingestions()\n job_run = self.job_run_dao.get(3)\n self.assertEqual(job_run.jobId, GenomicJob.RETRY_MANIFEST_INGESTIONS)\n self.assertEqual(job_run.runStatus, GenomicSubProcessStatus.COMPLETED)\n self.assertEqual(job_run.runResult, GenomicSubProcessResult.NO_FILES)\n self.assertEqual(mock_cloud_task.call_count, 0)\n self.assertFalse(mock_cloud_task.call_count)\n self.data_generator.create_database_genomic_aw1_raw(file_path=\n aw1_manifest_path, package_id='PKG-2104-026571', biobank_id=\n 'A10001')\n self.data_generator.create_database_genomic_aw2_raw(file_path=\n aw2_manifest_path, biobank_id='A10001', sample_id='100001',\n biobankidsampleid='A10001_100001')\n aw1_manifest_file = (self.data_generator.\n create_database_genomic_manifest_file(created=clock.CLOCK.now(),\n modified=clock.CLOCK.now(), uploadDate=clock.CLOCK.now(),\n manifestTypeId=GenomicManifestTypes.AW1, filePath=\n aw1_manifest_path, fileName=aw1_file_name, bucketName=\n bucket_name, recordCount=1, rdrProcessingComplete=1,\n rdrProcessingCompleteDate=clock.CLOCK.now()))\n aw2_manifest_file = (self.data_generator.\n create_database_genomic_manifest_file(created=clock.CLOCK.now(),\n modified=clock.CLOCK.now(), uploadDate=clock.CLOCK.now(),\n manifestTypeId=GenomicManifestTypes.AW2, filePath=\n aw2_manifest_path, fileName=aw2_file_name, bucketName=\n bucket_name, recordCount=1, rdrProcessingComplete=1,\n rdrProcessingCompleteDate=clock.CLOCK.now()))\n aw1_file_processed = (self.data_generator.\n create_database_genomic_file_processed(runId=aw1_job_run.id,\n startTime=clock.CLOCK.now(), genomicManifestFileId=\n aw1_manifest_file.id, filePath=f'/{aw1_manifest_path}',\n bucketName=bucket_name, fileName=aw1_file_name))\n aw2_file_processed = (self.data_generator.\n create_database_genomic_file_processed(runId=aw2_job_run.id,\n startTime=clock.CLOCK.now(), genomicManifestFileId=\n aw2_manifest_file.id, filePath=f'/{aw2_manifest_path}',\n bucketName=bucket_name, fileName=aw2_file_name))\n gen_member = self.data_generator.create_database_genomic_set_member(\n genomicSetId=gen_set.id, biobankId='100153482', sampleId=\n '21042005280', genomeType='aou_wgs', genomicWorkflowState=\n GenomicWorkflowState.AW1, aw1FileProcessedId=aw1_file_processed.id)\n self.data_generator.create_database_genomic_gc_validation_metrics(\n genomicSetMemberId=gen_member.id, genomicFileProcessedId=\n aw2_file_processed.id)\n with GenomicJobController(GenomicJob.RETRY_MANIFEST_INGESTIONS\n ) as controller:\n controller.retry_manifest_ingestions()\n job_run = self.job_run_dao.get(4)\n self.assertEqual(job_run.jobId, GenomicJob.RETRY_MANIFEST_INGESTIONS)\n self.assertEqual(job_run.runStatus, GenomicSubProcessStatus.COMPLETED)\n self.assertEqual(job_run.runResult, GenomicSubProcessResult.NO_FILES)\n self.assertEqual(mock_cloud_task.call_count, 0)\n self.assertFalse(mock_cloud_task.call_count)\n with self.member_dao.session() as session:\n session.query(GenomicGCValidationMetrics).delete()\n session.query(GenomicSetMember).delete()\n with GenomicJobController(GenomicJob.RETRY_MANIFEST_INGESTIONS\n ) as controller:\n controller.retry_manifest_ingestions()\n job_run = self.job_run_dao.get(5)\n self.assertEqual(job_run.jobId, GenomicJob.RETRY_MANIFEST_INGESTIONS)\n self.assertEqual(job_run.runStatus, GenomicSubProcessStatus.COMPLETED)\n self.assertEqual(job_run.runResult, 
GenomicSubProcessResult.SUCCESS)\n self.assertEqual(mock_cloud_task.call_count, 2)\n self.assertTrue(mock_cloud_task.call_count)\n call_args = mock_cloud_task.call_args_list\n self.assertEqual(len(call_args), 2)\n cloud_task_endpoint = ['ingest_aw1_manifest_task',\n 'ingest_aw2_manifest_task']\n mock_endpoint = [obj[0][1] for obj in call_args]\n self.assertTrue(all(obj for obj in mock_endpoint if obj ==\n cloud_task_endpoint))\n mock_buckets = set([obj[0][0]['bucket_name'] for obj in call_args])\n self.assertTrue(len(mock_buckets), 1)\n self.assertTrue(list(mock_buckets)[0] == bucket_name)\n\n def test_calculate_informing_loop_ready_flags(self):\n num_participants = 4\n gen_set = self.data_generator.create_database_genomic_set(\n genomicSetName='.', genomicSetCriteria='.', genomicSetVersion=1)\n for num in range(num_participants):\n plus_num = clock.CLOCK.now() + datetime.timedelta(minutes=num)\n plus_num = plus_num.replace(microsecond=0)\n with FakeClock(plus_num):\n summary = (self.data_generator.\n create_database_participant_summary(\n consentForStudyEnrollment=1, consentForGenomicsROR=1))\n stored_sample = (self.data_generator.\n create_database_biobank_stored_sample(biobankId=summary\n .biobankId, biobankOrderIdentifier=self.fake.pyint()))\n collection_site = self.data_generator.create_database_site(\n siteType='Clinic')\n order = self.data_generator.create_database_biobank_order(\n collectedSiteId=collection_site.siteId, participantId=\n summary.participantId, finalizedTime=plus_num)\n self.data_generator.create_database_biobank_order_identifier(\n value=stored_sample.biobankOrderIdentifier,\n biobankOrderId=order.biobankOrderId, system='1')\n self.data_generator.create_database_biobank_order_identifier(\n value=stored_sample.biobankOrderIdentifier,\n biobankOrderId=order.biobankOrderId, system='2')\n member = (self.data_generator.\n create_database_genomic_set_member(genomicSetId=gen_set\n .id, participantId=summary.participantId, genomeType=\n config.GENOME_TYPE_WGS, qcStatus=GenomicQcStatus.PASS,\n gcManifestSampleSource='Whole Blood', collectionTubeId=\n stored_sample.biobankStoredSampleId))\n self.data_generator.create_database_genomic_gc_validation_metrics(\n genomicSetMemberId=member.id, sexConcordance='True',\n drcFpConcordance='Pass', drcSexConcordance='Pass',\n processingStatus='Pass')\n members_for_ready_loop = (self.member_dao.\n get_members_for_informing_loop_ready())\n self.assertEqual(len(members_for_ready_loop), num_participants)\n current_set_members = self.member_dao.get_all()\n self.assertTrue(all(obj.informingLoopReadyFlag == 0 for obj in\n current_set_members))\n self.assertTrue(all(obj.informingLoopReadyFlagModified is None for\n obj in current_set_members))\n with GenomicJobController(GenomicJob.CALCULATE_INFORMING_LOOP_READY\n ) as controller:\n controller.calculate_informing_loop_ready_flags()\n members_for_ready_loop = (self.member_dao.\n get_members_for_informing_loop_ready())\n self.assertEqual(len(members_for_ready_loop), num_participants)\n calculation_limit = 2\n config.override_setting(config.CALCULATE_READY_FLAG_LIMIT, [\n calculation_limit])\n with GenomicJobController(GenomicJob.CALCULATE_INFORMING_LOOP_READY\n ) as controller:\n controller.calculate_informing_loop_ready_flags()\n current_set_members = self.member_dao.get_all()\n self.assertTrue(any(obj.informingLoopReadyFlag == 1 for obj in\n current_set_members))\n self.assertTrue(any(obj.informingLoopReadyFlagModified is not None for\n obj in current_set_members))\n current_loops_set = [obj for 
obj in current_set_members if obj.\n informingLoopReadyFlag == 1 and obj.\n informingLoopReadyFlagModified is not None]\n self.assertEqual(len(current_loops_set), calculation_limit)\n members_for_ready_loop = (self.member_dao.\n get_members_for_informing_loop_ready())\n self.assertEqual(len(members_for_ready_loop), num_participants // 2)\n with GenomicJobController(GenomicJob.CALCULATE_INFORMING_LOOP_READY\n ) as controller:\n controller.calculate_informing_loop_ready_flags()\n current_set_members = self.member_dao.get_all()\n self.assertTrue(all(obj.informingLoopReadyFlag == 1 for obj in\n current_set_members))\n self.assertTrue(all(obj.informingLoopReadyFlagModified is not None for\n obj in current_set_members))\n members_for_ready_loop = (self.member_dao.\n get_members_for_informing_loop_ready())\n self.assertEqual(len(members_for_ready_loop), 0)\n\n @mock.patch('rdr_service.services.email_service.EmailService.send_email')\n def test_getting_results_withdrawn(self, email_mock):\n num_participants = 4\n result_withdrawal_dao = GenomicResultWithdrawalsDao()\n gen_set = self.data_generator.create_database_genomic_set(\n genomicSetName='.', genomicSetCriteria='.', genomicSetVersion=1)\n gen_job_run = self.data_generator.create_database_genomic_job_run(jobId\n =GenomicJob.AW1_MANIFEST, startTime=clock.CLOCK.now(),\n runResult=GenomicSubProcessResult.SUCCESS)\n pids = []\n for num in range(num_participants):\n summary = self.data_generator.create_database_participant_summary(\n consentForStudyEnrollment=1, consentForGenomicsROR=1,\n withdrawalStatus=WithdrawalStatus.EARLY_OUT)\n self.data_generator.create_database_genomic_set_member(genomicSetId\n =gen_set.id, participantId=summary.participantId,\n genomeType=config.GENOME_TYPE_ARRAY, gemA1ManifestJobRunId=\n gen_job_run.id if num % 2 == 0 else None)\n self.data_generator.create_database_genomic_set_member(genomicSetId\n =gen_set.id, participantId=summary.participantId,\n genomeType=config.GENOME_TYPE_WGS, cvlW1ilHdrJobRunId=\n gen_job_run.id)\n pids.append(summary.participantId)\n config.override_setting(config.RDR_GENOMICS_NOTIFICATION_EMAIL,\n 'email@test.com')\n with GenomicJobController(GenomicJob.RESULTS_PIPELINE_WITHDRAWALS\n ) as controller:\n controller.check_results_withdrawals()\n self.assertEqual(email_mock.call_count, 2)\n call_args = email_mock.call_args_list\n self.assertTrue(any('GEM' in call.args[0].subject for call in\n call_args))\n self.assertTrue(any('HEALTH' in call.args[0].subject for call in\n call_args))\n job_runs = self.job_run_dao.get_all()\n current_job_run = list(filter(lambda x: x.jobId == GenomicJob.\n RESULTS_PIPELINE_WITHDRAWALS, job_runs))[0]\n self.assertTrue(current_job_run.runResult ==\n GenomicSubProcessResult.SUCCESS)\n all_withdrawal_records = result_withdrawal_dao.get_all()\n self.assertTrue(len(all_withdrawal_records) == len(pids))\n self.assertTrue(all(obj.participant_id in pids for obj in\n all_withdrawal_records))\n array_results = list(filter(lambda x: x.array_results == 1,\n all_withdrawal_records))\n self.assertTrue(len(array_results), 2)\n cvl_results = list(filter(lambda x: x.cvl_results == 1,\n all_withdrawal_records))\n self.assertTrue(len(cvl_results), num_participants)\n with GenomicJobController(GenomicJob.RESULTS_PIPELINE_WITHDRAWALS\n ) as controller:\n controller.check_results_withdrawals()\n self.assertEqual(email_mock.call_count, 2)\n job_runs = self.job_run_dao.get_all()\n current_job_run = list(filter(lambda x: x.jobId == GenomicJob.\n RESULTS_PIPELINE_WITHDRAWALS, job_runs))[1]\n 
self.assertTrue(current_job_run.runResult ==\n GenomicSubProcessResult.NO_RESULTS)\n\n def test_gem_results_to_report_state(self):\n num_participants = 8\n gen_set = self.data_generator.create_database_genomic_set(\n genomicSetName='.', genomicSetCriteria='.', genomicSetVersion=1)\n gem_a2_job_run = self.data_generator.create_database_genomic_job_run(\n jobId=GenomicJob.GEM_A2_MANIFEST, startTime=clock.CLOCK.now(),\n runResult=GenomicSubProcessResult.SUCCESS)\n pids_to_update, member_ids = [], []\n for num in range(num_participants):\n summary = self.data_generator.create_database_participant_summary(\n consentForStudyEnrollment=1, consentForGenomicsROR=1,\n withdrawalStatus=WithdrawalStatus.EARLY_OUT)\n member = self.data_generator.create_database_genomic_set_member(\n genomicSetId=gen_set.id, participantId=summary.\n participantId, genomeType=config.GENOME_TYPE_ARRAY)\n if num % 2 == 0:\n member_ids.append(member.id)\n pids_to_update.append(summary.participantId)\n with GenomicJobController(GenomicJob.GEM_RESULT_REPORTS) as controller:\n controller.gem_results_to_report_state()\n current_job_runs = self.job_run_dao.get_all()\n self.assertEqual(len(current_job_runs), 2)\n current_job_run = list(filter(lambda x: x.jobId == GenomicJob.\n GEM_RESULT_REPORTS, current_job_runs))[0]\n self.assertTrue(current_job_run.runResult ==\n GenomicSubProcessResult.NO_RESULTS)\n current_members = self.member_dao.get_all()\n for member in current_members:\n if member.participantId in pids_to_update:\n member.gemA2ManifestJobRunId = gem_a2_job_run.id\n member.genomicWorkflowState = (GenomicWorkflowState.\n GEM_RPT_READY)\n self.member_dao.update(member)\n with GenomicJobController(GenomicJob.GEM_RESULT_REPORTS) as controller:\n controller.gem_results_to_report_state()\n current_job_runs = self.job_run_dao.get_all()\n self.assertEqual(len(current_job_runs), 3)\n current_job_run = list(filter(lambda x: x.jobId == GenomicJob.\n GEM_RESULT_REPORTS, current_job_runs))[1]\n self.assertTrue(current_job_run.runResult ==\n GenomicSubProcessResult.SUCCESS)\n current_gem_report_states = self.report_state_dao.get_all()\n self.assertEqual(len(current_gem_report_states), len(pids_to_update))\n self.assertTrue(all(obj.event_type == 'result_ready' for obj in\n current_gem_report_states))\n self.assertTrue(all(obj.event_authored_time is not None for obj in\n current_gem_report_states))\n self.assertTrue(all(obj.module == 'gem' for obj in\n current_gem_report_states))\n self.assertTrue(all(obj.genomic_report_state == GenomicReportState.\n GEM_RPT_READY for obj in current_gem_report_states))\n self.assertTrue(all(obj.genomic_report_state_str ==\n GenomicReportState.GEM_RPT_READY.name for obj in\n current_gem_report_states))\n self.assertTrue(all(obj.genomic_set_member_id in member_ids for obj in\n current_gem_report_states))\n with GenomicJobController(GenomicJob.GEM_RESULT_REPORTS) as controller:\n controller.gem_results_to_report_state()\n current_job_runs = self.job_run_dao.get_all()\n self.assertEqual(len(current_job_runs), 4)\n current_job_run = list(filter(lambda x: x.jobId == GenomicJob.\n GEM_RESULT_REPORTS, current_job_runs))[2]\n self.assertTrue(current_job_run.runResult ==\n GenomicSubProcessResult.NO_RESULTS)\n self.clear_table_after_test('genomic_member_report_state')\n <mask token>\n\n def test_reconcile_message_broker_results_ready(self):\n self.data_generator.create_database_genomic_set(genomicSetName=\n 'test', genomicSetCriteria='.', genomicSetVersion=1)\n 
self.data_generator.create_database_genomic_job_run(jobId=\n GenomicJob.METRICS_FILE_INGEST, startTime=clock.CLOCK.now())\n for pid in range(7):\n self.data_generator.create_database_participant(participantId=1 +\n pid, biobankId=1 + pid)\n for i in range(1, 6):\n self.data_generator.create_database_genomic_set_member(\n participantId=i, genomicSetId=1, biobankId=i,\n collectionTubeId=100 + i, sampleId=10 + i, genomeType='aou_wgs'\n )\n if i < 4:\n self.data_generator.create_database_genomic_user_event_metrics(\n participant_id=i, created_at=datetime.datetime(2022, 10,\n 6, 0), event_name='pgx.result_ready', run_id=1)\n if i == 4:\n self.data_generator.create_database_genomic_user_event_metrics(\n participant_id=i, created_at=datetime.datetime(2022, 10,\n 6, 0), event_name='hdr.result_ready.informative', run_id=1)\n if i == 5:\n self.data_generator.create_database_genomic_user_event_metrics(\n participant_id=i, created_at=datetime.datetime(2022, 10,\n 6, 0), event_name='hdr.result_ready.uninformative',\n run_id=1)\n genomic_cvl_pipeline.reconcile_message_broker_results_ready()\n report_state_dao = GenomicMemberReportStateDao()\n states = report_state_dao.get_all()\n self.assertEqual(5, len(states))\n pgx_records = [rec for rec in states if rec.module == 'pgx_v1']\n hdr_record_uninf = [rec for rec in states if rec.\n genomic_report_state == GenomicReportState.HDR_RPT_UNINFORMATIVE][0\n ]\n hdr_record_pos = [rec for rec in states if rec.genomic_report_state ==\n GenomicReportState.HDR_RPT_POSITIVE][0]\n for pgx_record in pgx_records:\n self.assertEqual(GenomicReportState.PGX_RPT_READY, pgx_record.\n genomic_report_state)\n self.assertEqual('PGX_RPT_READY', pgx_record.\n genomic_report_state_str)\n self.assertEqual(int(pgx_record.sample_id), pgx_record.\n participant_id + 10)\n self.assertEqual('result_ready', pgx_record.event_type)\n self.assertEqual(datetime.datetime(2022, 10, 6, 0), pgx_record.\n event_authored_time)\n self.assertIsNotNone(pgx_record.created_from_metric_id)\n self.assertEqual('HDR_RPT_UNINFORMATIVE', hdr_record_uninf.\n genomic_report_state_str)\n self.assertEqual(int(hdr_record_uninf.sample_id), hdr_record_uninf.\n participant_id + 10)\n self.assertEqual('result_ready', hdr_record_uninf.event_type)\n self.assertEqual(datetime.datetime(2022, 10, 6, 0),\n hdr_record_uninf.event_authored_time)\n self.assertIsNotNone(hdr_record_uninf.created_from_metric_id)\n self.assertEqual('HDR_RPT_POSITIVE', hdr_record_pos.\n genomic_report_state_str)\n self.assertEqual(int(hdr_record_pos.sample_id), hdr_record_pos.\n participant_id + 10)\n self.assertEqual('result_ready', hdr_record_pos.event_type)\n self.assertEqual(datetime.datetime(2022, 10, 6, 0), hdr_record_pos.\n event_authored_time)\n self.assertIsNotNone(hdr_record_pos.created_from_metric_id)\n <mask token>\n\n def test_ingest_appointment_metrics_file(self):\n test_file = 'Genomic-Metrics-File-Appointment-Events-Test.json'\n bucket_name = 'test_bucket'\n sub_folder = 'appointment_events'\n pids = []\n for _ in range(4):\n summary = self.data_generator.create_database_participant_summary()\n pids.append(summary.participantId)\n test_file_path = f'{bucket_name}/{sub_folder}/{test_file}'\n appointment_data = test_data.load_test_data_json(\n 'Genomic-Metrics-File-Appointment-Events-Test.json')\n appointment_data_str = json.dumps(appointment_data, indent=4)\n with open_cloud_file(test_file_path, mode='wb') as cloud_file:\n cloud_file.write(appointment_data_str.encode('utf-8'))\n with 
GenomicJobController(GenomicJob.APPOINTMENT_METRICS_FILE_INGEST\n ) as controller:\n controller.ingest_appointment_metrics_file(file_path=test_file_path\n )\n all_metrics = self.appointment_metrics_dao.get_all()\n self.assertEqual(len(all_metrics), 5)\n self.assertTrue(all(obj.participant_id in pids for obj in all_metrics))\n self.assertTrue(all(obj.file_path == test_file_path for obj in\n all_metrics))\n self.assertTrue(all(obj.appointment_event is not None for obj in\n all_metrics))\n self.assertTrue(all(obj.created is not None for obj in all_metrics))\n self.assertTrue(all(obj.modified is not None for obj in all_metrics))\n self.assertTrue(all(obj.module_type is not None for obj in all_metrics)\n )\n self.assertTrue(all(obj.event_authored_time is not None for obj in\n all_metrics))\n self.assertTrue(all(obj.event_type is not None for obj in all_metrics))\n current_job_runs = self.job_run_dao.get_all()\n self.assertEqual(len(current_job_runs), 1)\n current_job_run = current_job_runs[0]\n self.assertTrue(current_job_run.jobId == GenomicJob.\n APPOINTMENT_METRICS_FILE_INGEST)\n self.assertTrue(current_job_run.runResult ==\n GenomicSubProcessResult.SUCCESS)\n self.clear_table_after_test('genomic_appointment_event_metrics')\n\n def test_reconcile_appointments_with_metrics(self):\n fake_date = parser.parse('2020-05-29T08:00:01-05:00')\n for num in range(4):\n summary = self.data_generator.create_database_participant_summary()\n missing_json = {'event': 'appointment_updated',\n 'eventAuthoredTime': '2022-09-16T17:18:38Z',\n 'participantId': f'P{summary.participantId}', 'messageBody':\n {'module_type': 'hdr', 'appointment_timestamp':\n '2022-09-19T19:30:00+00:00', 'id': 55,\n 'appointment_timezone': 'America/Los_Angeles', 'location':\n 'CA', 'contact_number': '18043704252', 'language': 'en',\n 'source': 'Color'}}\n if num % 2 == 0:\n self.data_generator.create_database_genomic_appointment(\n message_record_id=num, appointment_id=num, event_type=\n 'appointment_scheduled', module_type='hdr',\n participant_id=summary.participantId,\n event_authored_time=fake_date, source='Color',\n appointment_timestamp=format_datetime(clock.CLOCK.now()\n ), appointment_timezone='America/Los_Angeles', location\n ='123 address st', contact_number='17348675309',\n language='en')\n self.data_generator.create_database_genomic_appointment_metric(\n participant_id=summary.participantId, appointment_event=\n json.dumps(missing_json, indent=4) if num % 2 != 0 else\n 'foo', file_path='test_file_path', module_type='hdr',\n event_authored_time=fake_date, event_type=\n 'appointment_updated' if num % 2 != 0 else\n 'appointment_scheduled')\n current_events = self.appointment_event_dao.get_all()\n self.assertEqual(len(current_events), 2)\n current_metrics = self.appointment_metrics_dao.get_all()\n self.assertEqual(len(current_metrics), 4)\n self.assertTrue(all(obj.reconcile_job_run_id is None for obj in\n current_metrics))\n with GenomicJobController(GenomicJob.APPOINTMENT_METRICS_RECONCILE\n ) as controller:\n controller.reconcile_appointment_events_from_metrics()\n job_run = self.job_run_dao.get_all()\n self.assertEqual(len(job_run), 1)\n self.assertTrue(job_run[0].jobId == GenomicJob.\n APPOINTMENT_METRICS_RECONCILE)\n current_events = self.appointment_event_dao.get_all()\n self.assertEqual(len(current_events), 4)\n scheduled = list(filter(lambda x: x.event_type ==\n 'appointment_scheduled', current_events))\n self.assertEqual(len(scheduled), 2)\n self.assertTrue(all(obj.created_from_metric_id is None for obj in\n 
scheduled))\n updated = list(filter(lambda x: x.event_type ==\n 'appointment_updated', current_events))\n self.assertEqual(len(updated), 2)\n self.assertTrue(all(obj.created_from_metric_id is not None for obj in\n updated))\n current_metrics = self.appointment_metrics_dao.get_all()\n self.assertEqual(len(current_metrics), 4)\n self.assertTrue(all(obj.reconcile_job_run_id is not None for obj in\n current_metrics))\n self.assertTrue(all(obj.reconcile_job_run_id == job_run[0].id for\n obj in current_metrics))\n self.clear_table_after_test('genomic_appointment_event_metrics')\n\n @mock.patch('rdr_service.services.email_service.EmailService.send_email')\n def test_check_appointments_gror_changed(self, email_mock):\n fake_date = parser.parse('2022-09-01T13:43:23')\n notified_dao = GenomicAppointmentEventNotifiedDao()\n config.override_setting(config.GENOMIC_COLOR_PM_EMAIL, [\n 'test@example.com'])\n num_participants = 4\n for num in range(num_participants):\n gror = num if num > 1 else 1\n summary = self.data_generator.create_database_participant_summary(\n consentForStudyEnrollment=1, consentForGenomicsROR=gror)\n self.data_generator.create_database_genomic_appointment(\n message_record_id=num, appointment_id=num, event_type=\n 'appointment_scheduled', module_type='hdr', participant_id=\n summary.participantId, event_authored_time=fake_date,\n source='Color', appointment_timestamp=format_datetime(clock\n .CLOCK.now()), appointment_timezone='America/Los_Angeles',\n location='123 address st', contact_number='17348675309',\n language='en')\n changed_ppts = (self.appointment_event_dao.\n get_appointments_gror_changed())\n self.assertEqual(2, len(changed_ppts))\n with GenomicJobController(GenomicJob.CHECK_APPOINTMENT_GROR_CHANGED\n ) as controller:\n controller.check_appointments_gror_changed()\n self.assertEqual(email_mock.call_count, 1)\n notified_appointments = notified_dao.get_all()\n self.assertEqual(2, len(notified_appointments))\n summary = self.data_generator.create_database_participant_summary(\n consentForStudyEnrollment=1, consentForGenomicsROR=2)\n self.data_generator.create_database_genomic_appointment(\n message_record_id=5, appointment_id=5, event_type=\n 'appointment_scheduled', module_type='hdr', participant_id=\n summary.participantId, event_authored_time=fake_date, source=\n 'Color', appointment_timestamp=format_datetime(clock.CLOCK.now(\n )), appointment_timezone='America/Los_Angeles', location=\n '123 address st', contact_number='17348675309', language='en')\n changed_ppts = (self.appointment_event_dao.\n get_appointments_gror_changed())\n self.assertEqual(1, len(changed_ppts))\n\n @mock.patch('rdr_service.services.email_service.EmailService.send_email')\n def test_check_gcr_14day_escalation(self, email_mock):\n fake_date = parser.parse('2022-09-01T13:43:23')\n fake_date2 = parser.parse('2022-09-02T14:14:00')\n fake_date3 = parser.parse('2022-09-03T15:15:00')\n config.override_setting(config.GENOMIC_GCR_ESCALATION_EMAILS, [\n 'test@example.com'])\n self.data_generator.create_database_genomic_set(genomicSetName=\n 'test', genomicSetCriteria='.', genomicSetVersion=1)\n pids = []\n for _ in range(6):\n summary = self.data_generator.create_database_participant_summary(\n consentForStudyEnrollment=1, consentForGenomicsROR=1)\n set_member = (self.data_generator.\n create_database_genomic_set_member(participantId=summary.\n participantId, genomicSetId=1, biobankId=1001,\n collectionTubeId=100, sampleId=10, genomeType='aou_wgs'))\n 
self.data_generator.create_database_genomic_member_report_state(\n participant_id=summary.participantId, genomic_report_state=\n GenomicReportState.HDR_RPT_POSITIVE, genomic_set_member_id=\n set_member.id, module='hdr_v1', event_authored_time=fake_date)\n pids.append(summary.participantId)\n self.data_generator.create_database_genomic_appointment(\n message_record_id=101, appointment_id=102, event_type=\n 'appointment_scheduled', module_type='hdr', participant_id=pids\n [0], event_authored_time=fake_date, source='Color',\n appointment_timestamp=format_datetime(clock.CLOCK.now()),\n appointment_timezone='America/Los_Angeles', location=\n '123 address st', contact_number='17348675309', language='en')\n self.data_generator.create_database_genomic_appointment(\n message_record_id=102, appointment_id=103, event_type=\n 'appointment_completed', module_type='hdr', participant_id=pids\n [1], event_authored_time=fake_date, source='Color',\n appointment_timestamp=fake_date, appointment_timezone=\n 'America/Los_Angeles', location='123 address st',\n contact_number='17348675309', language='en')\n self.data_generator.create_database_genomic_appointment(\n message_record_id=103, appointment_id=104, event_type=\n 'appointment_scheduled', module_type='hdr', participant_id=pids\n [2], event_authored_time=fake_date2, source='Color',\n appointment_timestamp=format_datetime(clock.CLOCK.now()),\n appointment_timezone='America/Los_Angeles', location=\n '123 address st', contact_number='17348675309', language='en')\n self.data_generator.create_database_genomic_appointment(\n message_record_id=104, appointment_id=104, event_type=\n 'appointment_cancelled', module_type='hdr', participant_id=pids\n [2], event_authored_time=fake_date3, source='Color',\n appointment_timestamp=format_datetime(clock.CLOCK.now()),\n appointment_timezone='America/Los_Angeles', location=\n '123 address st', contact_number='17348675309', language='en')\n notified_dao = GenomicDefaultBaseDao(model_type=\n GenomicGCROutreachEscalationNotified)\n notified_dao.insert_bulk([{'participant_id': pids[4], 'created':\n clock.CLOCK.now(), 'modified': clock.CLOCK.now(),\n 'message_sent': True}, {'participant_id': pids[5], 'created':\n clock.CLOCK.now(), 'modified': clock.CLOCK.now(),\n 'message_sent': False}])\n with clock.FakeClock(parser.parse('2022-11-1T05:15:00')):\n escalated_participants = (self.report_state_dao.\n get_hdr_result_positive_no_appointment(num_days=14))\n results = [pid[0] for pid in escalated_participants]\n self.assertIn(pids[2], results)\n self.assertIn(pids[3], results)\n self.assertIn(pids[5], results)\n self.assertNotIn(pids[0], results)\n self.assertNotIn(pids[1], results)\n self.assertNotIn(pids[4], results)\n with GenomicJobController(GenomicJob.CHECK_GCR_OUTREACH_ESCALATION\n ) as controller:\n controller.check_gcr_escalation(controller.job_id)\n self.assertEqual(email_mock.call_count, 3)\n self.assertEqual(email_mock.call_args.args[0].subject,\n 'GCR Outreach 14 Day Escalation')\n self.clear_table_after_test('genomic_gcr_outreach_escalation_notified')\n <mask token>\n <mask token>\n\n @mock.patch(\n 'rdr_service.genomic.genomic_job_controller.GenomicJobController.execute_cloud_task'\n )\n def test_execute_auto_generation_from_last_run(self, cloud_task_mock):\n with GenomicJobController(GenomicJob.PR_PR_WORKFLOW) as controller:\n controller.job_result = GenomicSubProcessResult.ERROR\n controller._end_run()\n controller.execute_auto_generation_from_cloud_task()\n last_job_run_status = 
self.job_run_dao.get_last_run_status_for_job_id(\n job_id=GenomicJob.PR_PR_WORKFLOW)\n self.assertTrue(last_job_run_status is not None)\n self.assertTrue(last_job_run_status[0] == GenomicSubProcessResult.ERROR\n )\n self.assertEqual(cloud_task_mock.called, False)\n self.assertEqual(cloud_task_mock.call_count, 0)\n with GenomicJobController(GenomicJob.PR_PR_WORKFLOW) as controller:\n controller.job_result = GenomicSubProcessResult.SUCCESS\n controller._end_run()\n controller.execute_auto_generation_from_cloud_task()\n last_job_run_status = self.job_run_dao.get_last_run_status_for_job_id(\n job_id=GenomicJob.PR_PR_WORKFLOW)\n self.assertTrue(last_job_run_status is not None)\n self.assertTrue(last_job_run_status[0] == GenomicSubProcessResult.\n SUCCESS)\n self.assertEqual(cloud_task_mock.called, True)\n self.assertTrue(cloud_task_mock.call_args[1].get('payload').get(\n 'manifest_type') == 'p0')\n self.assertTrue(cloud_task_mock.call_args[1].get('task_queue') ==\n 'genomic-generate-manifest')\n all_job_runs = self.job_run_dao.get_all()\n self.assertEqual(len(all_job_runs), 2)\n self.assertTrue(all(obj.runResult in [GenomicSubProcessResult.\n SUCCESS, GenomicSubProcessResult.ERROR] for obj in all_job_runs))\n self.assertTrue(all(obj.jobId == GenomicJob.PR_PR_WORKFLOW for obj in\n all_job_runs))\n",
"step-4": "<mask token>\n\n\nclass GenomicJobControllerTest(BaseTestCase):\n\n def setUp(self):\n super(GenomicJobControllerTest, self).setUp()\n self.data_file_dao = GenomicGcDataFileDao()\n self.event_data_dao = MessageBrokenEventDataDao()\n self.incident_dao = GenomicIncidentDao()\n self.member_dao = GenomicSetMemberDao()\n self.metrics_dao = GenomicGCValidationMetricsDao()\n self.user_event_metrics_dao = UserEventMetricsDao()\n self.job_run_dao = GenomicJobRunDao()\n self.report_state_dao = GenomicMemberReportStateDao()\n self.appointment_event_dao = GenomicAppointmentEventDao()\n self.appointment_metrics_dao = GenomicAppointmentEventMetricsDao()\n\n def test_incident_with_long_message(self):\n \"\"\"Make sure the length of incident messages doesn't cause issues when recording them\"\"\"\n incident_message = '1' * (GenomicIncident.message.type.length + 20)\n mock_slack_handler = mock.MagicMock()\n job_controller = GenomicJobController(job_id=1)\n job_controller.genomic_alert_slack = mock_slack_handler\n job_controller.create_incident(message=incident_message, slack=True)\n incident: GenomicIncident = self.session.query(GenomicIncident).one()\n self.assertTrue(incident_message.startswith(incident.message))\n mock_slack_handler.send_message_to_webhook.assert_called_with(\n message_data={'text': incident_message})\n\n def test_gvcf_files_ingestion(self):\n job_controller = GenomicJobController(job_id=38)\n bucket_name = 'test_bucket'\n file_path = (\n 'Wgs_sample_raw_data/SS_VCF_research/BCM_A100153482_21042005280_SIA0013441__1.hard-filtered.gvcf.gz'\n )\n file_path_md5 = (\n 'Wgs_sample_raw_data/SS_VCF_research/BCM_A100153482_21042005280_SIA0013441__1.hard-filtered.gvcf.gz.md5sum'\n )\n full_path = f'{bucket_name}/{file_path}'\n full_path_md5 = f'{bucket_name}/{file_path_md5}'\n gen_set = self.data_generator.create_database_genomic_set(\n genomicSetName='.', genomicSetCriteria='.', genomicSetVersion=1)\n gen_member = self.data_generator.create_database_genomic_set_member(\n genomicSetId=gen_set.id, biobankId='100153482', sampleId=\n '21042005280', genomeType='aou_wgs', genomicWorkflowState=\n GenomicWorkflowState.AW1)\n gen_job_run = self.data_generator.create_database_genomic_job_run(jobId\n =GenomicJob.AW1_MANIFEST, startTime=clock.CLOCK.now(),\n runResult=GenomicSubProcessResult.SUCCESS)\n gen_processed_file = (self.data_generator.\n create_database_genomic_file_processed(runId=gen_job_run.id,\n startTime=clock.CLOCK.now(), filePath='/test_file_path',\n bucketName='test_bucket', fileName='test_file_name'))\n self.data_generator.create_database_genomic_gc_validation_metrics(\n genomicSetMemberId=gen_member.id, genomicFileProcessedId=\n gen_processed_file.id)\n job_controller.ingest_data_files_into_gc_metrics(file_path_md5,\n bucket_name)\n metrics = self.metrics_dao.get_metrics_by_member_id(gen_member.id)\n self.assertIsNotNone(metrics.gvcfMd5Path)\n self.assertEqual(metrics.gvcfMd5Path, full_path_md5)\n job_controller.ingest_data_files_into_gc_metrics(file_path, bucket_name\n )\n metrics = self.metrics_dao.get_metrics_by_member_id(gen_member.id)\n self.assertIsNotNone(metrics.gvcfPath)\n self.assertEqual(metrics.gvcfPath, full_path)\n\n def test_gvcf_files_ingestion_create_incident(self):\n bucket_name = 'test_bucket'\n file_path = (\n 'Wgs_sample_raw_data/SS_VCF_research/BCM_A100153482_21042005280_SIA0013441__1.hard-filtered.gvcf.gz'\n )\n gen_set = self.data_generator.create_database_genomic_set(\n genomicSetName='.', genomicSetCriteria='.', genomicSetVersion=1)\n gen_member = 
self.data_generator.create_database_genomic_set_member(\n genomicSetId=gen_set.id, biobankId='111111111', sampleId=\n '222222222222', genomeType='aou_wgs', genomicWorkflowState=\n GenomicWorkflowState.AW1)\n gen_job_run = self.data_generator.create_database_genomic_job_run(jobId\n =GenomicJob.AW1_MANIFEST, startTime=clock.CLOCK.now(),\n runResult=GenomicSubProcessResult.SUCCESS)\n gen_processed_file = (self.data_generator.\n create_database_genomic_file_processed(runId=gen_job_run.id,\n startTime=clock.CLOCK.now(), filePath='/test_file_path',\n bucketName=bucket_name, fileName='test_file_name'))\n self.data_generator.create_database_genomic_gc_validation_metrics(\n genomicSetMemberId=gen_member.id, genomicFileProcessedId=\n gen_processed_file.id)\n with GenomicJobController(GenomicJob.INGEST_DATA_FILES) as controller:\n controller.ingest_data_files_into_gc_metrics(file_path, bucket_name\n )\n incident = self.incident_dao.get(1)\n self.assertIsNotNone(incident)\n self.assertEqual(incident.code, GenomicIncidentCode.\n UNABLE_TO_FIND_METRIC.name)\n self.assertEqual(incident.data_file_path, file_path)\n self.assertEqual(incident.message,\n 'INGEST_DATA_FILES: Cannot find genomics metric record for sample id: 21042005280'\n )\n\n def test_accession_data_files(self):\n test_bucket_baylor = 'fake-data-bucket-baylor'\n test_idat_file = (\n 'fake-data-bucket-baylor/Genotyping_sample_raw_data/204027270091_R02C01_Grn.idat'\n )\n test_vcf_file = (\n 'fake-data-bucket-baylor/Genotyping_sample_raw_data/204027270091_R02C01.vcf.gz'\n )\n test_cram_file = (\n 'fake-data-bucket-baylor/Wgs_sample_raw_data/CRAMs_CRAIs/BCM_A100134256_21063006771_SIA0017196_1.cram'\n )\n test_files = [test_idat_file, test_vcf_file, test_cram_file]\n test_time = datetime.datetime(2021, 7, 9, 14, 1, 1)\n with clock.FakeClock(test_time):\n for file_path in test_files:\n with GenomicJobController(GenomicJob.ACCESSION_DATA_FILES\n ) as controller:\n controller.accession_data_files(file_path,\n test_bucket_baylor)\n inserted_files = self.data_file_dao.get_all()\n expected_idat = GenomicGcDataFile(id=1, created=test_time, modified\n =test_time, file_path=test_idat_file, gc_site_id='jh',\n bucket_name='fake-data-bucket-baylor', file_prefix=\n 'Genotyping_sample_raw_data', file_name=\n '204027270091_R02C01_Grn.idat', file_type='Grn.idat',\n identifier_type='chipwellbarcode', identifier_value=\n '204027270091_R02C01', ignore_flag=0)\n expected_vcf = GenomicGcDataFile(id=2, created=test_time, modified=\n test_time, file_path=test_vcf_file, gc_site_id='jh',\n bucket_name='fake-data-bucket-baylor', file_prefix=\n 'Genotyping_sample_raw_data', file_name=\n '204027270091_R02C01.vcf.gz', file_type='vcf.gz',\n identifier_type='chipwellbarcode', identifier_value=\n '204027270091_R02C01', ignore_flag=0)\n expected_cram = GenomicGcDataFile(id=3, created=test_time, modified\n =test_time, file_path=test_cram_file, gc_site_id='bcm',\n bucket_name='fake-data-bucket-baylor', file_prefix=\n 'Wgs_sample_raw_data/CRAMs_CRAIs', file_name=\n 'BCM_A100134256_21063006771_SIA0017196_1.cram', file_type=\n 'cram', identifier_type='sample_id', identifier_value=\n '21063006771', ignore_flag=0)\n expected_objs = {(0): expected_idat, (1): expected_vcf, (2):\n expected_cram}\n for i in range(3):\n self.assertEqual(expected_objs[i].bucket_name, inserted_files[i\n ].bucket_name)\n self.assertEqual(expected_objs[i].created, inserted_files[i].\n created)\n self.assertEqual(expected_objs[i].file_name, inserted_files[i].\n file_name)\n 
self.assertEqual(expected_objs[i].file_path, inserted_files[i].\n file_path)\n self.assertEqual(expected_objs[i].file_prefix, inserted_files[i\n ].file_prefix)\n self.assertEqual(expected_objs[i].file_type, inserted_files[i].\n file_type)\n self.assertEqual(expected_objs[i].gc_site_id, inserted_files[i]\n .gc_site_id)\n self.assertEqual(expected_objs[i].id, inserted_files[i].id)\n self.assertEqual(expected_objs[i].identifier_type,\n inserted_files[i].identifier_type)\n self.assertEqual(expected_objs[i].identifier_value,\n inserted_files[i].identifier_value)\n self.assertEqual(expected_objs[i].ignore_flag, inserted_files[i\n ].ignore_flag)\n self.assertEqual(expected_objs[i].metadata, inserted_files[i].\n metadata)\n self.assertEqual(expected_objs[i].modified, inserted_files[i].\n modified)\n\n def test_updating_members_blocklists(self):\n gen_set = self.data_generator.create_database_genomic_set(\n genomicSetName='.', genomicSetCriteria='.', genomicSetVersion=1)\n ids_should_be_updated = []\n for i in range(4):\n ids_should_be_updated.append(self.data_generator.\n create_database_genomic_set_member(genomicSetId=gen_set.id,\n biobankId='100153482', sampleId='21042005280', genomeType=\n 'test_investigation_one' if i & 2 != 0 else 'aou_wgs',\n genomicWorkflowState=GenomicWorkflowState.AW0, ai_an='Y' if\n i & 2 == 0 else 'N').id)\n for i in range(2):\n self.data_generator.create_database_genomic_set_member(genomicSetId\n =gen_set.id, biobankId='100153482', sampleId='21042005280',\n genomeType='aou_array', genomicWorkflowState=\n GenomicWorkflowState.AW0, ai_an='N')\n with GenomicJobController(GenomicJob.UPDATE_MEMBERS_BLOCKLISTS\n ) as controller:\n controller.update_members_blocklists()\n created_members = self.member_dao.get_all()\n blocklisted = list(filter(lambda x: x.blockResults == 1 or x.\n blockResearch == 1, created_members))\n self.assertTrue(ids_should_be_updated.sort() == [obj.id for obj in\n blocklisted].sort())\n self.assertTrue(all(obj.blockResearch == 1 and obj.\n blockResearchReason is not None and obj.blockResearchReason ==\n 'aian' for obj in created_members if obj.ai_an == 'Y' and obj.\n genomicWorkflowState == GenomicWorkflowState.AW0))\n self.assertTrue(all(obj.blockResults == 0 and obj.\n blockResultsReason is None for obj in created_members if obj.\n ai_an == 'Y' and obj.genomicWorkflowState ==\n GenomicWorkflowState.AW0))\n self.assertTrue(all(obj.blockResearch == 1 and obj.\n blockResearchReason is not None and obj.blockResearchReason ==\n 'test_sample_swap' for obj in created_members if obj.genomeType ==\n 'test_investigation_one' and obj.genomicWorkflowState ==\n GenomicWorkflowState.AW0))\n self.assertTrue(all(obj.blockResults == 1 and obj.\n blockResultsReason is not None and obj.blockResultsReason ==\n 'test_sample_swap' for obj in created_members if obj.genomeType ==\n 'test_investigation_one' and obj.genomicWorkflowState ==\n GenomicWorkflowState.AW0))\n self.assertTrue(all(obj.blockResearch == 0 and obj.\n blockResearchReason is None for obj in created_members if obj.\n genomeType == 'aou_array' and obj.genomicWorkflowState ==\n GenomicWorkflowState.AW0))\n self.assertTrue(all(obj.blockResults == 0 and obj.\n blockResultsReason is None for obj in created_members if obj.\n genomeType == 'aou_array' and obj.genomicWorkflowState ==\n GenomicWorkflowState.AW0))\n with self.member_dao.session() as session:\n session.query(GenomicSetMember).delete()\n run_result = self.job_run_dao.get(1)\n self.assertEqual(run_result.runStatus, GenomicSubProcessStatus.\n 
COMPLETED)\n self.assertEqual(run_result.runResult, GenomicSubProcessResult.SUCCESS)\n for i in range(4):\n self.data_generator.create_database_genomic_set_member(genomicSetId\n =gen_set.id, biobankId='100153482', sampleId='21042005280',\n genomeType='test_investigation_one' if i & 2 != 0 else\n 'aou_wgs', genomicWorkflowState=GenomicWorkflowState.AW1,\n ai_an='Y' if i & 2 == 0 else 'N')\n with GenomicJobController(GenomicJob.UPDATE_MEMBERS_BLOCKLISTS\n ) as controller:\n controller.update_members_blocklists()\n modified_members = self.member_dao.get_all()\n self.assertTrue(all(obj.blockResearch == 1 and obj.\n blockResearchReason is not None and obj.blockResearchReason ==\n 'aian' for obj in modified_members if obj.ai_an == 'Y' and obj.\n genomicWorkflowState == GenomicWorkflowState.AW1))\n self.assertTrue(all(obj.blockResults == 0 and obj.\n blockResultsReason is None for obj in modified_members if obj.\n ai_an == 'Y' and obj.genomicWorkflowState ==\n GenomicWorkflowState.AW1))\n self.assertTrue(all(obj.blockResearch == 1 and obj.\n blockResearchReason is not None and obj.blockResearchReason ==\n 'test_sample_swap' for obj in modified_members if obj.\n genomeType == 'test_investigation_one' and obj.\n genomicWorkflowState == GenomicWorkflowState.AW1))\n self.assertTrue(all(obj.blockResults == 1 and obj.\n blockResultsReason is not None and obj.blockResultsReason ==\n 'test_sample_swap' for obj in modified_members if obj.\n genomeType == 'test_investigation_one' and obj.\n genomicWorkflowState == GenomicWorkflowState.AW1))\n run_result = self.job_run_dao.get(2)\n self.assertEqual(run_result.runStatus, GenomicSubProcessStatus.\n COMPLETED)\n self.assertEqual(run_result.runResult, GenomicSubProcessResult.SUCCESS)\n\n def test_ingest_user_metrics_file(self):\n test_file = 'Genomic-Metrics-File-User-Events-Test.csv'\n bucket_name = 'test_bucket'\n sub_folder = 'user_events'\n pids = []\n file_ingester = GenomicFileIngester()\n for _ in range(2):\n pid = self.data_generator.create_database_participant()\n pids.append(pid.participantId)\n test_metrics_file = create_ingestion_test_file(test_file,\n bucket_name, sub_folder)\n test_file_path = f'{bucket_name}/{sub_folder}/{test_metrics_file}'\n with open_cloud_file(test_file_path) as csv_file:\n metrics_to_ingest = file_ingester._read_data_to_ingest(csv_file)\n with GenomicJobController(GenomicJob.METRICS_FILE_INGEST\n ) as controller:\n controller.ingest_metrics_file(metric_type='user_events',\n file_path=test_file_path)\n job_run_id = controller.job_run.id\n metrics = self.user_event_metrics_dao.get_all()\n for pid in pids:\n file_metrics = list(filter(lambda x: int(x['participant_id'].\n split('P')[-1]) == pid, metrics_to_ingest['rows']))\n participant_ingested_metrics = list(filter(lambda x: x.\n participant_id == pid, metrics))\n self.assertEqual(len(file_metrics), len(\n participant_ingested_metrics))\n self.assertTrue(all(obj.run_id == job_run_id for obj in\n participant_ingested_metrics))\n\n @mock.patch(\n 'rdr_service.genomic.genomic_job_controller.GenomicJobController.execute_cloud_task'\n )\n def test_reconcile_pdr_data(self, mock_cloud_task):\n with GenomicJobController(GenomicJob.RECONCILE_PDR_DATA) as controller:\n controller.reconcile_pdr_data()\n cloud_task_endpoint = 'rebuild_genomic_table_records_task'\n first_run = self.job_run_dao.get_all()\n self.assertEqual(mock_cloud_task.call_count, 1)\n call_args = mock_cloud_task.call_args_list\n self.assertEqual(len(call_args), 1)\n self.assertEqual(call_args[0].args[0]['table'], 
self.job_run_dao.\n model_type.__tablename__)\n self.assertTrue(type(call_args[0].args[0]['ids']) is list)\n self.assertEqual(call_args[0].args[0]['ids'], [obj.id for obj in\n first_run])\n self.assertEqual(call_args[0].args[1], cloud_task_endpoint)\n participant = self.data_generator.create_database_participant()\n gen_set = self.data_generator.create_database_genomic_set(\n genomicSetName='.', genomicSetCriteria='.', genomicSetVersion=1)\n plus_ten = clock.CLOCK.now() + datetime.timedelta(minutes=10)\n plus_ten = plus_ten.replace(microsecond=0)\n with FakeClock(plus_ten):\n for i in range(2):\n gen_member = (self.data_generator.\n create_database_genomic_set_member(genomicSetId=gen_set\n .id, biobankId='100153482', sampleId='21042005280',\n genomeType='aou_wgs', genomicWorkflowState=\n GenomicWorkflowState.AW1))\n gen_processed_file = (self.data_generator.\n create_database_genomic_file_processed(runId=first_run[\n 0].id, startTime=clock.CLOCK.now(), filePath=\n f'test_file_path_{i}', bucketName='test_bucket',\n fileName='test_file_name'))\n self.data_generator.create_database_genomic_gc_validation_metrics(\n genomicSetMemberId=gen_member.id,\n genomicFileProcessedId=gen_processed_file.id)\n manifest = (self.data_generator.\n create_database_genomic_manifest_file(manifestTypeId=2,\n filePath=f'test_file_path_{i}'))\n self.data_generator.create_database_genomic_manifest_feedback(\n inputManifestFileId=manifest.id, feedbackRecordCount=2)\n self.data_generator.create_database_genomic_user_event_metrics(\n participant_id=participant.participantId, event_name=\n 'test_event', run_id=1)\n self.data_generator.create_database_genomic_informing_loop(\n message_record_id=1, event_type=\n 'informing_loop_decision', module_type='gem',\n participant_id=participant.participantId,\n decision_value='maybe_later', event_authored_time=clock\n .CLOCK.now())\n self.data_generator.create_database_genomic_cvl_past_due(\n cvl_site_id='co', email_notification_sent=0, sample_id=\n 'sample_test', results_type='hdr',\n genomic_set_member_id=gen_member.id)\n self.data_generator.create_database_genomic_appointment(\n message_record_id=i, appointment_id=i, event_type=\n 'appointment_scheduled', module_type='hdr',\n participant_id=participant.participantId,\n event_authored_time=clock.CLOCK.now(), source='Color',\n appointment_timestamp=format_datetime(clock.CLOCK.now()\n ), appointment_timezone='America/Los_Angeles', location\n ='123 address st', contact_number='17348675309',\n language='en')\n self.data_generator.create_database_genomic_member_report_state(\n genomic_set_member_id=gen_member.id, participant_id=\n participant.participantId, module='gem',\n genomic_report_state=GenomicReportState.GEM_RPT_READY,\n event_authored_time=clock.CLOCK.now())\n self.data_generator.create_genomic_result_viewed(participant_id\n =participant.participantId, event_type='result_viewed',\n event_authored_time=clock.CLOCK.now(), module_type=\n 'gem', sample_id=gen_member.sampleId)\n with GenomicJobController(GenomicJob.RECONCILE_PDR_DATA) as controller:\n controller.reconcile_pdr_data()\n affected_tables = ['genomic_set', 'genomic_set_member',\n 'genomic_job_run', 'genomic_file_processed',\n 'genomic_gc_validation_metrics', 'genomic_manifest_file',\n 'genomic_manifest_feedback', 'genomic_informing_loop',\n 'genomic_cvl_results_past_due', 'user_event_metrics',\n 'genomic_member_report_state', 'genomic_result_viewed',\n 'genomic_appointment_event']\n num_calls = len(affected_tables) + 1\n self.assertEqual(mock_cloud_task.call_count, 
num_calls)\n call_args = mock_cloud_task.call_args_list\n self.assertEqual(len(call_args), num_calls)\n mock_tables = set([obj[0][0]['table'] for obj in call_args])\n mock_endpoint = [obj[0][1] for obj in call_args]\n self.assertTrue([mock_tables].sort() == affected_tables.sort())\n self.assertTrue(all(obj for obj in mock_endpoint if obj ==\n cloud_task_endpoint))\n\n @mock.patch(\n 'rdr_service.genomic.genomic_job_controller.GenomicJobController.execute_cloud_task'\n )\n def test_retry_manifest_ingestions_if_deltas(self, mock_cloud_task):\n bucket_name = 'test-bucket'\n aw1_file_name = (\n 'AW1_wgs_sample_manifests/RDR_AoU_SEQ_PKG-2104-026571.csv')\n aw1_manifest_path = f'{bucket_name}/{aw1_file_name}'\n aw2_file_name = (\n 'AW2_wgs_data_manifests/RDR_AoU_SEQ_DataManifest_04092021.csv')\n aw2_manifest_path = f'{bucket_name}/{aw2_file_name}'\n gen_set = self.data_generator.create_database_genomic_set(\n genomicSetName='.', genomicSetCriteria='.', genomicSetVersion=1)\n aw1_job_run = self.data_generator.create_database_genomic_job_run(jobId\n =GenomicJob.AW1_MANIFEST, startTime=clock.CLOCK.now(), endTime=\n clock.CLOCK.now(), runResult=GenomicSubProcessResult.SUCCESS)\n aw2_job_run = self.data_generator.create_database_genomic_job_run(jobId\n =GenomicJob.METRICS_INGESTION, startTime=clock.CLOCK.now(),\n endTime=clock.CLOCK.now(), runResult=GenomicSubProcessResult.\n SUCCESS)\n with GenomicJobController(GenomicJob.RETRY_MANIFEST_INGESTIONS\n ) as controller:\n controller.retry_manifest_ingestions()\n job_run = self.job_run_dao.get(3)\n self.assertEqual(job_run.jobId, GenomicJob.RETRY_MANIFEST_INGESTIONS)\n self.assertEqual(job_run.runStatus, GenomicSubProcessStatus.COMPLETED)\n self.assertEqual(job_run.runResult, GenomicSubProcessResult.NO_FILES)\n self.assertEqual(mock_cloud_task.call_count, 0)\n self.assertFalse(mock_cloud_task.call_count)\n self.data_generator.create_database_genomic_aw1_raw(file_path=\n aw1_manifest_path, package_id='PKG-2104-026571', biobank_id=\n 'A10001')\n self.data_generator.create_database_genomic_aw2_raw(file_path=\n aw2_manifest_path, biobank_id='A10001', sample_id='100001',\n biobankidsampleid='A10001_100001')\n aw1_manifest_file = (self.data_generator.\n create_database_genomic_manifest_file(created=clock.CLOCK.now(),\n modified=clock.CLOCK.now(), uploadDate=clock.CLOCK.now(),\n manifestTypeId=GenomicManifestTypes.AW1, filePath=\n aw1_manifest_path, fileName=aw1_file_name, bucketName=\n bucket_name, recordCount=1, rdrProcessingComplete=1,\n rdrProcessingCompleteDate=clock.CLOCK.now()))\n aw2_manifest_file = (self.data_generator.\n create_database_genomic_manifest_file(created=clock.CLOCK.now(),\n modified=clock.CLOCK.now(), uploadDate=clock.CLOCK.now(),\n manifestTypeId=GenomicManifestTypes.AW2, filePath=\n aw2_manifest_path, fileName=aw2_file_name, bucketName=\n bucket_name, recordCount=1, rdrProcessingComplete=1,\n rdrProcessingCompleteDate=clock.CLOCK.now()))\n aw1_file_processed = (self.data_generator.\n create_database_genomic_file_processed(runId=aw1_job_run.id,\n startTime=clock.CLOCK.now(), genomicManifestFileId=\n aw1_manifest_file.id, filePath=f'/{aw1_manifest_path}',\n bucketName=bucket_name, fileName=aw1_file_name))\n aw2_file_processed = (self.data_generator.\n create_database_genomic_file_processed(runId=aw2_job_run.id,\n startTime=clock.CLOCK.now(), genomicManifestFileId=\n aw2_manifest_file.id, filePath=f'/{aw2_manifest_path}',\n bucketName=bucket_name, fileName=aw2_file_name))\n gen_member = 
self.data_generator.create_database_genomic_set_member(\n genomicSetId=gen_set.id, biobankId='100153482', sampleId=\n '21042005280', genomeType='aou_wgs', genomicWorkflowState=\n GenomicWorkflowState.AW1, aw1FileProcessedId=aw1_file_processed.id)\n self.data_generator.create_database_genomic_gc_validation_metrics(\n genomicSetMemberId=gen_member.id, genomicFileProcessedId=\n aw2_file_processed.id)\n with GenomicJobController(GenomicJob.RETRY_MANIFEST_INGESTIONS\n ) as controller:\n controller.retry_manifest_ingestions()\n job_run = self.job_run_dao.get(4)\n self.assertEqual(job_run.jobId, GenomicJob.RETRY_MANIFEST_INGESTIONS)\n self.assertEqual(job_run.runStatus, GenomicSubProcessStatus.COMPLETED)\n self.assertEqual(job_run.runResult, GenomicSubProcessResult.NO_FILES)\n self.assertEqual(mock_cloud_task.call_count, 0)\n self.assertFalse(mock_cloud_task.call_count)\n with self.member_dao.session() as session:\n session.query(GenomicGCValidationMetrics).delete()\n session.query(GenomicSetMember).delete()\n with GenomicJobController(GenomicJob.RETRY_MANIFEST_INGESTIONS\n ) as controller:\n controller.retry_manifest_ingestions()\n job_run = self.job_run_dao.get(5)\n self.assertEqual(job_run.jobId, GenomicJob.RETRY_MANIFEST_INGESTIONS)\n self.assertEqual(job_run.runStatus, GenomicSubProcessStatus.COMPLETED)\n self.assertEqual(job_run.runResult, GenomicSubProcessResult.SUCCESS)\n self.assertEqual(mock_cloud_task.call_count, 2)\n self.assertTrue(mock_cloud_task.call_count)\n call_args = mock_cloud_task.call_args_list\n self.assertEqual(len(call_args), 2)\n cloud_task_endpoint = ['ingest_aw1_manifest_task',\n 'ingest_aw2_manifest_task']\n mock_endpoint = [obj[0][1] for obj in call_args]\n self.assertTrue(all(obj for obj in mock_endpoint if obj ==\n cloud_task_endpoint))\n mock_buckets = set([obj[0][0]['bucket_name'] for obj in call_args])\n self.assertTrue(len(mock_buckets), 1)\n self.assertTrue(list(mock_buckets)[0] == bucket_name)\n\n def test_calculate_informing_loop_ready_flags(self):\n num_participants = 4\n gen_set = self.data_generator.create_database_genomic_set(\n genomicSetName='.', genomicSetCriteria='.', genomicSetVersion=1)\n for num in range(num_participants):\n plus_num = clock.CLOCK.now() + datetime.timedelta(minutes=num)\n plus_num = plus_num.replace(microsecond=0)\n with FakeClock(plus_num):\n summary = (self.data_generator.\n create_database_participant_summary(\n consentForStudyEnrollment=1, consentForGenomicsROR=1))\n stored_sample = (self.data_generator.\n create_database_biobank_stored_sample(biobankId=summary\n .biobankId, biobankOrderIdentifier=self.fake.pyint()))\n collection_site = self.data_generator.create_database_site(\n siteType='Clinic')\n order = self.data_generator.create_database_biobank_order(\n collectedSiteId=collection_site.siteId, participantId=\n summary.participantId, finalizedTime=plus_num)\n self.data_generator.create_database_biobank_order_identifier(\n value=stored_sample.biobankOrderIdentifier,\n biobankOrderId=order.biobankOrderId, system='1')\n self.data_generator.create_database_biobank_order_identifier(\n value=stored_sample.biobankOrderIdentifier,\n biobankOrderId=order.biobankOrderId, system='2')\n member = (self.data_generator.\n create_database_genomic_set_member(genomicSetId=gen_set\n .id, participantId=summary.participantId, genomeType=\n config.GENOME_TYPE_WGS, qcStatus=GenomicQcStatus.PASS,\n gcManifestSampleSource='Whole Blood', collectionTubeId=\n stored_sample.biobankStoredSampleId))\n 
self.data_generator.create_database_genomic_gc_validation_metrics(\n genomicSetMemberId=member.id, sexConcordance='True',\n drcFpConcordance='Pass', drcSexConcordance='Pass',\n processingStatus='Pass')\n members_for_ready_loop = (self.member_dao.\n get_members_for_informing_loop_ready())\n self.assertEqual(len(members_for_ready_loop), num_participants)\n current_set_members = self.member_dao.get_all()\n self.assertTrue(all(obj.informingLoopReadyFlag == 0 for obj in\n current_set_members))\n self.assertTrue(all(obj.informingLoopReadyFlagModified is None for\n obj in current_set_members))\n with GenomicJobController(GenomicJob.CALCULATE_INFORMING_LOOP_READY\n ) as controller:\n controller.calculate_informing_loop_ready_flags()\n members_for_ready_loop = (self.member_dao.\n get_members_for_informing_loop_ready())\n self.assertEqual(len(members_for_ready_loop), num_participants)\n calculation_limit = 2\n config.override_setting(config.CALCULATE_READY_FLAG_LIMIT, [\n calculation_limit])\n with GenomicJobController(GenomicJob.CALCULATE_INFORMING_LOOP_READY\n ) as controller:\n controller.calculate_informing_loop_ready_flags()\n current_set_members = self.member_dao.get_all()\n self.assertTrue(any(obj.informingLoopReadyFlag == 1 for obj in\n current_set_members))\n self.assertTrue(any(obj.informingLoopReadyFlagModified is not None for\n obj in current_set_members))\n current_loops_set = [obj for obj in current_set_members if obj.\n informingLoopReadyFlag == 1 and obj.\n informingLoopReadyFlagModified is not None]\n self.assertEqual(len(current_loops_set), calculation_limit)\n members_for_ready_loop = (self.member_dao.\n get_members_for_informing_loop_ready())\n self.assertEqual(len(members_for_ready_loop), num_participants // 2)\n with GenomicJobController(GenomicJob.CALCULATE_INFORMING_LOOP_READY\n ) as controller:\n controller.calculate_informing_loop_ready_flags()\n current_set_members = self.member_dao.get_all()\n self.assertTrue(all(obj.informingLoopReadyFlag == 1 for obj in\n current_set_members))\n self.assertTrue(all(obj.informingLoopReadyFlagModified is not None for\n obj in current_set_members))\n members_for_ready_loop = (self.member_dao.\n get_members_for_informing_loop_ready())\n self.assertEqual(len(members_for_ready_loop), 0)\n\n @mock.patch('rdr_service.services.email_service.EmailService.send_email')\n def test_getting_results_withdrawn(self, email_mock):\n num_participants = 4\n result_withdrawal_dao = GenomicResultWithdrawalsDao()\n gen_set = self.data_generator.create_database_genomic_set(\n genomicSetName='.', genomicSetCriteria='.', genomicSetVersion=1)\n gen_job_run = self.data_generator.create_database_genomic_job_run(jobId\n =GenomicJob.AW1_MANIFEST, startTime=clock.CLOCK.now(),\n runResult=GenomicSubProcessResult.SUCCESS)\n pids = []\n for num in range(num_participants):\n summary = self.data_generator.create_database_participant_summary(\n consentForStudyEnrollment=1, consentForGenomicsROR=1,\n withdrawalStatus=WithdrawalStatus.EARLY_OUT)\n self.data_generator.create_database_genomic_set_member(genomicSetId\n =gen_set.id, participantId=summary.participantId,\n genomeType=config.GENOME_TYPE_ARRAY, gemA1ManifestJobRunId=\n gen_job_run.id if num % 2 == 0 else None)\n self.data_generator.create_database_genomic_set_member(genomicSetId\n =gen_set.id, participantId=summary.participantId,\n genomeType=config.GENOME_TYPE_WGS, cvlW1ilHdrJobRunId=\n gen_job_run.id)\n pids.append(summary.participantId)\n config.override_setting(config.RDR_GENOMICS_NOTIFICATION_EMAIL,\n 
'email@test.com')\n with GenomicJobController(GenomicJob.RESULTS_PIPELINE_WITHDRAWALS\n ) as controller:\n controller.check_results_withdrawals()\n self.assertEqual(email_mock.call_count, 2)\n call_args = email_mock.call_args_list\n self.assertTrue(any('GEM' in call.args[0].subject for call in\n call_args))\n self.assertTrue(any('HEALTH' in call.args[0].subject for call in\n call_args))\n job_runs = self.job_run_dao.get_all()\n current_job_run = list(filter(lambda x: x.jobId == GenomicJob.\n RESULTS_PIPELINE_WITHDRAWALS, job_runs))[0]\n self.assertTrue(current_job_run.runResult ==\n GenomicSubProcessResult.SUCCESS)\n all_withdrawal_records = result_withdrawal_dao.get_all()\n self.assertTrue(len(all_withdrawal_records) == len(pids))\n self.assertTrue(all(obj.participant_id in pids for obj in\n all_withdrawal_records))\n array_results = list(filter(lambda x: x.array_results == 1,\n all_withdrawal_records))\n self.assertTrue(len(array_results), 2)\n cvl_results = list(filter(lambda x: x.cvl_results == 1,\n all_withdrawal_records))\n self.assertTrue(len(cvl_results), num_participants)\n with GenomicJobController(GenomicJob.RESULTS_PIPELINE_WITHDRAWALS\n ) as controller:\n controller.check_results_withdrawals()\n self.assertEqual(email_mock.call_count, 2)\n job_runs = self.job_run_dao.get_all()\n current_job_run = list(filter(lambda x: x.jobId == GenomicJob.\n RESULTS_PIPELINE_WITHDRAWALS, job_runs))[1]\n self.assertTrue(current_job_run.runResult ==\n GenomicSubProcessResult.NO_RESULTS)\n\n def test_gem_results_to_report_state(self):\n num_participants = 8\n gen_set = self.data_generator.create_database_genomic_set(\n genomicSetName='.', genomicSetCriteria='.', genomicSetVersion=1)\n gem_a2_job_run = self.data_generator.create_database_genomic_job_run(\n jobId=GenomicJob.GEM_A2_MANIFEST, startTime=clock.CLOCK.now(),\n runResult=GenomicSubProcessResult.SUCCESS)\n pids_to_update, member_ids = [], []\n for num in range(num_participants):\n summary = self.data_generator.create_database_participant_summary(\n consentForStudyEnrollment=1, consentForGenomicsROR=1,\n withdrawalStatus=WithdrawalStatus.EARLY_OUT)\n member = self.data_generator.create_database_genomic_set_member(\n genomicSetId=gen_set.id, participantId=summary.\n participantId, genomeType=config.GENOME_TYPE_ARRAY)\n if num % 2 == 0:\n member_ids.append(member.id)\n pids_to_update.append(summary.participantId)\n with GenomicJobController(GenomicJob.GEM_RESULT_REPORTS) as controller:\n controller.gem_results_to_report_state()\n current_job_runs = self.job_run_dao.get_all()\n self.assertEqual(len(current_job_runs), 2)\n current_job_run = list(filter(lambda x: x.jobId == GenomicJob.\n GEM_RESULT_REPORTS, current_job_runs))[0]\n self.assertTrue(current_job_run.runResult ==\n GenomicSubProcessResult.NO_RESULTS)\n current_members = self.member_dao.get_all()\n for member in current_members:\n if member.participantId in pids_to_update:\n member.gemA2ManifestJobRunId = gem_a2_job_run.id\n member.genomicWorkflowState = (GenomicWorkflowState.\n GEM_RPT_READY)\n self.member_dao.update(member)\n with GenomicJobController(GenomicJob.GEM_RESULT_REPORTS) as controller:\n controller.gem_results_to_report_state()\n current_job_runs = self.job_run_dao.get_all()\n self.assertEqual(len(current_job_runs), 3)\n current_job_run = list(filter(lambda x: x.jobId == GenomicJob.\n GEM_RESULT_REPORTS, current_job_runs))[1]\n self.assertTrue(current_job_run.runResult ==\n GenomicSubProcessResult.SUCCESS)\n current_gem_report_states = self.report_state_dao.get_all()\n 
self.assertEqual(len(current_gem_report_states), len(pids_to_update))\n self.assertTrue(all(obj.event_type == 'result_ready' for obj in\n current_gem_report_states))\n self.assertTrue(all(obj.event_authored_time is not None for obj in\n current_gem_report_states))\n self.assertTrue(all(obj.module == 'gem' for obj in\n current_gem_report_states))\n self.assertTrue(all(obj.genomic_report_state == GenomicReportState.\n GEM_RPT_READY for obj in current_gem_report_states))\n self.assertTrue(all(obj.genomic_report_state_str ==\n GenomicReportState.GEM_RPT_READY.name for obj in\n current_gem_report_states))\n self.assertTrue(all(obj.genomic_set_member_id in member_ids for obj in\n current_gem_report_states))\n with GenomicJobController(GenomicJob.GEM_RESULT_REPORTS) as controller:\n controller.gem_results_to_report_state()\n current_job_runs = self.job_run_dao.get_all()\n self.assertEqual(len(current_job_runs), 4)\n current_job_run = list(filter(lambda x: x.jobId == GenomicJob.\n GEM_RESULT_REPORTS, current_job_runs))[2]\n self.assertTrue(current_job_run.runResult ==\n GenomicSubProcessResult.NO_RESULTS)\n self.clear_table_after_test('genomic_member_report_state')\n\n def test_reconcile_informing_loop(self):\n event_dao = UserEventMetricsDao()\n event_dao.truncate()\n il_dao = GenomicInformingLoopDao()\n for pid in range(8):\n self.data_generator.create_database_participant(participantId=1 +\n pid, biobankId=1 + pid)\n self.data_generator.create_database_genomic_job_run(jobId=\n GenomicJob.METRICS_FILE_INGEST, startTime=clock.CLOCK.now())\n self.data_generator.create_database_genomic_set(genomicSetName=\n 'test', genomicSetCriteria='.', genomicSetVersion=1)\n for b in ['aou_array', 'aou_wgs']:\n for i in range(1, 9):\n self.data_generator.create_database_genomic_set_member(\n participantId=i, genomicSetId=1, biobankId=i,\n collectionTubeId=100 + i, sampleId=10 + i, genomeType=b)\n events = ['gem.informing_loop.started',\n 'gem.informing_loop.screen8_no',\n 'gem.informing_loop.screen8_yes', 'hdr.informing_loop.started',\n 'gem.informing_loop.screen3', 'pgx.informing_loop.screen8_no',\n 'hdr.informing_loop.screen10_no']\n for p in range(4):\n for i in range(len(events)):\n self.data_generator.create_database_genomic_user_event_metrics(\n created=clock.CLOCK.now(), modified=clock.CLOCK.now(),\n participant_id=p + 1, created_at=datetime.datetime(2021,\n 12, 29, 0) + datetime.timedelta(hours=i), event_name=\n events[i], run_id=1, ignore_flag=0)\n decisions = [None, 'no', 'yes']\n for p in range(3):\n for i in range(2):\n self.data_generator.create_database_genomic_informing_loop(\n message_record_id=i, event_type=\n 'informing_loop_started' if i == 0 else\n 'informing_loop_decision', module_type='gem',\n participant_id=p + 1, decision_value=decisions[i],\n sample_id=100 + p, event_authored_time=datetime.\n datetime(2021, 12, 29, 0) + datetime.timedelta(hours=i))\n self.data_generator.create_database_genomic_user_event_metrics(created\n =clock.CLOCK.now(), modified=clock.CLOCK.now(), participant_id=\n 6, created_at=datetime.datetime(2021, 12, 29, 0), event_name=\n 'gem.informing_loop.screen8_yes', run_id=1, ignore_flag=0)\n genomic_pipeline.reconcile_informing_loop_responses()\n pid_list = [1, 2, 3, 6]\n new_il_values = il_dao.get_latest_il_for_pids(pid_list=pid_list,\n module='gem')\n for value in new_il_values:\n self.assertEqual('yes', value.decision_value)\n pid_list = [1, 2, 3, 4]\n for module in ['hdr', 'pgx']:\n new_il_values = il_dao.get_latest_il_for_pids(pid_list=pid_list,\n module=module)\n 
for value in new_il_values:\n self.assertEqual('no', value.decision_value)\n self.assertIsNotNone(value.created_from_metric_id)\n\n def test_reconcile_message_broker_results_ready(self):\n self.data_generator.create_database_genomic_set(genomicSetName=\n 'test', genomicSetCriteria='.', genomicSetVersion=1)\n self.data_generator.create_database_genomic_job_run(jobId=\n GenomicJob.METRICS_FILE_INGEST, startTime=clock.CLOCK.now())\n for pid in range(7):\n self.data_generator.create_database_participant(participantId=1 +\n pid, biobankId=1 + pid)\n for i in range(1, 6):\n self.data_generator.create_database_genomic_set_member(\n participantId=i, genomicSetId=1, biobankId=i,\n collectionTubeId=100 + i, sampleId=10 + i, genomeType='aou_wgs'\n )\n if i < 4:\n self.data_generator.create_database_genomic_user_event_metrics(\n participant_id=i, created_at=datetime.datetime(2022, 10,\n 6, 0), event_name='pgx.result_ready', run_id=1)\n if i == 4:\n self.data_generator.create_database_genomic_user_event_metrics(\n participant_id=i, created_at=datetime.datetime(2022, 10,\n 6, 0), event_name='hdr.result_ready.informative', run_id=1)\n if i == 5:\n self.data_generator.create_database_genomic_user_event_metrics(\n participant_id=i, created_at=datetime.datetime(2022, 10,\n 6, 0), event_name='hdr.result_ready.uninformative',\n run_id=1)\n genomic_cvl_pipeline.reconcile_message_broker_results_ready()\n report_state_dao = GenomicMemberReportStateDao()\n states = report_state_dao.get_all()\n self.assertEqual(5, len(states))\n pgx_records = [rec for rec in states if rec.module == 'pgx_v1']\n hdr_record_uninf = [rec for rec in states if rec.\n genomic_report_state == GenomicReportState.HDR_RPT_UNINFORMATIVE][0\n ]\n hdr_record_pos = [rec for rec in states if rec.genomic_report_state ==\n GenomicReportState.HDR_RPT_POSITIVE][0]\n for pgx_record in pgx_records:\n self.assertEqual(GenomicReportState.PGX_RPT_READY, pgx_record.\n genomic_report_state)\n self.assertEqual('PGX_RPT_READY', pgx_record.\n genomic_report_state_str)\n self.assertEqual(int(pgx_record.sample_id), pgx_record.\n participant_id + 10)\n self.assertEqual('result_ready', pgx_record.event_type)\n self.assertEqual(datetime.datetime(2022, 10, 6, 0), pgx_record.\n event_authored_time)\n self.assertIsNotNone(pgx_record.created_from_metric_id)\n self.assertEqual('HDR_RPT_UNINFORMATIVE', hdr_record_uninf.\n genomic_report_state_str)\n self.assertEqual(int(hdr_record_uninf.sample_id), hdr_record_uninf.\n participant_id + 10)\n self.assertEqual('result_ready', hdr_record_uninf.event_type)\n self.assertEqual(datetime.datetime(2022, 10, 6, 0),\n hdr_record_uninf.event_authored_time)\n self.assertIsNotNone(hdr_record_uninf.created_from_metric_id)\n self.assertEqual('HDR_RPT_POSITIVE', hdr_record_pos.\n genomic_report_state_str)\n self.assertEqual(int(hdr_record_pos.sample_id), hdr_record_pos.\n participant_id + 10)\n self.assertEqual('result_ready', hdr_record_pos.event_type)\n self.assertEqual(datetime.datetime(2022, 10, 6, 0), hdr_record_pos.\n event_authored_time)\n self.assertIsNotNone(hdr_record_pos.created_from_metric_id)\n\n def test_reconcile_message_broker_results_viewed(self):\n self.data_generator.create_database_genomic_set(genomicSetName=\n 'test', genomicSetCriteria='.', genomicSetVersion=1)\n self.data_generator.create_database_genomic_job_run(jobId=\n GenomicJob.METRICS_FILE_INGEST, startTime=clock.CLOCK.now())\n for pid in range(3):\n self.data_generator.create_database_participant(participantId=1 +\n pid, biobankId=1 + pid)\n for i in range(1, 
3):\n self.data_generator.create_database_genomic_set_member(\n participantId=i, genomicSetId=1, biobankId=i,\n collectionTubeId=100 + i, sampleId=10 + i, genomeType='aou_wgs'\n )\n if i == 1:\n self.data_generator.create_database_genomic_user_event_metrics(\n participant_id=i, created_at=datetime.datetime(2022, 10,\n 6, 0), event_name='pgx.opened_at', run_id=1)\n if i == 2:\n self.data_generator.create_database_genomic_user_event_metrics(\n participant_id=i, created_at=datetime.datetime(2022, 10,\n 6, 0), event_name='hdr.opened_at', run_id=1)\n genomic_cvl_pipeline.reconcile_message_broker_results_viewed()\n result_viewed_dao = GenomicResultViewedDao()\n results = result_viewed_dao.get_all()\n self.assertEqual(2, len(results))\n for record in results:\n if record.participant_id == 1:\n self.assertEqual('pgx_v1', record.module_type)\n else:\n self.assertEqual('hdr_v1', record.module_type)\n self.assertEqual(int(record.sample_id), record.participant_id + 10)\n self.assertEqual('result_viewed', record.event_type)\n self.assertEqual(datetime.datetime(2022, 10, 6, 0), record.\n first_viewed)\n self.assertIsNotNone(record.created_from_metric_id)\n\n def test_ingest_appointment_metrics_file(self):\n test_file = 'Genomic-Metrics-File-Appointment-Events-Test.json'\n bucket_name = 'test_bucket'\n sub_folder = 'appointment_events'\n pids = []\n for _ in range(4):\n summary = self.data_generator.create_database_participant_summary()\n pids.append(summary.participantId)\n test_file_path = f'{bucket_name}/{sub_folder}/{test_file}'\n appointment_data = test_data.load_test_data_json(\n 'Genomic-Metrics-File-Appointment-Events-Test.json')\n appointment_data_str = json.dumps(appointment_data, indent=4)\n with open_cloud_file(test_file_path, mode='wb') as cloud_file:\n cloud_file.write(appointment_data_str.encode('utf-8'))\n with GenomicJobController(GenomicJob.APPOINTMENT_METRICS_FILE_INGEST\n ) as controller:\n controller.ingest_appointment_metrics_file(file_path=test_file_path\n )\n all_metrics = self.appointment_metrics_dao.get_all()\n self.assertEqual(len(all_metrics), 5)\n self.assertTrue(all(obj.participant_id in pids for obj in all_metrics))\n self.assertTrue(all(obj.file_path == test_file_path for obj in\n all_metrics))\n self.assertTrue(all(obj.appointment_event is not None for obj in\n all_metrics))\n self.assertTrue(all(obj.created is not None for obj in all_metrics))\n self.assertTrue(all(obj.modified is not None for obj in all_metrics))\n self.assertTrue(all(obj.module_type is not None for obj in all_metrics)\n )\n self.assertTrue(all(obj.event_authored_time is not None for obj in\n all_metrics))\n self.assertTrue(all(obj.event_type is not None for obj in all_metrics))\n current_job_runs = self.job_run_dao.get_all()\n self.assertEqual(len(current_job_runs), 1)\n current_job_run = current_job_runs[0]\n self.assertTrue(current_job_run.jobId == GenomicJob.\n APPOINTMENT_METRICS_FILE_INGEST)\n self.assertTrue(current_job_run.runResult ==\n GenomicSubProcessResult.SUCCESS)\n self.clear_table_after_test('genomic_appointment_event_metrics')\n\n def test_reconcile_appointments_with_metrics(self):\n fake_date = parser.parse('2020-05-29T08:00:01-05:00')\n for num in range(4):\n summary = self.data_generator.create_database_participant_summary()\n missing_json = {'event': 'appointment_updated',\n 'eventAuthoredTime': '2022-09-16T17:18:38Z',\n 'participantId': f'P{summary.participantId}', 'messageBody':\n {'module_type': 'hdr', 'appointment_timestamp':\n '2022-09-19T19:30:00+00:00', 'id': 55,\n 
'appointment_timezone': 'America/Los_Angeles', 'location':\n 'CA', 'contact_number': '18043704252', 'language': 'en',\n 'source': 'Color'}}\n if num % 2 == 0:\n self.data_generator.create_database_genomic_appointment(\n message_record_id=num, appointment_id=num, event_type=\n 'appointment_scheduled', module_type='hdr',\n participant_id=summary.participantId,\n event_authored_time=fake_date, source='Color',\n appointment_timestamp=format_datetime(clock.CLOCK.now()\n ), appointment_timezone='America/Los_Angeles', location\n ='123 address st', contact_number='17348675309',\n language='en')\n self.data_generator.create_database_genomic_appointment_metric(\n participant_id=summary.participantId, appointment_event=\n json.dumps(missing_json, indent=4) if num % 2 != 0 else\n 'foo', file_path='test_file_path', module_type='hdr',\n event_authored_time=fake_date, event_type=\n 'appointment_updated' if num % 2 != 0 else\n 'appointment_scheduled')\n current_events = self.appointment_event_dao.get_all()\n self.assertEqual(len(current_events), 2)\n current_metrics = self.appointment_metrics_dao.get_all()\n self.assertEqual(len(current_metrics), 4)\n self.assertTrue(all(obj.reconcile_job_run_id is None for obj in\n current_metrics))\n with GenomicJobController(GenomicJob.APPOINTMENT_METRICS_RECONCILE\n ) as controller:\n controller.reconcile_appointment_events_from_metrics()\n job_run = self.job_run_dao.get_all()\n self.assertEqual(len(job_run), 1)\n self.assertTrue(job_run[0].jobId == GenomicJob.\n APPOINTMENT_METRICS_RECONCILE)\n current_events = self.appointment_event_dao.get_all()\n self.assertEqual(len(current_events), 4)\n scheduled = list(filter(lambda x: x.event_type ==\n 'appointment_scheduled', current_events))\n self.assertEqual(len(scheduled), 2)\n self.assertTrue(all(obj.created_from_metric_id is None for obj in\n scheduled))\n updated = list(filter(lambda x: x.event_type ==\n 'appointment_updated', current_events))\n self.assertEqual(len(updated), 2)\n self.assertTrue(all(obj.created_from_metric_id is not None for obj in\n updated))\n current_metrics = self.appointment_metrics_dao.get_all()\n self.assertEqual(len(current_metrics), 4)\n self.assertTrue(all(obj.reconcile_job_run_id is not None for obj in\n current_metrics))\n self.assertTrue(all(obj.reconcile_job_run_id == job_run[0].id for\n obj in current_metrics))\n self.clear_table_after_test('genomic_appointment_event_metrics')\n\n @mock.patch('rdr_service.services.email_service.EmailService.send_email')\n def test_check_appointments_gror_changed(self, email_mock):\n fake_date = parser.parse('2022-09-01T13:43:23')\n notified_dao = GenomicAppointmentEventNotifiedDao()\n config.override_setting(config.GENOMIC_COLOR_PM_EMAIL, [\n 'test@example.com'])\n num_participants = 4\n for num in range(num_participants):\n gror = num if num > 1 else 1\n summary = self.data_generator.create_database_participant_summary(\n consentForStudyEnrollment=1, consentForGenomicsROR=gror)\n self.data_generator.create_database_genomic_appointment(\n message_record_id=num, appointment_id=num, event_type=\n 'appointment_scheduled', module_type='hdr', participant_id=\n summary.participantId, event_authored_time=fake_date,\n source='Color', appointment_timestamp=format_datetime(clock\n .CLOCK.now()), appointment_timezone='America/Los_Angeles',\n location='123 address st', contact_number='17348675309',\n language='en')\n changed_ppts = (self.appointment_event_dao.\n get_appointments_gror_changed())\n self.assertEqual(2, len(changed_ppts))\n with 
GenomicJobController(GenomicJob.CHECK_APPOINTMENT_GROR_CHANGED\n ) as controller:\n controller.check_appointments_gror_changed()\n self.assertEqual(email_mock.call_count, 1)\n notified_appointments = notified_dao.get_all()\n self.assertEqual(2, len(notified_appointments))\n summary = self.data_generator.create_database_participant_summary(\n consentForStudyEnrollment=1, consentForGenomicsROR=2)\n self.data_generator.create_database_genomic_appointment(\n message_record_id=5, appointment_id=5, event_type=\n 'appointment_scheduled', module_type='hdr', participant_id=\n summary.participantId, event_authored_time=fake_date, source=\n 'Color', appointment_timestamp=format_datetime(clock.CLOCK.now(\n )), appointment_timezone='America/Los_Angeles', location=\n '123 address st', contact_number='17348675309', language='en')\n changed_ppts = (self.appointment_event_dao.\n get_appointments_gror_changed())\n self.assertEqual(1, len(changed_ppts))\n\n @mock.patch('rdr_service.services.email_service.EmailService.send_email')\n def test_check_gcr_14day_escalation(self, email_mock):\n fake_date = parser.parse('2022-09-01T13:43:23')\n fake_date2 = parser.parse('2022-09-02T14:14:00')\n fake_date3 = parser.parse('2022-09-03T15:15:00')\n config.override_setting(config.GENOMIC_GCR_ESCALATION_EMAILS, [\n 'test@example.com'])\n self.data_generator.create_database_genomic_set(genomicSetName=\n 'test', genomicSetCriteria='.', genomicSetVersion=1)\n pids = []\n for _ in range(6):\n summary = self.data_generator.create_database_participant_summary(\n consentForStudyEnrollment=1, consentForGenomicsROR=1)\n set_member = (self.data_generator.\n create_database_genomic_set_member(participantId=summary.\n participantId, genomicSetId=1, biobankId=1001,\n collectionTubeId=100, sampleId=10, genomeType='aou_wgs'))\n self.data_generator.create_database_genomic_member_report_state(\n participant_id=summary.participantId, genomic_report_state=\n GenomicReportState.HDR_RPT_POSITIVE, genomic_set_member_id=\n set_member.id, module='hdr_v1', event_authored_time=fake_date)\n pids.append(summary.participantId)\n self.data_generator.create_database_genomic_appointment(\n message_record_id=101, appointment_id=102, event_type=\n 'appointment_scheduled', module_type='hdr', participant_id=pids\n [0], event_authored_time=fake_date, source='Color',\n appointment_timestamp=format_datetime(clock.CLOCK.now()),\n appointment_timezone='America/Los_Angeles', location=\n '123 address st', contact_number='17348675309', language='en')\n self.data_generator.create_database_genomic_appointment(\n message_record_id=102, appointment_id=103, event_type=\n 'appointment_completed', module_type='hdr', participant_id=pids\n [1], event_authored_time=fake_date, source='Color',\n appointment_timestamp=fake_date, appointment_timezone=\n 'America/Los_Angeles', location='123 address st',\n contact_number='17348675309', language='en')\n self.data_generator.create_database_genomic_appointment(\n message_record_id=103, appointment_id=104, event_type=\n 'appointment_scheduled', module_type='hdr', participant_id=pids\n [2], event_authored_time=fake_date2, source='Color',\n appointment_timestamp=format_datetime(clock.CLOCK.now()),\n appointment_timezone='America/Los_Angeles', location=\n '123 address st', contact_number='17348675309', language='en')\n self.data_generator.create_database_genomic_appointment(\n message_record_id=104, appointment_id=104, event_type=\n 'appointment_cancelled', module_type='hdr', participant_id=pids\n [2], event_authored_time=fake_date3, 
source='Color',\n appointment_timestamp=format_datetime(clock.CLOCK.now()),\n appointment_timezone='America/Los_Angeles', location=\n '123 address st', contact_number='17348675309', language='en')\n notified_dao = GenomicDefaultBaseDao(model_type=\n GenomicGCROutreachEscalationNotified)\n notified_dao.insert_bulk([{'participant_id': pids[4], 'created':\n clock.CLOCK.now(), 'modified': clock.CLOCK.now(),\n 'message_sent': True}, {'participant_id': pids[5], 'created':\n clock.CLOCK.now(), 'modified': clock.CLOCK.now(),\n 'message_sent': False}])\n with clock.FakeClock(parser.parse('2022-11-1T05:15:00')):\n escalated_participants = (self.report_state_dao.\n get_hdr_result_positive_no_appointment(num_days=14))\n results = [pid[0] for pid in escalated_participants]\n self.assertIn(pids[2], results)\n self.assertIn(pids[3], results)\n self.assertIn(pids[5], results)\n self.assertNotIn(pids[0], results)\n self.assertNotIn(pids[1], results)\n self.assertNotIn(pids[4], results)\n with GenomicJobController(GenomicJob.CHECK_GCR_OUTREACH_ESCALATION\n ) as controller:\n controller.check_gcr_escalation(controller.job_id)\n self.assertEqual(email_mock.call_count, 3)\n self.assertEqual(email_mock.call_args.args[0].subject,\n 'GCR Outreach 14 Day Escalation')\n self.clear_table_after_test('genomic_gcr_outreach_escalation_notified')\n <mask token>\n\n @mock.patch('rdr_service.services.email_service.EmailService.send_email')\n def test_check_gcr_ce_escalation(self, email_mock):\n fake_date = parser.parse('2022-09-01T13:43:23')\n fake_date2 = parser.parse('2022-09-02T14:14:00')\n fake_date3 = parser.parse('2022-09-03T15:15:00')\n config.override_setting(config.GENOMIC_GCR_ESCALATION_EMAILS, [\n 'test@example.com'])\n self.data_generator.create_database_genomic_set(genomicSetName=\n 'test', genomicSetCriteria='.', genomicSetVersion=1)\n pids = []\n for _ in range(6):\n summary = self.data_generator.create_database_participant_summary(\n consentForStudyEnrollment=1, consentForGenomicsROR=1)\n set_member = (self.data_generator.\n create_database_genomic_set_member(participantId=summary.\n participantId, genomicSetId=1, biobankId=1001,\n collectionTubeId=100, sampleId=10, genomeType='aou_wgs',\n participantOrigin='careevolution'))\n self.data_generator.create_database_genomic_member_report_state(\n participant_id=summary.participantId, genomic_report_state=\n GenomicReportState.HDR_RPT_POSITIVE, genomic_set_member_id=\n set_member.id, module='hdr_v1', event_authored_time=fake_date)\n pids.append(summary.participantId)\n self.data_generator.create_database_genomic_appointment(\n message_record_id=101, appointment_id=102, event_type=\n 'appointment_scheduled', module_type='hdr', participant_id=pids\n [0], event_authored_time=fake_date, source='Color',\n appointment_timestamp=format_datetime(clock.CLOCK.now()),\n appointment_timezone='America/Los_Angeles', location=\n '123 address st', contact_number='17348675309', language='en')\n self.data_generator.create_database_genomic_appointment(\n message_record_id=102, appointment_id=103, event_type=\n 'appointment_completed', module_type='hdr', participant_id=pids\n [1], event_authored_time=fake_date, source='Color',\n appointment_timestamp=fake_date, appointment_timezone=\n 'America/Los_Angeles', location='123 address st',\n contact_number='17348675309', language='en')\n self.data_generator.create_database_genomic_appointment(\n message_record_id=103, appointment_id=104, event_type=\n 'appointment_scheduled', module_type='hdr', participant_id=pids\n [2], 
event_authored_time=fake_date2, source='Color',\n appointment_timestamp=format_datetime(clock.CLOCK.now()),\n appointment_timezone='America/Los_Angeles', location=\n '123 address st', contact_number='17348675309', language='en')\n self.data_generator.create_database_genomic_appointment(\n message_record_id=104, appointment_id=104, event_type=\n 'appointment_cancelled', module_type='hdr', participant_id=pids\n [2], event_authored_time=fake_date3, source='Color',\n appointment_timestamp=format_datetime(clock.CLOCK.now()),\n appointment_timezone='America/Los_Angeles', location=\n '123 address st', contact_number='17348675309', language='en')\n notified_dao = GenomicDefaultBaseDao(model_type=\n GenomicGCROutreachEscalationNotified)\n notified_dao.insert_bulk([{'participant_id': pids[4], 'created':\n clock.CLOCK.now(), 'modified': clock.CLOCK.now(),\n 'message_sent': True}, {'participant_id': pids[5], 'created':\n clock.CLOCK.now(), 'modified': clock.CLOCK.now(),\n 'message_sent': False}])\n with clock.FakeClock(parser.parse('2022-11-1T05:15:00')):\n escalated_participants = (self.report_state_dao.\n get_hdr_result_positive_no_appointment(num_days=30,\n participant_origin='careevolution'))\n results = [pid[0] for pid in escalated_participants]\n self.assertIn(pids[2], results)\n self.assertIn(pids[3], results)\n self.assertIn(pids[5], results)\n self.assertNotIn(pids[0], results)\n self.assertNotIn(pids[1], results)\n self.assertNotIn(pids[4], results)\n with GenomicJobController(GenomicJob.CHECK_GCR_CE_OUTREACH_ESCALATION\n ) as controller:\n controller.check_gcr_escalation(controller.job_id)\n self.assertEqual(email_mock.call_count, 3)\n self.assertEqual(email_mock.call_args.args[0].subject,\n 'GCR Outreach 30 Day Escalation')\n self.clear_table_after_test('genomic_gcr_outreach_escalation_notified')\n\n @mock.patch(\n 'rdr_service.genomic.genomic_job_controller.GenomicJobController.execute_cloud_task'\n )\n def test_execute_auto_generation_from_last_run(self, cloud_task_mock):\n with GenomicJobController(GenomicJob.PR_PR_WORKFLOW) as controller:\n controller.job_result = GenomicSubProcessResult.ERROR\n controller._end_run()\n controller.execute_auto_generation_from_cloud_task()\n last_job_run_status = self.job_run_dao.get_last_run_status_for_job_id(\n job_id=GenomicJob.PR_PR_WORKFLOW)\n self.assertTrue(last_job_run_status is not None)\n self.assertTrue(last_job_run_status[0] == GenomicSubProcessResult.ERROR\n )\n self.assertEqual(cloud_task_mock.called, False)\n self.assertEqual(cloud_task_mock.call_count, 0)\n with GenomicJobController(GenomicJob.PR_PR_WORKFLOW) as controller:\n controller.job_result = GenomicSubProcessResult.SUCCESS\n controller._end_run()\n controller.execute_auto_generation_from_cloud_task()\n last_job_run_status = self.job_run_dao.get_last_run_status_for_job_id(\n job_id=GenomicJob.PR_PR_WORKFLOW)\n self.assertTrue(last_job_run_status is not None)\n self.assertTrue(last_job_run_status[0] == GenomicSubProcessResult.\n SUCCESS)\n self.assertEqual(cloud_task_mock.called, True)\n self.assertTrue(cloud_task_mock.call_args[1].get('payload').get(\n 'manifest_type') == 'p0')\n self.assertTrue(cloud_task_mock.call_args[1].get('task_queue') ==\n 'genomic-generate-manifest')\n all_job_runs = self.job_run_dao.get_all()\n self.assertEqual(len(all_job_runs), 2)\n self.assertTrue(all(obj.runResult in [GenomicSubProcessResult.\n SUCCESS, GenomicSubProcessResult.ERROR] for obj in all_job_runs))\n self.assertTrue(all(obj.jobId == GenomicJob.PR_PR_WORKFLOW for obj in\n all_job_runs))\n",
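The steps above all exercise one testing idiom: open a job controller as a context manager, drive a single pipeline method inside the block, and assert afterwards against a mocked cloud-task or email hook. Below is a minimal, self-contained Python sketch of that idiom; FakeJobController and every other name in it are hypothetical stand-ins chosen for illustration, not the actual rdr_service API.

import unittest
from unittest import mock


class FakeJobController:
    """Hypothetical stand-in: records a run result when the with-block exits."""

    def __init__(self, job_id):
        self.job_id = job_id
        self.job_result = None

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc, tb):
        # The real controller appears to persist a genomic_job_run row here;
        # this stand-in only stamps an in-memory result.
        if self.job_result is None:
            self.job_result = 'SUCCESS' if exc_type is None else 'ERROR'
        return False  # never swallow exceptions

    def execute_cloud_task(self, payload, endpoint):
        raise NotImplementedError  # patched out in the test below


class ControllerPatternTest(unittest.TestCase):
    def test_cloud_task_called_once(self):
        with mock.patch.object(FakeJobController, 'execute_cloud_task') as task_mock:
            with FakeJobController(job_id=1) as controller:
                controller.execute_cloud_task(
                    {'table': 'genomic_job_run'},
                    'rebuild_genomic_table_records_task')
        # Same assertion style as the steps above: call count, then arguments.
        self.assertEqual(task_mock.call_count, 1)
        self.assertEqual(task_mock.call_args.args[1],
                         'rebuild_genomic_table_records_task')


if __name__ == '__main__':
    unittest.main()

Putting the run bookkeeping in __exit__ mirrors what the rows above imply the real controller does: the tests query job-run records only after the with-block has closed, so the exit hook is where the result must be recorded.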
"step-5": "import datetime\nimport json\n\nfrom dateutil import parser\nimport mock\nfrom python_http_client.exceptions import ForbiddenError\n\nfrom rdr_service import clock, config\nfrom rdr_service.api_util import open_cloud_file\nfrom rdr_service.clock import FakeClock\nfrom rdr_service.dao.database_utils import format_datetime\nfrom rdr_service.dao.genomics_dao import GenomicGcDataFileDao, GenomicGCValidationMetricsDao, GenomicIncidentDao, \\\n GenomicSetMemberDao, UserEventMetricsDao, GenomicJobRunDao, GenomicResultWithdrawalsDao, \\\n GenomicMemberReportStateDao, GenomicAppointmentEventMetricsDao, GenomicAppointmentEventDao, GenomicResultViewedDao, \\\n GenomicInformingLoopDao, GenomicAppointmentEventNotifiedDao, GenomicDefaultBaseDao\nfrom rdr_service.dao.message_broker_dao import MessageBrokenEventDataDao\nfrom rdr_service.genomic_enums import GenomicIncidentCode, GenomicJob, GenomicWorkflowState, GenomicSubProcessResult, \\\n GenomicSubProcessStatus, GenomicManifestTypes, GenomicQcStatus, GenomicReportState\nfrom rdr_service.genomic.genomic_job_components import GenomicFileIngester\nfrom rdr_service.genomic.genomic_job_controller import GenomicJobController\nfrom rdr_service.model.genomics import GenomicGcDataFile, GenomicIncident, GenomicSetMember, GenomicGCValidationMetrics,\\\n GenomicGCROutreachEscalationNotified\nfrom rdr_service.offline.genomics import genomic_pipeline, genomic_cvl_pipeline\nfrom rdr_service.participant_enums import WithdrawalStatus\nfrom tests import test_data\nfrom tests.genomics_tests.test_genomic_utils import create_ingestion_test_file\nfrom tests.helpers.unittest_base import BaseTestCase\n\n\nclass GenomicJobControllerTest(BaseTestCase):\n def setUp(self):\n super(GenomicJobControllerTest, self).setUp()\n self.data_file_dao = GenomicGcDataFileDao()\n self.event_data_dao = MessageBrokenEventDataDao()\n self.incident_dao = GenomicIncidentDao()\n self.member_dao = GenomicSetMemberDao()\n self.metrics_dao = GenomicGCValidationMetricsDao()\n self.user_event_metrics_dao = UserEventMetricsDao()\n self.job_run_dao = GenomicJobRunDao()\n self.report_state_dao = GenomicMemberReportStateDao()\n self.appointment_event_dao = GenomicAppointmentEventDao()\n self.appointment_metrics_dao = GenomicAppointmentEventMetricsDao()\n\n def test_incident_with_long_message(self):\n \"\"\"Make sure the length of incident messages doesn't cause issues when recording them\"\"\"\n incident_message = \"1\" * (GenomicIncident.message.type.length + 20)\n mock_slack_handler = mock.MagicMock()\n\n job_controller = GenomicJobController(job_id=1)\n job_controller.genomic_alert_slack = mock_slack_handler\n job_controller.create_incident(message=incident_message, slack=True)\n\n # Double check that the incident was saved successfully, with part of the message\n incident: GenomicIncident = self.session.query(GenomicIncident).one()\n self.assertTrue(incident_message.startswith(incident.message))\n\n # Make sure Slack received the full message\n mock_slack_handler.send_message_to_webhook.assert_called_with(\n message_data={\n 'text': incident_message\n }\n )\n\n def test_gvcf_files_ingestion(self):\n job_controller = GenomicJobController(job_id=38)\n bucket_name = \"test_bucket\"\n\n file_path = \"Wgs_sample_raw_data/SS_VCF_research/BCM_A100153482_21042005280_SIA0013441__1.hard-filtered.gvcf.gz\"\n file_path_md5 = \"Wgs_sample_raw_data/SS_VCF_research/\" \\\n \"BCM_A100153482_21042005280_SIA0013441__1.hard-filtered.gvcf.gz.md5sum\"\n\n full_path = f'{bucket_name}/{file_path}'\n full_path_md5 = 
f'{bucket_name}/{file_path_md5}'\n\n gen_set = self.data_generator.create_database_genomic_set(\n genomicSetName=\".\",\n genomicSetCriteria=\".\",\n genomicSetVersion=1\n )\n\n gen_member = self.data_generator.create_database_genomic_set_member(\n genomicSetId=gen_set.id,\n biobankId=\"100153482\",\n sampleId=\"21042005280\",\n genomeType=\"aou_wgs\",\n genomicWorkflowState=GenomicWorkflowState.AW1\n )\n\n gen_job_run = self.data_generator.create_database_genomic_job_run(\n jobId=GenomicJob.AW1_MANIFEST,\n startTime=clock.CLOCK.now(),\n runResult=GenomicSubProcessResult.SUCCESS\n )\n\n gen_processed_file = self.data_generator.create_database_genomic_file_processed(\n runId=gen_job_run.id,\n startTime=clock.CLOCK.now(),\n filePath='/test_file_path',\n bucketName='test_bucket',\n fileName='test_file_name',\n )\n\n self.data_generator.create_database_genomic_gc_validation_metrics(\n genomicSetMemberId=gen_member.id,\n genomicFileProcessedId=gen_processed_file.id\n )\n\n job_controller.ingest_data_files_into_gc_metrics(file_path_md5, bucket_name)\n\n metrics = self.metrics_dao.get_metrics_by_member_id(gen_member.id)\n\n self.assertIsNotNone(metrics.gvcfMd5Path)\n self.assertEqual(metrics.gvcfMd5Path, full_path_md5)\n\n job_controller.ingest_data_files_into_gc_metrics(file_path, bucket_name)\n\n metrics = self.metrics_dao.get_metrics_by_member_id(gen_member.id)\n\n self.assertIsNotNone(metrics.gvcfPath)\n self.assertEqual(metrics.gvcfPath, full_path)\n\n def test_gvcf_files_ingestion_create_incident(self):\n bucket_name = \"test_bucket\"\n file_path = \"Wgs_sample_raw_data/SS_VCF_research/BCM_A100153482_21042005280_SIA0013441__1.hard-filtered.gvcf.gz\"\n\n gen_set = self.data_generator.create_database_genomic_set(\n genomicSetName=\".\",\n genomicSetCriteria=\".\",\n genomicSetVersion=1\n )\n\n gen_member = self.data_generator.create_database_genomic_set_member(\n genomicSetId=gen_set.id,\n biobankId=\"111111111\",\n sampleId=\"222222222222\",\n genomeType=\"aou_wgs\",\n genomicWorkflowState=GenomicWorkflowState.AW1\n )\n\n gen_job_run = self.data_generator.create_database_genomic_job_run(\n jobId=GenomicJob.AW1_MANIFEST,\n startTime=clock.CLOCK.now(),\n runResult=GenomicSubProcessResult.SUCCESS\n )\n\n gen_processed_file = self.data_generator.create_database_genomic_file_processed(\n runId=gen_job_run.id,\n startTime=clock.CLOCK.now(),\n filePath='/test_file_path',\n bucketName=bucket_name,\n fileName='test_file_name',\n )\n\n self.data_generator.create_database_genomic_gc_validation_metrics(\n genomicSetMemberId=gen_member.id,\n genomicFileProcessedId=gen_processed_file.id\n )\n\n with GenomicJobController(GenomicJob.INGEST_DATA_FILES) as controller:\n controller.ingest_data_files_into_gc_metrics(file_path, bucket_name)\n\n incident = self.incident_dao.get(1)\n self.assertIsNotNone(incident)\n self.assertEqual(incident.code, GenomicIncidentCode.UNABLE_TO_FIND_METRIC.name)\n self.assertEqual(incident.data_file_path, file_path)\n self.assertEqual(incident.message, 'INGEST_DATA_FILES: Cannot find '\n 'genomics metric record for sample id: '\n '21042005280')\n\n def test_accession_data_files(self):\n test_bucket_baylor = \"fake-data-bucket-baylor\"\n test_idat_file = \"fake-data-bucket-baylor/Genotyping_sample_raw_data/204027270091_R02C01_Grn.idat\"\n test_vcf_file = \"fake-data-bucket-baylor/Genotyping_sample_raw_data/204027270091_R02C01.vcf.gz\"\n\n test_cram_file = \"fake-data-bucket-baylor/Wgs_sample_raw_data/\" \\\n \"CRAMs_CRAIs/BCM_A100134256_21063006771_SIA0017196_1.cram\"\n\n test_files 
= [test_idat_file, test_vcf_file, test_cram_file]\n\n test_time = datetime.datetime(2021, 7, 9, 14, 1, 1)\n\n # run job controller method on each file\n with clock.FakeClock(test_time):\n\n for file_path in test_files:\n with GenomicJobController(GenomicJob.ACCESSION_DATA_FILES) as controller:\n controller.accession_data_files(file_path, test_bucket_baylor)\n\n inserted_files = self.data_file_dao.get_all()\n\n # idat\n expected_idat = GenomicGcDataFile(\n id=1,\n created=test_time,\n modified=test_time,\n file_path=test_idat_file,\n gc_site_id='jh',\n bucket_name='fake-data-bucket-baylor',\n file_prefix='Genotyping_sample_raw_data',\n file_name='204027270091_R02C01_Grn.idat',\n file_type='Grn.idat',\n identifier_type='chipwellbarcode',\n identifier_value='204027270091_R02C01',\n ignore_flag=0,\n )\n\n # vcf\n expected_vcf = GenomicGcDataFile(\n id=2,\n created=test_time,\n modified=test_time,\n file_path=test_vcf_file,\n gc_site_id='jh',\n bucket_name='fake-data-bucket-baylor',\n file_prefix='Genotyping_sample_raw_data',\n file_name='204027270091_R02C01.vcf.gz',\n file_type='vcf.gz',\n identifier_type='chipwellbarcode',\n identifier_value='204027270091_R02C01',\n ignore_flag=0,\n )\n\n # cram\n expected_cram = GenomicGcDataFile(\n id=3,\n created=test_time,\n modified=test_time,\n file_path=test_cram_file,\n gc_site_id='bcm',\n bucket_name='fake-data-bucket-baylor',\n file_prefix='Wgs_sample_raw_data/CRAMs_CRAIs',\n file_name='BCM_A100134256_21063006771_SIA0017196_1.cram',\n file_type='cram',\n identifier_type='sample_id',\n identifier_value='21063006771',\n ignore_flag=0,\n )\n\n # obj mapping\n expected_objs = {\n 0: expected_idat,\n 1: expected_vcf,\n 2: expected_cram\n }\n\n # verify test objects match expectations\n for i in range(3):\n self.assertEqual(expected_objs[i].bucket_name, inserted_files[i].bucket_name)\n self.assertEqual(expected_objs[i].created, inserted_files[i].created)\n self.assertEqual(expected_objs[i].file_name, inserted_files[i].file_name)\n self.assertEqual(expected_objs[i].file_path, inserted_files[i].file_path)\n self.assertEqual(expected_objs[i].file_prefix, inserted_files[i].file_prefix)\n self.assertEqual(expected_objs[i].file_type, inserted_files[i].file_type)\n self.assertEqual(expected_objs[i].gc_site_id, inserted_files[i].gc_site_id)\n self.assertEqual(expected_objs[i].id, inserted_files[i].id)\n self.assertEqual(expected_objs[i].identifier_type, inserted_files[i].identifier_type)\n self.assertEqual(expected_objs[i].identifier_value, inserted_files[i].identifier_value)\n self.assertEqual(expected_objs[i].ignore_flag, inserted_files[i].ignore_flag)\n self.assertEqual(expected_objs[i].metadata, inserted_files[i].metadata)\n self.assertEqual(expected_objs[i].modified, inserted_files[i].modified)\n\n def test_updating_members_blocklists(self):\n\n gen_set = self.data_generator.create_database_genomic_set(\n genomicSetName=\".\",\n genomicSetCriteria=\".\",\n genomicSetVersion=1\n )\n\n ids_should_be_updated = []\n # for just created and wf state query and MATCHES criteria\n for i in range(4):\n ids_should_be_updated.append(\n self.data_generator.create_database_genomic_set_member(\n genomicSetId=gen_set.id,\n biobankId=\"100153482\",\n sampleId=\"21042005280\",\n genomeType='test_investigation_one' if i & 2 != 0 else 'aou_wgs',\n genomicWorkflowState=GenomicWorkflowState.AW0,\n ai_an='Y' if i & 2 == 0 else 'N'\n ).id\n )\n\n # for just created and wf state query and DOES NOT MATCH criteria\n for i in range(2):\n 
self.data_generator.create_database_genomic_set_member(\n genomicSetId=gen_set.id,\n biobankId=\"100153482\",\n sampleId=\"21042005280\",\n genomeType='aou_array',\n genomicWorkflowState=GenomicWorkflowState.AW0,\n ai_an='N'\n )\n\n with GenomicJobController(GenomicJob.UPDATE_MEMBERS_BLOCKLISTS) as controller:\n controller.update_members_blocklists()\n\n # current config json in base_config.json\n created_members = self.member_dao.get_all()\n\n blocklisted = list(filter(lambda x: x.blockResults == 1 or x.blockResearch == 1, created_members))\n self.assertTrue(ids_should_be_updated.sort() == [obj.id for obj in blocklisted].sort())\n\n # should be RESEARCH blocked\n self.assertTrue(all(\n obj.blockResearch == 1 and obj.blockResearchReason is not None and obj.blockResearchReason == 'aian'\n for obj in created_members if obj.ai_an == 'Y' and obj.genomicWorkflowState == GenomicWorkflowState.AW0)\n )\n\n # should NOT be RESULTS blocked\n self.assertTrue(all(\n obj.blockResults == 0 and obj.blockResultsReason is None\n for obj in created_members if obj.ai_an == 'Y' and obj.genomicWorkflowState == GenomicWorkflowState.AW0)\n )\n\n # should be RESEARCH blocked\n self.assertTrue(all(\n obj.blockResearch == 1 and obj.blockResearchReason is not None and obj.blockResearchReason == 'test_sample_swap'\n for obj in created_members if obj.genomeType == 'test_investigation_one' and obj.genomicWorkflowState ==\n GenomicWorkflowState.AW0)\n )\n\n # should be RESULTS blocked\n self.assertTrue(all(\n obj.blockResults == 1 and obj.blockResultsReason is not None and obj.blockResultsReason == 'test_sample_swap'\n for obj in created_members if obj.genomeType == 'test_investigation_one' and obj.genomicWorkflowState ==\n GenomicWorkflowState.AW0)\n )\n\n # should NOT be RESEARCH/RESULTS blocked\n self.assertTrue(all(\n obj.blockResearch == 0 and obj.blockResearchReason is None\n for obj in created_members if obj.genomeType == 'aou_array' and obj.genomicWorkflowState ==\n GenomicWorkflowState.AW0)\n )\n\n self.assertTrue(all(\n obj.blockResults == 0 and obj.blockResultsReason is None\n for obj in created_members if obj.genomeType == 'aou_array' and obj.genomicWorkflowState ==\n GenomicWorkflowState.AW0)\n )\n\n # clear current set member records\n with self.member_dao.session() as session:\n session.query(GenomicSetMember).delete()\n\n run_result = self.job_run_dao.get(1)\n\n self.assertEqual(run_result.runStatus, GenomicSubProcessStatus.COMPLETED)\n self.assertEqual(run_result.runResult, GenomicSubProcessResult.SUCCESS)\n\n # for modified data query and MATCHES criteria\n for i in range(4):\n self.data_generator.create_database_genomic_set_member(\n genomicSetId=gen_set.id,\n biobankId=\"100153482\",\n sampleId=\"21042005280\",\n genomeType='test_investigation_one' if i & 2 != 0 else 'aou_wgs',\n genomicWorkflowState=GenomicWorkflowState.AW1,\n ai_an='Y' if i & 2 == 0 else 'N'\n )\n\n with GenomicJobController(GenomicJob.UPDATE_MEMBERS_BLOCKLISTS) as controller:\n controller.update_members_blocklists()\n\n modified_members = self.member_dao.get_all()\n\n # should be RESEARCH blocked\n self.assertTrue(all(\n obj.blockResearch == 1 and obj.blockResearchReason is not None and obj.blockResearchReason == 'aian'\n for obj in modified_members if obj.ai_an == 'Y' and obj.genomicWorkflowState == GenomicWorkflowState.AW1)\n )\n\n # should NOT be RESULTS blocked\n self.assertTrue(all(\n obj.blockResults == 0 and obj.blockResultsReason is None\n for obj in modified_members if obj.ai_an == 'Y' and obj.genomicWorkflowState == 
GenomicWorkflowState.AW1)\n )\n\n # should be RESEARCH blocked\n self.assertTrue(all(\n obj.blockResearch == 1 and obj.blockResearchReason is not None and obj.blockResearchReason == 'test_sample_swap'\n for obj in modified_members if obj.genomeType == 'test_investigation_one' and obj.genomicWorkflowState ==\n GenomicWorkflowState.AW1)\n )\n\n # should be RESULTS blocked\n self.assertTrue(all(\n obj.blockResults == 1 and obj.blockResultsReason is not None and obj.blockResultsReason == 'test_sample_swap'\n for obj in modified_members if obj.genomeType == 'test_investigation_one' and obj.genomicWorkflowState ==\n GenomicWorkflowState.AW1)\n )\n\n run_result = self.job_run_dao.get(2)\n\n self.assertEqual(run_result.runStatus, GenomicSubProcessStatus.COMPLETED)\n self.assertEqual(run_result.runResult, GenomicSubProcessResult.SUCCESS)\n\n def test_ingest_user_metrics_file(self):\n test_file = 'Genomic-Metrics-File-User-Events-Test.csv'\n bucket_name = 'test_bucket'\n sub_folder = 'user_events'\n pids = []\n\n file_ingester = GenomicFileIngester()\n\n for _ in range(2):\n pid = self.data_generator.create_database_participant()\n pids.append(pid.participantId)\n\n test_metrics_file = create_ingestion_test_file(\n test_file,\n bucket_name,\n sub_folder)\n\n test_file_path = f'{bucket_name}/{sub_folder}/{test_metrics_file}'\n\n with open_cloud_file(test_file_path) as csv_file:\n metrics_to_ingest = file_ingester._read_data_to_ingest(csv_file)\n\n with GenomicJobController(GenomicJob.METRICS_FILE_INGEST) as controller:\n controller.ingest_metrics_file(\n metric_type='user_events',\n file_path=test_file_path,\n )\n\n job_run_id = controller.job_run.id\n metrics = self.user_event_metrics_dao.get_all()\n\n for pid in pids:\n file_metrics = list(filter(lambda x: int(x['participant_id'].split('P')[-1]) == pid, metrics_to_ingest[\n 'rows']))\n participant_ingested_metrics = list(filter(lambda x: x.participant_id == pid, metrics))\n\n self.assertEqual(len(file_metrics), len(participant_ingested_metrics))\n self.assertTrue(all(obj.run_id == job_run_id for obj in participant_ingested_metrics))\n\n @mock.patch('rdr_service.genomic.genomic_job_controller.GenomicJobController.execute_cloud_task')\n def test_reconcile_pdr_data(self, mock_cloud_task):\n\n # init new job run in __enter__\n with GenomicJobController(GenomicJob.RECONCILE_PDR_DATA) as controller:\n controller.reconcile_pdr_data()\n\n cloud_task_endpoint = 'rebuild_genomic_table_records_task'\n\n first_run = self.job_run_dao.get_all()\n\n self.assertEqual(mock_cloud_task.call_count, 1)\n call_args = mock_cloud_task.call_args_list\n\n self.assertEqual(len(call_args), 1)\n self.assertEqual(call_args[0].args[0]['table'], self.job_run_dao.model_type.__tablename__)\n\n self.assertTrue(type(call_args[0].args[0]['ids']) is list)\n self.assertEqual(call_args[0].args[0]['ids'], [obj.id for obj in first_run])\n self.assertEqual(call_args[0].args[1], cloud_task_endpoint)\n\n participant = self.data_generator.create_database_participant()\n\n gen_set = self.data_generator.create_database_genomic_set(\n genomicSetName=\".\",\n genomicSetCriteria=\".\",\n genomicSetVersion=1\n )\n\n plus_ten = clock.CLOCK.now() + datetime.timedelta(minutes=10)\n plus_ten = plus_ten.replace(microsecond=0)\n with FakeClock(plus_ten):\n for i in range(2):\n gen_member = self.data_generator.create_database_genomic_set_member(\n genomicSetId=gen_set.id,\n biobankId=\"100153482\",\n sampleId=\"21042005280\",\n genomeType=\"aou_wgs\",\n genomicWorkflowState=GenomicWorkflowState.AW1\n )\n\n 
gen_processed_file = self.data_generator.create_database_genomic_file_processed(\n runId=first_run[0].id,\n startTime=clock.CLOCK.now(),\n filePath=f'test_file_path_{i}',\n bucketName='test_bucket',\n fileName='test_file_name',\n )\n\n self.data_generator.create_database_genomic_gc_validation_metrics(\n genomicSetMemberId=gen_member.id,\n genomicFileProcessedId=gen_processed_file.id\n )\n\n manifest = self.data_generator.create_database_genomic_manifest_file(\n manifestTypeId=2,\n filePath=f'test_file_path_{i}'\n )\n\n self.data_generator.create_database_genomic_manifest_feedback(\n inputManifestFileId=manifest.id,\n feedbackRecordCount=2\n )\n\n self.data_generator.create_database_genomic_user_event_metrics(\n participant_id=participant.participantId,\n event_name='test_event',\n run_id=1,\n )\n\n self.data_generator.create_database_genomic_informing_loop(\n message_record_id=1,\n event_type='informing_loop_decision',\n module_type='gem',\n participant_id=participant.participantId,\n decision_value='maybe_later',\n event_authored_time=clock.CLOCK.now()\n )\n\n self.data_generator.create_database_genomic_cvl_past_due(\n cvl_site_id='co',\n email_notification_sent=0,\n sample_id='sample_test',\n results_type='hdr',\n genomic_set_member_id=gen_member.id\n )\n\n self.data_generator.create_database_genomic_appointment(\n message_record_id=i,\n appointment_id=i,\n event_type='appointment_scheduled',\n module_type='hdr',\n participant_id=participant.participantId,\n event_authored_time=clock.CLOCK.now(),\n source='Color',\n appointment_timestamp=format_datetime(clock.CLOCK.now()),\n appointment_timezone='America/Los_Angeles',\n location='123 address st',\n contact_number='17348675309',\n language='en'\n )\n\n self.data_generator.create_database_genomic_member_report_state(\n genomic_set_member_id=gen_member.id,\n participant_id=participant.participantId,\n module='gem',\n genomic_report_state=GenomicReportState.GEM_RPT_READY,\n event_authored_time=clock.CLOCK.now()\n )\n\n self.data_generator.create_genomic_result_viewed(\n participant_id=participant.participantId,\n event_type='result_viewed',\n event_authored_time=clock.CLOCK.now(),\n module_type='gem',\n sample_id=gen_member.sampleId\n )\n\n # gets new records that were created with last job run from above\n with GenomicJobController(GenomicJob.RECONCILE_PDR_DATA) as controller:\n controller.reconcile_pdr_data()\n\n affected_tables = [\n 'genomic_set',\n 'genomic_set_member',\n 'genomic_job_run',\n 'genomic_file_processed',\n 'genomic_gc_validation_metrics',\n 'genomic_manifest_file',\n 'genomic_manifest_feedback',\n 'genomic_informing_loop',\n 'genomic_cvl_results_past_due',\n 'user_event_metrics',\n 'genomic_member_report_state',\n 'genomic_result_viewed',\n 'genomic_appointment_event'\n ]\n\n num_calls = len(affected_tables) + 1\n\n self.assertEqual(mock_cloud_task.call_count, num_calls)\n call_args = mock_cloud_task.call_args_list\n self.assertEqual(len(call_args), num_calls)\n\n mock_tables = set([obj[0][0]['table'] for obj in call_args])\n mock_endpoint = [obj[0][1] for obj in call_args]\n\n self.assertTrue([mock_tables].sort() == affected_tables.sort())\n self.assertTrue(all(obj for obj in mock_endpoint if obj == cloud_task_endpoint))\n\n @mock.patch('rdr_service.genomic.genomic_job_controller.GenomicJobController.execute_cloud_task')\n def test_retry_manifest_ingestions_if_deltas(self, mock_cloud_task):\n\n bucket_name = \"test-bucket\"\n aw1_file_name = \"AW1_wgs_sample_manifests/RDR_AoU_SEQ_PKG-2104-026571.csv\"\n aw1_manifest_path 
= f\"{bucket_name}/{aw1_file_name}\"\n\n aw2_file_name = \"AW2_wgs_data_manifests/RDR_AoU_SEQ_DataManifest_04092021.csv\"\n aw2_manifest_path = f\"{bucket_name}/{aw2_file_name}\"\n\n gen_set = self.data_generator.create_database_genomic_set(\n genomicSetName=\".\",\n genomicSetCriteria=\".\",\n genomicSetVersion=1\n )\n\n # Create AW1 job_run\n aw1_job_run = self.data_generator.create_database_genomic_job_run(\n jobId=GenomicJob.AW1_MANIFEST,\n startTime=clock.CLOCK.now(),\n endTime=clock.CLOCK.now(),\n runResult=GenomicSubProcessResult.SUCCESS\n )\n\n # Create AW2 job_run\n aw2_job_run = self.data_generator.create_database_genomic_job_run(\n jobId=GenomicJob.METRICS_INGESTION,\n startTime=clock.CLOCK.now(),\n endTime=clock.CLOCK.now(),\n runResult=GenomicSubProcessResult.SUCCESS\n )\n\n # should have no data\n with GenomicJobController(GenomicJob.RETRY_MANIFEST_INGESTIONS) as controller:\n controller.retry_manifest_ingestions()\n\n job_run = self.job_run_dao.get(3)\n self.assertEqual(job_run.jobId, GenomicJob.RETRY_MANIFEST_INGESTIONS)\n self.assertEqual(job_run.runStatus, GenomicSubProcessStatus.COMPLETED)\n self.assertEqual(job_run.runResult, GenomicSubProcessResult.NO_FILES)\n\n self.assertEqual(mock_cloud_task.call_count, 0)\n self.assertFalse(mock_cloud_task.call_count)\n\n # Create genomic_aw1_raw record\n self.data_generator.create_database_genomic_aw1_raw(\n file_path=aw1_manifest_path,\n package_id=\"PKG-2104-026571\",\n biobank_id=\"A10001\",\n )\n\n # Create genomic_aw2_raw record\n self.data_generator.create_database_genomic_aw2_raw(\n file_path=aw2_manifest_path,\n biobank_id=\"A10001\",\n sample_id=\"100001\",\n biobankidsampleid=\"A10001_100001\",\n )\n\n # Create AW1 genomic_manifest_file record\n aw1_manifest_file = self.data_generator.create_database_genomic_manifest_file(\n created=clock.CLOCK.now(),\n modified=clock.CLOCK.now(),\n uploadDate=clock.CLOCK.now(),\n manifestTypeId=GenomicManifestTypes.AW1,\n filePath=aw1_manifest_path,\n fileName=aw1_file_name,\n bucketName=bucket_name,\n recordCount=1,\n rdrProcessingComplete=1,\n rdrProcessingCompleteDate=clock.CLOCK.now(),\n )\n\n # Create AW2 genomic_manifest_file record\n aw2_manifest_file = self.data_generator.create_database_genomic_manifest_file(\n created=clock.CLOCK.now(),\n modified=clock.CLOCK.now(),\n uploadDate=clock.CLOCK.now(),\n manifestTypeId=GenomicManifestTypes.AW2,\n filePath=aw2_manifest_path,\n fileName=aw2_file_name,\n bucketName=bucket_name,\n recordCount=1,\n rdrProcessingComplete=1,\n rdrProcessingCompleteDate=clock.CLOCK.now(),\n )\n\n # Create AW1 file_processed\n aw1_file_processed = self.data_generator.create_database_genomic_file_processed(\n runId=aw1_job_run.id,\n startTime=clock.CLOCK.now(),\n genomicManifestFileId=aw1_manifest_file.id,\n filePath=f\"/{aw1_manifest_path}\",\n bucketName=bucket_name,\n fileName=aw1_file_name,\n )\n\n # Create AW2 file_processed\n aw2_file_processed = self.data_generator.create_database_genomic_file_processed(\n runId=aw2_job_run.id,\n startTime=clock.CLOCK.now(),\n genomicManifestFileId=aw2_manifest_file.id,\n filePath=f\"/{aw2_manifest_path}\",\n bucketName=bucket_name,\n fileName=aw2_file_name,\n )\n\n # genomic_set_member for AW1\n gen_member = self.data_generator.create_database_genomic_set_member(\n genomicSetId=gen_set.id,\n biobankId=\"100153482\",\n sampleId=\"21042005280\",\n genomeType=\"aou_wgs\",\n genomicWorkflowState=GenomicWorkflowState.AW1,\n aw1FileProcessedId=aw1_file_processed.id\n )\n\n # genomic_gc_validation_metrics for AW1\n 
self.data_generator.create_database_genomic_gc_validation_metrics(\n genomicSetMemberId=gen_member.id,\n genomicFileProcessedId=aw2_file_processed.id\n )\n\n # one AW1/AW2 with no deltas\n with GenomicJobController(GenomicJob.RETRY_MANIFEST_INGESTIONS) as controller:\n controller.retry_manifest_ingestions()\n\n job_run = self.job_run_dao.get(4)\n self.assertEqual(job_run.jobId, GenomicJob.RETRY_MANIFEST_INGESTIONS)\n self.assertEqual(job_run.runStatus, GenomicSubProcessStatus.COMPLETED)\n self.assertEqual(job_run.runResult, GenomicSubProcessResult.NO_FILES)\n\n self.assertEqual(mock_cloud_task.call_count, 0)\n self.assertFalse(mock_cloud_task.call_count)\n\n # empty tables resulting in deltas and cloud task calls\n with self.member_dao.session() as session:\n session.query(GenomicGCValidationMetrics).delete()\n session.query(GenomicSetMember).delete()\n\n with GenomicJobController(GenomicJob.RETRY_MANIFEST_INGESTIONS) as controller:\n controller.retry_manifest_ingestions()\n\n job_run = self.job_run_dao.get(5)\n self.assertEqual(job_run.jobId, GenomicJob.RETRY_MANIFEST_INGESTIONS)\n self.assertEqual(job_run.runStatus, GenomicSubProcessStatus.COMPLETED)\n self.assertEqual(job_run.runResult, GenomicSubProcessResult.SUCCESS)\n\n # one AW1/AW2 with deltas\n self.assertEqual(mock_cloud_task.call_count, 2)\n self.assertTrue(mock_cloud_task.call_count)\n\n call_args = mock_cloud_task.call_args_list\n self.assertEqual(len(call_args), 2)\n\n cloud_task_endpoint = ['ingest_aw1_manifest_task', 'ingest_aw2_manifest_task']\n mock_endpoint = [obj[0][1] for obj in call_args]\n self.assertTrue(all(obj for obj in mock_endpoint if obj == cloud_task_endpoint))\n\n mock_buckets = set([obj[0][0]['bucket_name'] for obj in call_args])\n self.assertTrue(len(mock_buckets), 1)\n self.assertTrue(list(mock_buckets)[0] == bucket_name)\n\n def test_calculate_informing_loop_ready_flags(self):\n num_participants = 4\n gen_set = self.data_generator.create_database_genomic_set(\n genomicSetName=\".\",\n genomicSetCriteria=\".\",\n genomicSetVersion=1\n )\n\n for num in range(num_participants):\n plus_num = clock.CLOCK.now() + datetime.timedelta(minutes=num)\n plus_num = plus_num.replace(microsecond=0)\n with FakeClock(plus_num):\n summary = self.data_generator.create_database_participant_summary(\n consentForStudyEnrollment=1,\n consentForGenomicsROR=1\n )\n stored_sample = self.data_generator.create_database_biobank_stored_sample(\n biobankId=summary.biobankId,\n biobankOrderIdentifier=self.fake.pyint()\n )\n collection_site = self.data_generator.create_database_site(\n siteType='Clinic'\n )\n order = self.data_generator.create_database_biobank_order(\n collectedSiteId=collection_site.siteId,\n participantId=summary.participantId,\n finalizedTime=plus_num\n )\n self.data_generator.create_database_biobank_order_identifier(\n value=stored_sample.biobankOrderIdentifier,\n biobankOrderId=order.biobankOrderId,\n system=\"1\",\n )\n self.data_generator.create_database_biobank_order_identifier(\n value=stored_sample.biobankOrderIdentifier,\n biobankOrderId=order.biobankOrderId,\n system=\"2\",\n )\n member = self.data_generator.create_database_genomic_set_member(\n genomicSetId=gen_set.id,\n participantId=summary.participantId,\n genomeType=config.GENOME_TYPE_WGS,\n qcStatus=GenomicQcStatus.PASS,\n gcManifestSampleSource='Whole Blood',\n collectionTubeId=stored_sample.biobankStoredSampleId\n )\n self.data_generator.create_database_genomic_gc_validation_metrics(\n genomicSetMemberId=member.id,\n sexConcordance='True',\n 
drcFpConcordance='Pass',\n drcSexConcordance='Pass',\n processingStatus='Pass'\n )\n\n\n members_for_ready_loop = self.member_dao.get_members_for_informing_loop_ready()\n self.assertEqual(len(members_for_ready_loop), num_participants)\n\n current_set_members = self.member_dao.get_all()\n self.assertTrue(all(obj.informingLoopReadyFlag == 0 for obj in current_set_members))\n self.assertTrue(all(obj.informingLoopReadyFlagModified is None for obj in current_set_members))\n\n with GenomicJobController(GenomicJob.CALCULATE_INFORMING_LOOP_READY) as controller:\n controller.calculate_informing_loop_ready_flags()\n\n # no config object, controller method should return\n members_for_ready_loop = self.member_dao.get_members_for_informing_loop_ready()\n self.assertEqual(len(members_for_ready_loop), num_participants)\n\n calculation_limit = 2\n config.override_setting(config.CALCULATE_READY_FLAG_LIMIT, [calculation_limit])\n\n with GenomicJobController(GenomicJob.CALCULATE_INFORMING_LOOP_READY) as controller:\n controller.calculate_informing_loop_ready_flags()\n\n current_set_members = self.member_dao.get_all()\n self.assertTrue(any(obj.informingLoopReadyFlag == 1 for obj in current_set_members))\n self.assertTrue(any(obj.informingLoopReadyFlagModified is not None for obj in current_set_members))\n\n current_loops_set = [obj for obj in current_set_members if obj.informingLoopReadyFlag == 1\n and obj.informingLoopReadyFlagModified is not None]\n self.assertEqual(len(current_loops_set), calculation_limit)\n\n members_for_ready_loop = self.member_dao.get_members_for_informing_loop_ready()\n self.assertEqual(len(members_for_ready_loop), num_participants // 2)\n\n with GenomicJobController(GenomicJob.CALCULATE_INFORMING_LOOP_READY) as controller:\n controller.calculate_informing_loop_ready_flags()\n\n current_set_members = self.member_dao.get_all()\n self.assertTrue(all(obj.informingLoopReadyFlag == 1 for obj in current_set_members))\n self.assertTrue(all(obj.informingLoopReadyFlagModified is not None for obj in current_set_members))\n\n members_for_ready_loop = self.member_dao.get_members_for_informing_loop_ready()\n self.assertEqual(len(members_for_ready_loop), 0)\n\n @mock.patch('rdr_service.services.email_service.EmailService.send_email')\n def test_getting_results_withdrawn(self, email_mock):\n num_participants = 4\n result_withdrawal_dao = GenomicResultWithdrawalsDao()\n\n gen_set = self.data_generator.create_database_genomic_set(\n genomicSetName=\".\",\n genomicSetCriteria=\".\",\n genomicSetVersion=1\n )\n gen_job_run = self.data_generator.create_database_genomic_job_run(\n jobId=GenomicJob.AW1_MANIFEST,\n startTime=clock.CLOCK.now(),\n runResult=GenomicSubProcessResult.SUCCESS\n )\n\n pids = []\n for num in range(num_participants):\n summary = self.data_generator.create_database_participant_summary(\n consentForStudyEnrollment=1,\n consentForGenomicsROR=1,\n withdrawalStatus=WithdrawalStatus.EARLY_OUT\n )\n\n self.data_generator.create_database_genomic_set_member(\n genomicSetId=gen_set.id,\n participantId=summary.participantId,\n genomeType=config.GENOME_TYPE_ARRAY,\n gemA1ManifestJobRunId=gen_job_run.id if num % 2 == 0 else None\n )\n\n self.data_generator.create_database_genomic_set_member(\n genomicSetId=gen_set.id,\n participantId=summary.participantId,\n genomeType=config.GENOME_TYPE_WGS,\n cvlW1ilHdrJobRunId=gen_job_run.id\n )\n\n pids.append(summary.participantId)\n\n config.override_setting(config.RDR_GENOMICS_NOTIFICATION_EMAIL, 'email@test.com')\n\n with 
GenomicJobController(GenomicJob.RESULTS_PIPELINE_WITHDRAWALS) as controller:\n controller.check_results_withdrawals()\n\n # mock checks should be two => 1 GEM 1 HEALTH\n self.assertEqual(email_mock.call_count, 2)\n call_args = email_mock.call_args_list\n\n self.assertTrue(any('GEM' in call.args[0].subject for call in call_args))\n self.assertTrue(any('HEALTH' in call.args[0].subject for call in call_args))\n\n job_runs = self.job_run_dao.get_all()\n current_job_run = list(filter(lambda x: x.jobId == GenomicJob.RESULTS_PIPELINE_WITHDRAWALS, job_runs))[0]\n self.assertTrue(current_job_run.runResult == GenomicSubProcessResult.SUCCESS)\n\n all_withdrawal_records = result_withdrawal_dao.get_all()\n\n self.assertTrue(len(all_withdrawal_records) == len(pids))\n self.assertTrue(all(obj.participant_id in pids for obj in all_withdrawal_records))\n\n array_results = list(filter(lambda x: x.array_results == 1, all_withdrawal_records))\n\n # should only be 2\n self.assertTrue(len(array_results), 2)\n\n cvl_results = list(filter(lambda x: x.cvl_results == 1, all_withdrawal_records))\n\n # should be 4 for num of participants\n self.assertTrue(len(cvl_results), num_participants)\n\n with GenomicJobController(GenomicJob.RESULTS_PIPELINE_WITHDRAWALS) as controller:\n controller.check_results_withdrawals()\n\n # mock checks should still be two on account of no records\n self.assertEqual(email_mock.call_count, 2)\n\n job_runs = self.job_run_dao.get_all()\n current_job_run = list(filter(lambda x: x.jobId == GenomicJob.RESULTS_PIPELINE_WITHDRAWALS, job_runs))[1]\n\n self.assertTrue(current_job_run.runResult == GenomicSubProcessResult.NO_RESULTS)\n\n def test_gem_results_to_report_state(self):\n num_participants = 8\n\n gen_set = self.data_generator.create_database_genomic_set(\n genomicSetName=\".\",\n genomicSetCriteria=\".\",\n genomicSetVersion=1\n )\n\n gem_a2_job_run = self.data_generator.create_database_genomic_job_run(\n jobId=GenomicJob.GEM_A2_MANIFEST,\n startTime=clock.CLOCK.now(),\n runResult=GenomicSubProcessResult.SUCCESS\n )\n\n pids_to_update, member_ids = [], []\n for num in range(num_participants):\n summary = self.data_generator.create_database_participant_summary(\n consentForStudyEnrollment=1,\n consentForGenomicsROR=1,\n withdrawalStatus=WithdrawalStatus.EARLY_OUT\n )\n\n member = self.data_generator.create_database_genomic_set_member(\n genomicSetId=gen_set.id,\n participantId=summary.participantId,\n genomeType=config.GENOME_TYPE_ARRAY\n )\n\n if num % 2 == 0:\n member_ids.append(member.id)\n pids_to_update.append(summary.participantId)\n\n with GenomicJobController(GenomicJob.GEM_RESULT_REPORTS) as controller:\n controller.gem_results_to_report_state()\n\n current_job_runs = self.job_run_dao.get_all()\n self.assertEqual(len(current_job_runs), 2)\n\n current_job_run = list(filter(lambda x: x.jobId == GenomicJob.GEM_RESULT_REPORTS, current_job_runs))[0]\n self.assertTrue(current_job_run.runResult == GenomicSubProcessResult.NO_RESULTS)\n\n current_members = self.member_dao.get_all()\n\n # 4 members updated correctly should return\n for member in current_members:\n if member.participantId in pids_to_update:\n member.gemA2ManifestJobRunId = gem_a2_job_run.id\n member.genomicWorkflowState = GenomicWorkflowState.GEM_RPT_READY\n self.member_dao.update(member)\n\n with GenomicJobController(GenomicJob.GEM_RESULT_REPORTS) as controller:\n controller.gem_results_to_report_state()\n\n current_job_runs = self.job_run_dao.get_all()\n self.assertEqual(len(current_job_runs), 3)\n\n current_job_run = 
list(filter(lambda x: x.jobId == GenomicJob.GEM_RESULT_REPORTS, current_job_runs))[1]\n self.assertTrue(current_job_run.runResult == GenomicSubProcessResult.SUCCESS)\n\n current_gem_report_states = self.report_state_dao.get_all()\n self.assertEqual(len(current_gem_report_states), len(pids_to_update))\n self.assertTrue(all(obj.event_type == 'result_ready' for obj in current_gem_report_states))\n self.assertTrue(all(obj.event_authored_time is not None for obj in current_gem_report_states))\n self.assertTrue(all(obj.module == 'gem' for obj in current_gem_report_states))\n self.assertTrue(\n all(obj.genomic_report_state == GenomicReportState.GEM_RPT_READY for obj in current_gem_report_states)\n )\n self.assertTrue(\n all(obj.genomic_report_state_str == GenomicReportState.GEM_RPT_READY.name for obj in\n current_gem_report_states)\n )\n self.assertTrue(\n all(obj.genomic_set_member_id in member_ids for obj in\n current_gem_report_states)\n )\n\n # 4 members inserted already should not return\n with GenomicJobController(GenomicJob.GEM_RESULT_REPORTS) as controller:\n controller.gem_results_to_report_state()\n\n current_job_runs = self.job_run_dao.get_all()\n self.assertEqual(len(current_job_runs), 4)\n\n current_job_run = list(filter(lambda x: x.jobId == GenomicJob.GEM_RESULT_REPORTS, current_job_runs))[2]\n self.assertTrue(current_job_run.runResult == GenomicSubProcessResult.NO_RESULTS)\n\n self.clear_table_after_test('genomic_member_report_state')\n\n def test_reconcile_informing_loop(self):\n event_dao = UserEventMetricsDao()\n event_dao.truncate() # for test suite\n il_dao = GenomicInformingLoopDao()\n\n for pid in range(8):\n self.data_generator.create_database_participant(participantId=1 + pid, biobankId=1 + pid)\n\n # Set up initial job run ID\n self.data_generator.create_database_genomic_job_run(\n jobId=GenomicJob.METRICS_FILE_INGEST,\n startTime=clock.CLOCK.now()\n )\n\n # create genomic set\n self.data_generator.create_database_genomic_set(\n genomicSetName='test',\n genomicSetCriteria='.',\n genomicSetVersion=1\n )\n # insert set members\n for b in [\"aou_array\", \"aou_wgs\"]:\n for i in range(1, 9):\n self.data_generator.create_database_genomic_set_member(\n participantId=i,\n genomicSetId=1,\n biobankId=i,\n collectionTubeId=100 + i,\n sampleId=10 + i,\n genomeType=b,\n )\n\n # Set up ingested metrics data\n events = ['gem.informing_loop.started',\n 'gem.informing_loop.screen8_no',\n 'gem.informing_loop.screen8_yes',\n 'hdr.informing_loop.started',\n 'gem.informing_loop.screen3',\n 'pgx.informing_loop.screen8_no',\n 'hdr.informing_loop.screen10_no']\n\n for p in range(4):\n for i in range(len(events)):\n self.data_generator.create_database_genomic_user_event_metrics(\n created=clock.CLOCK.now(),\n modified=clock.CLOCK.now(),\n participant_id=p + 1,\n created_at=datetime.datetime(2021, 12, 29, 00) + datetime.timedelta(hours=i),\n event_name=events[i],\n run_id=1,\n ignore_flag=0,\n )\n # Set up informing loop from message broker records\n decisions = [None, 'no', 'yes']\n for p in range(3):\n for i in range(2):\n self.data_generator.create_database_genomic_informing_loop(\n message_record_id=i,\n event_type='informing_loop_started' if i == 0 else 'informing_loop_decision',\n module_type='gem',\n participant_id=p + 1,\n decision_value=decisions[i],\n sample_id=100 + p,\n event_authored_time=datetime.datetime(2021, 12, 29, 00) + datetime.timedelta(hours=i)\n )\n\n # Test for no message but yes user event\n self.data_generator.create_database_genomic_user_event_metrics(\n 
created=clock.CLOCK.now(),\n modified=clock.CLOCK.now(),\n participant_id=6,\n created_at=datetime.datetime(2021, 12, 29, 00),\n event_name='gem.informing_loop.screen8_yes',\n run_id=1,\n ignore_flag=0,\n )\n\n # Run reconcile job\n genomic_pipeline.reconcile_informing_loop_responses()\n\n # Test mismatched GEM data ingested correctly\n pid_list = [1, 2, 3, 6]\n\n new_il_values = il_dao.get_latest_il_for_pids(\n pid_list=pid_list,\n module=\"gem\"\n )\n\n for value in new_il_values:\n self.assertEqual(\"yes\", value.decision_value)\n\n pid_list = [1, 2, 3, 4]\n for module in [\"hdr\", \"pgx\"]:\n new_il_values = il_dao.get_latest_il_for_pids(\n pid_list=pid_list,\n module=module\n )\n\n for value in new_il_values:\n self.assertEqual(\"no\", value.decision_value)\n self.assertIsNotNone(value.created_from_metric_id)\n\n def test_reconcile_message_broker_results_ready(self):\n # Create Test Participants' data\n # create genomic set\n self.data_generator.create_database_genomic_set(\n genomicSetName='test',\n genomicSetCriteria='.',\n genomicSetVersion=1\n )\n # Set up initial job run ID\n self.data_generator.create_database_genomic_job_run(\n jobId=GenomicJob.METRICS_FILE_INGEST,\n startTime=clock.CLOCK.now()\n )\n\n for pid in range(7):\n self.data_generator.create_database_participant(participantId=1 + pid, biobankId=1 + pid)\n\n # insert set members and event metrics records\n for i in range(1, 6):\n self.data_generator.create_database_genomic_set_member(\n participantId=i,\n genomicSetId=1,\n biobankId=i,\n collectionTubeId=100 + i,\n sampleId=10 + i,\n genomeType=\"aou_wgs\",\n )\n\n # 3 PGX records\n if i < 4:\n self.data_generator.create_database_genomic_user_event_metrics(\n participant_id=i,\n created_at=datetime.datetime(2022, 10, 6, 00),\n event_name=\"pgx.result_ready\",\n run_id=1,\n )\n\n # 1 HDR Positive\n if i == 4:\n self.data_generator.create_database_genomic_user_event_metrics(\n participant_id=i,\n created_at=datetime.datetime(2022, 10, 6, 00),\n event_name=\"hdr.result_ready.informative\",\n run_id=1,\n )\n\n # 1 HDR uninformative\n if i == 5:\n self.data_generator.create_database_genomic_user_event_metrics(\n participant_id=i,\n created_at=datetime.datetime(2022, 10, 6, 00),\n event_name=\"hdr.result_ready.uninformative\",\n run_id=1,\n )\n\n # Run job\n genomic_cvl_pipeline.reconcile_message_broker_results_ready()\n\n # Test correct data inserted\n report_state_dao = GenomicMemberReportStateDao()\n states = report_state_dao.get_all()\n\n self.assertEqual(5, len(states))\n\n pgx_records = [rec for rec in states if rec.module == \"pgx_v1\"]\n hdr_record_uninf = [rec for rec in states\n if rec.genomic_report_state == GenomicReportState.HDR_RPT_UNINFORMATIVE][0]\n\n hdr_record_pos = [rec for rec in states\n if rec.genomic_report_state == GenomicReportState.HDR_RPT_POSITIVE][0]\n\n for pgx_record in pgx_records:\n self.assertEqual(GenomicReportState.PGX_RPT_READY, pgx_record.genomic_report_state)\n self.assertEqual(\"PGX_RPT_READY\", pgx_record.genomic_report_state_str)\n self.assertEqual(int(pgx_record.sample_id), pgx_record.participant_id + 10)\n self.assertEqual(\"result_ready\", pgx_record.event_type)\n self.assertEqual(datetime.datetime(2022, 10, 6, 00), pgx_record.event_authored_time)\n self.assertIsNotNone(pgx_record.created_from_metric_id)\n\n self.assertEqual(\"HDR_RPT_UNINFORMATIVE\", hdr_record_uninf.genomic_report_state_str)\n self.assertEqual(int(hdr_record_uninf.sample_id), hdr_record_uninf.participant_id + 10)\n self.assertEqual(\"result_ready\", 
hdr_record_uninf.event_type)\n self.assertEqual(datetime.datetime(2022, 10, 6, 00), hdr_record_uninf.event_authored_time)\n self.assertIsNotNone(hdr_record_uninf.created_from_metric_id)\n\n self.assertEqual(\"HDR_RPT_POSITIVE\", hdr_record_pos.genomic_report_state_str)\n self.assertEqual(int(hdr_record_pos.sample_id), hdr_record_pos.participant_id + 10)\n self.assertEqual(\"result_ready\", hdr_record_pos.event_type)\n self.assertEqual(datetime.datetime(2022, 10, 6, 00), hdr_record_pos.event_authored_time)\n self.assertIsNotNone(hdr_record_pos.created_from_metric_id)\n\n def test_reconcile_message_broker_results_viewed(self):\n # Create Test Participants' data\n # create genomic set\n self.data_generator.create_database_genomic_set(\n genomicSetName='test',\n genomicSetCriteria='.',\n genomicSetVersion=1\n )\n # Set up initial job run ID\n self.data_generator.create_database_genomic_job_run(\n jobId=GenomicJob.METRICS_FILE_INGEST,\n startTime=clock.CLOCK.now()\n )\n\n for pid in range(3):\n self.data_generator.create_database_participant(participantId=1 + pid, biobankId=1 + pid)\n\n # insert set members and event metrics records\n for i in range(1, 3):\n self.data_generator.create_database_genomic_set_member(\n participantId=i,\n genomicSetId=1,\n biobankId=i,\n collectionTubeId=100 + i,\n sampleId=10 + i,\n genomeType=\"aou_wgs\",\n )\n\n # 1 PGX Viewed\n if i == 1:\n self.data_generator.create_database_genomic_user_event_metrics(\n participant_id=i,\n created_at=datetime.datetime(2022, 10, 6, 00),\n event_name=\"pgx.opened_at\",\n run_id=1,\n )\n\n # 1 HDR Viewed\n if i == 2:\n self.data_generator.create_database_genomic_user_event_metrics(\n participant_id=i,\n created_at=datetime.datetime(2022, 10, 6, 00),\n event_name=\"hdr.opened_at\",\n run_id=1,\n )\n\n genomic_cvl_pipeline.reconcile_message_broker_results_viewed()\n\n # Test correct data inserted\n result_viewed_dao = GenomicResultViewedDao()\n results = result_viewed_dao.get_all()\n\n self.assertEqual(2, len(results))\n\n for record in results:\n if record.participant_id == 1:\n self.assertEqual(\"pgx_v1\", record.module_type)\n else:\n self.assertEqual(\"hdr_v1\", record.module_type)\n self.assertEqual(int(record.sample_id), record.participant_id + 10)\n self.assertEqual(\"result_viewed\", record.event_type)\n self.assertEqual(datetime.datetime(2022, 10, 6, 00), record.first_viewed)\n self.assertIsNotNone(record.created_from_metric_id)\n\n def test_ingest_appointment_metrics_file(self):\n test_file = 'Genomic-Metrics-File-Appointment-Events-Test.json'\n bucket_name = 'test_bucket'\n sub_folder = 'appointment_events'\n pids = []\n\n for _ in range(4):\n summary = self.data_generator.create_database_participant_summary()\n pids.append(summary.participantId)\n\n test_file_path = f'{bucket_name}/{sub_folder}/{test_file}'\n\n appointment_data = test_data.load_test_data_json(\n \"Genomic-Metrics-File-Appointment-Events-Test.json\")\n appointment_data_str = json.dumps(appointment_data, indent=4)\n\n with open_cloud_file(test_file_path, mode='wb') as cloud_file:\n cloud_file.write(appointment_data_str.encode(\"utf-8\"))\n\n with GenomicJobController(GenomicJob.APPOINTMENT_METRICS_FILE_INGEST) as controller:\n controller.ingest_appointment_metrics_file(\n file_path=test_file_path,\n )\n\n all_metrics = self.appointment_metrics_dao.get_all()\n\n # should be 5 metric records for whats in json file\n self.assertEqual(len(all_metrics), 5)\n self.assertTrue(all((obj.participant_id in pids for obj in all_metrics)))\n 
self.assertTrue(all((obj.file_path == test_file_path for obj in all_metrics)))\n self.assertTrue(all((obj.appointment_event is not None for obj in all_metrics)))\n self.assertTrue(all((obj.created is not None for obj in all_metrics)))\n self.assertTrue(all((obj.modified is not None for obj in all_metrics)))\n self.assertTrue(all((obj.module_type is not None for obj in all_metrics)))\n self.assertTrue(all((obj.event_authored_time is not None for obj in all_metrics)))\n self.assertTrue(all((obj.event_type is not None for obj in all_metrics)))\n\n current_job_runs = self.job_run_dao.get_all()\n self.assertEqual(len(current_job_runs), 1)\n\n current_job_run = current_job_runs[0]\n self.assertTrue(current_job_run.jobId == GenomicJob.APPOINTMENT_METRICS_FILE_INGEST)\n self.assertTrue(current_job_run.runResult == GenomicSubProcessResult.SUCCESS)\n\n self.clear_table_after_test('genomic_appointment_event_metrics')\n\n def test_reconcile_appointments_with_metrics(self):\n fake_date = parser.parse('2020-05-29T08:00:01-05:00')\n\n for num in range(4):\n summary = self.data_generator.create_database_participant_summary()\n\n missing_json = {\n \"event\": \"appointment_updated\",\n \"eventAuthoredTime\": \"2022-09-16T17:18:38Z\",\n \"participantId\": f'P{summary.participantId}',\n \"messageBody\": {\n \"module_type\": \"hdr\",\n \"appointment_timestamp\": \"2022-09-19T19:30:00+00:00\",\n \"id\": 55,\n \"appointment_timezone\": \"America/Los_Angeles\",\n \"location\": \"CA\",\n \"contact_number\": \"18043704252\",\n \"language\": \"en\",\n \"source\": \"Color\"\n }\n }\n\n if num % 2 == 0:\n self.data_generator.create_database_genomic_appointment(\n message_record_id=num,\n appointment_id=num,\n event_type='appointment_scheduled',\n module_type='hdr',\n participant_id=summary.participantId,\n event_authored_time=fake_date,\n source='Color',\n appointment_timestamp=format_datetime(clock.CLOCK.now()),\n appointment_timezone='America/Los_Angeles',\n location='123 address st',\n contact_number='17348675309',\n language='en'\n )\n\n self.data_generator.create_database_genomic_appointment_metric(\n participant_id=summary.participantId,\n appointment_event=json.dumps(missing_json, indent=4) if num % 2 != 0 else 'foo',\n file_path='test_file_path',\n module_type='hdr',\n event_authored_time=fake_date,\n event_type='appointment_updated' if num % 2 != 0 else 'appointment_scheduled'\n )\n\n current_events = self.appointment_event_dao.get_all()\n # should be 2 initial appointment events\n self.assertEqual(len(current_events), 2)\n\n current_metrics = self.appointment_metrics_dao.get_all()\n # should be 4 initial appointment events\n self.assertEqual(len(current_metrics), 4)\n self.assertTrue(all(obj.reconcile_job_run_id is None for obj in current_metrics))\n\n with GenomicJobController(GenomicJob.APPOINTMENT_METRICS_RECONCILE) as controller:\n controller.reconcile_appointment_events_from_metrics()\n\n job_run = self.job_run_dao.get_all()\n self.assertEqual(len(job_run), 1)\n self.assertTrue(job_run[0].jobId == GenomicJob.APPOINTMENT_METRICS_RECONCILE)\n\n current_events = self.appointment_event_dao.get_all()\n # should be 4 appointment events 2 initial + 2 added\n self.assertEqual(len(current_events), 4)\n\n scheduled = list(filter(lambda x: x.event_type == 'appointment_scheduled', current_events))\n self.assertEqual(len(scheduled), 2)\n self.assertTrue(all(obj.created_from_metric_id is None for obj in scheduled))\n\n updated = list(filter(lambda x: x.event_type == 'appointment_updated', current_events))\n 
self.assertEqual(len(updated), 2)\n self.assertTrue(all(obj.created_from_metric_id is not None for obj in updated))\n\n current_metrics = self.appointment_metrics_dao.get_all()\n # should STILL be 4 initial appointment events\n self.assertEqual(len(current_metrics), 4)\n self.assertTrue(all(obj.reconcile_job_run_id is not None for obj in current_metrics))\n self.assertTrue(all(obj.reconcile_job_run_id == job_run[0].id for obj in current_metrics))\n\n self.clear_table_after_test('genomic_appointment_event_metrics')\n\n @mock.patch('rdr_service.services.email_service.EmailService.send_email')\n def test_check_appointments_gror_changed(self, email_mock):\n fake_date = parser.parse(\"2022-09-01T13:43:23\")\n notified_dao = GenomicAppointmentEventNotifiedDao()\n config.override_setting(config.GENOMIC_COLOR_PM_EMAIL, ['test@example.com'])\n num_participants = 4\n for num in range(num_participants):\n gror = num if num > 1 else 1\n summary = self.data_generator.create_database_participant_summary(\n consentForStudyEnrollment=1,\n consentForGenomicsROR=gror\n )\n self.data_generator.create_database_genomic_appointment(\n message_record_id=num,\n appointment_id=num,\n event_type='appointment_scheduled',\n module_type='hdr',\n participant_id=summary.participantId,\n event_authored_time=fake_date,\n source='Color',\n appointment_timestamp=format_datetime(clock.CLOCK.now()),\n appointment_timezone='America/Los_Angeles',\n location='123 address st',\n contact_number='17348675309',\n language='en'\n )\n\n changed_ppts = self.appointment_event_dao.get_appointments_gror_changed()\n self.assertEqual(2, len(changed_ppts))\n with GenomicJobController(GenomicJob.CHECK_APPOINTMENT_GROR_CHANGED) as controller:\n controller.check_appointments_gror_changed()\n\n self.assertEqual(email_mock.call_count, 1)\n notified_appointments = notified_dao.get_all()\n self.assertEqual(2, len(notified_appointments))\n\n # test notified not returned by query\n summary = self.data_generator.create_database_participant_summary(\n consentForStudyEnrollment=1,\n consentForGenomicsROR=2\n )\n self.data_generator.create_database_genomic_appointment(\n message_record_id=5,\n appointment_id=5,\n event_type='appointment_scheduled',\n module_type='hdr',\n participant_id=summary.participantId,\n event_authored_time=fake_date,\n source='Color',\n appointment_timestamp=format_datetime(clock.CLOCK.now()),\n appointment_timezone='America/Los_Angeles',\n location='123 address st',\n contact_number='17348675309',\n language='en'\n )\n\n changed_ppts = self.appointment_event_dao.get_appointments_gror_changed()\n self.assertEqual(1, len(changed_ppts))\n\n @mock.patch('rdr_service.services.email_service.EmailService.send_email')\n def test_check_gcr_14day_escalation(self, email_mock):\n fake_date = parser.parse(\"2022-09-01T13:43:23\")\n fake_date2 = parser.parse(\"2022-09-02T14:14:00\")\n fake_date3 = parser.parse(\"2022-09-03T15:15:00\")\n config.override_setting(config.GENOMIC_GCR_ESCALATION_EMAILS, ['test@example.com'])\n self.data_generator.create_database_genomic_set(\n genomicSetName='test',\n genomicSetCriteria='.',\n genomicSetVersion=1\n )\n pids = []\n for _ in range(6):\n summary = self.data_generator.create_database_participant_summary(\n consentForStudyEnrollment=1,\n consentForGenomicsROR=1\n )\n set_member = self.data_generator.create_database_genomic_set_member(\n participantId=summary.participantId,\n genomicSetId=1,\n biobankId=1001,\n collectionTubeId=100,\n sampleId=10,\n genomeType=\"aou_wgs\",\n )\n 
self.data_generator.create_database_genomic_member_report_state(\n participant_id=summary.participantId,\n genomic_report_state=GenomicReportState.HDR_RPT_POSITIVE,\n genomic_set_member_id=set_member.id,\n module='hdr_v1',\n event_authored_time=fake_date\n )\n pids.append(summary.participantId)\n\n # Appointment scheduled in future: don't notify\n self.data_generator.create_database_genomic_appointment(\n message_record_id=101,\n appointment_id=102,\n event_type='appointment_scheduled',\n module_type='hdr',\n participant_id=pids[0],\n event_authored_time=fake_date,\n source='Color',\n appointment_timestamp=format_datetime(clock.CLOCK.now()),\n appointment_timezone='America/Los_Angeles',\n location='123 address st',\n contact_number='17348675309',\n language='en'\n )\n\n # Appointment completed: don't notify\n self.data_generator.create_database_genomic_appointment(\n message_record_id=102,\n appointment_id=103,\n event_type='appointment_completed',\n module_type='hdr',\n participant_id=pids[1],\n event_authored_time=fake_date,\n source='Color',\n appointment_timestamp=fake_date,\n appointment_timezone='America/Los_Angeles',\n location='123 address st',\n contact_number='17348675309',\n language='en'\n )\n\n # Appointment scheduled then canceled: notify\n self.data_generator.create_database_genomic_appointment(\n message_record_id=103,\n appointment_id=104,\n event_type='appointment_scheduled',\n module_type='hdr',\n participant_id=pids[2],\n event_authored_time=fake_date2,\n source='Color',\n appointment_timestamp=format_datetime(clock.CLOCK.now()),\n appointment_timezone='America/Los_Angeles',\n location='123 address st',\n contact_number='17348675309',\n language='en'\n )\n self.data_generator.create_database_genomic_appointment(\n message_record_id=104,\n appointment_id=104,\n event_type='appointment_cancelled',\n module_type='hdr',\n participant_id=pids[2],\n event_authored_time=fake_date3,\n source='Color',\n appointment_timestamp=format_datetime(clock.CLOCK.now()),\n appointment_timezone='America/Los_Angeles',\n location='123 address st',\n contact_number='17348675309',\n language='en'\n )\n\n notified_dao = GenomicDefaultBaseDao(model_type=GenomicGCROutreachEscalationNotified)\n notified_dao.insert_bulk([{\n 'participant_id': pids[4],\n 'created': clock.CLOCK.now(),\n 'modified': clock.CLOCK.now(),\n 'message_sent': True\n },{\n 'participant_id': pids[5],\n 'created': clock.CLOCK.now(),\n 'modified': clock.CLOCK.now(),\n 'message_sent': False\n }])\n\n with clock.FakeClock(parser.parse('2022-11-1T05:15:00')):\n escalated_participants = self.report_state_dao.get_hdr_result_positive_no_appointment(num_days=14)\n results = [pid[0] for pid in escalated_participants]\n self.assertIn(pids[2], results)\n self.assertIn(pids[3], results)\n self.assertIn(pids[5], results)\n self.assertNotIn(pids[0], results)\n self.assertNotIn(pids[1], results)\n self.assertNotIn(pids[4], results)\n\n with GenomicJobController(GenomicJob.CHECK_GCR_OUTREACH_ESCALATION) as controller:\n controller.check_gcr_escalation(controller.job_id)\n\n self.assertEqual(email_mock.call_count, 3)\n self.assertEqual(email_mock.call_args.args[0].subject, 'GCR Outreach 14 Day Escalation')\n\n self.clear_table_after_test('genomic_gcr_outreach_escalation_notified')\n\n @mock.patch('rdr_service.services.email_service.EmailService.send_email')\n def test_check_gcr_14day_escalation_error(self, email_mock):\n email_mock.side_effect = ForbiddenError(mock.Mock(code=403))\n mock_slack_handler = mock.MagicMock()\n\n fake_date = 
parser.parse(\"2023-06-01T13:43:23\")\n\n config.override_setting(config.GENOMIC_GCR_ESCALATION_EMAILS, ['test@example.com'])\n self.data_generator.create_database_genomic_set(\n genomicSetName='test',\n genomicSetCriteria='.',\n genomicSetVersion=1\n )\n\n pids = []\n for _ in range(2):\n summary = self.data_generator.create_database_participant_summary(\n consentForStudyEnrollment=1,\n consentForGenomicsROR=1\n )\n set_member = self.data_generator.create_database_genomic_set_member(\n participantId=summary.participantId,\n genomicSetId=1,\n biobankId=1001,\n collectionTubeId=100,\n sampleId=10,\n genomeType=\"aou_wgs\",\n )\n self.data_generator.create_database_genomic_member_report_state(\n participant_id=summary.participantId,\n genomic_report_state=GenomicReportState.HDR_RPT_POSITIVE,\n genomic_set_member_id=set_member.id,\n module='hdr_v1',\n event_authored_time=fake_date\n )\n pids.append(summary.participantId)\n\n self.data_generator.create_database_genomic_appointment(\n message_record_id=102,\n appointment_id=103,\n event_type='appointment_completed',\n module_type='hdr',\n participant_id=pids[1],\n event_authored_time=fake_date,\n source='Color',\n appointment_timestamp=fake_date,\n appointment_timezone='America/Los_Angeles',\n location='123 address st',\n contact_number='17348675309',\n language='en'\n )\n\n with GenomicJobController(GenomicJob.CHECK_GCR_OUTREACH_ESCALATION) as controller:\n controller.genomic_alert_slack = mock_slack_handler\n controller.check_gcr_escalation(controller.job_id)\n\n notified_dao = GenomicDefaultBaseDao(model_type=GenomicGCROutreachEscalationNotified)\n with notified_dao.session() as session:\n notification = session.query(\n GenomicGCROutreachEscalationNotified\n ).filter(\n GenomicGCROutreachEscalationNotified.participant_id == pids[0]\n ).one()\n\n self.assertEqual(email_mock.call_count, 1)\n self.assertEqual(mock_slack_handler.send_message_to_webhook.call_count, 1)\n self.assertEqual(False, notification.message_sent)\n\n self.clear_table_after_test('genomic_gcr_outreach_escalation_notified')\n\n @mock.patch('rdr_service.services.email_service.EmailService.send_email')\n def test_check_gcr_ce_escalation(self, email_mock):\n fake_date = parser.parse(\"2022-09-01T13:43:23\")\n fake_date2 = parser.parse(\"2022-09-02T14:14:00\")\n fake_date3 = parser.parse(\"2022-09-03T15:15:00\")\n config.override_setting(config.GENOMIC_GCR_ESCALATION_EMAILS, ['test@example.com'])\n self.data_generator.create_database_genomic_set(\n genomicSetName='test',\n genomicSetCriteria='.',\n genomicSetVersion=1\n )\n pids = []\n for _ in range(6):\n summary = self.data_generator.create_database_participant_summary(\n consentForStudyEnrollment=1,\n consentForGenomicsROR=1\n )\n set_member = self.data_generator.create_database_genomic_set_member(\n participantId=summary.participantId,\n genomicSetId=1,\n biobankId=1001,\n collectionTubeId=100,\n sampleId=10,\n genomeType=\"aou_wgs\",\n participantOrigin='careevolution'\n )\n self.data_generator.create_database_genomic_member_report_state(\n participant_id=summary.participantId,\n genomic_report_state=GenomicReportState.HDR_RPT_POSITIVE,\n genomic_set_member_id=set_member.id,\n module='hdr_v1',\n event_authored_time=fake_date\n )\n pids.append(summary.participantId)\n\n # Appointment scheduled in future: don't notify\n self.data_generator.create_database_genomic_appointment(\n message_record_id=101,\n appointment_id=102,\n event_type='appointment_scheduled',\n module_type='hdr',\n participant_id=pids[0],\n 
event_authored_time=fake_date,\n source='Color',\n appointment_timestamp=format_datetime(clock.CLOCK.now()),\n appointment_timezone='America/Los_Angeles',\n location='123 address st',\n contact_number='17348675309',\n language='en'\n )\n\n # Appointment completed: don't notify\n self.data_generator.create_database_genomic_appointment(\n message_record_id=102,\n appointment_id=103,\n event_type='appointment_completed',\n module_type='hdr',\n participant_id=pids[1],\n event_authored_time=fake_date,\n source='Color',\n appointment_timestamp=fake_date,\n appointment_timezone='America/Los_Angeles',\n location='123 address st',\n contact_number='17348675309',\n language='en'\n )\n\n # Appointment scheduled then canceled: notify\n self.data_generator.create_database_genomic_appointment(\n message_record_id=103,\n appointment_id=104,\n event_type='appointment_scheduled',\n module_type='hdr',\n participant_id=pids[2],\n event_authored_time=fake_date2,\n source='Color',\n appointment_timestamp=format_datetime(clock.CLOCK.now()),\n appointment_timezone='America/Los_Angeles',\n location='123 address st',\n contact_number='17348675309',\n language='en'\n )\n self.data_generator.create_database_genomic_appointment(\n message_record_id=104,\n appointment_id=104,\n event_type='appointment_cancelled',\n module_type='hdr',\n participant_id=pids[2],\n event_authored_time=fake_date3,\n source='Color',\n appointment_timestamp=format_datetime(clock.CLOCK.now()),\n appointment_timezone='America/Los_Angeles',\n location='123 address st',\n contact_number='17348675309',\n language='en'\n )\n\n notified_dao = GenomicDefaultBaseDao(model_type=GenomicGCROutreachEscalationNotified)\n notified_dao.insert_bulk([{\n 'participant_id': pids[4],\n 'created': clock.CLOCK.now(),\n 'modified': clock.CLOCK.now(),\n 'message_sent': True\n },{\n 'participant_id': pids[5],\n 'created': clock.CLOCK.now(),\n 'modified': clock.CLOCK.now(),\n 'message_sent': False\n }])\n\n with clock.FakeClock(parser.parse('2022-11-1T05:15:00')):\n escalated_participants = self.report_state_dao.get_hdr_result_positive_no_appointment(\n num_days=30,\n participant_origin='careevolution'\n )\n results = [pid[0] for pid in escalated_participants]\n\n self.assertIn(pids[2], results)\n self.assertIn(pids[3], results)\n self.assertIn(pids[5], results)\n self.assertNotIn(pids[0], results)\n self.assertNotIn(pids[1], results)\n self.assertNotIn(pids[4], results)\n\n with GenomicJobController(GenomicJob.CHECK_GCR_CE_OUTREACH_ESCALATION) as controller:\n controller.check_gcr_escalation(controller.job_id)\n\n self.assertEqual(email_mock.call_count, 3)\n self.assertEqual(email_mock.call_args.args[0].subject, 'GCR Outreach 30 Day Escalation')\n\n self.clear_table_after_test('genomic_gcr_outreach_escalation_notified')\n\n @mock.patch('rdr_service.genomic.genomic_job_controller.GenomicJobController.execute_cloud_task')\n def test_execute_auto_generation_from_last_run(self, cloud_task_mock):\n\n with GenomicJobController(\n GenomicJob.PR_PR_WORKFLOW\n ) as controller:\n controller.job_result = GenomicSubProcessResult.ERROR\n controller._end_run()\n controller.execute_auto_generation_from_cloud_task()\n\n last_job_run_status = self.job_run_dao.get_last_run_status_for_job_id(job_id=GenomicJob.PR_PR_WORKFLOW)\n self.assertTrue(last_job_run_status is not None)\n self.assertTrue(last_job_run_status[0] == GenomicSubProcessResult.ERROR)\n\n # task SHOULD NOT be called\n self.assertEqual(cloud_task_mock.called, False)\n self.assertEqual(cloud_task_mock.call_count, 0)\n\n 
with GenomicJobController(\n GenomicJob.PR_PR_WORKFLOW\n ) as controller:\n controller.job_result = GenomicSubProcessResult.SUCCESS\n controller._end_run()\n controller.execute_auto_generation_from_cloud_task()\n\n last_job_run_status = self.job_run_dao.get_last_run_status_for_job_id(job_id=GenomicJob.PR_PR_WORKFLOW)\n self.assertTrue(last_job_run_status is not None)\n self.assertTrue(last_job_run_status[0] == GenomicSubProcessResult.SUCCESS)\n\n # task SHOULD be called\n self.assertEqual(cloud_task_mock.called, True)\n self.assertTrue(cloud_task_mock.call_args[1].get('payload').get('manifest_type') == 'p0')\n self.assertTrue(cloud_task_mock.call_args[1].get('task_queue') == 'genomic-generate-manifest')\n\n all_job_runs = self.job_run_dao.get_all()\n self.assertEqual(len(all_job_runs), 2)\n self.assertTrue(all(obj.runResult in [GenomicSubProcessResult.SUCCESS, GenomicSubProcessResult.ERROR] for obj\n in all_job_runs))\n self.assertTrue(all(obj.jobId == GenomicJob.PR_PR_WORKFLOW for obj in all_job_runs))\n\n",
"step-ids": [
9,
13,
17,
22,
25
]
}
|
[
9,
13,
17,
22,
25
] |
n = 1
ip = []
ma = []
l = [0, 0, 0, 0, 0, 0, 0]  # counts: class A, B, C, D, E, invalid (IP/mask), private
while n != 0:
a = input().strip().split("~")
n = len(a)
if n == 1:
break
ip.append(a[0])
ma.append(a[1])
for i in ip:
ipn = i.split(".")
try:
if 1 <= int(ipn[0]) <= 126:
p = 0
elif 128 <= int(ipn[0]) <= 191:
p = 1
elif 192 <= int(ipn[0]) <= 223:
p = 2
elif 224 <= int(ipn[0]) <= 239:
p = 3
        elif 240 <= int(ipn[0]) <= 255:
p = 4
        else:
            # 0, 127 (loopback) and any other unmatched first octet are skipped
            continue
if 0 <= int(ipn[1]) <= 255:
if int(ipn[0]) == 10:
p = 6
elif int(ipn[0]) == 172 and 16 <= int(ipn[1]) <= 31:
p = 6
elif int(ipn[0]) == 192 and int(ipn[1]) == 168:
p = 6
if 0 <= int(ipn[2]) <= 255:
if 0 <= int(ipn[3]) <= 255:
l[p] += 1
else:
l[5] += 1
else:
l[5] += 1
else:
l[5] += 1
    except (ValueError, IndexError):  # malformed address (non-numeric or missing octets)
l[5] += 1
for m in ma:
    mn = m.split(".")
    # expand each octet to 8 bits; a valid mask is contiguous 1-bits then 0-bits
    b = ''.join(format(int(x), '08b') for x in mn)
    le = b.find("0")
    ri = b.rfind("1")
    if ri == -1 or (le != -1 and ri > le):  # no 1-bits at all, or a 1-bit after a 0-bit
        l[5] += 1
for o in l:
    print(str(o), end=" ")
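
# Example session (assumed input format "ip~mask"; a line without "~" stops input):
#   10.1.2.3~255.255.255.0   -> counted as private (mask is valid)
#   1.2.3.4~255.0.255.0      -> class A, and the non-contiguous mask adds one invalid
# The seven printed counts are: class A, B, C, D, E, invalid, private.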
|
normal
|
{
"blob_id": "4a13f05fbbe598242f5663d27d578d2eb977e103",
"index": 6137,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile n != 0:\n a = input().strip().split('~')\n n = len(a)\n if n == 1:\n break\n ip.append(a[0])\n ma.append(a[1])\nfor i in ip:\n ipn = i.split('.')\n try:\n if 1 <= int(ipn[0]) <= 126:\n p = 0\n elif 128 <= int(ipn[0]) <= 191:\n p = 1\n elif 192 <= int(ipn[0]) <= 223:\n p = 2\n elif 224 <= int(ipn[0]) <= 239:\n p = 3\n elif 240 <= int(ipn(0)) <= 255:\n p = 4\n elif int(ipn[0]) == 0 or 127:\n continue\n if 0 <= int(ipn[1]) <= 255:\n if int(ipn[0]) == 10:\n p = 6\n elif int(ipn[0]) == 172 and 16 <= int(ipn[1]) <= 31:\n p = 6\n elif int(ipn[0]) == 192 and int(ipn[1]) == 168:\n p = 6\n if 0 <= int(ipn[2]) <= 255:\n if 0 <= int(ipn[3]) <= 255:\n l[p] += 1\n else:\n l[5] += 1\n else:\n l[5] += 1\n else:\n l[5] += 1\n except:\n l[5] += 1\nfor m in ma:\n mn = m.split('.')\n b = bin(int(''.join(mn)))\n le = b.find('0')\n ri = b.rfind('1')\n if le > ri:\n l[5] += 1\nfor o in l:\n print(str(o), end=' ')\n",
"step-3": "n = 1\nip = []\nma = []\nl = [0, 0, 0, 0, 0, 0, 0]\nwhile n != 0:\n a = input().strip().split('~')\n n = len(a)\n if n == 1:\n break\n ip.append(a[0])\n ma.append(a[1])\nfor i in ip:\n ipn = i.split('.')\n try:\n if 1 <= int(ipn[0]) <= 126:\n p = 0\n elif 128 <= int(ipn[0]) <= 191:\n p = 1\n elif 192 <= int(ipn[0]) <= 223:\n p = 2\n elif 224 <= int(ipn[0]) <= 239:\n p = 3\n elif 240 <= int(ipn(0)) <= 255:\n p = 4\n elif int(ipn[0]) == 0 or 127:\n continue\n if 0 <= int(ipn[1]) <= 255:\n if int(ipn[0]) == 10:\n p = 6\n elif int(ipn[0]) == 172 and 16 <= int(ipn[1]) <= 31:\n p = 6\n elif int(ipn[0]) == 192 and int(ipn[1]) == 168:\n p = 6\n if 0 <= int(ipn[2]) <= 255:\n if 0 <= int(ipn[3]) <= 255:\n l[p] += 1\n else:\n l[5] += 1\n else:\n l[5] += 1\n else:\n l[5] += 1\n except:\n l[5] += 1\nfor m in ma:\n mn = m.split('.')\n b = bin(int(''.join(mn)))\n le = b.find('0')\n ri = b.rfind('1')\n if le > ri:\n l[5] += 1\nfor o in l:\n print(str(o), end=' ')\n",
"step-4": "n = 1\nip = []\nma = []\nl = [0, 0, 0, 0, 0, 0, 0] # a, b, c, d, e, wpm, pr\nwhile n != 0:\n a = input().strip().split(\"~\")\n n = len(a)\n if n == 1:\n break\n ip.append(a[0])\n ma.append(a[1])\n\nfor i in ip:\n ipn = i.split(\".\")\n try:\n if 1 <= int(ipn[0]) <= 126:\n p = 0\n elif 128 <= int(ipn[0]) <= 191:\n p = 1\n elif 192 <= int(ipn[0]) <= 223:\n p = 2\n elif 224 <= int(ipn[0]) <= 239:\n p = 3\n elif 240 <= int(ipn(0)) <= 255:\n p = 4\n elif int(ipn[0]) == 0 or 127:\n continue\n if 0 <= int(ipn[1]) <= 255:\n if int(ipn[0]) == 10:\n p = 6\n elif int(ipn[0]) == 172 and 16 <= int(ipn[1]) <= 31:\n p = 6\n elif int(ipn[0]) == 192 and int(ipn[1]) == 168:\n p = 6\n if 0 <= int(ipn[2]) <= 255:\n if 0 <= int(ipn[3]) <= 255:\n l[p] += 1\n else:\n l[5] += 1\n else:\n l[5] += 1\n else:\n l[5] += 1\n except:\n l[5] += 1\n \nfor m in ma:\n mn = m.split(\".\")\n b = bin(int(''.join(mn)))\n le = b.find(\"0\")\n ri = b.rfind(\"1\")\n if le > ri:\n l[5] += 1\n\nfor o in l:\n print(str(o),end=\" \")",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 14 09:53:10 2021
@author: kaouther
"""
# -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
import pandas as pd
#path = '/home/kaouther/Documents/Internship/pre_process/input_files/heart_forKaouther.xlsx'
#path = '/home/kaouther/Documents/Internship/pre_process/input_files/tissues_9m_forKaouther3.xlsx'
path = '/home/kaouther/Documents/Internship/pre_process/input_files/clean/TabulaMuris_Senis_Brain.xlsx'
#path=input('enter the complete path to your input file')
#path = input('Paste the absolute path to the file') #enter the path to the heart_forKaouther.xlsx
#df = pd.read_csv(path, delimiter = "\t")
df = pd.read_excel(path)
# function to extract the last character of a string
# (the biological replicate label is the final character of each column name)
def get_rep_name(string):
    return string[-1:]
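# e.g. get_rep_name("tissue_sample_A") -> "A"   (illustrative column name)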
# get column names (samples & biological replicates)
column_names = df.columns
column_names = column_names.delete([0])  # drop the first column (gene names)
#get only biological replicates
biological_rep = []
mean_replicates = dict()
for name in column_names:
if get_rep_name(name) not in biological_rep:
#print(get_rep_name(name))
biological_rep.append(name[-1:])
# dictionary storing, per replicate type, the running sum of values and the number of samples summed
for i in range (0,len(biological_rep),1):
mean_replicates['mean_replicate_'+biological_rep[i]] = [0]*len(df)
mean_replicates['nb_itteration_'+biological_rep[i]] = [0]*len(df)
for k in range (0,len(df),1):
for i in range (0, len(column_names),1):
for j in biological_rep:
if j in get_rep_name(column_names[i]):
mean_replicates['mean_replicate_'+j][k]+= df.loc[k,column_names[i]]
mean_replicates['nb_itteration_'+j][k]+=1
dico2 = dict()  # stores (sum, count) tuples for each gene row
dico3 = dict()  # stores the computed means
for i in range (0,len(biological_rep),1):
dico3['mean_replicate_'+biological_rep[i]] = [0]*len(df)
#get list of mean replicates
list_mean_replicates =[]
for i in range (0,len(biological_rep),1):
list_mean_replicates.append('mean_replicate_'+biological_rep[i])
# pair each replicate's running sum with its sample count as (sum, count) tuples
for key in list_mean_replicates:
    for key2 in mean_replicates:
        if key != key2 and get_rep_name(key) == get_rep_name(key2):
            print(key, key2)
            dico2[key] = list(zip(mean_replicates[key], mean_replicates[key2]))
# compute the average per gene for each mean-replicate column
for key in dico2:
for i in range(0,len(df),1):
cal = round(dico2[key][i][0]/ dico2[key][i][1])
dico3[key][i]= cal
#store results in new df in new columns
final_df = df.copy()
for mean in list_mean_replicates:
final_df[mean] = 0
for i in range(0,len(final_df),1):
for key in list_mean_replicates:
final_df.loc[i,key] = dico3[key][i]
# export the dataframe to Excel
final_df.to_excel('/home/kaouther/Documents/Internship/pre_process/output_files/brain_matrix.xlsx', index=False, header=True)
#final_df.to_csv('/home/kaouther/Documents/Internship/pre_process/output_files/'+'tissues_mean.csv', index = False, header=True)
#final_df.to_excel('/home/kaouther/Documents/Internship/pre_process/output_files/'+'tissues_matrix.xlsx', index = False, header=True)
#file_name= input('file name')
#final_df.to_excel(file_name+'.xlsx', index = False, header=True)
duplicateRowsDF = final_df[final_df.iloc[:, 0].duplicated()]  # rows whose gene name already appeared earlier
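# A possible follow-up (an assumption, not in the original script): drop the
# duplicated gene rows before export, e.g.
#   final_df = final_df.drop_duplicates(subset=final_df.columns[0])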
|
normal
|
{
"blob_id": "a3588a521a87765d215fd2048407e5e54fb87e94",
"index": 4276,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_rep_name(string):\n return string[-1:]\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef get_rep_name(string):\n return string[-1:]\n\n\n<mask token>\nfor name in column_names:\n if get_rep_name(name) not in biological_rep:\n biological_rep.append(name[-1:])\nfor i in range(0, len(biological_rep), 1):\n mean_replicates['mean_replicate_' + biological_rep[i]] = [0] * len(df)\n mean_replicates['nb_itteration_' + biological_rep[i]] = [0] * len(df)\nfor k in range(0, len(df), 1):\n for i in range(0, len(column_names), 1):\n for j in biological_rep:\n if j in get_rep_name(column_names[i]):\n mean_replicates['mean_replicate_' + j][k] += df.loc[k,\n column_names[i]]\n mean_replicates['nb_itteration_' + j][k] += 1\n<mask token>\nfor i in range(0, len(biological_rep), 1):\n dico3['mean_replicate_' + biological_rep[i]] = [0] * len(df)\n<mask token>\nfor i in range(0, len(biological_rep), 1):\n list_mean_replicates.append('mean_replicate_' + biological_rep[i])\nfor key in list_mean_replicates:\n for key2 in mean_replicates:\n if key != key2 and get_rep_name(key) == get_rep_name(key2):\n print(key, key2)\n dico2[key] = list(zip(mean_replicates[key], mean_replicates[key2]))\nfor key in dico2:\n for i in range(0, len(df), 1):\n cal = round(dico2[key][i][0] / dico2[key][i][1])\n dico3[key][i] = cal\n<mask token>\nfor mean in list_mean_replicates:\n final_df[mean] = 0\nfor i in range(0, len(final_df), 1):\n for key in list_mean_replicates:\n final_df.loc[i, key] = dico3[key][i]\nfinal_df.to_excel(\n '/home/kaouther/Documents/Internship/pre_process/output_files/brain_matrix.xlsx'\n , index=False, header=True)\n<mask token>\n",
"step-4": "<mask token>\npath = (\n '/home/kaouther/Documents/Internship/pre_process/input_files/clean/TabulaMuris_Senis_Brain.xlsx'\n )\ndf = pd.read_excel(path)\n\n\ndef get_rep_name(string):\n return string[-1:]\n\n\ncolumn_names = df.columns\ncolumn_names = column_names.delete([0])\nbiological_rep = []\nmean_replicates = dict()\nfor name in column_names:\n if get_rep_name(name) not in biological_rep:\n biological_rep.append(name[-1:])\nfor i in range(0, len(biological_rep), 1):\n mean_replicates['mean_replicate_' + biological_rep[i]] = [0] * len(df)\n mean_replicates['nb_itteration_' + biological_rep[i]] = [0] * len(df)\nfor k in range(0, len(df), 1):\n for i in range(0, len(column_names), 1):\n for j in biological_rep:\n if j in get_rep_name(column_names[i]):\n mean_replicates['mean_replicate_' + j][k] += df.loc[k,\n column_names[i]]\n mean_replicates['nb_itteration_' + j][k] += 1\ndico2 = dict()\ndico3 = dict()\nfor i in range(0, len(biological_rep), 1):\n dico3['mean_replicate_' + biological_rep[i]] = [0] * len(df)\nlist_mean_replicates = []\nfor i in range(0, len(biological_rep), 1):\n list_mean_replicates.append('mean_replicate_' + biological_rep[i])\nfor key in list_mean_replicates:\n for key2 in mean_replicates:\n if key != key2 and get_rep_name(key) == get_rep_name(key2):\n print(key, key2)\n dico2[key] = list(zip(mean_replicates[key], mean_replicates[key2]))\nfor key in dico2:\n for i in range(0, len(df), 1):\n cal = round(dico2[key][i][0] / dico2[key][i][1])\n dico3[key][i] = cal\nfinal_df = df.copy()\nfor mean in list_mean_replicates:\n final_df[mean] = 0\nfor i in range(0, len(final_df), 1):\n for key in list_mean_replicates:\n final_df.loc[i, key] = dico3[key][i]\nfinal_df.to_excel(\n '/home/kaouther/Documents/Internship/pre_process/output_files/brain_matrix.xlsx'\n , index=False, header=True)\nduplicateRowsDF = final_df[final_df.iloc[:, 0].duplicated()]\n",
"step-5": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Apr 14 09:53:10 2021\n\n@author: kaouther\n\"\"\"\n\n# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\n\nThis is a temporary script file.\n\"\"\"\nimport pandas as pd\n#path = '/home/kaouther/Documents/Internship/pre_process/input_files/heart_forKaouther.xlsx'\n#path = '/home/kaouther/Documents/Internship/pre_process/input_files/tissues_9m_forKaouther3.xlsx'\n\npath = '/home/kaouther/Documents/Internship/pre_process/input_files/clean/TabulaMuris_Senis_Brain.xlsx'\n#path=input('enter the complete path to your input file')\n\n#path = input('Paste the absolute path to the file') #enter the path to the heart_forKaouther.xlsx\n#df = pd.read_csv(path, delimiter = \"\\t\")\ndf = pd.read_excel(path)\n#function de extract the last caracterfrom a string\ndef get_rep_name(string):\n return (string[-1:])\n\n#get columns names (samples & biological replicates)\ncolumn_names = df.columns\ncolumn_names = column_names.delete([0]) #remove gene\n\n#get only biological replicates \nbiological_rep=[]\nmean_replicates= dict()\nfor name in column_names:\n if get_rep_name(name) not in biological_rep:\n #print(get_rep_name(name))\n biological_rep.append(name[-1:])\n \n#dictionnary to store the sum of values of a type of biological rep and nb of iteration\nfor i in range (0,len(biological_rep),1): \n mean_replicates['mean_replicate_'+biological_rep[i]] = [0]*len(df)\n mean_replicates['nb_itteration_'+biological_rep[i]] = [0]*len(df)\nfor k in range (0,len(df),1):\n \n for i in range (0, len(column_names),1):\n for j in biological_rep:\n if j in get_rep_name(column_names[i]):\n mean_replicates['mean_replicate_'+j][k]+= df.loc[k,column_names[i]]\n mean_replicates['nb_itteration_'+j][k]+=1\n\n\ndico2 = dict() #store tuples sum and iteration on each line\ndico3 = dict() #store the mean calculation \n\nfor i in range (0,len(biological_rep),1):\n dico3['mean_replicate_'+biological_rep[i]] = [0]*len(df)\n\n#get list of mean replicates\nlist_mean_replicates =[]\nfor i in range (0,len(biological_rep),1):\n list_mean_replicates.append('mean_replicate_'+biological_rep[i])\n#dico to store as a tuple the sum and iteration for each mean rep\nfor key in list_mean_replicates:\n for key2 in mean_replicates:\n if key != key2 and get_rep_name(key) == get_rep_name(key2):\n print( key,key2)\n \n dico2[key]= list(zip((mean_replicates[key]),mean_replicates[key2]))\n#dico to calculate the average per gene per mean replicate \nfor key in dico2:\n for i in range(0,len(df),1): \n cal = round(dico2[key][i][0]/ dico2[key][i][1])\n dico3[key][i]= cal\n#store results in new df in new columns\nfinal_df = df.copy()\nfor mean in list_mean_replicates:\n final_df[mean] = 0\n \nfor i in range(0,len(final_df),1):\n for key in list_mean_replicates:\n final_df.loc[i,key] = dico3[key][i]\n#export as excel the df \nfinal_df.to_excel ('/home/kaouther/Documents/Internship/pre_process/output_files/brain_matrix.xlsx', index = False, header=True)\n#final_df.to_csv('/home/kaouther/Documents/Internship/pre_process/output_files/'+'tissues_mean.csv', index = False, header=True)\n#final_df.to_excel('/home/kaouther/Documents/Internship/pre_process/output_files/'+'tissues_matrix.xlsx', index = False, header=True)\n#file_name= input('file name')\n#final_df.to_excel(file_name+'.xlsx', index = False, header=True)\n\nduplicateRowsDF = final_df[final_df.iloc[:,0].duplicated()]\n",
"step-ids": [
0,
1,
2,
3,
5
]
}
|
[
0,
1,
2,
3,
5
] |
from inotifier import Notifier
from IPython.display import display, Audio, HTML
import pkg_resources
import time
class AudioPopupNotifier(Notifier):
"""Play Sound and show Popup upon cell completion"""
def __init__(self, message="Cell Completed", audio_file="pad_confirm.wav"):
super(AudioPopupNotifier, self).__init__()
self.message = message
self.audio_file = audio_file
try:
self.audio = pkg_resources.resource_string('inotifications', 'sounds/{}'.format(audio_file))
except IOError:
self.audio = audio_file
self.template = '<script type="text/javascript">alert("{}");</script>'
def notify(self):
display(Audio(self.audio, autoplay=True))
time.sleep(3)
display(HTML(self.template.format(self.message)))
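
# Minimal usage sketch (assumed Jupyter/IPython context; names are from this class):
#   notifier = AudioPopupNotifier(message="Cell Completed")
#   notifier.notify()   # plays the bundled sound, then shows the alert popup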
|
normal
|
{
"blob_id": "94a3a74260fac58b4cad7422608f91ae3a1a0272",
"index": 6247,
"step-1": "<mask token>\n\n\nclass AudioPopupNotifier(Notifier):\n <mask token>\n <mask token>\n\n def notify(self):\n display(Audio(self.audio, autoplay=True))\n time.sleep(3)\n display(HTML(self.template.format(self.message)))\n",
"step-2": "<mask token>\n\n\nclass AudioPopupNotifier(Notifier):\n <mask token>\n\n def __init__(self, message='Cell Completed', audio_file='pad_confirm.wav'):\n super(AudioPopupNotifier, self).__init__()\n self.message = message\n self.audio_file = audio_file\n try:\n self.audio = pkg_resources.resource_string('inotifications',\n 'sounds/{}'.format(audio_file))\n except IOError:\n self.audio = audio_file\n self.template = '<script type=\"text/javascript\">alert(\"{}\");</script>'\n\n def notify(self):\n display(Audio(self.audio, autoplay=True))\n time.sleep(3)\n display(HTML(self.template.format(self.message)))\n",
"step-3": "<mask token>\n\n\nclass AudioPopupNotifier(Notifier):\n \"\"\"Play Sound and show Popup upon cell completion\"\"\"\n\n def __init__(self, message='Cell Completed', audio_file='pad_confirm.wav'):\n super(AudioPopupNotifier, self).__init__()\n self.message = message\n self.audio_file = audio_file\n try:\n self.audio = pkg_resources.resource_string('inotifications',\n 'sounds/{}'.format(audio_file))\n except IOError:\n self.audio = audio_file\n self.template = '<script type=\"text/javascript\">alert(\"{}\");</script>'\n\n def notify(self):\n display(Audio(self.audio, autoplay=True))\n time.sleep(3)\n display(HTML(self.template.format(self.message)))\n",
"step-4": "from inotifier import Notifier\nfrom IPython.display import display, Audio, HTML\nimport pkg_resources\nimport time\n\n\nclass AudioPopupNotifier(Notifier):\n \"\"\"Play Sound and show Popup upon cell completion\"\"\"\n\n def __init__(self, message='Cell Completed', audio_file='pad_confirm.wav'):\n super(AudioPopupNotifier, self).__init__()\n self.message = message\n self.audio_file = audio_file\n try:\n self.audio = pkg_resources.resource_string('inotifications',\n 'sounds/{}'.format(audio_file))\n except IOError:\n self.audio = audio_file\n self.template = '<script type=\"text/javascript\">alert(\"{}\");</script>'\n\n def notify(self):\n display(Audio(self.audio, autoplay=True))\n time.sleep(3)\n display(HTML(self.template.format(self.message)))\n",
"step-5": "from inotifier import Notifier\nfrom IPython.display import display, Audio, HTML\n\nimport pkg_resources\nimport time\n\n\nclass AudioPopupNotifier(Notifier):\n \"\"\"Play Sound and show Popup upon cell completion\"\"\"\n\n def __init__(self, message=\"Cell Completed\", audio_file=\"pad_confirm.wav\"):\n super(AudioPopupNotifier, self).__init__()\n self.message = message\n self.audio_file = audio_file\n try:\n self.audio = pkg_resources.resource_string('inotifications', 'sounds/{}'.format(audio_file))\n except IOError:\n self.audio = audio_file\n\n self.template = '<script type=\"text/javascript\">alert(\"{}\");</script>'\n\n def notify(self):\n display(Audio(self.audio, autoplay=True))\n time.sleep(3)\n display(HTML(self.template.format(self.message)))\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
import pandas as pd
import numpy as np
import os
import matplotlib.pyplot as plt
from datetime import datetime
import statsmodels.api as sm
from quant.stock.stock import Stock
from quant.stock.date import Date
from quant.utility_fun.factor_preprocess import FactorPreProcess
from quant.utility_fun.write_excel import WriteExcel
def factor_neutral(factor_series, neutral_frame):
    """
    Neutralization: regress the factor on the neutral exposures with OLS and
    return the regression coefficients plus the residual (neutralized) factor.
    """
    concat_data = pd.concat([factor_series, neutral_frame], axis=1)
    concat_data = concat_data.dropna()
    factor_val = concat_data.iloc[:, 0]   # .iloc replaces the removed pandas .ix accessor
    neutral_val = concat_data.iloc[:, 1:]
    model = sm.OLS(factor_val.values, neutral_val.values)
    regress = model.fit()
    params = regress.params
    params = pd.DataFrame(params, index=neutral_val.columns, columns=['param'])
    factor_res = factor_val - regress.predict(neutral_val)
    return params, factor_res
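
# Minimal usage sketch (hypothetical inputs, for illustration only):
#   industry_dummy = pd.get_dummies(industry_series)
#   params, neutral_alpha = factor_neutral(alpha_series, industry_dummy)
# neutral_alpha is the factor with industry (or style) exposure regressed out.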
def cal_factor_alpha_return(factor_name, beg_date, end_date, cal_period):
# param
###############################################################################################################
###############################################################################################################
group_number = 8
year_trade_days = 242
min_stock_number = 100
out_path = 'E:\\3_Data\\5_stock_data\\3_alpha_model\\'
    alpha_remove_extreme_value = True  # winsorize extreme alpha values
    alpha_standard = True  # standardize the alpha factor
    alpha_industry_neutral = True  # neutralize the alpha factor by industry
    alpha_barra_style_neutral = True  # neutralize the alpha factor by Barra style factors
# read data
###############################################################################################################
###############################################################################################################
price = Stock().get_factor_h5("PriceCloseAdjust", None, "alpha_dfc")
alpha_val = Stock().get_factor_h5(factor_name, None, "alpha_dfc")
industry = Stock().get_factor_h5("industry_citic1", None, "primary_mfc")
industry = industry.applymap(lambda x: x.decode('utf-8'))
[alpha_val, industry] = FactorPreProcess().make_same_index_columns([alpha_val, industry])
if alpha_barra_style_neutral:
size = Stock().get_factor_h5("NORMAL_CNE5_SIZE", None, 'barra_risk_dfc')
beta = Stock().get_factor_h5("NORMAL_CNE5_BETA", None, 'barra_risk_dfc')
nolin_size = Stock().get_factor_h5("NORMAL_CNE5_NON_LINEAR_SIZE", None, 'barra_risk_dfc')
momentum = Stock().get_factor_h5("NORMAL_CNE5_MOMENTUM", None, 'barra_risk_dfc')
[size, beta, nolin_size] = FactorPreProcess().make_same_index_columns([size, beta, nolin_size])
beg_date = max(beg_date, price.columns[0], alpha_val.columns[0], beta.columns[0])
end_date = min(end_date, price.columns[-1], alpha_val.columns[-1], beta.columns[-1])
else:
beg_date = max(beg_date, price.columns[0], alpha_val.columns[0])
end_date = min(end_date, price.columns[-1], alpha_val.columns[-1])
date_series = Date().get_trade_date_series(beg_date, end_date, period=cal_period)
date_series = list(set(date_series) & set(alpha_val.columns))
date_series.sort()
# pre process data
###############################################################################################################
###############################################################################################################
if alpha_remove_extreme_value:
alpha_val = FactorPreProcess().remove_extreme_value_mad(alpha_val)
if alpha_standard:
alpha_val = FactorPreProcess().standardization(alpha_val)
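    # Assumption: `remove_extreme_value_mad` presumably clips values beyond a
    # multiple of the median absolute deviation and `standardization` z-scores
    # each cross-section; both live in the in-house FactorPreProcess class.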
# cal everyday
###############################################################################################################
###############################################################################################################
alpha_return = pd.DataFrame([], index=date_series)
alpha_exposure = pd.DataFrame([], index=date_series, columns=price.index)
for i_date in range(len(date_series) - 2):
cur_cal_date = date_series[i_date]
next_cal_date = date_series[i_date + 1]
buy_date = Date().get_trade_date_offset(cur_cal_date, 1)
sell_date = Date().get_trade_date_offset(next_cal_date, 1)
print(" Calculating Factor %s Alpha Return At %s" % (factor_name, cur_cal_date))
alpha_return.index.name = 'CalDate'
alpha_return.ix[cur_cal_date, "BuyDate"] = buy_date
alpha_return.ix[cur_cal_date, "SellDate"] = sell_date
alpha_date = alpha_val[cur_cal_date]
buy_price = price[buy_date]
sell_price = price[sell_date]
pct_date = sell_price / buy_price - 1.0
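        # Signals dated `cur_cal_date` are traded with a one-day lag: bought at
        # `buy_date` and held until `sell_date`, so `pct_date` is the realized
        # holding-period return that the exposure could actually capture.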
if alpha_industry_neutral:
try:
industry_date = industry[cur_cal_date]
industry_dummy = pd.get_dummies(industry_date)
            except KeyError:
                # this date is missing from the industry panel
                continue
if len(pd.concat([alpha_date, industry_date], axis=1).dropna()) < min_stock_number:
continue
else:
params, factor_res = factor_neutral(factor_series=alpha_date, neutral_frame=industry_dummy)
alpha_date = factor_res
alpha_date = FactorPreProcess().remove_extreme_value_mad(alpha_date)
alpha_date = FactorPreProcess().standardization(alpha_date)
if alpha_barra_style_neutral:
try:
size_date = size[cur_cal_date]
beta_date = beta[cur_cal_date]
nolin_size_date = nolin_size[cur_cal_date]
momentum_date = momentum[cur_cal_date]
            except KeyError:
                # this date is missing from one of the Barra style panels
                continue
if len(pd.concat([alpha_date, size_date], axis=1).dropna()) < min_stock_number:
continue
else:
barra_risk_exposure = pd.concat([beta_date, size_date,
nolin_size_date, momentum_date], axis=1)
barra_risk_exposure.columns = ['beta', 'size', 'nolin_size', 'momentum']
params, factor_res = factor_neutral(factor_series=alpha_date, neutral_frame=barra_risk_exposure)
alpha_date = factor_res
alpha_date = FactorPreProcess().remove_extreme_value_mad(alpha_date)
alpha_date = FactorPreProcess().standardization(alpha_date)
alpha_exposure.ix[cur_cal_date, :] = alpha_date
res = pd.concat([alpha_date, pct_date], axis=1)
res.columns = ['alpha_val', 'period_pct']
res = res.dropna()
res = res.sort_values(by=['alpha_val'], ascending=False)
labels = ["group_" + str(i) for i in list(range(1, group_number + 1))]
res['group'] = pd.cut(res['alpha_val'], bins=group_number, labels=labels)
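        # Note: pd.cut with an integer bin count splits the (standardized)
        # alpha range into equal-width buckets, so the tail groups may hold
        # far fewer names than equal-count quantiles (pd.qcut) would.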
period_return = (res['alpha_val'] * res['period_pct']).mean()
alpha_return.ix[cur_cal_date, "FactorReturn"] = period_return
information_correlation = res['alpha_val'].corr(res['period_pct'])
alpha_return.ix[cur_cal_date, "IC"] = information_correlation
group_pct = res.groupby(by=['group'])['period_pct'].mean()
for i_label in range(len(labels)):
alpha_return.ix[cur_cal_date, labels[i_label]] = group_pct.values[i_label]
alpha_return = alpha_return.dropna(subset=['FactorReturn'])
alpha_return["CumFactorReturn"] = alpha_return['FactorReturn'].cumsum()
cum_labels = ["Cum_" + str(x) for x in labels]
alpha_return[cum_labels] = alpha_return[labels].cumsum()
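    # Cumulative series are arithmetic sums of the per-period numbers (no
    # compounding), which matches the simple per-year averaging used below.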
# plot
###############################################################################################################
###############################################################################################################
# plt_col = []
# plt_col.append("CumFactorReturn")
# plt_col.extend(cum_labels)
# alpha_return[plt_col].plot()
# plt.title(factor_name)
# plt.show()
# describe annual
###############################################################################################################
###############################################################################################################
back_test_beg_date = Date().get_trade_date_offset(date_series[0], 1)
back_test_end_date = Date().get_trade_date_offset(date_series[len(date_series) - 1], 1)
back_test_days = Date().get_trade_date_diff(back_test_beg_date, back_test_end_date)
backtest_year = back_test_days / year_trade_days
alpha_return['year'] = alpha_return.index.map(lambda x: datetime.strptime(x, "%Y%m%d").year)
year_factor_return = alpha_return.groupby(by=['year'])['FactorReturn'].sum()
year_count = alpha_return.groupby(by=['year'])['FactorReturn'].count()
year_ic_mean = alpha_return.groupby(by=['year'])['IC'].mean()
year_ic_std = alpha_return.groupby(by=['year'])['IC'].std()
year_gp_mean = alpha_return.groupby(by=['year'])[labels].mean()
year_describe = pd.concat([year_factor_return, year_count, year_ic_mean, year_ic_std, year_gp_mean], axis=1)
col = ['YearFactorReturn', 'Count', 'IC_mean', 'IC_std']
col.extend(labels)
year_describe.columns = col
year_describe['YearFactorReturn'] = year_describe['YearFactorReturn'] / year_describe['Count'] * year_count
year_describe['IC_IR'] = year_describe['IC_mean'] / year_describe['IC_std'] * np.sqrt(50)
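    # sqrt(50) annualizes the IC mean/std ratio under the assumption of
    # roughly 50 weekly rebalances per year (cal_period == "W" in __main__).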
year_describe.ix['Sum', 'YearFactorReturn'] = alpha_return["CumFactorReturn"].values[-1] / backtest_year
year_describe.ix['Sum', 'IC_IR'] = alpha_return["IC"].mean() / alpha_return["IC"].std() * np.sqrt(50)
year_describe.ix['Sum', 'IC_mean'] = alpha_return["IC"].mean()
year_describe.ix['Sum', 'IC_std'] = alpha_return["IC"].std()
year_describe.ix['Sum', labels] = year_describe.ix[0:-1, labels].sum()
year_describe.index = year_describe.index.map(str)
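    # `Group_Corr` below: correlation between group index and group mean
    # return; a magnitude near 1 means returns vary monotonically across the
    # alpha-sorted groups, a quick check of factor monotonicity.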
for i in range(len(year_describe)):
year = year_describe.index[i]
corr_pd = pd.DataFrame(year_describe.ix[year, labels].values, index=labels, columns=['group_return'])
        corr_pd['group_number'] = list(range(1, group_number + 1))
year_describe.ix[year, 'Group_Corr'] = corr_pd.corr().ix[0, 1]
# save data
###############################################################################################################
###############################################################################################################
# alpha_exposure_neutral
###############################################################################################################
    alpha_exposure = alpha_exposure.astype(float)   # np.float was removed in NumPy 1.24
filename = os.path.join(out_path, 'alpha_exposure_neutral', factor_name + "_FactorExposureNeutral.csv")
alpha_exposure.T.to_csv(filename)
# exposure_corr
###############################################################################################################
exposure_corr = pd.DataFrame([], index=alpha_exposure.index, columns=['Exposure_Corr'])
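    # Adjacent-date correlation of the neutralized exposures proxies factor
    # turnover: values near 1 mean the signal is stable between rebalances.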
for i_date in range(1, len(alpha_exposure.index)):
last_exposure_date = alpha_exposure.index[i_date-1]
cur_exposure_date = alpha_exposure.index[i_date]
exposure_adjoin = alpha_exposure.ix[last_exposure_date:cur_exposure_date, :]
exposure_adjoin = exposure_adjoin.T.dropna()
exposure_corr.ix[cur_exposure_date, 'Exposure_Corr'] = exposure_adjoin.corr().ix[0, 1]
exposure_corr = exposure_corr.dropna()
exposure_corr.ix['Mean', 'Exposure_Corr'] = exposure_corr['Exposure_Corr'].mean()
filename = os.path.join(out_path, 'alpha_exposure_stability', factor_name + "_FactorExposureCorr.csv")
exposure_corr.to_csv(filename)
# Factor Return
###############################################################################################################
filename = os.path.join(out_path, 'alpha_return', factor_name + "_FactorReturn.xlsx")
sheet_name = "FactorReturn"
we = WriteExcel(filename)
ws = we.add_worksheet(sheet_name)
num_format_pd = pd.DataFrame([], columns=year_describe.columns, index=['format'])
num_format_pd.ix['format', :] = '0.00%'
num_format_pd.ix['format', ['Count', 'IC_IR']] = '0.00'
we.write_pandas(year_describe, ws, begin_row_number=0, begin_col_number=1,
num_format_pd=num_format_pd, color="blue", fillna=True)
num_format_pd = pd.DataFrame([], columns=alpha_return.columns, index=['format'])
num_format_pd.ix['format', :] = '0.00%'
num_format_pd.ix['format', ['year']] = '0'
we.write_pandas(alpha_return, ws, begin_row_number=0, begin_col_number=2+len(year_describe.columns),
num_format_pd=num_format_pd, color="blue", fillna=True)
we.close()
###############################################################################################################
if __name__ == '__main__':
cal_period = "W"
beg_date = "20040101"
end_date = datetime.today().strftime("%Y%m%d")
path = "E:\\3_Data\\5_stock_data\\3_alpha_model\\"
file = "MyAlpha.xlsx"
data = pd.read_excel(os.path.join(path, file), encoding='gbk')
    data = data[data['计算因子收益率'] == "是"]   # keep rows where the "compute factor return" flag is "是" (yes)
data = data.reset_index(drop=True)
for i in range(0, len(data)):
        factor_name = data.ix[i, "因子名"]   # '因子名' = factor-name column
        print("#################### Start computing factor return for %s ####################" % factor_name)
        cal_factor_alpha_return(factor_name, beg_date, end_date, cal_period)
        print("#################### Finished computing factor return for %s ####################" % factor_name)
|
normal
|
{
"blob_id": "1d0730e8fd120e1c4bc5b89cbd766234e1fa3bca",
"index": 2197,
"step-1": "<mask token>\n\n\ndef cal_factor_alpha_return(factor_name, beg_date, end_date, cal_period):\n group_number = 8\n year_trade_days = 242\n min_stock_number = 100\n out_path = 'E:\\\\3_Data\\\\5_stock_data\\\\3_alpha_model\\\\'\n alpha_remove_extreme_value = True\n alpha_standard = True\n alpha_industry_neutral = True\n alpha_barra_style_neutral = True\n price = Stock().get_factor_h5('PriceCloseAdjust', None, 'alpha_dfc')\n alpha_val = Stock().get_factor_h5(factor_name, None, 'alpha_dfc')\n industry = Stock().get_factor_h5('industry_citic1', None, 'primary_mfc')\n industry = industry.applymap(lambda x: x.decode('utf-8'))\n [alpha_val, industry] = FactorPreProcess().make_same_index_columns([\n alpha_val, industry])\n if alpha_barra_style_neutral:\n size = Stock().get_factor_h5('NORMAL_CNE5_SIZE', None, 'barra_risk_dfc'\n )\n beta = Stock().get_factor_h5('NORMAL_CNE5_BETA', None, 'barra_risk_dfc'\n )\n nolin_size = Stock().get_factor_h5('NORMAL_CNE5_NON_LINEAR_SIZE',\n None, 'barra_risk_dfc')\n momentum = Stock().get_factor_h5('NORMAL_CNE5_MOMENTUM', None,\n 'barra_risk_dfc')\n [size, beta, nolin_size] = FactorPreProcess().make_same_index_columns([\n size, beta, nolin_size])\n beg_date = max(beg_date, price.columns[0], alpha_val.columns[0],\n beta.columns[0])\n end_date = min(end_date, price.columns[-1], alpha_val.columns[-1],\n beta.columns[-1])\n else:\n beg_date = max(beg_date, price.columns[0], alpha_val.columns[0])\n end_date = min(end_date, price.columns[-1], alpha_val.columns[-1])\n date_series = Date().get_trade_date_series(beg_date, end_date, period=\n cal_period)\n date_series = list(set(date_series) & set(alpha_val.columns))\n date_series.sort()\n if alpha_remove_extreme_value:\n alpha_val = FactorPreProcess().remove_extreme_value_mad(alpha_val)\n if alpha_standard:\n alpha_val = FactorPreProcess().standardization(alpha_val)\n alpha_return = pd.DataFrame([], index=date_series)\n alpha_exposure = pd.DataFrame([], index=date_series, columns=price.index)\n for i_date in range(len(date_series) - 2):\n cur_cal_date = date_series[i_date]\n next_cal_date = date_series[i_date + 1]\n buy_date = Date().get_trade_date_offset(cur_cal_date, 1)\n sell_date = Date().get_trade_date_offset(next_cal_date, 1)\n print(' Calculating Factor %s Alpha Return At %s' % (factor_name,\n cur_cal_date))\n alpha_return.index.name = 'CalDate'\n alpha_return.ix[cur_cal_date, 'BuyDate'] = buy_date\n alpha_return.ix[cur_cal_date, 'SellDate'] = sell_date\n alpha_date = alpha_val[cur_cal_date]\n buy_price = price[buy_date]\n sell_price = price[sell_date]\n pct_date = sell_price / buy_price - 1.0\n if alpha_industry_neutral:\n try:\n industry_date = industry[cur_cal_date]\n industry_dummy = pd.get_dummies(industry_date)\n except:\n continue\n if len(pd.concat([alpha_date, industry_date], axis=1).dropna()\n ) < min_stock_number:\n continue\n else:\n params, factor_res = factor_neutral(factor_series=\n alpha_date, neutral_frame=industry_dummy)\n alpha_date = factor_res\n alpha_date = FactorPreProcess().remove_extreme_value_mad(\n alpha_date)\n alpha_date = FactorPreProcess().standardization(alpha_date)\n if alpha_barra_style_neutral:\n try:\n size_date = size[cur_cal_date]\n beta_date = beta[cur_cal_date]\n nolin_size_date = nolin_size[cur_cal_date]\n momentum_date = momentum[cur_cal_date]\n except:\n continue\n if len(pd.concat([alpha_date, size_date], axis=1).dropna()\n ) < min_stock_number:\n continue\n else:\n barra_risk_exposure = pd.concat([beta_date, size_date,\n nolin_size_date, momentum_date], axis=1)\n 
barra_risk_exposure.columns = ['beta', 'size', 'nolin_size',\n 'momentum']\n params, factor_res = factor_neutral(factor_series=\n alpha_date, neutral_frame=barra_risk_exposure)\n alpha_date = factor_res\n alpha_date = FactorPreProcess().remove_extreme_value_mad(\n alpha_date)\n alpha_date = FactorPreProcess().standardization(alpha_date)\n alpha_exposure.ix[cur_cal_date, :] = alpha_date\n res = pd.concat([alpha_date, pct_date], axis=1)\n res.columns = ['alpha_val', 'period_pct']\n res = res.dropna()\n res = res.sort_values(by=['alpha_val'], ascending=False)\n labels = [('group_' + str(i)) for i in list(range(1, group_number + 1))\n ]\n res['group'] = pd.cut(res['alpha_val'], bins=group_number, labels=\n labels)\n period_return = (res['alpha_val'] * res['period_pct']).mean()\n alpha_return.ix[cur_cal_date, 'FactorReturn'] = period_return\n information_correlation = res['alpha_val'].corr(res['period_pct'])\n alpha_return.ix[cur_cal_date, 'IC'] = information_correlation\n group_pct = res.groupby(by=['group'])['period_pct'].mean()\n for i_label in range(len(labels)):\n alpha_return.ix[cur_cal_date, labels[i_label]] = group_pct.values[\n i_label]\n alpha_return = alpha_return.dropna(subset=['FactorReturn'])\n alpha_return['CumFactorReturn'] = alpha_return['FactorReturn'].cumsum()\n cum_labels = [('Cum_' + str(x)) for x in labels]\n alpha_return[cum_labels] = alpha_return[labels].cumsum()\n back_test_beg_date = Date().get_trade_date_offset(date_series[0], 1)\n back_test_end_date = Date().get_trade_date_offset(date_series[len(\n date_series) - 1], 1)\n back_test_days = Date().get_trade_date_diff(back_test_beg_date,\n back_test_end_date)\n backtest_year = back_test_days / year_trade_days\n alpha_return['year'] = alpha_return.index.map(lambda x: datetime.\n strptime(x, '%Y%m%d').year)\n year_factor_return = alpha_return.groupby(by=['year'])['FactorReturn'].sum(\n )\n year_count = alpha_return.groupby(by=['year'])['FactorReturn'].count()\n year_ic_mean = alpha_return.groupby(by=['year'])['IC'].mean()\n year_ic_std = alpha_return.groupby(by=['year'])['IC'].std()\n year_gp_mean = alpha_return.groupby(by=['year'])[labels].mean()\n year_describe = pd.concat([year_factor_return, year_count, year_ic_mean,\n year_ic_std, year_gp_mean], axis=1)\n col = ['YearFactorReturn', 'Count', 'IC_mean', 'IC_std']\n col.extend(labels)\n year_describe.columns = col\n year_describe['YearFactorReturn'] = year_describe['YearFactorReturn'\n ] / year_describe['Count'] * year_count\n year_describe['IC_IR'] = year_describe['IC_mean'] / year_describe['IC_std'\n ] * np.sqrt(50)\n year_describe.ix['Sum', 'YearFactorReturn'] = alpha_return[\n 'CumFactorReturn'].values[-1] / backtest_year\n year_describe.ix['Sum', 'IC_IR'] = alpha_return['IC'].mean(\n ) / alpha_return['IC'].std() * np.sqrt(50)\n year_describe.ix['Sum', 'IC_mean'] = alpha_return['IC'].mean()\n year_describe.ix['Sum', 'IC_std'] = alpha_return['IC'].std()\n year_describe.ix['Sum', labels] = year_describe.ix[0:-1, labels].sum()\n year_describe.index = year_describe.index.map(str)\n for i in range(len(year_describe)):\n year = year_describe.index[i]\n corr_pd = pd.DataFrame(year_describe.ix[year, labels].values, index\n =labels, columns=['group_return'])\n corr_pd['group_number'] = list(range(1, group_number + 1))\n year_describe.ix[year, 'Group_Corr'] = corr_pd.corr().ix[0, 1]\n alpha_exposure = alpha_exposure.astype(np.float)\n filename = os.path.join(out_path, 'alpha_exposure_neutral', factor_name +\n '_FactorExposureNeutral.csv')\n alpha_exposure.T.to_csv(filename)\n 
exposure_corr = pd.DataFrame([], index=alpha_exposure.index, columns=[\n 'Exposure_Corr'])\n for i_date in range(1, len(alpha_exposure.index)):\n last_exposure_date = alpha_exposure.index[i_date - 1]\n cur_exposure_date = alpha_exposure.index[i_date]\n exposure_adjoin = alpha_exposure.ix[last_exposure_date:\n cur_exposure_date, :]\n exposure_adjoin = exposure_adjoin.T.dropna()\n exposure_corr.ix[cur_exposure_date, 'Exposure_Corr'\n ] = exposure_adjoin.corr().ix[0, 1]\n exposure_corr = exposure_corr.dropna()\n exposure_corr.ix['Mean', 'Exposure_Corr'] = exposure_corr['Exposure_Corr'\n ].mean()\n filename = os.path.join(out_path, 'alpha_exposure_stability', \n factor_name + '_FactorExposureCorr.csv')\n exposure_corr.to_csv(filename)\n filename = os.path.join(out_path, 'alpha_return', factor_name +\n '_FactorReturn.xlsx')\n sheet_name = 'FactorReturn'\n we = WriteExcel(filename)\n ws = we.add_worksheet(sheet_name)\n num_format_pd = pd.DataFrame([], columns=year_describe.columns, index=[\n 'format'])\n num_format_pd.ix['format', :] = '0.00%'\n num_format_pd.ix['format', ['Count', 'IC_IR']] = '0.00'\n we.write_pandas(year_describe, ws, begin_row_number=0, begin_col_number\n =1, num_format_pd=num_format_pd, color='blue', fillna=True)\n num_format_pd = pd.DataFrame([], columns=alpha_return.columns, index=[\n 'format'])\n num_format_pd.ix['format', :] = '0.00%'\n num_format_pd.ix['format', ['year']] = '0'\n we.write_pandas(alpha_return, ws, begin_row_number=0, begin_col_number=\n 2 + len(year_describe.columns), num_format_pd=num_format_pd, color=\n 'blue', fillna=True)\n we.close()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef factor_neutral(factor_series, neutral_frame):\n \"\"\"\n 中性化\n \"\"\"\n concat_data = pd.concat([factor_series, neutral_frame], axis=1)\n concat_data = concat_data.dropna()\n factor_val = concat_data.ix[:, 0]\n neutral_val = concat_data.ix[:, 1:]\n model = sm.OLS(factor_val.values, neutral_val.values)\n regress = model.fit()\n params = regress.params\n params = pd.DataFrame(params, index=neutral_val.columns, columns=['param'])\n factor_res = factor_val - regress.predict(neutral_val)\n return params, factor_res\n\n\ndef cal_factor_alpha_return(factor_name, beg_date, end_date, cal_period):\n group_number = 8\n year_trade_days = 242\n min_stock_number = 100\n out_path = 'E:\\\\3_Data\\\\5_stock_data\\\\3_alpha_model\\\\'\n alpha_remove_extreme_value = True\n alpha_standard = True\n alpha_industry_neutral = True\n alpha_barra_style_neutral = True\n price = Stock().get_factor_h5('PriceCloseAdjust', None, 'alpha_dfc')\n alpha_val = Stock().get_factor_h5(factor_name, None, 'alpha_dfc')\n industry = Stock().get_factor_h5('industry_citic1', None, 'primary_mfc')\n industry = industry.applymap(lambda x: x.decode('utf-8'))\n [alpha_val, industry] = FactorPreProcess().make_same_index_columns([\n alpha_val, industry])\n if alpha_barra_style_neutral:\n size = Stock().get_factor_h5('NORMAL_CNE5_SIZE', None, 'barra_risk_dfc'\n )\n beta = Stock().get_factor_h5('NORMAL_CNE5_BETA', None, 'barra_risk_dfc'\n )\n nolin_size = Stock().get_factor_h5('NORMAL_CNE5_NON_LINEAR_SIZE',\n None, 'barra_risk_dfc')\n momentum = Stock().get_factor_h5('NORMAL_CNE5_MOMENTUM', None,\n 'barra_risk_dfc')\n [size, beta, nolin_size] = FactorPreProcess().make_same_index_columns([\n size, beta, nolin_size])\n beg_date = max(beg_date, price.columns[0], alpha_val.columns[0],\n beta.columns[0])\n end_date = min(end_date, price.columns[-1], alpha_val.columns[-1],\n beta.columns[-1])\n else:\n beg_date = max(beg_date, price.columns[0], alpha_val.columns[0])\n end_date = min(end_date, price.columns[-1], alpha_val.columns[-1])\n date_series = Date().get_trade_date_series(beg_date, end_date, period=\n cal_period)\n date_series = list(set(date_series) & set(alpha_val.columns))\n date_series.sort()\n if alpha_remove_extreme_value:\n alpha_val = FactorPreProcess().remove_extreme_value_mad(alpha_val)\n if alpha_standard:\n alpha_val = FactorPreProcess().standardization(alpha_val)\n alpha_return = pd.DataFrame([], index=date_series)\n alpha_exposure = pd.DataFrame([], index=date_series, columns=price.index)\n for i_date in range(len(date_series) - 2):\n cur_cal_date = date_series[i_date]\n next_cal_date = date_series[i_date + 1]\n buy_date = Date().get_trade_date_offset(cur_cal_date, 1)\n sell_date = Date().get_trade_date_offset(next_cal_date, 1)\n print(' Calculating Factor %s Alpha Return At %s' % (factor_name,\n cur_cal_date))\n alpha_return.index.name = 'CalDate'\n alpha_return.ix[cur_cal_date, 'BuyDate'] = buy_date\n alpha_return.ix[cur_cal_date, 'SellDate'] = sell_date\n alpha_date = alpha_val[cur_cal_date]\n buy_price = price[buy_date]\n sell_price = price[sell_date]\n pct_date = sell_price / buy_price - 1.0\n if alpha_industry_neutral:\n try:\n industry_date = industry[cur_cal_date]\n industry_dummy = pd.get_dummies(industry_date)\n except:\n continue\n if len(pd.concat([alpha_date, industry_date], axis=1).dropna()\n ) < min_stock_number:\n continue\n else:\n params, factor_res = factor_neutral(factor_series=\n alpha_date, neutral_frame=industry_dummy)\n alpha_date = factor_res\n alpha_date = 
FactorPreProcess().remove_extreme_value_mad(\n alpha_date)\n alpha_date = FactorPreProcess().standardization(alpha_date)\n if alpha_barra_style_neutral:\n try:\n size_date = size[cur_cal_date]\n beta_date = beta[cur_cal_date]\n nolin_size_date = nolin_size[cur_cal_date]\n momentum_date = momentum[cur_cal_date]\n except:\n continue\n if len(pd.concat([alpha_date, size_date], axis=1).dropna()\n ) < min_stock_number:\n continue\n else:\n barra_risk_exposure = pd.concat([beta_date, size_date,\n nolin_size_date, momentum_date], axis=1)\n barra_risk_exposure.columns = ['beta', 'size', 'nolin_size',\n 'momentum']\n params, factor_res = factor_neutral(factor_series=\n alpha_date, neutral_frame=barra_risk_exposure)\n alpha_date = factor_res\n alpha_date = FactorPreProcess().remove_extreme_value_mad(\n alpha_date)\n alpha_date = FactorPreProcess().standardization(alpha_date)\n alpha_exposure.ix[cur_cal_date, :] = alpha_date\n res = pd.concat([alpha_date, pct_date], axis=1)\n res.columns = ['alpha_val', 'period_pct']\n res = res.dropna()\n res = res.sort_values(by=['alpha_val'], ascending=False)\n labels = [('group_' + str(i)) for i in list(range(1, group_number + 1))\n ]\n res['group'] = pd.cut(res['alpha_val'], bins=group_number, labels=\n labels)\n period_return = (res['alpha_val'] * res['period_pct']).mean()\n alpha_return.ix[cur_cal_date, 'FactorReturn'] = period_return\n information_correlation = res['alpha_val'].corr(res['period_pct'])\n alpha_return.ix[cur_cal_date, 'IC'] = information_correlation\n group_pct = res.groupby(by=['group'])['period_pct'].mean()\n for i_label in range(len(labels)):\n alpha_return.ix[cur_cal_date, labels[i_label]] = group_pct.values[\n i_label]\n alpha_return = alpha_return.dropna(subset=['FactorReturn'])\n alpha_return['CumFactorReturn'] = alpha_return['FactorReturn'].cumsum()\n cum_labels = [('Cum_' + str(x)) for x in labels]\n alpha_return[cum_labels] = alpha_return[labels].cumsum()\n back_test_beg_date = Date().get_trade_date_offset(date_series[0], 1)\n back_test_end_date = Date().get_trade_date_offset(date_series[len(\n date_series) - 1], 1)\n back_test_days = Date().get_trade_date_diff(back_test_beg_date,\n back_test_end_date)\n backtest_year = back_test_days / year_trade_days\n alpha_return['year'] = alpha_return.index.map(lambda x: datetime.\n strptime(x, '%Y%m%d').year)\n year_factor_return = alpha_return.groupby(by=['year'])['FactorReturn'].sum(\n )\n year_count = alpha_return.groupby(by=['year'])['FactorReturn'].count()\n year_ic_mean = alpha_return.groupby(by=['year'])['IC'].mean()\n year_ic_std = alpha_return.groupby(by=['year'])['IC'].std()\n year_gp_mean = alpha_return.groupby(by=['year'])[labels].mean()\n year_describe = pd.concat([year_factor_return, year_count, year_ic_mean,\n year_ic_std, year_gp_mean], axis=1)\n col = ['YearFactorReturn', 'Count', 'IC_mean', 'IC_std']\n col.extend(labels)\n year_describe.columns = col\n year_describe['YearFactorReturn'] = year_describe['YearFactorReturn'\n ] / year_describe['Count'] * year_count\n year_describe['IC_IR'] = year_describe['IC_mean'] / year_describe['IC_std'\n ] * np.sqrt(50)\n year_describe.ix['Sum', 'YearFactorReturn'] = alpha_return[\n 'CumFactorReturn'].values[-1] / backtest_year\n year_describe.ix['Sum', 'IC_IR'] = alpha_return['IC'].mean(\n ) / alpha_return['IC'].std() * np.sqrt(50)\n year_describe.ix['Sum', 'IC_mean'] = alpha_return['IC'].mean()\n year_describe.ix['Sum', 'IC_std'] = alpha_return['IC'].std()\n year_describe.ix['Sum', labels] = year_describe.ix[0:-1, labels].sum()\n 
year_describe.index = year_describe.index.map(str)\n for i in range(len(year_describe)):\n year = year_describe.index[i]\n corr_pd = pd.DataFrame(year_describe.ix[year, labels].values, index\n =labels, columns=['group_return'])\n corr_pd['group_number'] = list(range(1, group_number + 1))\n year_describe.ix[year, 'Group_Corr'] = corr_pd.corr().ix[0, 1]\n alpha_exposure = alpha_exposure.astype(np.float)\n filename = os.path.join(out_path, 'alpha_exposure_neutral', factor_name +\n '_FactorExposureNeutral.csv')\n alpha_exposure.T.to_csv(filename)\n exposure_corr = pd.DataFrame([], index=alpha_exposure.index, columns=[\n 'Exposure_Corr'])\n for i_date in range(1, len(alpha_exposure.index)):\n last_exposure_date = alpha_exposure.index[i_date - 1]\n cur_exposure_date = alpha_exposure.index[i_date]\n exposure_adjoin = alpha_exposure.ix[last_exposure_date:\n cur_exposure_date, :]\n exposure_adjoin = exposure_adjoin.T.dropna()\n exposure_corr.ix[cur_exposure_date, 'Exposure_Corr'\n ] = exposure_adjoin.corr().ix[0, 1]\n exposure_corr = exposure_corr.dropna()\n exposure_corr.ix['Mean', 'Exposure_Corr'] = exposure_corr['Exposure_Corr'\n ].mean()\n filename = os.path.join(out_path, 'alpha_exposure_stability', \n factor_name + '_FactorExposureCorr.csv')\n exposure_corr.to_csv(filename)\n filename = os.path.join(out_path, 'alpha_return', factor_name +\n '_FactorReturn.xlsx')\n sheet_name = 'FactorReturn'\n we = WriteExcel(filename)\n ws = we.add_worksheet(sheet_name)\n num_format_pd = pd.DataFrame([], columns=year_describe.columns, index=[\n 'format'])\n num_format_pd.ix['format', :] = '0.00%'\n num_format_pd.ix['format', ['Count', 'IC_IR']] = '0.00'\n we.write_pandas(year_describe, ws, begin_row_number=0, begin_col_number\n =1, num_format_pd=num_format_pd, color='blue', fillna=True)\n num_format_pd = pd.DataFrame([], columns=alpha_return.columns, index=[\n 'format'])\n num_format_pd.ix['format', :] = '0.00%'\n num_format_pd.ix['format', ['year']] = '0'\n we.write_pandas(alpha_return, ws, begin_row_number=0, begin_col_number=\n 2 + len(year_describe.columns), num_format_pd=num_format_pd, color=\n 'blue', fillna=True)\n we.close()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef factor_neutral(factor_series, neutral_frame):\n \"\"\"\n 中性化\n \"\"\"\n concat_data = pd.concat([factor_series, neutral_frame], axis=1)\n concat_data = concat_data.dropna()\n factor_val = concat_data.ix[:, 0]\n neutral_val = concat_data.ix[:, 1:]\n model = sm.OLS(factor_val.values, neutral_val.values)\n regress = model.fit()\n params = regress.params\n params = pd.DataFrame(params, index=neutral_val.columns, columns=['param'])\n factor_res = factor_val - regress.predict(neutral_val)\n return params, factor_res\n\n\ndef cal_factor_alpha_return(factor_name, beg_date, end_date, cal_period):\n group_number = 8\n year_trade_days = 242\n min_stock_number = 100\n out_path = 'E:\\\\3_Data\\\\5_stock_data\\\\3_alpha_model\\\\'\n alpha_remove_extreme_value = True\n alpha_standard = True\n alpha_industry_neutral = True\n alpha_barra_style_neutral = True\n price = Stock().get_factor_h5('PriceCloseAdjust', None, 'alpha_dfc')\n alpha_val = Stock().get_factor_h5(factor_name, None, 'alpha_dfc')\n industry = Stock().get_factor_h5('industry_citic1', None, 'primary_mfc')\n industry = industry.applymap(lambda x: x.decode('utf-8'))\n [alpha_val, industry] = FactorPreProcess().make_same_index_columns([\n alpha_val, industry])\n if alpha_barra_style_neutral:\n size = Stock().get_factor_h5('NORMAL_CNE5_SIZE', None, 'barra_risk_dfc'\n )\n beta = Stock().get_factor_h5('NORMAL_CNE5_BETA', None, 'barra_risk_dfc'\n )\n nolin_size = Stock().get_factor_h5('NORMAL_CNE5_NON_LINEAR_SIZE',\n None, 'barra_risk_dfc')\n momentum = Stock().get_factor_h5('NORMAL_CNE5_MOMENTUM', None,\n 'barra_risk_dfc')\n [size, beta, nolin_size] = FactorPreProcess().make_same_index_columns([\n size, beta, nolin_size])\n beg_date = max(beg_date, price.columns[0], alpha_val.columns[0],\n beta.columns[0])\n end_date = min(end_date, price.columns[-1], alpha_val.columns[-1],\n beta.columns[-1])\n else:\n beg_date = max(beg_date, price.columns[0], alpha_val.columns[0])\n end_date = min(end_date, price.columns[-1], alpha_val.columns[-1])\n date_series = Date().get_trade_date_series(beg_date, end_date, period=\n cal_period)\n date_series = list(set(date_series) & set(alpha_val.columns))\n date_series.sort()\n if alpha_remove_extreme_value:\n alpha_val = FactorPreProcess().remove_extreme_value_mad(alpha_val)\n if alpha_standard:\n alpha_val = FactorPreProcess().standardization(alpha_val)\n alpha_return = pd.DataFrame([], index=date_series)\n alpha_exposure = pd.DataFrame([], index=date_series, columns=price.index)\n for i_date in range(len(date_series) - 2):\n cur_cal_date = date_series[i_date]\n next_cal_date = date_series[i_date + 1]\n buy_date = Date().get_trade_date_offset(cur_cal_date, 1)\n sell_date = Date().get_trade_date_offset(next_cal_date, 1)\n print(' Calculating Factor %s Alpha Return At %s' % (factor_name,\n cur_cal_date))\n alpha_return.index.name = 'CalDate'\n alpha_return.ix[cur_cal_date, 'BuyDate'] = buy_date\n alpha_return.ix[cur_cal_date, 'SellDate'] = sell_date\n alpha_date = alpha_val[cur_cal_date]\n buy_price = price[buy_date]\n sell_price = price[sell_date]\n pct_date = sell_price / buy_price - 1.0\n if alpha_industry_neutral:\n try:\n industry_date = industry[cur_cal_date]\n industry_dummy = pd.get_dummies(industry_date)\n except:\n continue\n if len(pd.concat([alpha_date, industry_date], axis=1).dropna()\n ) < min_stock_number:\n continue\n else:\n params, factor_res = factor_neutral(factor_series=\n alpha_date, neutral_frame=industry_dummy)\n alpha_date = factor_res\n alpha_date = 
FactorPreProcess().remove_extreme_value_mad(\n alpha_date)\n alpha_date = FactorPreProcess().standardization(alpha_date)\n if alpha_barra_style_neutral:\n try:\n size_date = size[cur_cal_date]\n beta_date = beta[cur_cal_date]\n nolin_size_date = nolin_size[cur_cal_date]\n momentum_date = momentum[cur_cal_date]\n except:\n continue\n if len(pd.concat([alpha_date, size_date], axis=1).dropna()\n ) < min_stock_number:\n continue\n else:\n barra_risk_exposure = pd.concat([beta_date, size_date,\n nolin_size_date, momentum_date], axis=1)\n barra_risk_exposure.columns = ['beta', 'size', 'nolin_size',\n 'momentum']\n params, factor_res = factor_neutral(factor_series=\n alpha_date, neutral_frame=barra_risk_exposure)\n alpha_date = factor_res\n alpha_date = FactorPreProcess().remove_extreme_value_mad(\n alpha_date)\n alpha_date = FactorPreProcess().standardization(alpha_date)\n alpha_exposure.ix[cur_cal_date, :] = alpha_date\n res = pd.concat([alpha_date, pct_date], axis=1)\n res.columns = ['alpha_val', 'period_pct']\n res = res.dropna()\n res = res.sort_values(by=['alpha_val'], ascending=False)\n labels = [('group_' + str(i)) for i in list(range(1, group_number + 1))\n ]\n res['group'] = pd.cut(res['alpha_val'], bins=group_number, labels=\n labels)\n period_return = (res['alpha_val'] * res['period_pct']).mean()\n alpha_return.ix[cur_cal_date, 'FactorReturn'] = period_return\n information_correlation = res['alpha_val'].corr(res['period_pct'])\n alpha_return.ix[cur_cal_date, 'IC'] = information_correlation\n group_pct = res.groupby(by=['group'])['period_pct'].mean()\n for i_label in range(len(labels)):\n alpha_return.ix[cur_cal_date, labels[i_label]] = group_pct.values[\n i_label]\n alpha_return = alpha_return.dropna(subset=['FactorReturn'])\n alpha_return['CumFactorReturn'] = alpha_return['FactorReturn'].cumsum()\n cum_labels = [('Cum_' + str(x)) for x in labels]\n alpha_return[cum_labels] = alpha_return[labels].cumsum()\n back_test_beg_date = Date().get_trade_date_offset(date_series[0], 1)\n back_test_end_date = Date().get_trade_date_offset(date_series[len(\n date_series) - 1], 1)\n back_test_days = Date().get_trade_date_diff(back_test_beg_date,\n back_test_end_date)\n backtest_year = back_test_days / year_trade_days\n alpha_return['year'] = alpha_return.index.map(lambda x: datetime.\n strptime(x, '%Y%m%d').year)\n year_factor_return = alpha_return.groupby(by=['year'])['FactorReturn'].sum(\n )\n year_count = alpha_return.groupby(by=['year'])['FactorReturn'].count()\n year_ic_mean = alpha_return.groupby(by=['year'])['IC'].mean()\n year_ic_std = alpha_return.groupby(by=['year'])['IC'].std()\n year_gp_mean = alpha_return.groupby(by=['year'])[labels].mean()\n year_describe = pd.concat([year_factor_return, year_count, year_ic_mean,\n year_ic_std, year_gp_mean], axis=1)\n col = ['YearFactorReturn', 'Count', 'IC_mean', 'IC_std']\n col.extend(labels)\n year_describe.columns = col\n year_describe['YearFactorReturn'] = year_describe['YearFactorReturn'\n ] / year_describe['Count'] * year_count\n year_describe['IC_IR'] = year_describe['IC_mean'] / year_describe['IC_std'\n ] * np.sqrt(50)\n year_describe.ix['Sum', 'YearFactorReturn'] = alpha_return[\n 'CumFactorReturn'].values[-1] / backtest_year\n year_describe.ix['Sum', 'IC_IR'] = alpha_return['IC'].mean(\n ) / alpha_return['IC'].std() * np.sqrt(50)\n year_describe.ix['Sum', 'IC_mean'] = alpha_return['IC'].mean()\n year_describe.ix['Sum', 'IC_std'] = alpha_return['IC'].std()\n year_describe.ix['Sum', labels] = year_describe.ix[0:-1, labels].sum()\n 
year_describe.index = year_describe.index.map(str)\n for i in range(len(year_describe)):\n year = year_describe.index[i]\n corr_pd = pd.DataFrame(year_describe.ix[year, labels].values, index\n =labels, columns=['group_return'])\n corr_pd['group_number'] = list(range(1, group_number + 1))\n year_describe.ix[year, 'Group_Corr'] = corr_pd.corr().ix[0, 1]\n alpha_exposure = alpha_exposure.astype(np.float)\n filename = os.path.join(out_path, 'alpha_exposure_neutral', factor_name +\n '_FactorExposureNeutral.csv')\n alpha_exposure.T.to_csv(filename)\n exposure_corr = pd.DataFrame([], index=alpha_exposure.index, columns=[\n 'Exposure_Corr'])\n for i_date in range(1, len(alpha_exposure.index)):\n last_exposure_date = alpha_exposure.index[i_date - 1]\n cur_exposure_date = alpha_exposure.index[i_date]\n exposure_adjoin = alpha_exposure.ix[last_exposure_date:\n cur_exposure_date, :]\n exposure_adjoin = exposure_adjoin.T.dropna()\n exposure_corr.ix[cur_exposure_date, 'Exposure_Corr'\n ] = exposure_adjoin.corr().ix[0, 1]\n exposure_corr = exposure_corr.dropna()\n exposure_corr.ix['Mean', 'Exposure_Corr'] = exposure_corr['Exposure_Corr'\n ].mean()\n filename = os.path.join(out_path, 'alpha_exposure_stability', \n factor_name + '_FactorExposureCorr.csv')\n exposure_corr.to_csv(filename)\n filename = os.path.join(out_path, 'alpha_return', factor_name +\n '_FactorReturn.xlsx')\n sheet_name = 'FactorReturn'\n we = WriteExcel(filename)\n ws = we.add_worksheet(sheet_name)\n num_format_pd = pd.DataFrame([], columns=year_describe.columns, index=[\n 'format'])\n num_format_pd.ix['format', :] = '0.00%'\n num_format_pd.ix['format', ['Count', 'IC_IR']] = '0.00'\n we.write_pandas(year_describe, ws, begin_row_number=0, begin_col_number\n =1, num_format_pd=num_format_pd, color='blue', fillna=True)\n num_format_pd = pd.DataFrame([], columns=alpha_return.columns, index=[\n 'format'])\n num_format_pd.ix['format', :] = '0.00%'\n num_format_pd.ix['format', ['year']] = '0'\n we.write_pandas(alpha_return, ws, begin_row_number=0, begin_col_number=\n 2 + len(year_describe.columns), num_format_pd=num_format_pd, color=\n 'blue', fillna=True)\n we.close()\n\n\nif __name__ == '__main__':\n cal_period = 'W'\n beg_date = '20040101'\n end_date = datetime.today().strftime('%Y%m%d')\n path = 'E:\\\\3_Data\\\\5_stock_data\\\\3_alpha_model\\\\'\n file = 'MyAlpha.xlsx'\n data = pd.read_excel(os.path.join(path, file), encoding='gbk')\n data = data[data['计算因子收益率'] == '是']\n data = data.reset_index(drop=True)\n for i in range(0, len(data)):\n factor_name = data.ix[i, '因子名']\n print('#################### 开始计算因子收益率 %s 数据 ####################' %\n factor_name)\n cal_factor_alpha_return(factor_name, beg_date, end_date, cal_period)\n print('#################### 结束计算因子收益率 %s 数据 ####################' %\n factor_name)\n",
"step-4": "import pandas as pd\nimport numpy as np\nimport os\nimport matplotlib.pyplot as plt\nfrom datetime import datetime\nimport statsmodels.api as sm\nfrom quant.stock.stock import Stock\nfrom quant.stock.date import Date\nfrom quant.utility_fun.factor_preprocess import FactorPreProcess\nfrom quant.utility_fun.write_excel import WriteExcel\n\n\ndef factor_neutral(factor_series, neutral_frame):\n \"\"\"\n 中性化\n \"\"\"\n concat_data = pd.concat([factor_series, neutral_frame], axis=1)\n concat_data = concat_data.dropna()\n factor_val = concat_data.ix[:, 0]\n neutral_val = concat_data.ix[:, 1:]\n model = sm.OLS(factor_val.values, neutral_val.values)\n regress = model.fit()\n params = regress.params\n params = pd.DataFrame(params, index=neutral_val.columns, columns=['param'])\n factor_res = factor_val - regress.predict(neutral_val)\n return params, factor_res\n\n\ndef cal_factor_alpha_return(factor_name, beg_date, end_date, cal_period):\n group_number = 8\n year_trade_days = 242\n min_stock_number = 100\n out_path = 'E:\\\\3_Data\\\\5_stock_data\\\\3_alpha_model\\\\'\n alpha_remove_extreme_value = True\n alpha_standard = True\n alpha_industry_neutral = True\n alpha_barra_style_neutral = True\n price = Stock().get_factor_h5('PriceCloseAdjust', None, 'alpha_dfc')\n alpha_val = Stock().get_factor_h5(factor_name, None, 'alpha_dfc')\n industry = Stock().get_factor_h5('industry_citic1', None, 'primary_mfc')\n industry = industry.applymap(lambda x: x.decode('utf-8'))\n [alpha_val, industry] = FactorPreProcess().make_same_index_columns([\n alpha_val, industry])\n if alpha_barra_style_neutral:\n size = Stock().get_factor_h5('NORMAL_CNE5_SIZE', None, 'barra_risk_dfc'\n )\n beta = Stock().get_factor_h5('NORMAL_CNE5_BETA', None, 'barra_risk_dfc'\n )\n nolin_size = Stock().get_factor_h5('NORMAL_CNE5_NON_LINEAR_SIZE',\n None, 'barra_risk_dfc')\n momentum = Stock().get_factor_h5('NORMAL_CNE5_MOMENTUM', None,\n 'barra_risk_dfc')\n [size, beta, nolin_size] = FactorPreProcess().make_same_index_columns([\n size, beta, nolin_size])\n beg_date = max(beg_date, price.columns[0], alpha_val.columns[0],\n beta.columns[0])\n end_date = min(end_date, price.columns[-1], alpha_val.columns[-1],\n beta.columns[-1])\n else:\n beg_date = max(beg_date, price.columns[0], alpha_val.columns[0])\n end_date = min(end_date, price.columns[-1], alpha_val.columns[-1])\n date_series = Date().get_trade_date_series(beg_date, end_date, period=\n cal_period)\n date_series = list(set(date_series) & set(alpha_val.columns))\n date_series.sort()\n if alpha_remove_extreme_value:\n alpha_val = FactorPreProcess().remove_extreme_value_mad(alpha_val)\n if alpha_standard:\n alpha_val = FactorPreProcess().standardization(alpha_val)\n alpha_return = pd.DataFrame([], index=date_series)\n alpha_exposure = pd.DataFrame([], index=date_series, columns=price.index)\n for i_date in range(len(date_series) - 2):\n cur_cal_date = date_series[i_date]\n next_cal_date = date_series[i_date + 1]\n buy_date = Date().get_trade_date_offset(cur_cal_date, 1)\n sell_date = Date().get_trade_date_offset(next_cal_date, 1)\n print(' Calculating Factor %s Alpha Return At %s' % (factor_name,\n cur_cal_date))\n alpha_return.index.name = 'CalDate'\n alpha_return.ix[cur_cal_date, 'BuyDate'] = buy_date\n alpha_return.ix[cur_cal_date, 'SellDate'] = sell_date\n alpha_date = alpha_val[cur_cal_date]\n buy_price = price[buy_date]\n sell_price = price[sell_date]\n pct_date = sell_price / buy_price - 1.0\n if alpha_industry_neutral:\n try:\n industry_date = industry[cur_cal_date]\n 
industry_dummy = pd.get_dummies(industry_date)\n except:\n continue\n if len(pd.concat([alpha_date, industry_date], axis=1).dropna()\n ) < min_stock_number:\n continue\n else:\n params, factor_res = factor_neutral(factor_series=\n alpha_date, neutral_frame=industry_dummy)\n alpha_date = factor_res\n alpha_date = FactorPreProcess().remove_extreme_value_mad(\n alpha_date)\n alpha_date = FactorPreProcess().standardization(alpha_date)\n if alpha_barra_style_neutral:\n try:\n size_date = size[cur_cal_date]\n beta_date = beta[cur_cal_date]\n nolin_size_date = nolin_size[cur_cal_date]\n momentum_date = momentum[cur_cal_date]\n except:\n continue\n if len(pd.concat([alpha_date, size_date], axis=1).dropna()\n ) < min_stock_number:\n continue\n else:\n barra_risk_exposure = pd.concat([beta_date, size_date,\n nolin_size_date, momentum_date], axis=1)\n barra_risk_exposure.columns = ['beta', 'size', 'nolin_size',\n 'momentum']\n params, factor_res = factor_neutral(factor_series=\n alpha_date, neutral_frame=barra_risk_exposure)\n alpha_date = factor_res\n alpha_date = FactorPreProcess().remove_extreme_value_mad(\n alpha_date)\n alpha_date = FactorPreProcess().standardization(alpha_date)\n alpha_exposure.ix[cur_cal_date, :] = alpha_date\n res = pd.concat([alpha_date, pct_date], axis=1)\n res.columns = ['alpha_val', 'period_pct']\n res = res.dropna()\n res = res.sort_values(by=['alpha_val'], ascending=False)\n labels = [('group_' + str(i)) for i in list(range(1, group_number + 1))\n ]\n res['group'] = pd.cut(res['alpha_val'], bins=group_number, labels=\n labels)\n period_return = (res['alpha_val'] * res['period_pct']).mean()\n alpha_return.ix[cur_cal_date, 'FactorReturn'] = period_return\n information_correlation = res['alpha_val'].corr(res['period_pct'])\n alpha_return.ix[cur_cal_date, 'IC'] = information_correlation\n group_pct = res.groupby(by=['group'])['period_pct'].mean()\n for i_label in range(len(labels)):\n alpha_return.ix[cur_cal_date, labels[i_label]] = group_pct.values[\n i_label]\n alpha_return = alpha_return.dropna(subset=['FactorReturn'])\n alpha_return['CumFactorReturn'] = alpha_return['FactorReturn'].cumsum()\n cum_labels = [('Cum_' + str(x)) for x in labels]\n alpha_return[cum_labels] = alpha_return[labels].cumsum()\n back_test_beg_date = Date().get_trade_date_offset(date_series[0], 1)\n back_test_end_date = Date().get_trade_date_offset(date_series[len(\n date_series) - 1], 1)\n back_test_days = Date().get_trade_date_diff(back_test_beg_date,\n back_test_end_date)\n backtest_year = back_test_days / year_trade_days\n alpha_return['year'] = alpha_return.index.map(lambda x: datetime.\n strptime(x, '%Y%m%d').year)\n year_factor_return = alpha_return.groupby(by=['year'])['FactorReturn'].sum(\n )\n year_count = alpha_return.groupby(by=['year'])['FactorReturn'].count()\n year_ic_mean = alpha_return.groupby(by=['year'])['IC'].mean()\n year_ic_std = alpha_return.groupby(by=['year'])['IC'].std()\n year_gp_mean = alpha_return.groupby(by=['year'])[labels].mean()\n year_describe = pd.concat([year_factor_return, year_count, year_ic_mean,\n year_ic_std, year_gp_mean], axis=1)\n col = ['YearFactorReturn', 'Count', 'IC_mean', 'IC_std']\n col.extend(labels)\n year_describe.columns = col\n year_describe['YearFactorReturn'] = year_describe['YearFactorReturn'\n ] / year_describe['Count'] * year_count\n year_describe['IC_IR'] = year_describe['IC_mean'] / year_describe['IC_std'\n ] * np.sqrt(50)\n year_describe.ix['Sum', 'YearFactorReturn'] = alpha_return[\n 'CumFactorReturn'].values[-1] / backtest_year\n 
year_describe.ix['Sum', 'IC_IR'] = alpha_return['IC'].mean(\n ) / alpha_return['IC'].std() * np.sqrt(50)\n year_describe.ix['Sum', 'IC_mean'] = alpha_return['IC'].mean()\n year_describe.ix['Sum', 'IC_std'] = alpha_return['IC'].std()\n year_describe.ix['Sum', labels] = year_describe.ix[0:-1, labels].sum()\n year_describe.index = year_describe.index.map(str)\n for i in range(len(year_describe)):\n year = year_describe.index[i]\n corr_pd = pd.DataFrame(year_describe.ix[year, labels].values, index\n =labels, columns=['group_return'])\n corr_pd['group_number'] = list(range(1, group_number + 1))\n year_describe.ix[year, 'Group_Corr'] = corr_pd.corr().ix[0, 1]\n alpha_exposure = alpha_exposure.astype(np.float)\n filename = os.path.join(out_path, 'alpha_exposure_neutral', factor_name +\n '_FactorExposureNeutral.csv')\n alpha_exposure.T.to_csv(filename)\n exposure_corr = pd.DataFrame([], index=alpha_exposure.index, columns=[\n 'Exposure_Corr'])\n for i_date in range(1, len(alpha_exposure.index)):\n last_exposure_date = alpha_exposure.index[i_date - 1]\n cur_exposure_date = alpha_exposure.index[i_date]\n exposure_adjoin = alpha_exposure.ix[last_exposure_date:\n cur_exposure_date, :]\n exposure_adjoin = exposure_adjoin.T.dropna()\n exposure_corr.ix[cur_exposure_date, 'Exposure_Corr'\n ] = exposure_adjoin.corr().ix[0, 1]\n exposure_corr = exposure_corr.dropna()\n exposure_corr.ix['Mean', 'Exposure_Corr'] = exposure_corr['Exposure_Corr'\n ].mean()\n filename = os.path.join(out_path, 'alpha_exposure_stability', \n factor_name + '_FactorExposureCorr.csv')\n exposure_corr.to_csv(filename)\n filename = os.path.join(out_path, 'alpha_return', factor_name +\n '_FactorReturn.xlsx')\n sheet_name = 'FactorReturn'\n we = WriteExcel(filename)\n ws = we.add_worksheet(sheet_name)\n num_format_pd = pd.DataFrame([], columns=year_describe.columns, index=[\n 'format'])\n num_format_pd.ix['format', :] = '0.00%'\n num_format_pd.ix['format', ['Count', 'IC_IR']] = '0.00'\n we.write_pandas(year_describe, ws, begin_row_number=0, begin_col_number\n =1, num_format_pd=num_format_pd, color='blue', fillna=True)\n num_format_pd = pd.DataFrame([], columns=alpha_return.columns, index=[\n 'format'])\n num_format_pd.ix['format', :] = '0.00%'\n num_format_pd.ix['format', ['year']] = '0'\n we.write_pandas(alpha_return, ws, begin_row_number=0, begin_col_number=\n 2 + len(year_describe.columns), num_format_pd=num_format_pd, color=\n 'blue', fillna=True)\n we.close()\n\n\nif __name__ == '__main__':\n cal_period = 'W'\n beg_date = '20040101'\n end_date = datetime.today().strftime('%Y%m%d')\n path = 'E:\\\\3_Data\\\\5_stock_data\\\\3_alpha_model\\\\'\n file = 'MyAlpha.xlsx'\n data = pd.read_excel(os.path.join(path, file), encoding='gbk')\n data = data[data['计算因子收益率'] == '是']\n data = data.reset_index(drop=True)\n for i in range(0, len(data)):\n factor_name = data.ix[i, '因子名']\n print('#################### 开始计算因子收益率 %s 数据 ####################' %\n factor_name)\n cal_factor_alpha_return(factor_name, beg_date, end_date, cal_period)\n print('#################### 结束计算因子收益率 %s 数据 ####################' %\n factor_name)\n",
"step-5": "import pandas as pd\nimport numpy as np\nimport os\nimport matplotlib.pyplot as plt\nfrom datetime import datetime\nimport statsmodels.api as sm\nfrom quant.stock.stock import Stock\nfrom quant.stock.date import Date\nfrom quant.utility_fun.factor_preprocess import FactorPreProcess\nfrom quant.utility_fun.write_excel import WriteExcel\n\n\ndef factor_neutral(factor_series, neutral_frame):\n\n \"\"\"\n 中性化\n \"\"\"\n\n concat_data = pd.concat([factor_series, neutral_frame], axis=1)\n concat_data = concat_data.dropna()\n\n factor_val = concat_data.ix[:, 0]\n neutral_val = concat_data.ix[:, 1:]\n\n model = sm.OLS(factor_val.values, neutral_val.values)\n regress = model.fit()\n\n params = regress.params\n params = pd.DataFrame(params, index=neutral_val.columns, columns=['param'])\n factor_res = factor_val - regress.predict(neutral_val)\n\n return params, factor_res\n\n\ndef cal_factor_alpha_return(factor_name, beg_date, end_date, cal_period):\n\n # param\n ###############################################################################################################\n ###############################################################################################################\n group_number = 8\n year_trade_days = 242\n min_stock_number = 100\n out_path = 'E:\\\\3_Data\\\\5_stock_data\\\\3_alpha_model\\\\'\n\n alpha_remove_extreme_value = True # alpha 因子 取极值\n alpha_standard = True # alpha 因子 标准化\n alpha_industry_neutral = True # alpha 因子 行业中性\n alpha_barra_style_neutral = True # alpha 因子 风格中性\n\n # read data\n ###############################################################################################################\n ###############################################################################################################\n price = Stock().get_factor_h5(\"PriceCloseAdjust\", None, \"alpha_dfc\")\n alpha_val = Stock().get_factor_h5(factor_name, None, \"alpha_dfc\")\n industry = Stock().get_factor_h5(\"industry_citic1\", None, \"primary_mfc\")\n industry = industry.applymap(lambda x: x.decode('utf-8'))\n \n [alpha_val, industry] = FactorPreProcess().make_same_index_columns([alpha_val, industry])\n \n if alpha_barra_style_neutral:\n \n size = Stock().get_factor_h5(\"NORMAL_CNE5_SIZE\", None, 'barra_risk_dfc')\n beta = Stock().get_factor_h5(\"NORMAL_CNE5_BETA\", None, 'barra_risk_dfc')\n nolin_size = Stock().get_factor_h5(\"NORMAL_CNE5_NON_LINEAR_SIZE\", None, 'barra_risk_dfc')\n momentum = Stock().get_factor_h5(\"NORMAL_CNE5_MOMENTUM\", None, 'barra_risk_dfc')\n\n [size, beta, nolin_size] = FactorPreProcess().make_same_index_columns([size, beta, nolin_size])\n beg_date = max(beg_date, price.columns[0], alpha_val.columns[0], beta.columns[0])\n end_date = min(end_date, price.columns[-1], alpha_val.columns[-1], beta.columns[-1])\n\n else:\n beg_date = max(beg_date, price.columns[0], alpha_val.columns[0])\n end_date = min(end_date, price.columns[-1], alpha_val.columns[-1])\n\n date_series = Date().get_trade_date_series(beg_date, end_date, period=cal_period)\n date_series = list(set(date_series) & set(alpha_val.columns))\n date_series.sort()\n\n # pre process data\n ###############################################################################################################\n ###############################################################################################################\n if alpha_remove_extreme_value:\n alpha_val = FactorPreProcess().remove_extreme_value_mad(alpha_val)\n\n if alpha_standard:\n alpha_val = FactorPreProcess().standardization(alpha_val)\n\n # 
cal everyday\n ###############################################################################################################\n ###############################################################################################################\n alpha_return = pd.DataFrame([], index=date_series)\n alpha_exposure = pd.DataFrame([], index=date_series, columns=price.index)\n\n for i_date in range(len(date_series) - 2):\n\n cur_cal_date = date_series[i_date]\n next_cal_date = date_series[i_date + 1]\n buy_date = Date().get_trade_date_offset(cur_cal_date, 1)\n sell_date = Date().get_trade_date_offset(next_cal_date, 1)\n print(\" Calculating Factor %s Alpha Return At %s\" % (factor_name, cur_cal_date))\n\n alpha_return.index.name = 'CalDate'\n alpha_return.ix[cur_cal_date, \"BuyDate\"] = buy_date\n alpha_return.ix[cur_cal_date, \"SellDate\"] = sell_date\n\n alpha_date = alpha_val[cur_cal_date]\n buy_price = price[buy_date]\n sell_price = price[sell_date]\n pct_date = sell_price / buy_price - 1.0\n\n if alpha_industry_neutral:\n\n try:\n industry_date = industry[cur_cal_date]\n industry_dummy = pd.get_dummies(industry_date)\n except:\n continue\n\n if len(pd.concat([alpha_date, industry_date], axis=1).dropna()) < min_stock_number:\n continue\n else:\n params, factor_res = factor_neutral(factor_series=alpha_date, neutral_frame=industry_dummy)\n alpha_date = factor_res\n alpha_date = FactorPreProcess().remove_extreme_value_mad(alpha_date)\n alpha_date = FactorPreProcess().standardization(alpha_date)\n\n if alpha_barra_style_neutral:\n\n try:\n size_date = size[cur_cal_date]\n beta_date = beta[cur_cal_date]\n nolin_size_date = nolin_size[cur_cal_date]\n momentum_date = momentum[cur_cal_date]\n except:\n continue\n\n if len(pd.concat([alpha_date, size_date], axis=1).dropna()) < min_stock_number:\n continue\n else:\n barra_risk_exposure = pd.concat([beta_date, size_date,\n nolin_size_date, momentum_date], axis=1)\n barra_risk_exposure.columns = ['beta', 'size', 'nolin_size', 'momentum']\n params, factor_res = factor_neutral(factor_series=alpha_date, neutral_frame=barra_risk_exposure)\n alpha_date = factor_res\n alpha_date = FactorPreProcess().remove_extreme_value_mad(alpha_date)\n alpha_date = FactorPreProcess().standardization(alpha_date)\n\n alpha_exposure.ix[cur_cal_date, :] = alpha_date\n res = pd.concat([alpha_date, pct_date], axis=1)\n res.columns = ['alpha_val', 'period_pct']\n res = res.dropna()\n res = res.sort_values(by=['alpha_val'], ascending=False)\n\n labels = [\"group_\" + str(i) for i in list(range(1, group_number + 1))]\n res['group'] = pd.cut(res['alpha_val'], bins=group_number, labels=labels)\n\n period_return = (res['alpha_val'] * res['period_pct']).mean()\n alpha_return.ix[cur_cal_date, \"FactorReturn\"] = period_return\n\n information_correlation = res['alpha_val'].corr(res['period_pct'])\n alpha_return.ix[cur_cal_date, \"IC\"] = information_correlation\n\n group_pct = res.groupby(by=['group'])['period_pct'].mean()\n for i_label in range(len(labels)):\n alpha_return.ix[cur_cal_date, labels[i_label]] = group_pct.values[i_label]\n\n alpha_return = alpha_return.dropna(subset=['FactorReturn'])\n alpha_return[\"CumFactorReturn\"] = alpha_return['FactorReturn'].cumsum()\n cum_labels = [\"Cum_\" + str(x) for x in labels]\n alpha_return[cum_labels] = alpha_return[labels].cumsum()\n\n # plot\n ###############################################################################################################\n 
###############################################################################################################\n # plt_col = []\n # plt_col.append(\"CumFactorReturn\")\n # plt_col.extend(cum_labels)\n # alpha_return[plt_col].plot()\n # plt.title(factor_name)\n # plt.show()\n\n # describe annual\n ###############################################################################################################\n ###############################################################################################################\n\n back_test_beg_date = Date().get_trade_date_offset(date_series[0], 1)\n back_test_end_date = Date().get_trade_date_offset(date_series[len(date_series) - 1], 1)\n back_test_days = Date().get_trade_date_diff(back_test_beg_date, back_test_end_date)\n\n backtest_year = back_test_days / year_trade_days\n\n alpha_return['year'] = alpha_return.index.map(lambda x: datetime.strptime(x, \"%Y%m%d\").year)\n\n year_factor_return = alpha_return.groupby(by=['year'])['FactorReturn'].sum()\n year_count = alpha_return.groupby(by=['year'])['FactorReturn'].count()\n year_ic_mean = alpha_return.groupby(by=['year'])['IC'].mean()\n year_ic_std = alpha_return.groupby(by=['year'])['IC'].std()\n year_gp_mean = alpha_return.groupby(by=['year'])[labels].mean()\n\n year_describe = pd.concat([year_factor_return, year_count, year_ic_mean, year_ic_std, year_gp_mean], axis=1)\n col = ['YearFactorReturn', 'Count', 'IC_mean', 'IC_std']\n col.extend(labels)\n year_describe.columns = col\n\n year_describe['YearFactorReturn'] = year_describe['YearFactorReturn'] / year_describe['Count'] * year_count\n year_describe['IC_IR'] = year_describe['IC_mean'] / year_describe['IC_std'] * np.sqrt(50)\n\n year_describe.ix['Sum', 'YearFactorReturn'] = alpha_return[\"CumFactorReturn\"].values[-1] / backtest_year\n year_describe.ix['Sum', 'IC_IR'] = alpha_return[\"IC\"].mean() / alpha_return[\"IC\"].std() * np.sqrt(50)\n year_describe.ix['Sum', 'IC_mean'] = alpha_return[\"IC\"].mean()\n year_describe.ix['Sum', 'IC_std'] = alpha_return[\"IC\"].std()\n year_describe.ix['Sum', labels] = year_describe.ix[0:-1, labels].sum()\n year_describe.index = year_describe.index.map(str)\n\n for i in range(len(year_describe)):\n year = year_describe.index[i]\n corr_pd = pd.DataFrame(year_describe.ix[year, labels].values, index=labels, columns=['group_return'])\n corr_pd['group_number'] = (list(range(1, group_number+1)))\n year_describe.ix[year, 'Group_Corr'] = corr_pd.corr().ix[0, 1]\n\n # save data\n ###############################################################################################################\n ###############################################################################################################\n\n # alpha_exposure_neutral\n ###############################################################################################################\n alpha_exposure = alpha_exposure.astype(np.float)\n filename = os.path.join(out_path, 'alpha_exposure_neutral', factor_name + \"_FactorExposureNeutral.csv\")\n alpha_exposure.T.to_csv(filename)\n\n # exposure_corr\n ###############################################################################################################\n exposure_corr = pd.DataFrame([], index=alpha_exposure.index, columns=['Exposure_Corr'])\n\n for i_date in range(1, len(alpha_exposure.index)):\n last_exposure_date = alpha_exposure.index[i_date-1]\n cur_exposure_date = alpha_exposure.index[i_date]\n exposure_adjoin = alpha_exposure.ix[last_exposure_date:cur_exposure_date, :]\n exposure_adjoin = 
exposure_adjoin.T.dropna()\n exposure_corr.ix[cur_exposure_date, 'Exposure_Corr'] = exposure_adjoin.corr().ix[0, 1]\n\n exposure_corr = exposure_corr.dropna()\n exposure_corr.ix['Mean', 'Exposure_Corr'] = exposure_corr['Exposure_Corr'].mean()\n filename = os.path.join(out_path, 'alpha_exposure_stability', factor_name + \"_FactorExposureCorr.csv\")\n exposure_corr.to_csv(filename)\n\n # Factor Return\n ###############################################################################################################\n filename = os.path.join(out_path, 'alpha_return', factor_name + \"_FactorReturn.xlsx\")\n sheet_name = \"FactorReturn\"\n\n we = WriteExcel(filename)\n ws = we.add_worksheet(sheet_name)\n\n num_format_pd = pd.DataFrame([], columns=year_describe.columns, index=['format'])\n num_format_pd.ix['format', :] = '0.00%'\n num_format_pd.ix['format', ['Count', 'IC_IR']] = '0.00'\n we.write_pandas(year_describe, ws, begin_row_number=0, begin_col_number=1,\n num_format_pd=num_format_pd, color=\"blue\", fillna=True)\n\n num_format_pd = pd.DataFrame([], columns=alpha_return.columns, index=['format'])\n num_format_pd.ix['format', :] = '0.00%'\n num_format_pd.ix['format', ['year']] = '0'\n we.write_pandas(alpha_return, ws, begin_row_number=0, begin_col_number=2+len(year_describe.columns),\n num_format_pd=num_format_pd, color=\"blue\", fillna=True)\n we.close()\n ###############################################################################################################\n\n\nif __name__ == '__main__':\n\n cal_period = \"W\"\n beg_date = \"20040101\"\n end_date = datetime.today().strftime(\"%Y%m%d\")\n\n path = \"E:\\\\3_Data\\\\5_stock_data\\\\3_alpha_model\\\\\"\n file = \"MyAlpha.xlsx\"\n\n data = pd.read_excel(os.path.join(path, file), encoding='gbk')\n data = data[data['计算因子收益率'] == \"是\"]\n data = data.reset_index(drop=True)\n\n for i in range(0, len(data)):\n\n factor_name = data.ix[i, \"因子名\"]\n print(\"#################### 开始计算因子收益率 %s 数据 ####################\" % factor_name)\n cal_factor_alpha_return(factor_name, beg_date, end_date, cal_period)\n print(\"#################### 结束计算因子收益率 %s 数据 ####################\" % factor_name)\n\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors import LinkExtractor
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from mp_data_scrapper.items import MpDataScrapperItem
class MininovaSpider(CrawlSpider):
name = 'mp'
allowed_domains = ['india.gov.in']
start_urls = ['http://india.gov.in/my-government/indian-parliament/lok-sabha',
'http://india.gov.in/my-government/indian-parliament/lok-sabha?page=1',
'http://india.gov.in/my-government/indian-parliament/lok-sabha?page=2',
'http://india.gov.in/my-government/indian-parliament/lok-sabha?page=3',
'http://india.gov.in/my-government/indian-parliament/lok-sabha?page=4',
'http://india.gov.in/my-government/indian-parliament/lok-sabha?page=5',
'http://india.gov.in/my-government/indian-parliament/lok-sabha?page=6',
'http://india.gov.in/my-government/indian-parliament/lok-sabha?page=7',
'http://india.gov.in/my-government/indian-parliament/lok-sabha?page=8',
'http://india.gov.in/my-government/indian-parliament/lok-sabha?page=9',
'http://india.gov.in/my-government/indian-parliament/lok-sabha?page=10',
'http://india.gov.in/my-government/indian-parliament/lok-sabha?page=11',
'http://india.gov.in/my-government/indian-parliament/lok-sabha?page=12',
'http://india.gov.in/my-government/indian-parliament/lok-sabha?page=13',
'http://india.gov.in/my-government/indian-parliament/lok-sabha?page=14',
'http://india.gov.in/my-government/indian-parliament/lok-sabha?page=15',
'http://india.gov.in/my-government/indian-parliament/lok-sabha?page=16',
'http://india.gov.in/my-government/indian-parliament/lok-sabha?page=17',
'http://india.gov.in/my-government/indian-parliament/lok-sabha?page=18',
'http://india.gov.in/my-government/indian-parliament/lok-sabha?page=19',
'http://india.gov.in/my-government/indian-parliament/lok-sabha?page=20',
'http://india.gov.in/my-government/indian-parliament/lok-sabha?page=21',
]
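    # (Illustrative alternative, not in the original record: the paginated
    #  URLs above could equivalently be generated instead of listed out.)
    # start_urls = ['http://india.gov.in/my-government/indian-parliament/lok-sabha'] + \
    #     ['http://india.gov.in/my-government/indian-parliament/lok-sabha?page=%d' % i
    #      for i in range(1, 22)]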
rules = [Rule(SgmlLinkExtractor(allow=['/my-government/indian-parliament/[^?]+'], deny=['my-government/indian-parliament/lok-sabha', 'my-government/indian-parliament/rajya-sabha'], unique=True), process_links='process_links', callback='parse_mp', follow=True)]
def parse_mp(self, response):
mp = MpDataScrapperItem()
try:
mp['name'] = response.xpath("//h1/text()").extract()[0]
except IndexError:
pass
try:
mp['constituency'] = response.xpath("//span[@class='views-label views-label-field-const-name-value']/following::span[1]/text()").extract()[0]
#mp['constituency'] = response.xpath("//span[contains(concat(' ',normalize-space(@class),' '),' views-label-field-const-name-value ')]/following::span[1]/text()").extract()[0]
except IndexError:
pass
try:
mp['party'] = response.xpath("//span[@class='views-label views-label-field-party-fname-value']/following::span[1]/text()").extract()[0]
except IndexError:
pass
try:
mp['father'] = response.xpath("//span[@class='views-label views-label-field-father-name-value']/following::span[1]/text()").extract()[0]
except IndexError:
pass
try:
mp['mother'] = response.xpath("//span[@class='views-label views-label-field-mother-name-value']/following::span[1]/text()").extract()[0]
except IndexError:
pass
try:
mp['dob'] = response.xpath("//span[@class='views-label views-label-field-dob-value']/following::span[1]/text()").extract()[0]
except IndexError:
pass
try:
mp['birth_place'] = response.xpath("//span[@class='views-label views-label-field-birth-place-value']/following::span[1]/text()").extract()[0]
except IndexError:
pass
try:
mp['marital_status'] = response.xpath("//span[@class='views-label views-label-field-marital-status-value']/following::span[1]/text()").extract()[0]
except IndexError:
pass
try:
mp['spouse_name'] = response.xpath("//span[@class='views-label views-label-field-spouse-name-value']/following::span[1]/text()").extract()[0]
except IndexError:
pass
try:
mp['num_sons'] = response.xpath("//span[@class='views-label views-label-field-sons-value']/following::span[1]/text()").extract()[0]
except IndexError:
pass
try:
mp['num_daughters'] = response.xpath("//span[@class='views-label views-label-field-daughters-value']/following::span[1]/text()").extract()[0]
except IndexError:
pass
try:
mp['state'] = response.xpath("//span[@class='views-label views-label-field-state-name-value']/following::span[1]/text()").extract()[0]
except IndexError:
pass
try:
mp['permanent_address'] = response.xpath("//span[@class='views-label views-label-phpcode-1']/following::span[1]/text()").extract()[0]
except IndexError:
pass
try:
mp['present_address'] = response.xpath("//span[@class='views-label views-label-phpcode-2']/following::span[1]/text()").extract()[0]
except IndexError:
pass
try:
mp['email'] = response.xpath("//span[@class='views-label views-label-field-email-value']/following::span[1]/text()").extract()[0]
except IndexError:
pass
try:
mp['education'] = response.xpath("//span[@class='views-label views-label-phpcode-5']/following::span[1]/text()").extract()[0]
except IndexError:
pass
try:
mp['positions_held'] = response.xpath("//span[@class='views-label views-label-phpcode']/following::span[1]/text()").extract()[0]
except IndexError:
pass
try:
mp['social_cultural_activities'] = response.xpath("//span[@class='views-label views-label-phpcode-7']/following::span[1]/text()").extract()[0]
except IndexError:
pass
try:
mp['sports_clubs'] = response.xpath("//span[@class='views-label views-label-phpcode-8']/following::span[1]/text()").extract()[0]
except IndexError:
pass
try:
mp['pastimes_recreation'] = response.xpath("//span[@class='views-label views-label-phpcode-9']/following::span[1]/text()").extract()[0]
except IndexError:
pass
try:
mp['countries_visited'] = response.xpath("//span[@class='views-label views-label-phpcode-4']/following::span[1]/text()").extract()[0]
except IndexError:
pass
try:
mp['other_info'] = response.xpath("//span[@class='views-label views-label-phpcode-3']/following::span[1]/text()").extract()[0]
except IndexError:
pass
try:
mp['photo'] = 'http://india.gov.in' + response.xpath("//div[@class='views-field views-field-phpcode-10']/child::span[1]/child::img[1]/@src").extract()[0]
except IndexError:
pass
return mp
def process_links(self,links):
for i, w in enumerate(links):
print w.url
#w.url = w.url.replace("http://india.gov.in/my-government/indian-parliament/lok-sabha", "http://india.gov.in")
links[i] = w
return links
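# --- A hedged refactoring sketch, not part of the scraped record. The repeated
# try/except IndexError blocks in parse_mp can be collapsed with a small helper;
# the helper name is illustrative. Note that, unlike the original, this stores
# None for a missing field rather than leaving the key unset. ---
def first_or_none(selector_list):
    # SelectorList.extract() returns a list of strings; take the first match,
    # or None when the XPath found nothing, instead of raising IndexError.
    values = selector_list.extract()
    return values[0] if values else None

# Example use inside parse_mp:
#   mp['party'] = first_or_none(response.xpath(
#       "//span[@class='views-label views-label-field-party-fname-value']"
#       "/following::span[1]/text()"))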
|
normal
|
{
"blob_id": "94e9d67095dde4d3bf7ddb207ac17a4c250a2bfc",
"index": 1986,
"step-1": "from scrapy.contrib.spiders import CrawlSpider, Rule\nfrom scrapy.contrib.linkextractors import LinkExtractor\nfrom scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor\nfrom mp_data_scrapper.items import MpDataScrapperItem\n\nclass MininovaSpider(CrawlSpider):\n\n name = 'mp'\n allowed_domains = ['india.gov.in']\n start_urls = ['http://india.gov.in/my-government/indian-parliament/lok-sabha',\n 'http://india.gov.in/my-government/indian-parliament/lok-sabha?page=1',\n 'http://india.gov.in/my-government/indian-parliament/lok-sabha?page=2',\n 'http://india.gov.in/my-government/indian-parliament/lok-sabha?page=3',\n 'http://india.gov.in/my-government/indian-parliament/lok-sabha?page=4',\n 'http://india.gov.in/my-government/indian-parliament/lok-sabha?page=5',\n 'http://india.gov.in/my-government/indian-parliament/lok-sabha?page=6',\n 'http://india.gov.in/my-government/indian-parliament/lok-sabha?page=7',\n 'http://india.gov.in/my-government/indian-parliament/lok-sabha?page=8',\n 'http://india.gov.in/my-government/indian-parliament/lok-sabha?page=9',\n 'http://india.gov.in/my-government/indian-parliament/lok-sabha?page=10',\n 'http://india.gov.in/my-government/indian-parliament/lok-sabha?page=11',\n 'http://india.gov.in/my-government/indian-parliament/lok-sabha?page=12',\n 'http://india.gov.in/my-government/indian-parliament/lok-sabha?page=13',\n 'http://india.gov.in/my-government/indian-parliament/lok-sabha?page=14',\n 'http://india.gov.in/my-government/indian-parliament/lok-sabha?page=15',\n 'http://india.gov.in/my-government/indian-parliament/lok-sabha?page=16',\n 'http://india.gov.in/my-government/indian-parliament/lok-sabha?page=17',\n 'http://india.gov.in/my-government/indian-parliament/lok-sabha?page=18',\n 'http://india.gov.in/my-government/indian-parliament/lok-sabha?page=19',\n 'http://india.gov.in/my-government/indian-parliament/lok-sabha?page=20',\n 'http://india.gov.in/my-government/indian-parliament/lok-sabha?page=21',\n ]\n rules = [Rule(SgmlLinkExtractor(allow=['/my-government/indian-parliament/[^?]+'], deny=['my-government/indian-parliament/lok-sabha', 'my-government/indian-parliament/rajya-sabha'], unique=True), process_links='process_links', callback='parse_mp', follow=True)]\n\n def parse_mp(self, response):\n mp = MpDataScrapperItem()\n\ttry:\n mp['name'] = response.xpath(\"//h1/text()\").extract()[0]\n\texcept IndexError:\n\t pass\n\ttry:\n mp['constituency'] = response.xpath(\"//span[@class='views-label views-label-field-const-name-value']/following::span[1]/text()\").extract()[0]\n #mp['constituency'] = response.xpath(\"//span[contains(concat(' ',normalize-space(@class),' '),' views-label-field-const-name-value ')]/following::span[1]/text()\").extract()[0]\n\texcept IndexError:\n\t pass\n\ttry:\n mp['party'] = response.xpath(\"//span[@class='views-label views-label-field-party-fname-value']/following::span[1]/text()\").extract()[0]\n\texcept IndexError:\n\t pass\n\ttry:\n mp['father'] = response.xpath(\"//span[@class='views-label views-label-field-father-name-value']/following::span[1]/text()\").extract()[0]\n\texcept IndexError:\n\t pass\n\ttry:\n mp['mother'] = response.xpath(\"//span[@class='views-label views-label-field-mother-name-value']/following::span[1]/text()\").extract()[0]\n\texcept IndexError:\n\t pass\n\ttry:\n mp['dob'] = response.xpath(\"//span[@class='views-label views-label-field-dob-value']/following::span[1]/text()\").extract()[0]\n\texcept IndexError:\n\t pass\n\ttry:\n mp['birth_place'] = response.xpath(\"//span[@class='views-label 
views-label-field-birth-place-value']/following::span[1]/text()\").extract()[0]\n\texcept IndexError:\n\t pass\n\ttry:\n mp['marital_status'] = response.xpath(\"//span[@class='views-label views-label-field-marital-status-value']/following::span[1]/text()\").extract()[0]\n\texcept IndexError:\n\t pass\n\ttry:\n mp['spouse_name'] = response.xpath(\"//span[@class='views-label views-label-field-spouse-name-value']/following::span[1]/text()\").extract()[0]\n\texcept IndexError:\n\t pass\n\ttry:\n mp['num_sons'] = response.xpath(\"//span[@class='views-label views-label-field-sons-value']/following::span[1]/text()\").extract()[0]\n\texcept IndexError:\n\t pass\n\ttry:\n mp['num_daughters'] = response.xpath(\"//span[@class='views-label views-label-field-daughters-value']/following::span[1]/text()\").extract()[0]\n\texcept IndexError:\n\t pass\n\ttry:\n mp['state'] = response.xpath(\"//span[@class='views-label views-label-field-state-name-value']/following::span[1]/text()\").extract()[0]\n\texcept IndexError:\n\t pass\n\ttry:\n mp['permanent_address'] = response.xpath(\"//span[@class='views-label views-label-phpcode-1']/following::span[1]/text()\").extract()[0]\n\texcept IndexError:\n\t pass\n\ttry:\n mp['present_address'] = response.xpath(\"//span[@class='views-label views-label-phpcode-2']/following::span[1]/text()\").extract()[0]\n\texcept IndexError:\n\t pass\n\ttry:\n mp['email'] = response.xpath(\"//span[@class='views-label views-label-field-email-value']/following::span[1]/text()\").extract()[0]\n\texcept IndexError:\n\t pass\n\ttry:\n mp['education'] = response.xpath(\"//span[@class='views-label views-label-phpcode-5']/following::span[1]/text()\").extract()[0]\n\texcept IndexError:\n\t pass\n\ttry:\n mp['positions_held'] = response.xpath(\"//span[@class='views-label views-label-phpcode']/following::span[1]/text()\").extract()[0]\n\texcept IndexError:\n\t pass\n\ttry:\n mp['social_cultural_activities'] = response.xpath(\"//span[@class='views-label views-label-phpcode-7']/following::span[1]/text()\").extract()[0]\n\texcept IndexError:\n\t pass\n\ttry:\n mp['sports_clubs'] = response.xpath(\"//span[@class='views-label views-label-phpcode-8']/following::span[1]/text()\").extract()[0]\n\texcept IndexError:\n\t pass\n\ttry:\n mp['pastimes_recreation'] = response.xpath(\"//span[@class='views-label views-label-phpcode-9']/following::span[1]/text()\").extract()[0]\n\texcept IndexError:\n\t pass\n\ttry:\n mp['countries_visited'] = response.xpath(\"//span[@class='views-label views-label-phpcode-4']/following::span[1]/text()\").extract()[0]\n\texcept IndexError:\n\t pass\n\ttry:\n mp['other_info'] = response.xpath(\"//span[@class='views-label views-label-phpcode-3']/following::span[1]/text()\").extract()[0]\n\texcept IndexError:\n\t pass\n\ttry:\n mp['photo'] = 'http://india.gov.in' + response.xpath(\"//div[@class='views-field views-field-phpcode-10']/child::span[1]/child::img[1]/@src\").extract()[0]\n\texcept IndexError:\n\t pass\n return mp\n\n def process_links(self,links):\n for i, w in enumerate(links):\n print w.url\n #w.url = w.url.replace(\"http://india.gov.in/my-government/indian-parliament/lok-sabha\", \"http://india.gov.in\")\n links[i] = w\n return links\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
#!/usr/bin/env python
#
# Copyright (C) University College London, 2007-2012, all rights reserved.
#
# This file is part of HemeLB and is CONFIDENTIAL. You may not work
# with, install, use, duplicate, modify, redistribute or share this
# file, or any part thereof, other than as allowed by any agreement
# specifically made by you with University College London.
#
# encoding: utf-8
"""
test_machine_environment.py
Created by James Hetherington on 2012-01-19.
Copyright (c) 2012 UCL. All rights reserved.
"""
import unittest
import sys
import copy
import textwrap
from ..fab import *
class TestFabric(unittest.TestCase):
def setUp(self):
#Update the user config with testing example
env.test_home=os.path.join(env.localroot,'deploy','test')
user_config=yaml.load(open(os.path.join(env.localroot,'deploy','test','machines_user.yml')))
env.update(user_config['default'])
execute(planck) #Default machine target is assumed as planck.
#Monkeypatch the fabric commands to do nothing, but record what they would have done
sys.modules['deploy.fab'].run=lambda command: self.commands.append(command)
def mock_local(command,original=sys.modules['deploy.fab'].local):
self.commands.append(command)
original(command)
sys.modules['deploy.fab'].local=mock_local
sys.modules['deploy.fab'].put=lambda source,target: self.commands.append("put "+source+" "+target)
sys.modules['deploy.fab'].rsync_project=lambda **args: self.commands.append("rsync "+args['local_dir']+" "+args['remote_dir'])
def mock_profile(profile,original=sys.modules['deploy.fab'].generate):
self.commands.append("generate %g %g %g"%(profile.VoxelSize, profile.Steps , profile.Cycles) )
original(profile)
sys.modules['deploy.fab'].generate=mock_profile
self.commands=[]
env.build_number='abcd1234'
def assertCommandCount(self,should_be):
self.assertEqual(len(self.commands),should_be)
def assertCommand(self,should_be,index=-1):
self.assertEqual(self.commands[index],should_be)
def assertCommandRegexp(self,should_be,index=-1):
self.assertRegexpMatches(self.commands[index],should_be)
def test_machine_alias(self):
self.assertEqual(env.remote,"planck.chem.ucl.ac.uk")
execute(julian)
self.assertEqual(env.remote,"julian.chem.ucl.ac.uk")
execute(hector)
self.assertEqual(env.remote,"login.hector.ac.uk")
def test_clean(self):
execute(clean)
self.assertCommand('make clean')
def test_with_job(self):
with settings(results_path="banana",local_results='pineapple'):
with_job('foo')
self.assertEqual(env.job_results,"banana/foo")
self.assertEqual(env.job_results_local,"pineapple/foo")
def test_with_template_job(self):
with settings(results_path='banana',foo='fish',bar='swim',job_name_template="${foo}_${bar}"):
with_template_job()
self.assertEqual(env.job_results,"banana/fish_swim")
def test_hemelb(self):
execute(hemelb,'cylinder',cores=5)
self.assertEqual(env.name,"cylinder_abcd1234_planck_5_10_10")
self.assertCommandRegexp('mkdir -p .*config_files/cylinder',0)
self.assertCommandRegexp('rsync .*config_files/cylinder',1)
self.assertCommandRegexp("put .*scripts/cylinder_abcd1234_planck_5_10_10.sh",2)
self.assertCommandRegexp("mkdir -p .*results/cylinder_abcd1234_planck_5_10_10",3)
self.assertCommandRegexp("cp .*scripts/cylinder_abcd1234_planck_5_10_10.sh .*results/cylinder_abcd1234_planck_5_10_10",4)
self.assertCommandRegexp("cp .*CMakeCache.txt .*results/cylinder_abcd1234_planck_5_10_10",5)
self.assertCommandRegexp("put .*env.yml",6)
self.assertCommandRegexp("chmod u\+x .*scripts/cylinder_abcd1234_planck_5_10_10.sh",7)
self.assertCommandRegexp(".*scripts/cylinder_abcd1234_planck_5_10_10.sh",8)
self.assertCommandCount(9)
def test_hemelbs(self):
execute(hemelbs,'cylinder',cores='[1:6:1]')
self.assertCommandRegexp('rsync .*config_files/cylinder',1)
self.assertCommandRegexp("cylinder_abcd1234_planck_5_10_10.sh")
self.assertCommandCount(9*5)
def test_create_config(self):
execute(create_config,'cylinder',VoxelSize=0.1)
self.assertEqual(env.config,"cylinder_0_1_1000_3")
self.assertCommandRegexp("mkdir -p .*/configs/cylinder_0_1_1000_3",0)
self.assertCommand("generate 0.1 1000 3",1)
self.assertCommandCount(2)
def test_create_configs(self):
execute(create_configs,'cylinder',VoxelSize='[0.1:0.21:0.01]')
self.assertEqual(env.config,"cylinder_0_2_1000_3")
self.assertCommandRegexp("mkdir -p .*/configs/cylinder_0_1_1000_3",0)
self.assertCommand("generate 0.1 1000 3",1)
self.assertCommandCount(2*11)
def test_hemelb_profile(self):
execute(hemelb_profile,'cylinder',VoxelSize='[0.1:0.21:0.01]',cores='[1:6:1]')
self.assertEqual(env.name,"cylinder_0_2_1000_3_abcd1234_planck_5_10_10")
self.assertCommandRegexp("mkdir -p .*/configs/cylinder_0_1_1000_3",0)
self.assertCommand("generate 0.1 1000 3",1)
self.assertCommandRegexp('mkdir -p .*config_files/cylinder',2)
self.assertCommandRegexp('rsync .*config_files/cylinder',3)
self.assertCommandRegexp("put .*scripts/cylinder_0_1_1000_3_abcd1234_planck_1_10_10.sh",4)
self.assertCommandRegexp("mkdir -p .*results/cylinder_0_1_1000_3_abcd1234_planck_1_10_10",5)
self.assertCommandRegexp("cp .*scripts/cylinder_0_1_1000_3_abcd1234_planck_1_10_10.sh .*results/cylinder_0_1_1000_3_abcd1234_planck_1_10_10",6)
self.assertCommandRegexp("cp .*CMakeCache.txt .*results/cylinder_0_1_1000_3_abcd1234_planck_1_10_10",7)
self.assertCommandRegexp("put .*env.yml",8)
self.assertCommandRegexp("chmod u\+x .*scripts/cylinder_0_1_1000_3_abcd1234_planck_1_10_10.sh",9)
self.assertCommandRegexp(".*scripts/cylinder_0_1_1000_3_abcd1234_planck_1_10_10.sh",10)
self.assertCommandCount(2*11 + 9*11*5)
def test_hemelb_profile_no_config_generation(self):
execute(hemelb_profile,'cylinder',VoxelSize='[0.1:0.21:0.01]',cores='[1:6:1]',create_configs="False")
self.assertEqual(env.name,"cylinder_0_2_1000_3_abcd1234_planck_5_10_10")
self.assertCommandRegexp('mkdir -p .*config_files/cylinder',0)
self.assertCommandRegexp('rsync .*config_files/cylinder',1)
self.assertCommandRegexp("put .*scripts/cylinder_0_1_1000_3_abcd1234_planck_1_10_10.sh",2)
self.assertCommandRegexp("mkdir -p .*results/cylinder_0_1_1000_3_abcd1234_planck_1_10_10",3)
self.assertCommandRegexp("cp .*scripts/cylinder_0_1_1000_3_abcd1234_planck_1_10_10.sh .*results/cylinder_0_1_1000_3_abcd1234_planck_1_10_10",4)
self.assertCommandRegexp("cp .*CMakeCache.txt .*results/cylinder_0_1_1000_3_abcd1234_planck_1_10_10",5)
self.assertCommandRegexp("put .*env.yml",6)
self.assertCommandRegexp("chmod u\+x .*scripts/cylinder_0_1_1000_3_abcd1234_planck_1_10_10.sh",7)
self.assertCommandRegexp(".*scripts/cylinder_0_1_1000_3_abcd1234_planck_1_10_10.sh",8)
self.assertCommandCount(9*11*5)
def test_configure_default(self):
execute(configure)
target={
'CMAKE_BUILD_TYPE': "Release",
'CMAKE_CXX_FLAGS_RELEASE': "-O4",
'CMAKE_INSTALL_PREFIX': env.install_path,
'CPPUNIT_PATCH_LDL' : True,
"HEMELB_DEPENDENCIES_INSTALL_PATH": env.install_path,
"HEMELB_SUBPROJECT_MAKE_JOBS": 1
}
self.assertEqual(env.total_cmake_options,target)
#Can't just assert on a string here, as the order of the dict is not defined
for key,value in target.iteritems():
self.assertRegexpMatches(env.cmake_flags,"-D%s=%s"%(key,value))
def test_configure_debug(self):
execute(configure,'debug')
self.assertEqual(env.total_cmake_options,
{
'CMAKE_BUILD_TYPE': "Debug",
'HEMELB_OPTIMISATION': "",
'HEMELB_LOG_LEVEL': "debug",
'CPPUNIT_PATCH_LDL' : True,
'CMAKE_INSTALL_PREFIX': env.install_path,
"HEMELB_DEPENDENCIES_INSTALL_PATH": env.install_path,
"HEMELB_SUBPROJECT_MAKE_JOBS": 1
})
def test_script_template(self):
script=script_templates('dummy_ge_header','dummy_jobscript',commands=['extra'])
content=open(script).read()
self.assertEqual(content,"user: test_user\n\nrun bananas\n\nextra")
|
normal
|
{
"blob_id": "7700e3c4061f0e81a1dea8fa8b27a0380fc26e71",
"index": 7171,
"step-1": "<mask token>\n\n\nclass TestFabric(unittest.TestCase):\n\n def setUp(self):\n env.test_home = os.path.join(env.localroot, 'deploy', 'test')\n user_config = yaml.load(open(os.path.join(env.localroot, 'deploy',\n 'test', 'machines_user.yml')))\n env.update(user_config['default'])\n execute(planck)\n sys.modules['deploy.fab'].run = lambda command: self.commands.append(\n command)\n\n def mock_local(command, original=sys.modules['deploy.fab'].local):\n self.commands.append(command)\n original(command)\n sys.modules['deploy.fab'].local = mock_local\n sys.modules['deploy.fab'\n ].put = lambda source, target: self.commands.append('put ' +\n source + ' ' + target)\n sys.modules['deploy.fab'\n ].rsync_project = lambda **args: self.commands.append('rsync ' +\n args['local_dir'] + ' ' + args['remote_dir'])\n\n def mock_profile(profile, original=sys.modules['deploy.fab'].generate):\n self.commands.append('generate %g %g %g' % (profile.VoxelSize,\n profile.Steps, profile.Cycles))\n original(profile)\n sys.modules['deploy.fab'].generate = mock_profile\n self.commands = []\n env.build_number = 'abcd1234'\n\n def assertCommandCount(self, should_be):\n self.assertEqual(len(self.commands), should_be)\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def test_with_job(self):\n with settings(results_path='banana', local_results='pineapple'):\n with_job('foo')\n self.assertEqual(env.job_results, 'banana/foo')\n self.assertEqual(env.job_results_local, 'pineapple/foo')\n\n def test_with_template_job(self):\n with settings(results_path='banana', foo='fish', bar='swim',\n job_name_template='${foo}_${bar}'):\n with_template_job()\n self.assertEqual(env.job_results, 'banana/fish_swim')\n\n def test_hemelb(self):\n execute(hemelb, 'cylinder', cores=5)\n self.assertEqual(env.name, 'cylinder_abcd1234_planck_5_10_10')\n self.assertCommandRegexp('mkdir -p .*config_files/cylinder', 0)\n self.assertCommandRegexp('rsync .*config_files/cylinder', 1)\n self.assertCommandRegexp(\n 'put .*scripts/cylinder_abcd1234_planck_5_10_10.sh', 2)\n self.assertCommandRegexp(\n 'mkdir -p .*results/cylinder_abcd1234_planck_5_10_10', 3)\n self.assertCommandRegexp(\n 'cp .*scripts/cylinder_abcd1234_planck_5_10_10.sh .*results/cylinder_abcd1234_planck_5_10_10'\n , 4)\n self.assertCommandRegexp(\n 'cp .*CMakeCache.txt .*results/cylinder_abcd1234_planck_5_10_10', 5\n )\n self.assertCommandRegexp('put .*env.yml', 6)\n self.assertCommandRegexp(\n 'chmod u\\\\+x .*scripts/cylinder_abcd1234_planck_5_10_10.sh', 7)\n self.assertCommandRegexp(\n '.*scripts/cylinder_abcd1234_planck_5_10_10.sh', 8)\n self.assertCommandCount(9)\n <mask token>\n\n def test_create_config(self):\n execute(create_config, 'cylinder', VoxelSize=0.1)\n self.assertEqual(env.config, 'cylinder_0_1_1000_3')\n self.assertCommandRegexp('mkdir -p .*/configs/cylinder_0_1_1000_3', 0)\n self.assertCommand('generate 0.1 1000 3', 1)\n self.assertCommandCount(2)\n <mask token>\n\n def test_hemelb_profile(self):\n execute(hemelb_profile, 'cylinder', VoxelSize='[0.1:0.21:0.01]',\n cores='[1:6:1]')\n self.assertEqual(env.name,\n 'cylinder_0_2_1000_3_abcd1234_planck_5_10_10')\n self.assertCommandRegexp('mkdir -p .*/configs/cylinder_0_1_1000_3', 0)\n self.assertCommand('generate 0.1 1000 3', 1)\n self.assertCommandRegexp('mkdir -p .*config_files/cylinder', 2)\n self.assertCommandRegexp('rsync .*config_files/cylinder', 3)\n self.assertCommandRegexp(\n 'put .*scripts/cylinder_0_1_1000_3_abcd1234_planck_1_10_10.sh', 4)\n self.assertCommandRegexp(\n 'mkdir -p 
.*results/cylinder_0_1_1000_3_abcd1234_planck_1_10_10', 5\n )\n self.assertCommandRegexp(\n 'cp .*scripts/cylinder_0_1_1000_3_abcd1234_planck_1_10_10.sh .*results/cylinder_0_1_1000_3_abcd1234_planck_1_10_10'\n , 6)\n self.assertCommandRegexp(\n 'cp .*CMakeCache.txt .*results/cylinder_0_1_1000_3_abcd1234_planck_1_10_10'\n , 7)\n self.assertCommandRegexp('put .*env.yml', 8)\n self.assertCommandRegexp(\n 'chmod u\\\\+x .*scripts/cylinder_0_1_1000_3_abcd1234_planck_1_10_10.sh'\n , 9)\n self.assertCommandRegexp(\n '.*scripts/cylinder_0_1_1000_3_abcd1234_planck_1_10_10.sh', 10)\n self.assertCommandCount(2 * 11 + 9 * 11 * 5)\n\n def test_hemelb_profile_no_config_generation(self):\n execute(hemelb_profile, 'cylinder', VoxelSize='[0.1:0.21:0.01]',\n cores='[1:6:1]', create_configs='False')\n self.assertEqual(env.name,\n 'cylinder_0_2_1000_3_abcd1234_planck_5_10_10')\n self.assertCommandRegexp('mkdir -p .*config_files/cylinder', 0)\n self.assertCommandRegexp('rsync .*config_files/cylinder', 1)\n self.assertCommandRegexp(\n 'put .*scripts/cylinder_0_1_1000_3_abcd1234_planck_1_10_10.sh', 2)\n self.assertCommandRegexp(\n 'mkdir -p .*results/cylinder_0_1_1000_3_abcd1234_planck_1_10_10', 3\n )\n self.assertCommandRegexp(\n 'cp .*scripts/cylinder_0_1_1000_3_abcd1234_planck_1_10_10.sh .*results/cylinder_0_1_1000_3_abcd1234_planck_1_10_10'\n , 4)\n self.assertCommandRegexp(\n 'cp .*CMakeCache.txt .*results/cylinder_0_1_1000_3_abcd1234_planck_1_10_10'\n , 5)\n self.assertCommandRegexp('put .*env.yml', 6)\n self.assertCommandRegexp(\n 'chmod u\\\\+x .*scripts/cylinder_0_1_1000_3_abcd1234_planck_1_10_10.sh'\n , 7)\n self.assertCommandRegexp(\n '.*scripts/cylinder_0_1_1000_3_abcd1234_planck_1_10_10.sh', 8)\n self.assertCommandCount(9 * 11 * 5)\n\n def test_configure_default(self):\n execute(configure)\n target = {'CMAKE_BUILD_TYPE': 'Release', 'CMAKE_CXX_FLAGS_RELEASE':\n '-O4', 'CMAKE_INSTALL_PREFIX': env.install_path,\n 'CPPUNIT_PATCH_LDL': True, 'HEMELB_DEPENDENCIES_INSTALL_PATH':\n env.install_path, 'HEMELB_SUBPROJECT_MAKE_JOBS': 1}\n self.assertEqual(env.total_cmake_options, target)\n for key, value in target.iteritems():\n self.assertRegexpMatches(env.cmake_flags, '-D%s=%s' % (key, value))\n\n def test_configure_debug(self):\n execute(configure, 'debug')\n self.assertEqual(env.total_cmake_options, {'CMAKE_BUILD_TYPE':\n 'Debug', 'HEMELB_OPTIMISATION': '', 'HEMELB_LOG_LEVEL': 'debug',\n 'CPPUNIT_PATCH_LDL': True, 'CMAKE_INSTALL_PREFIX': env.\n install_path, 'HEMELB_DEPENDENCIES_INSTALL_PATH': env.\n install_path, 'HEMELB_SUBPROJECT_MAKE_JOBS': 1})\n\n def test_script_template(self):\n script = script_templates('dummy_ge_header', 'dummy_jobscript',\n commands=['extra'])\n content = open(script).read()\n self.assertEqual(content, 'user: test_user\\n\\nrun bananas\\n\\nextra')\n",
"step-2": "<mask token>\n\n\nclass TestFabric(unittest.TestCase):\n\n def setUp(self):\n env.test_home = os.path.join(env.localroot, 'deploy', 'test')\n user_config = yaml.load(open(os.path.join(env.localroot, 'deploy',\n 'test', 'machines_user.yml')))\n env.update(user_config['default'])\n execute(planck)\n sys.modules['deploy.fab'].run = lambda command: self.commands.append(\n command)\n\n def mock_local(command, original=sys.modules['deploy.fab'].local):\n self.commands.append(command)\n original(command)\n sys.modules['deploy.fab'].local = mock_local\n sys.modules['deploy.fab'\n ].put = lambda source, target: self.commands.append('put ' +\n source + ' ' + target)\n sys.modules['deploy.fab'\n ].rsync_project = lambda **args: self.commands.append('rsync ' +\n args['local_dir'] + ' ' + args['remote_dir'])\n\n def mock_profile(profile, original=sys.modules['deploy.fab'].generate):\n self.commands.append('generate %g %g %g' % (profile.VoxelSize,\n profile.Steps, profile.Cycles))\n original(profile)\n sys.modules['deploy.fab'].generate = mock_profile\n self.commands = []\n env.build_number = 'abcd1234'\n\n def assertCommandCount(self, should_be):\n self.assertEqual(len(self.commands), should_be)\n <mask token>\n <mask token>\n <mask token>\n\n def test_clean(self):\n execute(clean)\n self.assertCommand('make clean')\n\n def test_with_job(self):\n with settings(results_path='banana', local_results='pineapple'):\n with_job('foo')\n self.assertEqual(env.job_results, 'banana/foo')\n self.assertEqual(env.job_results_local, 'pineapple/foo')\n\n def test_with_template_job(self):\n with settings(results_path='banana', foo='fish', bar='swim',\n job_name_template='${foo}_${bar}'):\n with_template_job()\n self.assertEqual(env.job_results, 'banana/fish_swim')\n\n def test_hemelb(self):\n execute(hemelb, 'cylinder', cores=5)\n self.assertEqual(env.name, 'cylinder_abcd1234_planck_5_10_10')\n self.assertCommandRegexp('mkdir -p .*config_files/cylinder', 0)\n self.assertCommandRegexp('rsync .*config_files/cylinder', 1)\n self.assertCommandRegexp(\n 'put .*scripts/cylinder_abcd1234_planck_5_10_10.sh', 2)\n self.assertCommandRegexp(\n 'mkdir -p .*results/cylinder_abcd1234_planck_5_10_10', 3)\n self.assertCommandRegexp(\n 'cp .*scripts/cylinder_abcd1234_planck_5_10_10.sh .*results/cylinder_abcd1234_planck_5_10_10'\n , 4)\n self.assertCommandRegexp(\n 'cp .*CMakeCache.txt .*results/cylinder_abcd1234_planck_5_10_10', 5\n )\n self.assertCommandRegexp('put .*env.yml', 6)\n self.assertCommandRegexp(\n 'chmod u\\\\+x .*scripts/cylinder_abcd1234_planck_5_10_10.sh', 7)\n self.assertCommandRegexp(\n '.*scripts/cylinder_abcd1234_planck_5_10_10.sh', 8)\n self.assertCommandCount(9)\n <mask token>\n\n def test_create_config(self):\n execute(create_config, 'cylinder', VoxelSize=0.1)\n self.assertEqual(env.config, 'cylinder_0_1_1000_3')\n self.assertCommandRegexp('mkdir -p .*/configs/cylinder_0_1_1000_3', 0)\n self.assertCommand('generate 0.1 1000 3', 1)\n self.assertCommandCount(2)\n\n def test_create_configs(self):\n execute(create_configs, 'cylinder', VoxelSize='[0.1:0.21:0.01]')\n self.assertEqual(env.config, 'cylinder_0_2_1000_3')\n self.assertCommandRegexp('mkdir -p .*/configs/cylinder_0_1_1000_3', 0)\n self.assertCommand('generate 0.1 1000 3', 1)\n self.assertCommandCount(2 * 11)\n\n def test_hemelb_profile(self):\n execute(hemelb_profile, 'cylinder', VoxelSize='[0.1:0.21:0.01]',\n cores='[1:6:1]')\n self.assertEqual(env.name,\n 'cylinder_0_2_1000_3_abcd1234_planck_5_10_10')\n self.assertCommandRegexp('mkdir -p 
.*/configs/cylinder_0_1_1000_3', 0)\n self.assertCommand('generate 0.1 1000 3', 1)\n self.assertCommandRegexp('mkdir -p .*config_files/cylinder', 2)\n self.assertCommandRegexp('rsync .*config_files/cylinder', 3)\n self.assertCommandRegexp(\n 'put .*scripts/cylinder_0_1_1000_3_abcd1234_planck_1_10_10.sh', 4)\n self.assertCommandRegexp(\n 'mkdir -p .*results/cylinder_0_1_1000_3_abcd1234_planck_1_10_10', 5\n )\n self.assertCommandRegexp(\n 'cp .*scripts/cylinder_0_1_1000_3_abcd1234_planck_1_10_10.sh .*results/cylinder_0_1_1000_3_abcd1234_planck_1_10_10'\n , 6)\n self.assertCommandRegexp(\n 'cp .*CMakeCache.txt .*results/cylinder_0_1_1000_3_abcd1234_planck_1_10_10'\n , 7)\n self.assertCommandRegexp('put .*env.yml', 8)\n self.assertCommandRegexp(\n 'chmod u\\\\+x .*scripts/cylinder_0_1_1000_3_abcd1234_planck_1_10_10.sh'\n , 9)\n self.assertCommandRegexp(\n '.*scripts/cylinder_0_1_1000_3_abcd1234_planck_1_10_10.sh', 10)\n self.assertCommandCount(2 * 11 + 9 * 11 * 5)\n\n def test_hemelb_profile_no_config_generation(self):\n execute(hemelb_profile, 'cylinder', VoxelSize='[0.1:0.21:0.01]',\n cores='[1:6:1]', create_configs='False')\n self.assertEqual(env.name,\n 'cylinder_0_2_1000_3_abcd1234_planck_5_10_10')\n self.assertCommandRegexp('mkdir -p .*config_files/cylinder', 0)\n self.assertCommandRegexp('rsync .*config_files/cylinder', 1)\n self.assertCommandRegexp(\n 'put .*scripts/cylinder_0_1_1000_3_abcd1234_planck_1_10_10.sh', 2)\n self.assertCommandRegexp(\n 'mkdir -p .*results/cylinder_0_1_1000_3_abcd1234_planck_1_10_10', 3\n )\n self.assertCommandRegexp(\n 'cp .*scripts/cylinder_0_1_1000_3_abcd1234_planck_1_10_10.sh .*results/cylinder_0_1_1000_3_abcd1234_planck_1_10_10'\n , 4)\n self.assertCommandRegexp(\n 'cp .*CMakeCache.txt .*results/cylinder_0_1_1000_3_abcd1234_planck_1_10_10'\n , 5)\n self.assertCommandRegexp('put .*env.yml', 6)\n self.assertCommandRegexp(\n 'chmod u\\\\+x .*scripts/cylinder_0_1_1000_3_abcd1234_planck_1_10_10.sh'\n , 7)\n self.assertCommandRegexp(\n '.*scripts/cylinder_0_1_1000_3_abcd1234_planck_1_10_10.sh', 8)\n self.assertCommandCount(9 * 11 * 5)\n\n def test_configure_default(self):\n execute(configure)\n target = {'CMAKE_BUILD_TYPE': 'Release', 'CMAKE_CXX_FLAGS_RELEASE':\n '-O4', 'CMAKE_INSTALL_PREFIX': env.install_path,\n 'CPPUNIT_PATCH_LDL': True, 'HEMELB_DEPENDENCIES_INSTALL_PATH':\n env.install_path, 'HEMELB_SUBPROJECT_MAKE_JOBS': 1}\n self.assertEqual(env.total_cmake_options, target)\n for key, value in target.iteritems():\n self.assertRegexpMatches(env.cmake_flags, '-D%s=%s' % (key, value))\n\n def test_configure_debug(self):\n execute(configure, 'debug')\n self.assertEqual(env.total_cmake_options, {'CMAKE_BUILD_TYPE':\n 'Debug', 'HEMELB_OPTIMISATION': '', 'HEMELB_LOG_LEVEL': 'debug',\n 'CPPUNIT_PATCH_LDL': True, 'CMAKE_INSTALL_PREFIX': env.\n install_path, 'HEMELB_DEPENDENCIES_INSTALL_PATH': env.\n install_path, 'HEMELB_SUBPROJECT_MAKE_JOBS': 1})\n\n def test_script_template(self):\n script = script_templates('dummy_ge_header', 'dummy_jobscript',\n commands=['extra'])\n content = open(script).read()\n self.assertEqual(content, 'user: test_user\\n\\nrun bananas\\n\\nextra')\n",
"step-3": "<mask token>\n\n\nclass TestFabric(unittest.TestCase):\n\n def setUp(self):\n env.test_home = os.path.join(env.localroot, 'deploy', 'test')\n user_config = yaml.load(open(os.path.join(env.localroot, 'deploy',\n 'test', 'machines_user.yml')))\n env.update(user_config['default'])\n execute(planck)\n sys.modules['deploy.fab'].run = lambda command: self.commands.append(\n command)\n\n def mock_local(command, original=sys.modules['deploy.fab'].local):\n self.commands.append(command)\n original(command)\n sys.modules['deploy.fab'].local = mock_local\n sys.modules['deploy.fab'\n ].put = lambda source, target: self.commands.append('put ' +\n source + ' ' + target)\n sys.modules['deploy.fab'\n ].rsync_project = lambda **args: self.commands.append('rsync ' +\n args['local_dir'] + ' ' + args['remote_dir'])\n\n def mock_profile(profile, original=sys.modules['deploy.fab'].generate):\n self.commands.append('generate %g %g %g' % (profile.VoxelSize,\n profile.Steps, profile.Cycles))\n original(profile)\n sys.modules['deploy.fab'].generate = mock_profile\n self.commands = []\n env.build_number = 'abcd1234'\n\n def assertCommandCount(self, should_be):\n self.assertEqual(len(self.commands), should_be)\n <mask token>\n <mask token>\n\n def test_machine_alias(self):\n self.assertEqual(env.remote, 'planck.chem.ucl.ac.uk')\n execute(julian)\n self.assertEqual(env.remote, 'julian.chem.ucl.ac.uk')\n execute(hector)\n self.assertEqual(env.remote, 'login.hector.ac.uk')\n\n def test_clean(self):\n execute(clean)\n self.assertCommand('make clean')\n\n def test_with_job(self):\n with settings(results_path='banana', local_results='pineapple'):\n with_job('foo')\n self.assertEqual(env.job_results, 'banana/foo')\n self.assertEqual(env.job_results_local, 'pineapple/foo')\n\n def test_with_template_job(self):\n with settings(results_path='banana', foo='fish', bar='swim',\n job_name_template='${foo}_${bar}'):\n with_template_job()\n self.assertEqual(env.job_results, 'banana/fish_swim')\n\n def test_hemelb(self):\n execute(hemelb, 'cylinder', cores=5)\n self.assertEqual(env.name, 'cylinder_abcd1234_planck_5_10_10')\n self.assertCommandRegexp('mkdir -p .*config_files/cylinder', 0)\n self.assertCommandRegexp('rsync .*config_files/cylinder', 1)\n self.assertCommandRegexp(\n 'put .*scripts/cylinder_abcd1234_planck_5_10_10.sh', 2)\n self.assertCommandRegexp(\n 'mkdir -p .*results/cylinder_abcd1234_planck_5_10_10', 3)\n self.assertCommandRegexp(\n 'cp .*scripts/cylinder_abcd1234_planck_5_10_10.sh .*results/cylinder_abcd1234_planck_5_10_10'\n , 4)\n self.assertCommandRegexp(\n 'cp .*CMakeCache.txt .*results/cylinder_abcd1234_planck_5_10_10', 5\n )\n self.assertCommandRegexp('put .*env.yml', 6)\n self.assertCommandRegexp(\n 'chmod u\\\\+x .*scripts/cylinder_abcd1234_planck_5_10_10.sh', 7)\n self.assertCommandRegexp(\n '.*scripts/cylinder_abcd1234_planck_5_10_10.sh', 8)\n self.assertCommandCount(9)\n\n def test_hemelbs(self):\n execute(hemelbs, 'cylinder', cores='[1:6:1]')\n self.assertCommandRegexp('rsync .*config_files/cylinder', 1)\n self.assertCommandRegexp('cylinder_abcd1234_planck_5_10_10.sh')\n self.assertCommandCount(9 * 5)\n\n def test_create_config(self):\n execute(create_config, 'cylinder', VoxelSize=0.1)\n self.assertEqual(env.config, 'cylinder_0_1_1000_3')\n self.assertCommandRegexp('mkdir -p .*/configs/cylinder_0_1_1000_3', 0)\n self.assertCommand('generate 0.1 1000 3', 1)\n self.assertCommandCount(2)\n\n def test_create_configs(self):\n execute(create_configs, 'cylinder', VoxelSize='[0.1:0.21:0.01]')\n 
self.assertEqual(env.config, 'cylinder_0_2_1000_3')\n self.assertCommandRegexp('mkdir -p .*/configs/cylinder_0_1_1000_3', 0)\n self.assertCommand('generate 0.1 1000 3', 1)\n self.assertCommandCount(2 * 11)\n\n def test_hemelb_profile(self):\n execute(hemelb_profile, 'cylinder', VoxelSize='[0.1:0.21:0.01]',\n cores='[1:6:1]')\n self.assertEqual(env.name,\n 'cylinder_0_2_1000_3_abcd1234_planck_5_10_10')\n self.assertCommandRegexp('mkdir -p .*/configs/cylinder_0_1_1000_3', 0)\n self.assertCommand('generate 0.1 1000 3', 1)\n self.assertCommandRegexp('mkdir -p .*config_files/cylinder', 2)\n self.assertCommandRegexp('rsync .*config_files/cylinder', 3)\n self.assertCommandRegexp(\n 'put .*scripts/cylinder_0_1_1000_3_abcd1234_planck_1_10_10.sh', 4)\n self.assertCommandRegexp(\n 'mkdir -p .*results/cylinder_0_1_1000_3_abcd1234_planck_1_10_10', 5\n )\n self.assertCommandRegexp(\n 'cp .*scripts/cylinder_0_1_1000_3_abcd1234_planck_1_10_10.sh .*results/cylinder_0_1_1000_3_abcd1234_planck_1_10_10'\n , 6)\n self.assertCommandRegexp(\n 'cp .*CMakeCache.txt .*results/cylinder_0_1_1000_3_abcd1234_planck_1_10_10'\n , 7)\n self.assertCommandRegexp('put .*env.yml', 8)\n self.assertCommandRegexp(\n 'chmod u\\\\+x .*scripts/cylinder_0_1_1000_3_abcd1234_planck_1_10_10.sh'\n , 9)\n self.assertCommandRegexp(\n '.*scripts/cylinder_0_1_1000_3_abcd1234_planck_1_10_10.sh', 10)\n self.assertCommandCount(2 * 11 + 9 * 11 * 5)\n\n def test_hemelb_profile_no_config_generation(self):\n execute(hemelb_profile, 'cylinder', VoxelSize='[0.1:0.21:0.01]',\n cores='[1:6:1]', create_configs='False')\n self.assertEqual(env.name,\n 'cylinder_0_2_1000_3_abcd1234_planck_5_10_10')\n self.assertCommandRegexp('mkdir -p .*config_files/cylinder', 0)\n self.assertCommandRegexp('rsync .*config_files/cylinder', 1)\n self.assertCommandRegexp(\n 'put .*scripts/cylinder_0_1_1000_3_abcd1234_planck_1_10_10.sh', 2)\n self.assertCommandRegexp(\n 'mkdir -p .*results/cylinder_0_1_1000_3_abcd1234_planck_1_10_10', 3\n )\n self.assertCommandRegexp(\n 'cp .*scripts/cylinder_0_1_1000_3_abcd1234_planck_1_10_10.sh .*results/cylinder_0_1_1000_3_abcd1234_planck_1_10_10'\n , 4)\n self.assertCommandRegexp(\n 'cp .*CMakeCache.txt .*results/cylinder_0_1_1000_3_abcd1234_planck_1_10_10'\n , 5)\n self.assertCommandRegexp('put .*env.yml', 6)\n self.assertCommandRegexp(\n 'chmod u\\\\+x .*scripts/cylinder_0_1_1000_3_abcd1234_planck_1_10_10.sh'\n , 7)\n self.assertCommandRegexp(\n '.*scripts/cylinder_0_1_1000_3_abcd1234_planck_1_10_10.sh', 8)\n self.assertCommandCount(9 * 11 * 5)\n\n def test_configure_default(self):\n execute(configure)\n target = {'CMAKE_BUILD_TYPE': 'Release', 'CMAKE_CXX_FLAGS_RELEASE':\n '-O4', 'CMAKE_INSTALL_PREFIX': env.install_path,\n 'CPPUNIT_PATCH_LDL': True, 'HEMELB_DEPENDENCIES_INSTALL_PATH':\n env.install_path, 'HEMELB_SUBPROJECT_MAKE_JOBS': 1}\n self.assertEqual(env.total_cmake_options, target)\n for key, value in target.iteritems():\n self.assertRegexpMatches(env.cmake_flags, '-D%s=%s' % (key, value))\n\n def test_configure_debug(self):\n execute(configure, 'debug')\n self.assertEqual(env.total_cmake_options, {'CMAKE_BUILD_TYPE':\n 'Debug', 'HEMELB_OPTIMISATION': '', 'HEMELB_LOG_LEVEL': 'debug',\n 'CPPUNIT_PATCH_LDL': True, 'CMAKE_INSTALL_PREFIX': env.\n install_path, 'HEMELB_DEPENDENCIES_INSTALL_PATH': env.\n install_path, 'HEMELB_SUBPROJECT_MAKE_JOBS': 1})\n\n def test_script_template(self):\n script = script_templates('dummy_ge_header', 'dummy_jobscript',\n commands=['extra'])\n content = open(script).read()\n self.assertEqual(content, 
'user: test_user\\n\\nrun bananas\\n\\nextra')\n",
"step-4": "<mask token>\nimport unittest\nimport sys\nimport copy\nimport textwrap\nfrom ..fab import *\n\n\nclass TestFabric(unittest.TestCase):\n\n def setUp(self):\n env.test_home = os.path.join(env.localroot, 'deploy', 'test')\n user_config = yaml.load(open(os.path.join(env.localroot, 'deploy',\n 'test', 'machines_user.yml')))\n env.update(user_config['default'])\n execute(planck)\n sys.modules['deploy.fab'].run = lambda command: self.commands.append(\n command)\n\n def mock_local(command, original=sys.modules['deploy.fab'].local):\n self.commands.append(command)\n original(command)\n sys.modules['deploy.fab'].local = mock_local\n sys.modules['deploy.fab'\n ].put = lambda source, target: self.commands.append('put ' +\n source + ' ' + target)\n sys.modules['deploy.fab'\n ].rsync_project = lambda **args: self.commands.append('rsync ' +\n args['local_dir'] + ' ' + args['remote_dir'])\n\n def mock_profile(profile, original=sys.modules['deploy.fab'].generate):\n self.commands.append('generate %g %g %g' % (profile.VoxelSize,\n profile.Steps, profile.Cycles))\n original(profile)\n sys.modules['deploy.fab'].generate = mock_profile\n self.commands = []\n env.build_number = 'abcd1234'\n\n def assertCommandCount(self, should_be):\n self.assertEqual(len(self.commands), should_be)\n\n def assertCommand(self, should_be, index=-1):\n self.assertEqual(self.commands[index], should_be)\n\n def assertCommandRegexp(self, should_be, index=-1):\n self.assertRegexpMatches(self.commands[index], should_be)\n\n def test_machine_alias(self):\n self.assertEqual(env.remote, 'planck.chem.ucl.ac.uk')\n execute(julian)\n self.assertEqual(env.remote, 'julian.chem.ucl.ac.uk')\n execute(hector)\n self.assertEqual(env.remote, 'login.hector.ac.uk')\n\n def test_clean(self):\n execute(clean)\n self.assertCommand('make clean')\n\n def test_with_job(self):\n with settings(results_path='banana', local_results='pineapple'):\n with_job('foo')\n self.assertEqual(env.job_results, 'banana/foo')\n self.assertEqual(env.job_results_local, 'pineapple/foo')\n\n def test_with_template_job(self):\n with settings(results_path='banana', foo='fish', bar='swim',\n job_name_template='${foo}_${bar}'):\n with_template_job()\n self.assertEqual(env.job_results, 'banana/fish_swim')\n\n def test_hemelb(self):\n execute(hemelb, 'cylinder', cores=5)\n self.assertEqual(env.name, 'cylinder_abcd1234_planck_5_10_10')\n self.assertCommandRegexp('mkdir -p .*config_files/cylinder', 0)\n self.assertCommandRegexp('rsync .*config_files/cylinder', 1)\n self.assertCommandRegexp(\n 'put .*scripts/cylinder_abcd1234_planck_5_10_10.sh', 2)\n self.assertCommandRegexp(\n 'mkdir -p .*results/cylinder_abcd1234_planck_5_10_10', 3)\n self.assertCommandRegexp(\n 'cp .*scripts/cylinder_abcd1234_planck_5_10_10.sh .*results/cylinder_abcd1234_planck_5_10_10'\n , 4)\n self.assertCommandRegexp(\n 'cp .*CMakeCache.txt .*results/cylinder_abcd1234_planck_5_10_10', 5\n )\n self.assertCommandRegexp('put .*env.yml', 6)\n self.assertCommandRegexp(\n 'chmod u\\\\+x .*scripts/cylinder_abcd1234_planck_5_10_10.sh', 7)\n self.assertCommandRegexp(\n '.*scripts/cylinder_abcd1234_planck_5_10_10.sh', 8)\n self.assertCommandCount(9)\n\n def test_hemelbs(self):\n execute(hemelbs, 'cylinder', cores='[1:6:1]')\n self.assertCommandRegexp('rsync .*config_files/cylinder', 1)\n self.assertCommandRegexp('cylinder_abcd1234_planck_5_10_10.sh')\n self.assertCommandCount(9 * 5)\n\n def test_create_config(self):\n execute(create_config, 'cylinder', VoxelSize=0.1)\n self.assertEqual(env.config, 
'cylinder_0_1_1000_3')\n self.assertCommandRegexp('mkdir -p .*/configs/cylinder_0_1_1000_3', 0)\n self.assertCommand('generate 0.1 1000 3', 1)\n self.assertCommandCount(2)\n\n def test_create_configs(self):\n execute(create_configs, 'cylinder', VoxelSize='[0.1:0.21:0.01]')\n self.assertEqual(env.config, 'cylinder_0_2_1000_3')\n self.assertCommandRegexp('mkdir -p .*/configs/cylinder_0_1_1000_3', 0)\n self.assertCommand('generate 0.1 1000 3', 1)\n self.assertCommandCount(2 * 11)\n\n def test_hemelb_profile(self):\n execute(hemelb_profile, 'cylinder', VoxelSize='[0.1:0.21:0.01]',\n cores='[1:6:1]')\n self.assertEqual(env.name,\n 'cylinder_0_2_1000_3_abcd1234_planck_5_10_10')\n self.assertCommandRegexp('mkdir -p .*/configs/cylinder_0_1_1000_3', 0)\n self.assertCommand('generate 0.1 1000 3', 1)\n self.assertCommandRegexp('mkdir -p .*config_files/cylinder', 2)\n self.assertCommandRegexp('rsync .*config_files/cylinder', 3)\n self.assertCommandRegexp(\n 'put .*scripts/cylinder_0_1_1000_3_abcd1234_planck_1_10_10.sh', 4)\n self.assertCommandRegexp(\n 'mkdir -p .*results/cylinder_0_1_1000_3_abcd1234_planck_1_10_10', 5\n )\n self.assertCommandRegexp(\n 'cp .*scripts/cylinder_0_1_1000_3_abcd1234_planck_1_10_10.sh .*results/cylinder_0_1_1000_3_abcd1234_planck_1_10_10'\n , 6)\n self.assertCommandRegexp(\n 'cp .*CMakeCache.txt .*results/cylinder_0_1_1000_3_abcd1234_planck_1_10_10'\n , 7)\n self.assertCommandRegexp('put .*env.yml', 8)\n self.assertCommandRegexp(\n 'chmod u\\\\+x .*scripts/cylinder_0_1_1000_3_abcd1234_planck_1_10_10.sh'\n , 9)\n self.assertCommandRegexp(\n '.*scripts/cylinder_0_1_1000_3_abcd1234_planck_1_10_10.sh', 10)\n self.assertCommandCount(2 * 11 + 9 * 11 * 5)\n\n def test_hemelb_profile_no_config_generation(self):\n execute(hemelb_profile, 'cylinder', VoxelSize='[0.1:0.21:0.01]',\n cores='[1:6:1]', create_configs='False')\n self.assertEqual(env.name,\n 'cylinder_0_2_1000_3_abcd1234_planck_5_10_10')\n self.assertCommandRegexp('mkdir -p .*config_files/cylinder', 0)\n self.assertCommandRegexp('rsync .*config_files/cylinder', 1)\n self.assertCommandRegexp(\n 'put .*scripts/cylinder_0_1_1000_3_abcd1234_planck_1_10_10.sh', 2)\n self.assertCommandRegexp(\n 'mkdir -p .*results/cylinder_0_1_1000_3_abcd1234_planck_1_10_10', 3\n )\n self.assertCommandRegexp(\n 'cp .*scripts/cylinder_0_1_1000_3_abcd1234_planck_1_10_10.sh .*results/cylinder_0_1_1000_3_abcd1234_planck_1_10_10'\n , 4)\n self.assertCommandRegexp(\n 'cp .*CMakeCache.txt .*results/cylinder_0_1_1000_3_abcd1234_planck_1_10_10'\n , 5)\n self.assertCommandRegexp('put .*env.yml', 6)\n self.assertCommandRegexp(\n 'chmod u\\\\+x .*scripts/cylinder_0_1_1000_3_abcd1234_planck_1_10_10.sh'\n , 7)\n self.assertCommandRegexp(\n '.*scripts/cylinder_0_1_1000_3_abcd1234_planck_1_10_10.sh', 8)\n self.assertCommandCount(9 * 11 * 5)\n\n def test_configure_default(self):\n execute(configure)\n target = {'CMAKE_BUILD_TYPE': 'Release', 'CMAKE_CXX_FLAGS_RELEASE':\n '-O4', 'CMAKE_INSTALL_PREFIX': env.install_path,\n 'CPPUNIT_PATCH_LDL': True, 'HEMELB_DEPENDENCIES_INSTALL_PATH':\n env.install_path, 'HEMELB_SUBPROJECT_MAKE_JOBS': 1}\n self.assertEqual(env.total_cmake_options, target)\n for key, value in target.iteritems():\n self.assertRegexpMatches(env.cmake_flags, '-D%s=%s' % (key, value))\n\n def test_configure_debug(self):\n execute(configure, 'debug')\n self.assertEqual(env.total_cmake_options, {'CMAKE_BUILD_TYPE':\n 'Debug', 'HEMELB_OPTIMISATION': '', 'HEMELB_LOG_LEVEL': 'debug',\n 'CPPUNIT_PATCH_LDL': True, 'CMAKE_INSTALL_PREFIX': env.\n install_path, 
'HEMELB_DEPENDENCIES_INSTALL_PATH': env.\n install_path, 'HEMELB_SUBPROJECT_MAKE_JOBS': 1})\n\n def test_script_template(self):\n script = script_templates('dummy_ge_header', 'dummy_jobscript',\n commands=['extra'])\n content = open(script).read()\n self.assertEqual(content, 'user: test_user\\n\\nrun bananas\\n\\nextra')\n",
"step-5": "#!/usr/bin/env python\n# \n# Copyright (C) University College London, 2007-2012, all rights reserved.\n# \n# This file is part of HemeLB and is CONFIDENTIAL. You may not work \n# with, install, use, duplicate, modify, redistribute or share this\n# file, or any part thereof, other than as allowed by any agreement\n# specifically made by you with University College London.\n# \n\n# encoding: utf-8\n\"\"\"\ntest_machine_environment.py\n\nCreated by James Hetherington on 2012-01-19.\nCopyright (c) 2012 UCL. All rights reserved.\n\"\"\"\nimport unittest\nimport sys\nimport copy\nimport textwrap\n\nfrom ..fab import *\n\nclass TestFabric(unittest.TestCase):\n def setUp(self):\n \t#Update the user config with testing example\n \tenv.test_home=os.path.join(env.localroot,'deploy','test')\n \tuser_config=yaml.load(open(os.path.join(env.localroot,'deploy','test','machines_user.yml')))\n \tenv.update(user_config['default'])\n \texecute(planck) #Default machine target is assumed as planck.\n \t#Monkeypatch the fabric commands to do nothing, but record what they would have done\n \tsys.modules['deploy.fab'].run=lambda command: self.commands.append(command)\n \tdef mock_local(command,original=sys.modules['deploy.fab'].local):\n \t self.commands.append(command)\n \t original(command)\n \tsys.modules['deploy.fab'].local=mock_local \n \tsys.modules['deploy.fab'].put=lambda source,target: self.commands.append(\"put \"+source+\" \"+target)\n \tsys.modules['deploy.fab'].rsync_project=lambda **args: self.commands.append(\"rsync \"+args['local_dir']+\" \"+args['remote_dir'])\n \tdef mock_profile(profile,original=sys.modules['deploy.fab'].generate):\n \t self.commands.append(\"generate %g %g %g\"%(profile.VoxelSize, profile.Steps , profile.Cycles) )\n \t original(profile)\n \tsys.modules['deploy.fab'].generate=mock_profile\n \tself.commands=[]\n \tenv.build_number='abcd1234'\n def assertCommandCount(self,should_be):\n self.assertEqual(len(self.commands),should_be)\n def assertCommand(self,should_be,index=-1):\n \tself.assertEqual(self.commands[index],should_be)\n def assertCommandRegexp(self,should_be,index=-1):\n \tself.assertRegexpMatches(self.commands[index],should_be)\n def test_machine_alias(self):\n \tself.assertEqual(env.remote,\"planck.chem.ucl.ac.uk\")\n \texecute(julian)\n \tself.assertEqual(env.remote,\"julian.chem.ucl.ac.uk\")\n \texecute(hector)\n \tself.assertEqual(env.remote,\"login.hector.ac.uk\")\n def test_clean(self):\n \texecute(clean)\n \tself.assertCommand('make clean')\n def test_with_job(self):\n with settings(results_path=\"banana\",local_results='pineapple'):\n with_job('foo')\n self.assertEqual(env.job_results,\"banana/foo\")\n self.assertEqual(env.job_results_local,\"pineapple/foo\")\n def test_with_template_job(self):\n with settings(results_path='banana',foo='fish',bar='swim',job_name_template=\"${foo}_${bar}\"): \n with_template_job()\n self.assertEqual(env.job_results,\"banana/fish_swim\")\n def test_hemelb(self):\n execute(hemelb,'cylinder',cores=5)\n self.assertEqual(env.name,\"cylinder_abcd1234_planck_5_10_10\")\n self.assertCommandRegexp('mkdir -p .*config_files/cylinder',0)\n self.assertCommandRegexp('rsync .*config_files/cylinder',1)\n self.assertCommandRegexp(\"put .*scripts/cylinder_abcd1234_planck_5_10_10.sh\",2)\n self.assertCommandRegexp(\"mkdir -p .*results/cylinder_abcd1234_planck_5_10_10\",3)\n self.assertCommandRegexp(\"cp .*scripts/cylinder_abcd1234_planck_5_10_10.sh .*results/cylinder_abcd1234_planck_5_10_10\",4)\n self.assertCommandRegexp(\"cp 
.*CMakeCache.txt .*results/cylinder_abcd1234_planck_5_10_10\",5)\n self.assertCommandRegexp(\"put .*env.yml\",6)\n self.assertCommandRegexp(\"chmod u\\+x .*scripts/cylinder_abcd1234_planck_5_10_10.sh\",7)\n self.assertCommandRegexp(\".*scripts/cylinder_abcd1234_planck_5_10_10.sh\",8)\n self.assertCommandCount(9)\n def test_hemelbs(self):\n execute(hemelbs,'cylinder',cores='[1:6:1]')\n self.assertCommandRegexp('rsync .*config_files/cylinder',1)\n self.assertCommandRegexp(\"cylinder_abcd1234_planck_5_10_10.sh\")\n self.assertCommandCount(9*5)\n def test_create_config(self):\n execute(create_config,'cylinder',VoxelSize=0.1)\n self.assertEqual(env.config,\"cylinder_0_1_1000_3\")\n self.assertCommandRegexp(\"mkdir -p .*/configs/cylinder_0_1_1000_3\",0)\n self.assertCommand(\"generate 0.1 1000 3\",1)\n self.assertCommandCount(2)\n def test_create_configs(self):\n execute(create_configs,'cylinder',VoxelSize='[0.1:0.21:0.01]')\n self.assertEqual(env.config,\"cylinder_0_2_1000_3\")\n self.assertCommandRegexp(\"mkdir -p .*/configs/cylinder_0_1_1000_3\",0)\n self.assertCommand(\"generate 0.1 1000 3\",1)\n self.assertCommandCount(2*11)\n def test_hemelb_profile(self):\n execute(hemelb_profile,'cylinder',VoxelSize='[0.1:0.21:0.01]',cores='[1:6:1]')\n self.assertEqual(env.name,\"cylinder_0_2_1000_3_abcd1234_planck_5_10_10\")\n self.assertCommandRegexp(\"mkdir -p .*/configs/cylinder_0_1_1000_3\",0)\n self.assertCommand(\"generate 0.1 1000 3\",1)\n self.assertCommandRegexp('mkdir -p .*config_files/cylinder',2)\n self.assertCommandRegexp('rsync .*config_files/cylinder',3)\n self.assertCommandRegexp(\"put .*scripts/cylinder_0_1_1000_3_abcd1234_planck_1_10_10.sh\",4)\n self.assertCommandRegexp(\"mkdir -p .*results/cylinder_0_1_1000_3_abcd1234_planck_1_10_10\",5)\n self.assertCommandRegexp(\"cp .*scripts/cylinder_0_1_1000_3_abcd1234_planck_1_10_10.sh .*results/cylinder_0_1_1000_3_abcd1234_planck_1_10_10\",6)\n self.assertCommandRegexp(\"cp .*CMakeCache.txt .*results/cylinder_0_1_1000_3_abcd1234_planck_1_10_10\",7)\n self.assertCommandRegexp(\"put .*env.yml\",8)\n self.assertCommandRegexp(\"chmod u\\+x .*scripts/cylinder_0_1_1000_3_abcd1234_planck_1_10_10.sh\",9)\n self.assertCommandRegexp(\".*scripts/cylinder_0_1_1000_3_abcd1234_planck_1_10_10.sh\",10)\n self.assertCommandCount(2*11 + 9*11*5)\n def test_hemelb_profile_no_config_generation(self):\n execute(hemelb_profile,'cylinder',VoxelSize='[0.1:0.21:0.01]',cores='[1:6:1]',create_configs=\"False\")\n self.assertEqual(env.name,\"cylinder_0_2_1000_3_abcd1234_planck_5_10_10\")\n self.assertCommandRegexp('mkdir -p .*config_files/cylinder',0)\n self.assertCommandRegexp('rsync .*config_files/cylinder',1)\n self.assertCommandRegexp(\"put .*scripts/cylinder_0_1_1000_3_abcd1234_planck_1_10_10.sh\",2)\n self.assertCommandRegexp(\"mkdir -p .*results/cylinder_0_1_1000_3_abcd1234_planck_1_10_10\",3)\n self.assertCommandRegexp(\"cp .*scripts/cylinder_0_1_1000_3_abcd1234_planck_1_10_10.sh .*results/cylinder_0_1_1000_3_abcd1234_planck_1_10_10\",4)\n self.assertCommandRegexp(\"cp .*CMakeCache.txt .*results/cylinder_0_1_1000_3_abcd1234_planck_1_10_10\",5)\n self.assertCommandRegexp(\"put .*env.yml\",6)\n self.assertCommandRegexp(\"chmod u\\+x .*scripts/cylinder_0_1_1000_3_abcd1234_planck_1_10_10.sh\",7)\n self.assertCommandRegexp(\".*scripts/cylinder_0_1_1000_3_abcd1234_planck_1_10_10.sh\",8)\n self.assertCommandCount(9*11*5)\n def test_configure_default(self):\n execute(configure)\n target={\n 'CMAKE_BUILD_TYPE': \"Release\",\n 'CMAKE_CXX_FLAGS_RELEASE': \"-O4\",\n 
'CMAKE_INSTALL_PREFIX': env.install_path,\n 'CPPUNIT_PATCH_LDL' : True,\n \"HEMELB_DEPENDENCIES_INSTALL_PATH\": env.install_path,\n \"HEMELB_SUBPROJECT_MAKE_JOBS\": 1\n }\n self.assertEqual(env.total_cmake_options,target)\n #Can't just assert on a string here, as the order of the dict is not defined\n for key,value in target.iteritems():\n self.assertRegexpMatches(env.cmake_flags,\"-D%s=%s\"%(key,value))\n def test_configure_debug(self):\n execute(configure,'debug')\n self.assertEqual(env.total_cmake_options,\n {\n 'CMAKE_BUILD_TYPE': \"Debug\",\n 'HEMELB_OPTIMISATION': \"\",\n 'HEMELB_LOG_LEVEL': \"debug\",\n 'CPPUNIT_PATCH_LDL' : True,\n 'CMAKE_INSTALL_PREFIX': env.install_path,\n \"HEMELB_DEPENDENCIES_INSTALL_PATH\": env.install_path,\n \"HEMELB_SUBPROJECT_MAKE_JOBS\": 1\n })\n \n def test_script_template(self):\n script=script_templates('dummy_ge_header','dummy_jobscript',commands=['extra'])\n content=open(script).read()\n self.assertEqual(content,\"user: test_user\\n\\nrun bananas\\n\\nextra\")",
"step-ids": [
12,
14,
16,
19,
20
]
}
|
[
12,
14,
16,
19,
20
] |
class SurveyRepository:
def __init__(self):
self._surveys = {}
def get_survey(self, survey_id):
if survey_id in self._surveys:
return self._surveys[survey_id]
def save(self, survey):
self._surveys[survey.id] = survey
|
normal
|
{
"blob_id": "961643e93582bd92e148d00efebbfe38f99100fc",
"index": 2866,
"step-1": "class SurveyRepository:\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "class SurveyRepository:\n\n def __init__(self):\n self._surveys = {}\n <mask token>\n <mask token>\n",
"step-3": "class SurveyRepository:\n\n def __init__(self):\n self._surveys = {}\n <mask token>\n\n def save(self, survey):\n self._surveys[survey.id] = survey\n",
"step-4": "class SurveyRepository:\n\n def __init__(self):\n self._surveys = {}\n\n def get_survey(self, survey_id):\n if survey_id in self._surveys:\n return self._surveys[survey_id]\n\n def save(self, survey):\n self._surveys[survey.id] = survey\n",
"step-5": null,
"step-ids": [
1,
2,
3,
4
]
}
|
[
1,
2,
3,
4
] |
class Node:
def __init__(self, char=None):
self.char = char
self.children = []
self.end = False
<|reserved_special_token_0|>
def search(sequence):
tmp_node = root
found = False
for letter in sequence:
common = False
for child in tmp_node.children:
if child.char == letter:
tmp_node = child
common = True
break
if not common:
return found
if tmp_node.end:
found = True
return found
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Node:
def __init__(self, char=None):
self.char = char
self.children = []
self.end = False
<|reserved_special_token_0|>
def insert(s, curr):
if curr.children and curr.children[0].char == s[0]:
curr = curr.children[0]
elif len(curr.children) > 1 and curr.children[1].char == s[0]:
curr = curr.children[1]
else:
new_node = Node(s[0])
curr.children.append(new_node)
curr = new_node
if len(s) > 1:
s = s[1:]
insert(s, curr)
else:
curr.end = True
def search(sequence):
tmp_node = root
found = False
for letter in sequence:
common = False
for child in tmp_node.children:
if child.char == letter:
tmp_node = child
common = True
break
if not common:
return found
if tmp_node.end:
found = True
return found
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Node:
def __init__(self, char=None):
self.char = char
self.children = []
self.end = False
<|reserved_special_token_0|>
def insert(s, curr):
if curr.children and curr.children[0].char == s[0]:
curr = curr.children[0]
elif len(curr.children) > 1 and curr.children[1].char == s[0]:
curr = curr.children[1]
else:
new_node = Node(s[0])
curr.children.append(new_node)
curr = new_node
if len(s) > 1:
s = s[1:]
insert(s, curr)
else:
curr.end = True
def search(sequence):
tmp_node = root
found = False
for letter in sequence:
common = False
for child in tmp_node.children:
if child.char == letter:
tmp_node = child
common = True
break
if not common:
return found
if tmp_node.end:
found = True
return found
print(
"""Type any number of sequences containing only 2 types
of characters 'a' and 'b' to fill the database (ended by blank entry)."""
)
<|reserved_special_token_0|>
while True:
seq = input('Sequence: ')
if seq == '':
break
sequences.append(seq)
<|reserved_special_token_0|>
for seq in sequences:
insert(seq, root)
print('Select 2 sequences from the database.')
<|reserved_special_token_0|>
if search(seq1) and search(seq2):
for i in range(min(len(seq1), len(seq2))):
if seq1[i] == seq2[i]:
node_no += 1
letter = seq1[i]
else:
break
print('Last common node is -', letter, '- with node no.', node_no)
else:
print('One or both the sequences not found in the database.')
<|reserved_special_token_1|>
class Node:
def __init__(self, char=None):
self.char = char
self.children = []
self.end = False
root = Node('*')
curr = root
def insert(s, curr):
if curr.children and curr.children[0].char == s[0]:
curr = curr.children[0]
elif len(curr.children) > 1 and curr.children[1].char == s[0]:
curr = curr.children[1]
else:
new_node = Node(s[0])
curr.children.append(new_node)
curr = new_node
if len(s) > 1:
s = s[1:]
insert(s, curr)
else:
curr.end = True
def search(sequence):
tmp_node = root
found = False
for letter in sequence:
common = False
for child in tmp_node.children:
if child.char == letter:
tmp_node = child
common = True
break
if not common:
return found
if tmp_node.end:
found = True
return found
print(
"""Type any number of sequences containing only 2 types
of characters 'a' and 'b' to fill the database (ended by blank entry)."""
)
sequences = []
while True:
seq = input('Sequence: ')
if seq == '':
break
sequences.append(seq)
node_no = 0
letter = 'none'
for seq in sequences:
insert(seq, root)
print('Select 2 sequences from the database.')
seq1 = input('Sequence 1: ')
seq2 = input('Sequence 2: ')
if search(seq1) and search(seq2):
for i in range(min(len(seq1), len(seq2))):
if seq1[i] == seq2[i]:
node_no += 1
letter = seq1[i]
else:
break
print('Last common node is -', letter, '- with node no.', node_no)
else:
print('One or both the sequences not found in the database.')
<|reserved_special_token_1|>
class Node:
def __init__(self, char = None):
self.char = char
self.children = []
self.end = False
root = Node('*')
curr = root
# recursive insert into the trie
def insert(s, curr):
if curr.children and curr.children[0].char == s[0]:
curr = curr.children[0]
elif len(curr.children) > 1 and curr.children[1].char == s[0]:
curr = curr.children[1]
else:
new_node = Node(s[0])
curr.children.append(new_node)
curr = new_node
if len(s) > 1:
s = s[1:]
insert(s, curr)
else:
curr.end = True
# search for a string in the trie
def search(sequence):
tmp_node = root
found = False
for letter in sequence:
common = False
for child in tmp_node.children:
if child.char == letter:
tmp_node = child
common = True
break
if not common:
return found
if tmp_node.end:
found = True
return found
# user input
print('''Type any number of sequences containing only 2 types
of characters 'a' and 'b' to fill the database (ended by blank entry).''')
sequences = []
while True:
seq = input("Sequence: ")
if seq == '':
break
sequences.append(seq)
node_no = 0
letter = 'none'
# loads strings into the trie
for seq in sequences:
insert(seq, root)
print("Select 2 sequences from the database.")
# takes 2 strings from user to compare
seq1 = input("Sequence 1: ")
seq2 = input("Sequence 2: ")
if search(seq1) and search(seq2):
for i in range(min(len(seq1), len(seq2))):
if seq1[i] == seq2[i]:
node_no += 1
letter = seq1[i]
else:
break
print("Last common node is -", letter, "- with node no.", node_no)
else:
print("One or both the sequences not found in the database.")
|
flexible
|
{
"blob_id": "37c42a5e52832c81660e88f45d93e6a9f0300de0",
"index": 7654,
"step-1": "class Node:\n\n def __init__(self, char=None):\n self.char = char\n self.children = []\n self.end = False\n\n\n<mask token>\n\n\ndef search(sequence):\n tmp_node = root\n found = False\n for letter in sequence:\n common = False\n for child in tmp_node.children:\n if child.char == letter:\n tmp_node = child\n common = True\n break\n if not common:\n return found\n if tmp_node.end:\n found = True\n return found\n\n\n<mask token>\n",
"step-2": "class Node:\n\n def __init__(self, char=None):\n self.char = char\n self.children = []\n self.end = False\n\n\n<mask token>\n\n\ndef insert(s, curr):\n if curr.children and curr.children[0].char == s[0]:\n curr = curr.children[0]\n elif len(curr.children) > 1 and curr.children[1].char == s[0]:\n curr = curr.children[1]\n else:\n new_node = Node(s[0])\n curr.children.append(new_node)\n curr = new_node\n if len(s) > 1:\n s = s[1:]\n insert(s, curr)\n else:\n curr.end = True\n\n\ndef search(sequence):\n tmp_node = root\n found = False\n for letter in sequence:\n common = False\n for child in tmp_node.children:\n if child.char == letter:\n tmp_node = child\n common = True\n break\n if not common:\n return found\n if tmp_node.end:\n found = True\n return found\n\n\n<mask token>\n",
"step-3": "class Node:\n\n def __init__(self, char=None):\n self.char = char\n self.children = []\n self.end = False\n\n\n<mask token>\n\n\ndef insert(s, curr):\n if curr.children and curr.children[0].char == s[0]:\n curr = curr.children[0]\n elif len(curr.children) > 1 and curr.children[1].char == s[0]:\n curr = curr.children[1]\n else:\n new_node = Node(s[0])\n curr.children.append(new_node)\n curr = new_node\n if len(s) > 1:\n s = s[1:]\n insert(s, curr)\n else:\n curr.end = True\n\n\ndef search(sequence):\n tmp_node = root\n found = False\n for letter in sequence:\n common = False\n for child in tmp_node.children:\n if child.char == letter:\n tmp_node = child\n common = True\n break\n if not common:\n return found\n if tmp_node.end:\n found = True\n return found\n\n\nprint(\n \"\"\"Type any number of sequences containing only 2 types\nof characters 'a' and 'b' to fill the database (ended by blank entry).\"\"\"\n )\n<mask token>\nwhile True:\n seq = input('Sequence: ')\n if seq == '':\n break\n sequences.append(seq)\n<mask token>\nfor seq in sequences:\n insert(seq, root)\nprint('Select 2 sequences from the database.')\n<mask token>\nif search(seq1) and search(seq2):\n for i in range(min(len(seq1), len(seq2))):\n if seq1[i] == seq2[i]:\n node_no += 1\n letter = seq1[i]\n else:\n break\n print('Last common node is -', letter, '- with node no.', node_no)\nelse:\n print('One or both the sequences not found in the database.')\n",
"step-4": "class Node:\n\n def __init__(self, char=None):\n self.char = char\n self.children = []\n self.end = False\n\n\nroot = Node('*')\ncurr = root\n\n\ndef insert(s, curr):\n if curr.children and curr.children[0].char == s[0]:\n curr = curr.children[0]\n elif len(curr.children) > 1 and curr.children[1].char == s[0]:\n curr = curr.children[1]\n else:\n new_node = Node(s[0])\n curr.children.append(new_node)\n curr = new_node\n if len(s) > 1:\n s = s[1:]\n insert(s, curr)\n else:\n curr.end = True\n\n\ndef search(sequence):\n tmp_node = root\n found = False\n for letter in sequence:\n common = False\n for child in tmp_node.children:\n if child.char == letter:\n tmp_node = child\n common = True\n break\n if not common:\n return found\n if tmp_node.end:\n found = True\n return found\n\n\nprint(\n \"\"\"Type any number of sequences containing only 2 types\nof characters 'a' and 'b' to fill the database (ended by blank entry).\"\"\"\n )\nsequences = []\nwhile True:\n seq = input('Sequence: ')\n if seq == '':\n break\n sequences.append(seq)\nnode_no = 0\nletter = 'none'\nfor seq in sequences:\n insert(seq, root)\nprint('Select 2 sequences from the database.')\nseq1 = input('Sequence 1: ')\nseq2 = input('Sequence 2: ')\nif search(seq1) and search(seq2):\n for i in range(min(len(seq1), len(seq2))):\n if seq1[i] == seq2[i]:\n node_no += 1\n letter = seq1[i]\n else:\n break\n print('Last common node is -', letter, '- with node no.', node_no)\nelse:\n print('One or both the sequences not found in the database.')\n",
"step-5": "class Node:\n def __init__(self, char = None):\n self.char = char\n self.children = []\n self.end = False\n\nroot = Node('*')\ncurr = root\n\n# recursive insert into the trie\ndef insert(s, curr):\n if curr.children and curr.children[0].char == s[0]:\n curr = curr.children[0]\n elif len(curr.children) > 1 and curr.children[1].char == s[0]:\n curr = curr.children[1] \n else:\n new_node = Node(s[0])\n curr.children.append(new_node)\n curr = new_node\n\n if len(s) > 1:\n s = s[1:]\n insert(s, curr)\n else:\n curr.end = True\n \n# search for a string in the trie\ndef search(sequence):\n tmp_node = root\n found = False\n for letter in sequence:\n common = False\n for child in tmp_node.children:\n if child.char == letter:\n tmp_node = child\n common = True\n break\n if not common:\n return found\n if tmp_node.end:\n found = True\n return found\n\n# user input\nprint('''Type any number of sequences containing only 2 types\nof characters 'a' and 'b' to fill the database (ended by blank entry).''')\n\nsequences = []\n\nwhile True:\n seq = input(\"Sequence: \")\n if seq == '':\n break\n sequences.append(seq)\n\nnode_no = 0\nletter = 'none'\n\n# loads strings into the trie\nfor seq in sequences:\n insert(seq, root)\n\nprint(\"Select 2 sequences from the database.\")\n\n# takes 2 strings from user to compare\nseq1 = input(\"Sequence 1: \")\nseq2 = input(\"Sequence 2: \")\n\nif search(seq1) and search(seq2):\n for i in range(min(len(seq1), len(seq2))):\n if seq1[i] == seq2[i]:\n node_no += 1\n letter = seq1[i]\n else:\n break\n print(\"Last common node is -\", letter, \"- with node no.\", node_no)\n \nelse:\n print(\"One or both the sequences not found in the database.\")\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
# Generated by Django 3.1.6 on 2021-02-27 23:29
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('RMS', '0001_initial'),
]
operations = [
migrations.RenameField(
model_name='inventorytable',
old_name='Restaurant_ID',
new_name='Restaurant',
),
migrations.RenameField(
model_name='menuitemstable',
old_name='Restaurant_ID',
new_name='Restaurant',
),
migrations.RenameField(
model_name='reciperequirementstable',
old_name='Ingredient_ID',
new_name='Ingredient',
),
migrations.RenameField(
model_name='reciperequirementstable',
old_name='Item_ID',
new_name='Item',
),
migrations.RenameField(
model_name='reciperequirementstable',
old_name='Restaurant_ID',
new_name='Restaurant',
),
migrations.RenameField(
model_name='seatmanagementtable',
old_name='Restaurant_ID',
new_name='Restaurant',
),
]
|
normal
|
{
"blob_id": "ba336094d38a47457198919ce60969144a8fdedb",
"index": 5374,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('RMS', '0001_initial')]\n operations = [migrations.RenameField(model_name='inventorytable',\n old_name='Restaurant_ID', new_name='Restaurant'), migrations.\n RenameField(model_name='menuitemstable', old_name='Restaurant_ID',\n new_name='Restaurant'), migrations.RenameField(model_name=\n 'reciperequirementstable', old_name='Ingredient_ID', new_name=\n 'Ingredient'), migrations.RenameField(model_name=\n 'reciperequirementstable', old_name='Item_ID', new_name='Item'),\n migrations.RenameField(model_name='reciperequirementstable',\n old_name='Restaurant_ID', new_name='Restaurant'), migrations.\n RenameField(model_name='seatmanagementtable', old_name=\n 'Restaurant_ID', new_name='Restaurant')]\n",
"step-4": "from django.db import migrations\n\n\nclass Migration(migrations.Migration):\n dependencies = [('RMS', '0001_initial')]\n operations = [migrations.RenameField(model_name='inventorytable',\n old_name='Restaurant_ID', new_name='Restaurant'), migrations.\n RenameField(model_name='menuitemstable', old_name='Restaurant_ID',\n new_name='Restaurant'), migrations.RenameField(model_name=\n 'reciperequirementstable', old_name='Ingredient_ID', new_name=\n 'Ingredient'), migrations.RenameField(model_name=\n 'reciperequirementstable', old_name='Item_ID', new_name='Item'),\n migrations.RenameField(model_name='reciperequirementstable',\n old_name='Restaurant_ID', new_name='Restaurant'), migrations.\n RenameField(model_name='seatmanagementtable', old_name=\n 'Restaurant_ID', new_name='Restaurant')]\n",
"step-5": "# Generated by Django 3.1.6 on 2021-02-27 23:29\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('RMS', '0001_initial'),\n ]\n\n operations = [\n migrations.RenameField(\n model_name='inventorytable',\n old_name='Restaurant_ID',\n new_name='Restaurant',\n ),\n migrations.RenameField(\n model_name='menuitemstable',\n old_name='Restaurant_ID',\n new_name='Restaurant',\n ),\n migrations.RenameField(\n model_name='reciperequirementstable',\n old_name='Ingredient_ID',\n new_name='Ingredient',\n ),\n migrations.RenameField(\n model_name='reciperequirementstable',\n old_name='Item_ID',\n new_name='Item',\n ),\n migrations.RenameField(\n model_name='reciperequirementstable',\n old_name='Restaurant_ID',\n new_name='Restaurant',\n ),\n migrations.RenameField(\n model_name='seatmanagementtable',\n old_name='Restaurant_ID',\n new_name='Restaurant',\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import requests
import os
from dotenv import load_dotenv
from datetime import datetime
load_dotenv(".env") # loads the environment file
USERNAME = os.getenv("USER")
TOKEN = os.getenv("TOKEN")
pixela_endpoint = "https://pixe.la/v1/users"
# MAKING AN ACCOUNT
user_params = {
"token": TOKEN,
"username": USERNAME,
"agreeTermsOfService": "yes",
"notMinor": "yes",
}
# response = requests.post(url=pixela_endpoint, json=user_params) # sends the user_params as json
# print(response.text) # gives the response as a piece of text
# CREATING A GRAPH
graph_endpoint = f"{pixela_endpoint}/{USERNAME}/graphs" # endpoint for the graph creation
graph_config = {
"id": "graph1",
"name": "Reading Graph",
"unit": "hours",
"type": "int",
"color": "shibafu"
}
headers = {
"X-USER-TOKEN": TOKEN
}
# response = requests.post(url=graph_endpoint, json=graph_config, headers=headers)  # These lines were used to create the graph
# print(response.text)
# POST A PIXEL
post_pixel_endpoint = f"{pixela_endpoint}/{USERNAME}/graphs/graph1"
# today = datetime(year=2020, month=12, day=25) custom date
today = datetime.now()
formatted_date = today.strftime("%Y%m%d")
pixel_config = {
"date": today.strftime("%Y%m%d"),
"quantity": input("How many hours did you spend reading today? "),
}
response = requests.post(url=post_pixel_endpoint, headers=headers, json=pixel_config) # post a new pixel
print(response.text)
# UPDATING A PIXEL
update_endpoint = f"{pixela_endpoint}/{USERNAME}/graphs/graph1/{formatted_date}"
updated_pixel = {
"quantity": "3"
}
# response = requests.put(url=update_endpoint, headers=headers, json=updated_pixel)
# print(response.text)
# DELETING A PIXEL
# delete_endpoint = f"{pixela_endpoint}/{USERNAME}/graphs/graph1/{formatted_date}"
# response = requests.delete(url=delete_endpoint,headers=headers)
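
# Hedged note (an assumption, not taken from the original script): Pixela can
# also render the graph itself; its documented HTML view lives at a URL of the
# form below, so the result of the posts above can be checked in a browser.
#
#   https://pixe.la/v1/users/<USERNAME>/graphs/graph1.html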
|
normal
|
{
"blob_id": "ba34dfcad0cb9bac9c462bdf60e55dee6ba9d58d",
"index": 9255,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nload_dotenv('.env')\n<mask token>\nprint(response.text)\n<mask token>\n",
"step-3": "<mask token>\nload_dotenv('.env')\nUSERNAME = os.getenv('USER')\nTOKEN = os.getenv('TOKEN')\npixela_endpoint = 'https://pixe.la/v1/users'\nuser_params = {'token': TOKEN, 'username': USERNAME, 'agreeTermsOfService':\n 'yes', 'notMinor': 'yes'}\ngraph_endpoint = f'{pixela_endpoint}/{USERNAME}/graphs'\ngraph_config = {'id': 'graph1', 'name': 'Reading Graph', 'unit': 'hours',\n 'type': 'int', 'color': 'shibafu'}\nheaders = {'X-USER-TOKEN': TOKEN}\npost_pixel_endpoint = f'{pixela_endpoint}/{USERNAME}/graphs/graph1'\ntoday = datetime.now()\nformatted_date = today.strftime('%Y%m%d')\npixel_config = {'date': today.strftime('%Y%m%d'), 'quantity': input(\n 'How many hours did you spend reading today? ')}\nresponse = requests.post(url=post_pixel_endpoint, headers=headers, json=\n pixel_config)\nprint(response.text)\nupdate_endpoint = (\n f'{pixela_endpoint}/{USERNAME}/graphs/graph1/{formatted_date}')\nupdated_pixel = {'quantity': '3'}\n",
"step-4": "import requests\nimport os\nfrom dotenv import load_dotenv\nfrom datetime import datetime\nload_dotenv('.env')\nUSERNAME = os.getenv('USER')\nTOKEN = os.getenv('TOKEN')\npixela_endpoint = 'https://pixe.la/v1/users'\nuser_params = {'token': TOKEN, 'username': USERNAME, 'agreeTermsOfService':\n 'yes', 'notMinor': 'yes'}\ngraph_endpoint = f'{pixela_endpoint}/{USERNAME}/graphs'\ngraph_config = {'id': 'graph1', 'name': 'Reading Graph', 'unit': 'hours',\n 'type': 'int', 'color': 'shibafu'}\nheaders = {'X-USER-TOKEN': TOKEN}\npost_pixel_endpoint = f'{pixela_endpoint}/{USERNAME}/graphs/graph1'\ntoday = datetime.now()\nformatted_date = today.strftime('%Y%m%d')\npixel_config = {'date': today.strftime('%Y%m%d'), 'quantity': input(\n 'How many hours did you spend reading today? ')}\nresponse = requests.post(url=post_pixel_endpoint, headers=headers, json=\n pixel_config)\nprint(response.text)\nupdate_endpoint = (\n f'{pixela_endpoint}/{USERNAME}/graphs/graph1/{formatted_date}')\nupdated_pixel = {'quantity': '3'}\n",
"step-5": "import requests\r\nimport os\r\nfrom dotenv import load_dotenv\r\nfrom datetime import datetime\r\n\r\nload_dotenv(\".env\") # loads the environment file\r\n\r\n\r\nUSERNAME = os.getenv(\"USER\")\r\nTOKEN = os.getenv(\"TOKEN\")\r\npixela_endpoint = \"https://pixe.la/v1/users\"\r\n\r\n\r\n\r\n# MAKING AN ACCOUNT\r\nuser_params = {\r\n \"token\": TOKEN,\r\n \"username\": USERNAME,\r\n \"agreeTermsOfService\": \"yes\",\r\n \"notMinor\": \"yes\",\r\n\r\n}\r\n\r\n# response = requests.post(url=pixela_endpoint, json=user_params) # sends the user_params as json\r\n# print(response.text) # gives the response as a piece of text\r\n\r\n\r\n# CREATING A GRAPH\r\ngraph_endpoint = f\"{pixela_endpoint}/{USERNAME}/graphs\" # endpoint for the graph creation\r\n\r\ngraph_config = {\r\n \"id\": \"graph1\",\r\n \"name\": \"Reading Graph\",\r\n \"unit\": \"hours\",\r\n \"type\": \"int\",\r\n \"color\": \"shibafu\"\r\n\r\n}\r\n\r\nheaders = {\r\n \"X-USER-TOKEN\": TOKEN\r\n}\r\n\r\n# response = requests.post(url=graph_endpoint, json=graph_config, headers=headers) These lines were use to create graph\r\n# print(response.text)\r\n\r\n\r\n# POST A PIXEL\r\npost_pixel_endpoint = f\"{pixela_endpoint}/{USERNAME}/graphs/graph1\"\r\n\r\n\r\n# today = datetime(year=2020, month=12, day=25) custom date\r\ntoday = datetime.now()\r\nformatted_date = today.strftime(\"%Y%m%d\")\r\npixel_config = {\r\n \"date\": today.strftime(\"%Y%m%d\"),\r\n \"quantity\": input(\"How many hours did you spend reading today? \"),\r\n\r\n}\r\n\r\nresponse = requests.post(url=post_pixel_endpoint, headers=headers, json=pixel_config) # post a new pixel\r\nprint(response.text)\r\n\r\n\r\n# UPDATING A PIXEL\r\n\r\nupdate_endpoint = f\"{pixela_endpoint}/{USERNAME}/graphs/graph1/{formatted_date}\"\r\nupdated_pixel = {\r\n \"quantity\": \"3\"\r\n}\r\n\r\n# response = requests.put(url=update_endpoint, headers=headers, json=updated_pixel)\r\n# print(response.text)\r\n\r\n\r\n# DELETING A PIXEL\r\n\r\n# delete_endpoint = f\"{pixela_endpoint}/{USERNAME}/graphs/graph1/{formatted_date}\"\r\n# response = requests.delete(url=delete_endpoint,headers=headers)\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print('Your name is:', myName, 'and you are', age, 'years old')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
myName = 'Christian D. Goyes'
myDate = 1998
year = 2020
age = year - myDate
print('Your name is:', myName, 'and you are', age, 'years old')
<|reserved_special_token_1|>
#Developer: Christian D. Goyes
'''
This script shows your name and your age.
'''

myName = 'Christian D. Goyes'
myDate = 1998
year = 2020

age = year - myDate

print("Your name is:", myName, "and you are", age, "years old")
|
flexible
|
{
"blob_id": "f5331b56abea41873bd3936028471d0da1c58236",
"index": 4986,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('yourname is: ', age, 'and your are', 'years old')\n",
"step-3": "<mask token>\nmyName = 'Christian D. Goyes'\nmyDate = 1998\nyear = 2020\nage = year - myDate\nprint('yourname is: ', age, 'and your are', 'years old')\n",
"step-4": "#Developer: Chritian D. Goyes \n'''\nthis script show your name and your age.\n'''\n\nmyName = 'Christian D. Goyes'\nmyDate = 1998\nyear = 2020\n\nage = year - myDate\n\nprint (\"yourname is: \", age, \"and your are\", \"years old\")",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from sqlalchemy.orm import sessionmaker
from IMDB.spiders.models import IMDB_DATABASE, db_connect, create_table
class ScrapySpiderPipeline(object):
    # This function initializes the database connection and session factory, and creates a relational database table.
def __init__(self):
engine = db_connect()
create_table(engine)
self.Session = sessionmaker(bind=engine)
    # This function takes the items coming from the spider, processes them according to the model template in models.py, and saves the data into the database.
def process_item(self, item, spider):
session = self.Session()
ım_db = IMDB_DATABASE()
ım_db.MOVIE_CODE = item["MOVIE_CODE"]
ım_db.MOVIE_NAME = item["MOVIE_NAME"]
ım_db.YEAR = item["YEAR"]
ım_db.RANK = item["RANK"]
ım_db.IMDB_RATING = item["IMDB_RATING"]
        # The try/except blocks here catch any errors that occur while the data is being saved and report them back to us as a message.
try:
session.add(ım_db)
session.commit()
except:
session.rollback()
raise
finally:
session.close()
return item
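
# Hedged usage sketch (not from the original file): a Scrapy item pipeline like
# the one above is enabled in the project's settings.py. The dotted path
# 'IMDB.pipelines.ScrapySpiderPipeline' is an assumption about this project's
# layout; the integer is the pipeline's priority.
#
#   ITEM_PIPELINES = {
#       'IMDB.pipelines.ScrapySpiderPipeline': 300,
#   }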
|
normal
|
{
"blob_id": "16074fc1824a99b6fd1c4bf113d5b752308e8803",
"index": 5198,
"step-1": "<mask token>\n\n\nclass ScrapySpiderPipeline(object):\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass ScrapySpiderPipeline(object):\n\n def __init__(self):\n engine = db_connect()\n create_table(engine)\n self.Session = sessionmaker(bind=engine)\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass ScrapySpiderPipeline(object):\n\n def __init__(self):\n engine = db_connect()\n create_table(engine)\n self.Session = sessionmaker(bind=engine)\n\n def process_item(self, item, spider):\n session = self.Session()\n ım_db = IMDB_DATABASE()\n ım_db.MOVIE_CODE = item['MOVIE_CODE']\n ım_db.MOVIE_NAME = item['MOVIE_NAME']\n ım_db.YEAR = item['YEAR']\n ım_db.RANK = item['RANK']\n ım_db.IMDB_RATING = item['IMDB_RATING']\n try:\n session.add(ım_db)\n session.commit()\n except:\n session.rollback()\n raise\n finally:\n session.close()\n return item\n",
"step-4": "from sqlalchemy.orm import sessionmaker\nfrom IMDB.spiders.models import IMDB_DATABASE, db_connect, create_table\n\n\nclass ScrapySpiderPipeline(object):\n\n def __init__(self):\n engine = db_connect()\n create_table(engine)\n self.Session = sessionmaker(bind=engine)\n\n def process_item(self, item, spider):\n session = self.Session()\n ım_db = IMDB_DATABASE()\n ım_db.MOVIE_CODE = item['MOVIE_CODE']\n ım_db.MOVIE_NAME = item['MOVIE_NAME']\n ım_db.YEAR = item['YEAR']\n ım_db.RANK = item['RANK']\n ım_db.IMDB_RATING = item['IMDB_RATING']\n try:\n session.add(ım_db)\n session.commit()\n except:\n session.rollback()\n raise\n finally:\n session.close()\n return item\n",
"step-5": "from sqlalchemy.orm import sessionmaker\nfrom IMDB.spiders.models import IMDB_DATABASE, db_connect, create_table\n\n\nclass ScrapySpiderPipeline(object):\n \n # Bu Fonksiyon Veritabanı bağlantısını ve oturum oluşturucuyu başlatır ve bir İlişkisel Veritabanı tablosu oluşturur.\n def __init__(self):\n \n engine = db_connect()\n create_table(engine)\n \n self.Session = sessionmaker(bind=engine)\n\n # Bu Fonksiyon Spiderdan Gelen Dataları Models.py Dosyasındaki Model Şablonuna Göre İşleme Sokarak Verileri Database İçine Kaydeder\n def process_item(self, item, spider):\n\n session = self.Session()\n \n ım_db = IMDB_DATABASE()\n \n ım_db.MOVIE_CODE = item[\"MOVIE_CODE\"]\n \n ım_db.MOVIE_NAME = item[\"MOVIE_NAME\"]\n\n ım_db.YEAR = item[\"YEAR\"]\n\n ım_db.RANK = item[\"RANK\"]\n\n ım_db.IMDB_RATING = item[\"IMDB_RATING\"]\n\n\n\n # Buradaki Try Except istisna blokları datalar kaydedilirken varsa oluşan hataları ayıklayarak bizlere mesaj olarak döner\n try:\n session.add(ım_db)\n session.commit()\n \n except:\n session.rollback()\n raise\n \n finally:\n session.close()\n\n return item\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
# -*- coding: utf-8 -*-
from rest_framework.views import APIView
from ..Models.ConnectToDBModel import *
from ..Models.RegionInfoModel import *
from .CommonView import *
def get_one_spot(region):
comments_data = get_comment_data();
data = {};
data['id'] = region.id;
data['name'] = region.name;
data['address'] = region.address;
data['lng'] = region.lng;
data['lat'] = region.lat;
spot_comment_data = comments_data[(comments_data['search_key'] == str(region.search_key))]
data['commentNumber'] = spot_comment_data.iloc[:, 0].size;
data['commentScore'] = get_score(spot_comment_data['comment_score'].mean());
return data;
def get_spot_list(request):
    # decode the token
# username = decodeToken(request);
# print(username);
res = {};
try:
list = [get_one_spot(region) for region in regioninfo.objects];
        # return the list of all document objects
res['list'] = list;
return json_response(res);
except Exception:
        return json_error(error_string='An error occurred while querying',code = 12,api = "spotlist");
class SpotListView(APIView):
def get(self, request, *args, **kwargs):
try:
return get_spot_list(request);
except KeyError:
return json_error(error_string="请求错误", code=500);
|
normal
|
{
"blob_id": "0b0b22043dda94ea57344fb3bf47255ad85c7f5b",
"index": 1408,
"step-1": "<mask token>\n\n\nclass SpotListView(APIView):\n <mask token>\n",
"step-2": "<mask token>\n\n\ndef get_one_spot(region):\n comments_data = get_comment_data()\n data = {}\n data['id'] = region.id\n data['name'] = region.name\n data['address'] = region.address\n data['lng'] = region.lng\n data['lat'] = region.lat\n spot_comment_data = comments_data[comments_data['search_key'] == str(\n region.search_key)]\n data['commentNumber'] = spot_comment_data.iloc[:, 0].size\n data['commentScore'] = get_score(spot_comment_data['comment_score'].mean())\n return data\n\n\n<mask token>\n\n\nclass SpotListView(APIView):\n\n def get(self, request, *args, **kwargs):\n try:\n return get_spot_list(request)\n except KeyError:\n return json_error(error_string='请求错误', code=500)\n",
"step-3": "<mask token>\n\n\ndef get_one_spot(region):\n comments_data = get_comment_data()\n data = {}\n data['id'] = region.id\n data['name'] = region.name\n data['address'] = region.address\n data['lng'] = region.lng\n data['lat'] = region.lat\n spot_comment_data = comments_data[comments_data['search_key'] == str(\n region.search_key)]\n data['commentNumber'] = spot_comment_data.iloc[:, 0].size\n data['commentScore'] = get_score(spot_comment_data['comment_score'].mean())\n return data\n\n\ndef get_spot_list(request):\n res = {}\n try:\n list = [get_one_spot(region) for region in regioninfo.objects]\n res['list'] = list\n return json_response(res)\n except Exception:\n return json_error(error_string='查询发生错误', code=12, api='spotlist')\n\n\nclass SpotListView(APIView):\n\n def get(self, request, *args, **kwargs):\n try:\n return get_spot_list(request)\n except KeyError:\n return json_error(error_string='请求错误', code=500)\n",
"step-4": "from rest_framework.views import APIView\nfrom ..Models.ConnectToDBModel import *\nfrom ..Models.RegionInfoModel import *\nfrom .CommonView import *\n\n\ndef get_one_spot(region):\n comments_data = get_comment_data()\n data = {}\n data['id'] = region.id\n data['name'] = region.name\n data['address'] = region.address\n data['lng'] = region.lng\n data['lat'] = region.lat\n spot_comment_data = comments_data[comments_data['search_key'] == str(\n region.search_key)]\n data['commentNumber'] = spot_comment_data.iloc[:, 0].size\n data['commentScore'] = get_score(spot_comment_data['comment_score'].mean())\n return data\n\n\ndef get_spot_list(request):\n res = {}\n try:\n list = [get_one_spot(region) for region in regioninfo.objects]\n res['list'] = list\n return json_response(res)\n except Exception:\n return json_error(error_string='查询发生错误', code=12, api='spotlist')\n\n\nclass SpotListView(APIView):\n\n def get(self, request, *args, **kwargs):\n try:\n return get_spot_list(request)\n except KeyError:\n return json_error(error_string='请求错误', code=500)\n",
"step-5": "# -*- coding: utf-8 -*-\nfrom rest_framework.views import APIView\nfrom ..Models.ConnectToDBModel import *\nfrom ..Models.RegionInfoModel import *\nfrom .CommonView import *\n\n\n\ndef get_one_spot(region):\n\n comments_data = get_comment_data();\n\n data = {};\n data['id'] = region.id;\n data['name'] = region.name;\n data['address'] = region.address;\n data['lng'] = region.lng;\n data['lat'] = region.lat;\n spot_comment_data = comments_data[(comments_data['search_key'] == str(region.search_key))]\n data['commentNumber'] = spot_comment_data.iloc[:, 0].size;\n data['commentScore'] = get_score(spot_comment_data['comment_score'].mean());\n return data;\ndef get_spot_list(request):\n #进行解码token\n # username = decodeToken(request);\n # print(username);\n res = {};\n try:\n\n list = [get_one_spot(region) for region in regioninfo.objects];\n # 返回所有的文档对象列表\n res['list'] = list;\n return json_response(res);\n except Exception:\n return json_error(error_string='查询发生错误',code = 12,api = \"spotlist\");\n\n\n\nclass SpotListView(APIView):\n\n def get(self, request, *args, **kwargs):\n try:\n\n return get_spot_list(request);\n except KeyError:\n return json_error(error_string=\"请求错误\", code=500);\n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
# -*- coding: utf-8 -*-
# Scrapy settings for reddit_scraper project
#
# For simplicity, this file contains only the most important settings by
# default. All the other settings are documented here:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
#
BOT_NAME = 'reddit_scraper'
SPIDER_MODULES = ['reddit_scraper.spiders']
NEWSPIDER_MODULE = 'reddit_scraper.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'reddit_scraper (+http://www.yourdomain.com)'
|
normal
|
{
"blob_id": "a352768c2928cb7a33b9f1a31a0b3d8e56a8376a",
"index": 5588,
"step-1": "<mask token>\n",
"step-2": "BOT_NAME = 'reddit_scraper'\nSPIDER_MODULES = ['reddit_scraper.spiders']\nNEWSPIDER_MODULE = 'reddit_scraper.spiders'\n",
"step-3": "# -*- coding: utf-8 -*-\n\n# Scrapy settings for reddit_scraper project\n#\n# For simplicity, this file contains only the most important settings by\n# default. All the other settings are documented here:\n#\n# http://doc.scrapy.org/en/latest/topics/settings.html\n#\n\nBOT_NAME = 'reddit_scraper'\n\nSPIDER_MODULES = ['reddit_scraper.spiders']\nNEWSPIDER_MODULE = 'reddit_scraper.spiders'\n\n\n# Crawl responsibly by identifying yourself (and your website) on the user-agent\n#USER_AGENT = 'reddit_scraper (+http://www.yourdomain.com)'\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
dependencies = [migrations.swappable_dependency(settings.
AUTH_USER_MODEL), ('Assemblage', '0002_auto_20161014_1710')]
operations = [migrations.RemoveField(model_name='hotelingroup', name=
'negative_votes'), migrations.RemoveField(model_name='hotelingroup',
name='positive_votes'), migrations.RemoveField(model_name=
'hotelingroup', name='voters'), migrations.AddField(model_name=
'hotelingroup', name='negative_voters', field=models.
ManyToManyField(related_name='hotelingroup_negative_voters', to=
settings.AUTH_USER_MODEL)), migrations.AddField(model_name=
'hotelingroup', name='positive_voters', field=models.
ManyToManyField(related_name='hotelingroup_positive_voters', to=
settings.AUTH_USER_MODEL))]
<|reserved_special_token_1|>
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [migrations.swappable_dependency(settings.
AUTH_USER_MODEL), ('Assemblage', '0002_auto_20161014_1710')]
operations = [migrations.RemoveField(model_name='hotelingroup', name=
'negative_votes'), migrations.RemoveField(model_name='hotelingroup',
name='positive_votes'), migrations.RemoveField(model_name=
'hotelingroup', name='voters'), migrations.AddField(model_name=
'hotelingroup', name='negative_voters', field=models.
ManyToManyField(related_name='hotelingroup_negative_voters', to=
settings.AUTH_USER_MODEL)), migrations.AddField(model_name=
'hotelingroup', name='positive_voters', field=models.
ManyToManyField(related_name='hotelingroup_positive_voters', to=
settings.AUTH_USER_MODEL))]
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-10-14 19:37
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('Assemblage', '0002_auto_20161014_1710'),
]
operations = [
migrations.RemoveField(
model_name='hotelingroup',
name='negative_votes',
),
migrations.RemoveField(
model_name='hotelingroup',
name='positive_votes',
),
migrations.RemoveField(
model_name='hotelingroup',
name='voters',
),
migrations.AddField(
model_name='hotelingroup',
name='negative_voters',
field=models.ManyToManyField(related_name='hotelingroup_negative_voters', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='hotelingroup',
name='positive_voters',
field=models.ManyToManyField(related_name='hotelingroup_positive_voters', to=settings.AUTH_USER_MODEL),
),
]
|
flexible
|
{
"blob_id": "8c05259ce577e6b6a6efdf778832e9bb817e47fd",
"index": 1414,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [migrations.swappable_dependency(settings.\n AUTH_USER_MODEL), ('Assemblage', '0002_auto_20161014_1710')]\n operations = [migrations.RemoveField(model_name='hotelingroup', name=\n 'negative_votes'), migrations.RemoveField(model_name='hotelingroup',\n name='positive_votes'), migrations.RemoveField(model_name=\n 'hotelingroup', name='voters'), migrations.AddField(model_name=\n 'hotelingroup', name='negative_voters', field=models.\n ManyToManyField(related_name='hotelingroup_negative_voters', to=\n settings.AUTH_USER_MODEL)), migrations.AddField(model_name=\n 'hotelingroup', name='positive_voters', field=models.\n ManyToManyField(related_name='hotelingroup_positive_voters', to=\n settings.AUTH_USER_MODEL))]\n",
"step-4": "from __future__ import unicode_literals\nfrom django.conf import settings\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [migrations.swappable_dependency(settings.\n AUTH_USER_MODEL), ('Assemblage', '0002_auto_20161014_1710')]\n operations = [migrations.RemoveField(model_name='hotelingroup', name=\n 'negative_votes'), migrations.RemoveField(model_name='hotelingroup',\n name='positive_votes'), migrations.RemoveField(model_name=\n 'hotelingroup', name='voters'), migrations.AddField(model_name=\n 'hotelingroup', name='negative_voters', field=models.\n ManyToManyField(related_name='hotelingroup_negative_voters', to=\n settings.AUTH_USER_MODEL)), migrations.AddField(model_name=\n 'hotelingroup', name='positive_voters', field=models.\n ManyToManyField(related_name='hotelingroup_positive_voters', to=\n settings.AUTH_USER_MODEL))]\n",
"step-5": "# -*- coding: utf-8 -*-\n# Generated by Django 1.10.2 on 2016-10-14 19:37\nfrom __future__ import unicode_literals\n\nfrom django.conf import settings\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('Assemblage', '0002_auto_20161014_1710'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='hotelingroup',\n name='negative_votes',\n ),\n migrations.RemoveField(\n model_name='hotelingroup',\n name='positive_votes',\n ),\n migrations.RemoveField(\n model_name='hotelingroup',\n name='voters',\n ),\n migrations.AddField(\n model_name='hotelingroup',\n name='negative_voters',\n field=models.ManyToManyField(related_name='hotelingroup_negative_voters', to=settings.AUTH_USER_MODEL),\n ),\n migrations.AddField(\n model_name='hotelingroup',\n name='positive_voters',\n field=models.ManyToManyField(related_name='hotelingroup_positive_voters', to=settings.AUTH_USER_MODEL),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/env python
# made for comparing unfiltered and filtered scorefiles for Rosetta enzdes post analysis
import argparse
import collections
import re
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
def data_from_sc_file(axes, f, uf, true_max):
"""initializes two dictionaries and poplulates them based on -f and -u options"""
f_combo_dict = collections.defaultdict(list)
uf_combo_dict = collections.defaultdict(list)
max_x = -10000
max_y = -10000
min_x = 10000
min_y = 10000
for fileType in [uf, f]:
for i, item in enumerate(fileType):
with open(item) as f:
header = f.readline().split()
indices = [header.index(a) for a in axes]
for line in f:
line_list = line.split()
if (not line_list) or (line_list[0].startswith("#")) or (line_list[0][0].isalpha()):
continue
try:
desc_str = line_list[indices[-1]]
found_desc = re.search('A([0-9]+)_P([0-9]+)', desc_str).group()
except AttributeError:
continue
point_list = [line_list[i] for i in indices[:-1]]
point_tuple = tuple(map(float, point_list))
if point_tuple[0] > max_x:
max_x = point_tuple[0]
if point_tuple[0] < min_x:
min_x = point_tuple[0]
if point_tuple[1] > max_y:
max_y = point_tuple[1]
if point_tuple[1] < min_y:
min_y = point_tuple[1]
if not true_max:
if max_x > 0:
max_x = 0
if max_y > 0:
max_y = 0
if fileType == uf:
uf_combo_dict[found_desc].append(point_tuple)
else:
f_combo_dict[found_desc].append(point_tuple)
return uf_combo_dict, f_combo_dict, min_x, max_x, min_y, max_y
def gen_plots(uf_dict, f_dict, min_x, max_x, min_y, max_y, axes, name, histogram, total):
"""makes pdf of plots - one plot for each A[0-9]_P[0-9]"""
with PdfPages(name) as pdf:
total_xuf = []
total_yuf = []
total_xf = []
total_yf = []
for entry in uf_dict:
print 'Making plot for ' + entry
xuf, yuf = zip(*uf_dict[entry])
fig = plt.figure()
ax1 = fig.add_subplot(111)
ax1.scatter(xuf, yuf, c='#ad4851', marker='o', label='initial structures')
try:
xf, yf = zip(*f_dict[entry])
ax1.scatter(xf, yf, c='orange', marker='x', label='selected structures')
except ValueError:
xf = []
yf = []
plt.legend(loc='upper right')
plt.title(entry, fontsize=30)
plt.xlim(min_x, max_x)
plt.ylim(min_y, max_y)
plt.xlabel(axes[0], fontsize=20)
plt.ylabel(axes[1], fontsize=20)
pdf.savefig(fig)
plt.close()
if total:
total_xuf.extend(xuf)
total_yuf.extend(yuf)
total_xf.extend(xf)
total_yf.extend(yf)
if histogram:
bins = np.linspace(min_y, max_y, num=10)
plt.hist(yuf, bins, alpha=0.5, color='b', label='initial structures')
try:
plt.hist(yf, bins, alpha=0.5, color='orange', label='selected structures')
except ValueError:
pass
plt.legend(loc='upper right')
plt.title(entry, fontsize=30)
plt.xlabel(axes[1], fontsize=20)
plt.ylabel('Frequency', fontsize=20)
pdf.savefig()
plt.close()
if total:
print 'Making composite plot'
fig = plt.figure()
ax1 = fig.add_subplot(111)
ax1.scatter(total_xuf, total_yuf, c='#ad4851', marker='o', label='initial structures')
ax1.scatter(total_xf, total_yf, c='orange', marker='x', label='selected structures')
plt.legend(loc='upper right')
plt.title('Composite Plot', fontsize=30)
plt.xlim(min_x, max_x)
plt.ylim(min_y, max_y)
plt.xlabel(axes[0], fontsize=20)
plt.ylabel(axes[1], fontsize=20)
pdf.savefig(fig)
plt.close()
def main(x_axis, y_axis, filtered, unfiltered, name, histogram, total, true_max):
"""create axes variable and calls previous functions"""
axes = [x_axis, y_axis, 'description']
uf_dict, f_dict, min_x, max_x, min_y, max_y = data_from_sc_file(axes, filtered, unfiltered, true_max)
gen_plots(uf_dict, f_dict, min_x, max_x, min_y, max_y, axes, name, histogram, total)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Generates scatter plot of data from rosetta score files")
parser.add_argument("-x", "--xaxis",
help="criterion to be plotted on x-axis (default: total_score)",
default='total_score')
parser.add_argument("-y", "--yaxis",
help="criterion to be plotted on y-axis (default: SR_1_total_score)",
default='SR_1_total_score')
parser.add_argument("-n", "--name", default='postProcessPlot.pdf',
help='name of output pdf (default: postProcessPlot.pdf')
parser.add_argument("-b", "--histogram", action="store_true",
help="turn on histogram for y-axis parameter")
parser.add_argument("-c", "--composite", action="store_true",
help='make a composite plot that combines all subplots')
parser.add_argument("-t", "--true_max", action="store_true",
help='make plots with true maximum - will not cap max at 0')
requiredO = parser.add_argument_group('required arguments')
requiredO.add_argument("-s", "--selected", nargs='*', required=True,
help="one or more filtered score files from which data is pulled")
requiredO.add_argument("-i", "--initial", nargs='*', required=True,
help="one or more unfiltered score files from which data is pulled")
args = parser.parse_args()
main(args.xaxis, args.yaxis, args.selected, args.initial, args.name, args.histogram, args.composite, args.true_max)
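
# Example invocation (illustrative; the script and score-file names are
# assumptions). -s/--selected and -i/--initial are required; -b adds the
# histogram and -c the composite plot.
#
#   python post_process_plots.py -s filtered.sc -i unfiltered.sc \
#       -x total_score -y SR_1_total_score -b -c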
|
normal
|
{
"blob_id": "17b0baef5e366d70ea393259df1965e75b7d12e1",
"index": 5789,
"step-1": "#!/usr/bin/env python\n\n# made for comparing unfiltered and filtered scorefiles for Rosetta enzdes post analysis\n\nimport argparse\nimport collections\nimport re\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.backends.backend_pdf import PdfPages \n\n\ndef data_from_sc_file(axes, f, uf, true_max):\n \"\"\"initializes two dictionaries and poplulates them based on -f and -u options\"\"\"\n f_combo_dict = collections.defaultdict(list)\n uf_combo_dict = collections.defaultdict(list)\n max_x = -10000\n max_y = -10000\n min_x = 10000\n min_y = 10000\n\n for fileType in [uf, f]:\n for i, item in enumerate(fileType):\n with open(item) as f:\n header = f.readline().split()\n indices = [header.index(a) for a in axes]\n \n for line in f:\n line_list = line.split()\n if (not line_list) or (line_list[0].startswith(\"#\")) or (line_list[0][0].isalpha()):\n continue\n try:\n desc_str = line_list[indices[-1]]\n found_desc = re.search('A([0-9]+)_P([0-9]+)', desc_str).group()\n except AttributeError:\n continue\n \n point_list = [line_list[i] for i in indices[:-1]]\n point_tuple = tuple(map(float, point_list))\n if point_tuple[0] > max_x:\n max_x = point_tuple[0]\n if point_tuple[0] < min_x:\n min_x = point_tuple[0]\n if point_tuple[1] > max_y:\n max_y = point_tuple[1]\n if point_tuple[1] < min_y:\n min_y = point_tuple[1]\n\n if not true_max:\n if max_x > 0:\n max_x = 0\n if max_y > 0:\n max_y = 0\n \n if fileType == uf:\n uf_combo_dict[found_desc].append(point_tuple)\n else:\n f_combo_dict[found_desc].append(point_tuple)\n return uf_combo_dict, f_combo_dict, min_x, max_x, min_y, max_y\n\n\ndef gen_plots(uf_dict, f_dict, min_x, max_x, min_y, max_y, axes, name, histogram, total):\n \"\"\"makes pdf of plots - one plot for each A[0-9]_P[0-9]\"\"\"\n with PdfPages(name) as pdf:\n total_xuf = []\n total_yuf = []\n total_xf = []\n total_yf = []\n for entry in uf_dict:\n print 'Making plot for ' + entry\n xuf, yuf = zip(*uf_dict[entry])\n fig = plt.figure()\n ax1 = fig.add_subplot(111)\n ax1.scatter(xuf, yuf, c='#ad4851', marker='o', label='initial structures')\n try:\n xf, yf = zip(*f_dict[entry])\n ax1.scatter(xf, yf, c='orange', marker='x', label='selected structures')\n except ValueError:\n xf = []\n yf = []\n plt.legend(loc='upper right')\n plt.title(entry, fontsize=30)\n plt.xlim(min_x, max_x)\n plt.ylim(min_y, max_y)\n plt.xlabel(axes[0], fontsize=20)\n plt.ylabel(axes[1], fontsize=20)\n pdf.savefig(fig)\n plt.close()\n\n if total:\n total_xuf.extend(xuf)\n total_yuf.extend(yuf)\n total_xf.extend(xf)\n total_yf.extend(yf)\n\n if histogram:\n bins = np.linspace(min_y, max_y, num=10)\n plt.hist(yuf, bins, alpha=0.5, color='b', label='initial structures')\n try:\n plt.hist(yf, bins, alpha=0.5, color='orange', label='selected structures')\n except ValueError:\n pass\n plt.legend(loc='upper right')\n plt.title(entry, fontsize=30)\n plt.xlabel(axes[1], fontsize=20)\n plt.ylabel('Frequency', fontsize=20)\n pdf.savefig()\n plt.close()\n\n if total:\n print 'Making composite plot'\n fig = plt.figure()\n ax1 = fig.add_subplot(111)\n ax1.scatter(total_xuf, total_yuf, c='#ad4851', marker='o', label='initial structures')\n ax1.scatter(total_xf, total_yf, c='orange', marker='x', label='selected structures')\n plt.legend(loc='upper right')\n plt.title('Composite Plot', fontsize=30)\n plt.xlim(min_x, max_x)\n plt.ylim(min_y, max_y)\n plt.xlabel(axes[0], fontsize=20)\n plt.ylabel(axes[1], fontsize=20)\n pdf.savefig(fig)\n plt.close()\n\n\ndef main(x_axis, y_axis, filtered, unfiltered, 
name, histogram, total, true_max):\n \"\"\"create axes variable and calls previous functions\"\"\"\n axes = [x_axis, y_axis, 'description']\n uf_dict, f_dict, min_x, max_x, min_y, max_y = data_from_sc_file(axes, filtered, unfiltered, true_max)\n gen_plots(uf_dict, f_dict, min_x, max_x, min_y, max_y, axes, name, histogram, total)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"Generates scatter plot of data from rosetta score files\")\n parser.add_argument(\"-x\", \"--xaxis\",\n help=\"criterion to be plotted on x-axis (default: total_score)\",\n default='total_score')\n parser.add_argument(\"-y\", \"--yaxis\",\n help=\"criterion to be plotted on y-axis (default: SR_1_total_score)\",\n default='SR_1_total_score')\n parser.add_argument(\"-n\", \"--name\", default='postProcessPlot.pdf',\n help='name of output pdf (default: postProcessPlot.pdf')\n parser.add_argument(\"-b\", \"--histogram\", action=\"store_true\",\n help=\"turn on histogram for y-axis parameter\")\n parser.add_argument(\"-c\", \"--composite\", action=\"store_true\",\n help='make a composite plot that combines all subplots')\n parser.add_argument(\"-t\", \"--true_max\", action=\"store_true\",\n help='make plots with true maximum - will not cap max at 0')\n requiredO = parser.add_argument_group('required arguments')\n requiredO.add_argument(\"-s\", \"--selected\", nargs='*', required=True,\n help=\"one or more filtered score files from which data is pulled\")\n requiredO.add_argument(\"-i\", \"--initial\", nargs='*', required=True,\n help=\"one or more unfiltered score files from which data is pulled\")\n args = parser.parse_args()\n\n main(args.xaxis, args.yaxis, args.selected, args.initial, args.name, args.histogram, args.composite, args.true_max)\n\n \n\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
class ClassEnumerationHandler(RelativeHandlerInterface):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def process(self, target: Class):
"""
Process class receiver.
Steps:
1. Filter attrs not derived from xs:enumeration
2. Flatten attrs derived from xs:union of enumerations
3. Promote inner enumeration classes to root classes
"""
self.filter(target)
self.flatten(target)
self.promote(target)
@classmethod
def filter(cls, target: Class):
"""Filter attrs not derived from xs:enumeration if there are any
xs:enumeration attrs."""
enumerations = [attr for attr in target.attrs if attr.is_enumeration]
if enumerations:
target.attrs = enumerations
<|reserved_special_token_0|>
def promote(self, target: Class):
"""
Promote inner enumeration classes to root classes.
Steps:
1. Find inner enumerations
2. Clone and update their qualified name
3. Update attributes types
"""
for inner in list(target.inner):
if inner.is_enumeration:
target.inner.remove(inner)
clone = self.clone_enumeration(inner, target.name)
self.container.add(clone)
for attr in target.attrs:
self.update_types(attr, inner.qname, clone.qname)
@classmethod
def clone_enumeration(cls, inner: Class, name: str) ->Class:
clone = inner.clone()
clone.qname = build_qname(clone.target_namespace,
f'{name}_{clone.name}')
return clone
@classmethod
def update_types(cls, attr: Attr, search: str, replace: str):
for attr_type in attr.types:
if attr_type.qname == search and attr_type.forward:
attr_type.qname = replace
attr_type.forward = False
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ClassEnumerationHandler(RelativeHandlerInterface):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def process(self, target: Class):
"""
Process class receiver.
Steps:
1. Filter attrs not derived from xs:enumeration
2. Flatten attrs derived from xs:union of enumerations
3. Promote inner enumeration classes to root classes
"""
self.filter(target)
self.flatten(target)
self.promote(target)
@classmethod
def filter(cls, target: Class):
"""Filter attrs not derived from xs:enumeration if there are any
xs:enumeration attrs."""
enumerations = [attr for attr in target.attrs if attr.is_enumeration]
if enumerations:
target.attrs = enumerations
def flatten(self, target: Class):
"""
Flatten attrs derived from xs:union of enumeration classes.
Find the enumeration classes and merge all of their members in
the target class.
"""
if len(target.attrs) != 1 or target.attrs[0].tag != Tag.UNION:
return
enums: List[Any] = []
for attr_type in target.attrs[0].types:
if attr_type.forward:
enums.extend(target.inner)
elif not attr_type.native:
enums.append(self.container.find(attr_type.qname))
else:
enums.append(None)
merge = all(isinstance(x, Class) and x.is_enumeration for x in enums)
if merge:
target.attrs.clear()
target.inner.clear()
target.attrs.extend(attr.clone() for enum in enums for attr in
enum.attrs)
def promote(self, target: Class):
"""
Promote inner enumeration classes to root classes.
Steps:
1. Find inner enumerations
2. Clone and update their qualified name
3. Update attributes types
"""
for inner in list(target.inner):
if inner.is_enumeration:
target.inner.remove(inner)
clone = self.clone_enumeration(inner, target.name)
self.container.add(clone)
for attr in target.attrs:
self.update_types(attr, inner.qname, clone.qname)
@classmethod
def clone_enumeration(cls, inner: Class, name: str) ->Class:
clone = inner.clone()
clone.qname = build_qname(clone.target_namespace,
f'{name}_{clone.name}')
return clone
@classmethod
def update_types(cls, attr: Attr, search: str, replace: str):
for attr_type in attr.types:
if attr_type.qname == search and attr_type.forward:
attr_type.qname = replace
attr_type.forward = False
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ClassEnumerationHandler(RelativeHandlerInterface):
"""Enumeration class processor."""
__slots__ = ()
def process(self, target: Class):
"""
Process class receiver.
Steps:
1. Filter attrs not derived from xs:enumeration
2. Flatten attrs derived from xs:union of enumerations
3. Promote inner enumeration classes to root classes
"""
self.filter(target)
self.flatten(target)
self.promote(target)
@classmethod
def filter(cls, target: Class):
"""Filter attrs not derived from xs:enumeration if there are any
xs:enumeration attrs."""
enumerations = [attr for attr in target.attrs if attr.is_enumeration]
if enumerations:
target.attrs = enumerations
def flatten(self, target: Class):
"""
Flatten attrs derived from xs:union of enumeration classes.
Find the enumeration classes and merge all of their members in
the target class.
"""
if len(target.attrs) != 1 or target.attrs[0].tag != Tag.UNION:
return
enums: List[Any] = []
for attr_type in target.attrs[0].types:
if attr_type.forward:
enums.extend(target.inner)
elif not attr_type.native:
enums.append(self.container.find(attr_type.qname))
else:
enums.append(None)
merge = all(isinstance(x, Class) and x.is_enumeration for x in enums)
if merge:
target.attrs.clear()
target.inner.clear()
target.attrs.extend(attr.clone() for enum in enums for attr in
enum.attrs)
def promote(self, target: Class):
"""
Promote inner enumeration classes to root classes.
Steps:
1. Find inner enumerations
2. Clone and update their qualified name
3. Update attributes types
"""
for inner in list(target.inner):
if inner.is_enumeration:
target.inner.remove(inner)
clone = self.clone_enumeration(inner, target.name)
self.container.add(clone)
for attr in target.attrs:
self.update_types(attr, inner.qname, clone.qname)
@classmethod
def clone_enumeration(cls, inner: Class, name: str) ->Class:
clone = inner.clone()
clone.qname = build_qname(clone.target_namespace,
f'{name}_{clone.name}')
return clone
@classmethod
def update_types(cls, attr: Attr, search: str, replace: str):
for attr_type in attr.types:
if attr_type.qname == search and attr_type.forward:
attr_type.qname = replace
attr_type.forward = False
<|reserved_special_token_1|>
from typing import Any
from typing import List
from xsdata.codegen.mixins import RelativeHandlerInterface
from xsdata.codegen.models import Attr
from xsdata.codegen.models import Class
from xsdata.models.enums import Tag
from xsdata.utils.namespaces import build_qname
class ClassEnumerationHandler(RelativeHandlerInterface):
"""Enumeration class processor."""
__slots__ = ()
def process(self, target: Class):
"""
Process class receiver.
Steps:
1. Filter attrs not derived from xs:enumeration
2. Flatten attrs derived from xs:union of enumerations
3. Promote inner enumeration classes to root classes
"""
self.filter(target)
self.flatten(target)
self.promote(target)
@classmethod
def filter(cls, target: Class):
"""Filter attrs not derived from xs:enumeration if there are any
xs:enumeration attrs."""
enumerations = [attr for attr in target.attrs if attr.is_enumeration]
if enumerations:
target.attrs = enumerations
def flatten(self, target: Class):
"""
Flatten attrs derived from xs:union of enumeration classes.
Find the enumeration classes and merge all of their members in
the target class.
"""
if len(target.attrs) != 1 or target.attrs[0].tag != Tag.UNION:
return
enums: List[Any] = []
for attr_type in target.attrs[0].types:
if attr_type.forward:
enums.extend(target.inner)
elif not attr_type.native:
enums.append(self.container.find(attr_type.qname))
else:
enums.append(None)
merge = all(isinstance(x, Class) and x.is_enumeration for x in enums)
if merge:
target.attrs.clear()
target.inner.clear()
target.attrs.extend(attr.clone() for enum in enums for attr in
enum.attrs)
def promote(self, target: Class):
"""
Promote inner enumeration classes to root classes.
Steps:
1. Find inner enumerations
2. Clone and update their qualified name
3. Update attributes types
"""
for inner in list(target.inner):
if inner.is_enumeration:
target.inner.remove(inner)
clone = self.clone_enumeration(inner, target.name)
self.container.add(clone)
for attr in target.attrs:
self.update_types(attr, inner.qname, clone.qname)
@classmethod
def clone_enumeration(cls, inner: Class, name: str) ->Class:
clone = inner.clone()
clone.qname = build_qname(clone.target_namespace,
f'{name}_{clone.name}')
return clone
@classmethod
def update_types(cls, attr: Attr, search: str, replace: str):
for attr_type in attr.types:
if attr_type.qname == search and attr_type.forward:
attr_type.qname = replace
attr_type.forward = False
<|reserved_special_token_1|>
from typing import Any
from typing import List
from xsdata.codegen.mixins import RelativeHandlerInterface
from xsdata.codegen.models import Attr
from xsdata.codegen.models import Class
from xsdata.models.enums import Tag
from xsdata.utils.namespaces import build_qname
class ClassEnumerationHandler(RelativeHandlerInterface):
"""Enumeration class processor."""
__slots__ = ()
def process(self, target: Class):
"""
Process class receiver.
Steps:
1. Filter attrs not derived from xs:enumeration
2. Flatten attrs derived from xs:union of enumerations
3. Promote inner enumeration classes to root classes
"""
self.filter(target)
self.flatten(target)
self.promote(target)
@classmethod
def filter(cls, target: Class):
"""Filter attrs not derived from xs:enumeration if there are any
xs:enumeration attrs."""
enumerations = [attr for attr in target.attrs if attr.is_enumeration]
if enumerations:
target.attrs = enumerations
def flatten(self, target: Class):
"""
Flatten attrs derived from xs:union of enumeration classes.
Find the enumeration classes and merge all of their members in
the target class.
"""
if len(target.attrs) != 1 or target.attrs[0].tag != Tag.UNION:
return
enums: List[Any] = []
for attr_type in target.attrs[0].types:
if attr_type.forward:
enums.extend(target.inner)
elif not attr_type.native:
enums.append(self.container.find(attr_type.qname))
else:
enums.append(None)
merge = all(isinstance(x, Class) and x.is_enumeration for x in enums)
if merge:
target.attrs.clear()
target.inner.clear()
target.attrs.extend(attr.clone() for enum in enums for attr in enum.attrs)
def promote(self, target: Class):
"""
Promote inner enumeration classes to root classes.
Steps:
1. Find inner enumerations
2. Clone and update their qualified name
3. Update attributes types
"""
for inner in list(target.inner):
if inner.is_enumeration:
target.inner.remove(inner)
clone = self.clone_enumeration(inner, target.name)
self.container.add(clone)
for attr in target.attrs:
self.update_types(attr, inner.qname, clone.qname)
@classmethod
def clone_enumeration(cls, inner: Class, name: str) -> Class:
clone = inner.clone()
clone.qname = build_qname(clone.target_namespace, f"{name}_{clone.name}")
return clone
@classmethod
def update_types(cls, attr: Attr, search: str, replace: str):
for attr_type in attr.types:
if attr_type.qname == search and attr_type.forward:
attr_type.qname = replace
attr_type.forward = False
|
flexible
|
{
"blob_id": "4d9064add28302fe173a8b0a81ee7d187db8aead",
"index": 6029,
"step-1": "<mask token>\n\n\nclass ClassEnumerationHandler(RelativeHandlerInterface):\n <mask token>\n <mask token>\n\n def process(self, target: Class):\n \"\"\"\n Process class receiver.\n\n Steps:\n 1. Filter attrs not derived from xs:enumeration\n 2. Flatten attrs derived from xs:union of enumerations\n 3. Promote inner enumeration classes to root classes\n \"\"\"\n self.filter(target)\n self.flatten(target)\n self.promote(target)\n\n @classmethod\n def filter(cls, target: Class):\n \"\"\"Filter attrs not derived from xs:enumeration if there are any\n xs:enumeration attrs.\"\"\"\n enumerations = [attr for attr in target.attrs if attr.is_enumeration]\n if enumerations:\n target.attrs = enumerations\n <mask token>\n\n def promote(self, target: Class):\n \"\"\"\n Promote inner enumeration classes to root classes.\n\n Steps:\n 1. Find inner enumerations\n 2. Clone and update their qualified name\n 3. Update attributes types\n \"\"\"\n for inner in list(target.inner):\n if inner.is_enumeration:\n target.inner.remove(inner)\n clone = self.clone_enumeration(inner, target.name)\n self.container.add(clone)\n for attr in target.attrs:\n self.update_types(attr, inner.qname, clone.qname)\n\n @classmethod\n def clone_enumeration(cls, inner: Class, name: str) ->Class:\n clone = inner.clone()\n clone.qname = build_qname(clone.target_namespace,\n f'{name}_{clone.name}')\n return clone\n\n @classmethod\n def update_types(cls, attr: Attr, search: str, replace: str):\n for attr_type in attr.types:\n if attr_type.qname == search and attr_type.forward:\n attr_type.qname = replace\n attr_type.forward = False\n",
"step-2": "<mask token>\n\n\nclass ClassEnumerationHandler(RelativeHandlerInterface):\n <mask token>\n <mask token>\n\n def process(self, target: Class):\n \"\"\"\n Process class receiver.\n\n Steps:\n 1. Filter attrs not derived from xs:enumeration\n 2. Flatten attrs derived from xs:union of enumerations\n 3. Promote inner enumeration classes to root classes\n \"\"\"\n self.filter(target)\n self.flatten(target)\n self.promote(target)\n\n @classmethod\n def filter(cls, target: Class):\n \"\"\"Filter attrs not derived from xs:enumeration if there are any\n xs:enumeration attrs.\"\"\"\n enumerations = [attr for attr in target.attrs if attr.is_enumeration]\n if enumerations:\n target.attrs = enumerations\n\n def flatten(self, target: Class):\n \"\"\"\n Flatten attrs derived from xs:union of enumeration classes.\n\n Find the enumeration classes and merge all of their members in\n the target class.\n \"\"\"\n if len(target.attrs) != 1 or target.attrs[0].tag != Tag.UNION:\n return\n enums: List[Any] = []\n for attr_type in target.attrs[0].types:\n if attr_type.forward:\n enums.extend(target.inner)\n elif not attr_type.native:\n enums.append(self.container.find(attr_type.qname))\n else:\n enums.append(None)\n merge = all(isinstance(x, Class) and x.is_enumeration for x in enums)\n if merge:\n target.attrs.clear()\n target.inner.clear()\n target.attrs.extend(attr.clone() for enum in enums for attr in\n enum.attrs)\n\n def promote(self, target: Class):\n \"\"\"\n Promote inner enumeration classes to root classes.\n\n Steps:\n 1. Find inner enumerations\n 2. Clone and update their qualified name\n 3. Update attributes types\n \"\"\"\n for inner in list(target.inner):\n if inner.is_enumeration:\n target.inner.remove(inner)\n clone = self.clone_enumeration(inner, target.name)\n self.container.add(clone)\n for attr in target.attrs:\n self.update_types(attr, inner.qname, clone.qname)\n\n @classmethod\n def clone_enumeration(cls, inner: Class, name: str) ->Class:\n clone = inner.clone()\n clone.qname = build_qname(clone.target_namespace,\n f'{name}_{clone.name}')\n return clone\n\n @classmethod\n def update_types(cls, attr: Attr, search: str, replace: str):\n for attr_type in attr.types:\n if attr_type.qname == search and attr_type.forward:\n attr_type.qname = replace\n attr_type.forward = False\n",
"step-3": "<mask token>\n\n\nclass ClassEnumerationHandler(RelativeHandlerInterface):\n \"\"\"Enumeration class processor.\"\"\"\n __slots__ = ()\n\n def process(self, target: Class):\n \"\"\"\n Process class receiver.\n\n Steps:\n 1. Filter attrs not derived from xs:enumeration\n 2. Flatten attrs derived from xs:union of enumerations\n 3. Promote inner enumeration classes to root classes\n \"\"\"\n self.filter(target)\n self.flatten(target)\n self.promote(target)\n\n @classmethod\n def filter(cls, target: Class):\n \"\"\"Filter attrs not derived from xs:enumeration if there are any\n xs:enumeration attrs.\"\"\"\n enumerations = [attr for attr in target.attrs if attr.is_enumeration]\n if enumerations:\n target.attrs = enumerations\n\n def flatten(self, target: Class):\n \"\"\"\n Flatten attrs derived from xs:union of enumeration classes.\n\n Find the enumeration classes and merge all of their members in\n the target class.\n \"\"\"\n if len(target.attrs) != 1 or target.attrs[0].tag != Tag.UNION:\n return\n enums: List[Any] = []\n for attr_type in target.attrs[0].types:\n if attr_type.forward:\n enums.extend(target.inner)\n elif not attr_type.native:\n enums.append(self.container.find(attr_type.qname))\n else:\n enums.append(None)\n merge = all(isinstance(x, Class) and x.is_enumeration for x in enums)\n if merge:\n target.attrs.clear()\n target.inner.clear()\n target.attrs.extend(attr.clone() for enum in enums for attr in\n enum.attrs)\n\n def promote(self, target: Class):\n \"\"\"\n Promote inner enumeration classes to root classes.\n\n Steps:\n 1. Find inner enumerations\n 2. Clone and update their qualified name\n 3. Update attributes types\n \"\"\"\n for inner in list(target.inner):\n if inner.is_enumeration:\n target.inner.remove(inner)\n clone = self.clone_enumeration(inner, target.name)\n self.container.add(clone)\n for attr in target.attrs:\n self.update_types(attr, inner.qname, clone.qname)\n\n @classmethod\n def clone_enumeration(cls, inner: Class, name: str) ->Class:\n clone = inner.clone()\n clone.qname = build_qname(clone.target_namespace,\n f'{name}_{clone.name}')\n return clone\n\n @classmethod\n def update_types(cls, attr: Attr, search: str, replace: str):\n for attr_type in attr.types:\n if attr_type.qname == search and attr_type.forward:\n attr_type.qname = replace\n attr_type.forward = False\n",
"step-4": "from typing import Any\nfrom typing import List\nfrom xsdata.codegen.mixins import RelativeHandlerInterface\nfrom xsdata.codegen.models import Attr\nfrom xsdata.codegen.models import Class\nfrom xsdata.models.enums import Tag\nfrom xsdata.utils.namespaces import build_qname\n\n\nclass ClassEnumerationHandler(RelativeHandlerInterface):\n \"\"\"Enumeration class processor.\"\"\"\n __slots__ = ()\n\n def process(self, target: Class):\n \"\"\"\n Process class receiver.\n\n Steps:\n 1. Filter attrs not derived from xs:enumeration\n 2. Flatten attrs derived from xs:union of enumerations\n 3. Promote inner enumeration classes to root classes\n \"\"\"\n self.filter(target)\n self.flatten(target)\n self.promote(target)\n\n @classmethod\n def filter(cls, target: Class):\n \"\"\"Filter attrs not derived from xs:enumeration if there are any\n xs:enumeration attrs.\"\"\"\n enumerations = [attr for attr in target.attrs if attr.is_enumeration]\n if enumerations:\n target.attrs = enumerations\n\n def flatten(self, target: Class):\n \"\"\"\n Flatten attrs derived from xs:union of enumeration classes.\n\n Find the enumeration classes and merge all of their members in\n the target class.\n \"\"\"\n if len(target.attrs) != 1 or target.attrs[0].tag != Tag.UNION:\n return\n enums: List[Any] = []\n for attr_type in target.attrs[0].types:\n if attr_type.forward:\n enums.extend(target.inner)\n elif not attr_type.native:\n enums.append(self.container.find(attr_type.qname))\n else:\n enums.append(None)\n merge = all(isinstance(x, Class) and x.is_enumeration for x in enums)\n if merge:\n target.attrs.clear()\n target.inner.clear()\n target.attrs.extend(attr.clone() for enum in enums for attr in\n enum.attrs)\n\n def promote(self, target: Class):\n \"\"\"\n Promote inner enumeration classes to root classes.\n\n Steps:\n 1. Find inner enumerations\n 2. Clone and update their qualified name\n 3. Update attributes types\n \"\"\"\n for inner in list(target.inner):\n if inner.is_enumeration:\n target.inner.remove(inner)\n clone = self.clone_enumeration(inner, target.name)\n self.container.add(clone)\n for attr in target.attrs:\n self.update_types(attr, inner.qname, clone.qname)\n\n @classmethod\n def clone_enumeration(cls, inner: Class, name: str) ->Class:\n clone = inner.clone()\n clone.qname = build_qname(clone.target_namespace,\n f'{name}_{clone.name}')\n return clone\n\n @classmethod\n def update_types(cls, attr: Attr, search: str, replace: str):\n for attr_type in attr.types:\n if attr_type.qname == search and attr_type.forward:\n attr_type.qname = replace\n attr_type.forward = False\n",
"step-5": "from typing import Any\nfrom typing import List\n\nfrom xsdata.codegen.mixins import RelativeHandlerInterface\nfrom xsdata.codegen.models import Attr\nfrom xsdata.codegen.models import Class\nfrom xsdata.models.enums import Tag\nfrom xsdata.utils.namespaces import build_qname\n\n\nclass ClassEnumerationHandler(RelativeHandlerInterface):\n \"\"\"Enumeration class processor.\"\"\"\n\n __slots__ = ()\n\n def process(self, target: Class):\n \"\"\"\n Process class receiver.\n\n Steps:\n 1. Filter attrs not derived from xs:enumeration\n 2. Flatten attrs derived from xs:union of enumerations\n 3. Promote inner enumeration classes to root classes\n \"\"\"\n self.filter(target)\n self.flatten(target)\n self.promote(target)\n\n @classmethod\n def filter(cls, target: Class):\n \"\"\"Filter attrs not derived from xs:enumeration if there are any\n xs:enumeration attrs.\"\"\"\n enumerations = [attr for attr in target.attrs if attr.is_enumeration]\n if enumerations:\n target.attrs = enumerations\n\n def flatten(self, target: Class):\n \"\"\"\n Flatten attrs derived from xs:union of enumeration classes.\n\n Find the enumeration classes and merge all of their members in\n the target class.\n \"\"\"\n if len(target.attrs) != 1 or target.attrs[0].tag != Tag.UNION:\n return\n\n enums: List[Any] = []\n for attr_type in target.attrs[0].types:\n if attr_type.forward:\n enums.extend(target.inner)\n elif not attr_type.native:\n enums.append(self.container.find(attr_type.qname))\n else:\n enums.append(None)\n\n merge = all(isinstance(x, Class) and x.is_enumeration for x in enums)\n if merge:\n target.attrs.clear()\n target.inner.clear()\n\n target.attrs.extend(attr.clone() for enum in enums for attr in enum.attrs)\n\n def promote(self, target: Class):\n \"\"\"\n Promote inner enumeration classes to root classes.\n\n Steps:\n 1. Find inner enumerations\n 2. Clone and update their qualified name\n 3. Update attributes types\n \"\"\"\n for inner in list(target.inner):\n if inner.is_enumeration:\n target.inner.remove(inner)\n clone = self.clone_enumeration(inner, target.name)\n self.container.add(clone)\n for attr in target.attrs:\n self.update_types(attr, inner.qname, clone.qname)\n\n @classmethod\n def clone_enumeration(cls, inner: Class, name: str) -> Class:\n clone = inner.clone()\n clone.qname = build_qname(clone.target_namespace, f\"{name}_{clone.name}\")\n return clone\n\n @classmethod\n def update_types(cls, attr: Attr, search: str, replace: str):\n for attr_type in attr.types:\n if attr_type.qname == search and attr_type.forward:\n attr_type.qname = replace\n attr_type.forward = False\n",
"step-ids": [
6,
7,
9,
10,
11
]
}
|
[
6,
7,
9,
10,
11
] |
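The `ClassEnumerationHandler` above operates on xsdata's code-generation model, where a `Class` owns a list of `attrs` (each attr carrying `types`) and nested `inner` classes. To make the `promote` step concrete without depending on xsdata internals, here is a self-contained sketch; the `Toy*` dataclasses and the `registry` list are simplified stand-ins invented for illustration, not xsdata's real models.

# Toy stand-ins mirroring only the fields the promote step touches.
from dataclasses import dataclass, field
from typing import List


@dataclass
class ToyAttrType:
    qname: str
    forward: bool = False


@dataclass
class ToyAttr:
    name: str
    types: List[ToyAttrType] = field(default_factory=list)


@dataclass
class ToyClass:
    qname: str
    attrs: List[ToyAttr] = field(default_factory=list)
    inner: List["ToyClass"] = field(default_factory=list)
    is_enumeration: bool = False


def promote(target: ToyClass, registry: List[ToyClass]):
    """Move inner enumerations to the registry and repoint attr types."""
    for inner in list(target.inner):
        if not inner.is_enumeration:
            continue
        target.inner.remove(inner)
        clone = ToyClass(qname=f"{target.qname}_{inner.qname}",
                         attrs=list(inner.attrs), is_enumeration=True)
        registry.append(clone)
        for attr in target.attrs:
            for attr_type in attr.types:
                if attr_type.qname == inner.qname and attr_type.forward:
                    attr_type.qname = clone.qname
                    attr_type.forward = False


registry: List[ToyClass] = []
color = ToyClass(qname="Color", is_enumeration=True)
shirt = ToyClass(qname="Shirt",
                 attrs=[ToyAttr("color", [ToyAttrType("Color", forward=True)])],
                 inner=[color])
promote(shirt, registry)
assert not shirt.inner and registry[0].qname == "Shirt_Color"
assert shirt.attrs[0].types[0].qname == "Shirt_Color"

The real handler builds the promoted name with `build_qname` so the clone keeps the target namespace; the sketch uses plain strings for brevity.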
class MiniMaxSearch(object):
def __init__(self):
self.count = 0
self.explored = set()
def max_value(self, state, a, b):
self.count += 1
value = float('-inf')
if state in self.explored:
return state.evaluate()
if state.terminal():
self.explored.add(state)
return state.evaluate()
for action in state.actions():
result = state.result(action)
if result in self.explored:
return state.evaluate()
value = max(value, self.min_value(result, a, b))
self.explored.add(result)
if value >= b:
return value
else:
a = max(a, value)
return value
def min_value(self, state, a, b):
self.count += 1
value = float('inf')
if state in self.explored:
return state.evaluate()
if state.terminal():
self.explored.add(state)
return state.evaluate()
for action in state.actions():
result = state.result(action)
if result in self.explored:
return state.evaluate()
value = min(value, self.max_value(result, a, b))
self.explored.add(result)
if value <= a:
return value
else:
b = min(b, value)
return value
def decide_min(self, state):
self.count = 0
best = self.max_value(state, float('-inf'), float('inf'))
for action in state.actions():
if best == self.min_value(state.result(action), float('-inf'), float('inf')):
                print(self.count)
return action
|
normal
|
{
"blob_id": "15c61dbf51d676b4c339dd4ef86a76696adfc998",
"index": 4707,
"step-1": "\n\nclass MiniMaxSearch(object):\n def __init__(self):\n self.count = 0\n self.explored = set()\n\n def max_value(self, state, a, b):\n self.count += 1\n value = float('-inf')\n\n if state in self.explored:\n return state.evaluate()\n\n if state.terminal():\n self.explored.add(state)\n return state.evaluate()\n\n for action in state.actions():\n result = state.result(action)\n\n if result in self.explored:\n return state.evaluate()\n\n value = max(value, self.min_value(result, a, b))\n self.explored.add(result)\n\n if value >= b:\n return value\n else:\n a = max(a, value)\n return value\n\n def min_value(self, state, a, b):\n self.count += 1\n value = float('inf')\n\n if state in self.explored:\n return state.evaluate()\n\n if state.terminal():\n self.explored.add(state)\n return state.evaluate()\n\n for action in state.actions():\n result = state.result(action)\n\n if result in self.explored:\n return state.evaluate()\n\n value = min(value, self.max_value(result, a, b))\n self.explored.add(result)\n\n if value <= a:\n return value\n else:\n b = min(b, value)\n return value\n\n def decide_min(self, state):\n self.count = 0\n best = self.max_value(state, float('-inf'), float('inf'))\n for action in state.actions():\n if best == self.min_value(state.result(action), float('-inf'), float('inf')):\n print self.count\n return action\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
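`MiniMaxSearch` above is written against a duck-typed game state: the calls in `max_value`/`min_value` imply a hashable object exposing `actions()`, `result(action)`, `terminal()` and `evaluate()`. Here is a minimal sketch under that assumption (Python 3), using one-pile Nim. Two caveats read straight from the code: despite its name, `decide_min` scores the root with `max_value`, so it chooses the maximizing player's move; and the `explored` cache answers repeat visits with `evaluate()`, so the sketch makes `evaluate` return the exact game value to keep cached lookups consistent.

from collections import namedtuple


class NimState(namedtuple('NimState', 'stones max_to_move')):
    """One-pile Nim: take 1-3 stones; whoever takes the last stone wins."""

    def actions(self):
        return [n for n in (1, 2, 3) if n <= self.stones]

    def result(self, action):
        return NimState(self.stones - action, not self.max_to_move)

    def terminal(self):
        return self.stones == 0

    def evaluate(self):
        # Exact value from MAX's perspective: the player to move wins
        # iff stones % 4 != 0 (stones == 0 means the mover already lost).
        mover_wins = self.stones % 4 != 0
        if self.max_to_move:
            return 1 if mover_wins else -1
        return -1 if mover_wins else 1


search = MiniMaxSearch()
move = search.decide_min(NimState(stones=5, max_to_move=True))
print('take', move)  # takes 1, leaving the opponent a losing multiple of 4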
# -*- coding: iso-8859-15 -*-
# @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
# @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@8:........C@@@
# @@@@@@@@@@@@@@88@@@@@@@@@@@@@@@@@@@@@@88@@@@@@@@@@888@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@O:...........:C@
# @ .@O O@8 C@@O o@@@: cO oc 8o .@@. @c....:O@@:....:@
# @ .:c8 CO O8 :o O8 oO C@. :8. :::. ..::. ::Cc ..:8o o@: @o....:8@@:....:@
# @ c@@@O OO C8 c@ OO o8 c@. :@. :@@C O@@@@. :@@@c 8@@@@@@@@@@@@: @@@@@@@@@O.....:@
# @ ..oO OO C8 .@O o@@@@@@@. :@. :@@C O@@@@. :@@@c :C8@@@o O@@ccC @@@@@@@O.......c@
# @ oO OO C8 C@O o. c8. :@. :@@8OOCo8@@@@. :@@@8@@@@@@O@@@@@@@8C: @@@@@C.......o@@@
# @ c@@@O OO C8 c8 OO oO c@. :@. o@@@@@@@@@@@@@@@@@@@@@o 8@@@o ..o @@@C......:C@@@@@
# @ c@@@O CO C8 c8 OO o@. c@. :@..o8@@@@@@@@@@@@@@@@Oc@@@c 8@@@o oo @C......:O@@@@@@@
# @ c@@@@ .. 88 c8 O@. .: c@c :o@@@@@@@@@@@@@@@@@@@@@@@@Ooc:: Co o@. @c....:O@@@@@@@@@
# @ c@@@@@o o@@8 c@ O@@o cc c@@O. c@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@: Co o@O @c....:O8@@@@@@@@
# @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@:C@:C:..:C.:.:c.:.@o.............:@
# @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@.:o o.oo o ooCc.oC@c.............:@
# @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
#
# NCOrifle.py -- Support for squadleaders being able to choose between smg and rifle.
#
# ©2010 Spit for Forgotten Hope
import host, bf2
from game.gameplayPlugin import base
from game.utilities import rconExec, getCurrentRound
from NCOrifleData import NCO_kits
DEBUG = 0
class NCOrifle(base):
def round_start(self, hooker):
self.watched_players = []
self.choices = {}
self.spawned = []
self.spawned_dict = {}
if not hooker.hasHook('RemoteCommand', self.onRemoteCommand):
hooker.register('RemoteCommand', self.onRemoteCommand)
hooker.register('PlayerSpawn', self.onPlayerSpawn)
hooker.register('PickupKit', self.onPickupKit)
if DEBUG: print 'NCOrifle: hooks registered'
else:
if DEBUG: print 'NCOrifle: hooks already registered'
def onRemoteCommand(self, playerid, cmd):
if not (cmd == 'ncosmg' or cmd == 'ncorifle' or cmd.startswith('selectkit')): return
if playerid == -1: playerid = 255
player = bf2.playerManager.getPlayerByIndex(playerid)
if DEBUG: print 'NCOrifle: player %s executed rcon command "%s"' % (player.getName(), cmd)
if cmd.startswith('selectkit'):
if cmd.endswith('6'):
self.addPlayer(player)
else:
self.removePlayer(player)
if cmd == 'ncorifle':
self.choices[player] = 'rifle'
if DEBUG: print 'NCOrifle: player %s has chosen a rifle to spawn with' % player.getName()
elif cmd == 'ncosmg':
self.choices[player] = 'smg'
if DEBUG: print 'NCOrifle: player %s has chosen an smg to spawn with' % player.getName()
def onPickupKit(self, player, kit):
if player not in self.spawned: return
def_kit = self.getData(player)
if def_kit is None: return
if DEBUG: print 'Setting NCO kit back to default for team %d' % player.getTeam()
self.setKit(def_kit, player.getTeam(), self.spawned_dict[player])
self.spawned.remove(player)
self.spawned_dict[player] = None
def onPlayerSpawn(self, player, soldier):
try:
self._onPlayerSpawn(player, soldier)
except Exception, e:
print 'NCOrifle exception', e
def getData(self, player):
map, gamemode, size = getCurrentRound()
if map in NCO_kits.keys():
def_kit1, def_kit2 = NCO_kits[map]
exec('def_kit = def_kit%d' % player.getTeam())
return def_kit
else:
print 'NCOrifle: Can\'t find NCO kit info for map %s. Update NCOrifleData.py or provide custom map info via mapdata.py' % map
return None
def _onPlayerSpawn(self, player, soldier):
if player not in self.watched_players: return
def_kit = None
def_kit = self.getData(player)
if def_kit is None: return
if player not in self.choices.keys():
self.setKit(def_kit, player.getTeam(), soldier.templateName)
elif self.choices[player] == 'smg':
self.setKit(def_kit, player.getTeam(), soldier.templateName)
elif self.choices[player] == 'rifle':
if DEBUG: print 'NCOrifle: player %s wants to spawn with a modified NCO kit...' % player.getName()
kit = def_kit + '_rifle'
self.setKit(kit, player.getTeam(), soldier.templateName)
if player in self.spawned: return
self.spawned.append(player)
self.spawned_dict[player] = soldier.templateName
def setKit(self, kit, team, soldier):
rconExec('gameLogic.setKit %d 6 "%s" "%s"' % (team, kit, soldier))
if DEBUG: print 'NCOrifle: Set NCO kit for team %d to %s, %s' % (team, kit, soldier)
def addPlayer(self, player):
if player not in self.watched_players:
self.watched_players.append(player)
if DEBUG: print 'NCOrifle: added player %s to watched players list' % player.getName()
def removePlayer(self, player):
if player in self.watched_players:
self.watched_players.remove(player)
if DEBUG: print 'NCOrifle: removed player %s from watched players list' % player.getName()
|
normal
|
{
"blob_id": "f105ecb8229020554930bb4f0e00ecf88e83f5ae",
"index": 4288,
"step-1": "# -*- coding: iso-8859-15 -*-\r\n# @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\r\n# @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@8:........C@@@\r\n# @@@@@@@@@@@@@@88@@@@@@@@@@@@@@@@@@@@@@88@@@@@@@@@@888@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@O:...........:C@\r\n# @ .@O O@8 C@@O o@@@: cO oc 8o .@@. @c....:O@@:....:@\r\n# @ .:c8 CO O8 :o O8 oO C@. :8. :::. ..::. ::Cc ..:8o o@: @o....:8@@:....:@\r\n# @ c@@@O OO C8 c@ OO o8 c@. :@. :@@C O@@@@. :@@@c 8@@@@@@@@@@@@: @@@@@@@@@O.....:@\r\n# @ ..oO OO C8 .@O o@@@@@@@. :@. :@@C O@@@@. :@@@c :C8@@@o O@@ccC @@@@@@@O.......c@\r\n# @ oO OO C8 C@O o. c8. :@. :@@8OOCo8@@@@. :@@@8@@@@@@O@@@@@@@8C: @@@@@C.......o@@@\r\n# @ c@@@O OO C8 c8 OO oO c@. :@. o@@@@@@@@@@@@@@@@@@@@@o 8@@@o ..o @@@C......:C@@@@@\r\n# @ c@@@O CO C8 c8 OO o@. c@. :@..o8@@@@@@@@@@@@@@@@Oc@@@c 8@@@o oo @C......:O@@@@@@@\r\n# @ c@@@@ .. 88 c8 O@. .: c@c :o@@@@@@@@@@@@@@@@@@@@@@@@Ooc:: Co o@. @c....:O@@@@@@@@@\r\n# @ c@@@@@o o@@8 c@ O@@o cc c@@O. c@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@: Co o@O @c....:O8@@@@@@@@\r\n# @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@:C@:C:..:C.:.:c.:.@o.............:@\r\n# @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@.:o o.oo o ooCc.oC@c.............:@\r\n# @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\r\n#\r\n# NCOrifle.py -- Support for squadleaders being able to choose between smg and rifle.\r\n#\r\n# ©2010 Spit for Forgotten Hope\r\n\r\nimport host, bf2\r\nfrom game.gameplayPlugin import base\r\nfrom game.utilities import rconExec, getCurrentRound\r\nfrom NCOrifleData import NCO_kits\r\n\r\nDEBUG = 0\r\n\r\nclass NCOrifle(base):\r\n def round_start(self, hooker):\r\n self.watched_players = []\r\n self.choices = {}\r\n self.spawned = []\r\n self.spawned_dict = {}\r\n \r\n if not hooker.hasHook('RemoteCommand', self.onRemoteCommand):\r\n hooker.register('RemoteCommand', self.onRemoteCommand)\r\n hooker.register('PlayerSpawn', self.onPlayerSpawn)\r\n hooker.register('PickupKit', self.onPickupKit)\r\n if DEBUG: print 'NCOrifle: hooks registered'\r\n else:\r\n if DEBUG: print 'NCOrifle: hooks already registered'\r\n \r\n def onRemoteCommand(self, playerid, cmd):\r\n if not (cmd == 'ncosmg' or cmd == 'ncorifle' or cmd.startswith('selectkit')): return\r\n if playerid == -1: playerid = 255\r\n player = bf2.playerManager.getPlayerByIndex(playerid)\r\n if DEBUG: print 'NCOrifle: player %s executed rcon command \"%s\"' % (player.getName(), cmd)\r\n \r\n if cmd.startswith('selectkit'):\r\n if cmd.endswith('6'):\r\n self.addPlayer(player)\r\n else:\r\n self.removePlayer(player)\r\n \r\n if cmd == 'ncorifle':\r\n self.choices[player] = 'rifle'\r\n if DEBUG: print 'NCOrifle: player %s has chosen a rifle to spawn with' % player.getName() \r\n elif cmd == 'ncosmg':\r\n self.choices[player] = 'smg'\r\n if DEBUG: print 'NCOrifle: player %s has chosen an smg to spawn with' % player.getName()\r\n \r\n def onPickupKit(self, player, kit):\r\n if player not in self.spawned: return\r\n def_kit = self.getData(player)\r\n if def_kit is None: return\r\n if DEBUG: print 'Setting NCO kit back to default for team %d' % player.getTeam()\r\n self.setKit(def_kit, player.getTeam(), self.spawned_dict[player])\r\n self.spawned.remove(player)\r\n self.spawned_dict[player] = None\r\n \r\n def onPlayerSpawn(self, player, 
soldier):\r\n try:\r\n self._onPlayerSpawn(player, soldier)\r\n except Exception, e:\r\n print 'NCOrifle exception', e\r\n \r\n def getData(self, player):\r\n map, gamemode, size = getCurrentRound()\r\n if map in NCO_kits.keys():\r\n def_kit1, def_kit2 = NCO_kits[map]\r\n exec('def_kit = def_kit%d' % player.getTeam())\r\n return def_kit\r\n else:\r\n print 'NCOrifle: Can\\'t find NCO kit info for map %s. Update NCOrifleData.py or provide custom map info via mapdata.py' % map\r\n return None\r\n \r\n def _onPlayerSpawn(self, player, soldier):\r\n if player not in self.watched_players: return\r\n def_kit = None\r\n \r\n def_kit = self.getData(player)\r\n \r\n if def_kit is None: return\r\n \r\n if player not in self.choices.keys():\r\n self.setKit(def_kit, player.getTeam(), soldier.templateName)\r\n elif self.choices[player] == 'smg':\r\n self.setKit(def_kit, player.getTeam(), soldier.templateName)\r\n \r\n elif self.choices[player] == 'rifle':\r\n if DEBUG: print 'NCOrifle: player %s wants to spawn with a modified NCO kit...' % player.getName()\r\n kit = def_kit + '_rifle'\r\n self.setKit(kit, player.getTeam(), soldier.templateName)\r\n \r\n if player in self.spawned: return\r\n self.spawned.append(player)\r\n self.spawned_dict[player] = soldier.templateName\r\n \r\n def setKit(self, kit, team, soldier):\r\n rconExec('gameLogic.setKit %d 6 \"%s\" \"%s\"' % (team, kit, soldier))\r\n if DEBUG: print 'NCOrifle: Set NCO kit for team %d to %s, %s' % (team, kit, soldier)\r\n \r\n def addPlayer(self, player):\r\n if player not in self.watched_players:\r\n self.watched_players.append(player)\r\n if DEBUG: print 'NCOrifle: added player %s to watched players list' % player.getName()\r\n \r\n def removePlayer(self, player):\r\n if player in self.watched_players:\r\n self.watched_players.remove(player)\r\n if DEBUG: print 'NCOrifle: removed player %s from watched players list' % player.getName()\r\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
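NCOrifle.py above runs inside Battlefield 2's embedded Python 2 interpreter; `host`, `bf2` and the `hooker` object handed to `round_start` exist only in the game server, so the plugin cannot be exercised standalone. Purely to illustrate the registration flow that `round_start` depends on, here is a hypothetical stand-in whose `hasHook`/`register` names mirror the plugin's calls; its internals are an assumption, not the BF2 API.

from collections import defaultdict


class FakeHooker(object):
    """Hypothetical event hooker: register callbacks, then fire events."""

    def __init__(self):
        self._hooks = defaultdict(list)

    def hasHook(self, event, callback):
        return callback in self._hooks[event]

    def register(self, event, callback):
        if not self.hasHook(event, callback):
            self._hooks[event].append(callback)

    def fire(self, event, *args):
        for callback in self._hooks[event]:
            callback(*args)


hooker = FakeHooker()
hooker.register('RemoteCommand', lambda pid, cmd: None)
hooker.fire('RemoteCommand', 255, 'ncorifle')  # dispatches to the callback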
# The original began with a bare `def` and called an undefined `cal`;
# the body below is an assumed stand-in to make the snippet runnable.
def cal(a, b, c):
    print(a, b, c)

a = 10
b = 2
c = 3

cal(a, b, c)
|
normal
|
{
"blob_id": "1be5de71615eae6c9074e67b0dcaabbac4d82e2b",
"index": 9909,
"step-1": "def\n\na = 10\nb = 2\nc = 3\n\ncal(a,b,c)",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
class CharacterDropHeaderView(APIView):
"""
    Set of AJAX views for Characters.
This handles different API calls for character actions.
"""
authentication_classes = [SessionAuthentication]
permission_classes = [OwnsCharacter]
def post(self, request, format=None):
header_id = int(request.POST.get('header_id', 0))
character_id = int(request.POST.get('character_id', 0))
header = Header.objects.get(pk=header_id)
character = Character.objects.get(pk=character_id)
content = {'error': 'Header is not already bought!'}
status = None
content['header_list'] = []
if header in character.headers.all():
print(
f'Header present! Dropping and adding back in {header.cost} CP...'
)
character.cp_available += header.cost
character.cp_spent -= header.cost
character.headers.remove(header)
skill_item_template_string = render_to_string(
'characters/includes/character_skill_update_item.html', {
'header': header, 'header_skills': header.skills.all(),
'header_costs': character.skillhash[header.id]}, request)
content = {'success': header.cost}
else:
status = HTTP_412_PRECONDITION_FAILED
return Response(content, status)
class CharacterAddSkillView(APIView):
"""
    Set of AJAX views for Characters.
This handles different API calls for character actions.
"""
authentication_classes = [SessionAuthentication]
permission_classes = [OwnsCharacter]
def post(self, request, format=None):
skill_id = int(request.POST.get('skill_id', 0))
header_id = int(request.POST.get('header_id', 0))
character_id = int(request.POST.get('character_id', 0))
cp_available = int(request.POST.get('cp_available', 0))
        try:
            vector = int(request.POST.get('vector'))
        except (TypeError, ValueError):
            # .get() yields None when 'vector' is missing and int(None)
            # raises TypeError (not AttributeError); return a real DRF
            # Response so the client gets a well-formed error.
            return Response({'error': 'No change indicated'},
                HTTP_412_PRECONDITION_FAILED)
header_skill = HeaderSkill.objects.get(skill_id=skill_id, header_id
=header_id)
character = Character.objects.get(pk=character_id)
content = {'success': 'testing right now'}
status = None
if character.check_skill_prerequisites(header_skill.skill,
header_skill.header):
cost = character.skill_cost(header_skill) * vector
if cp_available - cost >= 0:
character_skill, created = (character.characterskills_set.
get_or_create(skill=header_skill))
if (character_skill.count and character_skill.count +
vector < 0):
content = {'error':
f"You don't have any points in {header_skill.skill}"}
status = HTTP_412_PRECONDITION_FAILED
else:
content = {'success': cost * -1}
character_skill.count = F('count') + vector
character_skill.save()
character.cp_spent = F('cp_spent') + cost
character.cp_available = F('cp_available') - cost
character.save()
else:
content = {'error':
"You don't have enough points available to purchase this skill . . ."
}
status = HTTP_412_PRECONDITION_FAILED
else:
status = HTTP_412_PRECONDITION_FAILED
return Response(content, status)
class CharacterDetailView(LoginRequiredMixin, UserPassesTestMixin, DetailView):
"""
Show the details for a character.
From here you can edit the details of a character or choose skills.
"""
model = Character
fields = '__all__'
def test_func(self):
if self.request.user.has_perm('players.view_any_player'):
return True
try:
player = Character.objects.get(pk=self.kwargs['pk']).player
return player.user == self.request.user
except Character.DoesNotExist:
return False
return False
class CharacterConceptApproveView(PermissionRequiredMixin, FormView):
"""
Approve the concept for a character.
Grant the CP for the character
Set the history approved flag.
"""
permission_required = 'players.change_any_player'
form_class = CharacterConceptApproveForm
def form_valid(self, form):
self.object = Character.objects.get(pk=form.cleaned_data[
'character_id'])
self.object.player.cp_available += 3
self.object.player.save(update_fields=['cp_available'])
self.object.concept_approved_flag = True
self.object.save(update_fields=['concept_approved_flag'])
messages.info(self.request, f'{self.object} concept approved!')
return super().form_valid(form)
def form_invalid(self, form):
self.object = Character.objects.get(pk=form.cleaned_data[
'character_id'])
for key, error in form.errors.items():
messages.error(self.request, error.as_text())
return HttpResponseRedirect(reverse('characters:character_detail',
kwargs={'pk': self.object.pk}))
def get_success_url(self):
return reverse('characters:character_detail', kwargs={'pk': self.
object.pk})
class CharacterHistoryApproveView(PermissionRequiredMixin, FormView):
"""
Approve the history for a character.
Grant the CP for the character
Set the history approved flag.
"""
permission_required = 'players.change_any_player'
form_class = CharacterHistoryApproveForm
def form_valid(self, form):
self.object = Character.objects.get(pk=form.cleaned_data[
'character_id'])
self.object.player.cp_available += 3
self.object.player.save(update_fields=['cp_available'])
self.object.history_approved_flag = True
self.object.save(update_fields=['history_approved_flag'])
messages.info(self.request, f'{self.object} history approved!')
return super().form_valid(form)
def form_invalid(self, form):
self.object = Character.objects.get(pk=form.cleaned_data[
'character_id'])
for key, error in form.errors.items():
messages.error(self.request, error.as_text())
return HttpResponseRedirect(reverse('characters:character_detail',
kwargs={'pk': self.object.pk}))
def get_success_url(self):
return reverse('characters:character_detail', kwargs={'pk': self.
object.pk})
class CharacterListView(LoginRequiredMixin, ListView):
"""
Show the list of characters.
From here, you can view, edit, delete a character.
"""
model = Character
paginate_by = 25
def get_queryset(self):
queryset = super().get_queryset()
criteria = self.request.GET.get('criteria', '')
if criteria.strip():
entry_query = get_query(criteria, ['name', 'description',
'concept', 'history', 'player_notes'])
queryset = queryset.filter(entry_query)
history_approved_flag = self.request.GET.get('history_approved_flag',
False)
if history_approved_flag:
queryset = queryset.filter(history_approved_flag=True)
concept_approved_flag = self.request.GET.get('concept_approved_flag',
False)
if concept_approved_flag:
queryset = queryset.filter(concept_approved_flag=True)
return queryset
def get_context_data(self, **kwargs):
"""
Add the form so we can filter the characters.
"""
context_data = super().get_context_data(**kwargs)
context_data.update(**self.request.GET)
return context_data
class CharacterPrintListView(LoginRequiredMixin, ListView):
"""
Show a list of characters to print.
"""
model = Character
template_name = 'characters/character_print_list.html'
def get_queryset(self):
queryset = super().get_queryset()
event_id = self.kwargs.get('event_id', None)
if not event_id:
event_id = Event.next_event().id
player_ids = Registration.objects.filter(event__id=event_id
).values_list('player_id', flat=True)
queryset = queryset.filter(player__id__in=player_ids, npc_flag=
False, active_flag=True)
return queryset
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class CharacterSkillUpdateView(LoginRequiredMixin, UserPassesTestMixin,
FormMixin, DetailView):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def test_func(self):
if self.request.user.has_perm('players.view_any_player'):
return True
try:
player = Character.objects.get(pk=self.kwargs['pk']).player
return player.user == self.request.user
except Character.DoesNotExist:
return False
return False
def get_success_url(self):
return reverse('characters:character_detail', kwargs={'pk': self.
object.pk})
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
self.skills = Header.objects.order_by('hidden_flag', 'category', 'name'
).all()
kwargs.update({'skills': self.skills})
return kwargs
def get_context_data(self, **kwargs):
context = super().get_context_data(**self.kwargs)
available_skills = self.object.skillhash.keys()
context['skills'] = filter(lambda x: x.id in available_skills or
self.request.user.has_perm('player.view_any_player'), self.skills)
context['skill_hash'] = self.object.skillhash
context['granted_skills'] = self.object.skill_grants
return context
def post(self, request, *args, **kwargs):
self.object = self.get_object()
form = self.get_form()
if form.is_valid():
return self.form_valid(form)
else:
return self.form_invalid(form)
def form_valid(self, form):
"""
Form is valid. Save the skills to that character and remove the
appropriate number of characters points.
"""
return super().form_valid(form)
class ResetPointsView(PermissionRequiredMixin, View):
"""
Resets the points for the season.
"""
permission_required = 'characters.reset_points',
def get(self, request, *args, **kwargs):
"""
Send the user back to the the originating page or back to the main
page if the referrer isn't set.
"""
Character.objects.all().update(cp_transferred=0)
messages.info(self.request, 'Point cap reset!')
return HttpResponseRedirect(self.request.META.get('HTTP_REFERER', '/'))
<|reserved_special_token_0|>
class CharacterAddHeaderView(APIView):
"""
    Set of AJAX views for Characters.
This handles different API calls for character actions.
"""
authentication_classes = [SessionAuthentication]
permission_classes = [OwnsCharacter]
def post(self, request, format=None):
header_id = int(request.POST.get('header_id', 0))
character_id = int(request.POST.get('character_id', 0))
cp_available = int(request.POST.get('cp_available', 0))
header = Header.objects.get(pk=header_id)
character = Character.objects.get(pk=character_id)
content = {'error': 'prerequisites not met'}
status = None
if character.check_header_prerequisites(header):
if cp_available - header.cost >= 0:
character.cp_available -= header.cost
character.cp_spent += header.cost
character.headers.add(header)
character.save()
skill_item_template_string = render_to_string(
'characters/includes/character_skill_update_item.html',
{'header': header, 'header_skills': header.skills.all(),
'header_costs': character.skillhash[header.id]}, request)
content = {'success': header.cost * -1, 'skills':
skill_item_template_string}
else:
content = {'error':
"You don't have enough points available for this character to add this header."
}
status = HTTP_412_PRECONDITION_FAILED
else:
status = HTTP_412_PRECONDITION_FAILED
return Response(content, status)
class CharacterDropHeaderView(APIView):
"""
    Set of AJAX views for Characters.
This handles different API calls for character actions.
"""
authentication_classes = [SessionAuthentication]
permission_classes = [OwnsCharacter]
def post(self, request, format=None):
header_id = int(request.POST.get('header_id', 0))
character_id = int(request.POST.get('character_id', 0))
header = Header.objects.get(pk=header_id)
character = Character.objects.get(pk=character_id)
content = {'error': 'Header is not already bought!'}
status = None
content['header_list'] = []
if header in character.headers.all():
print(
f'Header present! Dropping and adding back in {header.cost} CP...'
)
character.cp_available += header.cost
character.cp_spent -= header.cost
character.headers.remove(header)
skill_item_template_string = render_to_string(
'characters/includes/character_skill_update_item.html', {
'header': header, 'header_skills': header.skills.all(),
'header_costs': character.skillhash[header.id]}, request)
content = {'success': header.cost}
else:
status = HTTP_412_PRECONDITION_FAILED
return Response(content, status)
class CharacterAddSkillView(APIView):
"""
    Set of AJAX views for Characters.
This handles different API calls for character actions.
"""
authentication_classes = [SessionAuthentication]
permission_classes = [OwnsCharacter]
def post(self, request, format=None):
skill_id = int(request.POST.get('skill_id', 0))
header_id = int(request.POST.get('header_id', 0))
character_id = int(request.POST.get('character_id', 0))
cp_available = int(request.POST.get('cp_available', 0))
        try:
            vector = int(request.POST.get('vector'))
        except (TypeError, ValueError):
            # .get() yields None when 'vector' is missing and int(None)
            # raises TypeError (not AttributeError); return a real DRF
            # Response so the client gets a well-formed error.
            return Response({'error': 'No change indicated'},
                HTTP_412_PRECONDITION_FAILED)
header_skill = HeaderSkill.objects.get(skill_id=skill_id, header_id
=header_id)
character = Character.objects.get(pk=character_id)
content = {'success': 'testing right now'}
status = None
if character.check_skill_prerequisites(header_skill.skill,
header_skill.header):
cost = character.skill_cost(header_skill) * vector
if cp_available - cost >= 0:
character_skill, created = (character.characterskills_set.
get_or_create(skill=header_skill))
if (character_skill.count and character_skill.count +
vector < 0):
content = {'error':
f"You don't have any points in {header_skill.skill}"}
status = HTTP_412_PRECONDITION_FAILED
else:
content = {'success': cost * -1}
character_skill.count = F('count') + vector
character_skill.save()
character.cp_spent = F('cp_spent') + cost
character.cp_available = F('cp_available') - cost
character.save()
else:
content = {'error':
"You don't have enough points available to purchase this skill . . ."
}
status = HTTP_412_PRECONDITION_FAILED
else:
status = HTTP_412_PRECONDITION_FAILED
return Response(content, status)
class CharacterDetailView(LoginRequiredMixin, UserPassesTestMixin, DetailView):
"""
Show the details for a character.
From here you can edit the details of a character or choose skills.
"""
model = Character
fields = '__all__'
def test_func(self):
if self.request.user.has_perm('players.view_any_player'):
return True
try:
player = Character.objects.get(pk=self.kwargs['pk']).player
return player.user == self.request.user
except Character.DoesNotExist:
return False
return False
class CharacterConceptApproveView(PermissionRequiredMixin, FormView):
"""
Approve the concept for a character.
Grant the CP for the character
Set the history approved flag.
"""
permission_required = 'players.change_any_player'
form_class = CharacterConceptApproveForm
def form_valid(self, form):
self.object = Character.objects.get(pk=form.cleaned_data[
'character_id'])
self.object.player.cp_available += 3
self.object.player.save(update_fields=['cp_available'])
self.object.concept_approved_flag = True
self.object.save(update_fields=['concept_approved_flag'])
messages.info(self.request, f'{self.object} concept approved!')
return super().form_valid(form)
def form_invalid(self, form):
self.object = Character.objects.get(pk=form.cleaned_data[
'character_id'])
for key, error in form.errors.items():
messages.error(self.request, error.as_text())
return HttpResponseRedirect(reverse('characters:character_detail',
kwargs={'pk': self.object.pk}))
def get_success_url(self):
return reverse('characters:character_detail', kwargs={'pk': self.
object.pk})
class CharacterHistoryApproveView(PermissionRequiredMixin, FormView):
"""
Approve the history for a character.
Grant the CP for the character
Set the history approved flag.
"""
permission_required = 'players.change_any_player'
form_class = CharacterHistoryApproveForm
def form_valid(self, form):
self.object = Character.objects.get(pk=form.cleaned_data[
'character_id'])
self.object.player.cp_available += 3
self.object.player.save(update_fields=['cp_available'])
self.object.history_approved_flag = True
self.object.save(update_fields=['history_approved_flag'])
messages.info(self.request, f'{self.object} history approved!')
return super().form_valid(form)
def form_invalid(self, form):
self.object = Character.objects.get(pk=form.cleaned_data[
'character_id'])
for key, error in form.errors.items():
messages.error(self.request, error.as_text())
return HttpResponseRedirect(reverse('characters:character_detail',
kwargs={'pk': self.object.pk}))
def get_success_url(self):
return reverse('characters:character_detail', kwargs={'pk': self.
object.pk})
class CharacterListView(LoginRequiredMixin, ListView):
"""
Show the list of characters.
From here, you can view, edit, delete a character.
"""
model = Character
paginate_by = 25
def get_queryset(self):
queryset = super().get_queryset()
criteria = self.request.GET.get('criteria', '')
if criteria.strip():
entry_query = get_query(criteria, ['name', 'description',
'concept', 'history', 'player_notes'])
queryset = queryset.filter(entry_query)
history_approved_flag = self.request.GET.get('history_approved_flag',
False)
if history_approved_flag:
queryset = queryset.filter(history_approved_flag=True)
concept_approved_flag = self.request.GET.get('concept_approved_flag',
False)
if concept_approved_flag:
queryset = queryset.filter(concept_approved_flag=True)
return queryset
def get_context_data(self, **kwargs):
"""
Add the form so we can filter the characters.
"""
context_data = super().get_context_data(**kwargs)
context_data.update(**self.request.GET)
return context_data
class CharacterPrintListView(LoginRequiredMixin, ListView):
"""
Show a list of characters to print.
"""
model = Character
template_name = 'characters/character_print_list.html'
def get_queryset(self):
queryset = super().get_queryset()
event_id = self.kwargs.get('event_id', None)
if not event_id:
event_id = Event.next_event().id
player_ids = Registration.objects.filter(event__id=event_id
).values_list('player_id', flat=True)
queryset = queryset.filter(player__id__in=player_ids, npc_flag=
False, active_flag=True)
return queryset
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class CharacterResetView(PermissionRequiredMixin, UserPassesTestMixin, View):
<|reserved_special_token_0|>
model = Character
permission_required = 'characters.change_character',
success_url = reverse_lazy('characters:character_list')
def test_func(self):
if self.request.user.has_perm('players.view_any_player'):
return True
try:
player = Character.objects.get(pk=self.kwargs['pk']).player
return player.user == self.request.user
except Character.DoesNotExist:
return False
return False
def get(self, request, *args, **kwargs):
"""
Send the user back to the the originating page or back to the
character they are setting active
"""
with transaction.atomic():
character = self.model.objects.get(pk=self.kwargs['pk'])
character.cp_available += character.cp_spent
character.cp_spent = 0
character.save(update_fields=['cp_available', 'cp_spent'])
character.characterskills_set.all().delete()
character.headers.clear()
messages.info(self.request, 'Character skills reset for {}.'.format
(character.name))
return HttpResponseRedirect(self.request.META.get('HTTP_REFERER',
reverse('characters:character_detail', kwargs={'pk': self.
kwargs['pk']})))
class CharacterSetActiveView(LoginRequiredMixin, UserPassesTestMixin, View):
"""
Set the active character for the characters player to the sent id.
"""
model = Character
fields = '__all__'
def test_func(self):
if self.request.user.has_perm('players.view_any_player'):
return True
try:
player = Character.objects.get(pk=self.kwargs['pk']).player
return player.user == self.request.user
except Character.DoesNotExist:
return False
return False
def get(self, request, *args, **kwargs):
"""
Send the user back to the the originating page or back to the
character they are setting active
"""
character = self.model.objects.get(pk=self.kwargs['pk'])
character.player.character_set.update(active_flag=False)
character.active_flag = True
character.save()
messages.info(self.request, 'Active Character changed to {}.'.
format(character.name))
return HttpResponseRedirect(self.request.META.get('HTTP_REFERER',
reverse('characters:character_detail', kwargs={'pk': self.
kwargs['pk']})))
class CharacterSkillUpdateView(LoginRequiredMixin, UserPassesTestMixin,
FormMixin, DetailView):
"""
Allow a user to update their chosen skills
"""
template_name = 'characters/character_skill_form.html'
form_class = CharacterSkillForm
model = Character
def test_func(self):
if self.request.user.has_perm('players.view_any_player'):
return True
try:
player = Character.objects.get(pk=self.kwargs['pk']).player
return player.user == self.request.user
except Character.DoesNotExist:
return False
return False
def get_success_url(self):
return reverse('characters:character_detail', kwargs={'pk': self.
object.pk})
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
self.skills = Header.objects.order_by('hidden_flag', 'category', 'name'
).all()
kwargs.update({'skills': self.skills})
return kwargs
def get_context_data(self, **kwargs):
context = super().get_context_data(**self.kwargs)
available_skills = self.object.skillhash.keys()
context['skills'] = filter(lambda x: x.id in available_skills or
self.request.user.has_perm('player.view_any_player'), self.skills)
context['skill_hash'] = self.object.skillhash
context['granted_skills'] = self.object.skill_grants
return context
def post(self, request, *args, **kwargs):
self.object = self.get_object()
form = self.get_form()
if form.is_valid():
return self.form_valid(form)
else:
return self.form_invalid(form)
def form_valid(self, form):
"""
Form is valid. Save the skills to that character and remove the
appropriate number of characters points.
"""
return super().form_valid(form)
class ResetPointsView(PermissionRequiredMixin, View):
"""
Resets the points for the season.
"""
permission_required = 'characters.reset_points',
def get(self, request, *args, **kwargs):
"""
Send the user back to the the originating page or back to the main
page if the referrer isn't set.
"""
Character.objects.all().update(cp_transferred=0)
messages.info(self.request, 'Point cap reset!')
return HttpResponseRedirect(self.request.META.get('HTTP_REFERER', '/'))
<|reserved_special_token_0|>
class CharacterAddHeaderView(APIView):
"""
    Set of AJAX views for Characters.
This handles different API calls for character actions.
"""
authentication_classes = [SessionAuthentication]
permission_classes = [OwnsCharacter]
def post(self, request, format=None):
header_id = int(request.POST.get('header_id', 0))
character_id = int(request.POST.get('character_id', 0))
cp_available = int(request.POST.get('cp_available', 0))
header = Header.objects.get(pk=header_id)
character = Character.objects.get(pk=character_id)
content = {'error': 'prerequisites not met'}
status = None
if character.check_header_prerequisites(header):
if cp_available - header.cost >= 0:
character.cp_available -= header.cost
character.cp_spent += header.cost
character.headers.add(header)
character.save()
skill_item_template_string = render_to_string(
'characters/includes/character_skill_update_item.html',
{'header': header, 'header_skills': header.skills.all(),
'header_costs': character.skillhash[header.id]}, request)
content = {'success': header.cost * -1, 'skills':
skill_item_template_string}
else:
content = {'error':
"You don't have enough points available for this character to add this header."
}
status = HTTP_412_PRECONDITION_FAILED
else:
status = HTTP_412_PRECONDITION_FAILED
return Response(content, status)
class CharacterDropHeaderView(APIView):
"""
    Set of AJAX views for Characters.
This handles different API calls for character actions.
"""
authentication_classes = [SessionAuthentication]
permission_classes = [OwnsCharacter]
def post(self, request, format=None):
header_id = int(request.POST.get('header_id', 0))
character_id = int(request.POST.get('character_id', 0))
header = Header.objects.get(pk=header_id)
character = Character.objects.get(pk=character_id)
content = {'error': 'Header is not already bought!'}
status = None
content['header_list'] = []
if header in character.headers.all():
print(
f'Header present! Dropping and adding back in {header.cost} CP...'
)
character.cp_available += header.cost
character.cp_spent -= header.cost
character.headers.remove(header)
skill_item_template_string = render_to_string(
'characters/includes/character_skill_update_item.html', {
'header': header, 'header_skills': header.skills.all(),
'header_costs': character.skillhash[header.id]}, request)
content = {'success': header.cost}
else:
status = HTTP_412_PRECONDITION_FAILED
return Response(content, status)
class CharacterAddSkillView(APIView):
"""
    Set of AJAX views for Characters.
This handles different API calls for character actions.
"""
authentication_classes = [SessionAuthentication]
permission_classes = [OwnsCharacter]
def post(self, request, format=None):
skill_id = int(request.POST.get('skill_id', 0))
header_id = int(request.POST.get('header_id', 0))
character_id = int(request.POST.get('character_id', 0))
cp_available = int(request.POST.get('cp_available', 0))
        try:
            vector = int(request.POST.get('vector'))
        except (TypeError, ValueError):
            # .get() yields None when 'vector' is missing and int(None)
            # raises TypeError (not AttributeError); return a real DRF
            # Response so the client gets a well-formed error.
            return Response({'error': 'No change indicated'},
                HTTP_412_PRECONDITION_FAILED)
header_skill = HeaderSkill.objects.get(skill_id=skill_id, header_id
=header_id)
character = Character.objects.get(pk=character_id)
content = {'success': 'testing right now'}
status = None
if character.check_skill_prerequisites(header_skill.skill,
header_skill.header):
cost = character.skill_cost(header_skill) * vector
if cp_available - cost >= 0:
character_skill, created = (character.characterskills_set.
get_or_create(skill=header_skill))
if (character_skill.count and character_skill.count +
vector < 0):
content = {'error':
f"You don't have any points in {header_skill.skill}"}
status = HTTP_412_PRECONDITION_FAILED
else:
content = {'success': cost * -1}
character_skill.count = F('count') + vector
character_skill.save()
character.cp_spent = F('cp_spent') + cost
character.cp_available = F('cp_available') - cost
character.save()
else:
content = {'error':
"You don't have enough points available to purchase this skill . . ."
}
status = HTTP_412_PRECONDITION_FAILED
else:
status = HTTP_412_PRECONDITION_FAILED
return Response(content, status)
class CharacterDetailView(LoginRequiredMixin, UserPassesTestMixin, DetailView):
"""
Show the details for a character.
From here you can edit the details of a character or choose skills.
"""
model = Character
fields = '__all__'
def test_func(self):
if self.request.user.has_perm('players.view_any_player'):
return True
try:
player = Character.objects.get(pk=self.kwargs['pk']).player
return player.user == self.request.user
except Character.DoesNotExist:
return False
return False
class CharacterConceptApproveView(PermissionRequiredMixin, FormView):
"""
Approve the concept for a character.
Grant the CP for the character
Set the history approved flag.
"""
permission_required = 'players.change_any_player'
form_class = CharacterConceptApproveForm
def form_valid(self, form):
self.object = Character.objects.get(pk=form.cleaned_data[
'character_id'])
self.object.player.cp_available += 3
self.object.player.save(update_fields=['cp_available'])
self.object.concept_approved_flag = True
self.object.save(update_fields=['concept_approved_flag'])
messages.info(self.request, f'{self.object} concept approved!')
return super().form_valid(form)
def form_invalid(self, form):
self.object = Character.objects.get(pk=form.cleaned_data[
'character_id'])
for key, error in form.errors.items():
messages.error(self.request, error.as_text())
return HttpResponseRedirect(reverse('characters:character_detail',
kwargs={'pk': self.object.pk}))
def get_success_url(self):
return reverse('characters:character_detail', kwargs={'pk': self.
object.pk})
class CharacterHistoryApproveView(PermissionRequiredMixin, FormView):
"""
Approve the history for a character.
Grant the CP for the character
Set the history approved flag.
"""
permission_required = 'players.change_any_player'
form_class = CharacterHistoryApproveForm
def form_valid(self, form):
self.object = Character.objects.get(pk=form.cleaned_data[
'character_id'])
self.object.player.cp_available += 3
self.object.player.save(update_fields=['cp_available'])
self.object.history_approved_flag = True
self.object.save(update_fields=['history_approved_flag'])
messages.info(self.request, f'{self.object} history approved!')
return super().form_valid(form)
def form_invalid(self, form):
self.object = Character.objects.get(pk=form.cleaned_data[
'character_id'])
for key, error in form.errors.items():
messages.error(self.request, error.as_text())
return HttpResponseRedirect(reverse('characters:character_detail',
kwargs={'pk': self.object.pk}))
def get_success_url(self):
return reverse('characters:character_detail', kwargs={'pk': self.
object.pk})
class CharacterListView(LoginRequiredMixin, ListView):
"""
Show the list of characters.
From here, you can view, edit, delete a character.
"""
model = Character
paginate_by = 25
def get_queryset(self):
queryset = super().get_queryset()
criteria = self.request.GET.get('criteria', '')
if criteria.strip():
entry_query = get_query(criteria, ['name', 'description',
'concept', 'history', 'player_notes'])
queryset = queryset.filter(entry_query)
history_approved_flag = self.request.GET.get('history_approved_flag',
False)
if history_approved_flag:
queryset = queryset.filter(history_approved_flag=True)
concept_approved_flag = self.request.GET.get('concept_approved_flag',
False)
if concept_approved_flag:
queryset = queryset.filter(concept_approved_flag=True)
return queryset
def get_context_data(self, **kwargs):
"""
Add the form so we can filter the characters.
"""
context_data = super().get_context_data(**kwargs)
context_data.update(**self.request.GET)
return context_data
class CharacterPrintListView(LoginRequiredMixin, ListView):
"""
Show a list of characters to print.
"""
model = Character
template_name = 'characters/character_print_list.html'
def get_queryset(self):
queryset = super().get_queryset()
event_id = self.kwargs.get('event_id', None)
if not event_id:
event_id = Event.next_event().id
player_ids = Registration.objects.filter(event__id=event_id
).values_list('player_id', flat=True)
queryset = queryset.filter(player__id__in=player_ids, npc_flag=
False, active_flag=True)
return queryset
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class CharacterUpdateView(LoginRequiredMixin, UserPassesTestMixin, UpdateView):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def test_func(self):
if self.request.user.has_perm('players.view_any_player'):
return True
try:
player = Character.objects.get(pk=self.kwargs['pk']).player
return player.user == self.request.user
except Character.DoesNotExist:
return False
return False
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs['user'] = self.request.user
return kwargs
def get_success_url(self):
return reverse('characters:character_detail', kwargs={'pk': self.
object.pk})
class CharacterDeleteView(PermissionRequiredMixin, UserPassesTestMixin,
DeleteView):
"""
Removes a character permanantly.
Removing a character may have strange effects on other views.
"""
model = Character
permission_required = 'characters.change_character',
success_url = reverse_lazy('characters:character_list')
def test_func(self):
if self.request.user.has_perm('players.view_any_player'):
return True
try:
player = Character.objects.get(pk=self.kwargs['pk']).player
return player.user == self.request.user
except Character.DoesNotExist:
return False
return False
class CharacterResetView(PermissionRequiredMixin, UserPassesTestMixin, View):
"""
Resets a characters skills to none and returns their points to them.
"""
model = Character
permission_required = 'characters.change_character',
success_url = reverse_lazy('characters:character_list')
def test_func(self):
if self.request.user.has_perm('players.view_any_player'):
return True
try:
player = Character.objects.get(pk=self.kwargs['pk']).player
return player.user == self.request.user
except Character.DoesNotExist:
return False
return False
def get(self, request, *args, **kwargs):
"""
Send the user back to the the originating page or back to the
character they are setting active
"""
with transaction.atomic():
character = self.model.objects.get(pk=self.kwargs['pk'])
character.cp_available += character.cp_spent
character.cp_spent = 0
character.save(update_fields=['cp_available', 'cp_spent'])
character.characterskills_set.all().delete()
character.headers.clear()
messages.info(self.request, 'Character skills reset for {}.'.format
(character.name))
return HttpResponseRedirect(self.request.META.get('HTTP_REFERER',
reverse('characters:character_detail', kwargs={'pk': self.
kwargs['pk']})))
class CharacterSetActiveView(LoginRequiredMixin, UserPassesTestMixin, View):
"""
Set the active character for the characters player to the sent id.
"""
model = Character
fields = '__all__'
def test_func(self):
if self.request.user.has_perm('players.view_any_player'):
return True
try:
player = Character.objects.get(pk=self.kwargs['pk']).player
return player.user == self.request.user
except Character.DoesNotExist:
return False
return False
def get(self, request, *args, **kwargs):
"""
Send the user back to the the originating page or back to the
character they are setting active
"""
character = self.model.objects.get(pk=self.kwargs['pk'])
character.player.character_set.update(active_flag=False)
character.active_flag = True
character.save()
messages.info(self.request, 'Active Character changed to {}.'.
format(character.name))
return HttpResponseRedirect(self.request.META.get('HTTP_REFERER',
reverse('characters:character_detail', kwargs={'pk': self.
kwargs['pk']})))
class CharacterSkillUpdateView(LoginRequiredMixin, UserPassesTestMixin,
FormMixin, DetailView):
"""
Allow a user to update their chosen skills
"""
template_name = 'characters/character_skill_form.html'
form_class = CharacterSkillForm
model = Character
def test_func(self):
if self.request.user.has_perm('players.view_any_player'):
return True
try:
player = Character.objects.get(pk=self.kwargs['pk']).player
return player.user == self.request.user
except Character.DoesNotExist:
return False
return False
def get_success_url(self):
return reverse('characters:character_detail', kwargs={'pk': self.
object.pk})
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
self.skills = Header.objects.order_by('hidden_flag', 'category', 'name'
).all()
kwargs.update({'skills': self.skills})
return kwargs
def get_context_data(self, **kwargs):
context = super().get_context_data(**self.kwargs)
available_skills = self.object.skillhash.keys()
context['skills'] = filter(lambda x: x.id in available_skills or
self.request.user.has_perm('player.view_any_player'), self.skills)
context['skill_hash'] = self.object.skillhash
context['granted_skills'] = self.object.skill_grants
return context
def post(self, request, *args, **kwargs):
self.object = self.get_object()
form = self.get_form()
if form.is_valid():
return self.form_valid(form)
else:
return self.form_invalid(form)
def form_valid(self, form):
"""
Form is valid. Save the skills to that character and remove the
appropriate number of characters points.
"""
return super().form_valid(form)
class ResetPointsView(PermissionRequiredMixin, View):
"""
Resets the points for the season.
"""
permission_required = 'characters.reset_points',
def get(self, request, *args, **kwargs):
"""
Send the user back to the the originating page or back to the main
page if the referrer isn't set.
"""
Character.objects.all().update(cp_transferred=0)
messages.info(self.request, 'Point cap reset!')
return HttpResponseRedirect(self.request.META.get('HTTP_REFERER', '/'))
<|reserved_special_token_0|>
class CharacterAddHeaderView(APIView):
"""
Set of AJAX views for a Characters
This handles different API calls for character actions.
"""
authentication_classes = [SessionAuthentication]
permission_classes = [OwnsCharacter]
def post(self, request, format=None):
header_id = int(request.POST.get('header_id', 0))
character_id = int(request.POST.get('character_id', 0))
cp_available = int(request.POST.get('cp_available', 0))
header = Header.objects.get(pk=header_id)
character = Character.objects.get(pk=character_id)
content = {'error': 'prerequisites not met'}
status = None
if character.check_header_prerequisites(header):
if cp_available - header.cost >= 0:
character.cp_available -= header.cost
character.cp_spent += header.cost
character.headers.add(header)
character.save()
skill_item_template_string = render_to_string(
'characters/includes/character_skill_update_item.html',
{'header': header, 'header_skills': header.skills.all(),
'header_costs': character.skillhash[header.id]}, request)
content = {'success': header.cost * -1, 'skills':
skill_item_template_string}
else:
content = {'error':
"You don't have enough points available for this character to add this header."
}
status = HTTP_412_PRECONDITION_FAILED
else:
status = HTTP_412_PRECONDITION_FAILED
return Response(content, status)
class CharacterDropHeaderView(APIView):
"""
Set of AJAX views for a Characters
This handles different API calls for character actions.
"""
authentication_classes = [SessionAuthentication]
permission_classes = [OwnsCharacter]
def post(self, request, format=None):
header_id = int(request.POST.get('header_id', 0))
character_id = int(request.POST.get('character_id', 0))
header = Header.objects.get(pk=header_id)
character = Character.objects.get(pk=character_id)
content = {'error': 'Header is not already bought!'}
status = None
content['header_list'] = []
if header in character.headers.all():
print(
f'Header present! Dropping and adding back in {header.cost} CP...'
)
character.cp_available += header.cost
character.cp_spent -= header.cost
character.headers.remove(header)
skill_item_template_string = render_to_string(
'characters/includes/character_skill_update_item.html', {
'header': header, 'header_skills': header.skills.all(),
'header_costs': character.skillhash[header.id]}, request)
content = {'success': header.cost}
else:
status = HTTP_412_PRECONDITION_FAILED
return Response(content, status)
class CharacterAddSkillView(APIView):
"""
Set of AJAX views for a Characters
This handles different API calls for character actions.
"""
authentication_classes = [SessionAuthentication]
permission_classes = [OwnsCharacter]
def post(self, request, format=None):
skill_id = int(request.POST.get('skill_id', 0))
header_id = int(request.POST.get('header_id', 0))
character_id = int(request.POST.get('character_id', 0))
cp_available = int(request.POST.get('cp_available', 0))
try:
vector = int(request.POST.get('vector'))
except AttributeError:
return {'error': 'No change indicated'}
header_skill = HeaderSkill.objects.get(skill_id=skill_id, header_id
=header_id)
character = Character.objects.get(pk=character_id)
content = {'success': 'testing right now'}
status = None
if character.check_skill_prerequisites(header_skill.skill,
header_skill.header):
cost = character.skill_cost(header_skill) * vector
if cp_available - cost >= 0:
character_skill, created = (character.characterskills_set.
get_or_create(skill=header_skill))
if (character_skill.count and character_skill.count +
vector < 0):
content = {'error':
f"You don't have any points in {header_skill.skill}"}
status = HTTP_412_PRECONDITION_FAILED
else:
content = {'success': cost * -1}
character_skill.count = F('count') + vector
character_skill.save()
character.cp_spent = F('cp_spent') + cost
character.cp_available = F('cp_available') - cost
character.save()
else:
content = {'error':
"You don't have enough points available to purchase this skill . . ."
}
status = HTTP_412_PRECONDITION_FAILED
else:
status = HTTP_412_PRECONDITION_FAILED
return Response(content, status)
class CharacterDetailView(LoginRequiredMixin, UserPassesTestMixin, DetailView):
"""
Show the details for a character.
From here you can edit the details of a character or choose skills.
"""
model = Character
fields = '__all__'
def test_func(self):
if self.request.user.has_perm('players.view_any_player'):
return True
try:
player = Character.objects.get(pk=self.kwargs['pk']).player
return player.user == self.request.user
except Character.DoesNotExist:
return False
return False
class CharacterConceptApproveView(PermissionRequiredMixin, FormView):
"""
Approve the concept for a character.
Grant the CP for the character
Set the history approved flag.
"""
permission_required = 'players.change_any_player'
form_class = CharacterConceptApproveForm
def form_valid(self, form):
self.object = Character.objects.get(pk=form.cleaned_data[
'character_id'])
self.object.player.cp_available += 3
self.object.player.save(update_fields=['cp_available'])
self.object.concept_approved_flag = True
self.object.save(update_fields=['concept_approved_flag'])
messages.info(self.request, f'{self.object} concept approved!')
return super().form_valid(form)
def form_invalid(self, form):
self.object = Character.objects.get(pk=form.cleaned_data[
'character_id'])
for key, error in form.errors.items():
messages.error(self.request, error.as_text())
return HttpResponseRedirect(reverse('characters:character_detail',
kwargs={'pk': self.object.pk}))
def get_success_url(self):
return reverse('characters:character_detail', kwargs={'pk': self.
object.pk})
class CharacterHistoryApproveView(PermissionRequiredMixin, FormView):
"""
Approve the history for a character.
Grant the CP for the character
Set the history approved flag.
"""
permission_required = 'players.change_any_player'
form_class = CharacterHistoryApproveForm
def form_valid(self, form):
self.object = Character.objects.get(pk=form.cleaned_data[
'character_id'])
self.object.player.cp_available += 3
self.object.player.save(update_fields=['cp_available'])
self.object.history_approved_flag = True
self.object.save(update_fields=['history_approved_flag'])
messages.info(self.request, f'{self.object} history approved!')
return super().form_valid(form)
def form_invalid(self, form):
self.object = Character.objects.get(pk=form.cleaned_data[
'character_id'])
for key, error in form.errors.items():
messages.error(self.request, error.as_text())
return HttpResponseRedirect(reverse('characters:character_detail',
kwargs={'pk': self.object.pk}))
def get_success_url(self):
return reverse('characters:character_detail', kwargs={'pk': self.
object.pk})
class CharacterListView(LoginRequiredMixin, ListView):
"""
Show the list of characters.
From here, you can view, edit, delete a character.
"""
model = Character
paginate_by = 25
def get_queryset(self):
queryset = super().get_queryset()
criteria = self.request.GET.get('criteria', '')
if criteria.strip():
entry_query = get_query(criteria, ['name', 'description',
'concept', 'history', 'player_notes'])
queryset = queryset.filter(entry_query)
history_approved_flag = self.request.GET.get('history_approved_flag',
False)
if history_approved_flag:
queryset = queryset.filter(history_approved_flag=True)
concept_approved_flag = self.request.GET.get('concept_approved_flag',
False)
if concept_approved_flag:
queryset = queryset.filter(concept_approved_flag=True)
return queryset
def get_context_data(self, **kwargs):
"""
Add the form so we can filter the characters.
"""
context_data = super().get_context_data(**kwargs)
context_data.update(**self.request.GET)
return context_data
class CharacterPrintListView(LoginRequiredMixin, ListView):
"""
Show a list of characters to print.
"""
model = Character
template_name = 'characters/character_print_list.html'
def get_queryset(self):
queryset = super().get_queryset()
event_id = self.kwargs.get('event_id', None)
if not event_id:
event_id = Event.next_event().id
player_ids = Registration.objects.filter(event__id=event_id
).values_list('player_id', flat=True)
queryset = queryset.filter(player__id__in=player_ids, npc_flag=
False, active_flag=True)
return queryset
<|reserved_special_token_1|>
"""These are views that are used for viewing and editing characters."""
from django.contrib import messages
from django.contrib.auth.mixins import UserPassesTestMixin,\
LoginRequiredMixin, PermissionRequiredMixin
from django.db import transaction
from django.db.models import F
from django.http import HttpResponseRedirect
from django.template.loader import render_to_string
from django.urls import reverse, reverse_lazy
from django.views import View
from django.views.generic.edit import FormMixin, CreateView, UpdateView
from django.views.generic import DeleteView, DetailView, FormView, ListView
from rest_framework.status import HTTP_412_PRECONDITION_FAILED
from rest_framework.authentication import SessionAuthentication
from rest_framework.permissions import BasePermission
from rest_framework.response import Response
from rest_framework.views import APIView
from talesofvalor import get_query
from talesofvalor.events.models import Event
from talesofvalor.players.models import Registration
from talesofvalor.skills.models import Header, HeaderSkill
from .models import Character
from .forms import CharacterForm, CharacterSkillForm,\
CharacterConceptApproveForm, CharacterHistoryApproveForm
class OwnsCharacter(BasePermission):
"""
    The current user is staff or owns the character that is being manipulated.
"""
message = "You don't own this character"
    def has_object_permission(self, request, view, obj):
        # BasePermission has no self.request or self.kwargs; use the request
        # and object that DRF passes in.
        if request.user.has_perm('players.view_any_player'):
            return True
        return obj.player.user == request.user
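# Note: plain APIView subclasses (like the AJAX views below) never invoke
# has_object_permission on their own; DRF only runs object-level checks when
# a view calls self.check_object_permissions(request, obj), as the generic
# views do inside get_object(). A minimal sketch of wiring it in explicitly
# (the placement inside each post() is an assumption, not the project's code):
#
#     character = Character.objects.get(pk=character_id)
#     self.check_object_permissions(request, character)  # raises 403 if denied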
class CharacterCreateView(LoginRequiredMixin, CreateView):
model = Character
form_class = CharacterForm
def get_initial(self):
# Get the initial dictionary from the superclass method
initial = super(CharacterCreateView, self).get_initial()
# Copy the dictionary so we don't accidentally change a mutable dict
initial = initial.copy()
        # Default to the player from the query string; fall back to the
        # logged-in user's player.
try:
initial['player'] = self.request.GET['player']
except KeyError:
initial['player'] = self.request.user.player
return initial
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs['user'] = self.request.user # pass the 'user' in kwargs
return kwargs
def get_success_url(self):
return reverse(
'characters:character_skill_update',
kwargs={'pk': self.object.pk}
)
def form_valid(self, form):
"""
        If this form is valid, add the current player to the character
        when the current user is not an admin.

        If the player doesn't have any other active characters, set this
        one to active.
"""
if not self.request.user.has_perm('players.view_any_player'):
form.instance.player = self.request.user.player
if not form.instance.player.character_set.filter(active_flag=True).exists():
form.instance.active_flag = True
messages.info(self.request, 'New Character, "{}" created.'.format(
form.instance.name
))
return super().form_valid(form)
class CharacterUpdateView(LoginRequiredMixin, UserPassesTestMixin, UpdateView):
model = Character
form_class = CharacterForm
def test_func(self):
if self.request.user.has_perm('players.view_any_player'):
return True
try:
player = Character.objects.get(pk=self.kwargs['pk']).player
return (player.user == self.request.user)
except Character.DoesNotExist:
return False
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs['user'] = self.request.user # pass the 'user' in kwargs
return kwargs
def get_success_url(self):
return reverse(
'characters:character_detail',
kwargs={'pk': self.object.pk}
)
class CharacterDeleteView(
PermissionRequiredMixin,
UserPassesTestMixin,
DeleteView
):
"""
    Removes a character permanently.
Removing a character may have strange effects on other views.
"""
model = Character
permission_required = ('characters.change_character', )
success_url = reverse_lazy('characters:character_list')
def test_func(self):
if self.request.user.has_perm('players.view_any_player'):
return True
try:
player = Character.objects.get(pk=self.kwargs['pk']).player
return (player.user == self.request.user)
except Character.DoesNotExist:
return False
class CharacterResetView(
PermissionRequiredMixin,
UserPassesTestMixin,
View
):
"""
    Resets a character's skills to none and returns their points to them.
"""
model = Character
permission_required = ('characters.change_character', )
success_url = reverse_lazy('characters:character_list')
def test_func(self):
if self.request.user.has_perm('players.view_any_player'):
return True
try:
player = Character.objects.get(pk=self.kwargs['pk']).player
return (player.user == self.request.user)
except Character.DoesNotExist:
return False
def get(self, request, *args, **kwargs):
"""
        Reset the character's skills, refund the spent CP, and send the
        user back to the originating page (or to the character detail page).
"""
with transaction.atomic():
character = self.model.objects.get(pk=self.kwargs['pk'])
character.cp_available += character.cp_spent
character.cp_spent = 0
character.save(update_fields=['cp_available', 'cp_spent'])
character.characterskills_set.all().delete()
character.headers.clear()
messages.info(self.request, 'Character skills reset for {}.'.format(
character.name
))
return HttpResponseRedirect(
self.request.META.get(
'HTTP_REFERER',
reverse(
'characters:character_detail',
kwargs={'pk': self.kwargs['pk']}
)
)
)
class CharacterSetActiveView(
LoginRequiredMixin,
UserPassesTestMixin,
View
):
"""
    Set the active character for the character's player to the sent id.
"""
model = Character
fields = '__all__'
def test_func(self):
if self.request.user.has_perm('players.view_any_player'):
return True
try:
player = Character.objects.get(pk=self.kwargs['pk']).player
return (player.user == self.request.user)
except Character.DoesNotExist:
return False
def get(self, request, *args, **kwargs):
"""
        Send the user back to the originating page or back to the
        character they are setting active.
"""
character = self.model.objects.get(pk=self.kwargs['pk'])
character.player.character_set.update(active_flag=False)
character.active_flag = True
character.save()
messages.info(self.request, 'Active Character changed to {}.'.format(
character.name
))
return HttpResponseRedirect(
self.request.META.get(
'HTTP_REFERER',
reverse(
'characters:character_detail',
kwargs={'pk': self.kwargs['pk']}
)
)
)
class CharacterSkillUpdateView(
LoginRequiredMixin,
UserPassesTestMixin,
FormMixin,
DetailView):
"""
Allow a user to update their chosen skills
"""
template_name = 'characters/character_skill_form.html'
form_class = CharacterSkillForm
model = Character
def test_func(self):
if self.request.user.has_perm('players.view_any_player'):
return True
try:
player = Character.objects.get(pk=self.kwargs['pk']).player
return (player.user == self.request.user)
except Character.DoesNotExist:
return False
def get_success_url(self):
return reverse(
'characters:character_detail',
kwargs={'pk': self.object.pk}
)
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
self.skills = Header.objects\
.order_by('hidden_flag', 'category', 'name')\
.all()
kwargs.update({'skills': self.skills})
return kwargs
def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        # Remove skills not in the character's skill hash (staff see all).
        # The permission app label is 'players', matching its use elsewhere
        # in this module.
        available_skills = self.object.skillhash.keys()
        context['skills'] = filter(
            lambda x: x.id in available_skills
            or self.request.user.has_perm('players.view_any_player'),
            self.skills
        )
        context['skill_hash'] = self.object.skillhash
        # Add the bare skills granted by the rules.
        context['granted_skills'] = self.object.skill_grants
return context
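    # `skillhash` and `skill_grants` are assumed to be Character model
    # properties (not shown in this file): skillhash maps header id ->
    # {skill id: cost for this character}, matching its use as
    # `character.skillhash[header.id]` in the AJAX views below.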
def post(self, request, *args, **kwargs):
self.object = self.get_object()
form = self.get_form()
if form.is_valid():
return self.form_valid(form)
else:
return self.form_invalid(form)
def form_valid(self, form):
"""
Form is valid. Save the skills to that character and remove the
        appropriate number of character points.
"""
return super().form_valid(form)
class ResetPointsView(
PermissionRequiredMixin,
View
):
"""
Resets the points for the season.
"""
permission_required = ('characters.reset_points', )
def get(self, request, *args, **kwargs):
"""
        Reset the seasonal point cap for all characters, then send the user
        back to the originating page, or to the main page if the referrer
        isn't set.
"""
Character.objects.all().update(cp_transferred=0)
messages.info(self.request, 'Point cap reset!')
return HttpResponseRedirect(
self.request.META.get(
'HTTP_REFERER',
'/'
)
)
'''
Put the AJAX work for Characters here
'''
class CharacterAddHeaderView(APIView):
'''
    Set of AJAX views for Characters
This handles different API calls for character actions.
'''
authentication_classes = [SessionAuthentication]
permission_classes = [OwnsCharacter]
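    # Expected POST body and JSON responses (the endpoint path below is an
    # assumption; only the field names come from this view):
    #
    #     POST /characters/api/header/add/
    #         header_id=12&character_id=3&cp_available=45
    #     200 -> {"success": -<header.cost>, "skills": "<rendered html>"}
    #     412 -> {"error": "..."}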
def post(self, request, format=None):
header_id = int(request.POST.get('header_id', 0))
character_id = int(request.POST.get('character_id', 0))
cp_available = int(request.POST.get('cp_available', 0))
# get the character and then see if the header is allowed
header = Header.objects.get(pk=header_id)
character = Character.objects.get(pk=character_id)
# Default to error.
content = {
'error': "prerequisites not met"
}
status = None
# if the prerequisites are met, add the header to the user and return
# the list of skills
if character.check_header_prerequisites(header):
# see if the character has enough points to add the header
if (cp_available - header.cost) >= 0:
character.cp_available -= header.cost
character.cp_spent += header.cost
character.headers.add(header)
character.save()
skill_item_template_string = render_to_string(
"characters/includes/character_skill_update_item.html",
{
'header': header,
'header_skills': header.skills.all(),
'header_costs': character.skillhash[header.id]
},
request
)
content = {
'success': header.cost * -1,
'skills': skill_item_template_string
}
else:
content = {
'error': "You don't have enough points available for this character to add this header."
}
status = HTTP_412_PRECONDITION_FAILED
else:
status = HTTP_412_PRECONDITION_FAILED
return Response(content, status)
class CharacterDropHeaderView(APIView):
'''
    Set of AJAX views for Characters
This handles different API calls for character actions.
'''
authentication_classes = [SessionAuthentication]
permission_classes = [OwnsCharacter]
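    # Mirror of the add view: expects header_id and character_id and refunds
    # header.cost on success (the endpoint path is an assumption):
    #
    #     POST /characters/api/header/drop/
    #         header_id=12&character_id=3
    #     200 -> {"success": <header.cost>}
    #     412 -> {"error": "Header is not already bought!"}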
def post(self, request, format=None):
header_id = int(request.POST.get('header_id', 0))
character_id = int(request.POST.get('character_id', 0))
# get the character and header
header = Header.objects.get(pk=header_id)
character = Character.objects.get(pk=character_id)
# Default to error.
content = {
'error': "Header is not already bought!"
}
status = None
# if the character has the header, drop it and refund the CP
content['header_list'] = []
if header in character.headers.all():
print(f'Header present! Dropping and adding back in {header.cost} CP...')
            character.cp_available += header.cost
            character.cp_spent -= header.cost
            character.headers.remove(header)
            # Persist the CP refund; headers.remove() only updates the M2M
            # table, not the cp_* fields.
            character.save()
            # Rendered for parity with the add view but not returned below.
            skill_item_template_string = render_to_string(
"characters/includes/character_skill_update_item.html",
{
'header': header,
'header_skills': header.skills.all(),
'header_costs': character.skillhash[header.id]
},
request
)
content = {
'success': header.cost,
}
else:
status = HTTP_412_PRECONDITION_FAILED
return Response(content, status)
class CharacterAddSkillView(APIView):
'''
    Set of AJAX views for Characters
This handles different API calls for character actions.
'''
authentication_classes = [SessionAuthentication]
permission_classes = [OwnsCharacter]
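    # Expected POST body (vector is +1 to buy a rank, -1 to refund one);
    # the endpoint path is an assumption:
    #
    #     POST /characters/api/skill/add/
    #         skill_id=7&header_id=12&character_id=3&cp_available=45&vector=1
    #     200 -> {"success": -<cost>}   # CP delta for the client to apply
    #     412 -> {"error": "..."}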
def post(self, request, format=None):
skill_id = int(request.POST.get('skill_id', 0))
header_id = int(request.POST.get('header_id', 0))
character_id = int(request.POST.get('character_id', 0))
cp_available = int(request.POST.get('cp_available', 0))
        try:
            vector = int(request.POST.get('vector'))
        except (TypeError, ValueError):
            # int(None) raises TypeError and a non-numeric value raises
            # ValueError; either way there is no change to apply. Return a
            # proper DRF Response rather than a bare dict.
            return Response(
                {'error': "No change indicated"},
                HTTP_412_PRECONDITION_FAILED
            )
# get the character and then see if the skill is allowed
header_skill = HeaderSkill.objects.get(skill_id=skill_id, header_id=header_id)
character = Character.objects.get(pk=character_id)
# check that the skill is allowed.
# if the prerequisites are met, add the header to the user and return
# the list of skills
# otherwise, return an error
content = {
'success': "testing right now"
}
status = None
if character.check_skill_prerequisites(header_skill.skill, header_skill.header):
# since vector is the direction, we want to reverse it when
# dealing with what we want to change for the available points
# see if the character has enough points to add the header
cost = character.skill_cost(header_skill) * vector
if (cp_available - cost) >= 0:
# when this is returned, change the available costs
(character_skill, created) = character.characterskills_set.get_or_create(
skill=header_skill
)
if character_skill.count and (character_skill.count + vector < 0):
content = {
'error': f"You don't have any points in {header_skill.skill}"
}
status = HTTP_412_PRECONDITION_FAILED
else:
content = {
'success': cost * -1
}
character_skill.count = F('count') + vector
character_skill.save()
character.cp_spent = F('cp_spent') + cost
character.cp_available = F('cp_available') - cost
character.save()
else:
content = {
'error': "You don't have enough points available to purchase this skill . . ."
}
status = HTTP_412_PRECONDITION_FAILED
else:
status = HTTP_412_PRECONDITION_FAILED
return Response(content, status)
class CharacterDetailView(LoginRequiredMixin, UserPassesTestMixin, DetailView):
"""
Show the details for a character.
From here you can edit the details of a character or choose skills.
"""
model = Character
fields = '__all__'
def test_func(self):
if self.request.user.has_perm('players.view_any_player'):
return True
try:
player = Character.objects.get(pk=self.kwargs['pk']).player
return (player.user == self.request.user)
except Character.DoesNotExist:
return False
class CharacterConceptApproveView(PermissionRequiredMixin, FormView):
"""
    Approve the concept for a character.
    Grant the CP for the character.
    Set the concept approved flag.
"""
permission_required = 'players.change_any_player'
form_class = CharacterConceptApproveForm
def form_valid(self, form):
self.object = Character.objects.get(pk=form.cleaned_data['character_id'])
self.object.player.cp_available += 3
self.object.player.save(update_fields=['cp_available'])
self.object.concept_approved_flag = True
self.object.save(update_fields=['concept_approved_flag'])
messages.info(self.request, f"{self.object} concept approved!")
return super().form_valid(form)
def form_invalid(self, form):
self.object = Character.objects.get(pk=form.cleaned_data['character_id'])
for key, error in form.errors.items():
messages.error(self.request, error.as_text())
return HttpResponseRedirect(reverse(
'characters:character_detail',
kwargs={'pk': self.object.pk}
))
def get_success_url(self):
return reverse(
'characters:character_detail',
kwargs={'pk': self.object.pk}
)
class CharacterHistoryApproveView(PermissionRequiredMixin, FormView):
"""
Approve the history for a character.
    Grant the CP for the character.
Set the history approved flag.
"""
permission_required = 'players.change_any_player'
form_class = CharacterHistoryApproveForm
def form_valid(self, form):
self.object = Character.objects.get(pk=form.cleaned_data['character_id'])
self.object.player.cp_available += 3
self.object.player.save(update_fields=['cp_available'])
self.object.history_approved_flag = True
self.object.save(update_fields=['history_approved_flag'])
messages.info(self.request, f"{self.object} history approved!")
return super().form_valid(form)
def form_invalid(self, form):
self.object = Character.objects.get(pk=form.cleaned_data['character_id'])
for key, error in form.errors.items():
messages.error(self.request, error.as_text())
return HttpResponseRedirect(reverse(
'characters:character_detail',
kwargs={'pk': self.object.pk}
))
def get_success_url(self):
return reverse(
'characters:character_detail',
kwargs={'pk': self.object.pk}
)
class CharacterListView(LoginRequiredMixin, ListView):
"""
Show the list of characters.
From here, you can view, edit, delete a character.
"""
model = Character
paginate_by = 25
    def get_queryset(self):
        queryset = super().get_queryset()
        criteria = self.request.GET.get('criteria', '')
        if criteria.strip():
            entry_query = get_query(
                criteria,
                ['name', 'description', 'concept', 'history', 'player_notes']
            )
            queryset = queryset.filter(entry_query)
        history_approved_flag = self.request.GET.get(
            'history_approved_flag', False
        )
        if history_approved_flag:
            queryset = queryset.filter(history_approved_flag=True)
        concept_approved_flag = self.request.GET.get(
            'concept_approved_flag', False
        )
        if concept_approved_flag:
            queryset = queryset.filter(concept_approved_flag=True)
        return queryset
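    # Example filter request (the GET parameter names come from get_queryset
    # above; the URL path is an assumption):
    #
    #     GET /characters/?criteria=mage&history_approved_flag=1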
def get_context_data(self, **kwargs):
'''
Add the form so we can filter the characters.
'''
# get the context data to add to.
context_data = super().get_context_data(**kwargs)
context_data.update(**self.request.GET)
# return the resulting context
return context_data
class CharacterPrintListView(LoginRequiredMixin, ListView):
"""
Show a list of characters to print.
"""
model = Character
template_name = "characters/character_print_list.html"
    def get_queryset(self):
        # Filter to active PCs whose players are registered for the event
        # (defaults to the next event).
        queryset = super().get_queryset()
        event_id = self.kwargs.get('event_id', None)
        if not event_id:
            event_id = Event.next_event().id
        player_ids = Registration.objects.filter(
            event__id=event_id
        ).values_list('player_id', flat=True)
        queryset = queryset.filter(
            player__id__in=player_ids, npc_flag=False, active_flag=True
        )
        return queryset
concept_approved_flag = self.request.GET.get('concept_approved_flag',\n False)\n if concept_approved_flag:\n queryset = queryset.filter(concept_approved_flag=True)\n return queryset\n\n def get_context_data(self, **kwargs):\n \"\"\"\n Add the form so we can filter the characters.\n \"\"\"\n context_data = super().get_context_data(**kwargs)\n context_data.update(**self.request.GET)\n return context_data\n\n\nclass CharacterPrintListView(LoginRequiredMixin, ListView):\n \"\"\"\n Show a list of characters to print.\n\n \"\"\"\n model = Character\n template_name = 'characters/character_print_list.html'\n\n def get_queryset(self):\n queryset = super().get_queryset()\n event_id = self.kwargs.get('event_id', None)\n if not event_id:\n event_id = Event.next_event().id\n player_ids = Registration.objects.filter(event__id=event_id\n ).values_list('player_id', flat=True)\n queryset = queryset.filter(player__id__in=player_ids, npc_flag=\n False, active_flag=True)\n return queryset\n",
"step-4": "<mask token>\n\n\nclass CharacterUpdateView(LoginRequiredMixin, UserPassesTestMixin, UpdateView):\n <mask token>\n <mask token>\n\n def test_func(self):\n if self.request.user.has_perm('players.view_any_player'):\n return True\n try:\n player = Character.objects.get(pk=self.kwargs['pk']).player\n return player.user == self.request.user\n except Character.DoesNotExist:\n return False\n return False\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n kwargs['user'] = self.request.user\n return kwargs\n\n def get_success_url(self):\n return reverse('characters:character_detail', kwargs={'pk': self.\n object.pk})\n\n\nclass CharacterDeleteView(PermissionRequiredMixin, UserPassesTestMixin,\n DeleteView):\n \"\"\"\n Removes a character permanantly.\n\n Removing a character may have strange effects on other views.\n \"\"\"\n model = Character\n permission_required = 'characters.change_character',\n success_url = reverse_lazy('characters:character_list')\n\n def test_func(self):\n if self.request.user.has_perm('players.view_any_player'):\n return True\n try:\n player = Character.objects.get(pk=self.kwargs['pk']).player\n return player.user == self.request.user\n except Character.DoesNotExist:\n return False\n return False\n\n\nclass CharacterResetView(PermissionRequiredMixin, UserPassesTestMixin, View):\n \"\"\"\n Resets a characters skills to none and returns their points to them.\n \"\"\"\n model = Character\n permission_required = 'characters.change_character',\n success_url = reverse_lazy('characters:character_list')\n\n def test_func(self):\n if self.request.user.has_perm('players.view_any_player'):\n return True\n try:\n player = Character.objects.get(pk=self.kwargs['pk']).player\n return player.user == self.request.user\n except Character.DoesNotExist:\n return False\n return False\n\n def get(self, request, *args, **kwargs):\n \"\"\"\n Send the user back to the the originating page or back to the\n character they are setting active\n \"\"\"\n with transaction.atomic():\n character = self.model.objects.get(pk=self.kwargs['pk'])\n character.cp_available += character.cp_spent\n character.cp_spent = 0\n character.save(update_fields=['cp_available', 'cp_spent'])\n character.characterskills_set.all().delete()\n character.headers.clear()\n messages.info(self.request, 'Character skills reset for {}.'.format\n (character.name))\n return HttpResponseRedirect(self.request.META.get('HTTP_REFERER',\n reverse('characters:character_detail', kwargs={'pk': self.\n kwargs['pk']})))\n\n\nclass CharacterSetActiveView(LoginRequiredMixin, UserPassesTestMixin, View):\n \"\"\"\n Set the active character for the characters player to the sent id.\n \"\"\"\n model = Character\n fields = '__all__'\n\n def test_func(self):\n if self.request.user.has_perm('players.view_any_player'):\n return True\n try:\n player = Character.objects.get(pk=self.kwargs['pk']).player\n return player.user == self.request.user\n except Character.DoesNotExist:\n return False\n return False\n\n def get(self, request, *args, **kwargs):\n \"\"\"\n Send the user back to the the originating page or back to the\n character they are setting active\n \"\"\"\n character = self.model.objects.get(pk=self.kwargs['pk'])\n character.player.character_set.update(active_flag=False)\n character.active_flag = True\n character.save()\n messages.info(self.request, 'Active Character changed to {}.'.\n format(character.name))\n return HttpResponseRedirect(self.request.META.get('HTTP_REFERER',\n reverse('characters:character_detail', 
kwargs={'pk': self.\n kwargs['pk']})))\n\n\nclass CharacterSkillUpdateView(LoginRequiredMixin, UserPassesTestMixin,\n FormMixin, DetailView):\n \"\"\"\n Allow a user to update their chosen skills\n \"\"\"\n template_name = 'characters/character_skill_form.html'\n form_class = CharacterSkillForm\n model = Character\n\n def test_func(self):\n if self.request.user.has_perm('players.view_any_player'):\n return True\n try:\n player = Character.objects.get(pk=self.kwargs['pk']).player\n return player.user == self.request.user\n except Character.DoesNotExist:\n return False\n return False\n\n def get_success_url(self):\n return reverse('characters:character_detail', kwargs={'pk': self.\n object.pk})\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n self.skills = Header.objects.order_by('hidden_flag', 'category', 'name'\n ).all()\n kwargs.update({'skills': self.skills})\n return kwargs\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**self.kwargs)\n available_skills = self.object.skillhash.keys()\n context['skills'] = filter(lambda x: x.id in available_skills or\n self.request.user.has_perm('player.view_any_player'), self.skills)\n context['skill_hash'] = self.object.skillhash\n context['granted_skills'] = self.object.skill_grants\n return context\n\n def post(self, request, *args, **kwargs):\n self.object = self.get_object()\n form = self.get_form()\n if form.is_valid():\n return self.form_valid(form)\n else:\n return self.form_invalid(form)\n\n def form_valid(self, form):\n \"\"\"\n Form is valid. Save the skills to that character and remove the\n appropriate number of characters points.\n \"\"\"\n return super().form_valid(form)\n\n\nclass ResetPointsView(PermissionRequiredMixin, View):\n \"\"\"\n Resets the points for the season.\n \"\"\"\n permission_required = 'characters.reset_points',\n\n def get(self, request, *args, **kwargs):\n \"\"\"\n Send the user back to the the originating page or back to the main \n page if the referrer isn't set.\n \"\"\"\n Character.objects.all().update(cp_transferred=0)\n messages.info(self.request, 'Point cap reset!')\n return HttpResponseRedirect(self.request.META.get('HTTP_REFERER', '/'))\n\n\n<mask token>\n\n\nclass CharacterAddHeaderView(APIView):\n \"\"\"\n Set of AJAX views for a Characters\n\n This handles different API calls for character actions.\n \"\"\"\n authentication_classes = [SessionAuthentication]\n permission_classes = [OwnsCharacter]\n\n def post(self, request, format=None):\n header_id = int(request.POST.get('header_id', 0))\n character_id = int(request.POST.get('character_id', 0))\n cp_available = int(request.POST.get('cp_available', 0))\n header = Header.objects.get(pk=header_id)\n character = Character.objects.get(pk=character_id)\n content = {'error': 'prerequisites not met'}\n status = None\n if character.check_header_prerequisites(header):\n if cp_available - header.cost >= 0:\n character.cp_available -= header.cost\n character.cp_spent += header.cost\n character.headers.add(header)\n character.save()\n skill_item_template_string = render_to_string(\n 'characters/includes/character_skill_update_item.html',\n {'header': header, 'header_skills': header.skills.all(),\n 'header_costs': character.skillhash[header.id]}, request)\n content = {'success': header.cost * -1, 'skills':\n skill_item_template_string}\n else:\n content = {'error':\n \"You don't have enough points available for this character to add this header.\"\n }\n status = HTTP_412_PRECONDITION_FAILED\n else:\n status = 
HTTP_412_PRECONDITION_FAILED\n return Response(content, status)\n\n\nclass CharacterDropHeaderView(APIView):\n \"\"\"\n Set of AJAX views for a Characters\n\n This handles different API calls for character actions.\n \"\"\"\n authentication_classes = [SessionAuthentication]\n permission_classes = [OwnsCharacter]\n\n def post(self, request, format=None):\n header_id = int(request.POST.get('header_id', 0))\n character_id = int(request.POST.get('character_id', 0))\n header = Header.objects.get(pk=header_id)\n character = Character.objects.get(pk=character_id)\n content = {'error': 'Header is not already bought!'}\n status = None\n content['header_list'] = []\n if header in character.headers.all():\n print(\n f'Header present! Dropping and adding back in {header.cost} CP...'\n )\n character.cp_available += header.cost\n character.cp_spent -= header.cost\n character.headers.remove(header)\n skill_item_template_string = render_to_string(\n 'characters/includes/character_skill_update_item.html', {\n 'header': header, 'header_skills': header.skills.all(),\n 'header_costs': character.skillhash[header.id]}, request)\n content = {'success': header.cost}\n else:\n status = HTTP_412_PRECONDITION_FAILED\n return Response(content, status)\n\n\nclass CharacterAddSkillView(APIView):\n \"\"\"\n Set of AJAX views for a Characters\n\n This handles different API calls for character actions.\n \"\"\"\n authentication_classes = [SessionAuthentication]\n permission_classes = [OwnsCharacter]\n\n def post(self, request, format=None):\n skill_id = int(request.POST.get('skill_id', 0))\n header_id = int(request.POST.get('header_id', 0))\n character_id = int(request.POST.get('character_id', 0))\n cp_available = int(request.POST.get('cp_available', 0))\n try:\n vector = int(request.POST.get('vector'))\n except AttributeError:\n return {'error': 'No change indicated'}\n header_skill = HeaderSkill.objects.get(skill_id=skill_id, header_id\n =header_id)\n character = Character.objects.get(pk=character_id)\n content = {'success': 'testing right now'}\n status = None\n if character.check_skill_prerequisites(header_skill.skill,\n header_skill.header):\n cost = character.skill_cost(header_skill) * vector\n if cp_available - cost >= 0:\n character_skill, created = (character.characterskills_set.\n get_or_create(skill=header_skill))\n if (character_skill.count and character_skill.count +\n vector < 0):\n content = {'error':\n f\"You don't have any points in {header_skill.skill}\"}\n status = HTTP_412_PRECONDITION_FAILED\n else:\n content = {'success': cost * -1}\n character_skill.count = F('count') + vector\n character_skill.save()\n character.cp_spent = F('cp_spent') + cost\n character.cp_available = F('cp_available') - cost\n character.save()\n else:\n content = {'error':\n \"You don't have enough points available to purchase this skill . . 
.\"\n }\n status = HTTP_412_PRECONDITION_FAILED\n else:\n status = HTTP_412_PRECONDITION_FAILED\n return Response(content, status)\n\n\nclass CharacterDetailView(LoginRequiredMixin, UserPassesTestMixin, DetailView):\n \"\"\"\n Show the details for a character.\n\n From here you can edit the details of a character or choose skills.\n \"\"\"\n model = Character\n fields = '__all__'\n\n def test_func(self):\n if self.request.user.has_perm('players.view_any_player'):\n return True\n try:\n player = Character.objects.get(pk=self.kwargs['pk']).player\n return player.user == self.request.user\n except Character.DoesNotExist:\n return False\n return False\n\n\nclass CharacterConceptApproveView(PermissionRequiredMixin, FormView):\n \"\"\"\n Approve the concept for a character.\n Grant the CP for the character\n Set the history approved flag.\n \"\"\"\n permission_required = 'players.change_any_player'\n form_class = CharacterConceptApproveForm\n\n def form_valid(self, form):\n self.object = Character.objects.get(pk=form.cleaned_data[\n 'character_id'])\n self.object.player.cp_available += 3\n self.object.player.save(update_fields=['cp_available'])\n self.object.concept_approved_flag = True\n self.object.save(update_fields=['concept_approved_flag'])\n messages.info(self.request, f'{self.object} concept approved!')\n return super().form_valid(form)\n\n def form_invalid(self, form):\n self.object = Character.objects.get(pk=form.cleaned_data[\n 'character_id'])\n for key, error in form.errors.items():\n messages.error(self.request, error.as_text())\n return HttpResponseRedirect(reverse('characters:character_detail',\n kwargs={'pk': self.object.pk}))\n\n def get_success_url(self):\n return reverse('characters:character_detail', kwargs={'pk': self.\n object.pk})\n\n\nclass CharacterHistoryApproveView(PermissionRequiredMixin, FormView):\n \"\"\"\n Approve the history for a character.\n Grant the CP for the character\n Set the history approved flag.\n \"\"\"\n permission_required = 'players.change_any_player'\n form_class = CharacterHistoryApproveForm\n\n def form_valid(self, form):\n self.object = Character.objects.get(pk=form.cleaned_data[\n 'character_id'])\n self.object.player.cp_available += 3\n self.object.player.save(update_fields=['cp_available'])\n self.object.history_approved_flag = True\n self.object.save(update_fields=['history_approved_flag'])\n messages.info(self.request, f'{self.object} history approved!')\n return super().form_valid(form)\n\n def form_invalid(self, form):\n self.object = Character.objects.get(pk=form.cleaned_data[\n 'character_id'])\n for key, error in form.errors.items():\n messages.error(self.request, error.as_text())\n return HttpResponseRedirect(reverse('characters:character_detail',\n kwargs={'pk': self.object.pk}))\n\n def get_success_url(self):\n return reverse('characters:character_detail', kwargs={'pk': self.\n object.pk})\n\n\nclass CharacterListView(LoginRequiredMixin, ListView):\n \"\"\"\n Show the list of characters.\n\n From here, you can view, edit, delete a character.\n \"\"\"\n model = Character\n paginate_by = 25\n\n def get_queryset(self):\n queryset = super().get_queryset()\n criteria = self.request.GET.get('criteria', '')\n if criteria.strip():\n entry_query = get_query(criteria, ['name', 'description',\n 'concept', 'history', 'player_notes'])\n queryset = queryset.filter(entry_query)\n history_approved_flag = self.request.GET.get('history_approved_flag',\n False)\n if history_approved_flag:\n queryset = queryset.filter(history_approved_flag=True)\n 
concept_approved_flag = self.request.GET.get('concept_approved_flag',\n False)\n if concept_approved_flag:\n queryset = queryset.filter(concept_approved_flag=True)\n return queryset\n\n def get_context_data(self, **kwargs):\n \"\"\"\n Add the form so we can filter the characters.\n \"\"\"\n context_data = super().get_context_data(**kwargs)\n context_data.update(**self.request.GET)\n return context_data\n\n\nclass CharacterPrintListView(LoginRequiredMixin, ListView):\n \"\"\"\n Show a list of characters to print.\n\n \"\"\"\n model = Character\n template_name = 'characters/character_print_list.html'\n\n def get_queryset(self):\n queryset = super().get_queryset()\n event_id = self.kwargs.get('event_id', None)\n if not event_id:\n event_id = Event.next_event().id\n player_ids = Registration.objects.filter(event__id=event_id\n ).values_list('player_id', flat=True)\n queryset = queryset.filter(player__id__in=player_ids, npc_flag=\n False, active_flag=True)\n return queryset\n",
"step-5": "\"\"\"These are views that are used for viewing and editing characters.\"\"\"\n\nfrom django.contrib import messages\nfrom django.contrib.auth.mixins import UserPassesTestMixin,\\\n LoginRequiredMixin, PermissionRequiredMixin\nfrom django.db import transaction\nfrom django.db.models import F\nfrom django.http import HttpResponseRedirect\nfrom django.template.loader import render_to_string\nfrom django.urls import reverse, reverse_lazy\nfrom django.views import View\nfrom django.views.generic.edit import FormMixin, CreateView, UpdateView\nfrom django.views.generic import DeleteView, DetailView, FormView, ListView\n\nfrom rest_framework.status import HTTP_412_PRECONDITION_FAILED\nfrom rest_framework.authentication import SessionAuthentication\nfrom rest_framework.permissions import BasePermission\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\n\n\nfrom talesofvalor import get_query\nfrom talesofvalor.events.models import Event\nfrom talesofvalor.players.models import Registration\nfrom talesofvalor.skills.models import Header, HeaderSkill\n\nfrom .models import Character\nfrom .forms import CharacterForm, CharacterSkillForm,\\\n CharacterConceptApproveForm, CharacterHistoryApproveForm\n\n\nclass OwnsCharacter(BasePermission):\n \"\"\"\n The current user is staff or owns the that is being manipulated.\n \"\"\"\n message = \"You don't own this character\"\n\n def has_object_permission(self, request, view, obj):\n if self.request.user.has_perm('players.view_any_player'):\n return True\n try:\n player = Character.objects.get(pk=self.kwargs['pk']).player\n return (player.user == self.request.user)\n except Character.DoesNotExist:\n return False\n return False\n\n\nclass CharacterCreateView(LoginRequiredMixin, CreateView):\n model = Character\n form_class = CharacterForm\n\n def get_initial(self):\n # Get the initial dictionary from the superclass method\n initial = super(CharacterCreateView, self).get_initial()\n # Copy the dictionary so we don't accidentally change a mutable dict\n initial = initial.copy()\n # default to getting the player from the query String.\n try:\n initial['player'] = self.request.GET['player']\n except KeyError:\n initial['player'] = self.request.user.player\n # etc...\n return initial\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n kwargs['user'] = self.request.user # pass the 'user' in kwargs\n return kwargs\n\n def get_success_url(self):\n return reverse(\n 'characters:character_skill_update',\n kwargs={'pk': self.object.pk}\n )\n\n def form_valid(self, form):\n \"\"\"\n If this form is valid, then add the current player to the character\n if the current user is not an admin\n\n If the user doesn't have any other active characters, set this one\n to active.\n \"\"\"\n if not self.request.user.has_perm('players.view_any_player'):\n form.instance.player = self.request.user.player\n\n if not form.instance.player.character_set.filter(active_flag=True).exists():\n form.instance.active_flag = True\n\n messages.info(self.request, 'New Character, \"{}\" created.'.format(\n form.instance.name\n ))\n return super().form_valid(form)\n\n\nclass CharacterUpdateView(LoginRequiredMixin, UserPassesTestMixin, UpdateView):\n model = Character\n form_class = CharacterForm\n\n def test_func(self):\n if self.request.user.has_perm('players.view_any_player'):\n return True\n try:\n player = Character.objects.get(pk=self.kwargs['pk']).player\n return (player.user == self.request.user)\n except Character.DoesNotExist:\n 
return False\n return False\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n kwargs['user'] = self.request.user # pass the 'user' in kwargs\n return kwargs\n\n def get_success_url(self):\n return reverse(\n 'characters:character_detail',\n kwargs={'pk': self.object.pk}\n )\n\n\nclass CharacterDeleteView(\n PermissionRequiredMixin,\n UserPassesTestMixin,\n DeleteView\n ):\n \"\"\"\n Removes a character permanantly.\n\n Removing a character may have strange effects on other views.\n \"\"\"\n\n model = Character\n permission_required = ('characters.change_character', )\n success_url = reverse_lazy('characters:character_list')\n\n def test_func(self):\n if self.request.user.has_perm('players.view_any_player'):\n return True\n try:\n player = Character.objects.get(pk=self.kwargs['pk']).player\n return (player.user == self.request.user)\n except Character.DoesNotExist:\n return False\n return False\n\n\nclass CharacterResetView(\n PermissionRequiredMixin,\n UserPassesTestMixin,\n View\n ):\n \"\"\"\n Resets a characters skills to none and returns their points to them.\n \"\"\"\n\n model = Character\n permission_required = ('characters.change_character', )\n success_url = reverse_lazy('characters:character_list')\n\n def test_func(self):\n if self.request.user.has_perm('players.view_any_player'):\n return True\n try:\n player = Character.objects.get(pk=self.kwargs['pk']).player\n return (player.user == self.request.user)\n except Character.DoesNotExist:\n return False\n return False\n\n def get(self, request, *args, **kwargs):\n \"\"\"\n Send the user back to the the originating page or back to the\n character they are setting active\n \"\"\"\n\n with transaction.atomic():\n character = self.model.objects.get(pk=self.kwargs['pk'])\n character.cp_available += character.cp_spent\n character.cp_spent = 0\n character.save(update_fields=['cp_available', 'cp_spent'])\n character.characterskills_set.all().delete()\n character.headers.clear()\n messages.info(self.request, 'Character skills reset for {}.'.format(\n character.name\n ))\n return HttpResponseRedirect(\n self.request.META.get(\n 'HTTP_REFERER',\n reverse(\n 'characters:character_detail',\n kwargs={'pk': self.kwargs['pk']}\n )\n )\n )\n\n\nclass CharacterSetActiveView(\n LoginRequiredMixin,\n UserPassesTestMixin,\n View\n ):\n \"\"\"\n Set the active character for the characters player to the sent id.\n \"\"\"\n\n model = Character\n fields = '__all__'\n\n def test_func(self):\n if self.request.user.has_perm('players.view_any_player'):\n return True\n try:\n player = Character.objects.get(pk=self.kwargs['pk']).player\n return (player.user == self.request.user)\n except Character.DoesNotExist:\n return False\n return False\n\n def get(self, request, *args, **kwargs):\n \"\"\"\n Send the user back to the the originating page or back to the\n character they are setting active\n \"\"\"\n\n character = self.model.objects.get(pk=self.kwargs['pk'])\n character.player.character_set.update(active_flag=False)\n character.active_flag = True\n character.save()\n messages.info(self.request, 'Active Character changed to {}.'.format(\n character.name\n ))\n return HttpResponseRedirect(\n self.request.META.get(\n 'HTTP_REFERER',\n reverse(\n 'characters:character_detail',\n kwargs={'pk': self.kwargs['pk']}\n )\n )\n )\n\n\nclass CharacterSkillUpdateView(\n LoginRequiredMixin,\n UserPassesTestMixin,\n FormMixin,\n DetailView):\n \"\"\"\n Allow a user to update their chosen skills\n \"\"\"\n\n template_name = 
'characters/character_skill_form.html'\n form_class = CharacterSkillForm\n model = Character\n\n def test_func(self):\n if self.request.user.has_perm('players.view_any_player'):\n return True\n try:\n player = Character.objects.get(pk=self.kwargs['pk']).player\n return (player.user == self.request.user)\n except Character.DoesNotExist:\n return False\n return False\n\n def get_success_url(self):\n return reverse(\n 'characters:character_detail',\n kwargs={'pk': self.object.pk}\n )\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n self.skills = Header.objects\\\n .order_by('hidden_flag', 'category', 'name')\\\n .all()\n kwargs.update({'skills': self.skills})\n return kwargs\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**self.kwargs)\n\n # remove skills not in the hash.\n available_skills = self.object.skillhash.keys()\n context['skills'] = filter(lambda x: x.id in available_skills or self.request.user.has_perm('player.view_any_player'), self.skills)\n context['skill_hash'] = self.object.skillhash\n # add the bare skills granted by the rules\n context['granted_skills'] = self.object.skill_grants\n return context\n\n def post(self, request, *args, **kwargs):\n self.object = self.get_object()\n form = self.get_form()\n if form.is_valid():\n return self.form_valid(form)\n else:\n return self.form_invalid(form)\n\n def form_valid(self, form):\n \"\"\"\n Form is valid. Save the skills to that character and remove the\n appropriate number of characters points.\n \"\"\"\n return super().form_valid(form)\n\n\nclass ResetPointsView(\n PermissionRequiredMixin,\n View\n ):\n \"\"\"\n Resets the points for the season.\n \"\"\"\n\n permission_required = ('characters.reset_points', )\n\n def get(self, request, *args, **kwargs):\n \"\"\"\n Send the user back to the the originating page or back to the main \n page if the referrer isn't set.\n \"\"\"\n Character.objects.all().update(cp_transferred=0)\n messages.info(self.request, 'Point cap reset!')\n return HttpResponseRedirect(\n self.request.META.get(\n 'HTTP_REFERER',\n '/'\n )\n )\n\n\n'''\nPut the AJAX work for Characters here\n'''\n\n\nclass CharacterAddHeaderView(APIView):\n '''\n Set of AJAX views for a Characters\n\n This handles different API calls for character actions.\n '''\n\n authentication_classes = [SessionAuthentication]\n permission_classes = [OwnsCharacter]\n\n def post(self, request, format=None):\n header_id = int(request.POST.get('header_id', 0))\n character_id = int(request.POST.get('character_id', 0))\n cp_available = int(request.POST.get('cp_available', 0))\n # get the character and then see if the header is allowed\n header = Header.objects.get(pk=header_id)\n character = Character.objects.get(pk=character_id)\n # Default to error.\n content = {\n 'error': \"prerequisites not met\"\n }\n status = None\n # if the prerequisites are met, add the header to the user and return\n # the list of skills\n if character.check_header_prerequisites(header):\n # see if the character has enough points to add the header\n if (cp_available - header.cost) >= 0:\n character.cp_available -= header.cost\n character.cp_spent += header.cost\n character.headers.add(header)\n character.save()\n skill_item_template_string = render_to_string(\n \"characters/includes/character_skill_update_item.html\",\n {\n 'header': header,\n 'header_skills': header.skills.all(),\n 'header_costs': character.skillhash[header.id]\n },\n request\n )\n content = {\n 'success': header.cost * -1,\n 'skills': 
skill_item_template_string\n }\n else: \n content = {\n 'error': \"You don't have enough points available for this character to add this header.\"\n }\n status = HTTP_412_PRECONDITION_FAILED\n else:\n status = HTTP_412_PRECONDITION_FAILED\n return Response(content, status)\n\n\nclass CharacterDropHeaderView(APIView):\n '''\n Set of AJAX views for a Characters\n\n This handles different API calls for character actions.\n '''\n\n authentication_classes = [SessionAuthentication]\n permission_classes = [OwnsCharacter]\n\n def post(self, request, format=None):\n header_id = int(request.POST.get('header_id', 0))\n character_id = int(request.POST.get('character_id', 0))\n # get the character and header\n header = Header.objects.get(pk=header_id)\n character = Character.objects.get(pk=character_id)\n # Default to error.\n content = {\n 'error': \"Header is not already bought!\"\n }\n status = None\n # if the character has the header, drop it and refund the CP\n content['header_list'] = []\n\n if header in character.headers.all():\n print(f'Header present! Dropping and adding back in {header.cost} CP...')\n character.cp_available += header.cost\n character.cp_spent -= header.cost\n character.headers.remove(header)\n skill_item_template_string = render_to_string(\n \"characters/includes/character_skill_update_item.html\",\n {\n 'header': header,\n 'header_skills': header.skills.all(),\n 'header_costs': character.skillhash[header.id]\n },\n request\n )\n content = {\n 'success': header.cost,\n }\n else:\n status = HTTP_412_PRECONDITION_FAILED\n return Response(content, status)\n\n\nclass CharacterAddSkillView(APIView):\n '''\n Set of AJAX views for a Characters\n\n This handles different API calls for character actions.\n '''\n\n authentication_classes = [SessionAuthentication]\n permission_classes = [OwnsCharacter]\n\n def post(self, request, format=None):\n skill_id = int(request.POST.get('skill_id', 0))\n header_id = int(request.POST.get('header_id', 0))\n character_id = int(request.POST.get('character_id', 0))\n cp_available = int(request.POST.get('cp_available', 0))\n try:\n vector = int(request.POST.get('vector'))\n except AttributeError:\n return {\n 'error': \"No change indicated\"\n }\n # get the character and then see if the skill is allowed\n header_skill = HeaderSkill.objects.get(skill_id=skill_id, header_id=header_id)\n character = Character.objects.get(pk=character_id)\n # check that the skill is allowed.\n # if the prerequisites are met, add the header to the user and return\n # the list of skills\n # otherwise, return an error\n content = {\n 'success': \"testing right now\"\n }\n status = None\n if character.check_skill_prerequisites(header_skill.skill, header_skill.header):\n # since vector is the direction, we want to reverse it when\n # dealing with what we want to change for the available points\n # see if the character has enough points to add the header\n cost = character.skill_cost(header_skill) * vector\n if (cp_available - cost) >= 0:\n # when this is returned, change the available costs\n (character_skill, created) = character.characterskills_set.get_or_create(\n skill=header_skill\n )\n if character_skill.count and (character_skill.count + vector < 0):\n content = {\n 'error': f\"You don't have any points in {header_skill.skill}\"\n }\n status = HTTP_412_PRECONDITION_FAILED\n else: \n content = {\n 'success': cost * -1\n }\n character_skill.count = F('count') + vector\n character_skill.save()\n character.cp_spent = F('cp_spent') + cost\n character.cp_available = 
F('cp_available') - cost\n character.save()\n else: \n content = {\n 'error': \"You don't have enough points available to purchase this skill . . .\"\n }\n status = HTTP_412_PRECONDITION_FAILED\n else:\n status = HTTP_412_PRECONDITION_FAILED\n return Response(content, status)\n\n\nclass CharacterDetailView(LoginRequiredMixin, UserPassesTestMixin, DetailView):\n \"\"\"\n Show the details for a character.\n\n From here you can edit the details of a character or choose skills.\n \"\"\"\n\n model = Character\n fields = '__all__'\n\n def test_func(self):\n if self.request.user.has_perm('players.view_any_player'):\n return True\n try:\n player = Character.objects.get(pk=self.kwargs['pk']).player\n return (player.user == self.request.user)\n except Character.DoesNotExist:\n return False\n return False\n\n\nclass CharacterConceptApproveView(PermissionRequiredMixin, FormView):\n \"\"\"\n Approve the concept for a character.\n Grant the CP for the character\n Set the history approved flag.\n \"\"\"\n permission_required = 'players.change_any_player'\n form_class = CharacterConceptApproveForm\n\n def form_valid(self, form):\n self.object = Character.objects.get(pk=form.cleaned_data['character_id'])\n self.object.player.cp_available += 3\n self.object.player.save(update_fields=['cp_available'])\n self.object.concept_approved_flag = True\n self.object.save(update_fields=['concept_approved_flag'])\n messages.info(self.request, f\"{self.object} concept approved!\")\n return super().form_valid(form)\n\n def form_invalid(self, form):\n self.object = Character.objects.get(pk=form.cleaned_data['character_id'])\n for key, error in form.errors.items():\n messages.error(self.request, error.as_text())\n return HttpResponseRedirect(reverse(\n 'characters:character_detail',\n kwargs={'pk': self.object.pk}\n ))\n\n def get_success_url(self):\n return reverse(\n 'characters:character_detail',\n kwargs={'pk': self.object.pk}\n ) \n\n\nclass CharacterHistoryApproveView(PermissionRequiredMixin, FormView):\n \"\"\"\n Approve the history for a character.\n Grant the CP for the character\n Set the history approved flag.\n \"\"\"\n permission_required = 'players.change_any_player'\n form_class = CharacterHistoryApproveForm\n\n def form_valid(self, form):\n self.object = Character.objects.get(pk=form.cleaned_data['character_id'])\n self.object.player.cp_available += 3\n self.object.player.save(update_fields=['cp_available'])\n self.object.history_approved_flag = True\n self.object.save(update_fields=['history_approved_flag'])\n messages.info(self.request, f\"{self.object} history approved!\")\n return super().form_valid(form)\n\n def form_invalid(self, form):\n self.object = Character.objects.get(pk=form.cleaned_data['character_id'])\n for key, error in form.errors.items():\n messages.error(self.request, error.as_text())\n return HttpResponseRedirect(reverse(\n 'characters:character_detail',\n kwargs={'pk': self.object.pk}\n ))\n\n def get_success_url(self):\n return reverse(\n 'characters:character_detail',\n kwargs={'pk': self.object.pk}\n ) \n\n\nclass CharacterListView(LoginRequiredMixin, ListView):\n \"\"\"\n Show the list of characters.\n\n From here, you can view, edit, delete a character.\n \"\"\"\n\n model = Character\n paginate_by = 25\n\n def get_queryset(self):\n queryset = super().get_queryset()\n criteria = self.request.GET.get('criteria', '')\n if (criteria.strip()):\n entry_query = get_query(\n criteria,\n ['name', 'description', 'concept', 'history', 'player_notes']\n )\n queryset = 
queryset.filter(entry_query)\n history_approved_flag = self.request.GET.get('history_approved_flag', False)\n if history_approved_flag:\n queryset = queryset.filter(history_approved_flag=True)\n concept_approved_flag = self.request.GET.get('concept_approved_flag', False)\n if concept_approved_flag:\n queryset = queryset.filter(concept_approved_flag=True)\n return queryset\n\n def get_context_data(self, **kwargs):\n '''\n Add the form so we can filter the characters.\n '''\n # get the context data to add to.\n context_data = super().get_context_data(**kwargs)\n context_data.update(**self.request.GET)\n # return the resulting context\n return context_data\n\n\nclass CharacterPrintListView(LoginRequiredMixin, ListView):\n \"\"\"\n Show a list of characters to print.\n\n \"\"\"\n\n model = Character\n template_name = \"characters/character_print_list.html\"\n\n def get_queryset(self):\n queryset = super().get_queryset() # filter by event\n event_id = self.kwargs.get('event_id', None)\n if not event_id:\n event_id = Event.next_event().id\n player_ids = Registration.objects.filter(event__id=event_id).values_list('player_id', flat=True)\n queryset = queryset.filter(player__id__in=player_ids, npc_flag=False, active_flag=True)\n \n return queryset\n",
"step-ids": [
33,
48,
59,
68,
81
]
}
|
[
33,
48,
59,
68,
81
] |
from chalicelib.utilities import *


def Error(app):

    @app.route('/errors', cors=True, methods=['POST'])
    @printError
    def errors():
        request = app.current_request
        data = request.json_body
        print(data)
        return data
|
normal
|
{
"blob_id": "f100757fcb1bef334f9f8eacae83af551d2bac5b",
"index": 3239,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef Error(app):\n\n @app.route('/errors', cors=True, methods=['POST'])\n @printError\n def errors():\n request = app.current_request\n data = request.json_body\n print(data)\n return data\n",
"step-3": "from chalicelib.utilities import *\n\n\ndef Error(app):\n\n @app.route('/errors', cors=True, methods=['POST'])\n @printError\n def errors():\n request = app.current_request\n data = request.json_body\n print(data)\n return data\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>


class Migration(migrations.Migration):
    <|reserved_special_token_0|>
    <|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>


class Migration(migrations.Migration):
    dependencies = [('twitter', '0002_tweet')]
    operations = [migrations.CreateModel(name='TwitterKeys', fields=[('id',
        models.AutoField(serialize=False, primary_key=True, auto_created=
        True, verbose_name='ID')), ('consumer_key', models.CharField(
        max_length=200)), ('consumer_secret', models.CharField(max_length=
        200)), ('access_token', models.CharField(max_length=200)), (
        'access_token_secret', models.CharField(max_length=200)), ('user',
        models.ForeignKey(to='twitter.TwitterUser'))])]
<|reserved_special_token_1|>
from __future__ import unicode_literals
from django.db import migrations, models


class Migration(migrations.Migration):
    dependencies = [('twitter', '0002_tweet')]
    operations = [migrations.CreateModel(name='TwitterKeys', fields=[('id',
        models.AutoField(serialize=False, primary_key=True, auto_created=
        True, verbose_name='ID')), ('consumer_key', models.CharField(
        max_length=200)), ('consumer_secret', models.CharField(max_length=
        200)), ('access_token', models.CharField(max_length=200)), (
        'access_token_secret', models.CharField(max_length=200)), ('user',
        models.ForeignKey(to='twitter.TwitterUser'))])]
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('twitter', '0002_tweet'),
    ]

    operations = [
        migrations.CreateModel(
            name='TwitterKeys',
            fields=[
                ('id', models.AutoField(serialize=False, primary_key=True, auto_created=True, verbose_name='ID')),
                ('consumer_key', models.CharField(max_length=200)),
                ('consumer_secret', models.CharField(max_length=200)),
                ('access_token', models.CharField(max_length=200)),
                ('access_token_secret', models.CharField(max_length=200)),
                ('user', models.ForeignKey(to='twitter.TwitterUser')),
            ],
        ),
    ]
|
flexible
|
{
"blob_id": "c8406db010a506b782030c5d3f84c319851e89d6",
"index": 3662,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('twitter', '0002_tweet')]\n operations = [migrations.CreateModel(name='TwitterKeys', fields=[('id',\n models.AutoField(serialize=False, primary_key=True, auto_created=\n True, verbose_name='ID')), ('consumer_key', models.CharField(\n max_length=200)), ('consumer_secret', models.CharField(max_length=\n 200)), ('access_token', models.CharField(max_length=200)), (\n 'access_token_secret', models.CharField(max_length=200)), ('user',\n models.ForeignKey(to='twitter.TwitterUser'))])]\n",
"step-4": "from __future__ import unicode_literals\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('twitter', '0002_tweet')]\n operations = [migrations.CreateModel(name='TwitterKeys', fields=[('id',\n models.AutoField(serialize=False, primary_key=True, auto_created=\n True, verbose_name='ID')), ('consumer_key', models.CharField(\n max_length=200)), ('consumer_secret', models.CharField(max_length=\n 200)), ('access_token', models.CharField(max_length=200)), (\n 'access_token_secret', models.CharField(max_length=200)), ('user',\n models.ForeignKey(to='twitter.TwitterUser'))])]\n",
"step-5": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('twitter', '0002_tweet'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='TwitterKeys',\n fields=[\n ('id', models.AutoField(serialize=False, primary_key=True, auto_created=True, verbose_name='ID')),\n ('consumer_key', models.CharField(max_length=200)),\n ('consumer_secret', models.CharField(max_length=200)),\n ('access_token', models.CharField(max_length=200)),\n ('access_token_secret', models.CharField(max_length=200)),\n ('user', models.ForeignKey(to='twitter.TwitterUser')),\n ],\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# Generated by Django 3.1.7 on 2021-05-05 23:28

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('travels', '0011_auto_20210505_2230'),
    ]

    operations = [
        migrations.RenameField(
            model_name='trip',
            old_name='hotel_decription',
            new_name='hotel_description',
        ),
        migrations.AlterField(
            model_name='trip',
            name='hotelstars',
            field=models.IntegerField(blank=True, choices=[(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6)], verbose_name='Gwiazdki hotelu'),
        ),
    ]
|
normal
|
{
"blob_id": "1e853d58c2066f3fbd381d0d603cd2fcece0cf15",
"index": 7933,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('travels', '0011_auto_20210505_2230')]\n operations = [migrations.RenameField(model_name='trip', old_name=\n 'hotel_decription', new_name='hotel_description'), migrations.\n AlterField(model_name='trip', name='hotelstars', field=models.\n IntegerField(blank=True, choices=[(1, 1), (2, 2), (3, 3), (4, 4), (\n 5, 5), (6, 6)], verbose_name='Gwiazdki hotelu'))]\n",
"step-4": "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('travels', '0011_auto_20210505_2230')]\n operations = [migrations.RenameField(model_name='trip', old_name=\n 'hotel_decription', new_name='hotel_description'), migrations.\n AlterField(model_name='trip', name='hotelstars', field=models.\n IntegerField(blank=True, choices=[(1, 1), (2, 2), (3, 3), (4, 4), (\n 5, 5), (6, 6)], verbose_name='Gwiazdki hotelu'))]\n",
"step-5": "# Generated by Django 3.1.7 on 2021-05-05 23:28\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('travels', '0011_auto_20210505_2230'),\n ]\n\n operations = [\n migrations.RenameField(\n model_name='trip',\n old_name='hotel_decription',\n new_name='hotel_description',\n ),\n migrations.AlterField(\n model_name='trip',\n name='hotelstars',\n field=models.IntegerField(blank=True, choices=[(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6)], verbose_name='Gwiazdki hotelu'),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
@project= Life_is_short_you_need_python
@file= judgement
@author= wubingyu
@create_time= 2017/12/21 下午2:58
"""
#a if condition else b
#(falseValue,trueValue)[test]
#(falseValue,trueValue)[test==True]
#(falseValue,trueValue)[bool(<expression>)]
|
normal
|
{
"blob_id": "73e23b3560294ca24428e7dd4cc995b97767335c",
"index": 4202,
"step-1": "<mask token>\n",
"step-2": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\"\"\"\n@project= Life_is_short_you_need_python\n@file= judgement\n@author= wubingyu\n@create_time= 2017/12/21 下午2:58\n\"\"\"\n\n#a if condition else b\n#(falseValue,trueValue)[test]\n#(falseValue,trueValue)[test==True]\n#(falseValue,trueValue)[bool(<expression>)]\n\n\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |